code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import getpass
import os
import sys
import math
from io import StringIO
import shutil
import datetime
from os.path import splitext
from difflib import unified_diff
import pytest
from astropy.io import fits
from astropy.io.fits import FITSDiff
from astropy.utils.data import conf
import numpy as np
import stwcs
from stsci.tools import fileutil
from ci_watson.artifactory_helpers import get_bigdata, generate_upload_schema
from ci_watson.hst_helpers import download_crds, ref_from_image
# Base classes for actual tests.
# NOTE: Named in a way so pytest will not pick them up here.
@pytest.mark.bigdata
class BaseCal:
    """Base class for big-data regression tests of HST calibration code.

    Subclasses configure the CRDS environment variable name (``refstr``),
    Artifactory input/reference locations, and the FITS header keywords to
    ignore during comparisons.
    """
    # Directory to restore in teardown_class(); captured at class-definition time.
    prevdir = os.getcwd()
    use_ftp_crds = True
    timeout = 30 # seconds
    tree = 'dev'
    # Numpy default for allclose comparison
    rtol = 1e-6
    atol = 1e-5
    # To be defined by instrument
    refstr = ''
    prevref = ''
    input_loc = ''
    ref_loc = ''
    ignore_keywords = []
    # To be defined by individual test
    subdir = ''
    @pytest.fixture(autouse=True)
    def setup_class(self, tmpdir, envopt, pytestconfig):
        """
        Run test in own dir so we can keep results separate from
        other tests.

        Parameters
        ----------
        tmpdir : py.path.local
            pytest-provided temporary directory for this test.
        envopt : str
            Artifactory tree name ('dev', etc.) from the pytest config.
        pytestconfig : _pytest.config.Config
            Used to read ``inputs_root``/``results_root`` ini values.
        """
        # Create (or reuse) a per-test working directory under tmpdir.
        if not tmpdir.ensure(self.subdir, dir=True):
            p = tmpdir.mkdir(self.subdir).strpath
        else:
            p = tmpdir.join(self.subdir).strpath
        os.chdir(p)
        # NOTE: This could be explicitly controlled using pytest fixture
        # but too many ways to do the same thing would be confusing.
        # Refine this logic if using pytest fixture.
        # HSTCAL cannot open remote CRDS on FTP but central storage is okay.
        # So use central storage if available to avoid FTP.
        if self.prevref is None or self.prevref.startswith(('ftp', 'http')):
            # Point the CRDS env var at the local working dir so reference
            # files are downloaded locally instead of read over FTP/HTTP.
            os.environ[self.refstr] = p + os.sep
            self.use_ftp_crds = True
        # Turn off Astrometry updates
        os.environ['ASTROMETRY_STEP_CONTROL'] = 'OFF'
        # This controls astropy.io.fits timeout
        conf.remote_timeout = self.timeout
        # Update tree to point to correct environment
        self.tree = envopt
        # Collect pytest configuration values specified in setup.cfg or pytest.ini
        self.inputs_root = pytestconfig.getini('inputs_root')[0]
        self.results_root = pytestconfig.getini('results_root')[0]
    def teardown_class(self):
        """Reset path and variables."""
        conf.reset('remote_timeout')
        os.chdir(self.prevdir)
        # Restore the caller's CRDS environment variable if we replaced it.
        if self.use_ftp_crds and self.prevref is not None:
            os.environ[self.refstr] = self.prevref
    def get_data(self, *args):
        """
        Download `filename` into working directory using
        `get_bigdata`. This will then return the full path to
        the local copy of the file.
        """
        local_file = get_bigdata(self.inputs_root, self.tree, self.input_loc, *args)
        return local_file
    def get_input_file(self, *args, refsep='$'):
        """
        Download or copy input file (e.g., RAW) into the working directory.
        The associated CRDS reference files in ``refstr`` are also
        downloaded, if necessary.

        Parameters
        ----------
        *args : str
            Path components passed through to :meth:`get_data`.
        refsep : str
            Separator that marks a CRDS-style reference name
            (e.g. ``'jref$name.fits'``); names without it are treated
            as local files under ``customRef``.

        Returns
        -------
        filename : str
            Local path of the downloaded input file.
        """
        filename = self.get_data(*args)
        ref_files = ref_from_image(filename, ['IDCTAB', 'OFFTAB', 'NPOLFILE', 'D2IMFILE', 'DGEOFILE'])
        print("Looking for REF_FILES: {}".format(ref_files))
        for ref_file in ref_files:
            # Skip keywords that are blank in the image header.
            if ref_file.strip() == '':
                continue
            if refsep not in ref_file:  # Local file
                refname = self.get_data('customRef', ref_file)
            else:  # Download from FTP, if applicable
                # NOTE(review): os.path.join with a single argument is a
                # no-op; refname is simply the CRDS-style name here.
                refname = os.path.join(ref_file)
                if self.use_ftp_crds:
                    download_crds(refname, self.timeout)
        return filename
    def compare_outputs(self, outputs, raise_error=True):
        """
        Compare output with "truth" using appropriate
        diff routine; namely,
        ``fitsdiff`` for FITS file comparisons
        ``unified_diff`` for ASCII products.
        Parameters
        ----------
        outputs : list of tuple
            A list of tuples, each containing filename (without path)
            of CALXXX output and truth, in that order.
        raise_error : bool
            Raise ``AssertionError`` if difference is found.
        Returns
        -------
        report : str
            Report from ``fitsdiff``.
            This is part of error message if ``raise_error=True``.
        """
        all_okay = True
        creature_report = ''
        # Create instructions for uploading results to artifactory for use
        # as new comparison/truth files
        testpath, testname = os.path.split(os.path.abspath(os.curdir))
        # organize results by day test was run...could replace with git-hash
        whoami = getpass.getuser() or 'nobody'
        dt = datetime.datetime.now().strftime("%d%b%YT")
        ttime = datetime.datetime.now().strftime("%H_%M_%S")
        user_tag = 'NOT_CI_{}_{}'.format(whoami, ttime)
        build_tag = os.environ.get('BUILD_TAG', user_tag)
        build_suffix = os.environ.get('BUILD_MATRIX_SUFFIX', 'standalone')
        testdir = "{}_{}_{}".format(testname, build_tag, build_suffix)
        # Artifactory upload target for candidate new truth files.
        tree = os.path.join(self.results_root, self.input_loc,
                            dt, testdir) + os.sep
        updated_outputs = []
        for actual, desired in outputs:
            # Get "truth" image
            s = self.get_data('truth', desired)
            if s is not None:
                desired = s
            if actual.endswith('fits'):
                # Working with FITS files...
                fdiff = FITSDiff(actual, desired, rtol=self.rtol, atol=self.atol,
                                 ignore_keywords=self.ignore_keywords)
                creature_report += fdiff.report()
                if not fdiff.identical:
                    # Only keep track of failed results which need to
                    # be used to replace the truth files (if OK).
                    updated_outputs.append((actual, desired))
                if not fdiff.identical and all_okay:
                    all_okay = False
            else:
                # Try ASCII-based diff
                with open(actual) as afile:
                    actual_lines = afile.readlines()
                with open(desired) as dfile:
                    desired_lines = dfile.readlines()
                udiff = unified_diff(actual_lines, desired_lines,
                                     fromfile=actual, tofile=desired)
                # unified_diff yields lines; capture them by temporarily
                # redirecting sys.stdout into a StringIO buffer.
                old_stdout = sys.stdout
                udiffIO = StringIO()
                sys.stdout = udiffIO
                sys.stdout.writelines(udiff)
                sys.stdout = old_stdout
                udiff_report = udiffIO.getvalue()
                creature_report += udiff_report
                # A report longer than 2 chars means real differences.
                if len(udiff_report) > 2 and all_okay:
                    all_okay = False
                if len(udiff_report) > 2:
                    # Only keep track of failed results which need to
                    # be used to replace the truth files (if OK).
                    updated_outputs.append((actual, desired))
        if not all_okay:
            # Write out JSON file to enable retention of different results
            new_truths = [os.path.abspath(i[1]) for i in updated_outputs]
            for files in updated_outputs:
                print("Renaming {} as new 'truth' file: {}".format(
                    files[0], files[1]))
                # Move the failed output over the local truth copy so it
                # can be uploaded as a candidate new truth file.
                shutil.move(files[0], files[1])
            log_pattern = [os.path.join(os.path.dirname(x), '*.log') for x in new_truths]
            generate_upload_schema(pattern=new_truths + log_pattern,
                                   testname=testname,
                                   target= tree)
        if not all_okay and raise_error:
            raise AssertionError(os.linesep + creature_report)
        return creature_report
class BaseACS(BaseCal):
    """BaseCal configuration for ACS tests (CRDS env var ``jref``)."""
    refstr = 'jref'
    # Remember the caller's jref value so teardown_class() can restore it.
    prevref = os.environ.get(refstr)
    input_loc = 'acs'
    ref_loc = 'acs'
    # Header keywords excluded from FITSDiff comparisons.
    ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
                       'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
                       'history', 'prod_ver', 'rulefile']
class BaseACSHRC(BaseACS):
    """ACS/HRC-specific input and reference locations."""
    input_loc = 'acs/hrc'
    ref_loc = 'acs/hrc/ref'
class BaseACSWFC(BaseACS):
    """ACS/WFC-specific input and reference locations."""
    input_loc = 'acs/wfc'
    ref_loc = 'acs/wfc/ref'
class BaseWFC3(BaseCal):
    """BaseCal configuration for WFC3 tests (CRDS env var ``iref``)."""
    refstr = 'iref'
    input_loc = 'wfc3'
    ref_loc = 'wfc3/ref'
    # Remember the caller's iref value so teardown_class() can restore it.
    prevref = os.environ.get(refstr)
    # Header keywords excluded from FITSDiff comparisons.
    ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
                       'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
                       'history', 'prod_ver', 'rulefile']
class BaseSTIS(BaseCal):
    """BaseCal configuration for STIS tests (CRDS env var ``oref``)."""
    refstr = 'oref'
    # Remember the caller's oref value so teardown_class() can restore it.
    prevref = os.environ.get(refstr)
    input_loc = 'stis'
    ref_loc = 'stis/ref'
    # Header keywords excluded from FITSDiff comparisons.
    ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
                       'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
                       'history', 'prod_ver', 'rulefile']
class BaseWFPC2(BaseCal):
    """BaseCal configuration for WFPC2 tests (CRDS env var ``uref``)."""
    refstr = 'uref'
    # Remember the caller's uref value so teardown_class() can restore it.
    prevref = os.environ.get(refstr)
    input_loc = 'wfpc2'
    ref_loc = 'wfpc2/ref'
    # Header keywords excluded from FITSDiff comparisons.
    ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
                       'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
                       'history', 'prod_ver', 'rulefile']
def centroid_compare(centroid):
    """Sort key for centroid-distance records: the second element.

    Used by ``BaseUnit.centroid_distances`` to order its
    ``[dist, dflux, center1, center2]`` records.
    """
    key = centroid[1]
    return key
class BaseUnit(BaseCal):
    """Unit-test helpers (centroiding, image synthesis, WCS I/O) on top of
    the ACS ``BaseCal`` configuration."""
    # Extra border (pixels) around centroids in printed sub-images.
    buff = 0
    refstr = 'jref'
    prevref = os.environ.get(refstr)
    input_loc = 'acs'
    ref_loc = 'acs'
    ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
                       'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
                       'history', 'prod_ver', 'rulefile']
    atol = 1.0e-5
    def bound_image(self, image):
        """
        Compute region where image is non-zero

        Returns
        -------
        (ymin, ymax, xmin, xmax) : tuple of int
            Inclusive bounding box of the non-zero pixels.
        """
        coords = np.nonzero(image)
        ymin = coords[0].min()
        ymax = coords[0].max()
        xmin = coords[1].min()
        xmax = coords[1].max()
        return (ymin, ymax, xmin, xmax)
    def centroid(self, image, size, center):
        """
        Compute the centroid of a rectangular area

        The box is ``size`` pixels wide, anchored at ``center`` (y, x) and
        clipped to the image. Returns ``[y_centroid, x_centroid, flux]``,
        or None when the box contains no flux.
        """
        ylo = int(center[0]) - size // 2
        yhi = min(ylo + size, image.shape[0])
        xlo = int(center[1]) - size // 2
        xhi = min(xlo + size, image.shape[1])
        center = [0.0, 0.0, 0.0]
        for y in range(ylo, yhi):
            for x in range(xlo, xhi):
                # Accumulate flux-weighted coordinates and total flux.
                center[0] += y * image[y,x]
                center[1] += x * image[y,x]
                center[2] += image[y,x]
        if center[2] == 0.0: return None
        center[0] /= center[2]
        center[1] /= center[2]
        return center
    def centroid_close(self, list_of_centroids, size, point):
        """
        Find if any centroid is close to a point

        Returns 1 if any centroid lies within ``size / 2`` of ``point``
        in both axes, else 0.
        """
        for i in range(len(list_of_centroids)-1, -1, -1):
            if (abs(list_of_centroids[i][0] - point[0]) < size / 2 and
                abs(list_of_centroids[i][1] - point[1]) < size / 2):
                return 1
        return 0
    def centroid_distances(self, image1, image2, amp, size):
        """
        Compute a list of centroids and the distances between them in two images

        Returns a list of ``[dist, dflux, center1, center2]`` records,
        sorted by ``centroid_compare`` (i.e. by the flux difference).
        """
        distances = []
        list_of_centroids, lst_pts = self.centroid_list(image2, amp, size)
        for center2, pt in zip(list_of_centroids, lst_pts):
            center1 = self.centroid(image1, size, pt)
            if center1 is None: continue
            disty = center2[0] - center1[0]
            distx = center2[1] - center1[1]
            dist = math.sqrt(disty * disty + distx * distx)
            dflux = abs(center2[2] - center1[2])
            distances.append([dist, dflux, center1, center2])
        # NOTE(review): centroid_compare keys on element 1 (flux
        # difference), not element 0 (distance) — confirm intended sort.
        distances.sort(key=centroid_compare)
        return distances
    def centroid_list(self, image, amp, size):
        """
        Find the next centroid

        Computes centroids around every pixel brighter than ``amp`` that
        is not already close to a previously found centroid.
        """
        list_of_centroids = []
        list_of_points = []
        points = np.transpose(np.nonzero(image > amp))
        for point in points:
            if not self.centroid_close(list_of_centroids, size, point):
                center = self.centroid(image, size, point)
                list_of_centroids.append(center)
                list_of_points.append(point)
        return list_of_centroids, list_of_points
    def centroid_statistics(self, title, fname, image1, image2, amp, size):
        """
        write centroid statistics to compare differences btw two images

        Writes a human-readable report to ``fname`` and returns a tuple of
        the (minimum, median, maximum) distances between matched centroids.
        """
        stats = ("minimum", "median", "maximum")
        images = (None, None, image1, image2)
        im_type = ("", "", "test", "reference")
        diff = []
        distances = self.centroid_distances(image1, image2, amp, size)
        # Indexes of the min / median / max records in the sorted list.
        indexes = (0, len(distances)//2, len(distances)-1)
        fd = open(fname, 'w')
        fd.write("*** %s ***\n" % title)
        if len(distances) == 0:
            diff = [0.0, 0.0, 0.0]
            fd.write("No matches!!\n")
        elif len(distances) == 1:
            diff = [distances[0][0], distances[0][0], distances[0][0]]
            fd.write("1 match\n")
            fd.write("distance = %f flux difference = %f\n" % (distances[0][0], distances[0][1]))
            # j = 2, 3 select the test / reference centroids of the record.
            for j in range(2, 4):
                ylo = int(distances[0][j][0]) - (1+self.buff)
                yhi = int(distances[0][j][0]) + (2+self.buff)
                xlo = int(distances[0][j][1]) - (1+self.buff)
                xhi = int(distances[0][j][1]) + (2+self.buff)
                subimage = images[j][ylo:yhi,xlo:xhi]
                fd.write("\n%s image centroid = (%f,%f) image flux = %f\n" %
                         (im_type[j], distances[0][j][0], distances[0][j][1], distances[0][j][2]))
                fd.write(str(subimage) + "\n")
        else:
            fd.write("%d matches\n" % len(distances))
            for k in range(0,3):
                i = indexes[k]
                diff.append(distances[i][0])
                fd.write("\n%s distance = %f flux difference = %f\n" % (stats[k], distances[i][0], distances[i][1]))
                for j in range(2, 4):
                    ylo = int(distances[i][j][0]) - (1+self.buff)
                    yhi = int(distances[i][j][0]) + (2+self.buff)
                    xlo = int(distances[i][j][1]) - (1+self.buff)
                    xhi = int(distances[i][j][1]) + (2+self.buff)
                    subimage = images[j][ylo:yhi,xlo:xhi]
                    fd.write("\n%s %s image centroid = (%f,%f) image flux = %f\n" %
                             (stats[k], im_type[j], distances[i][j][0], distances[i][j][1], distances[i][j][2]))
                    fd.write(str(subimage) + "\n")
        fd.close()
        return tuple(diff)
    def make_point_image(self, input_image, point, value):
        """
        Create an image with a single point set
        """
        output_image = np.zeros(input_image.shape, dtype=input_image.dtype)
        output_image[point] = value
        return output_image
    def make_grid_image(self, input_image, spacing, value):
        """
        Create an image with points on a grid set
        """
        output_image = np.zeros(input_image.shape, dtype=input_image.dtype)
        shape = output_image.shape
        for y in range(spacing//2, shape[0], spacing):
            for x in range(spacing//2, shape[1], spacing):
                output_image[y,x] = value
        return output_image
    def print_wcs(self, title, wcs):
        """
        Print the wcs header cards
        """
        print("=== %s ===" % title)
        print(wcs.to_header_string())
    def read_image(self, filename):
        """
        Read the image from a fits file

        Reads the data array of extension 1 — assumes the image lives in
        the first extension, not the primary HDU.
        """
        hdu = fits.open(filename)
        image = hdu[1].data
        hdu.close()
        return image
    def read_wcs(self, filename):
        """
        Read the wcs of a fits file
        """
        hdu = fits.open(filename)
        wcs = stwcs.wcsutil.HSTWCS(hdu, 1)
        hdu.close()
        return wcs
    def write_wcs(self, hdu, image_wcs):
        """
        Update header with WCS keywords
        """
        hdu.header['ORIENTAT'] = image_wcs.orientat
        hdu.header['CD1_1'] = image_wcs.wcs.cd[0][0]
        hdu.header['CD1_2'] = image_wcs.wcs.cd[0][1]
        hdu.header['CD2_1'] = image_wcs.wcs.cd[1][0]
        hdu.header['CD2_2'] = image_wcs.wcs.cd[1][1]
        hdu.header['CRVAL1'] = image_wcs.wcs.crval[0]
        hdu.header['CRVAL2'] = image_wcs.wcs.crval[1]
        hdu.header['CRPIX1'] = image_wcs.wcs.crpix[0]
        hdu.header['CRPIX2'] = image_wcs.wcs.crpix[1]
        hdu.header['CTYPE1'] = image_wcs.wcs.ctype[0]
        hdu.header['CTYPE2'] = image_wcs.wcs.ctype[1]
        # Velocity-aberration factor reset to the identity value.
        hdu.header['VAFACTOR'] = 1.0
    def write_image(self, filename, wcs, *args):
        """
        Read the image from a fits file

        Despite the summary above, this *writes* a MEF FITS file: one
        ImageHDU per array in ``args``, named SCI/WHT/CTX in order, each
        stamped with the WCS keywords from ``wcs``.
        """
        extarray = ['SCI', 'WHT', 'CTX']
        pimg = fits.HDUList()
        phdu = fits.PrimaryHDU()
        phdu.header['NDRIZIM'] = 1
        phdu.header['ROOTNAME'] = filename
        pimg.append(phdu)
        for img in args:
            # Create a MEF file with the specified extname
            extn = extarray.pop(0)
            extname = fileutil.parseExtn(extn)
            ehdu = fits.ImageHDU(data=img)
            ehdu.header['EXTNAME'] = extname[0]
            ehdu.header['EXTVER'] = extname[1]
            self.write_wcs(ehdu, wcs)
            pimg.append(ehdu)
        pimg.writeto(filename)
        del pimg
def add_suffix(fname, suffix, range=None):
    """Add suffix to file name
    Parameters
    ----------
    fname: str
        The file name to add the suffix to
    suffix: str
        The suffix to add
    range: range
        If specified, the set of indexes will be added to the
        outputs.
    Returns
    -------
    fname, fname_with_suffix
        2-tuple of the original file name and name with suffix.
        If `range` is defined, `fname_with_suffix` will be a list.
    """
    root, ext = splitext(fname)
    if range is not None:
        # One suffixed name per index, e.g. root_0_suffix.ext
        with_suffix = [root + '_' + str(idx) + '_' + suffix + ext
                       for idx in range]
    else:
        with_suffix = root + '_' + suffix + ext
    return fname, with_suffix
| [
"math.sqrt",
"astropy.utils.data.conf.reset",
"difflib.unified_diff",
"stwcs.wcsutil.HSTWCS",
"astropy.io.fits.FITSDiff",
"ci_watson.artifactory_helpers.get_bigdata",
"astropy.io.fits.open",
"getpass.getuser",
"pytest.fixture",
"sys.stdout.writelines",
"ci_watson.hst_helpers.ref_from_image",
"... | [((635, 646), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (644, 646), False, 'import os\n'), ((984, 1012), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (998, 1012), False, 'import pytest\n'), ((8054, 8076), 'os.environ.get', 'os.environ.get', (['refstr'], {}), '(refstr)\n', (8068, 8076), False, 'import os\n'), ((8598, 8620), 'os.environ.get', 'os.environ.get', (['refstr'], {}), '(refstr)\n', (8612, 8620), False, 'import os\n'), ((8886, 8908), 'os.environ.get', 'os.environ.get', (['refstr'], {}), '(refstr)\n', (8900, 8908), False, 'import os\n'), ((9223, 9245), 'os.environ.get', 'os.environ.get', (['refstr'], {}), '(refstr)\n', (9237, 9245), False, 'import os\n'), ((9629, 9651), 'os.environ.get', 'os.environ.get', (['refstr'], {}), '(refstr)\n', (9643, 9651), False, 'import os\n'), ((18226, 18241), 'os.path.splitext', 'splitext', (['fname'], {}), '(fname)\n', (18234, 18241), False, 'from os.path import splitext\n'), ((1354, 1365), 'os.chdir', 'os.chdir', (['p'], {}), '(p)\n', (1362, 1365), False, 'import os\n'), ((2436, 2464), 'astropy.utils.data.conf.reset', 'conf.reset', (['"""remote_timeout"""'], {}), "('remote_timeout')\n", (2446, 2464), False, 'from astropy.utils.data import conf\n'), ((2473, 2495), 'os.chdir', 'os.chdir', (['self.prevdir'], {}), '(self.prevdir)\n', (2481, 2495), False, 'import os\n'), ((2839, 2902), 'ci_watson.artifactory_helpers.get_bigdata', 'get_bigdata', (['self.inputs_root', 'self.tree', 'self.input_loc', '*args'], {}), '(self.inputs_root, self.tree, self.input_loc, *args)\n', (2850, 2902), False, 'from ci_watson.artifactory_helpers import get_bigdata, generate_upload_schema\n'), ((3241, 3327), 'ci_watson.hst_helpers.ref_from_image', 'ref_from_image', (['filename', "['IDCTAB', 'OFFTAB', 'NPOLFILE', 'D2IMFILE', 'DGEOFILE']"], {}), "(filename, ['IDCTAB', 'OFFTAB', 'NPOLFILE', 'D2IMFILE',\n 'DGEOFILE'])\n", (3255, 3327), False, 'from ci_watson.hst_helpers import download_crds, 
ref_from_image\n'), ((5092, 5129), 'os.environ.get', 'os.environ.get', (['"""BUILD_TAG"""', 'user_tag'], {}), "('BUILD_TAG', user_tag)\n", (5106, 5129), False, 'import os\n'), ((5154, 5205), 'os.environ.get', 'os.environ.get', (['"""BUILD_MATRIX_SUFFIX"""', '"""standalone"""'], {}), "('BUILD_MATRIX_SUFFIX', 'standalone')\n", (5168, 5205), False, 'import os\n'), ((10039, 10056), 'numpy.nonzero', 'np.nonzero', (['image'], {}), '(image)\n', (10049, 10056), True, 'import numpy as np\n'), ((15077, 15129), 'numpy.zeros', 'np.zeros', (['input_image.shape'], {'dtype': 'input_image.dtype'}), '(input_image.shape, dtype=input_image.dtype)\n', (15085, 15129), True, 'import numpy as np\n'), ((15352, 15404), 'numpy.zeros', 'np.zeros', (['input_image.shape'], {'dtype': 'input_image.dtype'}), '(input_image.shape, dtype=input_image.dtype)\n', (15360, 15404), True, 'import numpy as np\n'), ((15913, 15932), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (15922, 15932), False, 'from astropy.io import fits\n'), ((16112, 16131), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (16121, 16131), False, 'from astropy.io import fits\n'), ((16147, 16175), 'stwcs.wcsutil.HSTWCS', 'stwcs.wcsutil.HSTWCS', (['hdu', '(1)'], {}), '(hdu, 1)\n', (16167, 16175), False, 'import stwcs\n'), ((17117, 17131), 'astropy.io.fits.HDUList', 'fits.HDUList', ([], {}), '()\n', (17129, 17131), False, 'from astropy.io import fits\n'), ((17147, 17164), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (17162, 17164), False, 'from astropy.io import fits\n'), ((4746, 4772), 'os.path.abspath', 'os.path.abspath', (['os.curdir'], {}), '(os.curdir)\n', (4761, 4772), False, 'import os\n'), ((4868, 4885), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (4883, 4885), False, 'import getpass\n'), ((5292, 5352), 'os.path.join', 'os.path.join', (['self.results_root', 'self.input_loc', 'dt', 'testdir'], {}), '(self.results_root, self.input_loc, dt, 
testdir)\n', (5304, 5352), False, 'import os\n'), ((7712, 7804), 'ci_watson.artifactory_helpers.generate_upload_schema', 'generate_upload_schema', ([], {'pattern': '(new_truths + log_pattern)', 'testname': 'testname', 'target': 'tree'}), '(pattern=new_truths + log_pattern, testname=testname,\n target=tree)\n', (7734, 7804), False, 'from ci_watson.artifactory_helpers import get_bigdata, generate_upload_schema\n'), ((11783, 11823), 'math.sqrt', 'math.sqrt', (['(disty * disty + distx * distx)'], {}), '(disty * disty + distx * distx)\n', (11792, 11823), False, 'import math\n'), ((12198, 12221), 'numpy.nonzero', 'np.nonzero', (['(image > amp)'], {}), '(image > amp)\n', (12208, 12221), True, 'import numpy as np\n'), ((17411, 17435), 'stsci.tools.fileutil.parseExtn', 'fileutil.parseExtn', (['extn'], {}), '(extn)\n', (17429, 17435), False, 'from stsci.tools import fileutil\n'), ((17456, 17479), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', ([], {'data': 'img'}), '(data=img)\n', (17469, 17479), False, 'from astropy.io import fits\n'), ((3681, 3703), 'os.path.join', 'os.path.join', (['ref_file'], {}), '(ref_file)\n', (3693, 3703), False, 'import os\n'), ((4911, 4934), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4932, 4934), False, 'import datetime\n'), ((4971, 4994), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4992, 4994), False, 'import datetime\n'), ((5708, 5808), 'astropy.io.fits.FITSDiff', 'FITSDiff', (['actual', 'desired'], {'rtol': 'self.rtol', 'atol': 'self.atol', 'ignore_keywords': 'self.ignore_keywords'}), '(actual, desired, rtol=self.rtol, atol=self.atol, ignore_keywords=\n self.ignore_keywords)\n', (5716, 5808), False, 'from astropy.io.fits import FITSDiff\n'), ((6492, 6566), 'difflib.unified_diff', 'unified_diff', (['actual_lines', 'desired_lines'], {'fromfile': 'actual', 'tofile': 'desired'}), '(actual_lines, desired_lines, fromfile=actual, tofile=desired)\n', (6504, 6566), False, 'from difflib import 
unified_diff\n'), ((6671, 6681), 'io.StringIO', 'StringIO', ([], {}), '()\n', (6679, 6681), False, 'from io import StringIO\n'), ((6735, 6763), 'sys.stdout.writelines', 'sys.stdout.writelines', (['udiff'], {}), '(udiff)\n', (6756, 6763), False, 'import sys\n'), ((7361, 7382), 'os.path.abspath', 'os.path.abspath', (['i[1]'], {}), '(i[1])\n', (7376, 7382), False, 'import os\n'), ((7578, 7609), 'shutil.move', 'shutil.move', (['files[0]', 'files[1]'], {}), '(files[0], files[1])\n', (7589, 7609), False, 'import shutil\n'), ((3762, 3798), 'ci_watson.hst_helpers.download_crds', 'download_crds', (['refname', 'self.timeout'], {}), '(refname, self.timeout)\n', (3775, 3798), False, 'from ci_watson.hst_helpers import download_crds, ref_from_image\n'), ((7650, 7668), 'os.path.dirname', 'os.path.dirname', (['x'], {}), '(x)\n', (7665, 7668), False, 'import os\n')] |
import math
import numpy as np
'''
This is v1 code using the old input format! If you are new please look at v2
'''
'''
Hi! You can use this code as a template to create your own bot. Also if you don't mind writing a blurb
about your bot's strategy you can put it as a comment here. I'd appreciate it, especially if I can help
debug any runtime issues that occur with your bot.
'''
# Optional Information. Fill out only if you wish.
# Your real name:
# Contact Email:
# Can this bot's code be shared publicly (Default: No):
# Can non-tournament gameplay of this bot be displayed publicly (Default: No):
# This is the name that will be displayed on screen in the real time display!
BOT_NAME = "AlwaysTowardsBallAgent"
class agent:
    """Bot that always steers toward the ball (v1 input format)."""
    def __init__(self, team):
        self.team = team # use self.team to determine what team you are. I will set to "blue" or "orange"
    def convert_new_input_to_old_input(self, sharedValue):
        """Translate the new GameTickPacket into the legacy flat arrays.

        Returns
        -------
        (inputs, scoring) : tuple of numpy.ndarray
            ``inputs`` is the 38-element v1 state vector; ``scoring``
            holds 12 scoreboard values.
        """
        # Unreal-units-to-game-values scale and rotation conversion factor.
        UU_TO_GAMEVALUES = 50
        UCONST_Pi = 3.1415926
        URotation180 = float(32768)
        URotationToRadians = UCONST_Pi / URotation180
        inputs = np.zeros(38)
        scoring = np.zeros(12)
        gameTickPacket = sharedValue.GameTickPacket
        numCars = gameTickPacket.numCars
        numBoosts = gameTickPacket.numBoosts
        # Car index 0 may be either team; map to blue/orange indexes.
        team1Blue = (gameTickPacket.gamecars[0].Team == 0)
        if team1Blue:
            blueIndex = 0
            orngIndex = 1
        else:
            blueIndex = 1
            orngIndex = 0
        # -------------------------------
        # First convert ball info
        # -------------------------------
        # Ball positions
        inputs[2] = gameTickPacket.gameball.Location.Y / UU_TO_GAMEVALUES
        inputs[7] = gameTickPacket.gameball.Location.X / UU_TO_GAMEVALUES
        inputs[17] = gameTickPacket.gameball.Location.Z / UU_TO_GAMEVALUES
        # Ball velocities
        inputs[28] = gameTickPacket.gameball.Velocity.X / UU_TO_GAMEVALUES
        inputs[29] = gameTickPacket.gameball.Velocity.Z / UU_TO_GAMEVALUES
        inputs[30] = gameTickPacket.gameball.Velocity.Y / UU_TO_GAMEVALUES
        # -------------------------------
        # Now do all scoreboard values
        # -------------------------------
        scoring[0] = gameTickPacket.gamecars[blueIndex].Score.Goals + gameTickPacket.gamecars[1].Score.OwnGoals # Blue Scoreboard Score
        scoring[1] = gameTickPacket.gamecars[orngIndex].Score.Goals + gameTickPacket.gamecars[0].Score.OwnGoals # Orange Scoreboard Score
        scoring[2] = gameTickPacket.gamecars[orngIndex].Score.Demolitions # Demos by orange
        scoring[3] = gameTickPacket.gamecars[blueIndex].Score.Demolitions # Demos by blue
        scoring[4] = gameTickPacket.gamecars[blueIndex].Score.Score # Blue points
        scoring[5] = gameTickPacket.gamecars[orngIndex].Score.Score # Orange points
        scoring[6] = gameTickPacket.gamecars[blueIndex].Score.Goals # Blue Goals
        scoring[7] = gameTickPacket.gamecars[blueIndex].Score.Saves # Blue Saves
        scoring[8] = gameTickPacket.gamecars[blueIndex].Score.Shots # Blue Shots
        scoring[9] = gameTickPacket.gamecars[orngIndex].Score.Goals # Orange Goals
        scoring[10] = gameTickPacket.gamecars[orngIndex].Score.Saves # Orange Saves
        scoring[11] = gameTickPacket.gamecars[orngIndex].Score.Shots # Orange Shots
        # -------------------------------
        # Now do all car values
        # -------------------------------
        # Blue pos
        inputs[1] = gameTickPacket.gamecars[blueIndex].Location.Y / UU_TO_GAMEVALUES
        inputs[5] = gameTickPacket.gamecars[blueIndex].Location.X / UU_TO_GAMEVALUES
        inputs[4] = gameTickPacket.gamecars[blueIndex].Location.Z / UU_TO_GAMEVALUES
        # Orange pos
        inputs[3] = gameTickPacket.gamecars[orngIndex].Location.Y / UU_TO_GAMEVALUES
        inputs[18] = gameTickPacket.gamecars[orngIndex].Location.X / UU_TO_GAMEVALUES
        # NOTE(review): inputs[17] was assigned the ball Z above and is
        # overwritten here — likely an index collision; confirm layout.
        inputs[17] = gameTickPacket.gamecars[orngIndex].Location.Z / UU_TO_GAMEVALUES
        # Blue velocity
        # NOTE(review): inputs[28..30] were assigned the ball velocity
        # above and are overwritten here — confirm intended indexes.
        inputs[28] = gameTickPacket.gamecars[blueIndex].Velocity.X / UU_TO_GAMEVALUES
        inputs[29] = gameTickPacket.gamecars[blueIndex].Velocity.Z / UU_TO_GAMEVALUES
        inputs[30] = gameTickPacket.gamecars[blueIndex].Velocity.Y / UU_TO_GAMEVALUES
        # Orange velocity
        inputs[34] = gameTickPacket.gamecars[orngIndex].Velocity.X / UU_TO_GAMEVALUES
        inputs[35] = gameTickPacket.gamecars[orngIndex].Velocity.Z / UU_TO_GAMEVALUES
        inputs[36] = gameTickPacket.gamecars[orngIndex].Velocity.Y / UU_TO_GAMEVALUES
        # Boost
        inputs[0] = gameTickPacket.gamecars[blueIndex].Boost
        inputs[37] = gameTickPacket.gamecars[orngIndex].Boost
        # Rotations (raw Unreal integer rotation units)
        bluePitch = float(gameTickPacket.gamecars[blueIndex].Rotation.Pitch)
        blueYaw = float(gameTickPacket.gamecars[blueIndex].Rotation.Yaw)
        blueRoll = float(gameTickPacket.gamecars[blueIndex].Rotation.Roll)
        orngPitch = float(gameTickPacket.gamecars[orngIndex].Rotation.Pitch)
        orngYaw = float(gameTickPacket.gamecars[orngIndex].Rotation.Yaw)
        orngRoll = float(gameTickPacket.gamecars[orngIndex].Rotation.Roll)
        # Blue rotations (3x3 rotation-matrix entries from Euler angles)
        inputs[8] = math.cos(bluePitch * URotationToRadians) * math.cos(blueYaw * URotationToRadians) # Rot 1
        inputs[9] = math.sin(blueRoll * URotationToRadians) * math.sin(bluePitch * URotationToRadians) * math.cos(blueYaw * URotationToRadians) - math.cos(blueRoll * URotationToRadians) * math.sin(blueYaw * URotationToRadians) # Rot2
        inputs[10] = -1 * math.cos(blueRoll * URotationToRadians) * math.sin(bluePitch * URotationToRadians) * math.cos(blueYaw * URotationToRadians) + math.sin(blueRoll * URotationToRadians) * math.sin(blueYaw * URotationToRadians) # Rot 3
        inputs[11] = math.cos(bluePitch * URotationToRadians) * math.sin(blueYaw * URotationToRadians) # Rot 4
        inputs[12] = math.sin(blueRoll * URotationToRadians) * math.sin(bluePitch * URotationToRadians) * math.sin(blueYaw * URotationToRadians) + math.cos(blueRoll * URotationToRadians) * math.cos(blueYaw * URotationToRadians) # Rot5
        inputs[13] = math.cos(blueYaw * URotationToRadians) * math.sin(blueRoll * URotationToRadians) - math.cos(blueRoll * URotationToRadians) * math.sin(bluePitch * URotationToRadians) * math.sin(blueYaw * URotationToRadians) # Rot 6
        inputs[14] = math.sin(bluePitch * URotationToRadians) # Rot 7
        inputs[15] = -1 * math.sin(blueRoll * URotationToRadians) * math.cos(bluePitch * URotationToRadians) # Rot 8
        inputs[16] = math.cos(blueRoll * URotationToRadians) * math.cos(bluePitch * URotationToRadians) # Rot 9
        # Orange rot
        inputs[19] = math.cos(orngPitch * URotationToRadians) * math.cos(orngYaw * URotationToRadians) # Rot 1
        inputs[20] = math.sin(orngRoll * URotationToRadians) * math.sin(orngPitch * URotationToRadians) * math.cos(orngYaw * URotationToRadians) - math.cos(orngRoll * URotationToRadians) * math.sin(orngYaw * URotationToRadians) # Rot2
        inputs[21] = -1 * math.cos(orngRoll * URotationToRadians) * math.sin(orngPitch * URotationToRadians) * math.cos(orngYaw * URotationToRadians) + math.sin(orngRoll * URotationToRadians) * math.sin(orngYaw * URotationToRadians) # Rot 3
        inputs[22] = math.cos(orngPitch * URotationToRadians) * math.sin(orngYaw * URotationToRadians) # Rot 4
        inputs[23] = math.sin(orngRoll * URotationToRadians) * math.sin(orngPitch * URotationToRadians) * math.sin(orngYaw * URotationToRadians) + math.cos(orngRoll * URotationToRadians) * math.cos(orngYaw * URotationToRadians) # Rot5
        inputs[24] = math.cos(orngYaw * URotationToRadians) * math.sin(orngRoll * URotationToRadians) - math.cos(orngRoll * URotationToRadians) * math.sin(orngPitch * URotationToRadians) * math.sin(orngYaw * URotationToRadians) # Rot 6
        inputs[25] = math.sin(orngPitch * URotationToRadians) # Rot 7
        inputs[26] = -1 * math.sin(orngRoll * URotationToRadians) * math.cos(orngPitch * URotationToRadians) # Rot 8
        inputs[27] = math.cos(orngRoll * URotationToRadians) * math.cos(orngPitch * URotationToRadians) # Rot 9
        return(inputs,scoring)
    def get_output_vector(self, sharedValue):
        """Return the v1 controller vector steering toward the ball.

        Output is [steer, pitch, throttle, jump, boost, handbrake, ?] in
        0..32767 units with 16383 as the neutral value; only steer varies.
        """
        input = self.convert_new_input_to_old_input(sharedValue)
        ball_z = input[0][2]
        ball_x = input[0][7]
        turn = 16383
        # Pick this bot's own position and facing entries from the vector.
        if (self.team == "blue"):
            player_z = input[0][1]
            player_x = input[0][5]
            player_rot1 = input[0][8]
            player_rot4 = input[0][11]
        else:
            player_z = input[0][3]
            player_x = input[0][18]
            player_rot1 = input[0][19]
            player_rot4 = input[0][22]
        # Need to handle atan2(0,0) case, aka straight up or down, eventually
        player_front_direction_in_radians = math.atan2(player_rot1, player_rot4)
        relative_angle_to_ball_in_radians = math.atan2((ball_x - player_x), (ball_z - player_z))
        # If the angular gap exceeds pi, shift both angles into [0, 2*pi)
        # so the turn-direction comparison below wraps correctly.
        if (not (abs(player_front_direction_in_radians - relative_angle_to_ball_in_radians) < math.pi)):
            # Add 2pi to negative values
            if (player_front_direction_in_radians < 0):
                player_front_direction_in_radians += 2 * math.pi
            if (relative_angle_to_ball_in_radians < 0):
                relative_angle_to_ball_in_radians += 2 * math.pi
        if (relative_angle_to_ball_in_radians > player_front_direction_in_radians):
            turn = 0
        else:
            turn = 32767
        return [turn, 16383, 32767, 0, 0, 0, 0]
| [
"math.cos",
"numpy.zeros",
"math.sin",
"math.atan2"
] | [((1070, 1082), 'numpy.zeros', 'np.zeros', (['(38)'], {}), '(38)\n', (1078, 1082), True, 'import numpy as np\n'), ((1095, 1107), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (1103, 1107), True, 'import numpy as np\n'), ((5959, 5999), 'math.sin', 'math.sin', (['(bluePitch * URotationToRadians)'], {}), '(bluePitch * URotationToRadians)\n', (5967, 5999), False, 'import math\n'), ((7392, 7432), 'math.sin', 'math.sin', (['(orngPitch * URotationToRadians)'], {}), '(orngPitch * URotationToRadians)\n', (7400, 7432), False, 'import math\n'), ((8229, 8265), 'math.atan2', 'math.atan2', (['player_rot1', 'player_rot4'], {}), '(player_rot1, player_rot4)\n', (8239, 8265), False, 'import math\n'), ((8304, 8352), 'math.atan2', 'math.atan2', (['(ball_x - player_x)', '(ball_z - player_z)'], {}), '(ball_x - player_x, ball_z - player_z)\n', (8314, 8352), False, 'import math\n'), ((4826, 4866), 'math.cos', 'math.cos', (['(bluePitch * URotationToRadians)'], {}), '(bluePitch * URotationToRadians)\n', (4834, 4866), False, 'import math\n'), ((4869, 4907), 'math.cos', 'math.cos', (['(blueYaw * URotationToRadians)'], {}), '(blueYaw * URotationToRadians)\n', (4877, 4907), False, 'import math\n'), ((5395, 5435), 'math.cos', 'math.cos', (['(bluePitch * URotationToRadians)'], {}), '(bluePitch * URotationToRadians)\n', (5403, 5435), False, 'import math\n'), ((5438, 5476), 'math.sin', 'math.sin', (['(blueYaw * URotationToRadians)'], {}), '(blueYaw * URotationToRadians)\n', (5446, 5476), False, 'import math\n'), ((6070, 6110), 'math.cos', 'math.cos', (['(bluePitch * URotationToRadians)'], {}), '(bluePitch * URotationToRadians)\n', (6078, 6110), False, 'import math\n'), ((6134, 6173), 'math.cos', 'math.cos', (['(blueRoll * URotationToRadians)'], {}), '(blueRoll * URotationToRadians)\n', (6142, 6173), False, 'import math\n'), ((6176, 6216), 'math.cos', 'math.cos', (['(bluePitch * URotationToRadians)'], {}), '(bluePitch * URotationToRadians)\n', (6184, 6216), False, 'import math\n'), 
((6258, 6298), 'math.cos', 'math.cos', (['(orngPitch * URotationToRadians)'], {}), '(orngPitch * URotationToRadians)\n', (6266, 6298), False, 'import math\n'), ((6301, 6339), 'math.cos', 'math.cos', (['(orngYaw * URotationToRadians)'], {}), '(orngYaw * URotationToRadians)\n', (6309, 6339), False, 'import math\n'), ((6828, 6868), 'math.cos', 'math.cos', (['(orngPitch * URotationToRadians)'], {}), '(orngPitch * URotationToRadians)\n', (6836, 6868), False, 'import math\n'), ((6871, 6909), 'math.sin', 'math.sin', (['(orngYaw * URotationToRadians)'], {}), '(orngYaw * URotationToRadians)\n', (6879, 6909), False, 'import math\n'), ((7503, 7543), 'math.cos', 'math.cos', (['(orngPitch * URotationToRadians)'], {}), '(orngPitch * URotationToRadians)\n', (7511, 7543), False, 'import math\n'), ((7567, 7606), 'math.cos', 'math.cos', (['(orngRoll * URotationToRadians)'], {}), '(orngRoll * URotationToRadians)\n', (7575, 7606), False, 'import math\n'), ((7609, 7649), 'math.cos', 'math.cos', (['(orngPitch * URotationToRadians)'], {}), '(orngPitch * URotationToRadians)\n', (7617, 7649), False, 'import math\n'), ((5015, 5053), 'math.cos', 'math.cos', (['(blueYaw * URotationToRadians)'], {}), '(blueYaw * URotationToRadians)\n', (5023, 5053), False, 'import math\n'), ((5056, 5095), 'math.cos', 'math.cos', (['(blueRoll * URotationToRadians)'], {}), '(blueRoll * URotationToRadians)\n', (5064, 5095), False, 'import math\n'), ((5098, 5136), 'math.sin', 'math.sin', (['(blueYaw * URotationToRadians)'], {}), '(blueYaw * URotationToRadians)\n', (5106, 5136), False, 'import math\n'), ((5249, 5287), 'math.cos', 'math.cos', (['(blueYaw * URotationToRadians)'], {}), '(blueYaw * URotationToRadians)\n', (5257, 5287), False, 'import math\n'), ((5290, 5329), 'math.sin', 'math.sin', (['(blueRoll * URotationToRadians)'], {}), '(blueRoll * URotationToRadians)\n', (5298, 5329), False, 'import math\n'), ((5332, 5370), 'math.sin', 'math.sin', (['(blueYaw * URotationToRadians)'], {}), '(blueYaw * 
URotationToRadians)\n', (5340, 5370), False, 'import math\n'), ((5585, 5623), 'math.sin', 'math.sin', (['(blueYaw * URotationToRadians)'], {}), '(blueYaw * URotationToRadians)\n', (5593, 5623), False, 'import math\n'), ((5626, 5665), 'math.cos', 'math.cos', (['(blueRoll * URotationToRadians)'], {}), '(blueRoll * URotationToRadians)\n', (5634, 5665), False, 'import math\n'), ((5668, 5706), 'math.cos', 'math.cos', (['(blueYaw * URotationToRadians)'], {}), '(blueYaw * URotationToRadians)\n', (5676, 5706), False, 'import math\n'), ((5729, 5767), 'math.cos', 'math.cos', (['(blueYaw * URotationToRadians)'], {}), '(blueYaw * URotationToRadians)\n', (5737, 5767), False, 'import math\n'), ((5770, 5809), 'math.sin', 'math.sin', (['(blueRoll * URotationToRadians)'], {}), '(blueRoll * URotationToRadians)\n', (5778, 5809), False, 'import math\n'), ((5897, 5935), 'math.sin', 'math.sin', (['(blueYaw * URotationToRadians)'], {}), '(blueYaw * URotationToRadians)\n', (5905, 5935), False, 'import math\n'), ((6028, 6067), 'math.sin', 'math.sin', (['(blueRoll * URotationToRadians)'], {}), '(blueRoll * URotationToRadians)\n', (6036, 6067), False, 'import math\n'), ((6448, 6486), 'math.cos', 'math.cos', (['(orngYaw * URotationToRadians)'], {}), '(orngYaw * URotationToRadians)\n', (6456, 6486), False, 'import math\n'), ((6489, 6528), 'math.cos', 'math.cos', (['(orngRoll * URotationToRadians)'], {}), '(orngRoll * URotationToRadians)\n', (6497, 6528), False, 'import math\n'), ((6531, 6569), 'math.sin', 'math.sin', (['(orngYaw * URotationToRadians)'], {}), '(orngYaw * URotationToRadians)\n', (6539, 6569), False, 'import math\n'), ((6682, 6720), 'math.cos', 'math.cos', (['(orngYaw * URotationToRadians)'], {}), '(orngYaw * URotationToRadians)\n', (6690, 6720), False, 'import math\n'), ((6723, 6762), 'math.sin', 'math.sin', (['(orngRoll * URotationToRadians)'], {}), '(orngRoll * URotationToRadians)\n', (6731, 6762), False, 'import math\n'), ((6765, 6803), 'math.sin', 'math.sin', (['(orngYaw * 
URotationToRadians)'], {}), '(orngYaw * URotationToRadians)\n', (6773, 6803), False, 'import math\n'), ((7018, 7056), 'math.sin', 'math.sin', (['(orngYaw * URotationToRadians)'], {}), '(orngYaw * URotationToRadians)\n', (7026, 7056), False, 'import math\n'), ((7059, 7098), 'math.cos', 'math.cos', (['(orngRoll * URotationToRadians)'], {}), '(orngRoll * URotationToRadians)\n', (7067, 7098), False, 'import math\n'), ((7101, 7139), 'math.cos', 'math.cos', (['(orngYaw * URotationToRadians)'], {}), '(orngYaw * URotationToRadians)\n', (7109, 7139), False, 'import math\n'), ((7162, 7200), 'math.cos', 'math.cos', (['(orngYaw * URotationToRadians)'], {}), '(orngYaw * URotationToRadians)\n', (7170, 7200), False, 'import math\n'), ((7203, 7242), 'math.sin', 'math.sin', (['(orngRoll * URotationToRadians)'], {}), '(orngRoll * URotationToRadians)\n', (7211, 7242), False, 'import math\n'), ((7330, 7368), 'math.sin', 'math.sin', (['(orngYaw * URotationToRadians)'], {}), '(orngYaw * URotationToRadians)\n', (7338, 7368), False, 'import math\n'), ((7461, 7500), 'math.sin', 'math.sin', (['(orngRoll * URotationToRadians)'], {}), '(orngRoll * URotationToRadians)\n', (7469, 7500), False, 'import math\n'), ((4930, 4969), 'math.sin', 'math.sin', (['(blueRoll * URotationToRadians)'], {}), '(blueRoll * URotationToRadians)\n', (4938, 4969), False, 'import math\n'), ((4972, 5012), 'math.sin', 'math.sin', (['(bluePitch * URotationToRadians)'], {}), '(bluePitch * URotationToRadians)\n', (4980, 5012), False, 'import math\n'), ((5206, 5246), 'math.sin', 'math.sin', (['(bluePitch * URotationToRadians)'], {}), '(bluePitch * URotationToRadians)\n', (5214, 5246), False, 'import math\n'), ((5500, 5539), 'math.sin', 'math.sin', (['(blueRoll * URotationToRadians)'], {}), '(blueRoll * URotationToRadians)\n', (5508, 5539), False, 'import math\n'), ((5542, 5582), 'math.sin', 'math.sin', (['(bluePitch * URotationToRadians)'], {}), '(bluePitch * URotationToRadians)\n', (5550, 5582), False, 'import math\n'), 
((5812, 5851), 'math.cos', 'math.cos', (['(blueRoll * URotationToRadians)'], {}), '(blueRoll * URotationToRadians)\n', (5820, 5851), False, 'import math\n'), ((5854, 5894), 'math.sin', 'math.sin', (['(bluePitch * URotationToRadians)'], {}), '(bluePitch * URotationToRadians)\n', (5862, 5894), False, 'import math\n'), ((6363, 6402), 'math.sin', 'math.sin', (['(orngRoll * URotationToRadians)'], {}), '(orngRoll * URotationToRadians)\n', (6371, 6402), False, 'import math\n'), ((6405, 6445), 'math.sin', 'math.sin', (['(orngPitch * URotationToRadians)'], {}), '(orngPitch * URotationToRadians)\n', (6413, 6445), False, 'import math\n'), ((6639, 6679), 'math.sin', 'math.sin', (['(orngPitch * URotationToRadians)'], {}), '(orngPitch * URotationToRadians)\n', (6647, 6679), False, 'import math\n'), ((6933, 6972), 'math.sin', 'math.sin', (['(orngRoll * URotationToRadians)'], {}), '(orngRoll * URotationToRadians)\n', (6941, 6972), False, 'import math\n'), ((6975, 7015), 'math.sin', 'math.sin', (['(orngPitch * URotationToRadians)'], {}), '(orngPitch * URotationToRadians)\n', (6983, 7015), False, 'import math\n'), ((7245, 7284), 'math.cos', 'math.cos', (['(orngRoll * URotationToRadians)'], {}), '(orngRoll * URotationToRadians)\n', (7253, 7284), False, 'import math\n'), ((7287, 7327), 'math.sin', 'math.sin', (['(orngPitch * URotationToRadians)'], {}), '(orngPitch * URotationToRadians)\n', (7295, 7327), False, 'import math\n'), ((5164, 5203), 'math.cos', 'math.cos', (['(blueRoll * URotationToRadians)'], {}), '(blueRoll * URotationToRadians)\n', (5172, 5203), False, 'import math\n'), ((6597, 6636), 'math.cos', 'math.cos', (['(orngRoll * URotationToRadians)'], {}), '(orngRoll * URotationToRadians)\n', (6605, 6636), False, 'import math\n')] |
import numpy as np
import matplotlib.pyplot as plt
from gym.spaces import Discrete, Box
from tfg.games import GameEnv, WHITE, BLACK
class ConnectN(GameEnv):
    """Connect-N board game environment (Connect Four by default).

    The board is a (rows, cols) int8 grid storing WHITE (1), BLACK (-1)
    and 0 for empty cells.  Actions are column indices; a dropped token
    occupies the lowest free row of its column.  The first player to
    align ``n`` tokens horizontally, vertically or diagonally wins.
    """

    def __init__(self, n=4, rows=6, cols=7):
        """Create the environment.

        Args:
            n: number of tokens to connect to win.
            rows: board height.
            cols: board width (also the number of actions).

        Raises:
            ValueError: if neither dimension can fit ``n`` tokens, in
                which case no alignment would ever be possible.
        """
        if rows < n and cols < n:
            raise ValueError("invalid board shape and number to connect")
        self.observation_space = Box(BLACK, WHITE, shape=(rows, cols),
                                     dtype=np.int8)
        self.action_space = Discrete(cols)
        self._n = n
        self.board = None     # (rows, cols) int8 array, created in reset()
        self.indices = None   # next free row per column, created in reset()
        self._to_play = WHITE
        self._winner = None
        self._move_count = 0
        self.reset()

    @property
    def n(self):
        """Number of tokens that must be aligned to win."""
        return self._n

    @property
    def to_play(self):
        """Color (WHITE or BLACK) of the player to move next."""
        return self._to_play

    def step(self, action):
        """Drop a token in column ``action`` for the current player.

        Returns the gym 4-tuple ``(observation, reward, done, info)``;
        the reward is the winner's color (WHITE/BLACK) or 0.

        Raises:
            ValueError: if the chosen column is already full.
        """
        self._check_action(action)
        i, j = self.indices[action], action
        self.board[i, j] = self._to_play
        self.indices[action] += 1
        self._move_count += 1
        reward, done = self._check_board(i, j)
        if done:
            self._winner = reward
        # Switch turns: WHITE (1) <-> BLACK (-1).
        self._to_play *= -1
        info = {'to_play': self._to_play, 'winner': self._winner}
        return self.board.copy(), reward, done, info

    def legal_actions(self):
        """Return the list of column indices that are not yet full."""
        rows, cols = self.observation_space.shape
        return np.arange(0, cols)[self.indices < rows].tolist()

    def winner(self):
        """Return the winner's color, 0 for a draw, or None while unfinished."""
        return self._winner

    def reset(self):
        """Reset to an empty board with WHITE to move; return the observation."""
        self.board = np.zeros(shape=self.observation_space.shape, dtype=np.int8)
        self.indices = np.zeros(shape=(self.action_space.n,), dtype=np.int8)
        self._to_play = WHITE
        self._winner = None
        self._move_count = 0
        return self.board.copy()

    def render(self, mode='human'):
        """Print a Unicode view of the board to stdout."""
        mapping = {-1: '○', 0: ' ', 1: '●'}
        tokens = [[mapping[cell] for cell in row] for row in self.board]
        print(f"Connect {self._n}")
        # Rows are printed top-down, so reverse the bottom-up board storage.
        print("\n".join(
            ['|' + '|'.join([token for token in row]) + '|'
             for row in reversed(tokens)]
        ))
        cols = self.action_space.n
        print('-'.join(['+'] * (cols + 1)))
        print(' ' + ' '.join(['^'] * cols))
        print(' ' + ' '.join(str(i) for i in range(cols)))

    def _check_action(self, action):
        """Raise ValueError if ``action`` targets a full column."""
        rows, _ = self.observation_space.shape
        if self.indices[action] == rows:
            raise ValueError(f"found an illegal action {action}; "
                             f"legal actions are {self.legal_actions()}")

    def _check_board(self, i, j):
        """Check whether the token just placed at (i, j) ends the game.

        Returns:
            (reward, done): reward is the winner's color, 0 for a draw
            or an unfinished game; done is True when the game is over.
        """
        n = self._n
        # No alignment is possible before the (2n-1)-th move.
        if self._move_count < 2 * n - 1:
            return 0, False
        rows, cols = self.observation_space.shape
        possible_winner = self.board[i, j]
        # Horizontal line through row i.
        c = 0
        for t in self.board[i]:
            c = c + 1 if t == possible_winner else 0
            if c == self._n:
                return possible_winner, True
        # Vertical line through column j.
        c = 0
        for t in self.board[:, j]:
            c = c + 1 if t == possible_winner else 0
            if c == self._n:
                return possible_winner, True
        # Main diagonal through (i, j).
        d = np.diag(self.board, k=j - i)
        if len(d) >= self._n:
            c = 0
            for t in d:
                c = c + 1 if t == possible_winner else 0
                if c == self._n:
                    return possible_winner, True
        # Anti-diagonal through (i, j), obtained by flipping columns.
        d = np.diag(np.fliplr(self.board), k=(cols - 1 - j) - i)
        if len(d) >= self._n:
            c = 0
            for t in d:
                c = c + 1 if t == possible_winner else 0
                if c == self._n:
                    return possible_winner, True
        if self._move_count == rows * cols:
            # Draw
            return 0, True
        return 0, False
def n_connected_heuristic(n):
    """Build a board-evaluation heuristic for Connect-N positions.

    The returned function scans every row, column and diagonal of the
    board and scores runs of ``n`` equal tokens, discarding runs that
    are fenced in by opponent tokens on both sides.  WHITE runs add +1,
    BLACK runs add -1; the total is normalised by the board area.
    """
    def analyze_line(line):
        # s: committed score for this line.
        # t: tentative score of the current run; committed only once the
        #    run is known not to be surrounded by opponent tokens.
        s = 0
        t = 0
        prev = None
        count = 0
        zeros = 0
        # Check if aligned tokens are surrounded by opponents tokens
        surrounded = True
        for x in line:
            if x == 0:
                zeros += 1
                # Add remaining
                s += t
                t = 0
                surrounded = False
                if zeros == 2:
                    # Reset if we find two zeros
                    count = 0
                    zeros = 0
            else:
                zeros = 0
            if x == prev:
                count += 1
                if count == n:
                    # A full run of n equal tokens: tentatively score it
                    # with the token's sign (+1 white, -1 black).
                    t += x
                    count -= 1
            else:
                if not surrounded:
                    s += t
                t = 0
                # A run starting right after an opponent token is (so
                # far) surrounded on its left side.
                surrounded = prev != 0
                count = 1
            prev = x
        # NOTE(review): a run still pending in ``t`` when the line ends is
        # discarded — presumably treated as blocked by the board edge;
        # confirm that this is intended.
        return s

    def heuristic(observation, to_play=None):
        """Score a 2-D numpy board; positive values favour WHITE.

        ``to_play`` is accepted for interface compatibility but unused.
        """
        rows, cols = observation.shape
        s = 0
        for i in range(rows):
            s += analyze_line(observation[i])
        for j in range(cols):
            s += analyze_line(observation[:, j])
        # NOTE(review): valid diagonal offsets are -(rows-1)..cols-1; the
        # extra endpoints used here yield empty diagonals, which score 0.
        for k in range(-rows, cols + 1):
            s += analyze_line(np.diag(observation, k=k))
            s += analyze_line(np.diag(np.fliplr(observation), k=k))
        return s / np.multiply(*observation.shape)
    return heuristic
def plot_board(board, bg_color=None, white_tokens_color=None,
               black_tokens_color=None):
    """Draw a Connect-N board on the current matplotlib axes.

    ``board`` is a 2-D array holding WHITE (1), BLACK (-1) and 0 cells.
    Colors default to a blue background with red WHITE tokens and yellow
    BLACK tokens; pass RGB tuples with components in [0, 1] to override.
    """
    # Fall back to the default palette for any color not supplied.
    if bg_color is None:
        bg_color = (64 / 255, 128 / 255, 239 / 255)
    if white_tokens_color is None:
        white_tokens_color = (208 / 255, 26 / 255, 59 / 255)
    if black_tokens_color is None:
        black_tokens_color = (225 / 255, 241 / 255, 47 / 255)
    axes = plt.gca()
    axes.set_axis_off()
    axes.set_aspect('equal', adjustable='box')
    plt.gcf().patch.set_facecolor(bg_color)
    n_rows, n_cols = board.shape
    plt.xlim([0, n_cols])
    plt.ylim([0, n_rows])
    # One circle per cell: colored for tokens, white for empty cells.
    for row in range(n_rows):
        for col in range(n_cols):
            cell = board[row, col]
            if cell == WHITE:
                fill = white_tokens_color
            elif cell == BLACK:
                fill = black_tokens_color
            else:
                fill = 'w'
            axes.add_patch(
                plt.Circle((col + .5, row + .5), .4, color=fill, ec='k'))
| [
"numpy.multiply",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.gca",
"numpy.fliplr",
"matplotlib.pyplot.gcf",
"gym.spaces.Discrete",
"gym.spaces.Box",
"numpy.diag",
"numpy.zeros",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"numpy.arange"
] | [((5606, 5615), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5613, 5615), True, 'import matplotlib.pyplot as plt\n'), ((5764, 5783), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, cols]'], {}), '([0, cols])\n', (5772, 5783), True, 'import matplotlib.pyplot as plt\n'), ((5788, 5807), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, rows]'], {}), '([0, rows])\n', (5796, 5807), True, 'import matplotlib.pyplot as plt\n'), ((347, 399), 'gym.spaces.Box', 'Box', (['BLACK', 'WHITE'], {'shape': '(rows, cols)', 'dtype': 'np.int8'}), '(BLACK, WHITE, shape=(rows, cols), dtype=np.int8)\n', (350, 399), False, 'from gym.spaces import Discrete, Box\n'), ((465, 479), 'gym.spaces.Discrete', 'Discrete', (['cols'], {}), '(cols)\n', (473, 479), False, 'from gym.spaces import Discrete, Box\n'), ((1481, 1540), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.observation_space.shape', 'dtype': 'np.int8'}), '(shape=self.observation_space.shape, dtype=np.int8)\n', (1489, 1540), True, 'import numpy as np\n'), ((1564, 1617), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.action_space.n,)', 'dtype': 'np.int8'}), '(shape=(self.action_space.n,), dtype=np.int8)\n', (1572, 1617), True, 'import numpy as np\n'), ((3097, 3125), 'numpy.diag', 'np.diag', (['self.board'], {'k': '(j - i)'}), '(self.board, k=j - i)\n', (3104, 3125), True, 'import numpy as np\n'), ((3358, 3379), 'numpy.fliplr', 'np.fliplr', (['self.board'], {}), '(self.board)\n', (3367, 3379), True, 'import numpy as np\n'), ((5166, 5197), 'numpy.multiply', 'np.multiply', (['*observation.shape'], {}), '(*observation.shape)\n', (5177, 5197), True, 'import numpy as np\n'), ((6070, 6126), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(j + 0.5, i + 0.5)', '(0.4)'], {'color': 'color', 'ec': '"""k"""'}), "((j + 0.5, i + 0.5), 0.4, color=color, ec='k')\n", (6080, 6126), True, 'import matplotlib.pyplot as plt\n'), ((5052, 5077), 'numpy.diag', 'np.diag', (['observation'], {'k': 'k'}), '(observation, k=k)\n', (5059, 5077), True, 'import numpy 
as np\n'), ((5689, 5698), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5696, 5698), True, 'import matplotlib.pyplot as plt\n'), ((1338, 1356), 'numpy.arange', 'np.arange', (['(0)', 'cols'], {}), '(0, cols)\n', (1347, 1356), True, 'import numpy as np\n'), ((5117, 5139), 'numpy.fliplr', 'np.fliplr', (['observation'], {}), '(observation)\n', (5126, 5139), True, 'import numpy as np\n')] |
# further developed by <NAME>, <NAME>, <NAME> and <NAME>
import random
import sort_task_set
import math
import numpy
import task
# Module-level state shared by the task-generation functions below.
USet=[]  # per-task utilisation values filled by UUniFast_Discard
PSet=[]  # generated task.Task objects filled by the CSet_generate_* functions
# Period values (time units) that CSet_generate_rounded draws from.
possiblePeriods = [1, 2, 5, 10, 50, 100, 200, 1000]
def init():
    """Reset the module-level USet/PSet accumulators to empty lists."""
    global USet, PSet
    USet = []
    PSet = []
def taskGeneration_rounded(numTasks, uTotal):
    """Generate ``numTasks`` tasks with total utilisation ``uTotal`` percent.

    Utilisations come from UUniFast with discarding; periods are drawn
    from the fixed ``possiblePeriods`` table.  Returns the global PSet.
    """
    random.seed()
    init()
    UUniFast_Discard(numTasks, uTotal / 100)
    CSet_generate_rounded(1, 2)
    return PSet
def taskGeneration_rounded_random(numTasks, uTotal):
    """Like taskGeneration_rounded, but with log-uniform random periods.

    Returns the global PSet.
    """
    random.seed()
    init()
    UUniFast_Discard(numTasks, uTotal / 100)
    CSet_generate_rounded_random_periods(1, 2)
    return PSet
def UUniFast_Discard(n, U_avg):
    """Fill the global USet with ``n`` utilisations summing to ``U_avg``.

    Classic UUniFast with discarding: candidate sets are redrawn until
    every utilisation lies in (0.001, 0.5].
    """
    while True:
        remaining = U_avg
        for k in range(1, n):
            # Split one utilisation off the remaining budget.
            next_remaining = remaining * numpy.random.random() ** (1.0 / (n - k))
            USet.append(remaining - next_remaining)
            remaining = next_remaining
        USet.append(remaining)
        if max(USet) <= 0.5 and min(USet) > 0.001:
            return
        # Discard the rejected candidate set and draw again.
        del USet[:]
def CSet_generate_rounded(Pmin, numLog):
    """Turn the utilisations in USet into tasks with table-drawn periods.

    For each utilisation a period is drawn uniformly from
    ``possiblePeriods``; the deadline equals the period and the execution
    time is utilisation * period.  Tasks are appended to the global PSet.

    ``Pmin`` and ``numLog`` are unused here but kept for interface
    compatibility with CSet_generate_rounded_random_periods.
    """
    # The original ``while 1`` wrapper ran exactly once (unconditional
    # break) and the trailing ``del PSet[:]`` / ``del executions`` were
    # unreachable; the unused ``executions`` accumulator is dropped too.
    global USet, PSet
    for x, uti in enumerate(USet):
        # Draw a period index uniformly over the period table.
        p = random.randint(0, len(possiblePeriods) - 1)
        period = possiblePeriods[p]
        deadline = period
        execution = uti * period
        PSet.append(task.Task(x, period, deadline, execution))
def CSet_generate_rounded_random_periods(Pmin, numLog):
    """Turn the utilisations in USet into tasks with log-uniform periods.

    Tasks cycle through ``numLog`` decades: task ``x`` gets a period
    drawn uniformly from [Pmin*10^(x % numLog), Pmin*10^(x % numLog + 1)],
    rounded to 2 decimals; the deadline equals the period and the
    execution time is utilisation * period, rounded to 2 decimals.
    Tasks are appended to the global PSet.
    """
    # The original ``while 1`` wrapper ran exactly once (unconditional
    # break), the trailing ``del`` statements were unreachable, the
    # ``executions`` list was unused, and ``j`` always equaled the
    # enumerate index ``x``.
    global USet, PSet
    for x, uti in enumerate(USet):
        decade = x % numLog
        p = random.uniform(Pmin * math.pow(10, decade),
                           Pmin * math.pow(10, decade + 1))
        period = round(p, 2)
        deadline = round(p, 2)
        execution = round(uti * p, 2)
        PSet.append(task.Task(x, period, deadline, execution))
def mixed_task_set(tasks, factor):
    """Attach abnormal execution times (execution * factor) to every task
    and return the set sorted by period."""
    result = []
    # ``t`` avoids shadowing the imported ``task`` module.
    for t in tasks:
        t.abnormal_exe = t.execution * factor
        result.append(t)
    return sort_task_set.sort(result, 'period')
# Assign a processor to each task.
def add_processor_to_task(tasks, processorsNum):
    """Assign each task to a processor using worst-fit decreasing.

    Tasks are sorted by decreasing utilisation and each is placed on the
    currently least-utilised of ``processorsNum`` processors; the chosen
    index is stored on ``task.processor``.
    """
    processors = [0 for _ in range(processorsNum)]
    for t in tasks:
        # Bug fix: task objects are attribute-based everywhere else in
        # this module; the original subscripting (t["execution"]) would
        # raise TypeError on task.Task instances.
        t.uti = t.execution / t.period
    tasks = sort_task_set.sort(tasks, "uti")
    tasks.reverse()
    for t in tasks:
        processor = lowestUtilizationProcessor(processors)
        processors[processor] += t.uti
        t.processor = processor
def lowestUtilizationProcessor(processors):
    """Return the index of the least-utilised processor.

    Ties resolve to the lowest index, matching the original strict-<
    manual scan.  ``processors`` must be non-empty.
    """
    return min(range(len(processors)), key=processors.__getitem__)
# Assign a distinct random priority to each task.
def addPrioritiesToTasks(tasks):
    """Give each task a unique random priority in 1..len(tasks) and
    return the tasks sorted by priority."""
    remaining = list(range(len(tasks)))
    prioritized = []
    for t in tasks:
        # Pick a random slot among the still-unused priorities.
        slot = int(random.random() * len(remaining))
        t.setPriority(remaining.pop(slot) + 1)
        prioritized.append(t)
    return sort_task_set.sort(prioritized, 'priority')
def addPrioritiesToTasksByPeriod(tasks):
    """Rate-monotonic priorities: shorter period gets a higher priority (1)."""
    ordered = sort_task_set.sortEvent(tasks, 'period')
    for rank, t in enumerate(ordered, start=1):
        t.setPriority(rank)
    return ordered
def addPrioritiesToTasksByDeadline(tasks):
    """Deadline-monotonic priorities: earlier deadline gets a higher priority (1)."""
    ordered = sort_task_set.sortEvent(tasks, 'deadline')
    for rank, t in enumerate(ordered, start=1):
        t.setPriority(rank)
    return ordered
def convertArrTasks(arr, processors):
    """Rebuild task.Task objects from dict records that carry processor info.

    Each record must provide ``id``, ``period``, ``deadline``,
    ``execution``, ``abnormal_exe``, ``priority`` and ``processor``.
    ``processors`` is unused and kept only for interface compatibility
    (it sized per-processor debug aggregates that fed commented-out
    prints and have been removed).
    """
    tasks = []
    for a in arr:
        t = task.Task(a['id'], a['period'], a['deadline'], a['execution'])
        t.abnormal_exe = a['abnormal_exe']
        t.priority = a['priority']
        t.processor = a['processor']
        tasks.append(t)
    return tasks
def convertArrTasksOrig(arr, processors):
    """Rebuild task.Task objects from dict records, ids assigned by position.

    ``processors`` is unused and kept only for interface compatibility.
    The dead ``uti`` accumulator (feeding commented-out prints) has been
    removed and the loop variable no longer shadows the builtin ``id``.
    """
    tasks = []
    for task_id, a in enumerate(arr):
        t = task.Task(task_id, a['period'], a['deadline'], a['execution'])
        t.abnormal_exe = a['abnormal_exe']
        tasks.append(t)
    return tasks
# def main():
#def main():
# tasks = [{},{},{},{},{},{},{}]
# print(tasks)
# addPriorityToTask(tasks)
# print(tasks)
# if __name__ == "__main__":
# main()
| [
"sort_task_set.sort",
"task.Task",
"numpy.random.random",
"math.pow",
"task.setPriority",
"random.seed",
"random.random",
"sort_task_set.sortEvent"
] | [((311, 324), 'random.seed', 'random.seed', ([], {}), '()\n', (322, 324), False, 'import random\n'), ((491, 504), 'random.seed', 'random.seed', ([], {}), '()\n', (502, 504), False, 'import random\n'), ((2860, 2898), 'sort_task_set.sort', 'sort_task_set.sort', (['allTasks', '"""period"""'], {}), "(allTasks, 'period')\n", (2878, 2898), False, 'import sort_task_set\n'), ((3127, 3159), 'sort_task_set.sort', 'sort_task_set.sort', (['tasks', '"""uti"""'], {}), "(tasks, 'uti')\n", (3145, 3159), False, 'import sort_task_set\n'), ((4032, 4072), 'sort_task_set.sort', 'sort_task_set.sort', (['allTasks', '"""priority"""'], {}), "(allTasks, 'priority')\n", (4050, 4072), False, 'import sort_task_set\n'), ((4158, 4198), 'sort_task_set.sortEvent', 'sort_task_set.sortEvent', (['tasks', '"""period"""'], {}), "(tasks, 'period')\n", (4181, 4198), False, 'import sort_task_set\n'), ((4413, 4455), 'sort_task_set.sortEvent', 'sort_task_set.sortEvent', (['tasks', '"""deadline"""'], {}), "(tasks, 'deadline')\n", (4436, 4455), False, 'import sort_task_set\n'), ((4241, 4274), 'task.setPriority', 'task.setPriority', (['currentPriority'], {}), '(currentPriority)\n', (4257, 4274), False, 'import task\n'), ((4498, 4531), 'task.setPriority', 'task.setPriority', (['currentPriority'], {}), '(currentPriority)\n', (4514, 4531), False, 'import task\n'), ((4805, 4867), 'task.Task', 'task.Task', (["a['id']", "a['period']", "a['deadline']", "a['execution']"], {}), "(a['id'], a['period'], a['deadline'], a['execution'])\n", (4814, 4867), False, 'import task\n'), ((5408, 5465), 'task.Task', 'task.Task', (['id', "a['period']", "a['deadline']", "a['execution']"], {}), "(id, a['period'], a['deadline'], a['execution'])\n", (5417, 5465), False, 'import task\n'), ((1735, 1776), 'task.Task', 'task.Task', (['x', 'period', 'deadline', 'execution'], {}), '(x, period, deadline, execution)\n', (1744, 1776), False, 'import task\n'), ((2474, 2515), 'task.Task', 'task.Task', (['x', 'period', 'deadline', 'execution'], 
{}), '(x, period, deadline, execution)\n', (2483, 2515), False, 'import task\n'), ((3882, 3897), 'random.random', 'random.random', ([], {}), '()\n', (3895, 3897), False, 'import random\n'), ((823, 844), 'numpy.random.random', 'numpy.random.random', ([], {}), '()\n', (842, 844), False, 'import numpy\n'), ((2180, 2197), 'math.pow', 'math.pow', (['(10)', 'thN'], {}), '(10, thN)\n', (2188, 2197), False, 'import math\n'), ((2204, 2225), 'math.pow', 'math.pow', (['(10)', '(thN + 1)'], {}), '(10, thN + 1)\n', (2212, 2225), False, 'import math\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import poisson
from scipy.stats import uniform
from scipy.stats import norm
# Observed data.
data = np.array([0.3120639, 0.5550930, 0.2493114, 0.9785842])

# Parameter grid.  sigma must stay strictly positive: norm.pdf with
# scale=0 is undefined and filled the posterior with NaNs, so the grid
# starts just above 0.
mus = np.linspace(0, 1, num=100)
sigmas = np.linspace(0.01, 1, num=100)

# Evaluate the (unnormalised) posterior on the grid, vectorised instead
# of the original double Python loop over 10k scipy calls.  Ordering
# matches those loops: mu varies slowest, sigma fastest.
mu_grid, sigma_grid = np.meshgrid(mus, sigmas, indexing="ij")
mu_flat = mu_grid.ravel()
sigma_flat = sigma_grid.ravel()

# Likelihood: product over data points of N(x | mu, sigma).
likelihood = np.prod(
    norm.pdf(data[np.newaxis, :],
             loc=mu_flat[:, np.newaxis],
             scale=sigma_flat[:, np.newaxis]),
    axis=1,
)
# Uniform(0, 1) priors on both parameters.
posterior = likelihood * uniform.pdf(mu_flat, 0, 1) * uniform.pdf(sigma_flat, 0, 1)

# Keep the list form expected by the scatter plot below.
x = mu_flat.tolist()
y = sigma_flat.tolist()
z = posterior.tolist()
# Visualise the unnormalised posterior as a colored scatter over the grid.
plt.scatter(x, y, c=z)
plt.show()
| [
"numpy.array",
"numpy.linspace",
"scipy.stats.uniform.pdf",
"scipy.stats.norm.pdf",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.show"
] | [((159, 212), 'numpy.array', 'np.array', (['[0.3120639, 0.555093, 0.2493114, 0.9785842]'], {}), '([0.3120639, 0.555093, 0.2493114, 0.9785842])\n', (167, 212), True, 'import numpy as np\n'), ((229, 255), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': '(100)'}), '(0, 1, num=100)\n', (240, 255), True, 'import numpy as np\n'), ((265, 291), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': '(100)'}), '(0, 1, num=100)\n', (276, 291), True, 'import numpy as np\n'), ((610, 632), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': 'z'}), '(x, y, c=z)\n', (621, 632), True, 'import matplotlib.pyplot as plt\n'), ((633, 643), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (641, 643), True, 'import matplotlib.pyplot as plt\n'), ((489, 513), 'scipy.stats.uniform.pdf', 'uniform.pdf', (['sigma', '(0)', '(1)'], {}), '(sigma, 0, 1)\n', (500, 513), False, 'from scipy.stats import uniform\n'), ((465, 486), 'scipy.stats.uniform.pdf', 'uniform.pdf', (['mu', '(0)', '(1)'], {}), '(mu, 0, 1)\n', (476, 486), False, 'from scipy.stats import uniform\n'), ((424, 461), 'scipy.stats.norm.pdf', 'norm.pdf', ([], {'x': 'data', 'loc': 'mu', 'scale': 'sigma'}), '(x=data, loc=mu, scale=sigma)\n', (432, 461), False, 'from scipy.stats import norm\n')] |
## testing RigidMassInfo
# a difficulty is to test the orientatoin of the summed up rigid frame since it is not deterministic (axis swapping is expected)
# it is indirectly tested with test where the resultant sum is symmetric (a cube) so that the inertia is equal on all axis
import os
import numpy
from SofaTest.Macro import *
from SofaPython import Quaternion
from SofaPython import mass
def run():
    """Test RigidMassInfo summation on unit cubes; return True if all pass.

    The EXPECT_* check macros come from ``SofaTest.Macro`` (star import).
    NOTE(review): ``math`` is not imported at the top of this file —
    presumably it is also pulled in by that star import; confirm.
    """
    ok=True
    # cube_1, size 1, one corner at origin
    cubeMass_1 = mass.RigidMassInfo()
    cubeMass_1.mass = 1.
    cubeMass_1.com=[0.5,0.5,0.5]
    cubeMass_1.diagonal_inertia=[1./6.,1./6.,1./6.]
    cubeMass_1.density = 1.5
    # cube_2, half cube, along x axis, positive side
    cubeMass_2 = mass.RigidMassInfo()
    cubeMass_2.mass = 0.5
    cubeMass_2.com=[0.25,0.,0.]
    cubeMass_2.diagonal_inertia=(0.5/12.)*numpy.array([2.,1.25,1.25])
    cubeMass_2.density = 1.
    # cube_3, half cube, along x axis, negative side
    cubeMass_3 = mass.RigidMassInfo()
    cubeMass_3.mass = 0.5
    cubeMass_3.com=[-0.25,0.,0.]
    cubeMass_3.diagonal_inertia=cubeMass_2.diagonal_inertia
    cubeMass_3.density = 2.
    # Inertia expressed at the world origin (parallel-axis shifted).
    ok &= EXPECT_MAT_EQ([[2./3., -0.25, -0.25],[-0.25,2./3.,-0.25],[-0.25,-0.25,2./3.]],
                        cubeMass_1.getWorldInertia(),
                        "RigidMassInfo.getWorldInertia() cube 1")
    # Summing a cube with itself: mass doubles, com and density unchanged.
    cubeMass_1_1 = cubeMass_1+cubeMass_1
    ok &= EXPECT_FLOAT_EQ(2., cubeMass_1_1.mass, "RigidMassInfo.add cube_1+cube_1 - mass")
    ok &= EXPECT_FLOAT_EQ(cubeMass_1.density, cubeMass_1_1.density, "RigidMassInfo.add cube_1+cube_1 - density")
    ok &= EXPECT_VEC_EQ([0.5,0.5,0.5], cubeMass_1_1.com, "RigidMassInfo.add cube_1+cube_1 - com")
    ok &= EXPECT_MAT_EQ([1./3.,1./3.,1./3.], cubeMass_1_1.diagonal_inertia, "RigidMassInfo.add cube_1+cube_1 - diagonal_inertia" )
    # Two half-cubes recombine into the unit cube centred at the origin.
    cubeMass_2_3 = cubeMass_2+cubeMass_3
    ok &= EXPECT_FLOAT_EQ(1., cubeMass_2_3.mass, "RigidMassInfo.add cube_2+cube_3 - mass")
    ok &= EXPECT_FLOAT_EQ(1.+1./3., cubeMass_2_3.density, "RigidMassInfo.add cube_2+cube_3 - density")
    ok &= EXPECT_VEC_EQ([0.,0.,0.], cubeMass_2_3.com, "RigidMassInfo.add cube_2+cube_3 - com")
    ok &= EXPECT_MAT_EQ([1./6.,1./6.,1./6.], cubeMass_2_3.diagonal_inertia, "RigidMassInfo.add cube_2+cube_3 - diagonal_inertia" )
    # Rotate cubes 2 and 3 by several test quaternions; the symmetric sum
    # (a cube) keeps an axis-equal diagonal inertia, so the check does not
    # depend on the (non-deterministic) orientation of the summed frame.
    qq = [ Quaternion.axisToQuat([0.,0.,1.],math.radians(30)),
           Quaternion.axisToQuat([0.,-2.,1.],math.radians(-60)),
           Quaternion.axisToQuat([-3.,2.,-1.],math.radians(160)) ]
    for q in qq:
        cubeMass_2.com=Quaternion.rotate(q, [0.25,0.,0.])
        cubeMass_2.inertia_rotation=Quaternion.inv(q)
        cubeMass_3.com=Quaternion.rotate(q, [-0.25,0.,0.])
        cubeMass_3.inertia_rotation=Quaternion.inv(q)
        cubeMass_2_3 = cubeMass_2+cubeMass_3
        ok &= EXPECT_FLOAT_EQ(1., cubeMass_2_3.mass, "RigidMassInfo.add rotated cube_2+cube_3 - mass")
        ok &= EXPECT_VEC_EQ([0.,0.,0.], cubeMass_2_3.com, "RigidMassInfo.add rotated cube_2+cube_3 - com")
        ok &= EXPECT_MAT_EQ([1./6.,1./6.,1./6.], cubeMass_2_3.diagonal_inertia, "RigidMassInfo.add rotated cube_2+cube_3 - diagonal_inertia" )
    return ok
| [
"numpy.array",
"SofaPython.Quaternion.rotate",
"SofaPython.Quaternion.inv",
"SofaPython.mass.RigidMassInfo"
] | [((479, 499), 'SofaPython.mass.RigidMassInfo', 'mass.RigidMassInfo', ([], {}), '()\n', (497, 499), False, 'from SofaPython import mass\n'), ((710, 730), 'SofaPython.mass.RigidMassInfo', 'mass.RigidMassInfo', ([], {}), '()\n', (728, 730), False, 'from SofaPython import mass\n'), ((958, 978), 'SofaPython.mass.RigidMassInfo', 'mass.RigidMassInfo', ([], {}), '()\n', (976, 978), False, 'from SofaPython import mass\n'), ((831, 861), 'numpy.array', 'numpy.array', (['[2.0, 1.25, 1.25]'], {}), '([2.0, 1.25, 1.25])\n', (842, 861), False, 'import numpy\n'), ((2563, 2601), 'SofaPython.Quaternion.rotate', 'Quaternion.rotate', (['q', '[0.25, 0.0, 0.0]'], {}), '(q, [0.25, 0.0, 0.0])\n', (2580, 2601), False, 'from SofaPython import Quaternion\n'), ((2634, 2651), 'SofaPython.Quaternion.inv', 'Quaternion.inv', (['q'], {}), '(q)\n', (2648, 2651), False, 'from SofaPython import Quaternion\n'), ((2675, 2714), 'SofaPython.Quaternion.rotate', 'Quaternion.rotate', (['q', '[-0.25, 0.0, 0.0]'], {}), '(q, [-0.25, 0.0, 0.0])\n', (2692, 2714), False, 'from SofaPython import Quaternion\n'), ((2747, 2764), 'SofaPython.Quaternion.inv', 'Quaternion.inv', (['q'], {}), '(q)\n', (2761, 2764), False, 'from SofaPython import Quaternion\n')] |
from typing import Tuple, Any, Dict, Optional, List
import numpy
import numpy as np
from plotly import graph_objects
from plotly.subplots import make_subplots
from plotly.tools import DEFAULT_PLOTLY_COLORS
from phi import math, field
from phi.field import SampledField, PointCloud, Grid, StaggeredGrid
from phi.geom import Sphere, BaseBox
from phi.math import instance, Tensor, spatial
from phi.vis._dash.colormaps import COLORMAPS
from phi.vis._plot_util import smooth_uniform_curve, down_sample_curve
from phi.vis._vis_base import PlottingLibrary
class PlotlyPlots(PlottingLibrary):
    """Plotly implementation of the phi.vis plotting backend."""

    def __init__(self):
        super().__init__('plotly', [graph_objects.Figure])

    def create_figure(self,
                      size: tuple,
                      rows: int,
                      cols: int,
                      subplots: Dict[Tuple[int, int], int],
                      titles: Tensor) -> Tuple[Any, Dict[Tuple[int, int], Any]]:
        """Build a rows x cols subplot grid and a (row, col) -> 1-based index map.

        Subplots with a spatial rank of 3 or more get a 'surface' (3D)
        spec; everything else is a regular 'xy' axes pair.
        """
        title_list = []
        for r in range(rows):
            for c in range(cols):
                title_list.append(titles.rows[r].cols[c].native())
        spec_grid = []
        for r in range(rows):
            row_specs = []
            for c in range(cols):
                kind = 'xy' if subplots.get((r, c), 0) < 3 else 'surface'
                row_specs.append({'type': kind})
            spec_grid.append(row_specs)
        fig = self.current_figure = make_subplots(rows=rows, cols=cols,
                                                  subplot_titles=title_list,
                                                  specs=spec_grid)
        fig._phi_size = size
        index_map = {}
        for pos in subplots:
            # Plotly subplot indices are 1-based.
            index_map[pos] = (pos[0] + 1, pos[1] + 1)
        return fig, index_map

    def plot(self, data: SampledField, figure: graph_objects.Figure, subplot, min_val: float = None, max_val: float = None,
             show_color_bar: bool = True, **plt_args):
        """Delegate drawing of ``data`` into the given subplot to ``_plot``."""
        row, col = subplot
        _plot(data, figure, row=row, col=col, size=(800, 600),
              colormap=None, show_color_bar=show_color_bar)

    def show(self, figure: graph_objects.Figure):
        """Open the figure in the default plotly renderer."""
        figure.show()

    def save(self, figure: graph_objects.Figure, path: str, dpi: float):
        """Write ``figure`` to ``path`` (format inferred from the extension)."""
        width, height = figure._phi_size
        figure.layout.update(margin=dict(l=0, r=0, b=0, t=0))
        # Plotly multiplies the pixel size by ``scale``, so the size is
        # divided by it here to keep the physical size constant.
        scale = dpi / 90.
        figure.write_image(path,
                           width=width * dpi / scale,
                           height=height * dpi / scale,
                           scale=scale)
PLOTLY = PlotlyPlots()
def _plot(data: SampledField,
          fig: graph_objects.Figure,
          size: tuple,
          colormap: str or None,
          show_color_bar: bool,
          row: int = None, col: int = None,
          ):
    """Add traces for `data` to subplot (row, col) of `fig`, dispatching on the
    field's spatial rank and type.

    Handled cases: 1D grids (line plot), 2D scalar grids (heatmap), 2D vector
    grids (line segments), 3D scalar grids (volume rendering), 3D vector grids
    (cone plot), and 2D/3D point clouds (scatter markers).

    Raises:
        NotImplementedError: If no recipe matches `data`.
    """
    subplot = fig.get_subplot(row, col)
    if data.spatial_rank == 1 and isinstance(data, Grid):  # 1D grid -> line plot, one trace per channel
        x = data.points.vector[0].numpy().flatten()
        channels = data.values.shape.channel
        if channels.rank == 1 and channels.get_item_names(0) is not None:
            # named channels: one labeled trace each, with a legend
            for i, name in enumerate(channels.get_item_names(0)):
                y = math.reshaped_native(real_values(data[{channels.name: i}]), [data.shape.spatial], to_numpy=True)
                fig.add_trace(graph_objects.Scatter(x=x, y=y, mode='lines+markers', name=name), row=row, col=col)
            fig.update_layout(showlegend=True)
        else:
            for channel in channels.meshgrid():
                y = math.reshaped_native(real_values(data[channel]), [data.shape.spatial], to_numpy=True)
                fig.add_trace(graph_objects.Scatter(x=x, y=y, mode='lines+markers', name='Multi-channel'), row=row, col=col)
            fig.update_layout(showlegend=False)
    elif data.spatial_rank == 2 and isinstance(data, Grid) and 'vector' not in data.shape:  # heatmap
        dims = spatial(data)
        values = real_values(data).numpy(dims.reversed)
        x = data.points.vector[dims[0].name].dimension(dims[1].name)[0].numpy()
        y = data.points.vector[dims[1].name].dimension(dims[0].name)[0].numpy()
        min_val, max_val = numpy.nanmin(values), numpy.nanmax(values)
        # guard against all-NaN / infinite data ranges
        min_val, max_val = min_val if numpy.isfinite(min_val) else 0, max_val if numpy.isfinite(max_val) else 0
        color_scale = get_div_map(min_val, max_val, equal_scale=True, colormap=colormap)
        # color_bar = graph_objects.heatmap.ColorBar(x=1.15)   , colorbar=color_bar
        fig.add_heatmap(row=row, col=col, x=x, y=y, z=values, zauto=False, zmin=min_val, zmax=max_val, colorscale=color_scale, showscale=show_color_bar)
        subplot.xaxis.update(scaleanchor=f'y{subplot.yaxis.plotly_name[5:]}', scaleratio=1, constrain='domain', title=dims.names[0])
        subplot.yaxis.update(constrain='domain', title=dims.names[1])
    elif data.spatial_rank == 2 and isinstance(data, Grid):  # vector field
        if isinstance(data, StaggeredGrid):
            data = data.at_centers()
        x, y = [d.numpy('x,y') for d in data.points.vector.unstack_spatial('x,y')]
        # ToDo Additional channel dims as multiple vectors
        extra_channels = data.shape.channel.without('vector')
        values = math.pack_dims(real_values(data), extra_channels, math.channel('channels'))
        data_x, data_y = [d.numpy('channels,x,y') for d in values.vector.unstack_spatial('x,y')]
        lower_x, lower_y = [float(l) for l in data.bounds.lower.vector.unstack_spatial('x,y')]
        upper_x, upper_y = [float(u) for u in data.bounds.upper.vector.unstack_spatial('x,y')]
        x_range = [lower_x, upper_x]
        y_range = [lower_y, upper_y]
        y = y.flatten()
        x = x.flatten()
        for ch in range(data_x.shape[0]):
            # quiver = figure_factory.create_quiver(x, y, data_x[ch], data_y[ch], scale=1.0)   # 7 points per arrow
            # fig.add_trace(quiver, row=row, col=col)
            data_y_flat = data_y[ch].flatten()
            data_x_flat = data_x[ch].flatten()
            # lines_y = numpy.stack([y, y + data_y_flat, [None] * len(x)], -1).flatten()  # 3 points per arrow
            # lines_x = numpy.stack([x, x + data_x_flat, [None] * len(x)], -1).flatten()
            # each arrow is drawn as a segment centered on its sample point;
            # the trailing None breaks the line between arrows
            lines_y = numpy.stack([y - data_y_flat / 2, y + data_y_flat / 2, [None] * len(x)], -1).flatten()  # 3 points per arrow
            lines_x = numpy.stack([x - data_x_flat / 2, x + data_x_flat / 2, [None] * len(x)], -1).flatten()
            name = extra_channels.get_item_names(0)[ch] if extra_channels.rank == 1 and extra_channels.get_item_names(0) is not None else None
            fig.add_scatter(x=lines_x, y=lines_y, mode='lines', row=row, col=col, name=name)
        if data_x.shape[0] == 1:
            fig.update_layout(showlegend=False)
        fig.update_xaxes(range=x_range)
        fig.update_yaxes(range=y_range)
        subplot.xaxis.update(scaleanchor=f'y{subplot.yaxis.plotly_name[5:]}', scaleratio=1, constrain='domain')
        subplot.yaxis.update(constrain='domain')
    elif data.spatial_rank == 3 and isinstance(data, Grid) and data.shape.channel.volume == 1:  # 3D heatmap
        values = real_values(data).numpy('z,y,x')
        x = data.points.vector['x'].numpy('z,y,x')
        y = data.points.vector['y'].numpy('z,y,x')
        z = data.points.vector['z'].numpy('z,y,x')
        min_val, max_val = numpy.nanmin(values), numpy.nanmax(values)
        min_val, max_val = min_val if numpy.isfinite(min_val) else 0, max_val if numpy.isfinite(max_val) else 0
        color_scale = get_div_map(min_val, max_val, equal_scale=True, colormap=colormap)
        fig.add_volume(x=x.flatten(), y=y.flatten(), z=z.flatten(), value=values.flatten(),
                       showscale=show_color_bar, colorscale=color_scale, cmin=min_val, cmax=max_val, cauto=False,
                       isomin=0.1, isomax=0.8,
                       opacity=0.1,  # needs to be small to see through all surfaces
                       surface_count=17,  # needs to be a large number for good volume rendering
                       row=row, col=col)
        fig.update_layout(uirevision=True)
    elif data.spatial_rank == 3 and isinstance(data, Grid):  # 3D vector field
        if isinstance(data, StaggeredGrid):
            data = data.at_centers()
        u = real_values(data).vector['x'].numpy('z,y,x')
        v = real_values(data).vector['y'].numpy('z,y,x')
        w = real_values(data).vector['z'].numpy('z,y,x')
        x = data.points.vector['x'].numpy('z,y,x')
        y = data.points.vector['y'].numpy('z,y,x')
        z = data.points.vector['z'].numpy('z,y,x')
        fig.add_cone(x=x.flatten(), y=y.flatten(), z=z.flatten(), u=u.flatten(), v=v.flatten(), w=w.flatten(),
                     colorscale='Blues',
                     sizemode="absolute", sizeref=1,
                     row=row, col=col)
    elif isinstance(data, PointCloud) and data.spatial_rank == 2:
        lower_x, lower_y = [float(d) for d in data.bounds.lower.vector.unstack_spatial('x,y')]
        upper_x, upper_y = [float(d) for d in data.bounds.upper.vector.unstack_spatial('x,y')]
        if data.points.shape.non_channel.rank > 1:
            # multiple point batches: recurse once per batch element
            data_list = field.unstack(data, data.points.shape.non_channel[0].name)
            for d in data_list:
                _plot(d, fig, size, colormap, show_color_bar, row, col)
        else:
            x, y = [d.numpy() for d in data.points.vector.unstack_spatial('x,y')]
            if data.color.shape.instance_rank == 0:
                color = str(data.color)  # one shared color
            else:
                color = [str(d) for d in math.unstack(data.color, instance)]  # per-point colors
            # convert element size from world units to pixels via the subplot height
            subplot_height = (subplot.yaxis.domain[1] - subplot.yaxis.domain[0]) * size[1]
            if isinstance(data.elements, Sphere):
                symbol = 'circle'
                marker_size = data.elements.bounding_radius().numpy() * 1.9
            elif isinstance(data.elements, BaseBox):
                symbol = 'square'
                marker_size = math.mean(data.elements.bounding_half_extent(), 'vector').numpy() * 1
            else:
                symbol = 'asterisk'
                marker_size = data.elements.bounding_radius().numpy()
            marker_size *= subplot_height / (upper_y - lower_y)
            marker = graph_objects.scatter.Marker(size=marker_size, color=color, sizemode='diameter', symbol=symbol)
            fig.add_scatter(mode='markers', x=x, y=y, marker=marker, row=row, col=col)
        fig.update_xaxes(range=[lower_x, upper_x])
        fig.update_yaxes(range=[lower_y, upper_y])
        fig.update_layout(showlegend=False)
        subplot.xaxis.update(scaleanchor=f'y{subplot.yaxis.plotly_name[5:]}', scaleratio=1, constrain='domain')
        subplot.yaxis.update(constrain='domain')
    elif isinstance(data, PointCloud) and data.spatial_rank == 3:
        lower_x, lower_y, lower_z = [float(d) for d in data.bounds.lower.vector.unstack_spatial('x,y,z')]
        upper_x, upper_y, upper_z = [float(d) for d in data.bounds.upper.vector.unstack_spatial('x,y,z')]
        if data.points.shape.non_channel.rank > 1:
            data_list = field.unstack(data, data.points.shape.non_channel[0].name)
            for d in data_list:
                _plot(d, fig, size, colormap, show_color_bar, row, col)
        else:
            x, y, z = [d.numpy() for d in data.points.vector.unstack_spatial('x,y,z')]
            if data.color.shape.instance_rank == 0:
                color = str(data.color)
            else:
                color = [str(d) for d in math.unstack(data.color, instance)]
            domain_y = fig.layout[subplot.plotly_name].domain.y
            if isinstance(data.elements, Sphere):
                symbol = 'circle'
                marker_size = data.elements.bounding_radius().numpy() * 2
            elif isinstance(data.elements, BaseBox):
                symbol = 'square'
                marker_size = math.mean(data.elements.bounding_half_extent(), 'vector').numpy() * 1
            else:
                symbol = 'asterisk'
                marker_size = data.elements.bounding_radius().numpy()
            marker_size *= size[1] * (domain_y[1] - domain_y[0]) / (upper_y - lower_y) * 0.5
            marker = graph_objects.scatter3d.Marker(size=marker_size, color=color, sizemode='diameter', symbol=symbol)
            fig.add_scatter3d(mode='markers', x=x, y=y, z=z, marker=marker, row=row, col=col)
        subplot.xaxis.update(range=[lower_x, upper_x])
        subplot.yaxis.update(range=[lower_y, upper_y])
        subplot.zaxis.update(range=[lower_z, upper_z])
        fig.update_layout(showlegend=False)
    else:
        raise NotImplementedError(f"No figure recipe for {data}")
def real_values(field: SampledField):
return field.values if field.values.dtype.kind != complex else abs(field.values)
def get_div_map(zmin, zmax, equal_scale=False, colormap: str = None):
    """Build a Plotly color scale for the value range [zmin, zmax].

    The colormap's center (fraction 0.5) is aligned with the value 0 so that a
    diverging map splits cleanly into negative and positive colors.

    Args:
        zmin: Minimum data value.
        zmax: Maximum data value.
        equal_scale: (Default value = False) If True, negative and positive
            values share one linear scale; if False, each side is stretched to
            use its full half of the colormap.
        colormap: Key into COLORMAPS; the stored map is a list or array of
            rows [fraction_val, red_frac, green_frac, blue_frac].
            (Default value = None)

    Returns:
        List of (fraction, 'rgb(r,g,b)') pairs usable as a Plotly colorscale.
    """
    colormap = COLORMAPS[colormap]
    # Ensure slicing
    cm_arr = numpy.array(colormap).astype(numpy.float64)
    # Central color (at fraction 0.5), interpolated if not listed explicitly
    if 0.5 not in cm_arr[:, 0]:
        central_color = get_color_interpolation(0.5, cm_arr)[1:]
    else:
        central_color = cm_arr[cm_arr[:, 0] == 0.5][-1][1:]
    # Degenerate range: return a flat two-stop map of the central color
    if zmin == zmax:
        central_color = numpy.round(central_color).astype(numpy.int32)
        return [(0, "rgb({},{},{})".format(*central_color)), (1, "rgb({},{},{})".format(*central_color))]
    center = abs(zmin / (zmax - zmin))  # fraction of the range that lies below zero
    if zmin > 0:
        center = 0
    # Rescaling
    if not equal_scale:
        # Full range, Zero-centered
        neg_flag = cm_arr[:, 0] < 0.5
        pos_flag = cm_arr[:, 0] >= 0.5
        cm_arr[neg_flag, 0] = cm_arr[neg_flag, 0] * 2 * center  # Scale (0, 0.5) -> (0, center)
        cm_arr[pos_flag, 0] = (cm_arr[pos_flag, 0] - 0.5) * 2 * (1 - center) + center  # Scale (0.5, 1) -> (center, 1)
        # Drop duplicate zeros. Allow for non-center values in the original map.
        if zmin == 0:
            cm_arr = cm_arr[numpy.max(numpy.arange(len(cm_arr))[cm_arr[:, 0] == 0]):]
    else:
        cm_arr[:, 0] = cm_arr[:, 0] - 0.5  # center at zero (-0.5, 0.5)
        # Scale to the desired range
        if zmax > abs(zmin):
            cm_scale = (1 - center) / (numpy.max(cm_arr[:, 0]))  # scale by positives
        else:
            cm_scale = center / (numpy.max(cm_arr[:, 0]))  # scale by negatives
        # Scale the maximum to +1 when centered
        cm_arr[:, 0] *= cm_scale
        cm_arr[:, 0] += center  # center
    # Add zero if it doesn't exist
    if 0 not in cm_arr[:, 0]:
        new_min = get_color_interpolation(0, cm_arr)
        cm_arr = numpy.vstack([new_min, cm_arr])
    # Add one if it doesn't exist
    if 1 not in cm_arr[:, 0]:
        new_max = get_color_interpolation(1, cm_arr)
        cm_arr = numpy.vstack([cm_arr, new_max])
    # Compare center
    # new_center = get_color_interpolation(center, cm_arr)
    # if not all(new_center == [center, *central_color]):
    #     print("Failed center comparison.")
    #     print("Center: {}".format(new_center))
    #     print("Center should be: {}".format([center, *central_color]))
    #     assert False
    # Cut to (0, 1)
    cm_arr = cm_arr[cm_arr[:, 0] >= 0]
    cm_arr = cm_arr[cm_arr[:, 0] <= 1]
    cm_arr[:, 1:] = numpy.clip(cm_arr[:, 1:], 0, 255)
    return [[val, "rgb({:.0f},{:.0f},{:.0f})".format(*colors)] for val, colors in zip(cm_arr[:, 0], cm_arr[:, 1:])]
def get_color_interpolation(val, cm_arr):
    """Interpolate a colormap row for fraction `val`.

    Weighted average between the closest row below and the closest row above
    `val`; if a row matches `val` exactly, that row is returned.

    Args:
        val: Fraction to look up.
        cm_arr: Colormap as rows [fraction, red, green, blue], sorted by fraction.

    Returns:
        Row [val, red, green, blue].
    """
    fractions = cm_arr[:, 0]
    if 0 in fractions - val:
        # exact match: take the last row with this fraction
        center = cm_arr[fractions == val][-1]
    else:
        offset_positions = fractions - val
        # BUG FIX: the original indexed cm_arr with positions computed on the
        # boolean-filtered sub-array, which made the "smallest value larger"
        # lookup always return cm_arr[0]. Index the filtered rows instead.
        # (Boolean-mask indexing also returns copies, so mutating `center`
        # below no longer writes back into cm_arr.)
        below = cm_arr[offset_positions < 0]
        above = cm_arr[offset_positions > 0]
        color1 = below[numpy.argmax(offset_positions[offset_positions < 0])]  # largest value smaller than val
        color2 = above[numpy.argmin(offset_positions[offset_positions > 0])]  # smallest value larger than val
        if color1[0] == color2[0]:
            center = color1
        else:
            x = (val - color1[0]) / (color2[0] - color1[0])  # weight of the upper row
            center = color1 * (1 - x) + color2 * x
        center[0] = val
    return center
def plot_scalars(curves: tuple or list, labels, subplots=True, log_scale='', smooth: int = 1):
    """Plot labeled scalar curves, either one per subplot or overlaid.

    Args:
        curves: Sequence of (x, y) pairs; an empty sequence yields an empty figure.
        labels: One label per curve, used as subplot title or legend entry.
        subplots: If True, place each curve in its own column; otherwise overlay.
        log_scale: String containing 'x' and/or 'y' to log-scale those axes.
        smooth: Smoothing window forwarded to `_graph`; <= 1 disables smoothing.

    Returns:
        A plotly Figure with a transparent background.
    """
    if not curves:
        return graph_objects.Figure()
    if subplots:
        fig = make_subplots(rows=1, cols=len(curves), subplot_titles=labels)
    else:
        fig = graph_objects.Figure()
    for index, (label, (x, y)) in enumerate(zip(labels, curves)):
        traces = _graph(label, x, y, smooth, index)
        if subplots:
            for trace in traces:
                fig.add_trace(trace, row=1, col=index + 1)
        else:
            for trace in traces:
                fig.add_trace(trace)
    # with subplots the titles already identify the curves, so hide the legend
    fig.update_layout(showlegend=not subplots, paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)')
    if 'x' in log_scale:
        fig.update_xaxes(type='log')
    if 'y' in log_scale:
        fig.update_yaxes(type='log')
    return fig
def _graph(label: str, x: np.ndarray, y: np.ndarray, smooth: int, index: int, max_points=2000):
    """Build the Plotly traces for one labeled curve.

    The curve is split wherever x decreases (recording restarts) and each
    piece is down-sampled to at most `max_points`. When smooth > 1, a smoothed
    curve is drawn on top of a semi-transparent raw curve.

    Args:
        label: Legend label for the curve.
        x, y: Raw curve data; trimmed to a common length.
        smooth: Smoothing window; <= 1 disables smoothing.
        index: Curve index, used to pick a color from the default palette.
        max_points: Maximum number of points per down-sampled piece.

    Returns:
        List of graph_objects.Scatter traces.
    """
    color = DEFAULT_PLOTLY_COLORS[index % len(DEFAULT_PLOTLY_COLORS)]
    # trim x and y to the same length
    if len(x) > len(y):
        x = x[:len(y)]
    if len(y) > len(x):
        y = y[:len(x)]
    curves = split_curve(np.stack([x, y], -1))
    low_res = [down_sample_curve(c, max_points) for c in curves]
    x, y = join_curves(low_res).T
    if smooth <= 1:
        return [graph_objects.Scatter(x=x, y=y, name=label, line=graph_objects.scatter.Line(color=color))]
    else:  # smooth
        smooth_curves = [smooth_uniform_curve(c, smooth) for c in curves]
        low_res_smooth = [down_sample_curve(c, max_points) for c in smooth_curves]
        smooth_x, smooth_y = join_curves(low_res_smooth).T
        transparent_color = f"rgba{color[3:-1]}, 0.4)"  # same rgb as `color` with alpha 0.4
        return [
            graph_objects.Scatter(x=x, y=y, line=graph_objects.scatter.Line(color=transparent_color, width=1), showlegend=False),
            graph_objects.Scatter(x=smooth_x, y=smooth_y, name=label, line=graph_objects.scatter.Line(color=color, width=3), mode='lines')
        ]
def split_curve(curve: np.ndarray) -> List[np.ndarray]:
    """Split `curve` into segments wherever its x coordinate decreases.

    A drop in x marks the start of a new recording segment. Each returned
    segment is a contiguous slice of `curve` along the second-to-last axis.
    """
    x_values = curve[..., 0]
    restart_indices = numpy.argwhere(x_values[1:] < x_values[:-1])[:, 0] + 1
    if restart_indices.size == 0:
        return [curve]
    boundaries = [0, *restart_indices.tolist(), curve.shape[-2]]
    return [curve[start:stop] for start, stop in zip(boundaries[:-1], boundaries[1:])]
def join_curves(curves: List[np.ndarray]) -> np.ndarray:
    """Concatenate curve segments, separating them with a NaN row.

    The NaN row makes Plotly (and matplotlib) render a gap between segments
    instead of connecting them with a line.

    BUG FIX: the original used `numpy.float`, which was removed in NumPy 1.24,
    and `np.append(arr, numpy.nan, -2)`, which raises for a scalar value with
    an explicit axis. A full NaN row of matching width is appended instead.

    Args:
        curves: Non-empty list of (N_i, C) arrays sharing the same column count.

    Returns:
        Single (sum(N_i) + len(curves) - 1, C) float array.
    """
    padded = [np.concatenate([np.asarray(c, dtype=float), np.full((1, c.shape[-1]), np.nan)], axis=-2)
              for c in curves[:-1]]
    return np.concatenate(padded + [curves[-1]], -2)
| [
"numpy.clip",
"numpy.array",
"numpy.isfinite",
"numpy.nanmin",
"phi.field.unstack",
"plotly.graph_objects.scatter.Line",
"plotly.graph_objects.scatter.Marker",
"phi.vis._plot_util.down_sample_curve",
"numpy.max",
"numpy.stack",
"plotly.graph_objects.Scatter",
"numpy.vstack",
"numpy.concatena... | [((15019, 15052), 'numpy.clip', 'numpy.clip', (['cm_arr[:, 1:]', '(0)', '(255)'], {}), '(cm_arr[:, 1:], 0, 255)\n', (15029, 15052), False, 'import numpy\n'), ((18446, 18472), 'numpy.concatenate', 'np.concatenate', (['curves', '(-2)'], {}), '(curves, -2)\n', (18460, 18472), True, 'import numpy as np\n'), ((1204, 1275), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': 'rows', 'cols': 'cols', 'subplot_titles': 'titles', 'specs': 'specs'}), '(rows=rows, cols=cols, subplot_titles=titles, specs=specs)\n', (1217, 1275), False, 'from plotly.subplots import make_subplots\n'), ((16088, 16110), 'plotly.graph_objects.Figure', 'graph_objects.Figure', ([], {}), '()\n', (16108, 16110), False, 'from plotly import graph_objects\n'), ((16413, 16435), 'plotly.graph_objects.Figure', 'graph_objects.Figure', ([], {}), '()\n', (16433, 16435), False, 'from plotly import graph_objects\n'), ((17134, 17154), 'numpy.stack', 'np.stack', (['[x, y]', '(-1)'], {}), '([x, y], -1)\n', (17142, 17154), True, 'import numpy as np\n'), ((17171, 17203), 'phi.vis._plot_util.down_sample_curve', 'down_sample_curve', (['c', 'max_points'], {}), '(c, max_points)\n', (17188, 17203), False, 'from phi.vis._plot_util import smooth_uniform_curve, down_sample_curve\n'), ((3370, 3383), 'phi.math.spatial', 'spatial', (['data'], {}), '(data)\n', (3377, 3383), False, 'from phi.math import instance, Tensor, spatial\n'), ((12651, 12672), 'numpy.array', 'numpy.array', (['colormap'], {}), '(colormap)\n', (12662, 12672), False, 'import numpy\n'), ((14323, 14354), 'numpy.vstack', 'numpy.vstack', (['[new_min, cm_arr]'], {}), '([new_min, cm_arr])\n', (14335, 14354), False, 'import numpy\n'), ((14505, 14536), 'numpy.vstack', 'numpy.vstack', (['[cm_arr, new_max]'], {}), '([cm_arr, new_max])\n', (14517, 14536), False, 'import numpy\n'), ((15505, 15557), 'numpy.argmax', 'numpy.argmax', (['offset_positions[offset_positions < 0]'], {}), '(offset_positions[offset_positions < 0])\n', (15517, 15557), 
False, 'import numpy\n'), ((15621, 15673), 'numpy.argmin', 'numpy.argmin', (['offset_positions[offset_positions > 0]'], {}), '(offset_positions[offset_positions > 0])\n', (15633, 15673), False, 'import numpy\n'), ((17427, 17458), 'phi.vis._plot_util.smooth_uniform_curve', 'smooth_uniform_curve', (['c', 'smooth'], {}), '(c, smooth)\n', (17447, 17458), False, 'from phi.vis._plot_util import smooth_uniform_curve, down_sample_curve\n'), ((17502, 17534), 'phi.vis._plot_util.down_sample_curve', 'down_sample_curve', (['c', 'max_points'], {}), '(c, max_points)\n', (17519, 17534), False, 'from phi.vis._plot_util import smooth_uniform_curve, down_sample_curve\n'), ((18066, 18096), 'numpy.argwhere', 'numpy.argwhere', (['(x[1:] < x[:-1])'], {}), '(x[1:] < x[:-1])\n', (18080, 18096), False, 'import numpy\n'), ((3627, 3647), 'numpy.nanmin', 'numpy.nanmin', (['values'], {}), '(values)\n', (3639, 3647), False, 'import numpy\n'), ((3649, 3669), 'numpy.nanmax', 'numpy.nanmax', (['values'], {}), '(values)\n', (3661, 3669), False, 'import numpy\n'), ((12946, 12972), 'numpy.round', 'numpy.round', (['central_color'], {}), '(central_color)\n', (12957, 12972), False, 'import numpy\n'), ((13908, 13931), 'numpy.max', 'numpy.max', (['cm_arr[:, 0]'], {}), '(cm_arr[:, 0])\n', (13917, 13931), False, 'import numpy\n'), ((14003, 14026), 'numpy.max', 'numpy.max', (['cm_arr[:, 0]'], {}), '(cm_arr[:, 0])\n', (14012, 14026), False, 'import numpy\n'), ((18357, 18381), 'numpy.array', 'np.array', (['c', 'numpy.float'], {}), '(c, numpy.float)\n', (18365, 18381), True, 'import numpy as np\n'), ((2781, 2845), 'plotly.graph_objects.Scatter', 'graph_objects.Scatter', ([], {'x': 'x', 'y': 'y', 'mode': '"""lines+markers"""', 'name': 'name'}), "(x=x, y=y, mode='lines+markers', name=name)\n", (2802, 2845), False, 'from plotly import graph_objects\n'), ((3110, 3185), 'plotly.graph_objects.Scatter', 'graph_objects.Scatter', ([], {'x': 'x', 'y': 'y', 'mode': '"""lines+markers"""', 'name': '"""Multi-channel"""'}), 
"(x=x, y=y, mode='lines+markers', name='Multi-channel')\n", (3131, 3185), False, 'from plotly import graph_objects\n'), ((3708, 3731), 'numpy.isfinite', 'numpy.isfinite', (['min_val'], {}), '(min_val)\n', (3722, 3731), False, 'import numpy\n'), ((3751, 3774), 'numpy.isfinite', 'numpy.isfinite', (['max_val'], {}), '(max_val)\n', (3765, 3774), False, 'import numpy\n'), ((4739, 4763), 'phi.math.channel', 'math.channel', (['"""channels"""'], {}), "('channels')\n", (4751, 4763), False, 'from phi import math, field\n'), ((17340, 17379), 'plotly.graph_objects.scatter.Line', 'graph_objects.scatter.Line', ([], {'color': 'color'}), '(color=color)\n', (17366, 17379), False, 'from plotly import graph_objects\n'), ((17739, 17799), 'plotly.graph_objects.scatter.Line', 'graph_objects.scatter.Line', ([], {'color': 'transparent_color', 'width': '(1)'}), '(color=transparent_color, width=1)\n', (17765, 17799), False, 'from plotly import graph_objects\n'), ((17895, 17943), 'plotly.graph_objects.scatter.Line', 'graph_objects.scatter.Line', ([], {'color': 'color', 'width': '(3)'}), '(color=color, width=3)\n', (17921, 17943), False, 'from plotly import graph_objects\n'), ((6816, 6836), 'numpy.nanmin', 'numpy.nanmin', (['values'], {}), '(values)\n', (6828, 6836), False, 'import numpy\n'), ((6838, 6858), 'numpy.nanmax', 'numpy.nanmax', (['values'], {}), '(values)\n', (6850, 6858), False, 'import numpy\n'), ((6897, 6920), 'numpy.isfinite', 'numpy.isfinite', (['min_val'], {}), '(min_val)\n', (6911, 6920), False, 'import numpy\n'), ((6940, 6963), 'numpy.isfinite', 'numpy.isfinite', (['max_val'], {}), '(max_val)\n', (6954, 6963), False, 'import numpy\n'), ((8638, 8696), 'phi.field.unstack', 'field.unstack', (['data', 'data.points.shape.non_channel[0].name'], {}), '(data, data.points.shape.non_channel[0].name)\n', (8651, 8696), False, 'from phi import math, field\n'), ((9731, 9831), 'plotly.graph_objects.scatter.Marker', 'graph_objects.scatter.Marker', ([], {'size': 'marker_size', 'color': 
'color', 'sizemode': '"""diameter"""', 'symbol': 'symbol'}), "(size=marker_size, color=color, sizemode=\n 'diameter', symbol=symbol)\n", (9759, 9831), False, 'from plotly import graph_objects\n'), ((10574, 10632), 'phi.field.unstack', 'field.unstack', (['data', 'data.points.shape.non_channel[0].name'], {}), '(data, data.points.shape.non_channel[0].name)\n', (10587, 10632), False, 'from phi import math, field\n'), ((11672, 11774), 'plotly.graph_objects.scatter3d.Marker', 'graph_objects.scatter3d.Marker', ([], {'size': 'marker_size', 'color': 'color', 'sizemode': '"""diameter"""', 'symbol': 'symbol'}), "(size=marker_size, color=color, sizemode=\n 'diameter', symbol=symbol)\n", (11702, 11774), False, 'from plotly import graph_objects\n'), ((9048, 9082), 'phi.math.unstack', 'math.unstack', (['data.color', 'instance'], {}), '(data.color, instance)\n', (9060, 9082), False, 'from phi import math, field\n'), ((10989, 11023), 'phi.math.unstack', 'math.unstack', (['data.color', 'instance'], {}), '(data.color, instance)\n', (11001, 11023), False, 'from phi import math, field\n')] |
import random
from abc import abstractmethod
import math
from sklearn.cluster import KMeans
import numpy as np
import matplotlib.pyplot as plt
def euclidian_distance(a, b):
    """
    Calculate the Euclidean (L2) distance between two equal-length vectors.
    :param a: 1st vector
    :param b: 2nd vector
    :return: the Euclidean distance as a float
    """
    assert len(a) == len(b)
    # sum of squared per-feature differences, then square root
    return sum((ai - bi) ** 2 for ai, bi in zip(a, b)) ** 0.5
class Network(object):
    """
    Radial Basis Function network: an RBF hidden layer built around K-means
    cluster centers, followed by a linear output layer trained online with
    the delta rule.
    """

    def __init__(self, num_rbf, epochs=50, seed=999):
        """
        Initialize the network.

        :param num_rbf: number of RBF neurons in the hidden layer
        :param epochs: number of training epochs
        :param seed: numpy random seed for reproducible weight initialization
        """
        np.random.seed(seed)
        self.X = None
        self.y = None
        self.num_rbf = num_rbf
        self.errors_train = []  # mean training loss per epoch
        self.errors_val = []  # mean validation loss per epoch
        self.rbf_layer = None
        self.output_layer = None
        self.epochs = epochs

    def fit(self, X_train, y_train, X_val, y_val):
        """Train on (X_train, y_train); report validation loss on (X_val, y_val) each epoch."""
        # BUG FIX: the original referenced the module-level globals X and y
        # here instead of the X_train/y_train parameters, which silently used
        # validation data for center selection and made fit() unusable outside
        # the __main__ script.
        self.rbf_layer = RBFlayer(self.num_rbf, X_train.shape[1])
        self.output_layer = OutLayer(self.num_rbf, y_train.shape[1])
        self.rbf_layer.find_centers(X_train)
        self.rbf_layer.find_sizes()
        # loop per every epoch and pattern
        for ep in range(1, self.epochs + 1):
            errors_train = []
            for pattern, teacher in zip(X_train, y_train):
                # forward pass through both layers, then online weight update
                R_vect = self.rbf_layer.forward(pattern)
                output = self.output_layer.forward(R_vect)
                self.output_layer.adjust_weights(teacher)
                loss = self.mean_squared_error(teacher, output)
                errors_train.append(loss)
            self.errors_train.append(sum(errors_train) / len(errors_train))
            # validation (forward passes only, no weight updates)
            errors_val = []
            for pattern, teacher in zip(X_val, y_val):
                R_vect = self.rbf_layer.forward(pattern)
                output = self.output_layer.forward(R_vect)
                loss = self.mean_squared_error(teacher, output)
                errors_val.append(loss)
            self.errors_val.append(sum(errors_val) / len(errors_val))
            print('Epoch ({})\t||\ttrain loss: {:.4f}\t||\tval loss: {:.4f}'.format(ep, self.errors_train[-1],
                                                                                    self.errors_val[-1]))
        self.save_errors()

    @staticmethod
    def mean_squared_error(teacher, output):
        """Return the sum of squared errors between teacher signal and output."""
        error = np.sum((teacher - output) ** 2)
        return error

    def predict(self, X):
        """Return the network output for every pattern in X as one array."""
        outputs = []
        for pattern in X:
            # forward computation
            R_vect = self.rbf_layer.forward(pattern)
            output = self.output_layer.forward(R_vect)
            outputs.append(output)
        return np.array(outputs)

    def save_errors(self):
        """Write the validation learning curve to 'learning.curve' (x = epoch, y = loss)."""
        # context manager guarantees the file handle is closed
        with open('learning.curve', 'w') as f:
            print('#\tX\tY', file=f)
            for x, y in enumerate(self.errors_val):
                print('\t{}\t{}'.format(x, y), file=f)
class RBFlayer:
    """Hidden layer of radial basis function units.

    Centers are found with K-means clustering; each unit's width ("size") is
    the mean distance to a fraction of its closest neighboring centers.
    """

    def __init__(self, num_neurons, len_input, closest_percent=0.1):
        """
        :param num_neurons: number of RBF units
        :param len_input: dimensionality of the input patterns
        :param closest_percent: fraction of closest centers considered when
            computing each unit's radius
        """
        self.centers = np.zeros((num_neurons, len_input))
        self.sizes = np.zeros((num_neurons,))
        self.num_neurons = num_neurons
        self.distances_matrix = np.zeros((num_neurons, num_neurons))
        # how many closest centers to consider for each center
        # when computing its radius
        self.closest_percent = closest_percent

    def find_centers(self, all_inputs):
        """
        Sets self.centers with centers found with the K-means clustering
        algorithm; the number of centers equals the number of neurons.
        :param all_inputs: training patterns, one per row
        :return:
        """
        kmeans = KMeans(n_clusters=self.num_neurons)
        kmeans.fit(all_inputs)
        self.centers = kmeans.cluster_centers_

    def find_sizes(self):
        """Set each unit's width from the mean distance to its nearest centers."""
        # Fill in the symmetric pairwise distance matrix. (The original also
        # tested `if i == j` inside this loop, but j always starts at i + 1,
        # so that branch was dead code and has been removed; the diagonal
        # stays 0 from initialization.)
        for i in range(self.num_neurons):
            for j in range(i + 1, self.num_neurons):
                a = self.centers[i, :]
                b = self.centers[j, :]
                dist = euclidian_distance(a, b)
                self.distances_matrix[i, j] = dist
                self.distances_matrix[j, i] = dist
        # set size for each center to the mean of the distances
        # to 'closest_percent' of the closest centers
        num_closest = math.ceil(self.num_neurons * self.closest_percent)
        # sorting each row of the distance matrix
        sorted_distances = np.sort(self.distances_matrix)
        for i, c in enumerate(self.centers):
            # take 'num_closest' distances starting from the second one,
            # because the first is the 0 distance of a center to itself
            self.sizes[i] = np.mean(sorted_distances[i, 1:num_closest + 1])

    def forward(self, X):
        """
        Calculate the output of the RBF layer for one input pattern.

        NOTE(review): the activation is exp(-d / (2 * sigma^2)) with d the
        plain (non-squared) Euclidean distance; the textbook Gaussian RBF
        uses d^2. Preserved as-is to keep behavior unchanged -- confirm intent.

        :param X: input pattern
        :return: array of shape (1, num_neurons) with the unit activations
        """
        distances = []
        # get distances from centers
        for i in range(self.num_neurons):
            distances.append(euclidian_distance(self.centers[i], X))
        # apply the rest of the formula
        distances = np.array([distances], dtype=float)
        distances /= 2 * np.square(self.sizes)
        distances = np.exp(-distances)
        return distances
class OutLayer:
    """Linear output layer trained online with the delta rule."""

    def __init__(self,
                 n_rbf,
                 n_outputs,
                 learning_rate=0.01):
        """
        :param n_rbf: number of RBF units feeding this layer
        :param n_outputs: number of output neurons
        :param learning_rate: step size for the delta-rule updates
        """
        self.net = None
        self.out_rbf = None
        self.output = None
        self.sigma = None
        # initialize weights randomly in [-0.5, 0.5)
        self.weights = np.random.uniform(-.5, .5, size=(n_rbf, n_outputs))
        self.learning_rate = learning_rate

    def forward(self, R):
        """
        Forward propagation: cache the RBF activations and the linear output.
        :param R: RBF layer activations, shape (1, n_rbf)
        :return: layer output, shape (1, n_outputs)
        """
        self.out_rbf = R
        self.output = R @ self.weights
        return self.output

    def adjust_weights(self, teacher):
        """Apply one delta-rule update toward the teacher signal."""
        error_signal = teacher - self.output
        # weight change: learning_rate * input^T * error
        weight_change = self.learning_rate * (self.out_rbf.T @ error_signal)
        self.weights += weight_change
def read_dat(name):
    """Read patterns from a whitespace-separated data file.

    A header line starting with '# P' declares the dimensions: N is parsed
    from the third character onward of the second token, M from the third
    character onward of the third token. Other lines starting with '#' are
    comments. Each data row holds N input values followed by M target values.

    BUG FIX: `np.float` was removed in NumPy 1.24; the builtin `float` is
    used instead.

    :param name: path to the data file
    :return: (X, y) float arrays of shape (num_patterns, N) and (num_patterns, M)
    """
    X = []
    y = []
    with open(name) as f:
        for line in f:
            if line.startswith('# P'):
                # second line
                # P=350 N=2 M=1
                splits = line.split(' ')
                N = int(splits[1][2:])
                M = int(splits[2][2:])
                continue
            elif line[0] == '#':
                continue
            # split() without arguments drops the empty strings produced by
            # repeated whitespace (the original filtered them explicitly)
            elements = line.split()
            X.append(elements[:N])
            y.append(elements[N:N + M])
    X = np.array(X).astype(float)
    y = np.array(y).astype(float)
    return X, y
def train_test_split(X, y, split=0.75):
    """Shuffle X and y in unison with random row swaps, then split row-wise.

    BUG FIX: the original saved `X[i, :]` into a temporary before swapping,
    but NumPy basic slicing returns a *view*, so the subsequent assignment
    overwrote the "saved" row -- the swap duplicated row j and lost row i.
    Fancy indexing copies, so the swap below is safe.

    :param X: 2-D array of input patterns
    :param y: 2-D array of targets with the same number of rows as X
    :param split: fraction of rows assigned to the first (training) partition
    :return: X_train, y_train, X_test, y_test
    """
    assert X.shape[0] == y.shape[0]
    size = X.shape[0]
    sep = int(split * size)
    for i in range(size):
        j = random.randint(0, size - 1)
        # the same j is used for X and y so rows stay paired
        X[[i, j]] = X[[j, i]]
        y[[i, j]] = y[[j, i]]
    return X[:sep, :], y[:sep, :], X[sep:, :], y[sep:, :]
if __name__ == "__main__":
    # read dataset
    X, y = read_dat('PA-B-train-04.dat')
    # shuffle, take 'split' percent of the data for training
    # and keep the remainder as the validation sample
    X_train, y_train, X_val, y_val = train_test_split(X, y, split=.8)
    # initialize the network
    net = Network(num_rbf=100, epochs=60, seed=7)
    # train and validate each epoch
    net.fit(X_train, y_train, X_val, y_val)
    # test the network by predicting output from the unseen data
    print('Test prediction:')
    prediction = net.predict(X_val)
    # print predicted and true values side by side
    # NOTE(review): the loop variable y_val shadows the y_val array above;
    # harmless here only because the array is not used after this loop.
    for i, (y_pred, y_val) in enumerate(zip(prediction, y_val)):
        print('Pattern ({}) || prediction: {:.5f}, actual value: {:.5f}'.format(i, y_pred[0, 0], y_val[0]))
    # plot errors for training and validation
    plt.plot(net.errors_train)
    plt.plot(net.errors_val)
    plt.ylabel('Loss')
    plt.xlabel('Epochs')
    plt.show()
| [
"sklearn.cluster.KMeans",
"numpy.mean",
"math.ceil",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.sort",
"numpy.square",
"numpy.exp",
"numpy.sum",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.random.seed",
"numpy.random.uniform",
"random... | [((8331, 8357), 'matplotlib.pyplot.plot', 'plt.plot', (['net.errors_train'], {}), '(net.errors_train)\n', (8339, 8357), True, 'import matplotlib.pyplot as plt\n'), ((8362, 8386), 'matplotlib.pyplot.plot', 'plt.plot', (['net.errors_val'], {}), '(net.errors_val)\n', (8370, 8386), True, 'import matplotlib.pyplot as plt\n'), ((8391, 8409), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (8401, 8409), True, 'import matplotlib.pyplot as plt\n'), ((8414, 8434), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (8424, 8434), True, 'import matplotlib.pyplot as plt\n'), ((8439, 8449), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8447, 8449), True, 'import matplotlib.pyplot as plt\n'), ((638, 658), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (652, 658), True, 'import numpy as np\n'), ((2424, 2455), 'numpy.sum', 'np.sum', (['((teacher - output) ** 2)'], {}), '((teacher - output) ** 2)\n', (2430, 2455), True, 'import numpy as np\n'), ((2744, 2761), 'numpy.array', 'np.array', (['outputs'], {}), '(outputs)\n', (2752, 2761), True, 'import numpy as np\n'), ((3090, 3124), 'numpy.zeros', 'np.zeros', (['(num_neurons, len_input)'], {}), '((num_neurons, len_input))\n', (3098, 3124), True, 'import numpy as np\n'), ((3146, 3170), 'numpy.zeros', 'np.zeros', (['(num_neurons,)'], {}), '((num_neurons,))\n', (3154, 3170), True, 'import numpy as np\n'), ((3242, 3278), 'numpy.zeros', 'np.zeros', (['(num_neurons, num_neurons)'], {}), '((num_neurons, num_neurons))\n', (3250, 3278), True, 'import numpy as np\n'), ((3746, 3781), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'self.num_neurons'}), '(n_clusters=self.num_neurons)\n', (3752, 3781), False, 'from sklearn.cluster import KMeans\n'), ((4506, 4556), 'math.ceil', 'math.ceil', (['(self.num_neurons * self.closest_percent)'], {}), '(self.num_neurons * self.closest_percent)\n', (4515, 4556), False, 'import math\n'), ((4634, 4664), 
'numpy.sort', 'np.sort', (['self.distances_matrix'], {}), '(self.distances_matrix)\n', (4641, 4664), True, 'import numpy as np\n'), ((5312, 5346), 'numpy.array', 'np.array', (['[distances]'], {'dtype': 'float'}), '([distances], dtype=float)\n', (5320, 5346), True, 'import numpy as np\n'), ((5414, 5432), 'numpy.exp', 'np.exp', (['(-distances)'], {}), '(-distances)\n', (5420, 5432), True, 'import numpy as np\n'), ((5756, 5809), 'numpy.random.uniform', 'np.random.uniform', (['(-0.5)', '(0.5)'], {'size': '(n_rbf, n_outputs)'}), '(-0.5, 0.5, size=(n_rbf, n_outputs))\n', (5773, 5809), True, 'import numpy as np\n'), ((6013, 6036), 'numpy.dot', 'np.dot', (['R', 'self.weights'], {}), '(R, self.weights)\n', (6019, 6036), True, 'import numpy as np\n'), ((7272, 7299), 'random.randint', 'random.randint', (['(0)', '(size - 1)'], {}), '(0, size - 1)\n', (7286, 7299), False, 'import random\n'), ((4886, 4933), 'numpy.mean', 'np.mean', (['sorted_distances[i, 1:num_closest + 1]'], {}), '(sorted_distances[i, 1:num_closest + 1])\n', (4893, 4933), True, 'import numpy as np\n'), ((5372, 5393), 'numpy.square', 'np.square', (['self.sizes'], {}), '(self.sizes)\n', (5381, 5393), True, 'import numpy as np\n'), ((6234, 6265), 'numpy.dot', 'np.dot', (['self.out_rbf.T', 'out_sub'], {}), '(self.out_rbf.T, out_sub)\n', (6240, 6265), True, 'import numpy as np\n'), ((7018, 7029), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (7026, 7029), True, 'import numpy as np\n'), ((7059, 7070), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (7067, 7070), True, 'import numpy as np\n')] |
# Copyright (C) 2010 Ion Torrent Systems, Inc. All Rights Reserved
import os
from ion.reports.plotters import plotters
from numpy import median
class KeyPlot:
    """Parses per-nucleotide key traces and renders a consensus 1-mer plot."""

    def __init__(self, key, floworder, title=None):
        """
        :param key: iterable of nucleotide identifiers making up the key
        :param floworder: flow order string (stored for callers; unused here)
        :param title: label used in the plot title and output file names
        """
        self.data = None  # dict: nucleotide -> list of float trace values (set by parse)
        self.key = key
        self.floworder = floworder
        self.title = title
        self.average_peak = None  # int median of per-key trace maxima (set by dump_max)

    def plot(self, outdir=None):
        """Render and save the key trace plot into `outdir`.

        BUG FIX: the default was `outdir=os.getcwd()`, which is evaluated once
        at import time and goes stale if the working directory changes; the
        current directory is now resolved at call time.
        """
        if outdir is None:
            outdir = os.getcwd()
        expected = [1 for i in range(len(self.key) - 1)]
        tracePlot = plotters.Iontrace(
            self.key,
            expected,
            self.data,
            title="Consensus Key 1-Mer - %s Ave. Peak = %s"
            % (self.title, self.average_peak),
        )
        tracePlot.render()
        tracePlot.save(
            os.path.join(outdir, "iontrace_%s" % self.title.replace(" ", "_"))
        )

    def parse(self, fileIn):
        """Parse a trace file of lines '<nucleotide> <v1> <v2> ...'.

        Stores the full trace dict in self.data and returns the traces for
        self.key in key order, zero-filling nucleotides missing from the file.

        BUG FIX: the original length check read `max < len(fTrace) or
        max == None`, which raises TypeError on Python 3 while the tracker is
        still None (None < int); the None test now comes first. The local was
        also renamed so it no longer shadows the builtin `max`.
        """
        with open(fileIn, "r") as d:
            data = d.readlines()
        trace = {}
        max_len = None  # longest trace length, needed to zero-fill missing keys
        for line in data:
            t = line.strip().split(" ")
            fTrace = [float(i) for i in t[1:]]
            trace[t[0]] = fTrace
            if max_len is None or max_len < len(fTrace):
                max_len = len(fTrace)
        toPlot = []
        for k in self.key:
            if k in trace:
                toPlot.append(trace[k])
            else:
                toPlot.append([0 for i in range(max_len)])
        self.data = trace
        return toPlot

    def dump_max(self, fileName):
        """Append '<title> = <average peak>' to fileName; best-effort on I/O errors."""
        try:
            with open(fileName, "a") as f:
                max_array = [
                    max(trace) for k, trace in list(self.data.items()) if k in self.key
                ]
                self.average_peak = int(median(max_array)) if len(max_array) > 0 else 0
                f.write("%s = %s\n" % (self.title, self.average_peak))
        except Exception:
            print("Can't open file")
if __name__ == "__main__":
libKey = sys.argv[2]
floworder = sys.argv[3]
fileIn = sys.argv[1]
fileOut = sys.argv[4]
kp = KeyPlot(libKey, floworder, "Test Fragment")
kp.parse(fileIn)
kp.dump_max(fileOut)
kp.plot()
| [
"numpy.median",
"ion.reports.plotters.plotters.Iontrace",
"os.getcwd"
] | [((384, 395), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (393, 395), False, 'import os\n'), ((475, 616), 'ion.reports.plotters.plotters.Iontrace', 'plotters.Iontrace', (['self.key', 'expected', 'self.data'], {'title': "('Consensus Key 1-Mer - %s Ave. Peak = %s' % (self.title, self.average_peak))"}), "(self.key, expected, self.data, title=\n 'Consensus Key 1-Mer - %s Ave. Peak = %s' % (self.title, self.average_peak)\n )\n", (492, 616), False, 'from ion.reports.plotters import plotters\n'), ((1751, 1768), 'numpy.median', 'median', (['max_array'], {}), '(max_array)\n', (1757, 1768), False, 'from numpy import median\n')] |
import cv2
import os, logging, time, json
import requests, base64
from flask import Flask, jsonify, request, Response
import numpy as np
# for HTTP/1.1 support
from werkzeug.serving import WSGIRequestHandler
app = Flask(__name__)
logging.basicConfig(format='%(asctime)s %(levelname)-10s %(message)s', datefmt="%Y-%m-%d-%H-%M-%S",
level=logging.INFO)
def main():
pass
def grab_image_from_stream():
repeat = 3
wait = 3
frame = None
for _ in range(repeat):
try:
video_capture = cv2.VideoCapture(args.camera)
video_capture.set(cv2.CAP_PROP_BUFFERSIZE, 1)
frame = video_capture.read()[1]
break
except:
# try to re-capture the stream
logging.info("Could not capture video. Recapturing and retrying...")
time.sleep(wait)
if frame is None:
logging.info("Failed to capture frame, sending blank image")
frame = np.zeros((300, 300, 3))
return frame
@app.route('/image/700')
def video_image():
frame = grab_image_from_stream()
_, jpeg = cv2.imencode('.jpg', frame)
response = Response(jpeg.tobytes(), headers={"content-length": len(jpeg)}, mimetype="image/jpeg")
return response
@app.route('/image/800')
def video_image_and_inference():
frame = grab_image_from_stream()
frame = cv2.resize(frame, (300, 300))
_, jpeg = cv2.imencode('.jpg', frame)
resp_img = jpeg.tobytes()
scoring_url = "http://grocerymodel:5001/score"
json_img = json.dumps({"img": frame.tolist()})
input_data = json_img
headers = {'Content-Type':'application/json'}
resp = requests.post(scoring_url, input_data, headers=headers)
logging.info(f'received response: {resp.status_code}')
resp_json = json.loads(resp.content)
resp_json["img"] = str(base64.b64encode(resp_img), "utf-8")
return jsonify(resp_json)
def start_app():
# set protocol to 1.1 so we keep the connection open
WSGIRequestHandler.protocol_version = "HTTP/1.1"
if args.fast:
logging.info("Running the `fast` version")
app.run(host="0.0.0.0", port=args.port)
else:
logging.info(f"Staring regular inventory cam. Port: {args.port}")
app.run(debug=False)
if __name__ == "__main__":
from cmdline import cmd_args
args = cmd_args.parse_camera_args()
if not args.fast:
app.config['SERVER_NAME'] = f'inventorycam:{args.port}'
if args.debug:
logging.info("Please attach a debugger to port 5678")
import ptvsd
ptvsd.enable_attach(('0.0.0.0', 5681))
ptvsd.wait_for_attach()
ptvsd.break_into_debugger()
start_app()
| [
"logging.basicConfig",
"json.loads",
"requests.post",
"cv2.imencode",
"flask.Flask",
"base64.b64encode",
"ptvsd.enable_attach",
"ptvsd.break_into_debugger",
"cmdline.cmd_args.parse_camera_args",
"time.sleep",
"numpy.zeros",
"cv2.VideoCapture",
"ptvsd.wait_for_attach",
"cv2.resize",
"logg... | [((216, 231), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (221, 231), False, 'from flask import Flask, jsonify, request, Response\n'), ((233, 357), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)-10s %(message)s"""', 'datefmt': '"""%Y-%m-%d-%H-%M-%S"""', 'level': 'logging.INFO'}), "(format='%(asctime)s %(levelname)-10s %(message)s',\n datefmt='%Y-%m-%d-%H-%M-%S', level=logging.INFO)\n", (252, 357), False, 'import os, logging, time, json\n'), ((1122, 1149), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'frame'], {}), "('.jpg', frame)\n", (1134, 1149), False, 'import cv2\n'), ((1380, 1409), 'cv2.resize', 'cv2.resize', (['frame', '(300, 300)'], {}), '(frame, (300, 300))\n', (1390, 1409), False, 'import cv2\n'), ((1425, 1452), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'frame'], {}), "('.jpg', frame)\n", (1437, 1452), False, 'import cv2\n'), ((1673, 1728), 'requests.post', 'requests.post', (['scoring_url', 'input_data'], {'headers': 'headers'}), '(scoring_url, input_data, headers=headers)\n', (1686, 1728), False, 'import requests, base64\n'), ((1733, 1787), 'logging.info', 'logging.info', (['f"""received response: {resp.status_code}"""'], {}), "(f'received response: {resp.status_code}')\n", (1745, 1787), False, 'import os, logging, time, json\n'), ((1804, 1828), 'json.loads', 'json.loads', (['resp.content'], {}), '(resp.content)\n', (1814, 1828), False, 'import os, logging, time, json\n'), ((1906, 1924), 'flask.jsonify', 'jsonify', (['resp_json'], {}), '(resp_json)\n', (1913, 1924), False, 'from flask import Flask, jsonify, request, Response\n'), ((2358, 2386), 'cmdline.cmd_args.parse_camera_args', 'cmd_args.parse_camera_args', ([], {}), '()\n', (2384, 2386), False, 'from cmdline import cmd_args\n'), ((906, 966), 'logging.info', 'logging.info', (['"""Failed to capture frame, sending blank image"""'], {}), "('Failed to capture frame, sending blank image')\n", (918, 966), False, 'import os, 
logging, time, json\n'), ((983, 1006), 'numpy.zeros', 'np.zeros', (['(300, 300, 3)'], {}), '((300, 300, 3))\n', (991, 1006), True, 'import numpy as np\n'), ((1857, 1883), 'base64.b64encode', 'base64.b64encode', (['resp_img'], {}), '(resp_img)\n', (1873, 1883), False, 'import requests, base64\n'), ((2080, 2122), 'logging.info', 'logging.info', (['"""Running the `fast` version"""'], {}), "('Running the `fast` version')\n", (2092, 2122), False, 'import os, logging, time, json\n'), ((2190, 2255), 'logging.info', 'logging.info', (['f"""Staring regular inventory cam. Port: {args.port}"""'], {}), "(f'Staring regular inventory cam. Port: {args.port}')\n", (2202, 2255), False, 'import os, logging, time, json\n'), ((2502, 2555), 'logging.info', 'logging.info', (['"""Please attach a debugger to port 5678"""'], {}), "('Please attach a debugger to port 5678')\n", (2514, 2555), False, 'import os, logging, time, json\n'), ((2594, 2632), 'ptvsd.enable_attach', 'ptvsd.enable_attach', (["('0.0.0.0', 5681)"], {}), "(('0.0.0.0', 5681))\n", (2613, 2632), False, 'import ptvsd\n'), ((2641, 2664), 'ptvsd.wait_for_attach', 'ptvsd.wait_for_attach', ([], {}), '()\n', (2662, 2664), False, 'import ptvsd\n'), ((2673, 2700), 'ptvsd.break_into_debugger', 'ptvsd.break_into_debugger', ([], {}), '()\n', (2698, 2700), False, 'import ptvsd\n'), ((528, 557), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.camera'], {}), '(args.camera)\n', (544, 557), False, 'import cv2\n'), ((762, 830), 'logging.info', 'logging.info', (['"""Could not capture video. Recapturing and retrying..."""'], {}), "('Could not capture video. Recapturing and retrying...')\n", (774, 830), False, 'import os, logging, time, json\n'), ((843, 859), 'time.sleep', 'time.sleep', (['wait'], {}), '(wait)\n', (853, 859), False, 'import os, logging, time, json\n')] |
import json
import pickle
import numpy as np
import argparse
from pathlib import Path
from scipy.special import softmax
import csv
import statistics
import sys
import os
from functools import partial
import math
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
import decimal_utils
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str)
parser.add_argument('--raw_result_dir', type=str)
parser.add_argument('--output_dir', type=str)
args = parser.parse_args()
def predict(color_score, gray_score, training_type, eval_mode=1):
if training_type == 'domain-discriminative':
score = np.concatenate([gray_score, color_score], axis=0)
if eval_mode == 1: # Sum without prior shift (result 1)
probs = softmax(score, axis=1)
predicted_classes = np.argmax(probs[:, :10] + probs[:, 10:], axis=1)
elif eval_mode == 2: # Max prob with prior shift (result 2)
prior_shift_weight = [1 / 5 if i % 2 == 0 else 1 / 95
for i in range(10)] + [1 / 95 if i % 2 == 0 else 1 / 5 for i in range(10)]
probs = softmax(score, axis=1) * prior_shift_weight
predicted_classes = np.argmax(np.stack((probs[:, :10], probs[:, 10:])).max(axis=0), axis=1)
elif eval_mode == 3: # Sum prob with prior shift
prior_shift_weight = [1 / 5 if i % 2 == 0 else 1 / 95
for i in range(10)] + [1 / 95 if i % 2 == 0 else 1 / 5 for i in range(10)]
probs = softmax(score, axis=1) * prior_shift_weight
predicted_classes = np.argmax(probs[:, :10] + probs[:, 10:], axis=1)
elif training_type == 'domain-independent':
if eval_mode == 1: # Conditional (result 1)
outputs = np.concatenate([gray_score[:, 10:], color_score[:, :10]], axis=0)
predicted_classes = np.argmax(outputs, axis=1)
elif eval_mode == 2: # Sum (result 2)
outputs = np.concatenate([gray_score, color_score], axis=0)
outputs = outputs[:, :10] + outputs[:, 10:]
predicted_classes = np.argmax(outputs, axis=1)
else:
score = np.concatenate([gray_score, color_score], axis=0)
predicted_classes = np.argmax(score, axis=1)
return predicted_classes
def get_bias(predicted_classes, test_labels):
domain_zeros = np.zeros([
10000,
], dtype=np.int32)
domain_ones = np.ones([
10000,
], dtype=np.int32)
domain_targets = np.concatenate([domain_zeros, domain_ones], axis=0)
class_targets = np.array(test_labels + test_labels)
class_count = 10
test_set_size = class_targets.shape[0]
count_per_class = np.zeros((class_count, 2), dtype=np.float64)
for i in range(test_set_size):
cur_predict = int(predicted_classes[i])
count_per_class[cur_predict][int(domain_targets[i])] += 1
bias = np.amax(count_per_class, axis=1) / np.sum(count_per_class, axis=1)
total_bias = np.abs(bias - 0.5)
mean_class_bias = np.mean(total_bias)
ret = {}
for idx in range(class_count):
key = 'class_' + str(idx) + '_bias'
ret[key] = total_bias[idx]
ret['mean_bias'] = mean_class_bias
return ret
with open(args.config, 'r') as f:
config_json = json.load(f)
for config in config_json:
class_bias_result = []
for no_try in range(config['no_tries']):
exp_result_path = Path(
args.raw_result_dir,
"{0}_{1}_{2}_{3}/{4}".format(config['network'],
config['training_type'],
config['dataset'],
config['random_seed'],
str(no_try)))
color_result_path = Path(
exp_result_path,
"record/{0}_{1}/e1/test_color_result.pkl".format(config['dataset'],
config['training_type'].replace('-', '_')))
gray_result_path = Path(
exp_result_path,
"record/{0}_{1}/e1/test_gray_result.pkl".format(config['dataset'],
config['training_type'].replace('-', '_')))
test_label_path = Path(exp_result_path, 'data/cifar_test_labels')
with open(str(color_result_path), 'rb') as f:
color_result = pickle.load(f)
with open(str(gray_result_path), 'rb') as f:
gray_result = pickle.load(f)
with open(str(test_label_path), 'rb') as f:
test_labels = pickle.load(f)
if 'outputs' in color_result:
outputs_key = 'outputs'
else:
outputs_key = 'class_outputs'
color_score = color_result[outputs_key]
gray_score = gray_result[outputs_key]
# Get Bias results
def eval_loop(modes):
for eval_mode in range(1, modes + 1):
if eval_mode <= 3:
predicted_classes = predict(color_score, gray_score, config['training_type'], eval_mode)
bias_result = get_bias(predicted_classes, test_labels)
else: # Load RBA bias_result
rba_bias_dict_path = Path(
exp_result_path,
"record/{0}_{1}/e1/rba_bias_dict.pkl".format(config['dataset'],
config['training_type'].replace('-', '_')))
with open(str(rba_bias_dict_path), 'rb') as f:
bias_result = pickle.load(f)
for key, value in bias_result.items():
if key not in class_bias_result[eval_mode - 1]:
class_bias_result[eval_mode - 1][key] = []
class_bias_result[eval_mode - 1][key].append(value)
if config['training_type'] == 'domain-discriminative':
if len(class_bias_result) == 0:
class_bias_result = [{}, {}, {}, {}]
eval_loop(4)
elif config['training_type'] == 'domain-independent':
if len(class_bias_result) == 0:
class_bias_result = [{}, {}]
eval_loop(2)
else:
if len(class_bias_result) == 0:
class_bias_result = [{}]
eval_loop(1)
# Output
def write_raw_dict(result_dict, writer):
headers = sorted(list(result_dict.keys()))
writer.writerow(['Trial'] + headers)
for no_try in range(config['no_tries']):
row = [no_try]
for key in headers:
row.append(str(result_dict[key][no_try]))
writer.writerow(row)
def write_stat(result_dict, writer):
bias_keys = sorted(list(result_dict.keys()))
writer.writerow(['Bias_name', 'max', 'min', 'mean', 'max_diff', 'stdev', 'rel_maxdiff'])
for key in bias_keys:
rd = partial(decimal_utils.round_significant_digit, digit=decimal_utils.GLOBAL_ROUND_DIGIT)
rf = partial(decimal_utils.round_significant_format, digit=decimal_utils.GLOBAL_ROUND_DIGIT)
rl = partial(decimal_utils.round_list, digit=decimal_utils.GLOBAL_ROUND_DIGIT)
value = result_dict[key]
value = rl(value)
max_v = max(value)
min_v = min(value)
max_diff = max_v - min_v
std_dev = statistics.stdev(value)
mean = statistics.mean(value)
if math.isclose(mean, 0):
rel_maxdiff = 0
else:
rel_maxdiff = rd(max_diff) / rd(mean)
writer.writerow([
key,
rf(max_v),
rf(min_v),
rf(mean),
rf(max_diff),
rf(std_dev),
rf(rel_maxdiff)
])
def write_one_result(raw_cw, analysis_cw, caption, result_dict):
raw_cw.writerow([caption])
raw_cw.writerow([])
analysis_cw.writerow([caption])
analysis_cw.writerow([])
write_raw_dict(result_dict, raw_cw)
raw_cw.writerow([])
write_stat(result_dict, analysis_cw)
analysis_cw.writerow([])
raw_output_path = Path(
args.output_dir,
"{0}_{1}_{2}_{3}_perclass_bias_raw.csv".format(config['network'],
config['training_type'],
config['dataset'],
config['random_seed']))
f = open(str(raw_output_path), 'w', newline='')
cw = csv.writer(f)
analysis_output_path = Path(
args.output_dir,
"{0}_{1}_{2}_{3}_perclass_variance_analysis.csv".format(config['network'],
config['training_type'],
config['dataset'],
config['random_seed']))
f2 = open(str(analysis_output_path), 'w', newline='')
cw2 = csv.writer(f2)
if config['training_type'] == 'domain-discriminative':
write_one_result(cw, cw2, 'Sum prob w/o prior shift (result 1)', class_bias_result[0])
write_one_result(cw, cw2, 'Max prob w/ prior shift (result 2)', class_bias_result[1])
write_one_result(cw, cw2, 'Sum prob w/ prior shift (result 3)', class_bias_result[2])
write_one_result(cw, cw2, 'RBA (result 4)', class_bias_result[3])
elif config['training_type'] == 'domain-independent':
write_one_result(cw, cw2, 'Bias conditional (result 1)', class_bias_result[0])
write_one_result(cw, cw2, 'Bias sum (result 2)', class_bias_result[1])
else:
write_one_result(cw, cw2, 'Mean bias', class_bias_result[0])
f.close()
f2.close()
| [
"statistics.stdev",
"numpy.array",
"numpy.mean",
"argparse.ArgumentParser",
"pathlib.Path",
"numpy.stack",
"numpy.concatenate",
"numpy.abs",
"numpy.ones",
"csv.writer",
"pickle.load",
"numpy.argmax",
"statistics.mean",
"math.isclose",
"os.path.realpath",
"numpy.sum",
"numpy.zeros",
... | [((339, 364), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (362, 364), False, 'import argparse\n'), ((2400, 2433), 'numpy.zeros', 'np.zeros', (['[10000]'], {'dtype': 'np.int32'}), '([10000], dtype=np.int32)\n', (2408, 2433), True, 'import numpy as np\n'), ((2467, 2499), 'numpy.ones', 'np.ones', (['[10000]'], {'dtype': 'np.int32'}), '([10000], dtype=np.int32)\n', (2474, 2499), True, 'import numpy as np\n'), ((2536, 2587), 'numpy.concatenate', 'np.concatenate', (['[domain_zeros, domain_ones]'], {'axis': '(0)'}), '([domain_zeros, domain_ones], axis=0)\n', (2550, 2587), True, 'import numpy as np\n'), ((2609, 2644), 'numpy.array', 'np.array', (['(test_labels + test_labels)'], {}), '(test_labels + test_labels)\n', (2617, 2644), True, 'import numpy as np\n'), ((2732, 2776), 'numpy.zeros', 'np.zeros', (['(class_count, 2)'], {'dtype': 'np.float64'}), '((class_count, 2), dtype=np.float64)\n', (2740, 2776), True, 'import numpy as np\n'), ((3021, 3039), 'numpy.abs', 'np.abs', (['(bias - 0.5)'], {}), '(bias - 0.5)\n', (3027, 3039), True, 'import numpy as np\n'), ((3062, 3081), 'numpy.mean', 'np.mean', (['total_bias'], {}), '(total_bias)\n', (3069, 3081), True, 'import numpy as np\n'), ((3319, 3331), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3328, 3331), False, 'import json\n'), ((8713, 8726), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (8723, 8726), False, 'import csv\n'), ((9196, 9210), 'csv.writer', 'csv.writer', (['f2'], {}), '(f2)\n', (9206, 9210), False, 'import csv\n'), ((663, 712), 'numpy.concatenate', 'np.concatenate', (['[gray_score, color_score]'], {'axis': '(0)'}), '([gray_score, color_score], axis=0)\n', (677, 712), True, 'import numpy as np\n'), ((2937, 2969), 'numpy.amax', 'np.amax', (['count_per_class'], {'axis': '(1)'}), '(count_per_class, axis=1)\n', (2944, 2969), True, 'import numpy as np\n'), ((2972, 3003), 'numpy.sum', 'np.sum', (['count_per_class'], {'axis': '(1)'}), '(count_per_class, axis=1)\n', (2978, 3003), 
True, 'import numpy as np\n'), ((4321, 4368), 'pathlib.Path', 'Path', (['exp_result_path', '"""data/cifar_test_labels"""'], {}), "(exp_result_path, 'data/cifar_test_labels')\n", (4325, 4368), False, 'from pathlib import Path\n'), ((798, 820), 'scipy.special.softmax', 'softmax', (['score'], {'axis': '(1)'}), '(score, axis=1)\n', (805, 820), False, 'from scipy.special import softmax\n'), ((853, 901), 'numpy.argmax', 'np.argmax', (['(probs[:, :10] + probs[:, 10:])'], {'axis': '(1)'}), '(probs[:, :10] + probs[:, 10:], axis=1)\n', (862, 901), True, 'import numpy as np\n'), ((2200, 2249), 'numpy.concatenate', 'np.concatenate', (['[gray_score, color_score]'], {'axis': '(0)'}), '([gray_score, color_score], axis=0)\n', (2214, 2249), True, 'import numpy as np\n'), ((2278, 2302), 'numpy.argmax', 'np.argmax', (['score'], {'axis': '(1)'}), '(score, axis=1)\n', (2287, 2302), True, 'import numpy as np\n'), ((4451, 4465), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4462, 4465), False, 'import pickle\n'), ((4545, 4559), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4556, 4559), False, 'import pickle\n'), ((4638, 4652), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4649, 4652), False, 'import pickle\n'), ((7002, 7093), 'functools.partial', 'partial', (['decimal_utils.round_significant_digit'], {'digit': 'decimal_utils.GLOBAL_ROUND_DIGIT'}), '(decimal_utils.round_significant_digit, digit=decimal_utils.\n GLOBAL_ROUND_DIGIT)\n', (7009, 7093), False, 'from functools import partial\n'), ((7106, 7198), 'functools.partial', 'partial', (['decimal_utils.round_significant_format'], {'digit': 'decimal_utils.GLOBAL_ROUND_DIGIT'}), '(decimal_utils.round_significant_format, digit=decimal_utils.\n GLOBAL_ROUND_DIGIT)\n', (7113, 7198), False, 'from functools import partial\n'), ((7211, 7284), 'functools.partial', 'partial', (['decimal_utils.round_list'], {'digit': 'decimal_utils.GLOBAL_ROUND_DIGIT'}), '(decimal_utils.round_list, digit=decimal_utils.GLOBAL_ROUND_DIGIT)\n', 
(7218, 7284), False, 'from functools import partial\n'), ((7487, 7510), 'statistics.stdev', 'statistics.stdev', (['value'], {}), '(value)\n', (7503, 7510), False, 'import statistics\n'), ((7530, 7552), 'statistics.mean', 'statistics.mean', (['value'], {}), '(value)\n', (7545, 7552), False, 'import statistics\n'), ((7568, 7589), 'math.isclose', 'math.isclose', (['mean', '(0)'], {}), '(mean, 0)\n', (7580, 7589), False, 'import math\n'), ((277, 303), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (293, 303), False, 'import os\n'), ((1815, 1880), 'numpy.concatenate', 'np.concatenate', (['[gray_score[:, 10:], color_score[:, :10]]'], {'axis': '(0)'}), '([gray_score[:, 10:], color_score[:, :10]], axis=0)\n', (1829, 1880), True, 'import numpy as np\n'), ((1913, 1939), 'numpy.argmax', 'np.argmax', (['outputs'], {'axis': '(1)'}), '(outputs, axis=1)\n', (1922, 1939), True, 'import numpy as np\n'), ((1166, 1188), 'scipy.special.softmax', 'softmax', (['score'], {'axis': '(1)'}), '(score, axis=1)\n', (1173, 1188), False, 'from scipy.special import softmax\n'), ((1643, 1691), 'numpy.argmax', 'np.argmax', (['(probs[:, :10] + probs[:, 10:])'], {'axis': '(1)'}), '(probs[:, :10] + probs[:, 10:], axis=1)\n', (1652, 1691), True, 'import numpy as np\n'), ((2009, 2058), 'numpy.concatenate', 'np.concatenate', (['[gray_score, color_score]'], {'axis': '(0)'}), '([gray_score, color_score], axis=0)\n', (2023, 2058), True, 'import numpy as np\n'), ((2147, 2173), 'numpy.argmax', 'np.argmax', (['outputs'], {'axis': '(1)'}), '(outputs, axis=1)\n', (2156, 2173), True, 'import numpy as np\n'), ((1567, 1589), 'scipy.special.softmax', 'softmax', (['score'], {'axis': '(1)'}), '(score, axis=1)\n', (1574, 1589), False, 'from scipy.special import softmax\n'), ((5645, 5659), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5656, 5659), False, 'import pickle\n'), ((1252, 1292), 'numpy.stack', 'np.stack', (['(probs[:, :10], probs[:, 10:])'], {}), '((probs[:, :10], probs[:, 
10:]))\n', (1260, 1292), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from math import pi
import math, os, datetime
class data_manipulation_module:
def __init__(self):
self.a = 1
self.list_x = None
self.list_y = None
def init_graph_list(self):
self.list_x = []
self.list_y = []
def add_graph_list(self, element_x, element_y):
self.list_x.append(element_x)
self.list_y.append(element_y)
# 단순히 배열의 길이를 늘리기만 한다.
# 나머지 부분은 0으로 채운다.
def data_stretched_no_modify(self, data :np.ndarray, target_length :int):
self.a = 1
if data.size < target_length:
print("sizes are wrong")
return -1
ret = np.zeros(target_length)
ret[:data.size] = data
return ret
# np.interp 와 비슷한 연산을 한다.
# interp 와 다르게, origin_axis 범위 밖의 모든 부분들을 0으로 채운다.
# interp 는 낮은 부분들만 0으로 채운다.
# target_axis
# - y 값을 구할 x 축 좌표들이다.
# - 순서에 상관이 없다
# origin_axis
# - 기존의 x 축 좌표들이다.
# - x[i] <= x[j] for all i <= j
# data
# - 기존의 y 축 좌표들이다.
# - origin_axis 와 크기가 같아야 한다.
def data_interp_zeros(self, target_axis :np.ndarray, origin_axis :np.ndarray, data :np.ndarray):
self.a = 1
# if data.size is not origin_axis.size:
# print("DataManipulation__data_interp_zeros : origin data sizes are wrong %d %d" % (data.size, origin_axis.size))
return np.interp(target_axis, origin_axis, data) * ((origin_axis[0] <= target_axis) & (target_axis <= origin_axis[-1]))
# 측정 데이터의 시간 영역과 주파수 영역의 x 축 좌표들의 배열을 구한다.
# 시간 영역
# - N or n : Nano seconds
# - U or u : Micro seconds
# - M or m : Mili
# 주파수 영역
# - G or g : Giga
# - M or m : Mega
# - K or k : Kilo
def get_sample_spacing(self, samples_per_second :int, size :int, unit_output_time :str, unit_output_freq :str):
self.a = 1
if unit_output_time[0] == 'N' or unit_output_time[0] == 'n':
u_output_time = 1e9
elif unit_output_time[0] == 'U' or unit_output_time[0] == 'u':
u_output_time = 1e6
elif unit_output_time[0] == 'M' or unit_output_time[0] == 'm':
u_output_time = 1e3
else:
u_output_time = 1
if unit_output_freq[0] == 'G' or unit_output_freq[0] == 'g':
u_output_freq = 1e-9
elif unit_output_freq[0] == 'M' or unit_output_freq[0] == 'm':
u_output_freq = 1e-6
elif unit_output_freq[0] == 'K' or unit_output_freq[0] == 'u':
u_output_freq = 1e-3
else:
u_output_freq = 1
ret_time = np.arange(size) * u_output_time / samples_per_second
ret_freq = np.arange(size) * u_output_freq * samples_per_second / size
return ret_time, ret_freq
# 신호 데이터의 시간 영역 혹은 주파수 영역의 x축 단위를 샘플의 개수를 유지하면서 변환한다.
# 시간영역의 단위가 변하면 주파수 영역도 그에 따라서 변하도록 한다.
# 주파수 영역의 단위가 바뀌면 시간 영역의 단위도 그에 따라서 바뀐다.
#
# time_x : 변환 전 시간 영역의 x 좌표
# freq_x : 변환 전 주파수 영역의 x 좌표
# delta_before : 변환 전 단위의 크기(ex: 10MHz 에서 10)
# delta_after : 변환 후 단위의 크기
# unit_before : 변환 전 단위(ex: 10MHz 에서 MHz, 8.2ms 에서 ms), unit_after 와 시간 or 주파수가 일치해야됨
# unit_after : 변환 후 단위 unit_before 와 시간 or 주파수가 일치해야됨
def get_new_sample_spacing(self, time_x :np.ndarray, freq_x :np.ndarray, delta_before :float, delta_after :float, unit_before :str, unit_after :str):
if unit_before[0] == 'H' or unit_before[0] == 'h' or unit_before[1] == 'H' or unit_before[1] == 'h':
mode_is_freq = True
elif unit_before[0] == 'S' or unit_before[0] == 'S' or unit_before[1] == 'S' or unit_before[1] == 's':
mode_is_freq = False
else:
print("unit_before is wrong")
return None
if (unit_after[0] == 'H' or unit_after[0] == 'h' or unit_after[1] == 'H' or unit_after[1] == 'h') and (mode_is_freq is False) is True:
print("Input : time, Output : freq -> Wrong")
return None
elif (unit_after[0] == 'S' or unit_after[0] == 'S' or unit_after[1] == 'S' or unit_after[1] == 's') and (mode_is_freq is True) is True:
print("Input : freq, Output : time -> Wrong")
return None
if mode_is_freq:
if unit_before[0] == 'G' or unit_before[0] == 'g':
c = 1000
elif unit_before[0] == 'M' or unit_before[0] == 'm':
c = 1
elif unit_before[0] == 'K' or unit_before[0] == 'k':
c = 0.001
elif unit_before[0] == 'H' or unit_before[0] == 'h':
c = 0.000001
else:
print("Unit of frequency is too small")
return None
if unit_after[0] == 'G' or unit_after[0] == 'g':
d = 0.001
elif unit_after[0] == 'M' or unit_after[0] == 'm':
d = 1
elif unit_after[0] == 'K' or unit_after[0] == 'k':
d = 1000
elif unit_after[0] == 'H' or unit_after[0] == 'h':
d = 1000000
else:
print("Unit of frequency is too small")
return None
ret_freq = freq_x * c * d * delta_after / delta_before
ret_time = time_x * delta_before / (c * d * delta_after)
else:
if unit_before[0] == 'P' or unit_before[0] == 'p':
c = 0.000001
elif unit_before[0] == 'N' or unit_before[0] == 'n':
c = 0.0001
elif unit_before[0] == 'U' or unit_before[0] == 'u':
c = 1
elif unit_before[0] == 'M' or unit_before[0] == 'm':
c = 1000
elif unit_before[0] == 'S' or unit_before[0] == 's':
c = 1000000
else:
print("Unit of time is too large")
return None
if unit_before[0] == 'P' or unit_before[0] == 'p':
d = 1000000
elif unit_before[0] == 'N' or unit_before[0] == 'n':
d = 1000
elif unit_before[0] == 'U' or unit_before[0] == 'u':
d = 1
elif unit_before[0] == 'M' or unit_before[0] == 'm':
d = 0.001
elif unit_before[0] == 'S' or unit_before[0] == 's':
d = 0.000001
else:
print("Unit of time is too large")
return None
ret_time = time_x * c * d * delta_after / delta_before
ret_freq = freq_x * delta_before / (c * d * delta_after)
return ret_time, ret_freq
def _resizing(self, x_t, y_t):
self.a = 1
x_size = x_t.size
y_size = y_t.size
if x_size > y_size:
z = np.zeros(x_size)
z[:y_size] = y_t
return x_t, z
elif x_size < y_size:
z = np.zeros(y_size)
z[:x_size] = x_t
return z, y_t
else:
return x_t, y_t
def _return_mode(self, data, mode: str=None):
self.a = 1
if mode is None:
return data
elif mode is "complex" or mode is "cpx":
return np.real(data), np.imag(data)
elif mode[:4] is "real":
return np.real(data)
elif mode[:4] is "imag":
return np.imag(data)
elif mode[:3] is "abs" or mode[:3] is "Abs":
return np.abs(data)
else:
return data
def convert_deconvolution(self, x_t, y_t, any_value, output_mode: str=None):
x_t, y_t = self._resizing(x_t, y_t)
x_f = np.fft.fft(x_t)
y_f = np.fft.fft(y_t)
x_f[0] = 1
h_f = y_f / x_f
h_t = np.fft.ifft(h_f)
return self._return_mode(h_t, output_mode)
def convert_selective_divide(self, x_t, y_t, threshold, output_mode: str=None):
x_t, y_t = self._resizing(x_t, y_t)
x_f = np.fft.fft(x_t)
y_f = np.fft.fft(y_t)
sizes = len(x_f)
h_f = np.zeros(sizes)
for i in range(sizes):
if np.abs(x_f[i]) >= threshold:
h_f[i] = y_f[i]/x_f[i]
h_t = np.fft.ifft(h_f)
return self._return_mode(h_t, output_mode)
def convert_wiener_convolution(self, x_t, y_t, snr_dB, output_mode: str=None):
x_t, y_t = self._resizing(x_t, y_t)
x_f = np.fft.fft(x_t)
y_f = np.fft.fft(y_t)
snr = math.pow(10, snr_dB/10)
g_f = np.conj(x_f) / (np.square(np.absolute(x_f)) + 1 / snr)
h_f = y_f * g_f
h_t = np.fft.ifft(h_f)
return self._return_mode(h_t, output_mode)
# y_t 가 고정된다
# cor[0] = x_t[-1]*y_t[0] 이다.
# x_t 의 오른쪽 끝부분부터 y_t 의 선두 부분이랑 접촉을 시작한다.
# x_t 의 시작부분과 y_t 의 시작부분이 만나는 지점부터의 데이터가 의미가 있다.
def convert_cross_correlation(self, x_t, y_t, output_mode: str=None):
x_t, y_t = self._resizing(x_t, y_t)
length = x_t.size
h_cor = np.correlate(y_t, x_t, 'full')
h_t = h_cor[length-1:]
return self._return_mode(h_t, output_mode)
def convert_filter_lpf_f(self, x_t, ff, output_mode: str=None):
x_f = np.fft.fft(x_t)
for i in range(ff, x_t.size):
x_f[i] = 0
x_t = np.fft.ifft(x_f)
return self._return_mode(x_t, output_mode)
def convert_filter_lpf_t(self, x_t, ff, output_mode: str=None):
w0 = 2 * pi * ff
fs = 1
mothers = w0+2
y_t = np.zeros(x_t.size)
y_t[0] = 2*x_t[0]/mothers
for i in range(1, x_t.size):
y_t[i] = 2/mothers*x_t[i] - 2/mothers*x_t[i-1] - (w0-2)/mothers*y_t[i-1]
return self._return_mode(y_t, output_mode)
# arr_x 는 arr_y 와 차원이 같아야 한다.
# arr_x 는 3차원 리스트이다
# (row, col, data)
def graphing_1D(self, arr_x=None, arr_y=None, isDot :bool=False, isCpx :bool=False):
if arr_x is None:
arr_x = self.list_x
if arr_y is None:
arr_y = self.list_y
if arr_x is None or arr_y is None:
print("list_x and list_y should be filled")
return None
if len(arr_x) is not len(arr_y):
print("size of row is different")
return None
if len(arr_x[0]) is not len(arr_y[0]):
print("size of col is different")
return None
size_row = len(arr_x)
size_col = len(arr_x[0])
fig = plt.figure()
for i in range(size_row):
for j in range(size_col):
t = fig.add_subplot(size_row, size_col, i*size_col + j + 1)
if isCpx:
if isDot:
t.plot(arr_x[i][j], np.real(arr_y[i][j]), '.')
t.plot(arr_x[i][j], np.imag(arr_y[i][j]), '.')
else:
t.plot(arr_x[i][j], np.real(arr_y[i][j]))
t.plot(arr_x[i][j], np.imag(arr_y[i][j]))
else:
if isDot:
t.plot(arr_x[i][j], arr_y[i][j], '.')
else:
t.plot(arr_x[i][j], arr_y[i][j])
plt.show()
def create_image_directory(self, base_dir: str):
self.a = 1
os.makedirs(base_dir, exist_ok=True)
dtime = datetime.datetime.now()
dtimes = '/%d_%d_%d' % (int(dtime.year), int(dtime.month), int(dtime.day))
dir_name = base_dir + dtimes
os.makedirs(dir_name, exist_ok=True)
folder_count = len(os.listdir(dir_name))
dir_name_2 = dir_name + dtimes + '_%d' % folder_count
os.makedirs(dir_name_2, exist_ok=True)
return dir_name_2 + '/' | [
"numpy.abs",
"os.listdir",
"os.makedirs",
"math.pow",
"numpy.conj",
"numpy.absolute",
"numpy.fft.fft",
"datetime.datetime.now",
"numpy.zeros",
"numpy.correlate",
"matplotlib.pyplot.figure",
"numpy.real",
"numpy.interp",
"numpy.fft.ifft",
"numpy.imag",
"numpy.arange",
"matplotlib.pypl... | [((703, 726), 'numpy.zeros', 'np.zeros', (['target_length'], {}), '(target_length)\n', (711, 726), True, 'import numpy as np\n'), ((7689, 7704), 'numpy.fft.fft', 'np.fft.fft', (['x_t'], {}), '(x_t)\n', (7699, 7704), True, 'import numpy as np\n'), ((7719, 7734), 'numpy.fft.fft', 'np.fft.fft', (['y_t'], {}), '(y_t)\n', (7729, 7734), True, 'import numpy as np\n'), ((7792, 7808), 'numpy.fft.ifft', 'np.fft.ifft', (['h_f'], {}), '(h_f)\n', (7803, 7808), True, 'import numpy as np\n'), ((8004, 8019), 'numpy.fft.fft', 'np.fft.fft', (['x_t'], {}), '(x_t)\n', (8014, 8019), True, 'import numpy as np\n'), ((8034, 8049), 'numpy.fft.fft', 'np.fft.fft', (['y_t'], {}), '(y_t)\n', (8044, 8049), True, 'import numpy as np\n'), ((8091, 8106), 'numpy.zeros', 'np.zeros', (['sizes'], {}), '(sizes)\n', (8099, 8106), True, 'import numpy as np\n'), ((8237, 8253), 'numpy.fft.ifft', 'np.fft.ifft', (['h_f'], {}), '(h_f)\n', (8248, 8253), True, 'import numpy as np\n'), ((8449, 8464), 'numpy.fft.fft', 'np.fft.fft', (['x_t'], {}), '(x_t)\n', (8459, 8464), True, 'import numpy as np\n'), ((8479, 8494), 'numpy.fft.fft', 'np.fft.fft', (['y_t'], {}), '(y_t)\n', (8489, 8494), True, 'import numpy as np\n'), ((8509, 8534), 'math.pow', 'math.pow', (['(10)', '(snr_dB / 10)'], {}), '(10, snr_dB / 10)\n', (8517, 8534), False, 'import math, os, datetime\n'), ((8640, 8656), 'numpy.fft.ifft', 'np.fft.ifft', (['h_f'], {}), '(h_f)\n', (8651, 8656), True, 'import numpy as np\n'), ((9032, 9062), 'numpy.correlate', 'np.correlate', (['y_t', 'x_t', '"""full"""'], {}), "(y_t, x_t, 'full')\n", (9044, 9062), True, 'import numpy as np\n'), ((9228, 9243), 'numpy.fft.fft', 'np.fft.fft', (['x_t'], {}), '(x_t)\n', (9238, 9243), True, 'import numpy as np\n'), ((9319, 9335), 'numpy.fft.ifft', 'np.fft.ifft', (['x_f'], {}), '(x_f)\n', (9330, 9335), True, 'import numpy as np\n'), ((9534, 9552), 'numpy.zeros', 'np.zeros', (['x_t.size'], {}), '(x_t.size)\n', (9542, 9552), True, 'import numpy as np\n'), ((10494, 
10506), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10504, 10506), True, 'import matplotlib.pyplot as plt\n'), ((11218, 11228), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11226, 11228), True, 'import matplotlib.pyplot as plt\n'), ((11310, 11346), 'os.makedirs', 'os.makedirs', (['base_dir'], {'exist_ok': '(True)'}), '(base_dir, exist_ok=True)\n', (11321, 11346), False, 'import math, os, datetime\n'), ((11364, 11387), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11385, 11387), False, 'import math, os, datetime\n'), ((11516, 11552), 'os.makedirs', 'os.makedirs', (['dir_name'], {'exist_ok': '(True)'}), '(dir_name, exist_ok=True)\n', (11527, 11552), False, 'import math, os, datetime\n'), ((11673, 11711), 'os.makedirs', 'os.makedirs', (['dir_name_2'], {'exist_ok': '(True)'}), '(dir_name_2, exist_ok=True)\n', (11684, 11711), False, 'import math, os, datetime\n'), ((1480, 1521), 'numpy.interp', 'np.interp', (['target_axis', 'origin_axis', 'data'], {}), '(target_axis, origin_axis, data)\n', (1489, 1521), True, 'import numpy as np\n'), ((6845, 6861), 'numpy.zeros', 'np.zeros', (['x_size'], {}), '(x_size)\n', (6853, 6861), True, 'import numpy as np\n'), ((8547, 8559), 'numpy.conj', 'np.conj', (['x_f'], {}), '(x_f)\n', (8554, 8559), True, 'import numpy as np\n'), ((11581, 11601), 'os.listdir', 'os.listdir', (['dir_name'], {}), '(dir_name)\n', (11591, 11601), False, 'import math, os, datetime\n'), ((2719, 2734), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (2728, 2734), True, 'import numpy as np\n'), ((6963, 6979), 'numpy.zeros', 'np.zeros', (['y_size'], {}), '(y_size)\n', (6971, 6979), True, 'import numpy as np\n'), ((8154, 8168), 'numpy.abs', 'np.abs', (['x_f[i]'], {}), '(x_f[i])\n', (8160, 8168), True, 'import numpy as np\n'), ((2791, 2806), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (2800, 2806), True, 'import numpy as np\n'), ((7264, 7277), 'numpy.real', 'np.real', (['data'], {}), '(data)\n', 
(7271, 7277), True, 'import numpy as np\n'), ((7279, 7292), 'numpy.imag', 'np.imag', (['data'], {}), '(data)\n', (7286, 7292), True, 'import numpy as np\n'), ((7345, 7358), 'numpy.real', 'np.real', (['data'], {}), '(data)\n', (7352, 7358), True, 'import numpy as np\n'), ((8573, 8589), 'numpy.absolute', 'np.absolute', (['x_f'], {}), '(x_f)\n', (8584, 8589), True, 'import numpy as np\n'), ((7411, 7424), 'numpy.imag', 'np.imag', (['data'], {}), '(data)\n', (7418, 7424), True, 'import numpy as np\n'), ((7497, 7509), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (7503, 7509), True, 'import numpy as np\n'), ((10756, 10776), 'numpy.real', 'np.real', (['arr_y[i][j]'], {}), '(arr_y[i][j])\n', (10763, 10776), True, 'import numpy as np\n'), ((10827, 10847), 'numpy.imag', 'np.imag', (['arr_y[i][j]'], {}), '(arr_y[i][j])\n', (10834, 10847), True, 'import numpy as np\n'), ((10924, 10944), 'numpy.real', 'np.real', (['arr_y[i][j]'], {}), '(arr_y[i][j])\n', (10931, 10944), True, 'import numpy as np\n'), ((10990, 11010), 'numpy.imag', 'np.imag', (['arr_y[i][j]'], {}), '(arr_y[i][j])\n', (10997, 11010), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#@Author: <NAME>
#@Date: 2019-11-18 20:53:24
#@Last Modified by: <NAME>
#@Last Modified time: 2019-11-18 21:44:1
import numpy as np
import torch
import torch.nn.functional as F
import os
def compute_pairwise_distance(x):
    ''' computation of pairwise distance matrix
    ---- Input
    - x: input tensor (sample_number,2)
    ---- Return
    - matrix: output matrix torch.Tensor [sample_number,sample_number]
    '''
    # squared L2 norm of every row
    sq_norm = torch.sum(x * x, dim=1)
    gram = torch.matmul(x, x.transpose(1, 0))
    # ||xi - xj||^2 = ||xi||^2 + ||xj||^2 - 2 <xi, xj>, via broadcasting
    dist = sq_norm.unsqueeze(1) + sq_norm.unsqueeze(0) - 2 * gram
    # note: these are squared distances (no sqrt); the clamp keeps every
    # entry strictly positive so later divisions/logs are safe
    return torch.clamp(dist, min=1e-6)
def middle_p(i, j, size):
    """Pick a single intermediate grid node between flat indices i and j.

    Currently the simplest version; more middle steps could be added later.
    NOTE(review): callers may pass a float ``size`` (np.sqrt of the node
    count), so the returned index can be a float — it is cast to long
    downstream; verify against compute_norm_pairwise_distance.
    """
    pi = np.array([i // size, i % size])
    pj = np.array([j // size, j % size])
    # order the two points so pi has the smaller column index
    if pi[1] > pj[1]:
        pi, pj = pj, pi
    if pi[0] > pj[0]:
        return pi[0] * size + pj[1]
    return pj[0] * size + pi[1]
def compute_norm_pairwise_distance(x):
    ''' computation of normalized pairwise distance matrix, where the
    distance between two nodes is routed through one intermediate node
    chosen by middle_p (a "surface" path rather than the straight line).
    ---- Input
    - x: input tensor torch.Tensor (sample_number,2)
    ---- Return
    - matrix: output matrix torch.Tensor [sample_num, sample_num],
      each row normalized by its sum.
    '''
    # (removed dead code: an unused `connection` buffer and a commented-out
    # straight-distance experiment that were left over from development)
    x_pair_dist = compute_pairwise_distance(x)
    # grid side length; float — middle_p's result is cast to long below
    size = np.sqrt(x.shape[0])
    surface_dist = torch.zeros_like(x_pair_dist)
    for i in range(x.shape[0]):
        for j in range(x.shape[0]):
            # route i -> middle -> j through one intermediate grid node
            middle = torch.tensor(middle_p(i, j, size)).to(x.device).long()
            surface_dist[i, j] = x_pair_dist[i, middle] + x_pair_dist[middle, j]
    # row-normalize; detach so the normalizer is treated as a constant
    # during backpropagation (matches the original behaviour)
    normalizer = torch.sum(surface_dist, dim=-1, keepdim=True)
    x_norm_pair_dist = surface_dist / (normalizer + 1e-12).detach()
    return x_norm_pair_dist
def NDiv_loss_surface(x, y, alpha=1, mode=2):
    """Normalized diversity loss between a 2-D grid and its 3-D embedding.

    ---- Input
    - x: (sample_number, 2) — the 2-D grid; surface paths are measured on it
    - y: (sample_number, 3) — the 3-D points, paired with x by index
    - alpha: scale applied to the grid-side normalized distances
    - mode: selects the per-pair penalty (0..4)
    ---- Return
    - loss: normalized diversity loss (scalar per batch element)
    """
    x = x.view(-1, 2)
    y = y.view(-1, 3)
    size = 2 / np.sqrt(x.shape[0])  # grid spacing; only used by mode 3
    S = x.shape[0]
    dist_x = compute_norm_pairwise_distance(x)
    dist_y = compute_norm_pairwise_distance(y)
    if mode == 0:
        penalty = torch.abs(dist_x - dist_y)
    elif mode == 1:
        penalty = F.relu(dist_y - dist_x * alpha)
    elif mode == 2:
        penalty = F.relu(dist_x * alpha - dist_y)
    elif mode == 3:
        penalty = torch.clamp(torch.abs(dist_x - dist_y), min=0.1 * size)
    elif mode == 4:
        # same penalty as mode 2 (kept as a distinct mode by the original)
        penalty = F.relu(dist_x * alpha - dist_y)
    ndiv_loss = penalty.sum(-1).sum(-1) / (S * (S - 1))
    return ndiv_loss
if __name__ == '__main__':
    # smoke test: 100 random 2-D grid points paired with 100 random 3-D samples
    grid_2d = torch.rand(100, 2)
    samples_3d = torch.rand(100, 3)
    loss = NDiv_loss_surface(grid_2d, samples_3d)
| [
"torch.abs",
"numpy.sqrt",
"torch.rand",
"torch.pow",
"numpy.array",
"torch.sum",
"torch.nn.functional.relu",
"torch.zeros_like",
"torch.clamp"
] | [((720, 748), 'torch.clamp', 'torch.clamp', (['dist'], {'min': '(1e-06)'}), '(dist, min=1e-06)\n', (731, 748), False, 'import torch\n'), ((862, 893), 'numpy.array', 'np.array', (['[i // size, i % size]'], {}), '([i // size, i % size])\n', (870, 893), True, 'import numpy as np\n'), ((894, 925), 'numpy.array', 'np.array', (['[j // size, j % size]'], {}), '([j // size, j % size])\n', (902, 925), True, 'import numpy as np\n'), ((1362, 1391), 'torch.zeros_like', 'torch.zeros_like', (['x_pair_dist'], {}), '(x_pair_dist)\n', (1378, 1391), False, 'import torch\n'), ((1399, 1418), 'numpy.sqrt', 'np.sqrt', (['x.shape[0]'], {}), '(x.shape[0])\n', (1406, 1418), True, 'import numpy as np\n'), ((1605, 1634), 'torch.zeros_like', 'torch.zeros_like', (['x_pair_dist'], {}), '(x_pair_dist)\n', (1621, 1634), False, 'import torch\n'), ((1841, 1886), 'torch.sum', 'torch.sum', (['surface_dist'], {'dim': '(-1)', 'keepdim': '(True)'}), '(surface_dist, dim=-1, keepdim=True)\n', (1850, 1886), False, 'import torch\n'), ((3038, 3056), 'torch.rand', 'torch.rand', (['(100)', '(2)'], {}), '(100, 2)\n', (3048, 3056), False, 'import torch\n'), ((3060, 3078), 'torch.rand', 'torch.rand', (['(100)', '(3)'], {}), '(100, 3)\n', (3070, 3078), False, 'import torch\n'), ((471, 486), 'torch.pow', 'torch.pow', (['x', '(2)'], {}), '(x, 2)\n', (480, 486), False, 'import torch\n'), ((508, 523), 'torch.pow', 'torch.pow', (['y', '(2)'], {}), '(y, 2)\n', (517, 523), False, 'import torch\n'), ((2318, 2337), 'numpy.sqrt', 'np.sqrt', (['x.shape[0]'], {}), '(x.shape[0])\n', (2325, 2337), True, 'import numpy as np\n'), ((2501, 2547), 'torch.abs', 'torch.abs', (['(x_norm_pair_dist - y_norm_pair_dist)'], {}), '(x_norm_pair_dist - y_norm_pair_dist)\n', (2510, 2547), False, 'import torch\n'), ((2584, 2635), 'torch.nn.functional.relu', 'F.relu', (['(y_norm_pair_dist - x_norm_pair_dist * alpha)'], {}), '(y_norm_pair_dist - x_norm_pair_dist * alpha)\n', (2590, 2635), True, 'import torch.nn.functional as F\n'), ((2671, 
2722), 'torch.nn.functional.relu', 'F.relu', (['(x_norm_pair_dist * alpha - y_norm_pair_dist)'], {}), '(x_norm_pair_dist * alpha - y_norm_pair_dist)\n', (2677, 2722), True, 'import torch.nn.functional as F\n'), ((2867, 2918), 'torch.nn.functional.relu', 'F.relu', (['(x_norm_pair_dist * alpha - y_norm_pair_dist)'], {}), '(x_norm_pair_dist * alpha - y_norm_pair_dist)\n', (2873, 2918), True, 'import torch.nn.functional as F\n'), ((2770, 2816), 'torch.abs', 'torch.abs', (['(x_norm_pair_dist - y_norm_pair_dist)'], {}), '(x_norm_pair_dist - y_norm_pair_dist)\n', (2779, 2816), False, 'import torch\n')] |
"""Plot argmax and max."""
#
import numpy as np
import pandas as pd
# import matplotlib.pyplot as plt
import seaborn as sns
from logzero import logger
def plot_argmax(yargmax, ymax=None):
    """Scatter-plot argmax per index, optionally sized/coloured by max."""
    try:
        n_points = yargmax.shape[0]
    except Exception:
        n_points = len(yargmax)
    if ymax is None:
        frame = pd.DataFrame({"lang2": range(n_points), "argmax": yargmax})
        sns.relplot(x="lang2", y="argmax", data=frame)
    else:
        frame = pd.DataFrame({"lang2": range(n_points), "argmax": yargmax, "max": ymax})
        sns.relplot(x="lang2", y="argmax", size="max", hue="max", data=frame)
def plot_tset(res):
    """Plot a pair/triple set of (lang2, argmax[, max]) rows.

    Example of building ``res``::

        cmat = ren600xzh400
        # correlation mat: cmat.shape -> (600, 400)
        yargmax = cmat.argmax(axis=0)
        ymax = cmat.max(axis=0)
        res = [*zip(range(cmat.shape[0]), yargmax, ymax)]

    Rows of width 2 are plotted as plain (lang2, argmax) points; rows of
    width 3 additionally encode the max value as point size and hue.
    Any other shape logs an error and returns without plotting.
    (Removed: a large block of commented-out plotting experiments.)
    """
    shape = np.array(res).shape
    if len(shape) != 2:
        logger.error("shape length not equal to 2: %s", shape)
        return
    if shape[1] == 2:
        df_res = pd.DataFrame(res, columns=["lang2", "argmax"])
        sns.relplot(x="lang2", y="argmax", data=df_res)
        return
    if shape[1] == 3:
        df_res = pd.DataFrame(res, columns=["lang2", "argmax", "max"])
        # bubble size and colour both encode the max value of each row
        sns.relplot(x="lang2", y="argmax", size="max", hue="max", data=df_res)
        return
| [
"pandas.DataFrame",
"numpy.array",
"logzero.logger.error",
"seaborn.relplot"
] | [((431, 497), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""lang2"""', 'y': '"""argmax"""', 'size': '"""max"""', 'hue': '"""max"""', 'data': 'df'}), "(x='lang2', y='argmax', size='max', hue='max', data=df)\n", (442, 497), True, 'import seaborn as sns\n'), ((585, 628), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""lang2"""', 'y': '"""argmax"""', 'data': 'df'}), "(x='lang2', y='argmax', data=df)\n", (596, 628), True, 'import seaborn as sns\n'), ((893, 906), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (901, 906), True, 'import numpy as np\n'), ((945, 999), 'logzero.logger.error', 'logger.error', (['"""shape length not equal to 2: %s"""', 'shape'], {}), "('shape length not equal to 2: %s', shape)\n", (957, 999), False, 'from logzero import logger\n'), ((1055, 1101), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {'columns': "['lang2', 'argmax']"}), "(res, columns=['lang2', 'argmax'])\n", (1067, 1101), True, 'import pandas as pd\n'), ((1110, 1157), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""lang2"""', 'y': '"""argmax"""', 'data': 'df_res'}), "(x='lang2', y='argmax', data=df_res)\n", (1121, 1157), True, 'import seaborn as sns\n'), ((1213, 1266), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {'columns': "['lang2', 'argmax', 'max']"}), "(res, columns=['lang2', 'argmax', 'max'])\n", (1225, 1266), True, 'import pandas as pd\n'), ((1794, 1864), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""lang2"""', 'y': '"""argmax"""', 'size': '"""max"""', 'hue': '"""max"""', 'data': 'df_res'}), "(x='lang2', y='argmax', size='max', hue='max', data=df_res)\n", (1805, 1864), True, 'import seaborn as sns\n')] |
import numpy as np
from skimage import io
from skimage import feature
import cv2
from scipy import ndimage
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
def main():
    """Demo pipeline on an MRI volume: slicing, edge filters, and k-means.

    Loads data/attention-mri.tif, shows three orthogonal slices, runs
    Sobel / Prewitt / Canny edge detection on the axial slice, then
    clusters its intensities with k-means at several cluster counts and
    plots iteration counts vs within-cluster sums over repeated runs.
    """
    D = io.imread("data/attention-mri.tif")
    print(D.shape)
    # three orthogonal slices through the volume (indices are fixed picks)
    im_x = D[63,:,:] #sagittal(x)
    im_y = D[:,63,:] #coronal(y)
    im_z = D[:,:,15] #transaxial(z)
    plt.subplot(1,3,1), plt.imshow(im_x, cmap = 'gray', aspect = 0.5)
    plt.title('Sagittal'), plt.xticks([]), plt.yticks([])
    plt.subplot(1,3,2), plt.imshow(im_y, cmap = 'gray', aspect = 0.5)
    plt.title('Coronal'), plt.xticks([]), plt.yticks([])
    plt.subplot(1,3,3), plt.imshow(im_z, cmap = 'gray')
    plt.title('Axial'), plt.xticks([]), plt.yticks([])
    plt.show()
    #def Edge_Filters(): (1):
    # (1) Sobel gradients in x and y, combined into a gradient magnitude
    Gx = cv2.Sobel(im_z, cv2.CV_64F, 1, 0, ksize=3)
    Gy = cv2.Sobel(im_z, cv2.CV_64F, 0, 1, ksize=3)
    Gmat = np.sqrt(Gx**2.0 + Gy**2.0)
    plt.subplot(1,3,1), plt.imshow(Gx, cmap = 'gray')
    plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
    plt.subplot(1,3,2), plt.imshow(Gy, cmap = 'gray')
    plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])
    plt.subplot(1,3,3), plt.imshow(Gmat, cmap = 'gray')
    plt.title('Final'), plt.xticks([]), plt.yticks([])
    plt.show()
    #(2):
    # (2) Prewitt gradients via explicit 3x3 kernels and filter2D
    kernel_x = np.array([[1, 0, -1],[1, 0, -1],[1, 0, -1]])
    kernel_y = np.array([[1, 1, 1],[0, 0, 0],[-1, -1, -1]])
    Gx = cv2.filter2D(im_z, -1, kernel_x)
    Gy = cv2.filter2D(im_z, -1, kernel_y)
    Gmat = np.sqrt(Gx**2.0 + Gy**2.0)
    plt.subplot(1,3,1), plt.imshow(Gx, cmap = 'gray')
    plt.title('Prewitt X'), plt.xticks([]), plt.yticks([])
    plt.subplot(1,3,2), plt.imshow(Gy, cmap = 'gray')
    plt.title('Prewitt Y'), plt.xticks([]), plt.yticks([])
    plt.subplot(1,3,3), plt.imshow(Gmat, cmap = 'gray')
    plt.title('Final'), plt.xticks([]), plt.yticks([])
    plt.show()
    #(3):
    # (3) Canny with two different (low, high) threshold pairs
    ef1 = feature.canny(im_z, 1.0, 2, 5)
    ef2 = feature.canny(im_z, 1.0, 3, 15)
    plt.subplot(1,2,1), plt.imshow(ef1, cmap = 'gray')
    plt.title('(2, 5)'), plt.xticks([]), plt.yticks([])
    plt.subplot(1,2,2), plt.imshow(ef2, cmap = 'gray')
    plt.title('(3, 15)'), plt.xticks([]), plt.yticks([])
    plt.show()
    #def Kmeans_clustering(): (2):
    # k-means on the flattened pixel intensities (each pixel is one sample)
    count = 1
    X = im_z.reshape((-1, 1))
    estimators = [
        ('kmeans_4', KMeans(n_clusters=4)),
        ('kmeans_8', KMeans(n_clusters=8)),
        ('kmeans_20', KMeans(n_clusters=20))
    ]
    for name, est in estimators:
        kmeans = est.fit(X)
        labels = kmeans.labels_
        choices = kmeans.cluster_centers_.squeeze()
        # map every pixel's label to its cluster-center intensity
        img = np.choose(labels, choices)
        img.shape = im_z.shape
        # NOTE(review): a new figure is created per iteration, so each
        # subplot(1, 3, count) lands on its own figure — confirm intent
        plt.figure(figsize=(15, 15))
        plt.subplot(1, 3, count), plt.imshow(img, plt.cm.Spectral)
        plt.title(name)
        count += 1
    plt.show()
    #(3):
    # repeat k-means many times to see how iteration count relates to
    # the within-cluster sum of squares (inertia)
    trytime = 500
    x_list = []
    y_list = []
    for i in range(trytime):
        kmeans = KMeans(n_clusters=4).fit(im_z)
        x_list.append(kmeans.n_iter_)
        y_list.append(kmeans.inertia_)
    ax = plt.gca()
    ax.set_xlabel('Number of iterations')
    ax.set_ylabel('Within-cluster sums')
    ax.scatter(x_list, y_list, c='r', s=20, alpha=0.5)
    plt.show()
# Run all demos when executed as a script.
if __name__ == "__main__":
main() | [
"matplotlib.pyplot.imshow",
"sklearn.cluster.KMeans",
"numpy.sqrt",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.gca",
"numpy.choose",
"cv2.filter2D",
"numpy.array",
"skimage.io.imread",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"skimage.feature.canny",
"matplotlib.pyplot.tit... | [((195, 230), 'skimage.io.imread', 'io.imread', (['"""data/attention-mri.tif"""'], {}), "('data/attention-mri.tif')\n", (204, 230), False, 'from skimage import io\n'), ((734, 744), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (742, 744), True, 'import matplotlib.pyplot as plt\n'), ((781, 823), 'cv2.Sobel', 'cv2.Sobel', (['im_z', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': '(3)'}), '(im_z, cv2.CV_64F, 1, 0, ksize=3)\n', (790, 823), False, 'import cv2\n'), ((833, 875), 'cv2.Sobel', 'cv2.Sobel', (['im_z', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': '(3)'}), '(im_z, cv2.CV_64F, 0, 1, ksize=3)\n', (842, 875), False, 'import cv2\n'), ((887, 917), 'numpy.sqrt', 'np.sqrt', (['(Gx ** 2.0 + Gy ** 2.0)'], {}), '(Gx ** 2.0 + Gy ** 2.0)\n', (894, 917), True, 'import numpy as np\n'), ((1252, 1262), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1260, 1262), True, 'import matplotlib.pyplot as plt\n'), ((1289, 1335), 'numpy.array', 'np.array', (['[[1, 0, -1], [1, 0, -1], [1, 0, -1]]'], {}), '([[1, 0, -1], [1, 0, -1], [1, 0, -1]])\n', (1297, 1335), True, 'import numpy as np\n'), ((1349, 1395), 'numpy.array', 'np.array', (['[[1, 1, 1], [0, 0, 0], [-1, -1, -1]]'], {}), '([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])\n', (1357, 1395), True, 'import numpy as np\n'), ((1403, 1435), 'cv2.filter2D', 'cv2.filter2D', (['im_z', '(-1)', 'kernel_x'], {}), '(im_z, -1, kernel_x)\n', (1415, 1435), False, 'import cv2\n'), ((1445, 1477), 'cv2.filter2D', 'cv2.filter2D', (['im_z', '(-1)', 'kernel_y'], {}), '(im_z, -1, kernel_y)\n', (1457, 1477), False, 'import cv2\n'), ((1489, 1519), 'numpy.sqrt', 'np.sqrt', (['(Gx ** 2.0 + Gy ** 2.0)'], {}), '(Gx ** 2.0 + Gy ** 2.0)\n', (1496, 1519), True, 'import numpy as np\n'), ((1858, 1868), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1866, 1868), True, 'import matplotlib.pyplot as plt\n'), ((1890, 1920), 'skimage.feature.canny', 'feature.canny', (['im_z', '(1.0)', '(2)', '(5)'], {}), '(im_z, 1.0, 2, 5)\n', (1903, 
1920), False, 'from skimage import feature\n'), ((1931, 1962), 'skimage.feature.canny', 'feature.canny', (['im_z', '(1.0)', '(3)', '(15)'], {}), '(im_z, 1.0, 3, 15)\n', (1944, 1962), False, 'from skimage import feature\n'), ((2191, 2201), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2199, 2201), True, 'import matplotlib.pyplot as plt\n'), ((3054, 3063), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3061, 3063), True, 'import matplotlib.pyplot as plt\n'), ((3206, 3216), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3214, 3216), True, 'import matplotlib.pyplot as plt\n'), ((368, 388), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (379, 388), True, 'import matplotlib.pyplot as plt\n'), ((388, 429), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im_x'], {'cmap': '"""gray"""', 'aspect': '(0.5)'}), "(im_x, cmap='gray', aspect=0.5)\n", (398, 429), True, 'import matplotlib.pyplot as plt\n'), ((438, 459), 'matplotlib.pyplot.title', 'plt.title', (['"""Sagittal"""'], {}), "('Sagittal')\n", (447, 459), True, 'import matplotlib.pyplot as plt\n'), ((461, 475), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (471, 475), True, 'import matplotlib.pyplot as plt\n'), ((477, 491), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (487, 491), True, 'import matplotlib.pyplot as plt\n'), ((496, 516), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (507, 516), True, 'import matplotlib.pyplot as plt\n'), ((516, 557), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im_y'], {'cmap': '"""gray"""', 'aspect': '(0.5)'}), "(im_y, cmap='gray', aspect=0.5)\n", (526, 557), True, 'import matplotlib.pyplot as plt\n'), ((566, 586), 'matplotlib.pyplot.title', 'plt.title', (['"""Coronal"""'], {}), "('Coronal')\n", (575, 586), True, 'import matplotlib.pyplot as plt\n'), ((588, 602), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (598, 602), True, 
'import matplotlib.pyplot as plt\n'), ((604, 618), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (614, 618), True, 'import matplotlib.pyplot as plt\n'), ((623, 643), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (634, 643), True, 'import matplotlib.pyplot as plt\n'), ((643, 672), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im_z'], {'cmap': '"""gray"""'}), "(im_z, cmap='gray')\n", (653, 672), True, 'import matplotlib.pyplot as plt\n'), ((679, 697), 'matplotlib.pyplot.title', 'plt.title', (['"""Axial"""'], {}), "('Axial')\n", (688, 697), True, 'import matplotlib.pyplot as plt\n'), ((699, 713), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (709, 713), True, 'import matplotlib.pyplot as plt\n'), ((715, 729), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (725, 729), True, 'import matplotlib.pyplot as plt\n'), ((919, 939), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (930, 939), True, 'import matplotlib.pyplot as plt\n'), ((939, 966), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Gx'], {'cmap': '"""gray"""'}), "(Gx, cmap='gray')\n", (949, 966), True, 'import matplotlib.pyplot as plt\n'), ((973, 993), 'matplotlib.pyplot.title', 'plt.title', (['"""Sobel X"""'], {}), "('Sobel X')\n", (982, 993), True, 'import matplotlib.pyplot as plt\n'), ((995, 1009), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1005, 1009), True, 'import matplotlib.pyplot as plt\n'), ((1011, 1025), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1021, 1025), True, 'import matplotlib.pyplot as plt\n'), ((1030, 1050), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (1041, 1050), True, 'import matplotlib.pyplot as plt\n'), ((1050, 1077), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Gy'], {'cmap': '"""gray"""'}), "(Gy, cmap='gray')\n", (1060, 1077), True, 'import matplotlib.pyplot as 
plt\n'), ((1084, 1104), 'matplotlib.pyplot.title', 'plt.title', (['"""Sobel Y"""'], {}), "('Sobel Y')\n", (1093, 1104), True, 'import matplotlib.pyplot as plt\n'), ((1106, 1120), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1116, 1120), True, 'import matplotlib.pyplot as plt\n'), ((1122, 1136), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1132, 1136), True, 'import matplotlib.pyplot as plt\n'), ((1141, 1161), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (1152, 1161), True, 'import matplotlib.pyplot as plt\n'), ((1161, 1190), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Gmat'], {'cmap': '"""gray"""'}), "(Gmat, cmap='gray')\n", (1171, 1190), True, 'import matplotlib.pyplot as plt\n'), ((1197, 1215), 'matplotlib.pyplot.title', 'plt.title', (['"""Final"""'], {}), "('Final')\n", (1206, 1215), True, 'import matplotlib.pyplot as plt\n'), ((1217, 1231), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1227, 1231), True, 'import matplotlib.pyplot as plt\n'), ((1233, 1247), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1243, 1247), True, 'import matplotlib.pyplot as plt\n'), ((1521, 1541), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (1532, 1541), True, 'import matplotlib.pyplot as plt\n'), ((1541, 1568), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Gx'], {'cmap': '"""gray"""'}), "(Gx, cmap='gray')\n", (1551, 1568), True, 'import matplotlib.pyplot as plt\n'), ((1575, 1597), 'matplotlib.pyplot.title', 'plt.title', (['"""Prewitt X"""'], {}), "('Prewitt X')\n", (1584, 1597), True, 'import matplotlib.pyplot as plt\n'), ((1599, 1613), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1609, 1613), True, 'import matplotlib.pyplot as plt\n'), ((1615, 1629), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1625, 1629), True, 'import matplotlib.pyplot as plt\n'), ((1634, 1654), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (1645, 1654), True, 'import matplotlib.pyplot as plt\n'), ((1654, 1681), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Gy'], {'cmap': '"""gray"""'}), "(Gy, cmap='gray')\n", (1664, 1681), True, 'import matplotlib.pyplot as plt\n'), ((1688, 1710), 'matplotlib.pyplot.title', 'plt.title', (['"""Prewitt Y"""'], {}), "('Prewitt Y')\n", (1697, 1710), True, 'import matplotlib.pyplot as plt\n'), ((1712, 1726), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1722, 1726), True, 'import matplotlib.pyplot as plt\n'), ((1728, 1742), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1738, 1742), True, 'import matplotlib.pyplot as plt\n'), ((1747, 1767), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (1758, 1767), True, 'import matplotlib.pyplot as plt\n'), ((1767, 1796), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Gmat'], {'cmap': '"""gray"""'}), "(Gmat, cmap='gray')\n", (1777, 1796), True, 'import matplotlib.pyplot as plt\n'), ((1803, 1821), 'matplotlib.pyplot.title', 'plt.title', (['"""Final"""'], {}), "('Final')\n", (1812, 1821), True, 'import matplotlib.pyplot as plt\n'), ((1823, 1837), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1833, 1837), True, 'import matplotlib.pyplot as plt\n'), ((1839, 1853), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1849, 1853), True, 'import matplotlib.pyplot as plt\n'), ((1968, 1988), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (1979, 1988), True, 'import matplotlib.pyplot as plt\n'), ((1988, 2016), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ef1'], {'cmap': '"""gray"""'}), "(ef1, cmap='gray')\n", (1998, 2016), True, 'import matplotlib.pyplot as plt\n'), ((2023, 2042), 'matplotlib.pyplot.title', 'plt.title', (['"""(2, 5)"""'], {}), "('(2, 5)')\n", (2032, 2042), True, 'import matplotlib.pyplot as 
plt\n'), ((2044, 2058), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2054, 2058), True, 'import matplotlib.pyplot as plt\n'), ((2060, 2074), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2070, 2074), True, 'import matplotlib.pyplot as plt\n'), ((2079, 2099), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (2090, 2099), True, 'import matplotlib.pyplot as plt\n'), ((2099, 2127), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ef2'], {'cmap': '"""gray"""'}), "(ef2, cmap='gray')\n", (2109, 2127), True, 'import matplotlib.pyplot as plt\n'), ((2134, 2154), 'matplotlib.pyplot.title', 'plt.title', (['"""(3, 15)"""'], {}), "('(3, 15)')\n", (2143, 2154), True, 'import matplotlib.pyplot as plt\n'), ((2156, 2170), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2166, 2170), True, 'import matplotlib.pyplot as plt\n'), ((2172, 2186), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2182, 2186), True, 'import matplotlib.pyplot as plt\n'), ((2602, 2628), 'numpy.choose', 'np.choose', (['labels', 'choices'], {}), '(labels, choices)\n', (2611, 2628), True, 'import numpy as np\n'), ((2668, 2696), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (2678, 2696), True, 'import matplotlib.pyplot as plt\n'), ((2772, 2787), 'matplotlib.pyplot.title', 'plt.title', (['name'], {}), '(name)\n', (2781, 2787), True, 'import matplotlib.pyplot as plt\n'), ((2817, 2827), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2825, 2827), True, 'import matplotlib.pyplot as plt\n'), ((2319, 2339), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(4)'}), '(n_clusters=4)\n', (2325, 2339), False, 'from sklearn.cluster import KMeans\n'), ((2364, 2384), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(8)'}), '(n_clusters=8)\n', (2370, 2384), False, 'from sklearn.cluster import KMeans\n'), ((2410, 2431), 
'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(20)'}), '(n_clusters=20)\n', (2416, 2431), False, 'from sklearn.cluster import KMeans\n'), ((2705, 2729), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', 'count'], {}), '(1, 3, count)\n', (2716, 2729), True, 'import matplotlib.pyplot as plt\n'), ((2731, 2763), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img', 'plt.cm.Spectral'], {}), '(img, plt.cm.Spectral)\n', (2741, 2763), True, 'import matplotlib.pyplot as plt\n'), ((2936, 2956), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(4)'}), '(n_clusters=4)\n', (2942, 2956), False, 'from sklearn.cluster import KMeans\n')] |
# RGB -> HSV -> Gray
import cv2
import numpy as np
from . import print_image
from . import plot_image
def rotate(img, rotation_deg, crop, device, debug=None):
    """Rotate an image about its center.

    Sometimes it is necessary to rotate an image, especially when
    clustering for multiple plants is needed.

    Inputs:
    img          = image object, RGB colorspace (either single or three channel)
    rotation_deg = rotation angle in degrees, should be an integer, can be a
                   negative number, positive values move counter clockwise.
    crop         = either True or False; if True, dimensions of the rotated
                   image will be the same as the original image (corners may
                   be cut off), if False the canvas grows so the whole
                   rotated image fits.
    device       = device number. Used to count steps in the pipeline.
    debug        = None, 'print', or 'plot'. Print = save to file,
                   Plot = print to screen.

    Returns:
    device      = device number
    rotated_img = rotated image

    :param img: numpy array
    :param rotation_deg: int
    :param crop: bool
    :param device: int
    :param debug: str
    :return device: int
    :return rotated_img: numpy array
    """
    device += 1
    # first two shape entries are rows/cols for both gray and RGB images,
    # so one unpack covers both cases (the old code branched on ndim)
    iy, ix = np.shape(img)[:2]
    M = cv2.getRotationMatrix2D((ix / 2, iy / 2), rotation_deg, 1)
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])
    if not crop:
        # compute the new bounding dimensions of the rotated image
        nW = int((iy * sin) + (ix * cos))
        nH = int((iy * cos) + (ix * sin))
        # shift the rotation matrix so the result stays centered on the
        # enlarged canvas
        M[0, 2] += (nW / 2) - (ix / 2)
        M[1, 2] += (nH / 2) - (iy / 2)
        rotated_img = cv2.warpAffine(img, M, (nW, nH))
    else:
        rotated_img = cv2.warpAffine(img, M, (ix, iy))
    if debug == 'print':
        print_image(rotated_img, (str(device) + '_rotated_img.png'))
    elif debug == 'plot':
        if len(np.shape(img)) == 3:
            plot_image(rotated_img)
        else:
            plot_image(rotated_img, cmap='gray')
    return device, rotated_img
| [
"cv2.getRotationMatrix2D",
"numpy.shape",
"cv2.warpAffine",
"numpy.abs"
] | [((1191, 1249), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(ix / 2, iy / 2)', 'rotation_deg', '(1)'], {}), '((ix / 2, iy / 2), rotation_deg, 1)\n', (1214, 1249), False, 'import cv2\n'), ((1261, 1276), 'numpy.abs', 'np.abs', (['M[0, 0]'], {}), '(M[0, 0])\n', (1267, 1276), True, 'import numpy as np\n'), ((1287, 1302), 'numpy.abs', 'np.abs', (['M[0, 1]'], {}), '(M[0, 1])\n', (1293, 1302), True, 'import numpy as np\n'), ((1127, 1140), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (1135, 1140), True, 'import numpy as np\n'), ((1168, 1181), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (1176, 1181), True, 'import numpy as np\n'), ((1634, 1666), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(nW, nH)'], {}), '(img, M, (nW, nH))\n', (1648, 1666), False, 'import cv2\n'), ((1699, 1731), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(ix, iy)'], {}), '(img, M, (ix, iy))\n', (1713, 1731), False, 'import cv2\n'), ((1085, 1098), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (1093, 1098), True, 'import numpy as np\n'), ((1869, 1882), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (1877, 1882), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plot
# Demo: sample sin(t) on [0, 10) at 0.1 steps and display it.
time = np.arange(0, 10, 0.1)
amplitude = np.sin(time)

plot.plot(time, amplitude)
plot.title('Sine Wave 1')  # fixed user-visible typo: was 'Sign Wave 1'
plot.xlabel('Time')
plot.ylabel('Amplitude = sin(time)')
plot.grid(True, which='both')  # major and minor grid lines
plot.axhline(y=0, color='k')   # horizontal reference line at y=0
plot.show()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axhline",
"numpy.sin",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((60, 81), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(0.1)'], {}), '(0, 10, 0.1)\n', (69, 81), True, 'import numpy as np\n'), ((95, 107), 'numpy.sin', 'np.sin', (['time'], {}), '(time)\n', (101, 107), True, 'import numpy as np\n'), ((109, 135), 'matplotlib.pyplot.plot', 'plot.plot', (['time', 'amplitude'], {}), '(time, amplitude)\n', (118, 135), True, 'import matplotlib.pyplot as plot\n'), ((137, 162), 'matplotlib.pyplot.title', 'plot.title', (['"""Sign Wave 1"""'], {}), "('Sign Wave 1')\n", (147, 162), True, 'import matplotlib.pyplot as plot\n'), ((164, 183), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""Time"""'], {}), "('Time')\n", (175, 183), True, 'import matplotlib.pyplot as plot\n'), ((185, 221), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""Amplitude = sin(time)"""'], {}), "('Amplitude = sin(time)')\n", (196, 221), True, 'import matplotlib.pyplot as plot\n'), ((223, 252), 'matplotlib.pyplot.grid', 'plot.grid', (['(True)'], {'which': '"""both"""'}), "(True, which='both')\n", (232, 252), True, 'import matplotlib.pyplot as plot\n'), ((254, 282), 'matplotlib.pyplot.axhline', 'plot.axhline', ([], {'y': '(0)', 'color': '"""k"""'}), "(y=0, color='k')\n", (266, 282), True, 'import matplotlib.pyplot as plot\n'), ((284, 295), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (293, 295), True, 'import matplotlib.pyplot as plot\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2014-10-28 18:07:20
# @Last Modified by: marinheiro
# @Last Modified time: 2014-12-08 21:32:21
import scipy.sparse
import numpy
import rotation_averaging.so3 as so3
def create_matrix_from_indices(num_nodes, indices):
	"""Build the sparse incidence matrix of the index pairs.

	Each row corresponds to one pair in *indices* and carries -1 in the
	column of its first index and +1 in the column of its second index.
	Returns a CSR matrix of shape (len(indices), num_nodes).
	"""
	rows = []
	cols = []
	vals = []
	for row, ind in enumerate(indices):
		rows += [row, row]
		cols += [ind[0], ind[1]]
		vals += [-1, 1]
	coo = scipy.sparse.coo_matrix((vals, (rows, cols)),
	                             shape=(len(indices), num_nodes))
	return coo.tocsr()
def compute_relative_log_matrix(global_rotations, relative_rotations, indices):
	"""Stack the so(3) log of each edge's rotation residual into one vector.

	For edge (i, j) the residual is R_j^T * R_ij * R_i; its axis-angle log
	is appended, and the concatenation is returned transposed.
	"""
	logs = []
	for edge, ind in enumerate(indices):
		i = ind[0]
		j = ind[1]
		residual = global_rotations[j].transpose().dot(
			relative_rotations[edge].dot(global_rotations[i]))
		axis, angle = so3.matrix_to_axis_angle(residual)
		logs.append(so3.axis_angle_to_log(axis, angle))
	return numpy.hstack(logs).transpose()
def update_global_rotation_from_log(global_rotations, log_matrix):
	"""Right-multiply each rotation by the matrix exponential of its log update.

	Mutates *global_rotations* in place and also returns it.
	"""
	for idx, rotation in enumerate(global_rotations):
		axis, angle = so3.log_to_axis_angle(log_matrix[idx])
		axis = numpy.array([[axis[0]], [axis[1]], [axis[2]]])  # column vector
		global_rotations[idx] = rotation.dot(so3.axis_angle_to_matrix(axis, angle))
	return global_rotations
"numpy.hstack",
"rotation_averaging.so3.axis_angle_to_log",
"numpy.array",
"rotation_averaging.so3.axis_angle_to_matrix",
"rotation_averaging.so3.log_to_axis_angle",
"rotation_averaging.so3.matrix_to_axis_angle"
] | [((835, 869), 'rotation_averaging.so3.matrix_to_axis_angle', 'so3.matrix_to_axis_angle', (['deltaRot'], {}), '(deltaRot)\n', (859, 869), True, 'import rotation_averaging.so3 as so3\n'), ((1079, 1118), 'rotation_averaging.so3.log_to_axis_angle', 'so3.log_to_axis_angle', (['log_matrix[node]'], {}), '(log_matrix[node])\n', (1100, 1118), True, 'import rotation_averaging.so3 as so3\n'), ((1125, 1162), 'numpy.array', 'numpy.array', (['[[n[0]], [n[1]], [n[2]]]'], {}), '([[n[0]], [n[1]], [n[2]]])\n', (1136, 1162), False, 'import numpy\n'), ((883, 914), 'rotation_averaging.so3.axis_angle_to_log', 'so3.axis_angle_to_log', (['n', 'theta'], {}), '(n, theta)\n', (904, 914), True, 'import rotation_averaging.so3 as so3\n'), ((925, 942), 'numpy.hstack', 'numpy.hstack', (['ret'], {}), '(ret)\n', (937, 942), False, 'import numpy\n'), ((1219, 1253), 'rotation_averaging.so3.axis_angle_to_matrix', 'so3.axis_angle_to_matrix', (['n', 'theta'], {}), '(n, theta)\n', (1243, 1253), True, 'import rotation_averaging.so3 as so3\n')] |
import numpy as np
def test_get_gini_score():
    """A quick example against a hand worked gini score."""
    from my_ml.model.decision_tree import DecisionTree

    tree = DecisionTree()
    left_labels: np.ndarray = np.array([1, 0, 0])
    right_labels: np.ndarray = np.array([1, 0])
    num_classes: int = 2
    score: float = tree._get_gini_score(left_labels, right_labels, num_classes)
    # Hand-worked value is ~0.47; allow a small relative tolerance.
    assert np.isclose(0.47, score, rtol=0.008)
| [
"my_ml.model.decision_tree.DecisionTree",
"numpy.array",
"numpy.isclose"
] | [((176, 190), 'my_ml.model.decision_tree.DecisionTree', 'DecisionTree', ([], {}), '()\n', (188, 190), False, 'from my_ml.model.decision_tree import DecisionTree\n'), ((215, 234), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (223, 234), True, 'import numpy as np\n'), ((260, 276), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (268, 276), True, 'import numpy as np\n'), ((367, 407), 'numpy.isclose', 'np.isclose', (['(0.47)', 'gini_score'], {'rtol': '(0.008)'}), '(0.47, gini_score, rtol=0.008)\n', (377, 407), True, 'import numpy as np\n')] |
# Modelling Enslin and GopalKrishna 2001
# Major updates
from getpar import getparspace
from astropy.io import ascii
from operations import chicalc
import numpy as np
import matplotlib.pyplot as plt
####################################
def readfile(myspec):
    """Read the spectrum data table *myspec* via astropy.io.ascii."""
    return ascii.read(myspec)
# Observed spectrum table for the cluster; columns documented inline below.
myfile ='A1914_spec.dat' #col1 =freq col2 = flux col3 = error
spec = readfile(myfile)
print(spec)
f_nuobs=np.array(spec['nughz'])
f_err=np.array(spec['fmyerr'])
flx_obs = np.array(spec['fmy'])
# Scale observed fluxes by 1e-26 (presumably mJy -> erg/s/cm^2/Hz; confirm units).
flx_obs =[i*(1e-26) for i in flx_obs]
#f_nuobs=[0.1]
#flx_obs = [530]
#f_err=[0]
##################################### Input Parameters #####################################
# Extract the redshift and properties of the system from source.py
import source
#z =0 #t1 source.z
#B_src =2.7 #t1 source.B
#V_src =0.12 #t1 source.V
# NOTE(review): the source.py values above are commented out; the system
# properties are hard-coded here instead (redshift, field strength, volume).
z =0.17120 #t1 source.z
B_src =5*1e-6 #t1 source.B
V_src =0.037*(3.08*1e24)**3 #t1 source.V
# To Take User Defined inputs regarding assumed scenario and phase for the system.
# Also checks for illegal entry and assigns default entry.
scen=input("Enter the scenario[A, B or C](default: scenario B): ")
if scen not in ['A','B','C']:
    scen='B'
phase=input("Enter the phase[0,1,2,3 or 4](default: phase 3): ")
if phase not in ['0','1','2','3','4']:
    phase='3'
phase=int(phase)
print('Choice of Scenario is:',scen,' ','Choice of Phase is',phase)
# Compression Ratio Index
# One entry per phase; the full list is passed to chicalc below.
b=[1.8,1.2,0,2.0,0]
##################################### Operational Parameters #####################################
# To generate time-scale parameter space and save in parex.dat
getparspace(scen,phase)
# Reading data from table to phase-wise iterative solutions
timeex= ascii.read('parex.dat')
print(timeex)
# Accumulators: one chi-square value and one model flux array per timescale set.
chilist=[]
flx=[]
count=0 #counter
# Log of (iteration, chi-square) pairs; closed explicitly after the loop so
# the buffered writes are flushed (bug fix: handle was never closed before).
result =open('myresult.dat','w')
for i in timeex:
    # instancing time scale for each set
    delt =[0.0,0.0,0.0,0.0,0.0] #delt0=0 for all sets
    tau =[0.0,0.0,np.inf,0.0,np.inf] #Time scale for tau_4 and tau_2 is prescribed infinity
    #setting up tau
    tau[0] =i['tau0']
    tau[1] =(2.0/3.0)*tau[0]
    tau[3] =i['tau3']
    #setting up delt
    delt[0] =0
    delt[1] =i['delt1']
    delt[2] =i['delt2']
    delt[3] =i['delt3']
    delt[4] =i['delt4']
    # Rescale by 3.154e16 s (presumably Gyr -> seconds; confirm units).
    # Renamed the comprehension variable so it no longer shadows the row `i`.
    delt =[t*(3.154*1e16) for t in delt]
    tau =[t*(3.154*1e16) for t in tau]
    print('Iteration number',count+1)
    chi,flux=chicalc(delt,tau,phase,z,B_src,V_src,b,f_nuobs,f_err,flx_obs)
    chilist.append(chi)
    flx.append(flux)
    myresult =str(count)+'\t'+str(chi)+'\n'
    result.write(myresult)
    count=count+1
result.close()  # flush and release the log file
# Select the best-fitting timescale set by minimum chi-square (NaN-safe).
chi_min_ind =np.nanargmin(chilist)
chi_min =chilist[chi_min_ind]
flx_min =flx[chi_min_ind]
# instancing time scale for each set
delt_m =[0.0,0.0,0.0,0.0,0.0] #delt0=0 for all sets
tau_m =[0.0,0.0,np.inf,0.0,np.inf] #Time scale for tau_4 and tau_2 is prescribed infinity
#setting up tau
tau_m[0] =timeex['tau0'][chi_min_ind]
tau_m[1] =(2.0/3.0)*tau_m[0]
tau_m[3] =timeex['tau3'][chi_min_ind]
#setting up delt
delt_m[0] =0
delt_m[1] =timeex['delt1'][chi_min_ind]
delt_m[2] =timeex['delt2'][chi_min_ind]
delt_m[3] =timeex['delt3'][chi_min_ind]
delt_m[4] =timeex['delt4'][chi_min_ind]
print('Chi_min',chi_min)
print('tau_min',tau_m)
print('delt_m',delt_m)
# Plot observed vs. best-fit model spectrum on log-log axes.
fig,ax = plt.subplots()
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlabel('Frequency (GHz)')
ax.set_ylabel('Flux density (mJy)')
# Undo the earlier 1e-26 scaling so fluxes plot in the original table units.
flx_obs =[i/(1e-26) for i in flx_obs]
ax.plot(f_nuobs,flx_obs,'bo',label='obs')
flx_min =[i/(1e-26) for i in flx_min]
ax.plot(f_nuobs,flx_min,'r-')
ax.legend()
plt.show()
###
### while writing the program for chi square calculation for a time step,
### we send only one value of b that coresponds to user input phase
### c=c+1
| [
"numpy.nanargmin",
"astropy.io.ascii.read",
"getpar.getparspace",
"numpy.array",
"operations.chicalc",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((411, 434), 'numpy.array', 'np.array', (["spec['nughz']"], {}), "(spec['nughz'])\n", (419, 434), True, 'import numpy as np\n'), ((441, 465), 'numpy.array', 'np.array', (["spec['fmyerr']"], {}), "(spec['fmyerr'])\n", (449, 465), True, 'import numpy as np\n'), ((476, 497), 'numpy.array', 'np.array', (["spec['fmy']"], {}), "(spec['fmy'])\n", (484, 497), True, 'import numpy as np\n'), ((1587, 1611), 'getpar.getparspace', 'getparspace', (['scen', 'phase'], {}), '(scen, phase)\n', (1598, 1611), False, 'from getpar import getparspace\n'), ((1682, 1705), 'astropy.io.ascii.read', 'ascii.read', (['"""parex.dat"""'], {}), "('parex.dat')\n", (1692, 1705), False, 'from astropy.io import ascii\n'), ((2560, 2581), 'numpy.nanargmin', 'np.nanargmin', (['chilist'], {}), '(chilist)\n', (2572, 2581), True, 'import numpy as np\n'), ((3212, 3226), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3224, 3226), True, 'import matplotlib.pyplot as plt\n'), ((3500, 3510), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3508, 3510), True, 'import matplotlib.pyplot as plt\n'), ((271, 289), 'astropy.io.ascii.read', 'ascii.read', (['myspec'], {}), '(myspec)\n', (281, 289), False, 'from astropy.io import ascii\n'), ((2366, 2436), 'operations.chicalc', 'chicalc', (['delt', 'tau', 'phase', 'z', 'B_src', 'V_src', 'b', 'f_nuobs', 'f_err', 'flx_obs'], {}), '(delt, tau, phase, z, B_src, V_src, b, f_nuobs, f_err, flx_obs)\n', (2373, 2436), False, 'from operations import chicalc\n')] |
#
# Linear algebra subroutines
#
# Originally written in C++ by <NAME>, CSCS
# Ported to Python by <NAME>, CSCS
import collections
import numba
import numpy as np
import sys
import operators
# Step size for the finite-difference matrix-vector approximation,
# with its reciprocal precomputed.
EPS = 1.0e-8
EPS_INV = 1.0 / EPS

# Solver result: whether it converged, after how many iterations,
# and at what residual norm.
CGStatus = collections.namedtuple('CGStatus', 'converged iters residual')
@numba.njit(cache=True)
def cg(x, x_old, b, boundary, options, tolerance, maxiters):
    """Matrix-free conjugate gradient solve of A*x = b.

    The matrix-vector product is approximated by a finite difference of the
    nonlinear operator F:
        A*v ~= (F(x + EPS*v) - F(x)) / EPS
    x is updated in place; returns a CGStatus(converged, iters, residual).
    """
    # Initialize temporary storage
    Fx = np.zeros_like(x)
    Fxold = np.zeros_like(x)
    xold = np.copy(x)

    # Compute F(x) once at startup; xold is kept so F(x + eps*v) can be
    # evaluated at the unmodified starting point each iteration.
    operators.diffusion(x, Fxold, x_old, boundary, options)

    v = (1. + EPS) * x
    operators.diffusion(v, Fx, x_old, boundary, options)

    # r = b - A*x, where A*x = (Fx - Fxold)/eps
    r = b - EPS_INV * (Fx - Fxold)
    p = np.copy(r)

    # rold = <r,r>
    rold = r @ r
    rnew = rold
    residual = np.sqrt(rnew)
    if residual < tolerance:
        return CGStatus(True, 0, residual)

    # Bug fix: `it` (and `residual`) were previously unbound when
    # maxiters < 1, making the final return raise instead of reporting
    # non-convergence.
    it = 0
    for it in range(1, maxiters + 1):
        # Ap = A*p via the same finite-difference approximation
        v = xold + EPS * p
        operators.diffusion(v, Fx, x_old, boundary, options)
        Ap = EPS_INV * (Fx - Fxold)

        # alpha = rold / p'*Ap
        alpha = rold / (p @ Ap)
        x += alpha * p
        r -= alpha * Ap

        rnew = r @ r
        residual = np.sqrt(rnew)
        if (residual < tolerance):
            return CGStatus(True, it, residual)

        p = r + (rnew / rold) * p
        rold = rnew

    return CGStatus(False, it, residual)
| [
"numpy.copy",
"collections.namedtuple",
"numpy.sqrt",
"numba.njit",
"numpy.zeros_like",
"operators.diffusion"
] | [((291, 361), 'collections.namedtuple', 'collections.namedtuple', (['"""CGStatus"""', "['converged', 'iters', 'residual']"], {}), "('CGStatus', ['converged', 'iters', 'residual'])\n", (313, 361), False, 'import collections\n'), ((399, 421), 'numba.njit', 'numba.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (409, 421), False, 'import numba\n'), ((528, 544), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (541, 544), True, 'import numpy as np\n'), ((557, 573), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (570, 573), True, 'import numpy as np\n'), ((585, 595), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (592, 595), True, 'import numpy as np\n'), ((855, 910), 'operators.diffusion', 'operators.diffusion', (['x', 'Fxold', 'x_old', 'boundary', 'options'], {}), '(x, Fxold, x_old, boundary, options)\n', (874, 910), False, 'import operators\n'), ((956, 1008), 'operators.diffusion', 'operators.diffusion', (['v', 'Fx', 'x_old', 'boundary', 'options'], {}), '(v, Fx, x_old, boundary, options)\n', (975, 1008), False, 'import operators\n'), ((1117, 1127), 'numpy.copy', 'np.copy', (['r'], {}), '(r)\n', (1124, 1127), True, 'import numpy as np\n'), ((1189, 1202), 'numpy.sqrt', 'np.sqrt', (['rold'], {}), '(rold)\n', (1196, 1202), True, 'import numpy as np\n'), ((1357, 1409), 'operators.diffusion', 'operators.diffusion', (['v', 'Fx', 'x_old', 'boundary', 'options'], {}), '(v, Fx, x_old, boundary, options)\n', (1376, 1409), False, 'import operators\n'), ((1626, 1639), 'numpy.sqrt', 'np.sqrt', (['rnew'], {}), '(rnew)\n', (1633, 1639), True, 'import numpy as np\n'), ((1249, 1262), 'numpy.sqrt', 'np.sqrt', (['rnew'], {}), '(rnew)\n', (1256, 1262), True, 'import numpy as np\n')] |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import six
from op_test import OpTest, skip_check_grad_ci
class PReluTest(OpTest):
    def setUp(self):
        """Build random inputs and the reference output for the prelu op."""
        self.init_input_shape()
        self.init_attr()
        self.op_type = "prelu"

        x = np.random.uniform(-1, 1, self.x_shape)
        # The zero point of prelu is not differentiable, so nudge any
        # near-zero samples away from it.
        x[np.abs(x) < 0.005] = 0.02

        if self.attrs == {'mode': "all"}:
            # One shared alpha for the whole tensor.
            alpha = np.random.uniform(-1, -0.5, (1))
        elif self.attrs == {'mode': "channel"}:
            # One alpha per channel.
            alpha = np.random.uniform(-1, -0.5, (1, x.shape[1], 1, 1))
        else:
            # Element-wise alpha.
            alpha = np.random.uniform(
                -1, -0.5, (1, x.shape[1], x.shape[2], x.shape[3]))
        self.inputs = {'X': x, 'Alpha': alpha}

        # Reference: out = max(x, 0) + alpha * min(x, 0).
        out = np.maximum(x, 0.) + np.minimum(x, 0.) * alpha
        assert out is not x
        self.outputs = {'Out': out}

    def init_input_shape(self):
        self.x_shape = (2, 100, 3, 4)

    def init_attr(self):
        self.attrs = {'mode': "channel"}

    def test_check_output(self):
        self.check_output()

    def test_check_grad_1_ignore_x(self):
        self.check_grad(['Alpha'], 'Out', no_grad_set=set('X'))

    def test_check_grad_2(self):
        self.check_grad(['X', 'Alpha'], 'Out')

    def test_check_grad_3_ignore_alpha(self):
        self.check_grad(['X'], 'Out', no_grad_set=set('Alpha'))
# TODO(minqiyang): Resume these test cases after fixing Python3 CI job issues
if six.PY2:
    # 'all' mode shares a single 1-D alpha across the whole input, so the
    # generic shape check is skipped via the decorator below.
    @skip_check_grad_ci(
        reason="[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode"
    )
    class TestModeAll(PReluTest):
        def init_input_shape(self):
            self.x_shape = (2, 3, 4, 5)
        def init_attr(self):
            self.attrs = {'mode': "all"}
class TestModeElt(PReluTest):
    # 'element' mode: exercises the per-element alpha branch of
    # PReluTest.setUp with a different input shape.
    def init_input_shape(self):
        self.x_shape = (3, 2, 5, 10)
    def init_attr(self):
        self.attrs = {'mode': "element"}
if __name__ == "__main__":
    # Run all op tests with the standard unittest runner.
    unittest.main()
| [
"numpy.abs",
"numpy.minimum",
"op_test.skip_check_grad_ci",
"numpy.random.uniform",
"unittest.main",
"numpy.maximum"
] | [((2331, 2449), 'op_test.skip_check_grad_ci', 'skip_check_grad_ci', ([], {'reason': '"""[skip shape check] Input(Alpha) must be 1-D and only has one data in \'all\' mode"""'}), '(reason=\n "[skip shape check] Input(Alpha) must be 1-D and only has one data in \'all\' mode"\n )\n', (2349, 2449), False, 'from op_test import OpTest, skip_check_grad_ci\n'), ((2855, 2870), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2868, 2870), False, 'import unittest\n'), ((897, 935), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'self.x_shape'], {}), '(-1, 1, self.x_shape)\n', (914, 935), True, 'import numpy as np\n'), ((1497, 1530), 'numpy.maximum', 'np.maximum', (["self.inputs['X']", '(0.0)'], {}), "(self.inputs['X'], 0.0)\n", (1507, 1530), True, 'import numpy as np\n'), ((1135, 1165), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(-0.5)', '(1)'], {}), '(-1, -0.5, 1)\n', (1152, 1165), True, 'import numpy as np\n'), ((1040, 1052), 'numpy.abs', 'np.abs', (['x_np'], {}), '(x_np)\n', (1046, 1052), True, 'import numpy as np\n'), ((1239, 1292), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(-0.5)', '(1, x_np.shape[1], 1, 1)'], {}), '(-1, -0.5, (1, x_np.shape[1], 1, 1))\n', (1256, 1292), True, 'import numpy as np\n'), ((1330, 1407), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(-0.5)', '(1, x_np.shape[1], x_np.shape[2], x_np.shape[3])'], {}), '(-1, -0.5, (1, x_np.shape[1], x_np.shape[2], x_np.shape[3]))\n', (1347, 1407), True, 'import numpy as np\n'), ((1556, 1589), 'numpy.minimum', 'np.minimum', (["self.inputs['X']", '(0.0)'], {}), "(self.inputs['X'], 0.0)\n", (1566, 1589), True, 'import numpy as np\n')] |
# Copyright (c) 2003-2015 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy
import treecorr
import os
from numpy import pi
from test_helper import get_from_wiki
def test_ascii():
    """Exercise ASCII catalog reading: column mapping, flag handling,
    coordinate units, (ra, dec, r) input, alternate delimiter/comment
    marker, and the flip_g1/flip_g2 options (including kwarg overrides).
    """
    nobj = 5000
    numpy.random.seed(8675309)
    x = numpy.random.random_sample(nobj)
    y = numpy.random.random_sample(nobj)
    z = numpy.random.random_sample(nobj)
    ra = numpy.random.random_sample(nobj)
    dec = numpy.random.random_sample(nobj)
    r = numpy.random.random_sample(nobj)
    w = numpy.random.random_sample(nobj)
    g1 = numpy.random.random_sample(nobj)
    g2 = numpy.random.random_sample(nobj)
    k = numpy.random.random_sample(nobj)
    # Give ~10% of the rows each flag bit so every flag-filter path is hit.
    flags = numpy.zeros(nobj).astype(int)
    for flag in [ 1, 2, 4, 8, 16 ]:
        sub = numpy.random.random_sample(nobj) < 0.1
        flags[sub] = numpy.bitwise_or(flags[sub], flag)
    file_name = os.path.join('data','test.dat')
    with open(file_name, 'w') as fid:
        # These are intentionally in a different order from the order we parse them.
        fid.write('# ra,dec,x,y,k,g1,g2,w,flag,z,r\n')
        for i in range(nobj):
            fid.write((('%.8f '*10)+'%d\n')%(
                ra[i],dec[i],x[i],y[i],k[i],g1[i],g2[i],w[i],z[i],r[i],flags[i]))
    # Check basic input
    config = {
        'x_col' : 3,
        'y_col' : 4,
        'z_col' : 9,
        'x_units' : 'rad',
        'y_units' : 'rad',
        'w_col' : 8,
        'g1_col' : 6,
        'g2_col' : 7,
        'k_col' : 5,
    }
    cat1 = treecorr.Catalog(file_name, config)
    numpy.testing.assert_almost_equal(cat1.x, x)
    numpy.testing.assert_almost_equal(cat1.y, y)
    numpy.testing.assert_almost_equal(cat1.z, z)
    numpy.testing.assert_almost_equal(cat1.w, w)
    numpy.testing.assert_almost_equal(cat1.g1, g1)
    numpy.testing.assert_almost_equal(cat1.g2, g2)
    numpy.testing.assert_almost_equal(cat1.k, k)
    # Check flags
    config['flag_col'] = 11
    cat2 = treecorr.Catalog(file_name, config)
    numpy.testing.assert_almost_equal(cat2.w[flags==0], w[flags==0])
    numpy.testing.assert_almost_equal(cat2.w[flags!=0], 0.)
    # Check ok_flag
    config['ok_flag'] = 4
    cat3 = treecorr.Catalog(file_name, config)
    numpy.testing.assert_almost_equal(cat3.w[numpy.logical_or(flags==0, flags==4)],
                                      w[numpy.logical_or(flags==0, flags==4)])
    numpy.testing.assert_almost_equal(cat3.w[numpy.logical_and(flags!=0, flags!=4)], 0.)
    # Check ignore_flag
    del config['ok_flag']
    config['ignore_flag'] = 16
    cat4 = treecorr.Catalog(file_name, config)
    numpy.testing.assert_almost_equal(cat4.w[flags < 16], w[flags < 16])
    numpy.testing.assert_almost_equal(cat4.w[flags >= 16], 0.)
    # Check different units for x,y
    config['x_units'] = 'arcsec'
    config['y_units'] = 'arcsec'
    del config['z_col']
    cat5 = treecorr.Catalog(file_name, config)
    numpy.testing.assert_almost_equal(cat5.x, x * (pi/180./3600.))
    numpy.testing.assert_almost_equal(cat5.y, y * (pi/180./3600.))
    config['x_units'] = 'arcmin'
    config['y_units'] = 'arcmin'
    cat5 = treecorr.Catalog(file_name, config)
    numpy.testing.assert_almost_equal(cat5.x, x * (pi/180./60.))
    numpy.testing.assert_almost_equal(cat5.y, y * (pi/180./60.))
    config['x_units'] = 'deg'
    config['y_units'] = 'deg'
    cat5 = treecorr.Catalog(file_name, config)
    numpy.testing.assert_almost_equal(cat5.x, x * (pi/180.))
    numpy.testing.assert_almost_equal(cat5.y, y * (pi/180.))
    del config['x_units'] # Default is radians
    del config['y_units']
    cat5 = treecorr.Catalog(file_name, config)
    numpy.testing.assert_almost_equal(cat5.x, x)
    numpy.testing.assert_almost_equal(cat5.y, y)
    # Check ra,dec
    del config['x_col']
    del config['y_col']
    config['ra_col'] = 1
    config['dec_col'] = 2
    config['r_col'] = 10
    config['ra_units'] = 'rad'
    config['dec_units'] = 'rad'
    cat6 = treecorr.Catalog(file_name, config)
    numpy.testing.assert_almost_equal(cat6.ra, ra)
    numpy.testing.assert_almost_equal(cat6.dec, dec)
    config['ra_units'] = 'deg'
    config['dec_units'] = 'deg'
    cat6 = treecorr.Catalog(file_name, config)
    numpy.testing.assert_almost_equal(cat6.ra, ra * (pi/180.))
    numpy.testing.assert_almost_equal(cat6.dec, dec * (pi/180.))
    config['ra_units'] = 'hour'
    config['dec_units'] = 'deg'
    cat6 = treecorr.Catalog(file_name, config)
    numpy.testing.assert_almost_equal(cat6.ra, ra * (pi/12.))
    numpy.testing.assert_almost_equal(cat6.dec, dec * (pi/180.))
    # Check using a different delimiter, comment marker
    csv_file_name = os.path.join('data','test.csv')
    with open(csv_file_name, 'w') as fid:
        # These are intentionally in a different order from the order we parse them.
        fid.write('% This file uses commas for its delimiter')
        fid.write('% And more than one header line.')
        fid.write('% Plus some extra comment lines every so often.')
        fid.write('% And we use a weird comment marker to boot.')
        fid.write('% ra,dec,x,y,k,g1,g2,w,flag\n')
        for i in range(nobj):
            fid.write((('%.8f,'*10)+'%d\n')%(
                ra[i],dec[i],x[i],y[i],k[i],g1[i],g2[i],w[i],z[i],r[i],flags[i]))
            if i%100 == 0:
                fid.write('%%%% Line %d\n'%i)
    config['delimiter'] = ','
    config['comment_marker'] = '%'
    cat7 = treecorr.Catalog(csv_file_name, config)
    numpy.testing.assert_almost_equal(cat7.ra, ra * (pi/12.))
    numpy.testing.assert_almost_equal(cat7.dec, dec * (pi/180.))
    numpy.testing.assert_almost_equal(cat7.r, r)
    numpy.testing.assert_almost_equal(cat7.g1, g1)
    numpy.testing.assert_almost_equal(cat7.g2, g2)
    numpy.testing.assert_almost_equal(cat7.w[flags < 16], w[flags < 16])
    numpy.testing.assert_almost_equal(cat7.w[flags >= 16], 0.)
    # Check flip_g1, flip_g2
    del config['delimiter']
    del config['comment_marker']
    config['flip_g1'] = True
    cat8 = treecorr.Catalog(file_name, config)
    numpy.testing.assert_almost_equal(cat8.g1, -g1)
    numpy.testing.assert_almost_equal(cat8.g2, g2)
    config['flip_g2'] = 'true'
    cat8 = treecorr.Catalog(file_name, config)
    numpy.testing.assert_almost_equal(cat8.g1, -g1)
    numpy.testing.assert_almost_equal(cat8.g2, -g2)
    config['flip_g1'] = 'n'
    config['flip_g2'] = 'yes'
    cat8 = treecorr.Catalog(file_name, config)
    numpy.testing.assert_almost_equal(cat8.g1, g1)
    numpy.testing.assert_almost_equal(cat8.g2, -g2)
    # Check overriding values with kwargs
    cat8 = treecorr.Catalog(file_name, config, flip_g1=True, flip_g2=False)
    numpy.testing.assert_almost_equal(cat8.g1, -g1)
    numpy.testing.assert_almost_equal(cat8.g2, g2)
def test_fits():
    """Read the Aardvark FITS catalog and spot-check known values, both via
    (ra, dec) columns and via named (x, y, w, flag) columns, plus the
    first_row/last_row row-range selection.
    """
    get_from_wiki('Aardvark.fit')
    file_name = os.path.join('data','Aardvark.fit')
    config = treecorr.read_config('Aardvark.yaml')
    config['verbose'] = 1
    # Just test a few random particular values
    cat1 = treecorr.Catalog(file_name, config)
    numpy.testing.assert_equal(len(cat1.ra), 390935)
    numpy.testing.assert_equal(cat1.nobj, 390935)
    numpy.testing.assert_almost_equal(cat1.ra[0], 56.4195 * (pi/180.))
    numpy.testing.assert_almost_equal(cat1.ra[390934], 78.4782 * (pi/180.))
    numpy.testing.assert_almost_equal(cat1.dec[290333], 83.1579 * (pi/180.))
    numpy.testing.assert_almost_equal(cat1.g1[46392], 0.0005066675)
    numpy.testing.assert_almost_equal(cat1.g2[46392], -0.0001006742)
    numpy.testing.assert_almost_equal(cat1.k[46392], -0.0008628797)
    # The catalog doesn't have x, y, or w, but test that functionality as well.
    del config['ra_col']
    del config['dec_col']
    config['x_col'] = 'RA'
    config['y_col'] = 'DEC'
    config['w_col'] = 'MU'
    config['flag_col'] = 'INDEX'
    config['ignore_flag'] = 64
    cat2 = treecorr.Catalog(file_name, config)
    numpy.testing.assert_almost_equal(cat2.x[390934], 78.4782, decimal=4)
    numpy.testing.assert_almost_equal(cat2.y[290333], 83.1579, decimal=4)
    numpy.testing.assert_almost_equal(cat2.w[46392], 0.) # index = 1200379
    numpy.testing.assert_almost_equal(cat2.w[46393], 0.9995946) # index = 1200386
    # Test using a limited set of rows
    config['first_row'] = 101
    config['last_row'] = 50000
    cat3 = treecorr.Catalog(file_name, config)
    numpy.testing.assert_equal(len(cat3.x), 49900)
    numpy.testing.assert_equal(cat3.ntot, 49900)
    numpy.testing.assert_equal(cat3.nobj, sum(cat3.w != 0))
    numpy.testing.assert_equal(cat3.sumw, sum(cat3.w))
    numpy.testing.assert_equal(cat3.sumw, sum(cat2.w[100:50000]))
    numpy.testing.assert_almost_equal(cat3.g1[46292], 0.0005066675)
    numpy.testing.assert_almost_equal(cat3.g2[46292], -0.0001006742)
    numpy.testing.assert_almost_equal(cat3.k[46292], -0.0008628797)
def test_direct():
    """Check that arrays handed directly to Catalog round-trip unchanged,
    and that ra/dec are converted into radians using the given units.
    """
    nobj = 5000
    numpy.random.seed(8675309)
    x = numpy.random.random_sample(nobj)
    y = numpy.random.random_sample(nobj)
    ra = numpy.random.random_sample(nobj)
    dec = numpy.random.random_sample(nobj)
    w = numpy.random.random_sample(nobj)
    g1 = numpy.random.random_sample(nobj)
    g2 = numpy.random.random_sample(nobj)
    k = numpy.random.random_sample(nobj)
    # Flat (x, y) catalog: every column should come back exactly.
    flat_cat = treecorr.Catalog(x=x, y=y, w=w, g1=g1, g2=g2, k=k)
    for attr, expected in (('x', x), ('y', y), ('w', w),
                           ('g1', g1), ('g2', g2), ('k', k)):
        numpy.testing.assert_almost_equal(getattr(flat_cat, attr), expected)
    # Spherical (ra, dec) catalog: angles are stored in radians.
    sphere_cat = treecorr.Catalog(ra=ra, dec=dec, w=w, g1=g1, g2=g2, k=k,
                                  ra_units='hours', dec_units='degrees')
    numpy.testing.assert_almost_equal(sphere_cat.ra, ra * treecorr.hours)
    numpy.testing.assert_almost_equal(sphere_cat.dec, dec * treecorr.degrees)
    for attr, expected in (('w', w), ('g1', g1), ('g2', g2), ('k', k)):
        numpy.testing.assert_almost_equal(getattr(sphere_cat, attr), expected)
def test_contiguous():
    """Non-contiguous and non-float input arrays must give the same
    correlation results as contiguous float copies; non-1d inputs must
    also work (with a warning).
    """
    # This unit test comes from <NAME> who discovered a bug in earlier
    # versions of the code that the Catalog didn't correctly handle input arrays
    # that were not contiguous in memory. We want to make sure this kind of
    # input works correctly. It also checks that the input dtype doesn't have
    # to be float
    # Fields of a structured array are strided views, so e.g. source_data['g2']
    # is NOT C-contiguous; the mixed dtypes also exercise type conversion.
    source_data = numpy.array([
            (0.0380569697547, 0.0142782758818, 0.330845443464, -0.111049332655),
            (-0.0261291090735, 0.0863787933931, 0.122954685209, 0.40260430406),
            (-0.0261291090735, 0.0863787933931, 0.122954685209, 0.40260430406),
            (0.125086697534, 0.0283621046495, -0.208159531309, 0.142491564101),
            (0.0457709426026, -0.0299249486373, -0.0406555089425, 0.24515956887),
            (-0.00338578248926, 0.0460291122935, 0.363057738173, -0.524536297555)],
            dtype=[('ra', None), ('dec', numpy.float64), ('g1', numpy.float32),
                   ('g2', numpy.float128)])
    config = {'min_sep': 0.05, 'max_sep': 0.2, 'sep_units': 'degrees', 'nbins': 5 }
    cat1 = treecorr.Catalog(ra=[0], dec=[0], ra_units='deg', dec_units='deg') # dumb lens
    cat2 = treecorr.Catalog(ra=source_data['ra'], ra_units='deg',
                            dec=source_data['dec'], dec_units='deg',
                            g1=source_data['g1'],
                            g2=source_data['g2'])
    cat2_float = treecorr.Catalog(ra=source_data['ra'].astype(float), ra_units='deg',
                                  dec=source_data['dec'].astype(float), dec_units='deg',
                                  g1=source_data['g1'].astype(float),
                                  g2=source_data['g2'].astype(float))
    print("dtypes of original arrays: ", [source_data[key].dtype for key in ['ra','dec','g1','g2']])
    print("dtypes of cat2 arrays: ", [getattr(cat2,key).dtype for key in ['ra','dec','g1','g2']])
    print("is original g2 array contiguous?", source_data['g2'].flags['C_CONTIGUOUS'])
    print("is cat2.g2 array contiguous?", cat2.g2.flags['C_CONTIGUOUS'])
    assert not source_data['g2'].flags['C_CONTIGUOUS']
    assert cat2.g2.flags['C_CONTIGUOUS']
    ng = treecorr.NGCorrelation(config)
    ng.process(cat1,cat2)
    ng_float = treecorr.NGCorrelation(config)
    ng_float.process(cat1,cat2_float)
    numpy.testing.assert_equal(ng.xi, ng_float.xi)
    # While we're at it, check that non-1d inputs work, but emit a warning.
    if __name__ == '__main__':
        v = 1
    else:
        v = 0
    cat2_non1d = treecorr.Catalog(ra=source_data['ra'].reshape(3,2), ra_units='deg',
                                  dec=source_data['dec'].reshape(1,1,1,6), dec_units='deg',
                                  g1=source_data['g1'].reshape(6,1),
                                  g2=source_data['g2'].reshape(1,6), verbose=v)
    ng.process(cat1,cat2_non1d)
    numpy.testing.assert_equal(ng.xi, ng_float.xi)
def test_list():
    """Check every supported way of giving read_catalogs a list of files:
    a Python list, a space-separated string, and a file_list file.
    """
    # Test different ways to read in a list of catalog names.
    # This is based on the bug report for Issue #10.
    nobj = 5000
    numpy.random.seed(8675309)
    x_list = []
    y_list = []
    file_names = []
    ncats = 3
    for k in range(ncats):
        x = numpy.random.random_sample(nobj)
        y = numpy.random.random_sample(nobj)
        file_name = os.path.join('data','test_list%d.dat'%k)
        with open(file_name, 'w') as fid:
            # These are intentionally in a different order from the order we parse them.
            fid.write('# ra,dec,x,y,k,g1,g2,w,flag\n')
            for i in range(nobj):
                fid.write(('%.8f %.8f\n')%(x[i],y[i]))
        x_list.append(x)
        y_list.append(y)
        file_names.append(file_name)
    # Start with file_name being a list:
    config = {
        'x_col' : 1,
        'y_col' : 2,
        'file_name' : file_names
    }
    cats = treecorr.read_catalogs(config, key='file_name')
    numpy.testing.assert_equal(len(cats), ncats)
    for k in range(ncats):
        numpy.testing.assert_almost_equal(cats[k].x, x_list[k])
        numpy.testing.assert_almost_equal(cats[k].y, y_list[k])
    # Next check that the list can be just a string with spaces between names:
    config['file_name'] = " ".join(file_names)
    # Also check that it is ok to include file_list to read_catalogs.
    cats = treecorr.read_catalogs(config, 'file_name', 'file_list')
    numpy.testing.assert_equal(len(cats), ncats)
    for k in range(ncats):
        numpy.testing.assert_almost_equal(cats[k].x, x_list[k])
        numpy.testing.assert_almost_equal(cats[k].y, y_list[k])
    # Next check that having the names in a file_list file works:
    list_name = os.path.join('data','test_list.txt')
    with open(list_name, 'w') as fid:
        for name in file_names:
            fid.write(name + '\n')
    del config['file_name']
    config['file_list'] = list_name
    cats = treecorr.read_catalogs(config, 'file_name', 'file_list')
    numpy.testing.assert_equal(len(cats), ncats)
    for k in range(ncats):
        numpy.testing.assert_almost_equal(cats[k].x, x_list[k])
        numpy.testing.assert_almost_equal(cats[k].y, y_list[k])
    # Also, should be allowed to omit file_name arg:
    cats = treecorr.read_catalogs(config, list_key='file_list')
    numpy.testing.assert_equal(len(cats), ncats)
    for k in range(ncats):
        numpy.testing.assert_almost_equal(cats[k].x, x_list[k])
        numpy.testing.assert_almost_equal(cats[k].y, y_list[k])
def test_write():
    """Round-trip test for Catalog I/O.

    Build two catalogs from fixed-seed random data, write each one out in
    both ASCII and FITS format, read the files back in, and check that every
    stored column comes back (almost) unchanged.
    """
    ngal = 20000
    sigma = 10.
    numpy.random.seed(8675309)
    # NOTE: the draw order below is significant — with the fixed seed it
    # determines the exact values written and compared.
    x = numpy.random.normal(222, 50, (ngal,))
    y = numpy.random.normal(138, 20, (ngal,))
    z = numpy.random.normal(912, 130, (ngal,))
    w = numpy.random.normal(1.3, 0.1, (ngal,))
    ra = numpy.random.normal(11.34, 0.9, (ngal,))
    dec = numpy.random.normal(-48.12, 4.3, (ngal,))
    r = numpy.random.normal(1024, 230, (ngal,))
    k = numpy.random.normal(0, sigma, (ngal,))
    g1 = numpy.random.normal(0, sigma, (ngal,))
    g2 = numpy.random.normal(0, sigma, (ngal,))

    cat1 = treecorr.Catalog(x=x, y=y, z=z)
    cat2 = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='hour', dec_units='deg',
                            w=w, g1=g1, g2=g2, k=k)

    # --- ASCII round trip ---
    ascii1 = os.path.join('output','cat1.dat')
    cat1.write(ascii1)
    cat1_asc = treecorr.Catalog(ascii1, file_type='ASCII',
                                x_col=1, y_col=2, z_col=3)
    for col_name, truth in (('x', x), ('y', y), ('z', z)):
        numpy.testing.assert_almost_equal(getattr(cat1_asc, col_name), truth)

    ascii2 = os.path.join('output','cat2.dat')
    cat2.write(ascii2, file_type='ASCII')
    cat2_asc = treecorr.Catalog(ascii2, ra_col=1, dec_col=2,
                                r_col=3, w_col=4, g1_col=5, g2_col=6, k_col=7,
                                ra_units='rad', dec_units='rad')
    for col_name, truth in (('ra', ra), ('dec', dec), ('r', r), ('w', w),
                            ('g1', g1), ('g2', g2), ('k', k)):
        numpy.testing.assert_almost_equal(getattr(cat2_asc, col_name), truth)

    # --- FITS round trip ---
    fits1 = os.path.join('output','cat1.fits')
    cat1.write(fits1, file_type='FITS')
    cat1_fits = treecorr.Catalog(fits1,
                                 x_col='x', y_col='y', z_col='z')
    for col_name, truth in (('x', x), ('y', y), ('z', z)):
        numpy.testing.assert_almost_equal(getattr(cat1_fits, col_name), truth)

    fits2 = os.path.join('output','cat2.fits')
    cat2.write(fits2)
    cat2_fits = treecorr.Catalog(fits2, ra_col='ra', dec_col='dec',
                                 r_col='r', w_col='w', g1_col='g1', g2_col='g2', k_col='k',
                                 ra_units='rad', dec_units='rad', file_type='FITS')
    for col_name, truth in (('ra', ra), ('dec', dec), ('r', r), ('w', w),
                            ('g1', g1), ('g2', g2), ('k', k)):
        numpy.testing.assert_almost_equal(getattr(cat2_fits, col_name), truth)
if __name__ == '__main__':
    # Run the whole catalog test suite when invoked as a script.
    for run_test in (test_ascii, test_fits, test_direct,
                     test_contiguous, test_list, test_write):
        run_test()
| [
"numpy.random.normal",
"numpy.random.random_sample",
"test_helper.get_from_wiki",
"numpy.testing.assert_equal",
"numpy.bitwise_or",
"numpy.logical_and",
"os.path.join",
"treecorr.read_config",
"numpy.logical_or",
"treecorr.NGCorrelation",
"numpy.testing.assert_almost_equal",
"treecorr.Catalog"... | [((779, 805), 'numpy.random.seed', 'numpy.random.seed', (['(8675309)'], {}), '(8675309)\n', (796, 805), False, 'import numpy\n'), ((814, 846), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (840, 846), False, 'import numpy\n'), ((855, 887), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (881, 887), False, 'import numpy\n'), ((896, 928), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (922, 928), False, 'import numpy\n'), ((938, 970), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (964, 970), False, 'import numpy\n'), ((981, 1013), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (1007, 1013), False, 'import numpy\n'), ((1022, 1054), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (1048, 1054), False, 'import numpy\n'), ((1063, 1095), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (1089, 1095), False, 'import numpy\n'), ((1105, 1137), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (1131, 1137), False, 'import numpy\n'), ((1147, 1179), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (1173, 1179), False, 'import numpy\n'), ((1188, 1220), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (1214, 1220), False, 'import numpy\n'), ((1426, 1458), 'os.path.join', 'os.path.join', (['"""data"""', '"""test.dat"""'], {}), "('data', 'test.dat')\n", (1438, 1458), False, 'import os\n'), ((2054, 2089), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {}), '(file_name, config)\n', (2070, 2089), False, 'import treecorr\n'), ((2094, 2138), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1.x', 'x'], {}), '(cat1.x, x)\n', 
(2127, 2138), False, 'import numpy\n'), ((2143, 2187), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1.y', 'y'], {}), '(cat1.y, y)\n', (2176, 2187), False, 'import numpy\n'), ((2192, 2236), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1.z', 'z'], {}), '(cat1.z, z)\n', (2225, 2236), False, 'import numpy\n'), ((2241, 2285), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1.w', 'w'], {}), '(cat1.w, w)\n', (2274, 2285), False, 'import numpy\n'), ((2290, 2336), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1.g1', 'g1'], {}), '(cat1.g1, g1)\n', (2323, 2336), False, 'import numpy\n'), ((2341, 2387), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1.g2', 'g2'], {}), '(cat1.g2, g2)\n', (2374, 2387), False, 'import numpy\n'), ((2392, 2436), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1.k', 'k'], {}), '(cat1.k, k)\n', (2425, 2436), False, 'import numpy\n'), ((2495, 2530), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {}), '(file_name, config)\n', (2511, 2530), False, 'import treecorr\n'), ((2535, 2603), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2.w[flags == 0]', 'w[flags == 0]'], {}), '(cat2.w[flags == 0], w[flags == 0])\n', (2568, 2603), False, 'import numpy\n'), ((2604, 2662), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2.w[flags != 0]', '(0.0)'], {}), '(cat2.w[flags != 0], 0.0)\n', (2637, 2662), False, 'import numpy\n'), ((2718, 2753), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {}), '(file_name, config)\n', (2734, 2753), False, 'import treecorr\n'), ((3099, 3134), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {}), '(file_name, config)\n', (3115, 3134), False, 'import treecorr\n'), ((3139, 3207), 'numpy.testing.assert_almost_equal', 
'numpy.testing.assert_almost_equal', (['cat4.w[flags < 16]', 'w[flags < 16]'], {}), '(cat4.w[flags < 16], w[flags < 16])\n', (3172, 3207), False, 'import numpy\n'), ((3212, 3271), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat4.w[flags >= 16]', '(0.0)'], {}), '(cat4.w[flags >= 16], 0.0)\n', (3245, 3271), False, 'import numpy\n'), ((3409, 3444), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {}), '(file_name, config)\n', (3425, 3444), False, 'import treecorr\n'), ((3449, 3517), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat5.x', '(x * (pi / 180.0 / 3600.0))'], {}), '(cat5.x, x * (pi / 180.0 / 3600.0))\n', (3482, 3517), False, 'import numpy\n'), ((3516, 3584), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat5.y', '(y * (pi / 180.0 / 3600.0))'], {}), '(cat5.y, y * (pi / 180.0 / 3600.0))\n', (3549, 3584), False, 'import numpy\n'), ((3657, 3692), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {}), '(file_name, config)\n', (3673, 3692), False, 'import treecorr\n'), ((3697, 3763), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat5.x', '(x * (pi / 180.0 / 60.0))'], {}), '(cat5.x, x * (pi / 180.0 / 60.0))\n', (3730, 3763), False, 'import numpy\n'), ((3762, 3828), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat5.y', '(y * (pi / 180.0 / 60.0))'], {}), '(cat5.y, y * (pi / 180.0 / 60.0))\n', (3795, 3828), False, 'import numpy\n'), ((3895, 3930), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {}), '(file_name, config)\n', (3911, 3930), False, 'import treecorr\n'), ((3935, 3994), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat5.x', '(x * (pi / 180.0))'], {}), '(cat5.x, x * (pi / 180.0))\n', (3968, 3994), False, 'import numpy\n'), ((3996, 4055), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat5.y', 
'(y * (pi / 180.0))'], {}), '(cat5.y, y * (pi / 180.0))\n', (4029, 4055), False, 'import numpy\n'), ((4139, 4174), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {}), '(file_name, config)\n', (4155, 4174), False, 'import treecorr\n'), ((4179, 4223), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat5.x', 'x'], {}), '(cat5.x, x)\n', (4212, 4223), False, 'import numpy\n'), ((4228, 4272), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat5.y', 'y'], {}), '(cat5.y, y)\n', (4261, 4272), False, 'import numpy\n'), ((4491, 4526), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {}), '(file_name, config)\n', (4507, 4526), False, 'import treecorr\n'), ((4531, 4577), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat6.ra', 'ra'], {}), '(cat6.ra, ra)\n', (4564, 4577), False, 'import numpy\n'), ((4582, 4630), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat6.dec', 'dec'], {}), '(cat6.dec, dec)\n', (4615, 4630), False, 'import numpy\n'), ((4706, 4741), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {}), '(file_name, config)\n', (4722, 4741), False, 'import treecorr\n'), ((4746, 4807), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat6.ra', '(ra * (pi / 180.0))'], {}), '(cat6.ra, ra * (pi / 180.0))\n', (4779, 4807), False, 'import numpy\n'), ((4809, 4872), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat6.dec', '(dec * (pi / 180.0))'], {}), '(cat6.dec, dec * (pi / 180.0))\n', (4842, 4872), False, 'import numpy\n'), ((4946, 4981), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {}), '(file_name, config)\n', (4962, 4981), False, 'import treecorr\n'), ((4986, 5046), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat6.ra', '(ra * (pi / 12.0))'], {}), '(cat6.ra, ra * (pi / 12.0))\n', (5019, 5046), 
False, 'import numpy\n'), ((5048, 5111), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat6.dec', '(dec * (pi / 180.0))'], {}), '(cat6.dec, dec * (pi / 180.0))\n', (5081, 5111), False, 'import numpy\n'), ((5186, 5218), 'os.path.join', 'os.path.join', (['"""data"""', '"""test.csv"""'], {}), "('data', 'test.csv')\n", (5198, 5218), False, 'import os\n'), ((5955, 5994), 'treecorr.Catalog', 'treecorr.Catalog', (['csv_file_name', 'config'], {}), '(csv_file_name, config)\n', (5971, 5994), False, 'import treecorr\n'), ((5999, 6059), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat7.ra', '(ra * (pi / 12.0))'], {}), '(cat7.ra, ra * (pi / 12.0))\n', (6032, 6059), False, 'import numpy\n'), ((6061, 6124), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat7.dec', '(dec * (pi / 180.0))'], {}), '(cat7.dec, dec * (pi / 180.0))\n', (6094, 6124), False, 'import numpy\n'), ((6126, 6170), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat7.r', 'r'], {}), '(cat7.r, r)\n', (6159, 6170), False, 'import numpy\n'), ((6175, 6221), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat7.g1', 'g1'], {}), '(cat7.g1, g1)\n', (6208, 6221), False, 'import numpy\n'), ((6226, 6272), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat7.g2', 'g2'], {}), '(cat7.g2, g2)\n', (6259, 6272), False, 'import numpy\n'), ((6277, 6345), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat7.w[flags < 16]', 'w[flags < 16]'], {}), '(cat7.w[flags < 16], w[flags < 16])\n', (6310, 6345), False, 'import numpy\n'), ((6350, 6409), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat7.w[flags >= 16]', '(0.0)'], {}), '(cat7.w[flags >= 16], 0.0)\n', (6383, 6409), False, 'import numpy\n'), ((6540, 6575), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {}), '(file_name, 
config)\n', (6556, 6575), False, 'import treecorr\n'), ((6580, 6627), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat8.g1', '(-g1)'], {}), '(cat8.g1, -g1)\n', (6613, 6627), False, 'import numpy\n'), ((6632, 6678), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat8.g2', 'g2'], {}), '(cat8.g2, g2)\n', (6665, 6678), False, 'import numpy\n'), ((6722, 6757), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {}), '(file_name, config)\n', (6738, 6757), False, 'import treecorr\n'), ((6762, 6809), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat8.g1', '(-g1)'], {}), '(cat8.g1, -g1)\n', (6795, 6809), False, 'import numpy\n'), ((6814, 6861), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat8.g2', '(-g2)'], {}), '(cat8.g2, -g2)\n', (6847, 6861), False, 'import numpy\n'), ((6932, 6967), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {}), '(file_name, config)\n', (6948, 6967), False, 'import treecorr\n'), ((6972, 7018), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat8.g1', 'g1'], {}), '(cat8.g1, g1)\n', (7005, 7018), False, 'import numpy\n'), ((7023, 7070), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat8.g2', '(-g2)'], {}), '(cat8.g2, -g2)\n', (7056, 7070), False, 'import numpy\n'), ((7125, 7189), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {'flip_g1': '(True)', 'flip_g2': '(False)'}), '(file_name, config, flip_g1=True, flip_g2=False)\n', (7141, 7189), False, 'import treecorr\n'), ((7194, 7241), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat8.g1', '(-g1)'], {}), '(cat8.g1, -g1)\n', (7227, 7241), False, 'import numpy\n'), ((7246, 7292), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat8.g2', 'g2'], {}), '(cat8.g2, g2)\n', (7279, 7292), False, 'import numpy\n'), ((7316, 
7345), 'test_helper.get_from_wiki', 'get_from_wiki', (['"""Aardvark.fit"""'], {}), "('Aardvark.fit')\n", (7329, 7345), False, 'from test_helper import get_from_wiki\n'), ((7362, 7398), 'os.path.join', 'os.path.join', (['"""data"""', '"""Aardvark.fit"""'], {}), "('data', 'Aardvark.fit')\n", (7374, 7398), False, 'import os\n'), ((7411, 7448), 'treecorr.read_config', 'treecorr.read_config', (['"""Aardvark.yaml"""'], {}), "('Aardvark.yaml')\n", (7431, 7448), False, 'import treecorr\n'), ((7534, 7569), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {}), '(file_name, config)\n', (7550, 7569), False, 'import treecorr\n'), ((7627, 7672), 'numpy.testing.assert_equal', 'numpy.testing.assert_equal', (['cat1.nobj', '(390935)'], {}), '(cat1.nobj, 390935)\n', (7653, 7672), False, 'import numpy\n'), ((7677, 7746), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1.ra[0]', '(56.4195 * (pi / 180.0))'], {}), '(cat1.ra[0], 56.4195 * (pi / 180.0))\n', (7710, 7746), False, 'import numpy\n'), ((7748, 7822), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1.ra[390934]', '(78.4782 * (pi / 180.0))'], {}), '(cat1.ra[390934], 78.4782 * (pi / 180.0))\n', (7781, 7822), False, 'import numpy\n'), ((7824, 7899), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1.dec[290333]', '(83.1579 * (pi / 180.0))'], {}), '(cat1.dec[290333], 83.1579 * (pi / 180.0))\n', (7857, 7899), False, 'import numpy\n'), ((7901, 7964), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1.g1[46392]', '(0.0005066675)'], {}), '(cat1.g1[46392], 0.0005066675)\n', (7934, 7964), False, 'import numpy\n'), ((7969, 8033), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1.g2[46392]', '(-0.0001006742)'], {}), '(cat1.g2[46392], -0.0001006742)\n', (8002, 8033), False, 'import numpy\n'), ((8038, 8101), 'numpy.testing.assert_almost_equal', 
'numpy.testing.assert_almost_equal', (['cat1.k[46392]', '(-0.0008628797)'], {}), '(cat1.k[46392], -0.0008628797)\n', (8071, 8101), False, 'import numpy\n'), ((8391, 8426), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {}), '(file_name, config)\n', (8407, 8426), False, 'import treecorr\n'), ((8431, 8500), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2.x[390934]', '(78.4782)'], {'decimal': '(4)'}), '(cat2.x[390934], 78.4782, decimal=4)\n', (8464, 8500), False, 'import numpy\n'), ((8505, 8574), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2.y[290333]', '(83.1579)'], {'decimal': '(4)'}), '(cat2.y[290333], 83.1579, decimal=4)\n', (8538, 8574), False, 'import numpy\n'), ((8579, 8632), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2.w[46392]', '(0.0)'], {}), '(cat2.w[46392], 0.0)\n', (8612, 8632), False, 'import numpy\n'), ((8661, 8720), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2.w[46393]', '(0.9995946)'], {}), '(cat2.w[46393], 0.9995946)\n', (8694, 8720), False, 'import numpy\n'), ((8851, 8886), 'treecorr.Catalog', 'treecorr.Catalog', (['file_name', 'config'], {}), '(file_name, config)\n', (8867, 8886), False, 'import treecorr\n'), ((8942, 8986), 'numpy.testing.assert_equal', 'numpy.testing.assert_equal', (['cat3.ntot', '(49900)'], {}), '(cat3.ntot, 49900)\n', (8968, 8986), False, 'import numpy\n'), ((9172, 9235), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat3.g1[46292]', '(0.0005066675)'], {}), '(cat3.g1[46292], 0.0005066675)\n', (9205, 9235), False, 'import numpy\n'), ((9240, 9304), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat3.g2[46292]', '(-0.0001006742)'], {}), '(cat3.g2[46292], -0.0001006742)\n', (9273, 9304), False, 'import numpy\n'), ((9309, 9372), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', 
(['cat3.k[46292]', '(-0.0008628797)'], {}), '(cat3.k[46292], -0.0008628797)\n', (9342, 9372), False, 'import numpy\n'), ((9415, 9441), 'numpy.random.seed', 'numpy.random.seed', (['(8675309)'], {}), '(8675309)\n', (9432, 9441), False, 'import numpy\n'), ((9450, 9482), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (9476, 9482), False, 'import numpy\n'), ((9491, 9523), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (9517, 9523), False, 'import numpy\n'), ((9533, 9565), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (9559, 9565), False, 'import numpy\n'), ((9576, 9608), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (9602, 9608), False, 'import numpy\n'), ((9617, 9649), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (9643, 9649), False, 'import numpy\n'), ((9659, 9691), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (9685, 9691), False, 'import numpy\n'), ((9701, 9733), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (9727, 9733), False, 'import numpy\n'), ((9742, 9774), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (9768, 9774), False, 'import numpy\n'), ((9787, 9837), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x', 'y': 'y', 'w': 'w', 'g1': 'g1', 'g2': 'g2', 'k': 'k'}), '(x=x, y=y, w=w, g1=g1, g2=g2, k=k)\n', (9803, 9837), False, 'import treecorr\n'), ((9842, 9886), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1.x', 'x'], {}), '(cat1.x, x)\n', (9875, 9886), False, 'import numpy\n'), ((9891, 9935), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1.y', 'y'], {}), '(cat1.y, y)\n', (9924, 9935), False, 'import numpy\n'), ((9940, 9984), 'numpy.testing.assert_almost_equal', 
'numpy.testing.assert_almost_equal', (['cat1.w', 'w'], {}), '(cat1.w, w)\n', (9973, 9984), False, 'import numpy\n'), ((9989, 10035), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1.g1', 'g1'], {}), '(cat1.g1, g1)\n', (10022, 10035), False, 'import numpy\n'), ((10040, 10086), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1.g2', 'g2'], {}), '(cat1.g2, g2)\n', (10073, 10086), False, 'import numpy\n'), ((10091, 10135), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1.k', 'k'], {}), '(cat1.k, k)\n', (10124, 10135), False, 'import numpy\n'), ((10148, 10247), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra', 'dec': 'dec', 'w': 'w', 'g1': 'g1', 'g2': 'g2', 'k': 'k', 'ra_units': '"""hours"""', 'dec_units': '"""degrees"""'}), "(ra=ra, dec=dec, w=w, g1=g1, g2=g2, k=k, ra_units='hours',\n dec_units='degrees')\n", (10164, 10247), False, 'import treecorr\n'), ((10276, 10339), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2.ra', '(ra * treecorr.hours)'], {}), '(cat2.ra, ra * treecorr.hours)\n', (10309, 10339), False, 'import numpy\n'), ((10344, 10411), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2.dec', '(dec * treecorr.degrees)'], {}), '(cat2.dec, dec * treecorr.degrees)\n', (10377, 10411), False, 'import numpy\n'), ((10416, 10460), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2.w', 'w'], {}), '(cat2.w, w)\n', (10449, 10460), False, 'import numpy\n'), ((10465, 10511), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2.g1', 'g1'], {}), '(cat2.g1, g1)\n', (10498, 10511), False, 'import numpy\n'), ((10516, 10562), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2.g2', 'g2'], {}), '(cat2.g2, g2)\n', (10549, 10562), False, 'import numpy\n'), ((10567, 10611), 'numpy.testing.assert_almost_equal', 
'numpy.testing.assert_almost_equal', (['cat2.k', 'k'], {}), '(cat2.k, k)\n', (10600, 10611), False, 'import numpy\n'), ((10981, 11535), 'numpy.array', 'numpy.array', (['[(0.0380569697547, 0.0142782758818, 0.330845443464, -0.111049332655), (-\n 0.0261291090735, 0.0863787933931, 0.122954685209, 0.40260430406), (-\n 0.0261291090735, 0.0863787933931, 0.122954685209, 0.40260430406), (\n 0.125086697534, 0.0283621046495, -0.208159531309, 0.142491564101), (\n 0.0457709426026, -0.0299249486373, -0.0406555089425, 0.24515956887), (-\n 0.00338578248926, 0.0460291122935, 0.363057738173, -0.524536297555)]'], {'dtype': "[('ra', None), ('dec', numpy.float64), ('g1', numpy.float32), ('g2', numpy.\n float128)]"}), "([(0.0380569697547, 0.0142782758818, 0.330845443464, -\n 0.111049332655), (-0.0261291090735, 0.0863787933931, 0.122954685209, \n 0.40260430406), (-0.0261291090735, 0.0863787933931, 0.122954685209, \n 0.40260430406), (0.125086697534, 0.0283621046495, -0.208159531309, \n 0.142491564101), (0.0457709426026, -0.0299249486373, -0.0406555089425, \n 0.24515956887), (-0.00338578248926, 0.0460291122935, 0.363057738173, -\n 0.524536297555)], dtype=[('ra', None), ('dec', numpy.float64), ('g1',\n numpy.float32), ('g2', numpy.float128)])\n", (10992, 11535), False, 'import numpy\n'), ((11703, 11769), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': '[0]', 'dec': '[0]', 'ra_units': '"""deg"""', 'dec_units': '"""deg"""'}), "(ra=[0], dec=[0], ra_units='deg', dec_units='deg')\n", (11719, 11769), False, 'import treecorr\n'), ((11793, 11937), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': "source_data['ra']", 'ra_units': '"""deg"""', 'dec': "source_data['dec']", 'dec_units': '"""deg"""', 'g1': "source_data['g1']", 'g2': "source_data['g2']"}), "(ra=source_data['ra'], ra_units='deg', dec=source_data[\n 'dec'], dec_units='deg', g1=source_data['g1'], g2=source_data['g2'])\n", (11809, 11937), False, 'import treecorr\n'), ((12798, 12828), 'treecorr.NGCorrelation', 'treecorr.NGCorrelation', 
(['config'], {}), '(config)\n', (12820, 12828), False, 'import treecorr\n'), ((12870, 12900), 'treecorr.NGCorrelation', 'treecorr.NGCorrelation', (['config'], {}), '(config)\n', (12892, 12900), False, 'import treecorr\n'), ((12943, 12989), 'numpy.testing.assert_equal', 'numpy.testing.assert_equal', (['ng.xi', 'ng_float.xi'], {}), '(ng.xi, ng_float.xi)\n', (12969, 12989), False, 'import numpy\n'), ((13498, 13544), 'numpy.testing.assert_equal', 'numpy.testing.assert_equal', (['ng.xi', 'ng_float.xi'], {}), '(ng.xi, ng_float.xi)\n', (13524, 13544), False, 'import numpy\n'), ((13700, 13726), 'numpy.random.seed', 'numpy.random.seed', (['(8675309)'], {}), '(8675309)\n', (13717, 13726), False, 'import numpy\n'), ((14486, 14533), 'treecorr.read_catalogs', 'treecorr.read_catalogs', (['config'], {'key': '"""file_name"""'}), "(config, key='file_name')\n", (14508, 14533), False, 'import treecorr\n'), ((14947, 15003), 'treecorr.read_catalogs', 'treecorr.read_catalogs', (['config', '"""file_name"""', '"""file_list"""'], {}), "(config, 'file_name', 'file_list')\n", (14969, 15003), False, 'import treecorr\n'), ((15291, 15328), 'os.path.join', 'os.path.join', (['"""data"""', '"""test_list.txt"""'], {}), "('data', 'test_list.txt')\n", (15303, 15328), False, 'import os\n'), ((15509, 15565), 'treecorr.read_catalogs', 'treecorr.read_catalogs', (['config', '"""file_name"""', '"""file_list"""'], {}), "(config, 'file_name', 'file_list')\n", (15531, 15565), False, 'import treecorr\n'), ((15835, 15887), 'treecorr.read_catalogs', 'treecorr.read_catalogs', (['config'], {'list_key': '"""file_list"""'}), "(config, list_key='file_list')\n", (15857, 15887), False, 'import treecorr\n'), ((16232, 16258), 'numpy.random.seed', 'numpy.random.seed', (['(8675309)'], {}), '(8675309)\n', (16249, 16258), False, 'import numpy\n'), ((16267, 16304), 'numpy.random.normal', 'numpy.random.normal', (['(222)', '(50)', '(ngal,)'], {}), '(222, 50, (ngal,))\n', (16286, 16304), False, 'import numpy\n'), ((16313, 
16350), 'numpy.random.normal', 'numpy.random.normal', (['(138)', '(20)', '(ngal,)'], {}), '(138, 20, (ngal,))\n', (16332, 16350), False, 'import numpy\n'), ((16359, 16397), 'numpy.random.normal', 'numpy.random.normal', (['(912)', '(130)', '(ngal,)'], {}), '(912, 130, (ngal,))\n', (16378, 16397), False, 'import numpy\n'), ((16406, 16444), 'numpy.random.normal', 'numpy.random.normal', (['(1.3)', '(0.1)', '(ngal,)'], {}), '(1.3, 0.1, (ngal,))\n', (16425, 16444), False, 'import numpy\n'), ((16456, 16496), 'numpy.random.normal', 'numpy.random.normal', (['(11.34)', '(0.9)', '(ngal,)'], {}), '(11.34, 0.9, (ngal,))\n', (16475, 16496), False, 'import numpy\n'), ((16508, 16549), 'numpy.random.normal', 'numpy.random.normal', (['(-48.12)', '(4.3)', '(ngal,)'], {}), '(-48.12, 4.3, (ngal,))\n', (16527, 16549), False, 'import numpy\n'), ((16559, 16598), 'numpy.random.normal', 'numpy.random.normal', (['(1024)', '(230)', '(ngal,)'], {}), '(1024, 230, (ngal,))\n', (16578, 16598), False, 'import numpy\n'), ((16609, 16643), 'numpy.random.normal', 'numpy.random.normal', (['(0)', 's', '(ngal,)'], {}), '(0, s, (ngal,))\n', (16628, 16643), False, 'import numpy\n'), ((16653, 16687), 'numpy.random.normal', 'numpy.random.normal', (['(0)', 's', '(ngal,)'], {}), '(0, s, (ngal,))\n', (16672, 16687), False, 'import numpy\n'), ((16697, 16731), 'numpy.random.normal', 'numpy.random.normal', (['(0)', 's', '(ngal,)'], {}), '(0, s, (ngal,))\n', (16716, 16731), False, 'import numpy\n'), ((16744, 16775), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x', 'y': 'y', 'z': 'z'}), '(x=x, y=y, z=z)\n', (16760, 16775), False, 'import treecorr\n'), ((16787, 16886), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra', 'dec': 'dec', 'r': 'r', 'ra_units': '"""hour"""', 'dec_units': '"""deg"""', 'w': 'w', 'g1': 'g1', 'g2': 'g2', 'k': 'k'}), "(ra=ra, dec=dec, r=r, ra_units='hour', dec_units='deg', w=w,\n g1=g1, g2=g2, k=k)\n", (16803, 16886), False, 'import treecorr\n'), ((17135, 17183), 
'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1_asc.x', 'x'], {}), '(cat1_asc.x, x)\n', (17168, 17183), False, 'import numpy\n'), ((17188, 17236), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1_asc.y', 'y'], {}), '(cat1_asc.y, y)\n', (17221, 17236), False, 'import numpy\n'), ((17241, 17289), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1_asc.z', 'z'], {}), '(cat1_asc.z, z)\n', (17274, 17289), False, 'import numpy\n'), ((17596, 17646), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2_asc.ra', 'ra'], {}), '(cat2_asc.ra, ra)\n', (17629, 17646), False, 'import numpy\n'), ((17651, 17703), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2_asc.dec', 'dec'], {}), '(cat2_asc.dec, dec)\n', (17684, 17703), False, 'import numpy\n'), ((17708, 17756), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2_asc.r', 'r'], {}), '(cat2_asc.r, r)\n', (17741, 17756), False, 'import numpy\n'), ((17761, 17809), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2_asc.w', 'w'], {}), '(cat2_asc.w, w)\n', (17794, 17809), False, 'import numpy\n'), ((17814, 17864), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2_asc.g1', 'g1'], {}), '(cat2_asc.g1, g1)\n', (17847, 17864), False, 'import numpy\n'), ((17869, 17919), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2_asc.g2', 'g2'], {}), '(cat2_asc.g2, g2)\n', (17902, 17919), False, 'import numpy\n'), ((17924, 17972), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2_asc.k', 'k'], {}), '(cat2_asc.k, k)\n', (17957, 17972), False, 'import numpy\n'), ((18205, 18254), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1_fits.x', 'x'], {}), '(cat1_fits.x, x)\n', (18238, 18254), False, 'import numpy\n'), 
((18259, 18308), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1_fits.y', 'y'], {}), '(cat1_fits.y, y)\n', (18292, 18308), False, 'import numpy\n'), ((18313, 18362), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat1_fits.z', 'z'], {}), '(cat1_fits.z, z)\n', (18346, 18362), False, 'import numpy\n'), ((18692, 18743), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2_fits.ra', 'ra'], {}), '(cat2_fits.ra, ra)\n', (18725, 18743), False, 'import numpy\n'), ((18748, 18801), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2_fits.dec', 'dec'], {}), '(cat2_fits.dec, dec)\n', (18781, 18801), False, 'import numpy\n'), ((18806, 18855), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2_fits.r', 'r'], {}), '(cat2_fits.r, r)\n', (18839, 18855), False, 'import numpy\n'), ((18860, 18909), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2_fits.w', 'w'], {}), '(cat2_fits.w, w)\n', (18893, 18909), False, 'import numpy\n'), ((18914, 18965), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2_fits.g1', 'g1'], {}), '(cat2_fits.g1, g1)\n', (18947, 18965), False, 'import numpy\n'), ((18970, 19021), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2_fits.g2', 'g2'], {}), '(cat2_fits.g2, g2)\n', (19003, 19021), False, 'import numpy\n'), ((19026, 19075), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cat2_fits.k', 'k'], {}), '(cat2_fits.k, k)\n', (19059, 19075), False, 'import numpy\n'), ((1374, 1408), 'numpy.bitwise_or', 'numpy.bitwise_or', (['flags[sub]', 'flag'], {}), '(flags[sub], flag)\n', (1390, 1408), False, 'import numpy\n'), ((13834, 13866), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (13860, 13866), False, 'import numpy\n'), ((13879, 13911), 
'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (13905, 13911), False, 'import numpy\n'), ((13932, 13975), 'os.path.join', 'os.path.join', (['"""data"""', "('test_list%d.dat' % k)"], {}), "('data', 'test_list%d.dat' % k)\n", (13944, 13975), False, 'import os\n'), ((14618, 14673), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cats[k].x', 'x_list[k]'], {}), '(cats[k].x, x_list[k])\n', (14651, 14673), False, 'import numpy\n'), ((14682, 14737), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cats[k].y', 'y_list[k]'], {}), '(cats[k].y, y_list[k])\n', (14715, 14737), False, 'import numpy\n'), ((15088, 15143), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cats[k].x', 'x_list[k]'], {}), '(cats[k].x, x_list[k])\n', (15121, 15143), False, 'import numpy\n'), ((15152, 15207), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cats[k].y', 'y_list[k]'], {}), '(cats[k].y, y_list[k])\n', (15185, 15207), False, 'import numpy\n'), ((15650, 15705), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cats[k].x', 'x_list[k]'], {}), '(cats[k].x, x_list[k])\n', (15683, 15705), False, 'import numpy\n'), ((15714, 15769), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cats[k].y', 'y_list[k]'], {}), '(cats[k].y, y_list[k])\n', (15747, 15769), False, 'import numpy\n'), ((15972, 16027), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cats[k].x', 'x_list[k]'], {}), '(cats[k].x, x_list[k])\n', (16005, 16027), False, 'import numpy\n'), ((16036, 16091), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['cats[k].y', 'y_list[k]'], {}), '(cats[k].y, y_list[k])\n', (16069, 16091), False, 'import numpy\n'), ((16951, 16985), 'os.path.join', 'os.path.join', (['"""output"""', '"""cat1.dat"""'], {}), "('output', 'cat1.dat')\n", (16963, 16985), 
False, 'import os\n'), ((17018, 17052), 'os.path.join', 'os.path.join', (['"""output"""', '"""cat1.dat"""'], {}), "('output', 'cat1.dat')\n", (17030, 17052), False, 'import os\n'), ((17306, 17340), 'os.path.join', 'os.path.join', (['"""output"""', '"""cat2.dat"""'], {}), "('output', 'cat2.dat')\n", (17318, 17340), False, 'import os\n'), ((17392, 17426), 'os.path.join', 'os.path.join', (['"""output"""', '"""cat2.dat"""'], {}), "('output', 'cat2.dat')\n", (17404, 17426), False, 'import os\n'), ((18012, 18047), 'os.path.join', 'os.path.join', (['"""output"""', '"""cat1.fits"""'], {}), "('output', 'cat1.fits')\n", (18024, 18047), False, 'import os\n'), ((18099, 18134), 'os.path.join', 'os.path.join', (['"""output"""', '"""cat1.fits"""'], {}), "('output', 'cat1.fits')\n", (18111, 18134), False, 'import os\n'), ((18379, 18414), 'os.path.join', 'os.path.join', (['"""output"""', '"""cat2.fits"""'], {}), "('output', 'cat2.fits')\n", (18391, 18414), False, 'import os\n'), ((18448, 18483), 'os.path.join', 'os.path.join', (['"""output"""', '"""cat2.fits"""'], {}), "('output', 'cat2.fits')\n", (18460, 18483), False, 'import os\n'), ((1234, 1251), 'numpy.zeros', 'numpy.zeros', (['nobj'], {}), '(nobj)\n', (1245, 1251), False, 'import numpy\n'), ((1314, 1346), 'numpy.random.random_sample', 'numpy.random.random_sample', (['nobj'], {}), '(nobj)\n', (1340, 1346), False, 'import numpy\n'), ((2799, 2839), 'numpy.logical_or', 'numpy.logical_or', (['(flags == 0)', '(flags == 4)'], {}), '(flags == 0, flags == 4)\n', (2815, 2839), False, 'import numpy\n'), ((2878, 2918), 'numpy.logical_or', 'numpy.logical_or', (['(flags == 0)', '(flags == 4)'], {}), '(flags == 0, flags == 4)\n', (2894, 2918), False, 'import numpy\n'), ((2962, 3003), 'numpy.logical_and', 'numpy.logical_and', (['(flags != 0)', '(flags != 4)'], {}), '(flags != 0, flags != 4)\n', (2979, 3003), False, 'import numpy\n')] |
import numpy as np
# tolerance used by all stopping criteria
EPS = 0.001
def value_iteration(model, maxiter=100):
    """
    Solve the supplied environment with value iteration.
    Parameters
    ----------
    model : python object
        Describes the environment: reward structure ``R``, transition
        dynamics ``P``, discount factor ``gamma`` and ``num_states``.
    maxiter : int
        Upper bound on the number of sweeps over the state space.
    Return
    ------
    val_ : numpy array of shape (N, 1)
        Value function of the environment where N is the number
        of states in the environment.
    pi : numpy array of shape (N, 1)
        Optimal policy of the environment.
    """
    n_states = model.num_states
    # start from an all-ones policy and an all-zeros value function
    pi = np.ones((n_states, 1))
    val_ = np.zeros((n_states, 1))
    for i in range(maxiter):
        delta = 0
        # one Bellman-optimality backup over every state
        for s in range(n_states):
            old_value = val_[s].copy()
            q_values = np.sum((model.R[s] + model.gamma * val_) * model.P[s, :, :], 0)
            val_[s] = np.max(q_values)
            # track the largest update in this sweep
            delta = np.max((delta, np.abs(old_value - val_[s])))
        # converged once the largest update drops below the tolerance
        if delta <= EPS * (1 - model.gamma) / model.gamma:
            print("Value iteration converged after %d iterations." % i)
            break
    # extract the greedy policy from the converged value function
    for s in range(n_states):
        pi[s] = np.argmax(np.sum(val_ * model.P[s, :, :], 0))
    return val_, pi
def policy_iteration(model, maxiter):
    """
    Solve the supplied environment with policy iteration.
    Parameters
    ----------
    model : python object
        Describes the environment: reward structure ``R``, transition
        dynamics ``P``, discount factor ``gamma`` and ``num_states``.
    maxiter : int
        Upper bound on the number of evaluate/improve cycles.
    Return
    ------
    val_ : numpy array of shape (N, 1)
        Value function of the environment where N is the number
        of states in the environment.
    pi : numpy array of shape (N, 1)
        Optimal policy of the environment.
    """
    pi = np.ones((model.num_states, 1))
    val_ = np.zeros((model.num_states, 1))
    for i in range(maxiter):
        stable_policy = True
        # evaluate the current policy, then improve it greedily
        val_ = policy_evaluation(model, val_, pi)
        for s in range(model.num_states):
            greedy_action = np.argmax(
                np.sum((model.R[s] + model.gamma * val_) * model.P[s, :, :], 0))
            if greedy_action != pi[s]:
                pi[s] = greedy_action
                # the policy changed, so another cycle is required
                stable_policy = False
        # converged once a full sweep leaves the policy unchanged
        if stable_policy:
            print("Policy iteration converged after %d iterations." % i)
            break
    return val_, pi
def policy_evaluation(model, val_, policy):
    """
    Evaluate a fixed policy by iterating the Bellman expectation backup
    until the value function stops changing.
    Parameters
    ----------
    model : python object
        Describes the environment: reward structure ``R``, transition
        dynamics ``P``, discount factor ``gamma`` and ``num_states``.
    val_ : numpy array of shape (N, 1)
        Initial value function; updated in place.
    policy : numpy array of shape (N, 1)
        Policy to evaluate (one action index per state).
    Return
    ------
    val_ : numpy array of shape (N, 1)
        Value function of the supplied policy.
    """
    converged = False
    while not converged:
        delta = 0
        for s in range(model.num_states):
            old_value = val_[s].copy()
            # Bellman expectation backup for the action chosen by the policy
            transition = model.P[s, :, int(policy[s])].reshape(-1, 1)
            val_[s] = np.sum((model.R[s] + model.gamma * val_) * transition)
            # track the largest update in this sweep
            delta = np.max((delta, np.abs(old_value - val_[s])))
        # stop once the largest update drops below the tolerance
        if delta <= EPS * (1 - model.gamma) / model.gamma:
            converged = True
    return val_
"numpy.sum",
"numpy.zeros",
"numpy.abs",
"numpy.ones"
] | [((728, 758), 'numpy.ones', 'np.ones', (['(model.num_states, 1)'], {}), '((model.num_states, 1))\n', (735, 758), True, 'import numpy as np\n'), ((770, 801), 'numpy.zeros', 'np.zeros', (['(model.num_states, 1)'], {}), '((model.num_states, 1))\n', (778, 801), True, 'import numpy as np\n'), ((2293, 2323), 'numpy.ones', 'np.ones', (['(model.num_states, 1)'], {}), '((model.num_states, 1))\n', (2300, 2323), True, 'import numpy as np\n'), ((2335, 2366), 'numpy.zeros', 'np.zeros', (['(model.num_states, 1)'], {}), '((model.num_states, 1))\n', (2343, 2366), True, 'import numpy as np\n'), ((1568, 1606), 'numpy.sum', 'np.sum', (['(val_ * model.P[state, :, :])', '(0)'], {}), '(val_ * model.P[state, :, :], 0)\n', (1574, 1606), True, 'import numpy as np\n'), ((1113, 1184), 'numpy.sum', 'np.sum', (['((model.R[state] + model.gamma * val_) * model.P[state, :, :])', '(0)'], {}), '((model.R[state] + model.gamma * val_) * model.P[state, :, :], 0)\n', (1119, 1184), True, 'import numpy as np\n'), ((2647, 2718), 'numpy.sum', 'np.sum', (['((model.R[state] + model.gamma * val_) * model.P[state, :, :])', '(0)'], {}), '((model.R[state] + model.gamma * val_) * model.P[state, :, :], 0)\n', (2653, 2718), True, 'import numpy as np\n'), ((1264, 1289), 'numpy.abs', 'np.abs', (['(tmp - val_[state])'], {}), '(tmp - val_[state])\n', (1270, 1289), True, 'import numpy as np\n'), ((4230, 4255), 'numpy.abs', 'np.abs', (['(tmp - val_[state])'], {}), '(tmp - val_[state])\n', (4236, 4255), True, 'import numpy as np\n')] |
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
sys.path.append(os.path.join('../'))
from lib.BeamDynamicsTools.Boundary import Boundary
from lib.BeamDynamicsTools.Bfield import Bfield, BfieldTF, BfieldVF
from lib.BeamDynamicsTools.Trajectory import Trajectory
from lib.BeamDynamicsTools.Beam import Beam
import pylab as pl
# ===============================================================================
# Calculates Spread in trajectory for non gaussian beam energy distribution
# ===============================================================================
# Injection geometry:
# (x,y,z) = (1.798m, -0.052m, 0.243m)
# alpha = 12.6 degrees (X-Z plane)
# beta = 8.0 degrees (X-Y plane)
alpha0 = 12.6
beta0 = 8.0
alpha = alpha0 / 180.0 * np.pi
beta = beta0 / 180.0 * np.pi
print(alpha, beta)
Rinjection = [1.798, -0.052, 0.243]
# Unit injection-velocity vector built from the two injection angles
Vinjection = [-np.cos(alpha) * np.cos(beta), np.cos(alpha) * np.sin(beta), -np.sin(alpha)]
# Beam energies to scan [eV]
Energy = np.linspace(0.594e6, 0.900e6, 10)
# ------------------------------------------------------------------------------
# Import poloidal Boundary points and generate the vessel Boundary
Rb = np.loadtxt('../data/CmodCoordinatesRZ.dat', usecols=[0])
Zb = np.loadtxt('../data/CmodCoordinatesRZ.dat', usecols=[1])
Vessel = Boundary(Rb, Zb)
# 3D plot of vessel Boundary
ax = Vessel.Figure3D(1)
Vessel.Plot3D(ax)
# ------------------------------------------------------------------------------
# Toroidal-field strengths to scan [T]
Bn = np.array([0.0, 0.05, 0.10, 0.15, 0.20, 0.25, 0.30, 0.35, 0.40, 0.45])
AngleComponents = []
Coordinates = []
Parameters = []
TrajectoryList = []
OutputPath = '../output/'
Color = ['k', 'g', 'r', 'c', 'b', 'm', 'g', 'r', 'c', 'b', 'm', 'g']
# Rough runtime estimate in minutes (~10 s per trajectory)
print(len(Energy) * len(Bn) * 10.0 / 60.0)
for j in range(len(Energy)):
    for i in range(len(Bn)):
        B = BfieldTF(B0=Bn[i])
        Bv = BfieldVF(B0=0.00000)
        T = Trajectory(Vessel, B, Bv, v0=Vinjection, T0=Energy[j])
        # Color by field strength; highlight lowest (j==0), middle (j==4,
        # dashed black) and highest (j==9) energies with wider lines
        T.LineColor = Color[i]
        T.LineWidth = 1.0
        if j == 0:
            T.LineWidth = 2
        if j == 9:
            T.LineWidth = 4
        if j == 4:
            T.LineWidth = 2
            T.LineColor = 'k'
            T.LineStyle = '--'
        TrajectoryList.append(T)
# ------------------------------------------------------------------------------
# Plot 3D results
for i in range(len(TrajectoryList)):
    TrajectoryList[i].Plot3D(ax)
TrajectoryList[-1].Limits3D(ax)
# ------------------------------------------------------------------------------
# Construct Legend
Leg = []
for i in range(len(Bn)):
    Leg.append('B = %0.2fT' % TrajectoryList[i].BFieldTF.B0)
# ------------------------------------------------------------------------------
# Plot 2D projections of Trajectories (Poloidal View)
plt.figure(figsize=(20, 8))
for i in range(len(TrajectoryList)):
    plt.subplot(1, 2, 1)
    TrajectoryList[i].Plot2D('poloidal')
plt.subplot(1, 2, 1)
Vessel.Border('poloidal')
plt.xlim(0.2, 1.4)
plt.ylim(-0.7, 0.5)
plt.xlabel('R [m]')
plt.ylabel('Z [m]')
plt.title(r'Poloidal Projection ($\alpha$ = %0.1f$^o$, $\beta$ = %0.1f$^o$)' %
          (alpha0, beta0))
# ------------------------------------------------------------------------------
# Plot 2D projections of Trajectories (Top View)
for i in range(len(TrajectoryList)):
    plt.subplot(1, 2, 2)
    TrajectoryList[i].Plot2D('top')
plt.subplot(1, 2, 2)
Vessel.Border('top')
plt.xlim(0, 1.2)
plt.ylim(-0.6, 0.6)
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.title(r'Midplane Projection ($\alpha$ = %0.1f$^o$, $\beta$ = %0.1f$^o$)' %
          (alpha0, beta0))
plt.legend(Leg, bbox_to_anchor=(1.28, 1.0))
# ------------------------------------------------------------------------------
# Save Angular and Detection Quantities
# (disabled: the Target quantities are not collected in the loop above)
if False:
    np.savetxt(OutputPath + 'geometry/TargetAngle_Vert_Horiz.dat', AngleComponents)
    np.savetxt(OutputPath + 'geometry/TargetCoordinates.dat', Coordinates)
    Header0 = '(0) I0 [A], (1) B0 [T], (2) X [m] , (3) Y [m], (4) Z [m], (5) incident angle [rad], (6) Detection Angle [rad], (7) optical path length [m] , (8) Detection Angle [rad], (9) Detection Angle [deg], (10) Detector Eff'
    np.savetxt(OutputPath + 'geometry/DetectionParameters.dat',
               (np.array(Parameters)), header=Header0)
if True:
    FigName = 'TrajectoryProjections_alpha%2.2f_beta%2.2f.pdf' % (
        alpha0, beta0)
    # BUG FIX: was the absolute path '/output/plots/', which does not exist;
    # use the project-relative output directory consistent with OutputPath
    FigPath = '../output/plots/'
    # ------------------------------------------------------------------------------
    # Save Figure
    plt.savefig(FigPath + FigName)
    print('File saved: ' + FigName)
plt.show()
| [
"matplotlib.pyplot.ylabel",
"numpy.array",
"lib.BeamDynamicsTools.Boundary.Boundary",
"lib.BeamDynamicsTools.Bfield.BfieldTF",
"numpy.sin",
"lib.BeamDynamicsTools.Bfield.BfieldVF",
"matplotlib.pyplot.xlabel",
"lib.BeamDynamicsTools.Trajectory.Trajectory",
"numpy.linspace",
"matplotlib.pyplot.ylim"... | [((1029, 1064), 'numpy.linspace', 'np.linspace', (['(594000.0)', '(900000.0)', '(10)'], {}), '(594000.0, 900000.0, 10)\n', (1040, 1064), True, 'import numpy as np\n'), ((1184, 1240), 'numpy.loadtxt', 'np.loadtxt', (['"""../data/CmodCoordinatesRZ.dat"""'], {'usecols': '[0]'}), "('../data/CmodCoordinatesRZ.dat', usecols=[0])\n", (1194, 1240), True, 'import numpy as np\n'), ((1246, 1302), 'numpy.loadtxt', 'np.loadtxt', (['"""../data/CmodCoordinatesRZ.dat"""'], {'usecols': '[1]'}), "('../data/CmodCoordinatesRZ.dat', usecols=[1])\n", (1256, 1302), True, 'import numpy as np\n'), ((1340, 1356), 'lib.BeamDynamicsTools.Boundary.Boundary', 'Boundary', (['Rb', 'Zb'], {}), '(Rb, Zb)\n', (1348, 1356), False, 'from lib.BeamDynamicsTools.Boundary import Boundary\n'), ((1719, 1784), 'numpy.array', 'np.array', (['[0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45]'], {}), '([0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45])\n', (1727, 1784), True, 'import numpy as np\n'), ((3382, 3409), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 8)'}), '(figsize=(20, 8))\n', (3392, 3409), True, 'import matplotlib.pyplot as plt\n'), ((3513, 3533), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (3524, 3533), True, 'import matplotlib.pyplot as plt\n'), ((3560, 3578), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.2)', '(1.4)'], {}), '(0.2, 1.4)\n', (3568, 3578), True, 'import matplotlib.pyplot as plt\n'), ((3579, 3598), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.7)', '(0.5)'], {}), '(-0.7, 0.5)\n', (3587, 3598), True, 'import matplotlib.pyplot as plt\n'), ((3599, 3618), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""R [m]"""'], {}), "('R [m]')\n", (3609, 3618), True, 'import matplotlib.pyplot as plt\n'), ((3619, 3638), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Z [m]"""'], {}), "('Z [m]')\n", (3629, 3638), True, 'import matplotlib.pyplot as plt\n'), ((3639, 3671), 'matplotlib.pyplot.title', 
'plt.title', (['"""Poloidal Projection"""'], {}), "('Poloidal Projection')\n", (3648, 3671), True, 'import matplotlib.pyplot as plt\n'), ((3672, 3778), 'matplotlib.pyplot.title', 'plt.title', (["('Poloidal Projection ($\\\\alpha$ = %0.1f$^o$, $\\\\beta$ = %0.1f$^o$)' % (\n alpha0, beta0))"], {}), "(\n 'Poloidal Projection ($\\\\alpha$ = %0.1f$^o$, $\\\\beta$ = %0.1f$^o$)' % (\n alpha0, beta0))\n", (3681, 3778), True, 'import matplotlib.pyplot as plt\n'), ((4006, 4026), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (4017, 4026), True, 'import matplotlib.pyplot as plt\n'), ((4048, 4064), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1.2)'], {}), '(0, 1.2)\n', (4056, 4064), True, 'import matplotlib.pyplot as plt\n'), ((4065, 4084), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.6)', '(0.6)'], {}), '(-0.6, 0.6)\n', (4073, 4084), True, 'import matplotlib.pyplot as plt\n'), ((4085, 4104), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x [m]"""'], {}), "('x [m]')\n", (4095, 4104), True, 'import matplotlib.pyplot as plt\n'), ((4105, 4124), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y [m]"""'], {}), "('y [m]')\n", (4115, 4124), True, 'import matplotlib.pyplot as plt\n'), ((4125, 4231), 'matplotlib.pyplot.title', 'plt.title', (["('Midplane Projection ($\\\\alpha$ = %0.1f$^o$, $\\\\beta$ = %0.1f$^o$)' % (\n alpha0, beta0))"], {}), "(\n 'Midplane Projection ($\\\\alpha$ = %0.1f$^o$, $\\\\beta$ = %0.1f$^o$)' % (\n alpha0, beta0))\n", (4134, 4231), True, 'import matplotlib.pyplot as plt\n'), ((4230, 4273), 'matplotlib.pyplot.legend', 'plt.legend', (['Leg'], {'bbox_to_anchor': '(1.28, 1.0)'}), '(Leg, bbox_to_anchor=(1.28, 1.0))\n', (4240, 4273), True, 'import matplotlib.pyplot as plt\n'), ((5220, 5250), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(FigPath + FigName)'], {}), '(FigPath + FigName)\n', (5231, 5250), True, 'import matplotlib.pyplot as plt\n'), ((5284, 5294), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(5292, 5294), True, 'import matplotlib.pyplot as plt\n'), ((99, 118), 'os.path.join', 'os.path.join', (['"""../"""'], {}), "('../')\n", (111, 118), False, 'import os\n'), ((3451, 3471), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (3462, 3471), True, 'import matplotlib.pyplot as plt\n'), ((3949, 3969), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (3960, 3969), True, 'import matplotlib.pyplot as plt\n'), ((4493, 4572), 'numpy.savetxt', 'np.savetxt', (["(OutputPath + 'geometry/TargetAngle_Vert_Horiz.dat')", 'AngleComponents'], {}), "(OutputPath + 'geometry/TargetAngle_Vert_Horiz.dat', AngleComponents)\n", (4503, 4572), True, 'import numpy as np\n'), ((4577, 4647), 'numpy.savetxt', 'np.savetxt', (["(OutputPath + 'geometry/TargetCoordinates.dat')", 'Coordinates'], {}), "(OutputPath + 'geometry/TargetCoordinates.dat', Coordinates)\n", (4587, 4647), True, 'import numpy as np\n'), ((922, 934), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (928, 934), True, 'import numpy as np\n'), ((936, 949), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (942, 949), True, 'import numpy as np\n'), ((952, 964), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (958, 964), True, 'import numpy as np\n'), ((967, 980), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (973, 980), True, 'import numpy as np\n'), ((2072, 2090), 'lib.BeamDynamicsTools.Bfield.BfieldTF', 'BfieldTF', ([], {'B0': 'Bn[i]'}), '(B0=Bn[i])\n', (2080, 2090), False, 'from lib.BeamDynamicsTools.Bfield import Bfield, BfieldTF, BfieldVF\n'), ((2104, 2120), 'lib.BeamDynamicsTools.Bfield.BfieldVF', 'BfieldVF', ([], {'B0': '(0.0)'}), '(B0=0.0)\n', (2112, 2120), False, 'from lib.BeamDynamicsTools.Bfield import Bfield, BfieldTF, BfieldVF\n'), ((2137, 2191), 'lib.BeamDynamicsTools.Trajectory.Trajectory', 'Trajectory', (['Vessel', 'B', 'Bv'], {'v0': 'Vinjection', 'T0': 'Energy[j]'}), '(Vessel, B, Bv, v0=Vinjection, T0=Energy[j])\n', 
(2147, 2191), False, 'from lib.BeamDynamicsTools.Trajectory import Trajectory\n'), ((4954, 4974), 'numpy.array', 'np.array', (['Parameters'], {}), '(Parameters)\n', (4962, 4974), True, 'import numpy as np\n'), ((906, 919), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (912, 919), True, 'import numpy as np\n')] |
from __future__ import print_function
import os
import sys
import numpy as np
if len(sys.argv) > 1 and sys.argv[1] == '-p':
import mfem.par as mfem
use_parallel = True
from mfem.common.mpi_debug import nicePrint as print
from mpi4py import MPI
myid = MPI.COMM_WORLD.rank
else:
import mfem.ser as mfem
use_parallel = False
myid = 0
def run_test():
    """Exercise DenseMatrix assignment from numpy arrays and print the results."""
    # 3x3 matrix filled with the values 0..8
    square = mfem.DenseMatrix(3, 3)
    values = np.arange(9.).reshape(3, 3)
    square.Assign(values)
    square.Print()
    print(values)
    # 3x5 matrix whose rows are the constants 1, 2 and 3
    rows = [np.zeros(5) + k for k in (1, 2, 3)]
    rect = mfem.DenseMatrix(3, 5)
    rect.Assign(np.vstack(rows))
    rect.Print()
    # view the underlying storage as a flat length-15 vector
    mfem.Vector(rect.GetData(), 15).Print()
    rect.Print("densmat.dat")
if __name__=='__main__':
    run_test()
| [
"mfem.ser.DenseMatrix",
"numpy.zeros",
"numpy.vstack",
"numpy.arange"
] | [((403, 425), 'mfem.ser.DenseMatrix', 'mfem.DenseMatrix', (['(3)', '(3)'], {}), '(3, 3)\n', (419, 425), True, 'import mfem.ser as mfem\n'), ((598, 620), 'mfem.ser.DenseMatrix', 'mfem.DenseMatrix', (['(3)', '(5)'], {}), '(3, 5)\n', (614, 620), True, 'import mfem.ser as mfem\n'), ((531, 542), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (539, 542), True, 'import numpy as np\n'), ((553, 564), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (561, 564), True, 'import numpy as np\n'), ((575, 586), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (583, 586), True, 'import numpy as np\n'), ((635, 655), 'numpy.vstack', 'np.vstack', (['[x, y, z]'], {}), '([x, y, z])\n', (644, 655), True, 'import numpy as np\n'), ((438, 452), 'numpy.arange', 'np.arange', (['(9.0)'], {}), '(9.0)\n', (447, 452), True, 'import numpy as np\n'), ((490, 504), 'numpy.arange', 'np.arange', (['(9.0)'], {}), '(9.0)\n', (499, 504), True, 'import numpy as np\n')] |
import nixio
import numpy as np
import matplotlib.pyplot as plt
from .efish_ephys_repro import EfishEphys
class Chirps(EfishEphys):
    """Accessor for runs of the relacs ``Chirps`` RePro stored in a NIX file.
    Exposes the chirp stimulus settings (chirp times, beat specification,
    delta f, chirp duration and size) and an overview plot of a single
    stimulus presentation.
    """
    # repro name used to match this accessor class to recorded repro runs
    _repro_name = "Chirps"
    def __init__(self, repro_run: nixio.Tag, traces, relacs_nix_version=1.1):
        """Create a Chirps accessor for the given repro-run tag and traces."""
        super().__init__(repro_run, traces, relacs_nix_version=relacs_nix_version)
    @property
    def chirp_times(self):
        """ The times of the artificial chirps of a given stimulus presentation.
        Returns
        -------
        list
            One entry per stimulus presentation with the chirp times
            relative to stimulus onset
        str
            The unit
        """
        cts = []
        unit = ""
        # collect the chirp times of every presentation; the unit reported
        # is the one found on the last stimulus
        for s in self.stimuli:
            metadata = s.metadata
            cts.append(metadata[s.name]["ChirpTimes"][0])
            unit = s.metadata[s.name]["ChirpTimes"][1]
        return cts, unit
    @property
    def beat_specification(self):
        """Returns the way the beat is specified. Will return either *absolute frequency* or "Relative EODf".
        In the first case the beat frequency is given by the *delta_f* property, in the latter by the *relative_eodf* property.
        Returns
        -------
        str
            the beat selection setting of the Chirps RePro.
        """
        spec = self.metadata["RePro-Info"]["settings"]["beatsel"][0][0]
        return spec
    @property
    def relative_eodf(self):
        """The beat frequency specified relative to the EOD frequency of the fish.
        Returns
        -------
        float
            the releodf setting of the repro run.
        """
        rel = self.metadata["RePro-Info"]["settings"]["releodf"][0][0]
        return rel
    @property
    def delta_f(self):
        """The difference frequency to the recorded fish's EOD frequency for all stimulus presentations
        Returns
        -------
        float
            The dfs used.
        str
            the unit
        """
        df = self.metadata["RePro-Info"]["settings"]["deltaf"][0][0]
        unit = self.metadata["RePro-Info"]["settings"]["deltaf"][1]
        return df, unit
    @property
    def chirp_duration(self):
        """The chirp durations of the stimulus presentations.
        Returns
        -------
        float
            The chirp duration.
        str
            the unit
        """
        cd = self.metadata["RePro-Info"]["settings"]["chirpwidth"][0][0]
        unit = self.metadata["RePro-Info"]["settings"]["chirpwidth"][1]
        return cd, unit
    @property
    def chirp_size(self):
        """The size of the frequency excursion of the chirp.
        Returns
        -------
        list
            List containing the chirp size for each stimulus presentation.
        str
            the unit
        """
        cs = self.metadata["RePro-Info"]["settings"]["chirpsize"][0][0]
        unit = self.metadata["RePro-Info"]["settings"]["chirpsize"][1]
        return cs, unit
    def _plot_axis(self, axis, x_data, y_data, spikes, chirp_times, ylabel):
        """Draw one subplot: the trace plus spike and chirp markers.
        Parameters
        ----------
        axis : matplotlib axis
            The axis to draw into.
        x_data, y_data : array-like
            The trace to plot.
        spikes : array-like
            Spike times; drawn as stars at the top of the trace.
        chirp_times : array-like
            Chirp times; drawn as circles at the bottom of the trace.
        ylabel : str
            Label for the y-axis.
        """
        axis.plot(x_data, y_data, lw=0.5, color="tab:blue", label="voltage")
        # spikes are marked at the trace maximum, chirps at the minimum
        axis.scatter(spikes, np.ones_like(spikes) * np.max(y_data), s=10, marker="*", c="tab:green", label="spikes")
        axis.scatter(chirp_times, np.ones_like(chirp_times) * np.min(y_data), s=20, marker="o", c="tab:red", label="chirps")
        axis.set_ylabel(ylabel)
        axis.spines["top"].set_visible(False)
        axis.spines["right"].set_visible(False)
        axis.set_xlim([x_data[0], x_data[-1]])
    def plot_overview(self, stimulus_index=0, filename=None):
        """Plot membrane voltage, local EOD and stimulus output of one
        stimulus presentation, with spike and chirp markers on each panel.
        Parameters
        ----------
        stimulus_index : int, optional
            The stimulus index, by default 0
        filename: str, optional
            The filename for the figure. If not given, the plot will be shown. By default None
        """
        spikes = self.spikes(stimulus_index=stimulus_index)
        voltage, time = self.membrane_voltage(stimulus_index=stimulus_index)
        eod, eod_time = self.local_eod(stimulus_index=stimulus_index)
        stim, stim_time = self.stimulus_output(stimulus_index=stimulus_index)
        chirp_times, _ = self.chirp_times
        c_times = chirp_times[stimulus_index]
        # three stacked panels sharing the time axis
        fig, axes = plt.subplots(ncols=1, nrows=3, sharex="all")
        self._plot_axis(axes[0], time, voltage, spikes, c_times, "voltage [mV]")
        axes[0].legend(fontsize=7, ncol=3, loc=(0.5, 1.05))
        self._plot_axis(axes[1], eod_time, eod, spikes, c_times, "voltage [mV]")
        self._plot_axis(axes[2], stim_time, stim, spikes, c_times, "voltage [mV]")
        axes[-1].set_xlabel("time [s]")
        if filename is not None:
            fig.savefig(filename)
            plt.close()
        else:
            plt.show()
| [
"numpy.ones_like",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.min",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((4220, 4264), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(1)', 'nrows': '(3)', 'sharex': '"""all"""'}), "(ncols=1, nrows=3, sharex='all')\n", (4232, 4264), True, 'import matplotlib.pyplot as plt\n'), ((4690, 4701), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4699, 4701), True, 'import matplotlib.pyplot as plt\n'), ((4728, 4738), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4736, 4738), True, 'import matplotlib.pyplot as plt\n'), ((3093, 3113), 'numpy.ones_like', 'np.ones_like', (['spikes'], {}), '(spikes)\n', (3105, 3113), True, 'import numpy as np\n'), ((3116, 3130), 'numpy.max', 'np.max', (['y_data'], {}), '(y_data)\n', (3122, 3130), True, 'import numpy as np\n'), ((3215, 3240), 'numpy.ones_like', 'np.ones_like', (['chirp_times'], {}), '(chirp_times)\n', (3227, 3240), True, 'import numpy as np\n'), ((3243, 3257), 'numpy.min', 'np.min', (['y_data'], {}), '(y_data)\n', (3249, 3257), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright 2016 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Creates token_lengths.npy from a full corpus stored as an SQLite database.
The file contains the token length counts as a numpy array.
"""
import sys
import numpy as np
from tqdm import tqdm
from corpus import Corpus
def main(len=len):
    """
    Count the token length of every entry in the corpus named on the
    command line and save the counts as ``token_lengths.npy``.
    Parameters
    ----------
    len : callable
        Bound as a local default for speed in the hot loop; do not override.
    """
    _, filename = sys.argv
    corpus = Corpus.connect_to(filename)
    total = len(corpus)
    array = np.empty(total, dtype=np.uint32)
    # largest value representable by the uint32 output array
    MAX = 2**32 - 1
    for i, tokens in enumerate(tqdm(corpus, total=total)):
        n_tokens = len(tokens)
        # raise instead of assert: asserts are stripped under ``python -O``,
        # which would let oversized counts silently wrap around in uint32
        if n_tokens > MAX:
            raise OverflowError('token count %d does not fit in uint32' % n_tokens)
        array[i] = n_tokens
        # free the token list eagerly; corpora can be very large
        del tokens
    np.save('token_lengths', array)
if __name__ == '__main__':
    main()
| [
"numpy.empty",
"tqdm.tqdm",
"corpus.Corpus.connect_to",
"numpy.save"
] | [((913, 940), 'corpus.Corpus.connect_to', 'Corpus.connect_to', (['filename'], {}), '(filename)\n', (930, 940), False, 'from corpus import Corpus\n'), ((978, 1010), 'numpy.empty', 'np.empty', (['total'], {'dtype': 'np.uint32'}), '(total, dtype=np.uint32)\n', (986, 1010), True, 'import numpy as np\n'), ((1206, 1237), 'numpy.save', 'np.save', (['"""token_lengths"""', 'array'], {}), "('token_lengths', array)\n", (1213, 1237), True, 'import numpy as np\n'), ((1064, 1089), 'tqdm.tqdm', 'tqdm', (['corpus'], {'total': 'total'}), '(corpus, total=total)\n', (1068, 1089), False, 'from tqdm import tqdm\n')] |
"""
defines methods to access MPC/rigid element data:
- get_mpc_node_ids( mpc_id, stop_on_failure=True)
- get_mpc_node_ids_c1( mpc_id, stop_on_failure=True)
- get_rigid_elements_with_node_ids(self, node_ids)
- get_dependent_nid_to_components(self, mpc_id=None, stop_on_failure=True)
- get_lines_rigid(model: BDF)
- get_mpcs(model, mpc_id, mpc_id, consider_mpcadd=True,
stop_on_failure=True)
"""
from __future__ import annotations
from collections import defaultdict
from typing import Tuple, List, Dict, Any, TYPE_CHECKING
import numpy as np
from pyNastran.utils.numpy_utils import integer_types
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import BDF
def get_mpc_node_ids(model: BDF, mpc_id: int,
                     consider_mpcadd: bool=True,
                     stop_on_failure: bool=True) -> List[List[int]]:
    r"""
    Get the MPC/MPCADD IDs.

    Parameters
    ----------
    mpc_id : int
        the MPC id
    consider_mpcadd : bool
        MPCADDs should not be considered when referenced from an MPCADD
        from a case control, True should be used.
    stop_on_failure : bool; default=True
        errors if parsing something new

    Returns
    -------
    lines : List[[independent, dependent]]
        one [independent_nid, dependent_nid] pair per nonzero coefficient

        I    I
          \ /
        I---D---I

    """
    mpcs = model.get_reduced_mpcs(
        mpc_id, consider_mpcadd=consider_mpcadd,
        stop_on_failure=stop_on_failure)

    lines = []
    for card in mpcs:
        if card.type != 'MPC':
            # anything other than a plain MPC is unsupported
            msg = 'get_MPCx_node_ids doesnt support %r' % card.type
            if stop_on_failure:
                raise RuntimeError(msg)
            model.log.warning(msg)
            continue
        # the first node on the card is the independent one; pair it with
        # every node whose coefficient actually contributes
        independent_nid = card.node_ids[0]
        for dependent_nid, coefficient in zip(card.node_ids[1:],
                                              card.coefficients[1:]):
            if coefficient != 0.0:
                lines.append([independent_nid, dependent_nid])
    return lines
def get_mpc_node_ids_c1(model: BDF, mpc_id: int,
                        consider_mpcadd: bool=True,
                        stop_on_failure: bool=True) -> Tuple[Dict[str, List[int]],
                                                             Dict[str, List[int]]]:
    r"""
    Get the MPC/MPCADD IDs.

    Parameters
    ----------
    mpc_id : int
        the MPC id
    consider_mpcadd : bool
        MPCADDs should not be considered when referenced from an MPCADD
        from a case control, True should be used.
    stop_on_failure : bool; default=True
        errors if parsing something new

    Returns
    -------
    independent_node_ids_c1 : Dict[component] = node_ids
        maps each constrained DOF (str) to the independent node ids
    dependent_node_ids_c1 : Dict[component] = node_ids
        maps each constrained DOF (str) to the dependent node ids

        I    I
          \ /
        I---D---I

    """
    if not isinstance(mpc_id, integer_types):
        msg = 'mpc_id must be an integer; type=%s, mpc_id=\n%r' % (type(mpc_id), mpc_id)
        raise TypeError(msg)
    mpcs = model.get_reduced_mpcs(
        mpc_id, consider_mpcadd=consider_mpcadd,
        stop_on_failure=stop_on_failure)

    independent_node_ids_c1 = defaultdict(list)
    dependent_node_ids_c1 = defaultdict(list)
    for card in mpcs:
        if card.type != 'MPC':
            msg = 'get_MPCx_node_ids_c1 doesnt support %r' % card.type
            if stop_on_failure:
                raise RuntimeError(msg)
            model.log.warning(msg)
            continue
        nids = card.node_ids
        dofs = card.components
        # the first node is the independent one; every component listed on
        # the card is booked against it
        for dof in dofs:
            independent_node_ids_c1[dof].append(nids[0])
        # the remaining nodes are dependent whenever their coefficient
        # actually contributes
        for nid, coefficient in zip(nids[1:], card.coefficients[1:]):
            if coefficient == 0.0:
                continue
            for dof in dofs:
                dependent_node_ids_c1[dof].append(nid)
    return dict(independent_node_ids_c1), dict(dependent_node_ids_c1)
def get_rigid_elements_with_node_ids(model: BDF, node_ids):
    """
    Finds the rigid elements that reference any of the given nodes.

    Parameters
    ----------
    node_ids : List[int]
        the node ids to check

    Returns
    -------
    rbes : List[int]
        the matching element ids from model.rigid_elements
    """
    try:
        requested_nids = set(node_ids)
    except TypeError:
        # Echo the bad input before re-raising so the caller can see what failed.
        print(node_ids)
        raise

    matching_eids = []
    for eid, relem in model.rigid_elements.items():
        rtype = relem.type
        if rtype in ('RBE3', 'RBE2', 'RBE1', 'RBAR', 'RSPLINE', 'RROD', 'RBAR1'):
            # Any overlap between the element's nodes and the requested set counts.
            element_nids = set(relem.independent_nodes) | set(relem.dependent_nodes)
            if requested_nids & element_nids:
                matching_eids.append(eid)
        elif rtype == 'RSSCON':
            msg = 'skipping card in get_rigid_elements_with_node_ids\n%s' % str(relem)
            model.log.warning(msg)
        else:
            raise RuntimeError(rtype)
    return matching_eids
def get_dependent_nid_to_components(model: BDF, mpc_id=None, stop_on_failure=True):
    """
    Gets a dictionary of the dependent node/components.

    Parameters
    ----------
    mpc_id : int; default=None -> no MPCs are checked
        TODO: add
    stop_on_failure : bool; default=True
        errors if parsing something new

    Returns
    -------
    dependent_nid_to_components : dict[node_id] : components
        node_id : int
            the node_id
        components : str
            the DOFs that are linked

    Nastran can either define a load/motion at a given node.
    SPCs define constraints that may not have loads/motions.
    MPCs and rigid elements define independent and dependent nodes on
    specific DOFs.
      - independent nodes : loads/motions may be defined
      - dependent nodes : loads/motions may not be defined
    """
    dependent_nid_to_components = {}

    # MPCs: every node after the first on each MPC card is treated as dependent.
    if mpc_id is not None:
        mpcs = get_mpcs(model, mpc_id)
        for mpc in mpcs:
            if mpc.type == 'MPC':
                for nid, component in zip(mpc.node_ids, mpc.components):
                    dependent_nid_to_components[nid] = component
            else:
                raise NotImplementedError(mpc)

    # Rigid elements: each card type stores its dependent side differently.
    for unused_eid, rigid_element in model.rigid_elements.items():
        if rigid_element.type == 'RBE2':
            # RBE2: all Gmi nodes are dependent on the same component string (cm).
            dependent_nodes = set(rigid_element.dependent_nodes)
            components = rigid_element.cm
            for nid in dependent_nodes:
                dependent_nid_to_components[nid] = components
        elif rigid_element.type == 'RBE3':
            # RBE3: the reference grid is dependent on refc; UM nodes (Gmi/Cmi)
            # add further dependent node/component pairs.
            dependent_nid_to_components[rigid_element.ref_grid_id] = rigid_element.refc
            for gmi, cmi in zip(rigid_element.Gmi_node_ids, rigid_element.Cmi):
                dependent_nid_to_components[gmi] = cmi
        #if rigid_element.type in ['RBE3', 'RBE2', 'RBE1', 'RBAR']:
            ##independent_nodes = set(rigid_element.independent_nodes)
            #dependent_nodes = set(rigid_element.dependent_nodes)
            #rbe_nids = independent_nodes | dependent_nodes
            #if nids.intersection(rbe_nids):
                #rbes.append(eid)
        #elif rigid_element == 'RSPLINE':
        elif rigid_element.type == 'RBAR':
            # RBAR: ga/gb carry cma/cmb as their dependent components.
            nodes = [rigid_element.ga, rigid_element.gb]
            components = [rigid_element.cma, rigid_element.cmb]
            for nid, componentsi in zip(nodes, components):
                dependent_nid_to_components[nid] = componentsi
        elif rigid_element.type == 'RBAR1':
            # RBAR1: gb is the dependent node; note the loop re-assigns the same
            # key, so only the last entry of cb is kept.
            for componentsi in rigid_element.cb:
                dependent_nid_to_components[rigid_element.gb] = componentsi
        elif rigid_element.type == 'RBE1':
            # +------+-----+-----+-----+-------+-----+-----+-----+
            # |   1  |  2  |  3  |  4  |   5   |  6  |  7  |  8  |
            # +======+=====+=====+=====+=======+=====+=====+=====+
            # | RBE1 | EID | GN1 | CN1 |  GN2  | CN2 | GN3 | CN3 |
            # |      |     | GN4 | CN4 |  GN5  | CN5 | GN6 | CN6 |
            # |      | UM  | GM1 | CM1 |  GM2  | CM2 | GM3 | CM3 |
            # |      | GM4 | CM4 | etc | ALPHA |     |     |     |
            # +------+-----+-----+-----+-------+-----+-----+-----+
            # | RBE1 | 59  | 59  | 123 |  60   | 456 |     |     |
            # |      | UM  | 61  | 246 |       |     |     |     |
            # +------+-----+-----+-----+-------+-----+-----+-----+
            # dependent=m (independent=n)
            for nid, componentsi in zip(rigid_element.Gmi_node_ids, rigid_element.Cmi):
                dependent_nid_to_components[nid] = componentsi
            #dependent = elem.dependent_nodes
            #independent = elem.independent_nodes
            #assert len(dependent) == 1, dependent
            #assert len(independent) == 1, independent
            #lines_rigid.append([dependent[0], independent[0]])
        elif rigid_element.type == 'RROD':
            # RROD: cma/cmb are optional; iterating a component string assigns
            # one character at a time, so only the final digit is retained.
            components = [rigid_element.cma, rigid_element.cmb]
            if rigid_element.cma is not None:
                nid = rigid_element.nodes[0]
                for component in rigid_element.cma:
                    dependent_nid_to_components[nid] = component
            if rigid_element.cmb is not None:
                nid = rigid_element.nodes[1]
                for component in rigid_element.cmb:
                    dependent_nid_to_components[nid] = component
        elif rigid_element.type == 'RSPLINE':
            #independent_nid = rigid_element.independent_nid
            # RSPLINE: entries with component=None are not dependent.
            for nid, component in zip(rigid_element.dependent_nids, rigid_element.dependent_components):
                if component is None:
                    continue
                dependent_nid_to_components[nid] = component
        elif rigid_element.type == 'RSSCON':
            msg = 'skipping card in get_dependent_nid_to_components\n%s' % str(rigid_element)
            model.log.warning(msg)
        else:
            raise RuntimeError(rigid_element.type)
    return dependent_nid_to_components
def get_lines_rigid(model: BDF) -> Any:
    """
    GUI helper function: builds [dependent, independent] node-id pairs for
    drawing rigid elements as lines.

    dependent = (lines[:, 0])
    independent = np.unique(lines[:, 1])
    """
    lines_rigid = []
    for eid, elem in model.rigid_elements.items():
        if elem.type == 'RBE3':
            # UM entries (Gmi) are not handled for RBE3; fail loudly.
            if elem.Gmi != []:
                # UM are dependent
                msg = 'UM is not supported; RBE3 eid=%s Gmi=%s' % (elem.eid, elem.Gmi)
                raise RuntimeError(msg)
            #list_fields = ['RBE3', elem.eid, None, elem.ref_grid_id, elem.refc]
            n1 = elem.ref_grid_id
            assert isinstance(n1, integer_types), 'RBE3 eid=%s ref_grid_id=%s' % (elem.eid, n1)
            # One line from the reference grid to every node of every Gij group.
            for (_weight, ci, Gij) in zip(elem.weights, elem.comps, elem.Gijs):
                Giji = elem._node_ids(nodes=Gij, allow_empty_nodes=True)
                # list_fields += [wt, ci] + Giji
                for n2 in Giji:
                    assert isinstance(n2, integer_types), 'RBE3 eid=%s Giji=%s' % (elem.eid, Giji)
                    lines_rigid.append([n1, n2])
        elif elem.type == 'RBE2':
            #list_fields = ['RBE2', elem.eid, elem.Gn(), elem.cm
                           #] + elem.Gmi_node_ids + [elem.alpha]
            n2 = elem.Gn() # independent
            nids1 = elem.Gmi_node_ids # dependent
            for n1 in nids1:
                lines_rigid.append([n1, n2])
        elif elem.type in ['RBAR', 'RBAR1', 'RROD']: ## TODO: these aren't quite right
            dependent = elem.Ga()
            independent = elem.Gb()
            lines_rigid.append([dependent, independent])
        elif elem.type == 'RBE1':
            # +------+-----+-----+-----+-------+-----+-----+-----+
            # |   1  |  2  |  3  |  4  |   5   |  6  |  7  |  8  |
            # +======+=====+=====+=====+=======+=====+=====+=====+
            # | RBE1 | EID | GN1 | CN1 |  GN2  | CN2 | GN3 | CN3 |
            # |      |     | GN4 | CN4 |  GN5  | CN5 | GN6 | CN6 |
            # |      | UM  | GM1 | CM1 |  GM2  | CM2 | GM3 | CM3 |
            # |      | GM4 | CM4 | etc | ALPHA |     |     |     |
            # +------+-----+-----+-----+-------+-----+-----+-----+
            # | RBE1 | 59  | 59  | 123 |  60   | 456 |     |     |
            # |      | UM  | 61  | 246 |       |     |     |     |
            # +------+-----+-----+-----+-------+-----+-----+-----+
            dependent = elem.dependent_nodes
            independent = elem.independent_nodes
            #assert len(dependent) == 1, dependent
            #assert len(independent) == 1, independent
            # Only 1:1 RBE1s can be drawn as a single line; log and skip others.
            if len(independent) != 1 or len(dependent) != 1:
                msg = 'skipping card because len(independent) != 1 or len(dependent) != 1\n'
                msg += ' independent = %s\n' % independent
                msg += ' dependent = %s\n' % dependent
                msg += str(elem)
                model.log.error(msg)
                continue
            lines_rigid.append([dependent[0], independent[0]])
        elif elem.type == 'RSPLINE':
            independent_nid = elem.independent_nid
            # np.unique drops duplicate dependent ids so each line is drawn once.
            for dependent_nid in np.unique(elem.dependent_nids):
                lines_rigid.append([dependent_nid, independent_nid])
        elif elem.type == 'RSSCON':
            model.log.warning('skipping card in _get_rigid\n%s' % str(elem))
        else:
            print(str(elem))
            raise NotImplementedError(elem.type)
    return lines_rigid
def get_mpcs(model, mpc_id: int, consider_mpcadd: bool=True,
             stop_on_failure: bool=True) -> Tuple[List[int], List[str]]:
    """
    Flattens an MPC set into parallel node-id / component lists.

    Parameters
    ----------
    mpc_id : int
        the desired MPC ID
    stop_on_failure : bool; default=True
        errors if parsing something new

    Returns
    -------
    nids : List[int]
        the constrained nodes
    comps : List[str]
        the components that are constrained on each node

    Considers:
      - MPC
      - MPCADD
    """
    mpcs = model.get_reduced_mpcs(
        mpc_id, consider_mpcadd=consider_mpcadd,
        stop_on_failure=stop_on_failure)

    nids = []
    comps = []
    for mpc in mpcs:
        if mpc.type != 'MPC':
            # Unknown card type; either abort or warn depending on the flag.
            if stop_on_failure:
                model.log.error('not considering:\n%s' % str(mpc))
                raise NotImplementedError(mpc)
            model.log.warning('not considering:\n%s' % str(mpc))
            continue
        # Coefficients are irrelevant here; only node/component pairs matter.
        for nid, comp, unused_coefficient in zip(mpc.nodes, mpc.components, mpc.coefficients):
            nids.append(nid)
            comps.append(comp)
    return nids, comps
| [
"collections.defaultdict",
"numpy.unique"
] | [((3592, 3609), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3603, 3609), False, 'from collections import defaultdict\n'), ((3638, 3655), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3649, 3655), False, 'from collections import defaultdict\n'), ((13745, 13775), 'numpy.unique', 'np.unique', (['elem.dependent_nids'], {}), '(elem.dependent_nids)\n', (13754, 13775), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Tutorial example of training a statistical model to tension test data
from a known distribution.
"""
import sys
import os.path
sys.path.append("../../../..")
sys.path.append("..")
import numpy.random as ra
import xarray as xr
import torch
from maker import make_model, downsample
from pyoptmat import optimize, experiments
from tqdm import tqdm
import pyro
from pyro.infer import SVI
import pyro.optim as optim
import matplotlib.pyplot as plt
import warnings
# Silence library warnings to keep the tutorial output readable.
warnings.filterwarnings("ignore")
# Use doubles for all torch tensors created below.
torch.set_default_tensor_type(torch.DoubleTensor)
# Run on GPU when available, otherwise fall back to CPU.
if torch.cuda.is_available():
    dev = "cuda:0"
else:
    dev = "cpu"
device = torch.device(dev)  # shared device object used throughout the script
# Don't try to optimize for the Young's modulus
def make(n, eta, s0, R, d, **kwargs):
    """
    Build the material model with the Young's modulus pinned to 0.5
    so it is excluded from the optimization.
    """
    fixed_E = torch.tensor(0.5)
    model = make_model(fixed_E, n, eta, s0, R, d, device=device, **kwargs)
    return model.to(device)
if __name__ == "__main__":
    # 1) Load the data for the variance of interest,
    #    cut down to some number of samples, and flatten
    scale = 0.05
    nsamples = 10  # at each strain rate
    input_data = xr.open_dataset(os.path.join("..", "scale-%3.2f.nc" % scale))
    data, results, cycles, types, control = downsample(
        experiments.load_results(input_data, device=device),
        nsamples,
        input_data.nrates,
        input_data.nsamples,
    )
    # 2) Setup names for each parameter and the priors
    # loc priors are drawn at random; scale priors start at a fixed 0.15
    names = ["n", "eta", "s0", "R", "d"]
    loc_loc_priors = [
        torch.tensor(ra.random(), device=device) for i in range(len(names))
    ]
    loc_scale_priors = [torch.tensor(0.15, device=device) for i in range(len(names))]
    scale_scale_priors = [torch.tensor(0.15, device=device) for i in range(len(names))]
    eps = torch.tensor(1.0e-4, device=device)  # white-noise floor for the model
    print("Initial parameter values:")
    print("\tloc loc\t\tloc scale\tscale scale")
    for n, llp, lsp, sp in zip(
        names, loc_loc_priors, loc_scale_priors, scale_scale_priors
    ):
        print("%s:\t%3.2f\t\t%3.2f\t\t%3.2f" % (n, llp, lsp, sp))
    print("")
    # 3) Create the actual model
    model = optimize.HierarchicalStatisticalModel(
        make, names, loc_loc_priors, loc_scale_priors, scale_scale_priors, eps
    ).to(device)
    # 4) Get the guide
    guide = model.make_guide()
    # 5) Setup the optimizer and loss
    lr = 1.0e-2
    g = 1.0  # learning-rate decay factor over the full run (1.0 = no decay)
    niter = 200
    lrd = g ** (1.0 / niter)  # per-step decay applied by ClippedAdam
    num_samples = 1  # ELBO particles per step
    optimizer = optim.ClippedAdam({"lr": lr, "lrd": lrd})
    ls = pyro.infer.Trace_ELBO(num_particles=num_samples)
    svi = SVI(model, guide, optimizer, loss=ls)
    # 6) Infer!
    t = tqdm(range(niter), total=niter, desc="Loss: ")
    loss_hist = []
    for i in t:
        loss = svi.step(data, cycles, types, control, results)
        loss_hist.append(loss)
        t.set_description("Loss %3.2e" % loss)
    # 7) Print out results
    # For each parameter, show inferred loc/scale next to the true values
    # (true loc is 0.50 by construction; true scale is the data `scale`).
    print("")
    print("Inferred distributions:")
    print("\tloc\t\tscale")
    for n in names:
        s = pyro.param(n + model.scale_suffix + model.param_suffix).data
        m = pyro.param(n + model.loc_suffix + model.param_suffix).data
        print("%s:\t%3.2f/0.50\t%3.2f/%3.2f" % (n, m, s, scale))
    print("")
    # 8) Plot convergence
    plt.figure()
    plt.loglog(loss_hist)
    plt.xlabel("Iteration")
    plt.ylabel("Loss")
    plt.tight_layout()
    plt.show()
| [
"matplotlib.pyplot.ylabel",
"torch.cuda.is_available",
"sys.path.append",
"pyoptmat.optimize.HierarchicalStatisticalModel",
"matplotlib.pyplot.loglog",
"numpy.random.random",
"pyro.param",
"matplotlib.pyplot.xlabel",
"torch.set_default_tensor_type",
"pyro.infer.SVI",
"pyro.optim.ClippedAdam",
... | [((170, 200), 'sys.path.append', 'sys.path.append', (['"""../../../.."""'], {}), "('../../../..')\n", (185, 200), False, 'import sys\n'), ((201, 222), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (216, 222), False, 'import sys\n'), ((510, 543), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (533, 543), False, 'import warnings\n'), ((559, 608), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.DoubleTensor'], {}), '(torch.DoubleTensor)\n', (588, 608), False, 'import torch\n'), ((627, 652), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (650, 652), False, 'import torch\n'), ((704, 721), 'torch.device', 'torch.device', (['dev'], {}), '(dev)\n', (716, 721), False, 'import torch\n'), ((1835, 1870), 'torch.tensor', 'torch.tensor', (['(0.0001)'], {'device': 'device'}), '(0.0001, device=device)\n', (1847, 1870), False, 'import torch\n'), ((2532, 2573), 'pyro.optim.ClippedAdam', 'optim.ClippedAdam', (["{'lr': lr, 'lrd': lrd}"], {}), "({'lr': lr, 'lrd': lrd})\n", (2549, 2573), True, 'import pyro.optim as optim\n'), ((2584, 2632), 'pyro.infer.Trace_ELBO', 'pyro.infer.Trace_ELBO', ([], {'num_particles': 'num_samples'}), '(num_particles=num_samples)\n', (2605, 2632), False, 'import pyro\n'), ((2644, 2681), 'pyro.infer.SVI', 'SVI', (['model', 'guide', 'optimizer'], {'loss': 'ls'}), '(model, guide, optimizer, loss=ls)\n', (2647, 2681), False, 'from pyro.infer import SVI\n'), ((3314, 3326), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3324, 3326), True, 'import matplotlib.pyplot as plt\n'), ((3331, 3352), 'matplotlib.pyplot.loglog', 'plt.loglog', (['loss_hist'], {}), '(loss_hist)\n', (3341, 3352), True, 'import matplotlib.pyplot as plt\n'), ((3357, 3380), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (3367, 3380), True, 'import matplotlib.pyplot as plt\n'), ((3385, 3403), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (3395, 3403), True, 'import matplotlib.pyplot as plt\n'), ((3408, 3426), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3424, 3426), True, 'import matplotlib.pyplot as plt\n'), ((3431, 3441), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3439, 3441), True, 'import matplotlib.pyplot as plt\n'), ((1315, 1366), 'pyoptmat.experiments.load_results', 'experiments.load_results', (['input_data'], {'device': 'device'}), '(input_data, device=device)\n', (1339, 1366), False, 'from pyoptmat import optimize, experiments\n'), ((1674, 1707), 'torch.tensor', 'torch.tensor', (['(0.15)'], {'device': 'device'}), '(0.15, device=device)\n', (1686, 1707), False, 'import torch\n'), ((1762, 1795), 'torch.tensor', 'torch.tensor', (['(0.15)'], {'device': 'device'}), '(0.15, device=device)\n', (1774, 1795), False, 'import torch\n'), ((1589, 1600), 'numpy.random.random', 'ra.random', ([], {}), '()\n', (1598, 1600), True, 'import numpy.random as ra\n'), ((2193, 2306), 'pyoptmat.optimize.HierarchicalStatisticalModel', 'optimize.HierarchicalStatisticalModel', (['make', 'names', 'loc_loc_priors', 'loc_scale_priors', 'scale_scale_priors', 'eps'], {}), '(make, names, loc_loc_priors,\n loc_scale_priors, scale_scale_priors, eps)\n', (2230, 2306), False, 'from pyoptmat import optimize, experiments\n'), ((3072, 3127), 'pyro.param', 'pyro.param', (['(n + model.scale_suffix + model.param_suffix)'], {}), '(n + model.scale_suffix + model.param_suffix)\n', (3082, 3127), False, 'import pyro\n'), ((3145, 3198), 'pyro.param', 'pyro.param', (['(n + model.loc_suffix + model.param_suffix)'], {}), '(n + model.loc_suffix + model.param_suffix)\n', (3155, 3198), False, 'import pyro\n'), ((888, 905), 'torch.tensor', 'torch.tensor', (['(0.5)'], {}), '(0.5)\n', (900, 905), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as pl
import h5py
import time
from readout_fn import *
def xyplot(ix, iy):
    """
    Plot the four observed vs. best-fit Stokes profiles for one pixel.
    Reads the module-level wav, obspro and synpro1 arrays.
    """
    pl.close(2)
    fig, axes = pl.subplots(figsize=(10, 6), nrows=2, ncols=2,
                            num='OBSERVED & SYNTHETIC PROFILES')
    axes = axes.flatten()
    row = int(np.ceil(ix))
    col = int(np.ceil(iy))
    # One panel per Stokes parameter (I, Q, U, V).
    for idx in range(0, 4):
        axes[idx].plot(wav - 10830., obspro[row, col, :, idx])
        axes[idx].plot(wav - 10830., synpro1[row, col, idx, :])
    # Annotate the first panel with the selected pixel coordinates.
    axes[0].text(-6.5, 0.5, 'pixels: (' + str(col) + ',' + str(row) + ')')
    pl.tight_layout()
    pl.show()
############################
def onclick(event):
    """
    Matplotlib button-press callback: record the clicked data coordinates
    in the module-level ix/iy and show the profiles for that pixel.
    """
    global ix, iy
    ix = event.xdata
    iy = event.ydata
    print("x-pos:", np.ceil(ix), " y-pos:", np.ceil(iy))
    # Note the swapped order: xyplot expects (row, column).
    xyplot(iy, ix)
##########################
if __name__ == "__main__":
    '''
    program to show the observed and best fitted profiles interactively.
    Click on the inverted maps to see the observed and the synthetic profiles.
    Inputs:
        obsf: observed Stokes profiles file
        invf: synthetic profiles and inverted maps file,
        wavf: wavelength file [*txt]
        nx & ny: size of observed data
    Outputs:
        Inverted maps, observed and synthetic Stokes profiles
    External library (optional):
        readout_fn.py
    '''
    global obspro,synpro1
    obsf = 'observations/spatially_binned_2pix.h5' #observed Stokes profiles
    invf = 'outputs/comp1/xybin_0403_00.h5' #inverted maps
    wavf = 'wavelength_2bin_trim.txt' #wavelength scale
    nx = 125 #xsize
    ny = 226 #ysize
    wav = np.loadtxt(wavf) #read wavelength file
    f1 = h5py.File(obsf,'r') #read observed profiles
    prof = f1['stokes']
    npix,nlambda,stks = f1['stokes'].shape
    inv_ch, synpro1,chi = readout_1c_ch(obsf,nx,ny,invf) #get synthetic profiles and inverted maps
    nx,ny,stk,lmb = synpro1.shape
    obspro = np.reshape(prof,[nx,ny,nlambda,stks])
    mod = inv_ch #inverted maps
    # Lay out the six inverted-parameter maps in a 3x2 grid.
    fig, ax = pl.subplots(figsize=(7,5), nrows=3,ncols=2,num = 'INVERTED MAPS')
    ax = ax.flatten()
    chlabel = ['tau','v','Bx','By','Bz','chi2','beta','deltav','ff']
    # Color limits for the v/Bx/By/Bz panels (indices 1-4).
    vminv = ['-30','-500', '-500','-500']
    vmaxv = ['30','500', '500','500']
    for i in range(0,6):
        # Panel 0 (tau) and panel 5 autoscale; the middle panels use fixed limits.
        if i == 0 or i >4:
            im = ax[i].imshow(mod[:,:,i], origin='lower', cmap=pl.cm.bwr)
        else:
            im = ax[i].imshow(mod[:,:,i], origin='lower', cmap=pl.cm.bwr,vmin = vminv[i-1], vmax = vmaxv[i-1])
        # The last panel shows the chi^2 map instead of a model parameter.
        if i == 5: im = ax[i].imshow(chi, origin='lower', cmap=pl.cm.bwr)
        fig.colorbar(im, ax=ax[i],orientation="horizontal",label=chlabel[i])
    # Hook up the click handler so selecting a pixel plots its profiles.
    cid = fig.canvas.mpl_connect('button_press_event', onclick)
    pl.tight_layout()
    pl.show()
    #fig.canvas.mpl_disconnect(cid)
| [
"numpy.ceil",
"numpy.reshape",
"h5py.File",
"matplotlib.pyplot.close",
"matplotlib.pyplot.tight_layout",
"numpy.loadtxt",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((120, 131), 'matplotlib.pyplot.close', 'pl.close', (['(2)'], {}), '(2)\n', (128, 131), True, 'import matplotlib.pyplot as pl\n'), ((143, 231), 'matplotlib.pyplot.subplots', 'pl.subplots', ([], {'figsize': '(10, 6)', 'nrows': '(2)', 'ncols': '(2)', 'num': '"""OBSERVED & SYNTHETIC PROFILES"""'}), "(figsize=(10, 6), nrows=2, ncols=2, num=\n 'OBSERVED & SYNTHETIC PROFILES')\n", (154, 231), True, 'import matplotlib.pyplot as pl\n'), ((473, 490), 'matplotlib.pyplot.tight_layout', 'pl.tight_layout', ([], {}), '()\n', (488, 490), True, 'import matplotlib.pyplot as pl\n'), ((494, 503), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (501, 503), True, 'import matplotlib.pyplot as pl\n'), ((1537, 1553), 'numpy.loadtxt', 'np.loadtxt', (['wavf'], {}), '(wavf)\n', (1547, 1553), True, 'import numpy as np\n'), ((1586, 1606), 'h5py.File', 'h5py.File', (['obsf', '"""r"""'], {}), "(obsf, 'r')\n", (1595, 1606), False, 'import h5py\n'), ((1836, 1877), 'numpy.reshape', 'np.reshape', (['prof', '[nx, ny, nlambda, stks]'], {}), '(prof, [nx, ny, nlambda, stks])\n', (1846, 1877), True, 'import numpy as np\n'), ((1919, 1985), 'matplotlib.pyplot.subplots', 'pl.subplots', ([], {'figsize': '(7, 5)', 'nrows': '(3)', 'ncols': '(2)', 'num': '"""INVERTED MAPS"""'}), "(figsize=(7, 5), nrows=3, ncols=2, num='INVERTED MAPS')\n", (1930, 1985), True, 'import matplotlib.pyplot as pl\n'), ((2567, 2584), 'matplotlib.pyplot.tight_layout', 'pl.tight_layout', ([], {}), '()\n', (2582, 2584), True, 'import matplotlib.pyplot as pl\n'), ((2587, 2596), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (2594, 2596), True, 'import matplotlib.pyplot as pl\n'), ((256, 267), 'numpy.ceil', 'np.ceil', (['ix'], {}), '(ix)\n', (263, 267), True, 'import numpy as np\n'), ((279, 290), 'numpy.ceil', 'np.ceil', (['iy'], {}), '(iy)\n', (286, 290), True, 'import numpy as np\n'), ((635, 646), 'numpy.ceil', 'np.ceil', (['ix'], {}), '(ix)\n', (642, 646), True, 'import numpy as np\n'), ((658, 669), 'numpy.ceil', 
'np.ceil', (['iy'], {}), '(iy)\n', (665, 669), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from shapecheck import (ShapeError, check_shapes, is_checking_enabled, is_compatible,
set_checking_enabled, str_to_shape)
from .utils import CaptureStdOut
# Fixed-size shapes: matching sizes pass, mismatched sizes raise ShapeError.
def test_basic():
    @check_shapes('3', '4', out_='2')
    def f(x, y):
        return x[:2]**2 + y[:2]**2

    f(np.array([1, 2, 3]), np.array([1, 2, 3, 4]))
    with pytest.raises(ShapeError):
        f(np.array([1, 2, 3]), np.array([2, 3, 4]))


# A named dim ('N') must take the same size in every argument that uses it.
def test_named_dim():
    @check_shapes('3,N', 'N', out_='1,N')
    def f(x, y):
        return (x + y).sum(0, keepdims=True)

    f(np.ones((3, 5)), np.ones((5,)))
    with pytest.raises(ShapeError):
        f(np.ones((3, 4)), np.ones((5,)))


# A named dim repeated within one argument ('A,A') must be self-consistent.
def test_named_dim_one_arg():
    @check_shapes('A,A,N', out_='N')
    def f(x):
        return x.sum((0, 1))

    f(np.ones((5, 5, 7)))
    with pytest.raises(ShapeError):
        f(np.ones((6, 5, 7)))


# '-1' matches any size in that position.
def test_any_dim():
    @check_shapes('N,-1', out_='N,1')
    def f(x):
        return x.sum(-1, keepdims=True)

    f(np.ones((5, 3)))
    f(np.ones((5, 7)))


# Rank must match the spec even when every dim is a wildcard.
def test_ndim_mismatch():
    @check_shapes('-1,-1')
    def f(x):
        return x
    f(np.ones((1, 2)))
    with pytest.raises(ShapeError):
        f(np.ones((1,)))
    with pytest.raises(ShapeError):
        f(np.ones((1, 2, 3)))


# Checking must not print anything, in either the pass or fail path.
def test_no_stdout():
    # Prevent pushing debug messages.
    with CaptureStdOut() as output:
        @check_shapes('3,A,A,N', out_='N')
        def f(x):
            return x.sum((0, 2, 1))
        f(np.ones((3, 5, 5, 7)))
        with pytest.raises(ShapeError):
            f(np.ones((3, 6, 5, 7)))
    assert len(output) == 0


# The examples from the README: None skips checking, '...' matches any
# (possibly empty) run of dims, and named variadics must agree across args.
def test_readme_example():
    import numpy as np
    from shapecheck import check_shapes

    @check_shapes('-1,N', 'N', None, '3,N', out_='3,N')
    def f(a, b, c, d):
        return (a + b).sum(0, keepdims=True) + d

    f(np.ones((7, 5)), np.ones(5), 'anything', np.ones((3, 5)))  # succeeds
    f(np.ones((2, 6)), np.ones(6), np.ones(1), np.ones((3, 6)))  # succeeds
    with pytest.raises(ShapeError):
        f(np.ones((2, 6)), np.ones(5), np.ones(1), np.ones((3, 6)))  # fails

    @check_shapes('1,...,1', '...,1,1')
    def g(a, b):
        pass

    g(np.ones((1, 3, 4, 1)), np.ones((2, 1, 1)))  # succeeds
    g(np.ones((1, 1)), np.ones((1, 1)))  # succeeds
    with pytest.raises(ShapeError):
        g(np.ones((2, 3, 4, 1)), np.ones((1, 1)))  # fails

    @check_shapes('batch,variadic...', 'variadic...')
    def h(a, b):
        pass

    h(np.ones((7, 1, 2)), np.ones((1, 2)))  # succeeds
    with pytest.raises(ShapeError):
        h(np.ones((6, 2)), np.ones((1, 1)))  # fails
    with pytest.raises(ShapeError):
        h(np.ones((6, 2)), np.ones((1)))  # fails


# Arguments with a None spec are ignored entirely (may be non-arrays).
def test_non_array_args():
    @check_shapes(None, '2,N', None)
    def f(x, y, z):
        return 1

    f('some string', np.ones((2, 5)), np.ones((5,)))
    f(np.ones((1, 2, 3)), np.ones((2, 6)), 'non-array object')
    with pytest.raises(ShapeError):
        f(np.ones((1, 1)), np.ones((3, 5)), np.ones((5,)))
    with pytest.raises(ShapeError):
        f('another-test', np.ones((3, 6)), 'non-array object')
# str_to_shape parsing: ints stay ints, names stay strings, whitespace is
# tolerated, and '...' becomes its own element.
@pytest.mark.parametrize('string, shape', [('N,1,3,M', ('N', 1, 3, 'M')),
                                           ('N, 1, 3, M', ('N', 1, 3, 'M')),
                                           ('...,a,1', ('...', 'a', 1)),
                                           ('1, ... ,2', (1, '...', 2)),
                                           ('a,b,c,...', ('a', 'b', 'c', '...')),
                                           ('...', ('...',))])
def test_shape_to_str(string, shape):
    result = str_to_shape(string)
    for a, b in zip(shape, result):
        assert a == b, f'Expected: {shape} Got: {result}'


# Invalid specs: multiple ellipses or non-string inputs must be rejected.
@pytest.mark.parametrize('string', [
    '...,...,...', 'a,...,b,...', '...,1,...', (1, 2), 3, 4.0, [5.0], ['1,2'], ('1,2',)
])
def test_shape_to_str_error(string):
    with pytest.raises(RuntimeError):
        str_to_shape(string)


# Shapes that should satisfy specs containing a variadic '...'.
@pytest.mark.parametrize('shape, expected_shape', [
    ((3, 2, 3), ('n', 2, 'n')),
    ((3, 2, 3), ('n', '...', 2, 'n')),
    ((3, 1, 1, 2, 3), ('n', '...', 2, 'n')),
    ((3, 2, 3), ('...', 'n', 2, 'n')),
    ((1, 1, 3, 2, 3), ('...', 'n', 2, 'n')),
    ((3, 2, 3), ('n', 2, 'n', '...')),
    ((3, 2, 3, 1, 1), ('n', 2, 'n', '...')),
    ((3, 2, 3), ('...',)),
])
def test_compatible_variadic_shapes(shape, expected_shape):
    assert is_compatible(shape, expected_shape)


# Shapes that must NOT satisfy the variadic specs above.
@pytest.mark.parametrize('shape, expected_shape', [
    ((3, 3, 3), ('n', 2, 'n')),
    ((3, 2, 4), ('n', '...', 2, 'n')),
    ((3, 1, 1, 3, 3), ('n', '...', 2, 'n')),
    ((4, 2, 3), ('...', 'n', 2, 'n')),
    ((1, 1, 2, 3), ('...', 'n', 2, 'n')),
    ((3, 3), ('n', 2, 'n', '...')),
    ((2, 3, 1, 1), ('n', 2, 'n', '...')),
])
def test_incompatible_variadic_shapes(shape, expected_shape):
    assert not is_compatible(shape, expected_shape)


# Named variadics ('v...') must bind to the same run of dims across arguments.
@pytest.mark.parametrize('e_shape1, e_shape2, shape1, shape2', [
    (('n...,1,1', 'n...', (1, 2, 3, 1, 1), (1, 2, 3))),
    (('...,1,1', 'n...', (1, 2, 3, 1, 1), (1, 2, 3))),
    (('n...,2,2', '1,n...', (2, 2), (1,))),
    (('n...,1,1', 'a...', (1, 2, 3, 1, 1), (1, 3))),
    (('1,2,a...,3,4', '6,a...,7', (1, 2, 9, 9, 3, 4), (6, 9, 9, 7))),
    (('1,2,a...,3,4', '6,a...,7', (1, 2, 9, 3, 4), (6, 9, 7))),
    (('1,2,a...,3,4', '6,a...,7', (1, 2, 3, 4), (6, 7))),
])
def test_named_variadic_shapes(e_shape1, e_shape2, shape1, shape2):
    @check_shapes(e_shape1, e_shape2)
    def f(a, b):
        pass
    f(np.ones(shape1), np.ones(shape2))


# Named variadics that disagree between arguments must raise.
@pytest.mark.parametrize('e_shape1, e_shape2, shape1, shape2', [
    (('n...,1,1', 'n...', (1, 2, 3, 1, 1), (1, 3, 3))),
    (('n...,1,1', 'n...', (1, 2, 3, 1, 1), (1, 3))),
    (('n...,2,2', '1,n...', (2, 2), (1, 1))),
    (('n...,2,2', 'n...', (2, 2), (1,))),
    (('n...,', 'n...', (2, 2), (1,))),
    (('1,2,a...,3,4', '6,a...,7', (1, 2, 8, 9, 3, 4), (6, 9, 9, 7))),
    (('1,2,a...,3,4', '6,a...,7', (1, 2, 7, 3, 4), (6, 9, 7))),
    (('1,2,a...,3,4', '6,a...,7', (1, 2, 3, 4), (6, 1, 7))),
])
def test_bad_named_variadic_shapes(e_shape1, e_shape2, shape1, shape2):
    @check_shapes(e_shape1, e_shape2)
    def f(a, b):
        pass
    with pytest.raises(ShapeError):
        f(np.ones(shape1), np.ones(shape2))


# The return value is checked against out_ just like an argument.
def test_incompatible_output():
    @check_shapes(out_='1,1')
    def f():
        return np.ones((1,))
    with pytest.raises(ShapeError):
        f()
# Specs may be nested (tuples/dicts) and are matched element-wise against
# equally nested argument/return structures.
def test_nested_structs():
    @check_shapes(('N,1', 'N'), '1,2', out_={'one': ('N,1', 'N'), 'two': ('1,2')})
    def f(one, two):
        return {'one': one, 'two': two}

    f((np.ones((7, 1)), np.ones((7,))), np.ones((1, 2)))
    with pytest.raises(ShapeError):
        f((np.ones((7, 1)), np.ones((6,))), np.ones((1, 2)))


# A return structure that does not match out_ must raise.
def test_readme_nested_example():
    @check_shapes(('N,1', 'N'), '1,2', out_={'one': ('N,1', 'N'), 'two': ('1,2')})
    def f(one, two):
        return {'one': (one[1], one[1]), 'two': two.sum()}

    with pytest.raises(ShapeError):
        f((np.ones((7, 1)), np.ones((7,))), np.ones((1, 2)))


# set_checking_enabled toggles the global flag and works as a context manager.
def test_readme_set_checking_enabled():
    from shapecheck import is_checking_enabled, set_checking_enabled
    assert is_checking_enabled()
    set_checking_enabled(False)
    assert not is_checking_enabled()
    set_checking_enabled(True)
    assert is_checking_enabled()
    with set_checking_enabled(False):
        assert not is_checking_enabled()
    assert is_checking_enabled()


# Decorations applied while checking is disabled are still enforced once it
# is re-enabled.
def test_set_checking_enabled():
    @check_shapes('3', '4', out_='2')
    def f(x, y):
        return x[:2]**2 + y[:2]**2

    set_checking_enabled(False)
    assert not is_checking_enabled()
    f(np.array([1, 2, 3]), np.array([1, 2, 3, 4]))
    f(np.array([1, 2, 3]), np.array([2, 3, 4]))

    @check_shapes('3', '4', out_='2')
    def g(x, y):
        return x[:2]**2 + y[:2]**2

    set_checking_enabled(True)
    assert is_checking_enabled()
    with pytest.raises(ShapeError):
        f(np.array([1, 2, 3]), np.array([2, 3, 4]))
    with pytest.raises(ShapeError):
        g(np.array([1, 2, 3]), np.array([2, 3, 4]))


# Same as above but using the context-manager form; the flag must be
# restored when the with-block exits.
def test_set_checking_enabled_context():
    @check_shapes('3', '4', out_='2')
    def f(x, y):
        return x[:2]**2 + y[:2]**2

    assert is_checking_enabled()
    with set_checking_enabled(False):
        assert not is_checking_enabled()
        f(np.array([1, 2, 3]), np.array([1, 2, 3, 4]))
        f(np.array([1, 2, 3]), np.array([2, 3, 4]))

        @check_shapes('3', '4', out_='2')
        def g(x, y):
            return x[:2]**2 + y[:2]**2

    assert is_checking_enabled()
    with pytest.raises(ShapeError):
        f(np.array([1, 2, 3]), np.array([2, 3, 4]))
    with pytest.raises(ShapeError):
        g(np.array([1, 2, 3]), np.array([2, 3, 4]))


# match_callees_=True propagates the caller's named dims into the callee.
def test_match_callees():
    @check_shapes('N', 'M', 'O', out_='N')
    def f(x, y, z):
        return x

    @check_shapes('N', 'M', 'R', match_callees_=True)
    def g(x, y, z):
        return f(x, y, z)

    g(np.ones(5), np.ones(6), np.ones(7))


# Conflicting dim names between caller and callee must raise.
def test_match_callees_error():
    @check_shapes('N', 'M', 'O', out_='N')
    def f(x, y, z):
        return x

    @check_shapes('M', 'N', 'R', match_callees_=True)
    def g(x, y, z):
        return f(x, y, z)

    with pytest.raises(ShapeError):
        g(np.ones(5), np.ones(6), np.ones(7))
def test_match_callees_complex():
@check_shapes('a, v...', 'v...', out_='v...')
def f(x, y):
return x.sum(0) + y
@check_shapes('v...')
def g(x):
return x.sum()
@check_shapes('a', match_callees_=True)
def h(x):
a = np.ones((x.shape[0], 2, 3, 4))
b = np.ones((2, 3, 4))
f(a, b)
return g(np.ones((5, 4, 3)))
h(np.ones((8)))
@check_shapes('a', match_callees_=True)
def h(x):
a = np.ones((x.shape[0] - 1, 2, 3, 4))
b = np.ones((2, 3, 4))
f(a, b)
return g(np.ones((5, 4, 3)))
with pytest.raises(ShapeError):
h(np.ones((8)))
def test_match_callees_readme():
@check_shapes('M', 'N', 'O', out_='M')
def child_fn(a, b, c):
return a
@check_shapes('M', 'N', 'R')
def parent_fn_1(x, y, z):
return child_fn(y, x, z)
@check_shapes('M', 'N', 'R', match_callees_=True)
def parent_fn_2(x, y, z):
return child_fn(y, x, z)
parent_fn_1(np.ones(5), np.ones(6), np.ones(7)) # succeeds
with pytest.raises(ShapeError):
parent_fn_2(np.ones(5), np.ones(6), np.ones(7)) # fails
@pytest.mark.parametrize('cs_args, cs_kwargs, f_args, f_kwargs', [
(('N', 'M', 'O', 'P'), {}, (1, 2, 3), {}),
(('N', 'M', 'O', 'P'), {}, (1, 2), {'c': 3}),
(('N', 'M', 'O',), {}, (1, 2, 3), {}),
(('N', 'M'), {}, (1, 2), {'c': 3}),
(('N', 'M', 'O', 'P'), {}, (1,), {'c': 3, 'b': 2}),
(('N', 'M', 'O'), {'d': 'P'}, (1, 2, 3), {}),
(('N', 'M'), {'c': 'O', 'd': 'P'}, (1, 2, 3), {}),
(('N',), {'b': 'M', 'c': 'O', 'd': 'P'}, (1, 2), {'c': 3}),
((), {'a': 'N', 'b': 'M', 'c': 'O', 'd': 'P'}, (1, 2, 3), {}),
((), {'a': 'N', 'b': 'M', 'c': 'O', 'd': 'P'}, (1, 2), {'c': 3}),
]) # yapf: disable
def test_check_shapes_signature(cs_args, cs_kwargs, f_args, f_kwargs):
# TODO: write more rigorous shape signature tests
@check_shapes(*cs_args, **cs_kwargs)
def f(a, b, c, *, d):
pass
f_kwargs = {k: np.ones(v) for k, v in f_kwargs.items()}
f(*map(np.ones, f_args), d=np.ones(4), **f_kwargs)
def test_readme_example2():
# yapf: disable
@check_shapes({'imgs': 'N,W,W,-1', 'labels': 'N,1'}, 'N', None, out_='')
def loss_fn(batch, arg2, arg3):
diff = (batch['imgs'].mean((1, 2, 3)) - batch['labels'].squeeze())
return np.mean(diff**2 + arg2)
loss_fn({'imgs': np.ones((3, 2, 2, 1)), 'labels': np.ones((3, 1))},
arg2=np.ones(3), arg3=np.ones((2, 3))) # succeeds
loss_fn({'imgs': np.ones((5, 3, 3, 4)), 'labels': np.ones((5, 1))},
arg2=np.ones(5), arg3='any') # succeeds
with pytest.raises(ShapeError):
loss_fn({'imgs': np.ones((3, 5, 2, 1)), 'labels': np.ones((3, 1))},
arg2=np.ones(3), arg3='any') # fails
# yapf: enable
def test_readme_example3():
@check_shapes({'imgs': 'N,W,W,-1', 'labels': 'N,1'}, aux_info=None, out_='')
def loss(batch, aux_info):
# do something with aux_info
diff = (batch['imgs'].mean((1, 2, 3)) - batch['labels'].squeeze())
return np.mean(diff**2)
loss({'imgs': np.ones((3, 2, 2, 1)), 'labels': np.ones((3, 1))}, np.ones(1))
loss({'imgs': np.ones((5, 3, 3, 4)), 'labels': np.ones((5, 1))}, 'any')
with pytest.raises(ShapeError):
loss({'imgs': np.ones((3, 5, 2, 1)), 'labels': np.ones((3, 1))}, 'any')
@pytest.mark.parametrize('inp, out', [(1, 1), (np.array(2), 2), (3, np.array(4)),
                                      (np.array(5), np.array(6))])
def test_scalar_output(inp, out):
    """The '' spec accepts scalars, whether plain numbers or 0-d arrays."""
    @check_shapes(x='', out_='')
    def produce(x):
        return out
    produce(inp)
| [
"numpy.mean",
"numpy.ones",
"shapecheck.is_compatible",
"shapecheck.check_shapes",
"pytest.mark.parametrize",
"shapecheck.is_checking_enabled",
"numpy.array",
"shapecheck.str_to_shape",
"pytest.raises",
"shapecheck.set_checking_enabled"
] | [((3149, 3389), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""string, shape"""', "[('N,1,3,M', ('N', 1, 3, 'M')), ('N, 1, 3, M', ('N', 1, 3, 'M')), (\n '...,a,1', ('...', 'a', 1)), ('1, ... ,2', (1, '...', 2)), ('a,b,c,...',\n ('a', 'b', 'c', '...')), ('...', ('...',))]"], {}), "('string, shape', [('N,1,3,M', ('N', 1, 3, 'M')), (\n 'N, 1, 3, M', ('N', 1, 3, 'M')), ('...,a,1', ('...', 'a', 1)), (\n '1, ... ,2', (1, '...', 2)), ('a,b,c,...', ('a', 'b', 'c', '...')), (\n '...', ('...',))])\n", (3172, 3389), False, 'import pytest\n'), ((3759, 3883), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""string"""', "['...,...,...', 'a,...,b,...', '...,1,...', (1, 2), 3, 4.0, [5.0], ['1,2'],\n ('1,2',)]"], {}), "('string', ['...,...,...', 'a,...,b,...',\n '...,1,...', (1, 2), 3, 4.0, [5.0], ['1,2'], ('1,2',)])\n", (3782, 3883), False, 'import pytest\n'), ((3993, 4339), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape, expected_shape"""', "[((3, 2, 3), ('n', 2, 'n')), ((3, 2, 3), ('n', '...', 2, 'n')), ((3, 1, 1, \n 2, 3), ('n', '...', 2, 'n')), ((3, 2, 3), ('...', 'n', 2, 'n')), ((1, 1,\n 3, 2, 3), ('...', 'n', 2, 'n')), ((3, 2, 3), ('n', 2, 'n', '...')), ((3,\n 2, 3, 1, 1), ('n', 2, 'n', '...')), ((3, 2, 3), ('...',))]"], {}), "('shape, expected_shape', [((3, 2, 3), ('n', 2, 'n')\n ), ((3, 2, 3), ('n', '...', 2, 'n')), ((3, 1, 1, 2, 3), ('n', '...', 2,\n 'n')), ((3, 2, 3), ('...', 'n', 2, 'n')), ((1, 1, 3, 2, 3), ('...', 'n',\n 2, 'n')), ((3, 2, 3), ('n', 2, 'n', '...')), ((3, 2, 3, 1, 1), ('n', 2,\n 'n', '...')), ((3, 2, 3), ('...',))])\n", (4016, 4339), False, 'import pytest\n'), ((4469, 4783), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape, expected_shape"""', "[((3, 3, 3), ('n', 2, 'n')), ((3, 2, 4), ('n', '...', 2, 'n')), ((3, 1, 1, \n 3, 3), ('n', '...', 2, 'n')), ((4, 2, 3), ('...', 'n', 2, 'n')), ((1, 1,\n 2, 3), ('...', 'n', 2, 'n')), ((3, 3), ('n', 2, 'n', '...')), ((2, 3, 1,\n 1), ('n', 2, 'n', 
'...'))]"], {}), "('shape, expected_shape', [((3, 3, 3), ('n', 2, 'n')\n ), ((3, 2, 4), ('n', '...', 2, 'n')), ((3, 1, 1, 3, 3), ('n', '...', 2,\n 'n')), ((4, 2, 3), ('...', 'n', 2, 'n')), ((1, 1, 2, 3), ('...', 'n', 2,\n 'n')), ((3, 3), ('n', 2, 'n', '...')), ((2, 3, 1, 1), ('n', 2, 'n',\n '...'))])\n", (4492, 4783), False, 'import pytest\n'), ((4915, 5357), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""e_shape1, e_shape2, shape1, shape2"""', "[('n...,1,1', 'n...', (1, 2, 3, 1, 1), (1, 2, 3)), ('...,1,1', 'n...', (1, \n 2, 3, 1, 1), (1, 2, 3)), ('n...,2,2', '1,n...', (2, 2), (1,)), (\n 'n...,1,1', 'a...', (1, 2, 3, 1, 1), (1, 3)), ('1,2,a...,3,4',\n '6,a...,7', (1, 2, 9, 9, 3, 4), (6, 9, 9, 7)), ('1,2,a...,3,4',\n '6,a...,7', (1, 2, 9, 3, 4), (6, 9, 7)), ('1,2,a...,3,4', '6,a...,7', (\n 1, 2, 3, 4), (6, 7))]"], {}), "('e_shape1, e_shape2, shape1, shape2', [('n...,1,1',\n 'n...', (1, 2, 3, 1, 1), (1, 2, 3)), ('...,1,1', 'n...', (1, 2, 3, 1, 1\n ), (1, 2, 3)), ('n...,2,2', '1,n...', (2, 2), (1,)), ('n...,1,1',\n 'a...', (1, 2, 3, 1, 1), (1, 3)), ('1,2,a...,3,4', '6,a...,7', (1, 2, 9,\n 9, 3, 4), (6, 9, 9, 7)), ('1,2,a...,3,4', '6,a...,7', (1, 2, 9, 3, 4),\n (6, 9, 7)), ('1,2,a...,3,4', '6,a...,7', (1, 2, 3, 4), (6, 7))])\n", (4938, 5357), False, 'import pytest\n'), ((5562, 6034), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""e_shape1, e_shape2, shape1, shape2"""', "[('n...,1,1', 'n...', (1, 2, 3, 1, 1), (1, 3, 3)), ('n...,1,1', 'n...', (1,\n 2, 3, 1, 1), (1, 3)), ('n...,2,2', '1,n...', (2, 2), (1, 1)), (\n 'n...,2,2', 'n...', (2, 2), (1,)), ('n...,', 'n...', (2, 2), (1,)), (\n '1,2,a...,3,4', '6,a...,7', (1, 2, 8, 9, 3, 4), (6, 9, 9, 7)), (\n '1,2,a...,3,4', '6,a...,7', (1, 2, 7, 3, 4), (6, 9, 7)), (\n '1,2,a...,3,4', '6,a...,7', (1, 2, 3, 4), (6, 1, 7))]"], {}), "('e_shape1, e_shape2, shape1, shape2', [('n...,1,1',\n 'n...', (1, 2, 3, 1, 1), (1, 3, 3)), ('n...,1,1', 'n...', (1, 2, 3, 1, \n 1), (1, 3)), ('n...,2,2', '1,n...', (2, 2), (1, 
1)), ('n...,2,2',\n 'n...', (2, 2), (1,)), ('n...,', 'n...', (2, 2), (1,)), ('1,2,a...,3,4',\n '6,a...,7', (1, 2, 8, 9, 3, 4), (6, 9, 9, 7)), ('1,2,a...,3,4',\n '6,a...,7', (1, 2, 7, 3, 4), (6, 9, 7)), ('1,2,a...,3,4', '6,a...,7', (\n 1, 2, 3, 4), (6, 1, 7))])\n", (5585, 6034), False, 'import pytest\n'), ((10453, 11050), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cs_args, cs_kwargs, f_args, f_kwargs"""', "[(('N', 'M', 'O', 'P'), {}, (1, 2, 3), {}), (('N', 'M', 'O', 'P'), {}, (1, \n 2), {'c': 3}), (('N', 'M', 'O'), {}, (1, 2, 3), {}), (('N', 'M'), {}, (\n 1, 2), {'c': 3}), (('N', 'M', 'O', 'P'), {}, (1,), {'c': 3, 'b': 2}), (\n ('N', 'M', 'O'), {'d': 'P'}, (1, 2, 3), {}), (('N', 'M'), {'c': 'O',\n 'd': 'P'}, (1, 2, 3), {}), (('N',), {'b': 'M', 'c': 'O', 'd': 'P'}, (1,\n 2), {'c': 3}), ((), {'a': 'N', 'b': 'M', 'c': 'O', 'd': 'P'}, (1, 2, 3),\n {}), ((), {'a': 'N', 'b': 'M', 'c': 'O', 'd': 'P'}, (1, 2), {'c': 3})]"], {}), "('cs_args, cs_kwargs, f_args, f_kwargs', [(('N', 'M',\n 'O', 'P'), {}, (1, 2, 3), {}), (('N', 'M', 'O', 'P'), {}, (1, 2), {'c':\n 3}), (('N', 'M', 'O'), {}, (1, 2, 3), {}), (('N', 'M'), {}, (1, 2), {\n 'c': 3}), (('N', 'M', 'O', 'P'), {}, (1,), {'c': 3, 'b': 2}), (('N',\n 'M', 'O'), {'d': 'P'}, (1, 2, 3), {}), (('N', 'M'), {'c': 'O', 'd': 'P'\n }, (1, 2, 3), {}), (('N',), {'b': 'M', 'c': 'O', 'd': 'P'}, (1, 2), {\n 'c': 3}), ((), {'a': 'N', 'b': 'M', 'c': 'O', 'd': 'P'}, (1, 2, 3), {}),\n ((), {'a': 'N', 'b': 'M', 'c': 'O', 'd': 'P'}, (1, 2), {'c': 3})])\n", (10476, 11050), False, 'import pytest\n'), ((239, 271), 'shapecheck.check_shapes', 'check_shapes', (['"""3"""', '"""4"""'], {'out_': '"""2"""'}), "('3', '4', out_='2')\n", (251, 271), False, 'from shapecheck import check_shapes\n'), ((493, 529), 'shapecheck.check_shapes', 'check_shapes', (['"""3,N"""', '"""N"""'], {'out_': '"""1,N"""'}), "('3,N', 'N', out_='1,N')\n", (505, 529), False, 'from shapecheck import check_shapes\n'), ((746, 777), 'shapecheck.check_shapes', 
'check_shapes', (['"""A,A,N"""'], {'out_': '"""N"""'}), "('A,A,N', out_='N')\n", (758, 777), False, 'from shapecheck import check_shapes\n'), ((941, 973), 'shapecheck.check_shapes', 'check_shapes', (['"""N,-1"""'], {'out_': '"""N,1"""'}), "('N,-1', out_='N,1')\n", (953, 973), False, 'from shapecheck import check_shapes\n'), ((1108, 1129), 'shapecheck.check_shapes', 'check_shapes', (['"""-1,-1"""'], {}), "('-1,-1')\n", (1120, 1129), False, 'from shapecheck import check_shapes\n'), ((1747, 1797), 'shapecheck.check_shapes', 'check_shapes', (['"""-1,N"""', '"""N"""', 'None', '"""3,N"""'], {'out_': '"""3,N"""'}), "('-1,N', 'N', None, '3,N', out_='3,N')\n", (1759, 1797), False, 'from shapecheck import check_shapes\n'), ((2142, 2176), 'shapecheck.check_shapes', 'check_shapes', (['"""1,...,1"""', '"""...,1,1"""'], {}), "('1,...,1', '...,1,1')\n", (2154, 2176), False, 'from shapecheck import check_shapes\n'), ((2422, 2470), 'shapecheck.check_shapes', 'check_shapes', (['"""batch,variadic..."""', '"""variadic..."""'], {}), "('batch,variadic...', 'variadic...')\n", (2434, 2470), False, 'from shapecheck import check_shapes\n'), ((2766, 2797), 'shapecheck.check_shapes', 'check_shapes', (['None', '"""2,N"""', 'None'], {}), "(None, '2,N', None)\n", (2778, 2797), False, 'from shapecheck import check_shapes\n'), ((3641, 3661), 'shapecheck.str_to_shape', 'str_to_shape', (['string'], {}), '(string)\n', (3653, 3661), False, 'from shapecheck import ShapeError, check_shapes, is_checking_enabled, is_compatible, set_checking_enabled, str_to_shape\n'), ((4429, 4465), 'shapecheck.is_compatible', 'is_compatible', (['shape', 'expected_shape'], {}), '(shape, expected_shape)\n', (4442, 4465), False, 'from shapecheck import ShapeError, check_shapes, is_checking_enabled, is_compatible, set_checking_enabled, str_to_shape\n'), ((5455, 5487), 'shapecheck.check_shapes', 'check_shapes', (['e_shape1', 'e_shape2'], {}), '(e_shape1, e_shape2)\n', (5467, 5487), False, 'from shapecheck import 
check_shapes\n'), ((6137, 6169), 'shapecheck.check_shapes', 'check_shapes', (['e_shape1', 'e_shape2'], {}), '(e_shape1, e_shape2)\n', (6149, 6169), False, 'from shapecheck import check_shapes\n'), ((6320, 6344), 'shapecheck.check_shapes', 'check_shapes', ([], {'out_': '"""1,1"""'}), "(out_='1,1')\n", (6332, 6344), False, 'from shapecheck import check_shapes\n'), ((6470, 6545), 'shapecheck.check_shapes', 'check_shapes', (["('N,1', 'N')", '"""1,2"""'], {'out_': "{'one': ('N,1', 'N'), 'two': '1,2'}"}), "(('N,1', 'N'), '1,2', out_={'one': ('N,1', 'N'), 'two': '1,2'})\n", (6482, 6545), False, 'from shapecheck import check_shapes\n'), ((6805, 6880), 'shapecheck.check_shapes', 'check_shapes', (["('N,1', 'N')", '"""1,2"""'], {'out_': "{'one': ('N,1', 'N'), 'two': '1,2'}"}), "(('N,1', 'N'), '1,2', out_={'one': ('N,1', 'N'), 'two': '1,2'})\n", (6817, 6880), False, 'from shapecheck import check_shapes\n'), ((7184, 7205), 'shapecheck.is_checking_enabled', 'is_checking_enabled', ([], {}), '()\n', (7203, 7205), False, 'from shapecheck import is_checking_enabled, set_checking_enabled\n'), ((7210, 7237), 'shapecheck.set_checking_enabled', 'set_checking_enabled', (['(False)'], {}), '(False)\n', (7230, 7237), False, 'from shapecheck import is_checking_enabled, set_checking_enabled\n'), ((7279, 7305), 'shapecheck.set_checking_enabled', 'set_checking_enabled', (['(True)'], {}), '(True)\n', (7299, 7305), False, 'from shapecheck import is_checking_enabled, set_checking_enabled\n'), ((7317, 7338), 'shapecheck.is_checking_enabled', 'is_checking_enabled', ([], {}), '()\n', (7336, 7338), False, 'from shapecheck import is_checking_enabled, set_checking_enabled\n'), ((7429, 7450), 'shapecheck.is_checking_enabled', 'is_checking_enabled', ([], {}), '()\n', (7448, 7450), False, 'from shapecheck import is_checking_enabled, set_checking_enabled\n'), ((7491, 7523), 'shapecheck.check_shapes', 'check_shapes', (['"""3"""', '"""4"""'], {'out_': '"""2"""'}), "('3', '4', out_='2')\n", (7503, 7523), 
False, 'from shapecheck import check_shapes\n'), ((7581, 7608), 'shapecheck.set_checking_enabled', 'set_checking_enabled', (['(False)'], {}), '(False)\n', (7601, 7608), False, 'from shapecheck import is_checking_enabled, set_checking_enabled\n'), ((7751, 7783), 'shapecheck.check_shapes', 'check_shapes', (['"""3"""', '"""4"""'], {'out_': '"""2"""'}), "('3', '4', out_='2')\n", (7763, 7783), False, 'from shapecheck import check_shapes\n'), ((7841, 7867), 'shapecheck.set_checking_enabled', 'set_checking_enabled', (['(True)'], {}), '(True)\n', (7861, 7867), False, 'from shapecheck import is_checking_enabled, set_checking_enabled\n'), ((7879, 7900), 'shapecheck.is_checking_enabled', 'is_checking_enabled', ([], {}), '()\n', (7898, 7900), False, 'from shapecheck import is_checking_enabled, set_checking_enabled\n'), ((8126, 8158), 'shapecheck.check_shapes', 'check_shapes', (['"""3"""', '"""4"""'], {'out_': '"""2"""'}), "('3', '4', out_='2')\n", (8138, 8158), False, 'from shapecheck import check_shapes\n'), ((8223, 8244), 'shapecheck.is_checking_enabled', 'is_checking_enabled', ([], {}), '()\n', (8242, 8244), False, 'from shapecheck import is_checking_enabled, set_checking_enabled\n'), ((8546, 8567), 'shapecheck.is_checking_enabled', 'is_checking_enabled', ([], {}), '()\n', (8565, 8567), False, 'from shapecheck import is_checking_enabled, set_checking_enabled\n'), ((8777, 8814), 'shapecheck.check_shapes', 'check_shapes', (['"""N"""', '"""M"""', '"""O"""'], {'out_': '"""N"""'}), "('N', 'M', 'O', out_='N')\n", (8789, 8814), False, 'from shapecheck import check_shapes\n'), ((8858, 8906), 'shapecheck.check_shapes', 'check_shapes', (['"""N"""', '"""M"""', '"""R"""'], {'match_callees_': '(True)'}), "('N', 'M', 'R', match_callees_=True)\n", (8870, 8906), False, 'from shapecheck import check_shapes\n'), ((9035, 9072), 'shapecheck.check_shapes', 'check_shapes', (['"""N"""', '"""M"""', '"""O"""'], {'out_': '"""N"""'}), "('N', 'M', 'O', out_='N')\n", (9047, 9072), False, 'from 
shapecheck import check_shapes\n'), ((9116, 9164), 'shapecheck.check_shapes', 'check_shapes', (['"""M"""', '"""N"""', '"""R"""'], {'match_callees_': '(True)'}), "('M', 'N', 'R', match_callees_=True)\n", (9128, 9164), False, 'from shapecheck import check_shapes\n'), ((9335, 9379), 'shapecheck.check_shapes', 'check_shapes', (['"""a, v..."""', '"""v..."""'], {'out_': '"""v..."""'}), "('a, v...', 'v...', out_='v...')\n", (9347, 9379), False, 'from shapecheck import check_shapes\n'), ((9431, 9451), 'shapecheck.check_shapes', 'check_shapes', (['"""v..."""'], {}), "('v...')\n", (9443, 9451), False, 'from shapecheck import check_shapes\n'), ((9495, 9533), 'shapecheck.check_shapes', 'check_shapes', (['"""a"""'], {'match_callees_': '(True)'}), "('a', match_callees_=True)\n", (9507, 9533), False, 'from shapecheck import check_shapes\n'), ((9702, 9740), 'shapecheck.check_shapes', 'check_shapes', (['"""a"""'], {'match_callees_': '(True)'}), "('a', match_callees_=True)\n", (9714, 9740), False, 'from shapecheck import check_shapes\n'), ((9987, 10024), 'shapecheck.check_shapes', 'check_shapes', (['"""M"""', '"""N"""', '"""O"""'], {'out_': '"""M"""'}), "('M', 'N', 'O', out_='M')\n", (9999, 10024), False, 'from shapecheck import check_shapes\n'), ((10075, 10102), 'shapecheck.check_shapes', 'check_shapes', (['"""M"""', '"""N"""', '"""R"""'], {}), "('M', 'N', 'R')\n", (10087, 10102), False, 'from shapecheck import check_shapes\n'), ((10172, 10220), 'shapecheck.check_shapes', 'check_shapes', (['"""M"""', '"""N"""', '"""R"""'], {'match_callees_': '(True)'}), "('M', 'N', 'R', match_callees_=True)\n", (10184, 10220), False, 'from shapecheck import check_shapes\n'), ((11211, 11246), 'shapecheck.check_shapes', 'check_shapes', (['*cs_args'], {}), '(*cs_args, **cs_kwargs)\n', (11223, 11246), False, 'from shapecheck import check_shapes\n'), ((11457, 11528), 'shapecheck.check_shapes', 'check_shapes', (["{'imgs': 'N,W,W,-1', 'labels': 'N,1'}", '"""N"""', 'None'], {'out_': '""""""'}), "({'imgs': 
'N,W,W,-1', 'labels': 'N,1'}, 'N', None, out_='')\n", (11469, 11528), False, 'from shapecheck import check_shapes\n'), ((12160, 12235), 'shapecheck.check_shapes', 'check_shapes', (["{'imgs': 'N,W,W,-1', 'labels': 'N,1'}"], {'aux_info': 'None', 'out_': '""""""'}), "({'imgs': 'N,W,W,-1', 'labels': 'N,1'}, aux_info=None, out_='')\n", (12172, 12235), False, 'from shapecheck import check_shapes\n'), ((12875, 12902), 'shapecheck.check_shapes', 'check_shapes', ([], {'x': '""""""', 'out_': '""""""'}), "(x='', out_='')\n", (12887, 12902), False, 'from shapecheck import check_shapes\n'), ((331, 350), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (339, 350), True, 'import numpy as np\n'), ((352, 374), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (360, 374), True, 'import numpy as np\n'), ((385, 410), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (398, 410), False, 'import pytest\n'), ((599, 614), 'numpy.ones', 'np.ones', (['(3, 5)'], {}), '((3, 5))\n', (606, 614), True, 'import numpy as np\n'), ((616, 629), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (623, 629), True, 'import numpy as np\n'), ((640, 665), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (653, 665), False, 'import pytest\n'), ((828, 846), 'numpy.ones', 'np.ones', (['(5, 5, 7)'], {}), '((5, 5, 7))\n', (835, 846), True, 'import numpy as np\n'), ((857, 882), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (870, 882), False, 'import pytest\n'), ((1035, 1050), 'numpy.ones', 'np.ones', (['(5, 3)'], {}), '((5, 3))\n', (1042, 1050), True, 'import numpy as np\n'), ((1058, 1073), 'numpy.ones', 'np.ones', (['(5, 7)'], {}), '((5, 7))\n', (1065, 1073), True, 'import numpy as np\n'), ((1168, 1183), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (1175, 1183), True, 'import numpy as np\n'), ((1194, 1219), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (1207, 
1219), False, 'import pytest\n'), ((1255, 1280), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (1268, 1280), False, 'import pytest\n'), ((1420, 1453), 'shapecheck.check_shapes', 'check_shapes', (['"""3,A,A,N"""'], {'out_': '"""N"""'}), "('3,A,A,N', out_='N')\n", (1432, 1453), False, 'from shapecheck import check_shapes\n'), ((1877, 1892), 'numpy.ones', 'np.ones', (['(7, 5)'], {}), '((7, 5))\n', (1884, 1892), True, 'import numpy as np\n'), ((1894, 1904), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (1901, 1904), True, 'import numpy as np\n'), ((1918, 1933), 'numpy.ones', 'np.ones', (['(3, 5)'], {}), '((3, 5))\n', (1925, 1933), True, 'import numpy as np\n'), ((1953, 1968), 'numpy.ones', 'np.ones', (['(2, 6)'], {}), '((2, 6))\n', (1960, 1968), True, 'import numpy as np\n'), ((1970, 1980), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (1977, 1980), True, 'import numpy as np\n'), ((1982, 1992), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (1989, 1992), True, 'import numpy as np\n'), ((1994, 2009), 'numpy.ones', 'np.ones', (['(3, 6)'], {}), '((3, 6))\n', (2001, 2009), True, 'import numpy as np\n'), ((2032, 2057), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (2045, 2057), False, 'import pytest\n'), ((2214, 2235), 'numpy.ones', 'np.ones', (['(1, 3, 4, 1)'], {}), '((1, 3, 4, 1))\n', (2221, 2235), True, 'import numpy as np\n'), ((2237, 2255), 'numpy.ones', 'np.ones', (['(2, 1, 1)'], {}), '((2, 1, 1))\n', (2244, 2255), True, 'import numpy as np\n'), ((2275, 2290), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (2282, 2290), True, 'import numpy as np\n'), ((2292, 2307), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (2299, 2307), True, 'import numpy as np\n'), ((2330, 2355), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (2343, 2355), False, 'import pytest\n'), ((2508, 2526), 'numpy.ones', 'np.ones', (['(7, 1, 2)'], {}), '((7, 1, 2))\n', (2515, 2526), True, 
'import numpy as np\n'), ((2528, 2543), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (2535, 2543), True, 'import numpy as np\n'), ((2566, 2591), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (2579, 2591), False, 'import pytest\n'), ((2655, 2680), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (2668, 2680), False, 'import pytest\n'), ((2857, 2872), 'numpy.ones', 'np.ones', (['(2, 5)'], {}), '((2, 5))\n', (2864, 2872), True, 'import numpy as np\n'), ((2874, 2887), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (2881, 2887), True, 'import numpy as np\n'), ((2895, 2913), 'numpy.ones', 'np.ones', (['(1, 2, 3)'], {}), '((1, 2, 3))\n', (2902, 2913), True, 'import numpy as np\n'), ((2915, 2930), 'numpy.ones', 'np.ones', (['(2, 6)'], {}), '((2, 6))\n', (2922, 2930), True, 'import numpy as np\n'), ((2961, 2986), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (2974, 2986), False, 'import pytest\n'), ((3056, 3081), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (3069, 3081), False, 'import pytest\n'), ((3932, 3959), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3945, 3959), False, 'import pytest\n'), ((3969, 3989), 'shapecheck.str_to_shape', 'str_to_shape', (['string'], {}), '(string)\n', (3981, 3989), False, 'from shapecheck import ShapeError, check_shapes, is_checking_enabled, is_compatible, set_checking_enabled, str_to_shape\n'), ((4875, 4911), 'shapecheck.is_compatible', 'is_compatible', (['shape', 'expected_shape'], {}), '(shape, expected_shape)\n', (4888, 4911), False, 'from shapecheck import ShapeError, check_shapes, is_checking_enabled, is_compatible, set_checking_enabled, str_to_shape\n'), ((5525, 5540), 'numpy.ones', 'np.ones', (['shape1'], {}), '(shape1)\n', (5532, 5540), True, 'import numpy as np\n'), ((5542, 5557), 'numpy.ones', 'np.ones', (['shape2'], {}), '(shape2)\n', (5549, 5557), True, 'import numpy as 
np\n'), ((6210, 6235), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (6223, 6235), False, 'import pytest\n'), ((6373, 6386), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (6380, 6386), True, 'import numpy as np\n'), ((6397, 6422), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (6410, 6422), False, 'import pytest\n'), ((6650, 6665), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (6657, 6665), True, 'import numpy as np\n'), ((6676, 6701), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (6689, 6701), False, 'import pytest\n'), ((6973, 6998), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (6986, 6998), False, 'import pytest\n'), ((7253, 7274), 'shapecheck.is_checking_enabled', 'is_checking_enabled', ([], {}), '()\n', (7272, 7274), False, 'from shapecheck import is_checking_enabled, set_checking_enabled\n'), ((7348, 7375), 'shapecheck.set_checking_enabled', 'set_checking_enabled', (['(False)'], {}), '(False)\n', (7368, 7375), False, 'from shapecheck import is_checking_enabled, set_checking_enabled\n'), ((7624, 7645), 'shapecheck.is_checking_enabled', 'is_checking_enabled', ([], {}), '()\n', (7643, 7645), False, 'from shapecheck import is_checking_enabled, set_checking_enabled\n'), ((7652, 7671), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (7660, 7671), True, 'import numpy as np\n'), ((7673, 7695), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (7681, 7695), True, 'import numpy as np\n'), ((7703, 7722), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (7711, 7722), True, 'import numpy as np\n'), ((7724, 7743), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (7732, 7743), True, 'import numpy as np\n'), ((7911, 7936), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (7924, 7936), False, 'import pytest\n'), ((7999, 8024), 'pytest.raises', 
'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (8012, 8024), False, 'import pytest\n'), ((8254, 8281), 'shapecheck.set_checking_enabled', 'set_checking_enabled', (['(False)'], {}), '(False)\n', (8274, 8281), False, 'from shapecheck import is_checking_enabled, set_checking_enabled\n'), ((8441, 8473), 'shapecheck.check_shapes', 'check_shapes', (['"""3"""', '"""4"""'], {'out_': '"""2"""'}), "('3', '4', out_='2')\n", (8453, 8473), False, 'from shapecheck import check_shapes\n'), ((8577, 8602), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (8590, 8602), False, 'import pytest\n'), ((8665, 8690), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (8678, 8690), False, 'import pytest\n'), ((8960, 8970), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (8967, 8970), True, 'import numpy as np\n'), ((8972, 8982), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (8979, 8982), True, 'import numpy as np\n'), ((8984, 8994), 'numpy.ones', 'np.ones', (['(7)'], {}), '(7)\n', (8991, 8994), True, 'import numpy as np\n'), ((9221, 9246), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (9234, 9246), False, 'import pytest\n'), ((9560, 9590), 'numpy.ones', 'np.ones', (['(x.shape[0], 2, 3, 4)'], {}), '((x.shape[0], 2, 3, 4))\n', (9567, 9590), True, 'import numpy as np\n'), ((9603, 9621), 'numpy.ones', 'np.ones', (['(2, 3, 4)'], {}), '((2, 3, 4))\n', (9610, 9621), True, 'import numpy as np\n'), ((9682, 9692), 'numpy.ones', 'np.ones', (['(8)'], {}), '(8)\n', (9689, 9692), True, 'import numpy as np\n'), ((9767, 9801), 'numpy.ones', 'np.ones', (['(x.shape[0] - 1, 2, 3, 4)'], {}), '((x.shape[0] - 1, 2, 3, 4))\n', (9774, 9801), True, 'import numpy as np\n'), ((9814, 9832), 'numpy.ones', 'np.ones', (['(2, 3, 4)'], {}), '((2, 3, 4))\n', (9821, 9832), True, 'import numpy as np\n'), ((9896, 9921), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (9909, 9921), False, 'import pytest\n'), 
((10301, 10311), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (10308, 10311), True, 'import numpy as np\n'), ((10313, 10323), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (10320, 10323), True, 'import numpy as np\n'), ((10325, 10335), 'numpy.ones', 'np.ones', (['(7)'], {}), '(7)\n', (10332, 10335), True, 'import numpy as np\n'), ((10358, 10383), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (10371, 10383), False, 'import pytest\n'), ((11306, 11316), 'numpy.ones', 'np.ones', (['v'], {}), '(v)\n', (11313, 11316), True, 'import numpy as np\n'), ((11655, 11680), 'numpy.mean', 'np.mean', (['(diff ** 2 + arg2)'], {}), '(diff ** 2 + arg2)\n', (11662, 11680), True, 'import numpy as np\n'), ((11949, 11974), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (11962, 11974), False, 'import pytest\n'), ((12394, 12412), 'numpy.mean', 'np.mean', (['(diff ** 2)'], {}), '(diff ** 2)\n', (12401, 12412), True, 'import numpy as np\n'), ((12481, 12491), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (12488, 12491), True, 'import numpy as np\n'), ((12578, 12603), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (12591, 12603), False, 'import pytest\n'), ((422, 441), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (430, 441), True, 'import numpy as np\n'), ((443, 462), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (451, 462), True, 'import numpy as np\n'), ((677, 692), 'numpy.ones', 'np.ones', (['(3, 4)'], {}), '((3, 4))\n', (684, 692), True, 'import numpy as np\n'), ((694, 707), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (701, 707), True, 'import numpy as np\n'), ((894, 912), 'numpy.ones', 'np.ones', (['(6, 5, 7)'], {}), '((6, 5, 7))\n', (901, 912), True, 'import numpy as np\n'), ((1231, 1244), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (1238, 1244), True, 'import numpy as np\n'), ((1292, 1310), 'numpy.ones', 'np.ones', (['(1, 2, 3)'], {}), '((1, 
2, 3))\n', (1299, 1310), True, 'import numpy as np\n'), ((1519, 1540), 'numpy.ones', 'np.ones', (['(3, 5, 5, 7)'], {}), '((3, 5, 5, 7))\n', (1526, 1540), True, 'import numpy as np\n'), ((1555, 1580), 'pytest.raises', 'pytest.raises', (['ShapeError'], {}), '(ShapeError)\n', (1568, 1580), False, 'import pytest\n'), ((2069, 2084), 'numpy.ones', 'np.ones', (['(2, 6)'], {}), '((2, 6))\n', (2076, 2084), True, 'import numpy as np\n'), ((2086, 2096), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (2093, 2096), True, 'import numpy as np\n'), ((2098, 2108), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (2105, 2108), True, 'import numpy as np\n'), ((2110, 2125), 'numpy.ones', 'np.ones', (['(3, 6)'], {}), '((3, 6))\n', (2117, 2125), True, 'import numpy as np\n'), ((2367, 2388), 'numpy.ones', 'np.ones', (['(2, 3, 4, 1)'], {}), '((2, 3, 4, 1))\n', (2374, 2388), True, 'import numpy as np\n'), ((2390, 2405), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (2397, 2405), True, 'import numpy as np\n'), ((2603, 2618), 'numpy.ones', 'np.ones', (['(6, 2)'], {}), '((6, 2))\n', (2610, 2618), True, 'import numpy as np\n'), ((2620, 2635), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (2627, 2635), True, 'import numpy as np\n'), ((2692, 2707), 'numpy.ones', 'np.ones', (['(6, 2)'], {}), '((6, 2))\n', (2699, 2707), True, 'import numpy as np\n'), ((2709, 2719), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (2716, 2719), True, 'import numpy as np\n'), ((2998, 3013), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (3005, 3013), True, 'import numpy as np\n'), ((3015, 3030), 'numpy.ones', 'np.ones', (['(3, 5)'], {}), '((3, 5))\n', (3022, 3030), True, 'import numpy as np\n'), ((3032, 3045), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (3039, 3045), True, 'import numpy as np\n'), ((3109, 3124), 'numpy.ones', 'np.ones', (['(3, 6)'], {}), '((3, 6))\n', (3116, 3124), True, 'import numpy as np\n'), ((6247, 6262), 'numpy.ones', 'np.ones', (['shape1'], {}), 
'(shape1)\n', (6254, 6262), True, 'import numpy as np\n'), ((6264, 6279), 'numpy.ones', 'np.ones', (['shape2'], {}), '(shape2)\n', (6271, 6279), True, 'import numpy as np\n'), ((6617, 6632), 'numpy.ones', 'np.ones', (['(7, 1)'], {}), '((7, 1))\n', (6624, 6632), True, 'import numpy as np\n'), ((6634, 6647), 'numpy.ones', 'np.ones', (['(7,)'], {}), '((7,))\n', (6641, 6647), True, 'import numpy as np\n'), ((6747, 6762), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (6754, 6762), True, 'import numpy as np\n'), ((7044, 7059), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (7051, 7059), True, 'import numpy as np\n'), ((7396, 7417), 'shapecheck.is_checking_enabled', 'is_checking_enabled', ([], {}), '()\n', (7415, 7417), False, 'from shapecheck import is_checking_enabled, set_checking_enabled\n'), ((7948, 7967), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (7956, 7967), True, 'import numpy as np\n'), ((7969, 7988), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (7977, 7988), True, 'import numpy as np\n'), ((8036, 8055), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (8044, 8055), True, 'import numpy as np\n'), ((8057, 8076), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (8065, 8076), True, 'import numpy as np\n'), ((8302, 8323), 'shapecheck.is_checking_enabled', 'is_checking_enabled', ([], {}), '()\n', (8321, 8323), False, 'from shapecheck import is_checking_enabled, set_checking_enabled\n'), ((8334, 8353), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (8342, 8353), True, 'import numpy as np\n'), ((8355, 8377), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (8363, 8377), True, 'import numpy as np\n'), ((8389, 8408), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (8397, 8408), True, 'import numpy as np\n'), ((8410, 8429), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (8418, 8429), True, 'import numpy 
as np\n'), ((8614, 8633), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (8622, 8633), True, 'import numpy as np\n'), ((8635, 8654), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (8643, 8654), True, 'import numpy as np\n'), ((8702, 8721), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (8710, 8721), True, 'import numpy as np\n'), ((8723, 8742), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (8731, 8742), True, 'import numpy as np\n'), ((9258, 9268), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (9265, 9268), True, 'import numpy as np\n'), ((9270, 9280), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (9277, 9280), True, 'import numpy as np\n'), ((9282, 9292), 'numpy.ones', 'np.ones', (['(7)'], {}), '(7)\n', (9289, 9292), True, 'import numpy as np\n'), ((9655, 9673), 'numpy.ones', 'np.ones', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (9662, 9673), True, 'import numpy as np\n'), ((9866, 9884), 'numpy.ones', 'np.ones', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (9873, 9884), True, 'import numpy as np\n'), ((9933, 9943), 'numpy.ones', 'np.ones', (['(8)'], {}), '(8)\n', (9940, 9943), True, 'import numpy as np\n'), ((10405, 10415), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (10412, 10415), True, 'import numpy as np\n'), ((10417, 10427), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (10424, 10427), True, 'import numpy as np\n'), ((10429, 10439), 'numpy.ones', 'np.ones', (['(7)'], {}), '(7)\n', (10436, 10439), True, 'import numpy as np\n'), ((11378, 11388), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (11385, 11388), True, 'import numpy as np\n'), ((11701, 11722), 'numpy.ones', 'np.ones', (['(3, 2, 2, 1)'], {}), '((3, 2, 2, 1))\n', (11708, 11722), True, 'import numpy as np\n'), ((11734, 11749), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (11741, 11749), True, 'import numpy as np\n'), ((11769, 11779), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (11776, 11779), True, 'import 
numpy as np\n'), ((11786, 11801), 'numpy.ones', 'np.ones', (['(2, 3)'], {}), '((2, 3))\n', (11793, 11801), True, 'import numpy as np\n'), ((11836, 11857), 'numpy.ones', 'np.ones', (['(5, 3, 3, 4)'], {}), '((5, 3, 3, 4))\n', (11843, 11857), True, 'import numpy as np\n'), ((11869, 11884), 'numpy.ones', 'np.ones', (['(5, 1)'], {}), '((5, 1))\n', (11876, 11884), True, 'import numpy as np\n'), ((11904, 11914), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (11911, 11914), True, 'import numpy as np\n'), ((12430, 12451), 'numpy.ones', 'np.ones', (['(3, 2, 2, 1)'], {}), '((3, 2, 2, 1))\n', (12437, 12451), True, 'import numpy as np\n'), ((12463, 12478), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (12470, 12478), True, 'import numpy as np\n'), ((12511, 12532), 'numpy.ones', 'np.ones', (['(5, 3, 3, 4)'], {}), '((5, 3, 3, 4))\n', (12518, 12532), True, 'import numpy as np\n'), ((12544, 12559), 'numpy.ones', 'np.ones', (['(5, 1)'], {}), '((5, 1))\n', (12551, 12559), True, 'import numpy as np\n'), ((12734, 12745), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (12742, 12745), True, 'import numpy as np\n'), ((12755, 12766), 'numpy.array', 'np.array', (['(4)'], {}), '(4)\n', (12763, 12766), True, 'import numpy as np\n'), ((12808, 12819), 'numpy.array', 'np.array', (['(5)'], {}), '(5)\n', (12816, 12819), True, 'import numpy as np\n'), ((12821, 12832), 'numpy.array', 'np.array', (['(6)'], {}), '(6)\n', (12829, 12832), True, 'import numpy as np\n'), ((1596, 1617), 'numpy.ones', 'np.ones', (['(3, 6, 5, 7)'], {}), '((3, 6, 5, 7))\n', (1603, 1617), True, 'import numpy as np\n'), ((6714, 6729), 'numpy.ones', 'np.ones', (['(7, 1)'], {}), '((7, 1))\n', (6721, 6729), True, 'import numpy as np\n'), ((6731, 6744), 'numpy.ones', 'np.ones', (['(6,)'], {}), '((6,))\n', (6738, 6744), True, 'import numpy as np\n'), ((7011, 7026), 'numpy.ones', 'np.ones', (['(7, 1)'], {}), '((7, 1))\n', (7018, 7026), True, 'import numpy as np\n'), ((7028, 7041), 'numpy.ones', 'np.ones', (['(7,)'], 
{}), '((7,))\n', (7035, 7041), True, 'import numpy as np\n'), ((12001, 12022), 'numpy.ones', 'np.ones', (['(3, 5, 2, 1)'], {}), '((3, 5, 2, 1))\n', (12008, 12022), True, 'import numpy as np\n'), ((12034, 12049), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (12041, 12049), True, 'import numpy as np\n'), ((12073, 12083), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (12080, 12083), True, 'import numpy as np\n'), ((12627, 12648), 'numpy.ones', 'np.ones', (['(3, 5, 2, 1)'], {}), '((3, 5, 2, 1))\n', (12634, 12648), True, 'import numpy as np\n'), ((12660, 12675), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (12667, 12675), True, 'import numpy as np\n')] |
""" Copyright © 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------
"""
import neoml.PythonWrapper as PythonWrapper
import numpy
class Blob:
    """Python-side handle for a neoml data blob.

    Thin wrapper over ``PythonWrapper.Blob``: every property below simply
    delegates to the underlying native object.
    """
    def __init__(self, internal):
        # Exact-type check on purpose (not isinstance): only the native
        # wrapper type is accepted.
        if not type(internal) is PythonWrapper.Blob:
            raise ValueError('The `blob` must be PythonWrapper.Blob.')
        self._internal = internal
    @property
    def shape(self):
        """The blob shape (presumably the 7 neoml dimension sizes — defined by PythonWrapper)."""
        return self._internal.shape()
    @property
    def batch_len(self):
        """The BatchLength dimension size."""
        return self._internal.batch_len()
    @property
    def batch_width(self):
        """The BatchWidth dimension size."""
        return self._internal.batch_width()
    @property
    def list_size(self):
        """The ListSize dimension size."""
        return self._internal.list_size()
    @property
    def height(self):
        """The Height dimension size."""
        return self._internal.height()
    @property
    def width(self):
        """The Width dimension size."""
        return self._internal.width()
    @property
    def depth(self):
        """The Depth dimension size."""
        return self._internal.depth()
    @property
    def channels(self):
        """The Channels dimension size."""
        return self._internal.channels()
    @property
    def size(self):
        """The total number of elements in the blob."""
        return self._internal.size()
    @property
    def object_count(self):
        """The number of objects in the blob."""
        return self._internal.object_count()
    @property
    def object_size(self):
        """The number of elements in a single object."""
        return self._internal.object_size()
    @property
    def geometrical_size(self):
        """The geometrical size (presumably height*width*depth — confirm in PythonWrapper)."""
        return self._internal.geometrical_size()
    @property
    def data(self):
        """The blob contents (return type defined by PythonWrapper — presumably an array)."""
        return self._internal.data()
# -------------------------------------------------------------------------------------------------------------
def asblob(math_engine, data):
    """Wrap an array-like object into a Blob on the given math engine.

    The input is converted with ``numpy.asarray`` and must already have
    dtype float32 or int32 and at most 7 dimensions.
    """
    array = numpy.asarray(data)
    if array.dtype not in (numpy.float32, numpy.int32):
        raise ValueError('The `dtype` must be one of {`float32`, `int32`}.')
    if array.ndim > 7:
        raise ValueError('The `data` must have < 8 dimensions.')
    return Blob(PythonWrapper.tensor(math_engine._internal, array))
def vector(math_engine, size, dtype="float32"):
    """Create a one-dimensional blob of `size` elements.

    :param math_engine: the math engine that owns the blob memory.
    :param size: the number of elements, must be > 0.
    :param dtype: element type, "float32" or "int32". Now defaults to
        "float32" for consistency with the other constructors in this
        module (backward compatible: explicit dtype still works).
    :raises ValueError: if `dtype` or `size` is invalid.
    """
    if dtype != "float32" and dtype != "int32":
        raise ValueError('The `dtype` must be one of {`float32`, `int32`}.')
    if size < 1:
        raise ValueError('The `size` must be > 0.')
    # The vector occupies the BatchLength dimension; all others are 1.
    shape = (size, 1, 1, 1, 1, 1, 1)
    return Blob(PythonWrapper.tensor(math_engine._internal, shape, dtype))
def matrix(math_engine, matrix_height, matrix_width, dtype="float32"):
    """Create a two-dimensional blob of matrix_height x matrix_width.

    The matrix occupies the first two dimensions; the remaining five
    dimensions are 1.
    """
    if dtype not in ("float32", "int32"):
        raise ValueError('The `dtype` must be one of {`float32`, `int32`}.')
    for name, value in (('matrix_height', matrix_height),
                        ('matrix_width', matrix_width)):
        if value < 1:
            raise ValueError('The `' + name + '` must be > 0.')
    shape = (matrix_height, matrix_width, 1, 1, 1, 1, 1)
    return Blob(PythonWrapper.tensor(math_engine._internal, shape, dtype))
def tensor(math_engine, shape, dtype="float32"):
    """Create a blob with the given dimension sizes.

    :param math_engine: the math engine that owns the blob memory.
    :param shape: a sequence of at most 7 dimension sizes, all > 0.
    :param dtype: element type, "float32" or "int32".
    :raises ValueError: if `dtype` is invalid, `shape` has more than 7
        dimensions, or any dimension size is not positive.
    """
    if dtype != "float32" and dtype != "int32":
        raise ValueError('The `dtype` must be one of {`float32`, `int32`}.')
    # Bug fix: the original check `if shape.size < 8` raised for every
    # VALID shape (a blob has at most 7 dimensions, so a valid size is
    # always < 8). Reject only shapes with more than 7 dimensions.
    if len(shape) > 7:
        raise ValueError('The `shape.size` must be <= 7.')
    if not all(value > 0 for value in shape):
        raise ValueError('All `shape` elements must be > 0.')
    return Blob(PythonWrapper.tensor(math_engine._internal, shape, dtype))
def data_blob(math_engine, batch_len, batch_width, channels, dtype="float32", data=None):
    """Create a blob of (batch_len x batch_width) objects with `channels` channels.

    :param data: optional array-like payload; when given, its total size
        must equal batch_len * batch_width * channels.
    :raises ValueError: on an invalid dtype, a non-positive dimension, or
        a `data` payload of the wrong size.
    """
    if dtype != "float32" and dtype != "int32":
        raise ValueError('The `dtype` must be one of {`float32`, `int32`}.')
    if batch_len < 1:
        raise ValueError('The `batch_len` must be > 0.')
    if batch_width < 1:
        raise ValueError('The `batch_width` must be > 0.')
    if channels < 1:
        raise ValueError('The `channels` must be > 0.')
    # Consistency fix: the dimension order is (BatchLength, BatchWidth,
    # ListSize, Height, Width, Depth, Channels), matching list_blob /
    # image2d / image3d; the original tuple was reversed
    # (channels first, batch_len last).
    shape = (batch_len, batch_width, 1, 1, 1, 1, channels)
    if data is None:
        return Blob(PythonWrapper.tensor(math_engine._internal, shape, dtype))
    np_data = numpy.asarray(data)
    if np_data.size != batch_len * batch_width * channels:
        raise ValueError('The `data.size` must be equal to `batch_len * batch_width * channels`.')
    return Blob(PythonWrapper.tensor(math_engine._internal, shape, dtype, np_data))
def list_blob(math_engine, batch_len, batch_width, list_size, channels, dtype="float32"):
    """Create a blob of lists: batch_len x batch_width objects, each a
    list of `list_size` elements with `channels` channels."""
    if dtype not in ("float32", "int32"):
        raise ValueError('The `dtype` must be one of {`float32`, `int32`}.')
    dims = (('batch_len', batch_len), ('batch_width', batch_width),
            ('list_size', list_size), ('channels', channels))
    for name, value in dims:
        if value < 1:
            raise ValueError('The `' + name + '` must be > 0.')
    shape = (batch_len, batch_width, list_size, 1, 1, 1, channels)
    return Blob(PythonWrapper.tensor(math_engine._internal, shape, dtype))
def image2d(math_engine, batch_len, batch_width, height, width, channels, dtype="float32"):
    """Create a blob of two-dimensional images of height x width with
    `channels` channels per pixel."""
    if dtype not in ("float32", "int32"):
        raise ValueError('The `dtype` must be one of {`float32`, `int32`}.')
    dims = (('batch_len', batch_len), ('batch_width', batch_width),
            ('height', height), ('width', width), ('channels', channels))
    for name, value in dims:
        if value < 1:
            raise ValueError('The `' + name + '` must be > 0.')
    shape = (batch_len, batch_width, 1, height, width, 1, channels)
    return Blob(PythonWrapper.tensor(math_engine._internal, shape, dtype))
def image3d(math_engine, batch_len, batch_width, height, width, depth, channels, dtype="float32"):
    """Create a blob of three-dimensional images of height x width x depth
    with `channels` channels per voxel."""
    if dtype not in ("float32", "int32"):
        raise ValueError('The `dtype` must be one of {`float32`, `int32`}.')
    dims = (('batch_len', batch_len), ('batch_width', batch_width),
            ('height', height), ('width', width),
            ('depth', depth), ('channels', channels))
    for name, value in dims:
        if value < 1:
            raise ValueError('The `' + name + '` must be > 0.')
    shape = (batch_len, batch_width, 1, height, width, depth, channels)
    return Blob(PythonWrapper.tensor(math_engine._internal, shape, dtype))
| [
"neoml.PythonWrapper.tensor",
"numpy.asarray"
] | [((2482, 2501), 'numpy.asarray', 'numpy.asarray', (['data'], {}), '(data)\n', (2495, 2501), False, 'import numpy\n'), ((4811, 4830), 'numpy.asarray', 'numpy.asarray', (['data'], {}), '(data)\n', (4824, 4830), False, 'import numpy\n'), ((2766, 2818), 'neoml.PythonWrapper.tensor', 'PythonWrapper.tensor', (['math_engine._internal', 'np_data'], {}), '(math_engine._internal, np_data)\n', (2786, 2818), True, 'import neoml.PythonWrapper as PythonWrapper\n'), ((3125, 3182), 'neoml.PythonWrapper.tensor', 'PythonWrapper.tensor', (['math_engine._internal', 'shape', 'dtype'], {}), '(math_engine._internal, shape, dtype)\n', (3145, 3182), True, 'import neoml.PythonWrapper as PythonWrapper\n'), ((3645, 3702), 'neoml.PythonWrapper.tensor', 'PythonWrapper.tensor', (['math_engine._internal', 'shape', 'dtype'], {}), '(math_engine._internal, shape, dtype)\n', (3665, 3702), True, 'import neoml.PythonWrapper as PythonWrapper\n'), ((4104, 4161), 'neoml.PythonWrapper.tensor', 'PythonWrapper.tensor', (['math_engine._internal', 'shape', 'dtype'], {}), '(math_engine._internal, shape, dtype)\n', (4124, 4161), True, 'import neoml.PythonWrapper as PythonWrapper\n'), ((5010, 5076), 'neoml.PythonWrapper.tensor', 'PythonWrapper.tensor', (['math_engine._internal', 'shape', 'dtype', 'np_data'], {}), '(math_engine._internal, shape, dtype, np_data)\n', (5030, 5076), True, 'import neoml.PythonWrapper as PythonWrapper\n'), ((5714, 5771), 'neoml.PythonWrapper.tensor', 'PythonWrapper.tensor', (['math_engine._internal', 'shape', 'dtype'], {}), '(math_engine._internal, shape, dtype)\n', (5734, 5771), True, 'import neoml.PythonWrapper as PythonWrapper\n'), ((6477, 6534), 'neoml.PythonWrapper.tensor', 'PythonWrapper.tensor', (['math_engine._internal', 'shape', 'dtype'], {}), '(math_engine._internal, shape, dtype)\n', (6497, 6534), True, 'import neoml.PythonWrapper as PythonWrapper\n'), ((7322, 7379), 'neoml.PythonWrapper.tensor', 'PythonWrapper.tensor', (['math_engine._internal', 'shape', 'dtype'], {}), 
'(math_engine._internal, shape, dtype)\n', (7342, 7379), True, 'import neoml.PythonWrapper as PythonWrapper\n'), ((4737, 4794), 'neoml.PythonWrapper.tensor', 'PythonWrapper.tensor', (['math_engine._internal', 'shape', 'dtype'], {}), '(math_engine._internal, shape, dtype)\n', (4757, 4794), True, 'import neoml.PythonWrapper as PythonWrapper\n')] |
import json
import random
import os
import logging
import pickle
import string
import re
from pathlib import Path
from collections import Counter, OrderedDict, defaultdict as ddict
import torch
import numpy as np
from tqdm.notebook import tqdm
from torch.utils.data import Dataset
def set_seed(seed):
    """Seed every RNG used in this project (python, numpy, torch CPU and
    all CUDA devices) so that runs are reproducible."""
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
def load_pickle(path):
    """Deserialize and return the object stored in the pickle file at `path`."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def save_pickle(obj, path):
    """Serialize `obj` to the file at `path` using pickle."""
    with open(path, 'wb') as handle:
        pickle.dump(obj, handle)
def visualize(tbx, pred_dict, gold_dict, step, split, num_visuals):
    """Visualize text examples to TensorBoard.
    Args:
        tbx (tensorboardX.SummaryWriter): Summary writer.
        pred_dict (dict): dict of predictions of the form id -> pred.
        gold_dict (dict): gold data with parallel 'id', 'question',
            'context' and 'answer' lists.
        step (int): Number of examples seen so far during training.
        split (str): Name of data split being visualized.
        num_visuals (int): Number of visuals to select at random from preds.
    """
    if num_visuals <= 0:
        return
    num_visuals = min(num_visuals, len(pred_dict))
    index_of = {qid: pos for pos, qid in enumerate(gold_dict['id'])}
    chosen = np.random.choice(list(pred_dict), size=num_visuals, replace=False)
    for rank, qid in enumerate(chosen):
        prediction = pred_dict[qid] or 'N/A'
        pos = index_of[qid]
        answers = gold_dict['answer'][pos]
        gold_text = answers['text'][0] if answers else 'N/A'
        table = (f"- **Question:** {gold_dict['question'][pos]}\n"
                 f"- **Context:** {gold_dict['context'][pos]}\n"
                 f"- **Answer:** {gold_text}\n"
                 f"- **Prediction:** {prediction}")
        tbx.add_text(tag=f'{split}/{rank+1}_of_{num_visuals}',
                     text_string=table,
                     global_step=step)
def get_save_dir(base_dir, name, id_max=100):
    """Create and return a new unique save directory `base_dir/name-XX`.

    Tries numeric suffixes 01 .. id_max-1 and creates the first directory
    that does not already exist.

    Raises:
        RuntimeError: if every suffix up to `id_max` is already taken.
    """
    for uid in range(1, id_max):
        save_dir = os.path.join(base_dir, f'{name}-{uid:02d}')
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
            return save_dir
    # Fix: the original used a backslash line continuation inside the
    # string literal, embedding a run of source indentation into the
    # error message; implicit concatenation keeps the message clean.
    raise RuntimeError('Too many save directories created with the same name. '
                       'Delete old save directories or use another name.')
def filter_encodings(encodings):
    """Return a copy of `encodings` without the examples whose
    'end_positions' entry is falsy (i.e. no usable answer span).

    All parallel lists are filtered with the same index set.
    """
    keep = [idx for idx in range(len(encodings['input_ids']))
            if encodings['end_positions'][idx]]
    return {key: [encodings[key][idx] for idx in keep] for key in encodings}
def merge(encodings, new_encoding):
    """Concatenate `new_encoding` onto `encodings` key-by-key, in place.

    When `encodings` is empty/falsy, `new_encoding` itself is returned
    (no copy is made).
    """
    if not encodings:
        return new_encoding
    for key, values in new_encoding.items():
        encodings[key] += values
    return encodings
def get_logger(log_dir, name):
    """Get a `logging.Logger` instance that prints to the console
    and an auxiliary file.
    Args:
        log_dir (str): Directory in which to create the log file.
        name (str): Name to identify the logs.
    Returns:
        logger (logging.Logger): Logger instance for logging events.
    """
    class StreamHandlerWithTQDM(logging.Handler):
        """Let `logging` print without breaking `tqdm` progress bars.
        See Also:
            > https://stackoverflow.com/questions/38543506
        """
        def emit(self, record):
            try:
                msg = self.format(record)
                tqdm.write(msg)
                self.flush()
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                self.handleError(record)

    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)

    # Log everything (i.e., DEBUG level and above) to a file.
    file_handler = logging.FileHandler(os.path.join(log_dir, f'{name}.txt'))
    file_handler.setLevel(logging.DEBUG)

    # Log everything except DEBUG level (i.e., INFO level and above) to console.
    console_handler = StreamHandlerWithTQDM()
    console_handler.setLevel(logging.INFO)

    # One Formatter is enough for both handlers (the original built two
    # identical instances); Formatter objects hold no per-handler state.
    formatter = logging.Formatter('[%(asctime)s] %(message)s',
                                  datefmt='%m.%d.%y %H:%M:%S')
    file_handler.setFormatter(formatter)
    console_handler.setFormatter(formatter)

    # Add the handlers to the logger.
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    return logger
class AverageMeter:
    """Running-average tracker over a stream of (value, count) updates.
    Adapted from:
        > https://github.com/pytorch/examples/blob/master/imagenet/main.py
    """
    def __init__(self):
        self.avg = 0
        self.sum = 0
        self.count = 0

    def reset(self):
        """Reset meter."""
        self.__init__()

    def update(self, val, num_samples=1):
        """Fold in `val`, the mean of `num_samples` observations.
        Args:
            val (float): Average value to update the meter with.
            num_samples (int): Number of samples that were averaged to
                produce `val`.
        """
        self.count = self.count + num_samples
        self.sum = self.sum + val * num_samples
        self.avg = self.sum / self.count
class QADataset(Dataset):
    """Torch dataset over tokenized QA encodings.

    Each item is a dict of tensors keyed by 'input_ids' and
    'attention_mask'; in training mode the answer span labels
    'start_positions' and 'end_positions' are included as well.
    """
    def __init__(self, encodings, train=True):
        self.encodings = encodings
        keys = ['input_ids', 'attention_mask']
        if train:
            keys = keys + ['start_positions', 'end_positions']
        self.keys = keys
        assert(all(key in self.encodings for key in self.keys))

    def __getitem__(self, idx):
        item = {}
        for key in self.keys:
            item[key] = torch.tensor(self.encodings[key][idx])
        return item

    def __len__(self):
        return len(self.encodings['input_ids'])
def read_squad(path, start_idx, end_idx):
    """Load a SQuAD-format JSON file and flatten it to parallel lists.

    Only articles in [start_idx, end_idx) are read; when either bound is
    None the default window [0, 4000) is used. The upper bound is clipped
    to the number of articles in the file.

    Returns:
        dict: parallel lists 'question', 'context', 'id' and — when any
        answers are present — 'answer', with all answers for one question
        id collapsed into {'answer_start': [...], 'text': [...]}.
    """
    path = Path(path)
    with open(path, 'rb') as f:
        squad_dict = json.load(f)
    # Fix: compare to None with `is`, not `==` (PEP 8); also removed a
    # leftover debug print of the article count and dead commented code.
    if start_idx is None or end_idx is None:
        start_idx = 0
        end_idx = 4000
    end_idx = min(end_idx, len(squad_dict['data']))
    data_dict = {'question': [], 'context': [], 'id': [], 'answer': []}
    for i in range(start_idx, end_idx):
        for passage in squad_dict['data'][i]['paragraphs']:
            context = passage['context']
            for qa in passage['qas']:
                question = qa['question']
                if len(qa['answers']) == 0:
                    data_dict['question'].append(question)
                    data_dict['context'].append(context)
                    data_dict['id'].append(qa['id'])
                else:
                    # One flat row per (question, answer) pair.
                    for answer in qa['answers']:
                        data_dict['question'].append(question)
                        data_dict['context'].append(context)
                        data_dict['id'].append(qa['id'])
                        data_dict['answer'].append(answer)
    # Group the flat rows back by question id.
    # NOTE(review): when a file mixes answered and unanswered questions,
    # data_dict['answer'] is shorter than the id list and the ex_ids
    # lookup below misaligns — callers appear to use uniformly answered
    # or uniformly unanswered files. TODO confirm.
    id_map = ddict(list)
    for idx, qid in enumerate(data_dict['id']):
        id_map[qid].append(idx)
    data_dict_collapsed = {'question': [], 'context': [], 'id': []}
    if data_dict['answer']:
        data_dict_collapsed['answer'] = []
    for qid in id_map:
        ex_ids = id_map[qid]
        data_dict_collapsed['question'].append(data_dict['question'][ex_ids[0]])
        data_dict_collapsed['context'].append(data_dict['context'][ex_ids[0]])
        data_dict_collapsed['id'].append(qid)
        if data_dict['answer']:
            all_answers = [data_dict['answer'][idx] for idx in ex_ids]
            data_dict_collapsed['answer'].append(
                {'answer_start': [answer['answer_start'] for answer in all_answers],
                 'text': [answer['text'] for answer in all_answers]})
    return data_dict_collapsed
def add_token_positions(encodings, answers, tokenizer):
    """Attach token-level answer spans to `encodings` in place.

    For every answer, the character offsets 'answer_start'/'answer_end'
    are mapped to token indices via `encodings.char_to_token` (assumes a
    HuggingFace fast-tokenizer BatchEncoding — TODO confirm) and stored
    as 'start_positions'/'end_positions'.
    """
    start_positions = []
    end_positions = []
    for i in range(len(answers)):
        start_positions.append(encodings.char_to_token(i, answers[i]['answer_start']))
        end_positions.append(encodings.char_to_token(i, answers[i]['answer_end']))
        # if start position is None, the answer passage has been truncated;
        # point it past the sequence via the tokenizer's max length
        if start_positions[-1] is None:
            start_positions[-1] = tokenizer.model_max_length
        # if end position is None, the 'char_to_token' function points to the space before the correct token - > add + 1
        if end_positions[-1] is None:
            end_positions[-1] = encodings.char_to_token(i, answers[i]['answer_end'] + 1)
    encodings.update({'start_positions': start_positions, 'end_positions': end_positions})
def add_end_idx(answers, contexts):
    """Derive 'answer_end' for each answer from its text and start index.

    SQuAD start offsets are occasionally off by one or two characters, so
    shifts of 0, 1 and 2 are tried and the first one whose context slice
    matches the gold text wins. If none matches, the answer is left
    without an 'answer_end' key (same as before).
    """
    for answer, context in zip(answers, contexts):
        gold_text = answer['text']
        begin = answer['answer_start']
        finish = begin + len(gold_text)
        for shift in (0, 1, 2):
            if context[begin - shift:finish - shift] == gold_text:
                answer['answer_start'] = begin - shift
                answer['answer_end'] = finish - shift
                break
def convert_tokens(eval_dict, qa_id, y_start_list, y_end_list):
    """Convert predictions to tokens from the context.
    Args:
        eval_dict (dict): Dictionary with eval info for the dataset. This is
            used to perform the mapping from IDs and indices to actual text.
        qa_id (int): List of QA example IDs.
        y_start_list (list): List of start predictions.
        y_end_list (list): List of end predictions.
    Returns:
        pred_dict (dict): Dictionary index IDs -> predicted answer text.
        sub_dict (dict): Dictionary UUIDs -> predicted answer text (submission).
    """
    pred_dict, sub_dict = {}, {}
    for qid, y_start, y_end in zip(qa_id, y_start_list, y_end_list):
        entry = eval_dict[str(qid)]
        begin = entry['spans'][y_start][0]
        end = entry['spans'][y_end][1]
        answer_text = entry['context'][begin:end]
        pred_dict[str(qid)] = answer_text
        sub_dict[entry['uuid']] = answer_text
    return pred_dict, sub_dict
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Score `prediction` against every ground truth and keep the best.

    With no ground truths the prediction is scored against the empty
    string (the no-answer convention).
    """
    if not ground_truths:
        return metric_fn(prediction, '')
    return max(metric_fn(prediction, truth) for truth in ground_truths)
def eval_dicts(gold_dict, pred_dict):
    """Compute EM and F1 (in percent) of `pred_dict` against `gold_dict`.

    Args:
        gold_dict (dict): gold data with parallel 'id' and 'answer' lists.
        pred_dict (dict): mapping question id -> predicted answer text.
    Returns:
        dict: {'EM': ..., 'F1': ...} averaged over the predicted ids.
        Raises ZeroDivisionError on an empty pred_dict (unchanged).
    """
    # Dropped the unused `avna` accumulator left over from a starter-code
    # answer-vs-no-answer metric that is never computed here.
    f1 = em = total = 0
    id2index = {curr_id: idx for idx, curr_id in enumerate(gold_dict['id'])}
    for curr_id in pred_dict:
        total += 1
        ground_truths = gold_dict['answer'][id2index[curr_id]]['text']
        prediction = pred_dict[curr_id]
        em += metric_max_over_ground_truths(compute_em, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(compute_f1, prediction, ground_truths)
    return {'EM': 100. * em / total,
            'F1': 100. * f1 / total}
def postprocess_qa_predictions(examples, features, predictions, number,
                               n_best_size=20, max_answer_length=30):
    """Turn raw start/end logits into one answer string per example.

    Args:
        examples: columnar dict of original examples ('id', 'context', ...).
        features: tokenized features (assumes a HuggingFace BatchEncoding-like
            object with 'id', 'offset_mapping', sequence_ids() — TODO confirm).
        predictions: (all_start_logits, all_end_logits) per feature.
        number: maximum number of examples to post-process.
        n_best_size: how many top start/end candidates to consider.
        max_answer_length: longest admissible answer span, in tokens.
    Returns:
        OrderedDict: example id -> best non-empty answer text.
    """
    all_start_logits, all_end_logits = predictions
    # Build a map example to its corresponding features.
    example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
    features_per_example = ddict(list)
    for i, feat_id in enumerate(features['id']):
        features_per_example[example_id_to_index[feat_id]].append(i)
    # The dictionaries we have to fill.
    all_predictions = OrderedDict()
    # Let's loop over all the examples!
    limit = min(number, len(examples['id']))
    for example_index in tqdm(range(limit)):
        example = {key : examples[key][example_index] for key in examples}
        # Those are the indices of the features associated to the current example.
        feature_indices = features_per_example[example_index]
        prelim_predictions = []
        # Looping through all the features associated to the current example.
        for feature_index in feature_indices:
            # We grab the predictions of the model for this feature.
            start_logits = all_start_logits[feature_index]
            end_logits = all_end_logits[feature_index]
            seq_ids = features.sequence_ids(feature_index)
            # Trim trailing padding: walk back to the last non-pad position.
            non_pad_idx = len(seq_ids) - 1
            while not seq_ids[non_pad_idx]:
                non_pad_idx -= 1
            start_logits = start_logits[:non_pad_idx]
            end_logits = end_logits[:non_pad_idx]
            # This is what will allow us to map some the positions in our logits to span of texts in the original
            # context.
            offset_mapping = features["offset_mapping"][feature_index]
            # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
            # available in the current feature.
            token_is_max_context = features.get("token_is_max_context", None)
            if token_is_max_context:
                token_is_max_context = token_is_max_context[feature_index]
            # Go through all possibilities for the `n_best_size` greater start and end logits.
            start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()
            end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
                    # to part of the input_ids that are not in the context.
                    if (
                        start_index >= len(offset_mapping)
                        or end_index >= len(offset_mapping)
                        or offset_mapping[start_index] is None
                        or offset_mapping[end_index] is None
                    ):
                        continue
                    # Don't consider answers with a length that is either = 0 or > max_answer_length.
                    if end_index <= start_index or end_index - start_index + 1 > max_answer_length:
                        continue
                    # Don't consider answer that don't have the maximum context available (if such information is
                    # provided).
                    if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False):
                        continue
                    prelim_predictions.append(
                        {
                            "start_index": start_index,
                            "end_index": end_index,
                            "offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]),
                            "score": start_logits[start_index] + end_logits[end_index],
                            "start_logit": start_logits[start_index],
                            "end_logit": end_logits[end_index],
                        }
                    )
        # Only keep the best `n_best_size` predictions.
        predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size]
        # Use the offsets to gather the answer text in the original context.
        context = example["context"]
        for pred in predictions:
            offsets = pred['offsets']
            pred["text"] = context[offsets[0] : offsets[1]]
        # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid
        # failure.
        if len(predictions) == 0:
            predictions.insert(0, {"text": "empty", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0})
        # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
        # the LogSumExp trick).
        scores = np.array([pred.pop("score") for pred in predictions])
        exp_scores = np.exp(scores - np.max(scores))
        probs = exp_scores / exp_scores.sum()
        # Include the probabilities in our predictions.
        for prob, pred in zip(probs, predictions):
            pred["probability"] = prob
        # Find the best non-empty prediction; fall back to the last entry
        # if every candidate text is empty.
        i = 0
        while i < len(predictions):
            if predictions[i]['text'] != '':
                break
            i += 1
        if i == len(predictions):
            i = len(predictions) - 1
        best_non_null_pred = predictions[i]
        all_predictions[example["id"]] = best_non_null_pred["text"]
    return all_predictions
# All methods below this line are from the official SQuAD 2.0 eval script
# https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
def normalize_answer(s):
    """Convert to lowercase and remove punctuation, articles and extra whitespace."""
    text = s.lower()
    punctuation = set(string.punctuation)
    text = ''.join(ch for ch in text if ch not in punctuation)
    text = re.sub(re.compile(r'\b(a|an|the)\b', re.UNICODE), ' ', text)
    return ' '.join(text.split())
def get_tokens(s):
    """Tokenize `s` by normalizing it and splitting on whitespace.

    Falsy input (empty string or None) yields an empty list.
    """
    return normalize_answer(s).split() if s else []
def compute_em(a_gold, a_pred):
    """Exact-match score: 1 when the two answers normalize to the same string, else 0."""
    match = normalize_answer(a_gold) == normalize_answer(a_pred)
    return 1 if match else 0
def compute_f1(a_gold, a_pred):
    """Token-level F1 between a gold and a predicted answer (0..1)."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    overlap = Counter(gold_toks) & Counter(pred_toks)
    num_same = sum(overlap.values())
    if not gold_toks or not pred_toks:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    return (2 * precision * recall) / (precision + recall)
| [
"logging.getLogger",
"re.compile",
"numpy.argsort",
"os.path.exists",
"pathlib.Path",
"numpy.max",
"logging.FileHandler",
"numpy.random.seed",
"collections.OrderedDict",
"pickle.load",
"re.sub",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"pickle.dump",
"os.makedirs",
"logging.F... | [((306, 323), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (317, 323), False, 'import random\n'), ((328, 348), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (342, 348), True, 'import numpy as np\n'), ((353, 376), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (370, 376), False, 'import torch\n'), ((381, 413), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (407, 413), False, 'import torch\n'), ((3968, 3991), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (3985, 3991), False, 'import logging\n'), ((4104, 4140), 'os.path.join', 'os.path.join', (['log_dir', 'f"""{name}.txt"""'], {}), "(log_dir, f'{name}.txt')\n", (4116, 4140), False, 'import os\n'), ((4160, 4189), 'logging.FileHandler', 'logging.FileHandler', (['log_path'], {}), '(log_path)\n', (4179, 4189), False, 'import logging\n'), ((4456, 4531), 'logging.Formatter', 'logging.Formatter', (['"""[%(asctime)s] %(message)s"""'], {'datefmt': '"""%m.%d.%y %H:%M:%S"""'}), "('[%(asctime)s] %(message)s', datefmt='%m.%d.%y %H:%M:%S')\n", (4473, 4531), False, 'import logging\n'), ((4641, 4716), 'logging.Formatter', 'logging.Formatter', (['"""[%(asctime)s] %(message)s"""'], {'datefmt': '"""%m.%d.%y %H:%M:%S"""'}), "('[%(asctime)s] %(message)s', datefmt='%m.%d.%y %H:%M:%S')\n", (4658, 4716), False, 'import logging\n'), ((6248, 6258), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (6252, 6258), False, 'from pathlib import Path\n'), ((7442, 7453), 'collections.defaultdict', 'ddict', (['list'], {}), '(list)\n', (7447, 7453), True, 'from collections import Counter, OrderedDict, defaultdict as ddict\n'), ((12359, 12370), 'collections.defaultdict', 'ddict', (['list'], {}), '(list)\n', (12364, 12370), True, 'from collections import Counter, OrderedDict, defaultdict as ddict\n'), ((12552, 12565), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12563, 12565), False, 'from collections 
import Counter, OrderedDict, defaultdict as ddict\n'), ((484, 498), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (495, 498), False, 'import pickle\n'), ((583, 602), 'pickle.dump', 'pickle.dump', (['obj', 'f'], {}), '(obj, f)\n', (594, 602), False, 'import pickle\n'), ((2133, 2176), 'os.path.join', 'os.path.join', (['base_dir', 'f"""{name}-{uid:02d}"""'], {}), "(base_dir, f'{name}-{uid:02d}')\n", (2145, 2176), False, 'import os\n'), ((6313, 6325), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6322, 6325), False, 'import json\n'), ((17999, 18041), 're.compile', 're.compile', (['"""\\\\b(a|an|the)\\\\b"""', 're.UNICODE'], {}), "('\\\\b(a|an|the)\\\\b', re.UNICODE)\n", (18009, 18041), False, 'import re\n'), ((18056, 18080), 're.sub', 're.sub', (['regex', '""" """', 'text'], {}), "(regex, ' ', text)\n", (18062, 18080), False, 'import re\n'), ((18712, 18730), 'collections.Counter', 'Counter', (['gold_toks'], {}), '(gold_toks)\n', (18719, 18730), False, 'from collections import Counter, OrderedDict, defaultdict as ddict\n'), ((18733, 18751), 'collections.Counter', 'Counter', (['pred_toks'], {}), '(pred_toks)\n', (18740, 18751), False, 'from collections import Counter, OrderedDict, defaultdict as ddict\n'), ((2192, 2216), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (2206, 2216), False, 'import os\n'), ((2230, 2251), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (2241, 2251), False, 'import os\n'), ((6061, 6099), 'torch.tensor', 'torch.tensor', (['self.encodings[key][idx]'], {}), '(self.encodings[key][idx])\n', (6073, 6099), False, 'import torch\n'), ((3754, 3769), 'tqdm.notebook.tqdm.write', 'tqdm.write', (['msg'], {}), '(msg)\n', (3764, 3769), False, 'from tqdm.notebook import tqdm\n'), ((17013, 17027), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (17019, 17027), True, 'import numpy as np\n'), ((14231, 14255), 'numpy.argsort', 'np.argsort', (['start_logits'], {}), '(start_logits)\n', (14241, 14255), True, 
'import numpy as np\n'), ((14319, 14341), 'numpy.argsort', 'np.argsort', (['end_logits'], {}), '(end_logits)\n', (14329, 14341), True, 'import numpy as np\n')] |
import torch
from torch.autograd import Variable
from typing import Dict
from torch.nn import Module, Linear, Dropout, Sequential, Embedding, LogSigmoid, ReLU
from torch.nn.functional import sigmoid, logsigmoid, softmax, normalize, log_softmax
from embeddings.representation import SpanRepresentation
from torch.nn.init import xavier_normal
from embeddings.util import pretrained_embeddings_or_xavier
import numpy as np
from torch.nn.functional import cosine_similarity
def get_type_file(filename, vocab, indxs=False):
    """Load a type matrix from `filename` and pad it to `len(vocab)` rows.

    Missing rows are prepended as ones; when `indxs` is True the loaded
    values are treated as index offsets and shifted by the padding amount
    before concatenation. Returns a torch tensor sharing the numpy memory.
    """
    data = np.load(filename)
    pad_rows = len(vocab) - data.shape[0]
    if pad_rows > 0:
        if indxs:
            data = data + pad_rows
        padding = np.ones((pad_rows, data.shape[1]), dtype=data.dtype)
        data = np.concatenate((padding, data))
    return torch.from_numpy(data)
class Pair2Vec(Module):
    def __init__(self, config, arg_vocab, rel_vocab):
        """Build the pair-embedding model from a config namespace.

        Args:
            config: attribute-style config; required fields are
                compositional_rels, n_args, d_embed, d_rels and
                relation_predictor, everything else is optional with the
                defaults read via getattr below.
            arg_vocab: argument vocabulary (needs .stoi and optionally
                .vectors with pretrained embeddings).
            rel_vocab: relation vocabulary passed to SpanRepresentation.
        """
        super(Pair2Vec, self).__init__()
        self.config = config
        self.arg_vocab = arg_vocab
        self.rel_vocab = rel_vocab
        self.compositional_rels = config.compositional_rels
        self.normalize_pretrained = getattr(config, 'normalize_pretrained', False)
        self.separate_mlr = getattr(config, 'separate_mlr', False)
        self.positional_rels = getattr(config, 'positional_rels', False)
        # Optional type matrices are loaded eagerly onto the GPU.
        self.type_scores = get_type_file(config.type_scores_file, arg_vocab).cuda() if hasattr(config, 'type_scores_file') else None
        self.type_indices = get_type_file(config.type_indices_file, arg_vocab, indxs=True).cuda() if hasattr(config, 'type_indices_file') else None
        self.pad = arg_vocab.stoi['<pad>']
        # Scoring function between predicted and observed relation vectors.
        score_fn_str = getattr(config, 'score_function', 'dot_product')
        if score_fn_str == 'dot_product':
            self.score = (lambda predicted, observed : (predicted * observed).sum(-1))
        elif score_fn_str == 'cosine':
            self.score = (lambda predicted, observed : cosine_similarity(predicted, observed, dim=1, eps=1e-8))
        else:
            raise NotImplementedError()
        self.num_neg_samples = getattr(config, 'num_neg_samples', 1)
        self.num_sampled_relations = getattr(config, 'num_sampled_relations', 1)
        self.subword_vocab_file = getattr(config, 'subword_vocab_file', None)
        # Per-term weights for the components of the training loss.
        self.loss_weights = [('positive_loss', getattr(config, 'positive_loss', 1.0)),
                             ('negative_rel_loss', getattr(config, 'negative_rel_loss', 1.0)),
                             ('negative_subject_loss', getattr(config, 'negative_subject_loss', 1.0)),
                             ('negative_object_loss', getattr(config, 'negative_object_loss', 1.0))]
        if self.type_scores is not None:
            self.loss_weights += [('type_subject_loss', getattr(config, 'type_subject_loss', 0.3)), ('type_object_loss', getattr(config, 'type_object_loss', 0.3))]
        # Left/right argument embeddings optionally share one matrix.
        self.shared_arg_embeddings = getattr(config, 'shared_arg_embeddings', True)
        self.represent_arguments = Embedding(config.n_args, config.d_embed)
        self.represent_left_argument = lambda x : self.represent_arguments(x)
        self.represent_right_argument = (lambda x : self.represent_arguments(x)) if self.shared_arg_embeddings else Embedding(config.n_args, config.d_embed)
        if config.compositional_rels:
            self.represent_relations = SpanRepresentation(config, config.d_rels, rel_vocab)
        else:
            raise NotImplementedError()
        if config.relation_predictor == 'multiplication':
            self.predict_relations = lambda x, y: x * y
        elif config.relation_predictor == 'mlp':
            self.predict_relations = MLP(config)
        else:
            raise Exception('Unknown relation predictor: ' + config.relation_predictor)
        self.init()
    def to_tensors(self, fields):
        """Lazily yield each field, pairing multi-dimensional ones with a mask.

        For fields with more than one dimension (token-sequence fields,
        only when compositional relations are enabled) the yielded item is
        (field, mask) where mask is 1.0 at non-pad positions; all other
        fields pass through unchanged. Returns a generator.
        """
        return ((field, 1.0 - torch.eq(field, self.pad).float()) if (len(field.size()) > 1 and (self.compositional_rels)) else field for field in fields)
    def init(self):
        """Initialize the argument-embedding matrices.

        When pretrained vectors are available on the vocab they are copied
        into the leading columns of each Embedding weight (optionally
        L2-normalized); otherwise the default reset is applied. The
        right-argument entry is skipped when it is a lambda sharing the
        left matrix (the isinstance check below).
        """
        for arg_matrix in [self.represent_arguments, self.represent_right_argument]:
            if isinstance(arg_matrix, Embedding):
                if self.arg_vocab.vectors is not None:
                    # Only the first pretrained.size(1) columns are overwritten;
                    # any extra embedding columns keep their random init.
                    pretrained = normalize(self.arg_vocab.vectors, dim=-1) if self.normalize_pretrained else self.arg_vocab.vectors
                    arg_matrix.weight.data[:, :pretrained.size(1)].copy_(pretrained)
                    print('Copied pretrained vecs for argument matrix')
                else:
                    arg_matrix.reset_parameters()
def forward(self, batch):
    """Score observed vs. negatively-sampled relations for a batch of
    (subject, object) argument pairs and accumulate the weighted losses.

    Returns:
        (predicted_relations, loss, output_dict) where ``output_dict``
        holds every individual loss term plus sigmoid probabilities for
        the observed and sampled relation scores.
    """
    # Batches without argument negatives carry only 4 fields; pad to 6.
    if len(batch) == 4:
        batch = batch + (None, None)
    subjects, objects, observed_relations, sampled_relations, sampled_subjects, sampled_objects = batch
    # Flatten sampled relations to (num_sampled * batch, rel_seq_len).
    sampled_relations = sampled_relations.view(-1, observed_relations.size(1), 1).squeeze(-1)
    subjects, objects = self.to_tensors((subjects, objects))
    embedded_subjects = self.represent_left_argument(subjects)
    embedded_objects = self.represent_right_argument(objects)
    predicted_relations = self.predict_relations(embedded_subjects, embedded_objects)
    observed_relations, sampled_relations = self.to_tensors((observed_relations, sampled_relations))
    observed_relations = self.represent_relations(observed_relations)
    sampled_relations = self.represent_relations(sampled_relations)
    # score = lambda predicted, observed : (predicted * observed).sum(-1)
    # Tile predictions so each one is scored against every sampled relation.
    rep_observed_relations = observed_relations.repeat(self.num_sampled_relations, 1)
    rep_predicted_relations = predicted_relations.repeat(self.num_sampled_relations, 1)
    pos_rel_scores, neg_rel_scores = self.score(predicted_relations, observed_relations), self.score(rep_predicted_relations, sampled_relations)
    output_dict = {}
    # NCE-style objective: push observed relation scores up, sampled ones down.
    output_dict['positive_loss'] = -logsigmoid(pos_rel_scores).sum()
    output_dict['negative_rel_loss'] = -logsigmoid(-neg_rel_scores).sum()
    # fake pair loss: corrupt one argument at a time and penalize high scores
    if sampled_subjects is not None and sampled_objects is not None:
        # sampled_subjects, sampled_objects = self.to_tensors((sampled_subjects, sampled_objects))
        sampled_subjects, sampled_objects = sampled_subjects.view(-1, 1).squeeze(-1), sampled_objects.view(-1, 1).squeeze(-1)
        sampled_subjects, sampled_objects = self.represent_left_argument(sampled_subjects), self.represent_right_argument(sampled_objects)
        rep_embedded_objects, rep_embedded_subjects = embedded_objects.repeat(self.num_neg_samples, 1), embedded_subjects.repeat(self.num_neg_samples, 1)
        pred_relations_for_sampled_sub = self.predict_relations(sampled_subjects, rep_embedded_objects)
        pred_relations_for_sampled_obj = self.predict_relations(rep_embedded_subjects, sampled_objects)
        rep_observed_relations = observed_relations.repeat(self.num_neg_samples, 1)
        output_dict['negative_subject_loss'] = -logsigmoid(-self.score(pred_relations_for_sampled_sub, rep_observed_relations)).sum() #/ self.num_neg_samples
        output_dict['negative_object_loss'] = -logsigmoid(-self.score(pred_relations_for_sampled_obj, rep_observed_relations)).sum() #/ self.num_neg_samples
    # Optional type-driven negatives (see get_type_sampled_arguments).
    if self.type_scores is not None:
        # loss_weights += [('type_subject_loss', 0.3), ('type_object_loss', 0.3)]
        method = 'uniform'
        type_sampled_subjects, type_sampled_objects = self.get_type_sampled_arguments(subjects, method), self.get_type_sampled_arguments(objects, method)
        type_sampled_subjects, type_sampled_objects = self.represent_left_argument(type_sampled_subjects), self.represent_right_argument(type_sampled_objects)
        pred_relations_for_type_sampled_sub = self.predict_relations(type_sampled_subjects, embedded_objects)
        pred_relations_for_type_sampled_obj = self.predict_relations(embedded_subjects, type_sampled_objects)
        output_dict['type_subject_loss'] = -logsigmoid(-self.score(pred_relations_for_type_sampled_sub, observed_relations)).sum()
        output_dict['type_object_loss'] = -logsigmoid(-self.score(pred_relations_for_type_sampled_obj, observed_relations)).sum()
    # Weighted sum over whichever loss terms are configured in loss_weights.
    # NOTE(review): loss_weights always includes the negative_*_loss keys, so
    # this appears to assume the 6-field batch path was taken — confirm.
    loss = 0.0
    for loss_name, weight in self.loss_weights:
        loss += weight * output_dict[loss_name]
    output_dict['observed_probabilities'] = sigmoid(pos_rel_scores)
    output_dict['sampled_probabilities'] = sigmoid(neg_rel_scores)
    return predicted_relations, loss, output_dict
def get_type_sampled_arguments(self, arguments, method='uniform'):
    """Sample one same-type replacement id for each argument in the batch.

    ``self.type_indices`` maps an argument id to a row of candidate ids
    that share its type. With ``method='unigram'`` the candidate column is
    drawn proportionally to ``self.type_scores``; any other method draws
    the column uniformly.
    NOTE(review): both branches call ``.cuda()``, so this path requires a
    CUDA-enabled build — confirm before running on CPU.

    Returns:
        A Variable of sampled argument ids (no gradient).
    """
    argument_indices = torch.index_select(self.type_indices, 0, arguments.data)
    if method == 'unigram':
        argument_scores = torch.index_select(self.type_scores, 0, arguments.data)
        sampled_idx_idxs = torch.multinomial(argument_scores, 1, replacement=True).squeeze(1).cuda()
        sampled_idxs = torch.gather(argument_indices, 1, sampled_idx_idxs.unsqueeze(1)).squeeze(1)
    else:
        # sampled_idx_idxs = torch.randint(0, self.type_scores.size(1), size=arguments.size(0), replacement=True)
        # Uniform draw over the candidate columns of type_indices.
        sampled_idx_idxs = torch.LongTensor(arguments.size(0)).random_(0, self.type_scores.size(1)).cuda()
        sampled_idxs = torch.gather(argument_indices, 1, sampled_idx_idxs.unsqueeze(1)).squeeze(1)
    return Variable(sampled_idxs, requires_grad=False)
def score(self, predicted, observed):
    """Row-wise dot product between predicted and observed relation vectors.

    Both inputs are (batch, dim); the result is a (batch,) tensor of scores.
    """
    lhs = predicted.unsqueeze(1)    # (batch, 1, dim)
    rhs = observed.unsqueeze(2)     # (batch, dim, 1)
    products = torch.bmm(lhs, rhs)  # (batch, 1, 1)
    return products.squeeze(-1).squeeze(-1)
class MLP(Module):
    """Two-argument MLP relation predictor.

    Concatenates ``[subject, object, subject * object]`` and maps it
    through ``mlp_layers`` Linear layers (default 4, minimum 2) with
    ReLU non-linearities and dropout, producing a ``d_rels``-dimensional
    relation vector.

    Fix/generalization: the original duplicated the Sequential
    construction for layers == 2/3/4 and raised for anything else; the
    stack is now built with a loop for any ``layers >= 2`` (identical
    module sequences for 2/3/4).
    """

    def __init__(self, config):
        """Build the layer stack from ``config`` (``d_args``, ``d_rels``,
        ``dropout``; optional ``mlp_layers`` and ``normalize_args``)."""
        super(MLP, self).__init__()
        self.dropout = Dropout(p=config.dropout)
        self.nonlinearity = ReLU()
        # Optionally L2-normalize both argument embeddings before combining.
        self.normalize = normalize if getattr(config, 'normalize_args', False) else (lambda x: x)
        layers = getattr(config, "mlp_layers", 4)
        if layers < 2:
            raise NotImplementedError()
        # Input block consumes the concatenated [subj, obj, subj*obj] triple.
        modules = [self.dropout, Linear(3 * config.d_args, config.d_args), self.nonlinearity]
        # Hidden blocks: (layers - 2) of d_args -> d_args.
        for _ in range(layers - 2):
            modules += [self.dropout, Linear(config.d_args, config.d_args), self.nonlinearity]
        # Final projection into relation space.
        modules += [self.dropout, Linear(config.d_args, config.d_rels)]
        self.mlp = Sequential(*modules)

    def forward(self, subjects, objects):
        """Predict a relation embedding for each (subject, object) row."""
        subjects = self.normalize(subjects)
        objects = self.normalize(objects)
        return self.mlp(torch.cat([subjects, objects, subjects * objects], dim=-1))
| [
"torch.index_select",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.autograd.Variable",
"embeddings.representation.SpanRepresentation",
"torch.multinomial",
"torch.nn.functional.cosine_similarity",
"torch.from_numpy",
"torch.nn.functional.sigmoid",
"torch.nn.functional.normalize",
"torch.cat",
"... | [((531, 548), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (538, 548), True, 'import numpy as np\n'), ((782, 804), 'torch.from_numpy', 'torch.from_numpy', (['data'], {}), '(data)\n', (798, 804), False, 'import torch\n'), ((2987, 3027), 'torch.nn.Embedding', 'Embedding', (['config.n_args', 'config.d_embed'], {}), '(config.n_args, config.d_embed)\n', (2996, 3027), False, 'from torch.nn import Module, Linear, Dropout, Sequential, Embedding, LogSigmoid, ReLU\n'), ((8384, 8407), 'torch.nn.functional.sigmoid', 'sigmoid', (['pos_rel_scores'], {}), '(pos_rel_scores)\n', (8391, 8407), False, 'from torch.nn.functional import sigmoid, logsigmoid, softmax, normalize, log_softmax\n'), ((8455, 8478), 'torch.nn.functional.sigmoid', 'sigmoid', (['neg_rel_scores'], {}), '(neg_rel_scores)\n', (8462, 8478), False, 'from torch.nn.functional import sigmoid, logsigmoid, softmax, normalize, log_softmax\n'), ((8632, 8688), 'torch.index_select', 'torch.index_select', (['self.type_indices', '(0)', 'arguments.data'], {}), '(self.type_indices, 0, arguments.data)\n', (8650, 8688), False, 'import torch\n'), ((9376, 9419), 'torch.autograd.Variable', 'Variable', (['sampled_idxs'], {'requires_grad': '(False)'}), '(sampled_idxs, requires_grad=False)\n', (9384, 9419), False, 'from torch.autograd import Variable\n'), ((9672, 9697), 'torch.nn.Dropout', 'Dropout', ([], {'p': 'config.dropout'}), '(p=config.dropout)\n', (9679, 9697), False, 'from torch.nn import Module, Linear, Dropout, Sequential, Embedding, LogSigmoid, ReLU\n'), ((9727, 9733), 'torch.nn.ReLU', 'ReLU', ([], {}), '()\n', (9731, 9733), False, 'from torch.nn import Module, Linear, Dropout, Sequential, Embedding, LogSigmoid, ReLU\n'), ((3222, 3262), 'torch.nn.Embedding', 'Embedding', (['config.n_args', 'config.d_embed'], {}), '(config.n_args, config.d_embed)\n', (3231, 3262), False, 'from torch.nn import Module, Linear, Dropout, Sequential, Embedding, LogSigmoid, ReLU\n'), ((3340, 3392), 
'embeddings.representation.SpanRepresentation', 'SpanRepresentation', (['config', 'config.d_rels', 'rel_vocab'], {}), '(config, config.d_rels, rel_vocab)\n', (3358, 3392), False, 'from embeddings.representation import SpanRepresentation\n'), ((8751, 8806), 'torch.index_select', 'torch.index_select', (['self.type_scores', '(0)', 'arguments.data'], {}), '(self.type_scores, 0, arguments.data)\n', (8769, 8806), False, 'import torch\n'), ((10866, 10924), 'torch.cat', 'torch.cat', (['[subjects, objects, subjects * objects]'], {'dim': '(-1)'}), '([subjects, objects, subjects * objects], dim=-1)\n', (10875, 10924), False, 'import torch\n'), ((9955, 9995), 'torch.nn.Linear', 'Linear', (['(3 * config.d_args)', 'config.d_args'], {}), '(3 * config.d_args, config.d_args)\n', (9961, 9995), False, 'from torch.nn import Module, Linear, Dropout, Sequential, Embedding, LogSigmoid, ReLU\n'), ((10030, 10066), 'torch.nn.Linear', 'Linear', (['config.d_args', 'config.d_rels'], {}), '(config.d_args, config.d_rels)\n', (10036, 10066), False, 'from torch.nn import Module, Linear, Dropout, Sequential, Embedding, LogSigmoid, ReLU\n'), ((1928, 1984), 'torch.nn.functional.cosine_similarity', 'cosine_similarity', (['predicted', 'observed'], {'dim': '(1)', 'eps': '(1e-08)'}), '(predicted, observed, dim=1, eps=1e-08)\n', (1945, 1984), False, 'from torch.nn.functional import cosine_similarity\n'), ((5865, 5891), 'torch.nn.functional.logsigmoid', 'logsigmoid', (['pos_rel_scores'], {}), '(pos_rel_scores)\n', (5875, 5891), False, 'from torch.nn.functional import sigmoid, logsigmoid, softmax, normalize, log_softmax\n'), ((5942, 5969), 'torch.nn.functional.logsigmoid', 'logsigmoid', (['(-neg_rel_scores)'], {}), '(-neg_rel_scores)\n', (5952, 5969), False, 'from torch.nn.functional import sigmoid, logsigmoid, softmax, normalize, log_softmax\n'), ((10142, 10182), 'torch.nn.Linear', 'Linear', (['(3 * config.d_args)', 'config.d_args'], {}), '(3 * config.d_args, config.d_args)\n', (10148, 10182), False, 'from 
torch.nn import Module, Linear, Dropout, Sequential, Embedding, LogSigmoid, ReLU\n'), ((10217, 10253), 'torch.nn.Linear', 'Linear', (['config.d_args', 'config.d_args'], {}), '(config.d_args, config.d_args)\n', (10223, 10253), False, 'from torch.nn import Module, Linear, Dropout, Sequential, Embedding, LogSigmoid, ReLU\n'), ((10288, 10324), 'torch.nn.Linear', 'Linear', (['config.d_args', 'config.d_rels'], {}), '(config.d_args, config.d_rels)\n', (10294, 10324), False, 'from torch.nn import Module, Linear, Dropout, Sequential, Embedding, LogSigmoid, ReLU\n'), ((4215, 4256), 'torch.nn.functional.normalize', 'normalize', (['self.arg_vocab.vectors'], {'dim': '(-1)'}), '(self.arg_vocab.vectors, dim=-1)\n', (4224, 4256), False, 'from torch.nn.functional import sigmoid, logsigmoid, softmax, normalize, log_softmax\n'), ((10400, 10440), 'torch.nn.Linear', 'Linear', (['(3 * config.d_args)', 'config.d_args'], {}), '(3 * config.d_args, config.d_args)\n', (10406, 10440), False, 'from torch.nn import Module, Linear, Dropout, Sequential, Embedding, LogSigmoid, ReLU\n'), ((10475, 10511), 'torch.nn.Linear', 'Linear', (['config.d_args', 'config.d_args'], {}), '(config.d_args, config.d_args)\n', (10481, 10511), False, 'from torch.nn import Module, Linear, Dropout, Sequential, Embedding, LogSigmoid, ReLU\n'), ((10546, 10582), 'torch.nn.Linear', 'Linear', (['config.d_args', 'config.d_args'], {}), '(config.d_args, config.d_args)\n', (10552, 10582), False, 'from torch.nn import Module, Linear, Dropout, Sequential, Embedding, LogSigmoid, ReLU\n'), ((10617, 10653), 'torch.nn.Linear', 'Linear', (['config.d_args', 'config.d_rels'], {}), '(config.d_args, config.d_rels)\n', (10623, 10653), False, 'from torch.nn import Module, Linear, Dropout, Sequential, Embedding, LogSigmoid, ReLU\n'), ((8838, 8893), 'torch.multinomial', 'torch.multinomial', (['argument_scores', '(1)'], {'replacement': '(True)'}), '(argument_scores, 1, replacement=True)\n', (8855, 8893), False, 'import torch\n'), ((3847, 
3872), 'torch.eq', 'torch.eq', (['field', 'self.pad'], {}), '(field, self.pad)\n', (3855, 3872), False, 'import torch\n')] |
#!/usr/bin/env python
# coding: utf-8
# Jupyter export of `Firefly/ntbks/multiple_datasets_tutorial.ipynb`:
# tutorial on managing multiple datasets with the Firefly visualizer.
# In[1]:
# Notebook conveniences: auto-reload edited modules on every cell run.
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
from IPython.display import YouTubeVideo
# A recording of this jupyter notebook in action is available at:
# In[2]:
YouTubeVideo("TMq3IvnxGY8")
# In[3]:
import numpy as np
import os
import sys
# Make the local Firefly checkout importable (hard-coded developer path).
sys.path.insert(0,'/Users/agurvich/research/repos/Firefly/src')
from Firefly.data_reader import ArrayReader
# # Tutorial notebook: Managing multiple datasets with Firefly
# There are two ways to manage multiple datasets with Firefly
# 1. listing multiple entries in startup.json
# 2. creating a "standalone" iteration of Firefly
#
# 1 and 2 can be combined so that visitors to different "standalone" iterations of Firefly can select between different sets of multiple datasets using a dropdown see <a href="https://agurvich.github.io/firefly_versions">this example</a>.
# ## Editing the entries of `startup.json`
# When the Firefly webapp starts up it looks for a `Firefly/static/data/startup.json` file to tell it which dataset to display. If only a single entry is present then it will automatically begin loading that dataset. If multiple entries are listed then it will present the user with a dropdown box to select which dataset to load. See the <a href="https://ageller.github.io/Firefly/docs/build/html/data_reader/multiple_datasets.html">documentation for managing multiple datasets</a> for how to format the `startup.json` file to list multiple entries manually. We provide a method of easily adding datasets to the `startup.json` file using the `write_startup` keyword argument of the `Firefly.data_reader.Reader` (sub-)class(es).
# In[4]:
## let's create some sample data, a grid of points in a 3d cube
my_coords = np.linspace(-10,10,20)
xs,ys,zs = np.meshgrid(my_coords,my_coords,my_coords)
xs,ys,zs = xs.flatten(),ys.flatten(),zs.flatten()
coords = np.array([xs,ys,zs]).T
## we'll pick some random field values to demonstrate filtering/colormapping
fields = np.random.random(size=xs.size)
# We'll overwrite whatever file is existing with a new `startup.json` with only 1 entry in it. Then we'll append a second entry. Then we'll create a reader and specify that it should not be added to the `startup.json` file.
# In[5]:
## initialize an ArrayReader
reader = ArrayReader(
    coordinates=[coords[:-1],coords], ## pass in two particle groups as a demonstration (just copies of our sample data)
    fields=[[],[fields,fields]], ## field data for each particle group, 0 fields for 1 and 2 repeated fields for the other.
    write_startup=True) ## overwrite the existing startup.json file
## initialize a second ArrayReader
fake_reader = ArrayReader(
    coordinates=[coords[:-1],coords], ## pass in two particle groups as a demonstration (just copies of our sample data)
    fields=[[],[fields,fields]],## field data for each particle group, 0 fields for 1 and 2 repeated fields for the other.
    JSONdir="FakeData",
    write_startup='append') ## append this entry to the startup.json file if it doesn't already exist
## initialize a THIRD ArrayReader
null_reader = ArrayReader(
    coordinates=[coords[:-1],coords], ## pass in two particle groups as a demonstration (just copies of our sample data)
    fields=[[],[fields,fields]],## field data for each particle group, 0 fields for 1 and 2 repeated fields for the other.
    JSONdir="NullData",
    write_startup=False) ## do not add this reader to the startup.json file
# Let's read the content of the `startup.json` file:
# In[6]:
get_ipython().system('cat /Users/agurvich/research/repos/Firefly/src/Firefly/static/data/startup.json')
# Notice that the "NullData" `JSONdir` is not listed because we set `write_startup=False`.
# ## Creating a standalone iteration of Firefly
# You can copy the necessary Firefly source files by creating a `Reader` object containing your data and using the `copyFireflySourceToTarget`.
# We've also included a script that will automatically create a new Github repository and enable GitHub pages so that your data can be visited by users over the internet via URL.
# For instructions on how to configure this feature and details for copying the Firefly source see the <a href="https://ageller.github.io/Firefly/docs/build/html/data_reader/multiple_datasets.html">documentation for managing multiple datasets</a>.
# In[7]:
reader.copyFireflySourceToTarget(init_gh_pages=False)
# Let's read the contents of the new `my_Firefly` directory:
# In[8]:
get_ipython().system('ls /Users/agurvich/my_Firefly/')
# In[9]:
get_ipython().system('ls /Users/agurvich/my_Firefly/static/data/')
| [
"sys.path.insert",
"numpy.random.random",
"Firefly.data_reader.ArrayReader",
"numpy.array",
"numpy.linspace",
"IPython.display.YouTubeVideo",
"numpy.meshgrid"
] | [((326, 353), 'IPython.display.YouTubeVideo', 'YouTubeVideo', (['"""TMq3IvnxGY8"""'], {}), "('TMq3IvnxGY8')\n", (338, 353), False, 'from IPython.display import YouTubeVideo\n'), ((408, 472), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/Users/agurvich/research/repos/Firefly/src"""'], {}), "(0, '/Users/agurvich/research/repos/Firefly/src')\n", (423, 472), False, 'import sys\n'), ((1843, 1867), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(20)'], {}), '(-10, 10, 20)\n', (1854, 1867), True, 'import numpy as np\n'), ((1877, 1921), 'numpy.meshgrid', 'np.meshgrid', (['my_coords', 'my_coords', 'my_coords'], {}), '(my_coords, my_coords, my_coords)\n', (1888, 1921), True, 'import numpy as np\n'), ((2089, 2119), 'numpy.random.random', 'np.random.random', ([], {'size': 'xs.size'}), '(size=xs.size)\n', (2105, 2119), True, 'import numpy as np\n'), ((2397, 2499), 'Firefly.data_reader.ArrayReader', 'ArrayReader', ([], {'coordinates': '[coords[:-1], coords]', 'fields': '[[], [fields, fields]]', 'write_startup': '(True)'}), '(coordinates=[coords[:-1], coords], fields=[[], [fields, fields]\n ], write_startup=True)\n', (2408, 2499), False, 'from Firefly.data_reader import ArrayReader\n'), ((2773, 2899), 'Firefly.data_reader.ArrayReader', 'ArrayReader', ([], {'coordinates': '[coords[:-1], coords]', 'fields': '[[], [fields, fields]]', 'JSONdir': '"""FakeData"""', 'write_startup': '"""append"""'}), "(coordinates=[coords[:-1], coords], fields=[[], [fields, fields]\n ], JSONdir='FakeData', write_startup='append')\n", (2784, 2899), False, 'from Firefly.data_reader import ArrayReader\n'), ((3206, 3329), 'Firefly.data_reader.ArrayReader', 'ArrayReader', ([], {'coordinates': '[coords[:-1], coords]', 'fields': '[[], [fields, fields]]', 'JSONdir': '"""NullData"""', 'write_startup': '(False)'}), "(coordinates=[coords[:-1], coords], fields=[[], [fields, fields]\n ], JSONdir='NullData', write_startup=False)\n", (3217, 3329), False, 'from Firefly.data_reader import 
ArrayReader\n'), ((1979, 2001), 'numpy.array', 'np.array', (['[xs, ys, zs]'], {}), '([xs, ys, zs])\n', (1987, 2001), True, 'import numpy as np\n')] |
################################################################################
#                                                                              #
# ONE-ZONE OPTICALLY THIN BREMSSTRAHLUNG COOLING                               #
#                                                                              #
# Integration test: runs the bhlight 'brem' problem, reads the dump files,     #
# and compares the simulated electron temperature against an ODE solution      #
# of optically thin bremsstrahlung cooling.                                    #
################################################################################
from __future__ import print_function, division
import os
import sys; sys.dont_write_bytecode = True
sys.path.insert(0, '../script/')
sys.path.insert(0, '../script/analysis')
from subprocess import call
import glob
import numpy as np
import hdf5_to_dict as io
import units
cgs = units.get_cgs()  # CGS physical constants (ME, CL, KBOL, QE, HPL, ...)
import util
from bhlight import bcall
TMP_DIR = 'TMP'
util.safe_remove(TMP_DIR)
PROBLEM = 'brem'
AUTO = '-auto' in sys.argv   # headless mode: pickle results instead of plotting
FAST = '-fast' in sys.argv   # shortened run for quick smoke tests
TF = 2.56 if FAST else 1.e8  # final time (s) fed to the run in fast mode
if AUTO:
    import pickle
else:
    import matplotlib
    matplotlib.use('Agg')  # non-interactive backend; we only save to file
    import matplotlib.pyplot as plt
    import pylab as pl
os.chdir('../prob/' + PROBLEM)
# COMPILE CODE
call([sys.executable, 'build.py', '-dir', TMP_DIR])
os.chdir('../../test')
call(['mv', '../prob/' + PROBLEM + '/' + TMP_DIR, './'])
# RUN EXECUTABLE
os.chdir(TMP_DIR)
if FAST:
    util.change_rparm('tf',str(TF),'param_template.dat')
bcall(['./bhlight', '-p', 'param_template.dat'])
os.chdir('../')
# READ SIMULATION OUTPUT
dfiles = np.sort(glob.glob(os.path.join(TMP_DIR,'')+'/dumps/dump*.h5'))
Nd = len(dfiles)
hdr = io.load_hdr(dfiles[0])
geom = io.load_geom(hdr)
t_code = np.zeros(Nd)
Te_code = np.zeros(Nd)
for n in range(Nd):
    dump = io.load_dump(dfiles[n], geom)
    t_code[n] = dump['t']*hdr['T_unit']
    # Convert dimensionless electron temperature (Thetae) to Kelvin.
    Te_code[n] = dump['Thetae'][0][0][0]*cgs['ME']*cgs['CL']**2/cgs['KBOL']
# GET ANALYTIC SOLUTION
tf = 1.e8
Te0 = 1.e8  # initial electron temperature (K)
dump = io.load_dump(dfiles[0], geom)
ne = dump['RHO'].mean()*hdr['Ne_unit']  # electron number density (cm^-3)
gam = 5./3.
#N = 5.4e-39 # cm^3 K^1/2 s^-1 Sr^-1 Hz^-1
t_sol = np.linspace(0, tf, 1024)
#Te0 = Te_code[0]
from scipy.integrate import odeint
def func(Te, t):
    # dTe/dt for optically thin bremsstrahlung cooling of a pure
    # electron-ion plasma; gff is the Gaunt factor, rel a relativistic
    # correction term.
    gff = 1.2
    rel = 1. + 4.4e-10*Te
    J = np.sqrt(2*np.pi*cgs['KBOL']*Te/(3*cgs['ME']))
    J *= 2**5*np.pi*cgs['QE']**6/(3*cgs['HPL']*cgs['ME']*cgs['CL']**3)
    J *= ne**2*gff*rel
    return -(gam-1.)*J/(2.*ne*cgs['KBOL'])
Te_sol = odeint(func, Te0, t_sol)
#Te_sol = Te0*(1. - t_sol/tf)**2.
if AUTO:
    data = {}
    data['SOL'] = [t_sol, Te_sol]
    data['CODE'] = [t_code[:int(0.7*len(t_code))], Te_code[:int(0.7*len(Te_code))]]
    pickle.dump(data, open('data.p', 'wb'))
    # CLEAN UP
    util.safe_remove(TMP_DIR)
    sys.exit()
# MAKE FIGURE
code_col = 'r'; code_ls = ''; code_mrk = '.'
sol_col = 'k'; sol_ls = '-'; sol_mrk = ''
fig = plt.figure(figsize=(16.18,10))
ax = fig.add_subplot(1,1,1)
ax.plot(t_code, Te_code, color=code_col, linestyle=code_ls, marker=code_mrk)
ax.plot(t_sol, Te_sol, color=sol_col, linestyle=sol_ls, marker=sol_mrk)
plt.xlabel('t (s)'); plt.ylabel('Te (K)')
plt.xlim([0,256*hdr['T_unit']]); plt.ylim([0, 1.1e8])
plt.savefig('brem.png', bbox_inches='tight')
# CLEAN UP
util.safe_remove(TMP_DIR)
| [
"sys.path.insert",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"sys.exit",
"matplotlib.pyplot.xlabel",
"numpy.linspace",
"subprocess.call",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"hdf5_to_dict.load_hdr",
"scipy.integrate.odeint",
"matplotlib.use",
"util.safe_remove",
"matplot... | [((507, 539), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../script/"""'], {}), "(0, '../script/')\n", (522, 539), False, 'import sys\n'), ((540, 580), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../script/analysis"""'], {}), "(0, '../script/analysis')\n", (555, 580), False, 'import sys\n'), ((685, 700), 'units.get_cgs', 'units.get_cgs', ([], {}), '()\n', (698, 700), False, 'import units\n'), ((756, 781), 'util.safe_remove', 'util.safe_remove', (['TMP_DIR'], {}), '(TMP_DIR)\n', (772, 781), False, 'import util\n'), ((1012, 1042), 'os.chdir', 'os.chdir', (["('../prob/' + PROBLEM)"], {}), "('../prob/' + PROBLEM)\n", (1020, 1042), False, 'import os\n'), ((1059, 1110), 'subprocess.call', 'call', (["[sys.executable, 'build.py', '-dir', TMP_DIR]"], {}), "([sys.executable, 'build.py', '-dir', TMP_DIR])\n", (1063, 1110), False, 'from subprocess import call\n'), ((1111, 1133), 'os.chdir', 'os.chdir', (['"""../../test"""'], {}), "('../../test')\n", (1119, 1133), False, 'import os\n'), ((1134, 1190), 'subprocess.call', 'call', (["['mv', '../prob/' + PROBLEM + '/' + TMP_DIR, './']"], {}), "(['mv', '../prob/' + PROBLEM + '/' + TMP_DIR, './'])\n", (1138, 1190), False, 'from subprocess import call\n'), ((1209, 1226), 'os.chdir', 'os.chdir', (['TMP_DIR'], {}), '(TMP_DIR)\n', (1217, 1226), False, 'import os\n'), ((1291, 1339), 'bhlight.bcall', 'bcall', (["['./bhlight', '-p', 'param_template.dat']"], {}), "(['./bhlight', '-p', 'param_template.dat'])\n", (1296, 1339), False, 'from bhlight import bcall\n'), ((1340, 1355), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (1348, 1355), False, 'import os\n'), ((1477, 1499), 'hdf5_to_dict.load_hdr', 'io.load_hdr', (['dfiles[0]'], {}), '(dfiles[0])\n', (1488, 1499), True, 'import hdf5_to_dict as io\n'), ((1507, 1524), 'hdf5_to_dict.load_geom', 'io.load_geom', (['hdr'], {}), '(hdr)\n', (1519, 1524), True, 'import hdf5_to_dict as io\n'), ((1534, 1546), 'numpy.zeros', 'np.zeros', (['Nd'], {}), '(Nd)\n', 
(1542, 1546), True, 'import numpy as np\n'), ((1557, 1569), 'numpy.zeros', 'np.zeros', (['Nd'], {}), '(Nd)\n', (1565, 1569), True, 'import numpy as np\n'), ((1794, 1823), 'hdf5_to_dict.load_dump', 'io.load_dump', (['dfiles[0]', 'geom'], {}), '(dfiles[0], geom)\n', (1806, 1823), True, 'import hdf5_to_dict as io\n'), ((1927, 1951), 'numpy.linspace', 'np.linspace', (['(0)', 'tf', '(1024)'], {}), '(0, tf, 1024)\n', (1938, 1951), True, 'import numpy as np\n'), ((2251, 2275), 'scipy.integrate.odeint', 'odeint', (['func', 'Te0', 't_sol'], {}), '(func, Te0, t_sol)\n', (2257, 2275), False, 'from scipy.integrate import odeint\n'), ((2651, 2682), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16.18, 10)'}), '(figsize=(16.18, 10))\n', (2661, 2682), True, 'import matplotlib.pyplot as plt\n'), ((2860, 2879), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t (s)"""'], {}), "('t (s)')\n", (2870, 2879), True, 'import matplotlib.pyplot as plt\n'), ((2881, 2901), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Te (K)"""'], {}), "('Te (K)')\n", (2891, 2901), True, 'import matplotlib.pyplot as plt\n'), ((2902, 2936), 'matplotlib.pyplot.xlim', 'plt.xlim', (["[0, 256 * hdr['T_unit']]"], {}), "([0, 256 * hdr['T_unit']])\n", (2910, 2936), True, 'import matplotlib.pyplot as plt\n'), ((2935, 2961), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 110000000.0]'], {}), '([0, 110000000.0])\n', (2943, 2961), True, 'import matplotlib.pyplot as plt\n'), ((2957, 3001), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""brem.png"""'], {'bbox_inches': '"""tight"""'}), "('brem.png', bbox_inches='tight')\n", (2968, 3001), True, 'import matplotlib.pyplot as plt\n'), ((3014, 3039), 'util.safe_remove', 'util.safe_remove', (['TMP_DIR'], {}), '(TMP_DIR)\n', (3030, 3039), False, 'import util\n'), ((934, 955), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (948, 955), False, 'import matplotlib\n'), ((1599, 1628), 'hdf5_to_dict.load_dump', 'io.load_dump', (['dfiles[n]', 'geom'], 
{}), '(dfiles[n], geom)\n', (1611, 1628), True, 'import hdf5_to_dict as io\n'), ((2065, 2120), 'numpy.sqrt', 'np.sqrt', (["(2 * np.pi * cgs['KBOL'] * Te / (3 * cgs['ME']))"], {}), "(2 * np.pi * cgs['KBOL'] * Te / (3 * cgs['ME']))\n", (2072, 2120), True, 'import numpy as np\n'), ((2504, 2529), 'util.safe_remove', 'util.safe_remove', (['TMP_DIR'], {}), '(TMP_DIR)\n', (2520, 2529), False, 'import util\n'), ((2532, 2542), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2540, 2542), False, 'import sys\n'), ((1409, 1434), 'os.path.join', 'os.path.join', (['TMP_DIR', '""""""'], {}), "(TMP_DIR, '')\n", (1421, 1434), False, 'import os\n')] |
# @Time : 2020/11/14
# @Author : <NAME>, <NAME>
# @Email : <EMAIL>
# UPDATE
# @Time : 2021/4/12
# @Author : <NAME>
# @Email : <EMAIL>
"""
textbox.evaluator.abstract_evaluator
#####################################
"""
import numpy as np
class AbstractEvaluator(object):
    """:class:`AbstractEvaluator` is an abstract object which supports
    the evaluation of the model. It is called by :class:`Trainer`.

    Note:
        If you want to inherit this class and implement your own evaluator class,
        you must implement the following functions.

    Args:
        config (Config): The config of evaluator.
    """

    def evaluate(self, generate_corpus, reference_corpus):
        r"""Compute every metric over the corpora and report its mean.

        Args:
            generate_corpus: the generated corpus
            reference_corpus: the referenced corpus

        Returns:
            dict: mean of each metric's per-sample scores, rounded to 4
            decimals, such as ``{metric-1: xxx}``
        """
        metric_dict = {}
        info_dict = self._calc_metrics_info(generate_corpus=generate_corpus, reference_corpus=reference_corpus)
        # Each metric maps to a list of per-sample scores; report the mean.
        for key, per_sample_scores in info_dict.items():
            metric_dict[key] = round(np.mean(per_sample_scores), 4)
        return metric_dict

    def _calc_metrics_info(self, generate_corpus, reference_corpus):
        """Calculate the metrics; return ``{metric_name: [per-sample scores]}``.

        Fix: the abstract signature previously took no arguments even though
        :meth:`evaluate` invokes it with ``generate_corpus`` and
        ``reference_corpus`` keywords, so any subclass written against the
        old stub would break at call time.
        """
        raise NotImplementedError
| [
"numpy.mean"
] | [((1180, 1196), 'numpy.mean', 'np.mean', (['tp_list'], {}), '(tp_list)\n', (1187, 1196), True, 'import numpy as np\n')] |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""Memory module for storing "nearest neighbors".
Implements a key-value memory for generalized one-shot learning
as described in the paper
"Learning to Remember Rare Events"
by <NAME>, <NAME>, <NAME>, <NAME>,
published as a conference paper at ICLR 2017.
"""
import numpy as np
from six.moves import xrange
import tensorflow as tf
class Memory(object):
"""Memory module."""
def __init__(self, key_dim, memory_size, vocab_size,
             choose_k=256, alpha=0.1, correct_in_top=1, age_noise=8.0,
             var_cache_device='', nn_device=''):
    """Create the memory's TF variables: keys, values, ages, recent_idx,
    and the query projection matrix.

    Args:
      key_dim: Dimensionality of the memory keys (model embedding size).
      memory_size: Number of memory slots.
      vocab_size: Number of distinct stored values (classes).
      choose_k: Candidate-pool size for nearest-neighbor queries,
        capped at memory_size.
      alpha: Margin parameter (presumably for the memory loss — defined
        outside this view; confirm).
      correct_in_top: Rank within which a correct hit still counts.
      age_noise: Magnitude of age noise used when choosing slots to update.
      var_cache_device: Device on which variables are cached ('' = none).
      nn_device: Device used for the nearest-neighbor matmul.
    """
    # key_dim = 128 (output of NN)
    # memory_size = 8192
    # vocab_size = 80
    self.key_dim = key_dim
    self.memory_size = memory_size
    self.vocab_size = vocab_size
    #choose_k = 256
    self.choose_k = min(choose_k, memory_size)
    self.alpha = alpha
    self.correct_in_top = correct_in_top
    self.age_noise = age_noise
    self.var_cache_device = var_cache_device  # Variables are cached here.
    self.nn_device = nn_device  # Device to perform nearest neighbour matmul.
    caching_device = var_cache_device if var_cache_device else None
    self.update_memory = tf.constant(True)  # Can be fed "false" if needed.
    # Keys start as zero vectors (uniform in the degenerate range [-0.0, 0.0]).
    self.mem_keys = tf.get_variable(
        'memkeys', [self.memory_size, self.key_dim], trainable=False,
        initializer=tf.random_uniform_initializer(-0.0, 0.0),
        caching_device=caching_device)
    self.mem_vals = tf.get_variable(
        'memvals', [self.memory_size], dtype=tf.int32, trainable=False,
        initializer=tf.constant_initializer(0, tf.int32),
        caching_device=caching_device)
    self.mem_age = tf.get_variable(
        'memage', [self.memory_size], dtype=tf.float32, trainable=False,
        initializer=tf.constant_initializer(0.0), caching_device=caching_device)
    # recent_idx[label] = memory slot most recently associated with `label`.
    self.recent_idx = tf.get_variable(
        'recent_idx', [self.vocab_size], dtype=tf.int32, trainable=False,
        initializer=tf.constant_initializer(0, tf.int32))
    # variable for projecting query vector into memory key
    # size = 128x128
    self.query_proj = tf.get_variable(
        'memory_query_proj', [self.key_dim, self.key_dim], dtype=tf.float32,
        initializer=tf.truncated_normal_initializer(0, 0.01),
        caching_device=caching_device)
def get(self):
    """Return the memory state as a (keys, values, ages, recent_idx) tuple."""
    state = (self.mem_keys, self.mem_vals, self.mem_age, self.recent_idx)
    return state
def set(self, k, v, a, r=None):
    """Overwrite the memory state; returns one op grouping all assignments.

    `r` (recent_idx) is optional — when omitted, only keys/values/ages
    are reassigned.
    """
    assign_ops = [
        self.mem_keys.assign(k),
        self.mem_vals.assign(v),
        self.mem_age.assign(a),
    ]
    if r is not None:
        assign_ops.append(self.recent_idx.assign(r))
    return tf.group(*assign_ops)
def clear(self):
    """Return an op that re-initializes every memory variable."""
    memory_vars = [self.mem_keys, self.mem_vals, self.mem_age,
                   self.recent_idx]
    return tf.variables_initializer(memory_vars)
def get_hint_pool_idxs(self, normalized_query):
    """Get small set of idxs to compute nearest neighbor queries on.

    This is an expensive look-up on the whole memory that is used to
    avoid more expensive operations later on.

    Args:
      normalized_query: A Tensor of shape [None, key_dim].

    Returns:
      A Tensor of shape [None, choose_k] of indices in memory
      that are closest to the queries.
    """
    # look up in large memory, no gradients
    with tf.device(self.nn_device):
        #DIM: (BS * KeyDim) X (KeyDim * MemDepth)
        # = 16 X MemDepth
        # Inner product against every stored key; gradients are stopped so
        # this scan does not backprop into the query.
        similarities = tf.matmul(tf.stop_gradient(normalized_query),
                                 self.mem_keys, transpose_b=True, name='nn_mmul')
    #Top 256 of the MemDepth
    _, hint_pool_idxs = tf.nn.top_k(
        tf.stop_gradient(similarities), k=self.choose_k, name='nn_topk')
    return hint_pool_idxs
  def make_update_op(self, upd_idxs, upd_keys, upd_vals,
                     batch_size, use_recent_idx, intended_output):
    """Function that creates all the update ops.

    Ages every slot by 1, then scatters the new keys/values into upd_idxs
    and resets those slots' ages to 0, so freshly written entries are the
    "youngest". Returns a single grouped op.
    """
    mem_age_incr = self.mem_age.assign_add(tf.ones([self.memory_size],
                                                   dtype=tf.float32))
    # Ordering matters: the scatter updates must run after the global age
    # increment, hence the control dependency.
    with tf.control_dependencies([mem_age_incr]):
      mem_age_upd = tf.scatter_update(
          self.mem_age, upd_idxs, tf.zeros([batch_size], dtype=tf.float32))
      mem_key_upd = tf.scatter_update(
          self.mem_keys, upd_idxs, upd_keys)
      mem_val_upd = tf.scatter_update(
          self.mem_vals, upd_idxs, upd_vals)
    if use_recent_idx:
      # Remember, per intended label, which slot was written most recently.
      recent_idx_upd = tf.scatter_update(
          self.recent_idx, intended_output, upd_idxs)
    else:
      recent_idx_upd = tf.group()  # no-op placeholder
    return tf.group(mem_age_upd, mem_key_upd, mem_val_upd, recent_idx_upd)
  def query(self, query_vec, intended_output, use_recent_idx=True):
    """Queries memory for nearest neighbor.

    Args:
      query_vec: A batch of vectors to query (embedding of input to model).
      intended_output: The values that would be the correct output of the
        memory.
      use_recent_idx: Whether to always insert at least one instance of a
        correct memory fetch.

    Returns:
      A tuple (result, mask, teacher_loss).
      result: The result of the memory look up.
      mask: The affinity of the query to the result.
      teacher_loss: The loss for training the memory module.
    """
    batch_size = tf.shape(query_vec)[0]
    output_given = intended_output is not None
    # prepare query for memory lookup: project, then L2-normalize so the
    # dot products below act as cosine similarities
    query_vec = tf.matmul(query_vec, self.query_proj)
    normalized_query = tf.nn.l2_normalize(query_vec, dim=1)
    # indices of the choose_k closest memory slots per query (no gradients)
    hint_pool_idxs = self.get_hint_pool_idxs(normalized_query)
    if output_given and use_recent_idx: # add at least one correct memory
      # recent_idx maps each label to the slot last written for it, so the
      # hint pool always contains at least one candidate with the right value
      most_recent_hint_idx = tf.gather(self.recent_idx, intended_output)
      hint_pool_idxs = tf.concat(
          axis=1,
          values=[hint_pool_idxs, tf.expand_dims(most_recent_hint_idx, 1)])
    choose_k = tf.shape(hint_pool_idxs)[1]
    with tf.device(self.var_cache_device):
      # create small memory and look up with gradients
      my_mem_keys = tf.stop_gradient(tf.gather(self.mem_keys, hint_pool_idxs,
                                               name='my_mem_keys_gather'))
      similarities = tf.matmul(tf.expand_dims(normalized_query, 1),
                               my_mem_keys, adjoint_b=True, name='batch_mmul')
      hint_pool_sims = tf.squeeze(similarities, [1], name='hint_pool_sims')
      hint_pool_mem_vals = tf.gather(self.mem_vals, hint_pool_idxs,
                                     name='hint_pool_mem_vals')
    # Calculate softmax mask on the top-k if requested.
    # Softmax temperature. Say we have K elements at dist x and one at (x+a).
    # Softmax of the last is e^tm(x+a)/Ke^tm*x + e^tm(x+a) = e^tm*a/K+e^tm*a.
    # To make that 20% we'd need to have e^tm*a ~= 0.2K, so tm = log(0.2K)/a.
    softmax_temp = max(1.0, np.log(0.2 * self.choose_k) / self.alpha)
    # the appended recent-idx column (last one) is excluded from the mask
    mask = tf.nn.softmax(hint_pool_sims[:, :choose_k - 1] * softmax_temp)
    # prepare returned values: the nearest neighbor within the hint pool,
    # translated back into a flat index over the full memory
    nearest_neighbor = tf.to_int32(
        tf.argmax(hint_pool_sims[:, :choose_k - 1], 1))
    no_teacher_idxs = tf.gather(
        tf.reshape(hint_pool_idxs, [-1]),
        nearest_neighbor + choose_k * tf.range(batch_size))
    with tf.device(self.var_cache_device):
      result = tf.gather(self.mem_vals, tf.reshape(no_teacher_idxs, [-1]))
    if not output_given:
      # inference mode: no teacher signal, no memory update
      teacher_loss = None
      return result, mask, teacher_loss
    # prepare hints from the teacher on hint pool: 1.0 where a candidate's
    # stored value matches the intended output, 0.0 elsewhere
    teacher_hints = tf.to_float(
        tf.abs(tf.expand_dims(intended_output, 1) - hint_pool_mem_vals))
    teacher_hints = 1.0 - tf.minimum(1.0, teacher_hints)
    # best-matching correct candidate vs. best-matching incorrect candidate
    teacher_vals, teacher_hint_idxs = tf.nn.top_k(
        hint_pool_sims * teacher_hints, k=1)
    neg_teacher_vals, _ = tf.nn.top_k(
        hint_pool_sims * (1 - teacher_hints), k=1)
    # bring back idxs to full memory
    teacher_idxs = tf.gather(
        tf.reshape(hint_pool_idxs, [-1]),
        teacher_hint_idxs[:, 0] + choose_k * tf.range(batch_size))
    # zero-out teacher_vals if there are no hints
    teacher_vals *= (
        1 - tf.to_float(tf.equal(0.0, tf.reduce_sum(teacher_hints, 1))))
    # we'll determine whether to do an update to memory based on whether
    # memory was queried correctly (a hint among the top correct_in_top)
    sliced_hints = tf.slice(teacher_hints, [0, 0], [-1, self.correct_in_top])
    incorrect_memory_lookup = tf.equal(0.0, tf.reduce_sum(sliced_hints, 1))
    # loss based on triplet loss: pull the correct key closer than the
    # closest incorrect one by at least the margin alpha
    teacher_loss = (tf.nn.relu(neg_teacher_vals - teacher_vals + self.alpha)
                    - self.alpha)
    # prepare memory updates
    update_keys = normalized_query
    update_vals = intended_output
    fetched_idxs = teacher_idxs # correctly fetched from memory
    with tf.device(self.var_cache_device):
      fetched_keys = tf.gather(self.mem_keys, fetched_idxs, name='fetched_keys')
      fetched_vals = tf.gather(self.mem_vals, fetched_idxs, name='fetched_vals')
    # do memory updates here
    fetched_keys_upd = update_keys + fetched_keys # Momentum-like update
    fetched_keys_upd = tf.nn.l2_normalize(fetched_keys_upd, dim=1)
    # Randomize age a bit, e.g., to select different ones in parallel workers.
    mem_age_with_noise = self.mem_age + tf.random_uniform(
        [self.memory_size], - self.age_noise, self.age_noise)
    _, oldest_idxs = tf.nn.top_k(mem_age_with_noise, k=batch_size, sorted=False)
    # on an incorrect lookup, write the query into the oldest slot; on a
    # correct one, refresh the fetched slot with the averaged key
    with tf.control_dependencies([result]):
      upd_idxs = tf.where(incorrect_memory_lookup,
                          oldest_idxs,
                          fetched_idxs)
      # upd_idxs = tf.Print(upd_idxs, [upd_idxs], "UPD IDX", summarize=8)
      upd_keys = tf.where(incorrect_memory_lookup,
                          update_keys,
                          fetched_keys_upd)
      upd_vals = tf.where(incorrect_memory_lookup,
                          update_vals,
                          fetched_vals)
    def make_update_op():
      return self.make_update_op(upd_idxs, upd_keys, upd_vals,
                                 batch_size, use_recent_idx, intended_output)
    # updates only run when self.update_memory is True at session run time
    update_op = tf.cond(self.update_memory, make_update_op, tf.no_op)
    # gate the returned tensors on the update so it cannot be skipped
    with tf.control_dependencies([update_op]):
      result = tf.identity(result)
      mask = tf.identity(mask)
      teacher_loss = tf.identity(teacher_loss)
    return result, mask, tf.reduce_mean(teacher_loss)
| [
"tensorflow.shape",
"tensorflow.scatter_update",
"tensorflow.reduce_sum",
"numpy.log",
"tensorflow.truncated_normal_initializer",
"tensorflow.group",
"tensorflow.control_dependencies",
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"tensorflow.variables_initializer",
"tensorflow.slice",
"t... | [((1826, 1843), 'tensorflow.constant', 'tf.constant', (['(True)'], {}), '(True)\n', (1837, 1843), True, 'import tensorflow as tf\n'), ((3294, 3386), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['[self.mem_keys, self.mem_vals, self.mem_age, self.recent_idx]'], {}), '([self.mem_keys, self.mem_vals, self.mem_age, self.\n recent_idx])\n', (3318, 3386), True, 'import tensorflow as tf\n'), ((4817, 4869), 'tensorflow.scatter_update', 'tf.scatter_update', (['self.mem_keys', 'upd_idxs', 'upd_keys'], {}), '(self.mem_keys, upd_idxs, upd_keys)\n', (4834, 4869), True, 'import tensorflow as tf\n'), ((4897, 4949), 'tensorflow.scatter_update', 'tf.scatter_update', (['self.mem_vals', 'upd_idxs', 'upd_vals'], {}), '(self.mem_vals, upd_idxs, upd_vals)\n', (4914, 4949), True, 'import tensorflow as tf\n'), ((5135, 5198), 'tensorflow.group', 'tf.group', (['mem_age_upd', 'mem_key_upd', 'mem_val_upd', 'recent_idx_upd'], {}), '(mem_age_upd, mem_key_upd, mem_val_upd, recent_idx_upd)\n', (5143, 5198), True, 'import tensorflow as tf\n'), ((5964, 6001), 'tensorflow.matmul', 'tf.matmul', (['query_vec', 'self.query_proj'], {}), '(query_vec, self.query_proj)\n', (5973, 6001), True, 'import tensorflow as tf\n'), ((6025, 6061), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['query_vec'], {'dim': '(1)'}), '(query_vec, dim=1)\n', (6043, 6061), True, 'import tensorflow as tf\n'), ((7524, 7586), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['(hint_pool_sims[:, :choose_k - 1] * softmax_temp)'], {}), '(hint_pool_sims[:, :choose_k - 1] * softmax_temp)\n', (7537, 7586), True, 'import tensorflow as tf\n'), ((8314, 8362), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['(hint_pool_sims * teacher_hints)'], {'k': '(1)'}), '(hint_pool_sims * teacher_hints, k=1)\n', (8325, 8362), True, 'import tensorflow as tf\n'), ((8398, 8452), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['(hint_pool_sims * (1 - teacher_hints))'], {'k': '(1)'}), '(hint_pool_sims * (1 - teacher_hints), k=1)\n', (8409, 
8452), True, 'import tensorflow as tf\n'), ((8913, 8971), 'tensorflow.slice', 'tf.slice', (['teacher_hints', '[0, 0]', '[-1, self.correct_in_top]'], {}), '(teacher_hints, [0, 0], [-1, self.correct_in_top])\n', (8921, 8971), True, 'import tensorflow as tf\n'), ((9690, 9733), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['fetched_keys_upd'], {'dim': '(1)'}), '(fetched_keys_upd, dim=1)\n', (9708, 9733), True, 'import tensorflow as tf\n'), ((9956, 10015), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['mem_age_with_noise'], {'k': 'batch_size', 'sorted': '(False)'}), '(mem_age_with_noise, k=batch_size, sorted=False)\n', (9967, 10015), True, 'import tensorflow as tf\n'), ((10714, 10767), 'tensorflow.cond', 'tf.cond', (['self.update_memory', 'make_update_op', 'tf.no_op'], {}), '(self.update_memory, make_update_op, tf.no_op)\n', (10721, 10767), True, 'import tensorflow as tf\n'), ((3902, 3927), 'tensorflow.device', 'tf.device', (['self.nn_device'], {}), '(self.nn_device)\n', (3911, 3927), True, 'import tensorflow as tf\n'), ((4224, 4254), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['similarities'], {}), '(similarities)\n', (4240, 4254), True, 'import tensorflow as tf\n'), ((4535, 4580), 'tensorflow.ones', 'tf.ones', (['[self.memory_size]'], {'dtype': 'tf.float32'}), '([self.memory_size], dtype=tf.float32)\n', (4542, 4580), True, 'import tensorflow as tf\n'), ((4642, 4681), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[mem_age_incr]'], {}), '([mem_age_incr])\n', (4665, 4681), True, 'import tensorflow as tf\n'), ((5006, 5067), 'tensorflow.scatter_update', 'tf.scatter_update', (['self.recent_idx', 'intended_output', 'upd_idxs'], {}), '(self.recent_idx, intended_output, upd_idxs)\n', (5023, 5067), True, 'import tensorflow as tf\n'), ((5112, 5122), 'tensorflow.group', 'tf.group', ([], {}), '()\n', (5120, 5122), True, 'import tensorflow as tf\n'), ((5839, 5858), 'tensorflow.shape', 'tf.shape', (['query_vec'], {}), '(query_vec)\n', (5847, 5858), True, 
'import tensorflow as tf\n'), ((6331, 6374), 'tensorflow.gather', 'tf.gather', (['self.recent_idx', 'intended_output'], {}), '(self.recent_idx, intended_output)\n', (6340, 6374), True, 'import tensorflow as tf\n'), ((6518, 6542), 'tensorflow.shape', 'tf.shape', (['hint_pool_idxs'], {}), '(hint_pool_idxs)\n', (6526, 6542), True, 'import tensorflow as tf\n'), ((6556, 6588), 'tensorflow.device', 'tf.device', (['self.var_cache_device'], {}), '(self.var_cache_device)\n', (6565, 6588), True, 'import tensorflow as tf\n'), ((6968, 7020), 'tensorflow.squeeze', 'tf.squeeze', (['similarities', '[1]'], {'name': '"""hint_pool_sims"""'}), "(similarities, [1], name='hint_pool_sims')\n", (6978, 7020), True, 'import tensorflow as tf\n'), ((7048, 7115), 'tensorflow.gather', 'tf.gather', (['self.mem_vals', 'hint_pool_idxs'], {'name': '"""hint_pool_mem_vals"""'}), "(self.mem_vals, hint_pool_idxs, name='hint_pool_mem_vals')\n", (7057, 7115), True, 'import tensorflow as tf\n'), ((7662, 7708), 'tensorflow.argmax', 'tf.argmax', (['hint_pool_sims[:, :choose_k - 1]', '(1)'], {}), '(hint_pool_sims[:, :choose_k - 1], 1)\n', (7671, 7708), True, 'import tensorflow as tf\n'), ((7752, 7784), 'tensorflow.reshape', 'tf.reshape', (['hint_pool_idxs', '[-1]'], {}), '(hint_pool_idxs, [-1])\n', (7762, 7784), True, 'import tensorflow as tf\n'), ((7856, 7888), 'tensorflow.device', 'tf.device', (['self.var_cache_device'], {}), '(self.var_cache_device)\n', (7865, 7888), True, 'import tensorflow as tf\n'), ((8244, 8274), 'tensorflow.minimum', 'tf.minimum', (['(1.0)', 'teacher_hints'], {}), '(1.0, teacher_hints)\n', (8254, 8274), True, 'import tensorflow as tf\n'), ((8538, 8570), 'tensorflow.reshape', 'tf.reshape', (['hint_pool_idxs', '[-1]'], {}), '(hint_pool_idxs, [-1])\n', (8548, 8570), True, 'import tensorflow as tf\n'), ((9016, 9046), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['sliced_hints', '(1)'], {}), '(sliced_hints, 1)\n', (9029, 9046), True, 'import tensorflow as tf\n'), ((9102, 9158), 
'tensorflow.nn.relu', 'tf.nn.relu', (['(neg_teacher_vals - teacher_vals + self.alpha)'], {}), '(neg_teacher_vals - teacher_vals + self.alpha)\n', (9112, 9158), True, 'import tensorflow as tf\n'), ((9367, 9399), 'tensorflow.device', 'tf.device', (['self.var_cache_device'], {}), '(self.var_cache_device)\n', (9376, 9399), True, 'import tensorflow as tf\n'), ((9422, 9481), 'tensorflow.gather', 'tf.gather', (['self.mem_keys', 'fetched_idxs'], {'name': '"""fetched_keys"""'}), "(self.mem_keys, fetched_idxs, name='fetched_keys')\n", (9431, 9481), True, 'import tensorflow as tf\n'), ((9503, 9562), 'tensorflow.gather', 'tf.gather', (['self.mem_vals', 'fetched_idxs'], {'name': '"""fetched_vals"""'}), "(self.mem_vals, fetched_idxs, name='fetched_vals')\n", (9512, 9562), True, 'import tensorflow as tf\n'), ((9853, 9923), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self.memory_size]', '(-self.age_noise)', 'self.age_noise'], {}), '([self.memory_size], -self.age_noise, self.age_noise)\n', (9870, 9923), True, 'import tensorflow as tf\n'), ((10026, 10059), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[result]'], {}), '([result])\n', (10049, 10059), True, 'import tensorflow as tf\n'), ((10078, 10138), 'tensorflow.where', 'tf.where', (['incorrect_memory_lookup', 'oldest_idxs', 'fetched_idxs'], {}), '(incorrect_memory_lookup, oldest_idxs, fetched_idxs)\n', (10086, 10138), True, 'import tensorflow as tf\n'), ((10282, 10346), 'tensorflow.where', 'tf.where', (['incorrect_memory_lookup', 'update_keys', 'fetched_keys_upd'], {}), '(incorrect_memory_lookup, update_keys, fetched_keys_upd)\n', (10290, 10346), True, 'import tensorflow as tf\n'), ((10416, 10476), 'tensorflow.where', 'tf.where', (['incorrect_memory_lookup', 'update_vals', 'fetched_vals'], {}), '(incorrect_memory_lookup, update_vals, fetched_vals)\n', (10424, 10476), True, 'import tensorflow as tf\n'), ((10778, 10814), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[update_op]'], {}), 
'([update_op])\n', (10801, 10814), True, 'import tensorflow as tf\n'), ((10831, 10850), 'tensorflow.identity', 'tf.identity', (['result'], {}), '(result)\n', (10842, 10850), True, 'import tensorflow as tf\n'), ((10864, 10881), 'tensorflow.identity', 'tf.identity', (['mask'], {}), '(mask)\n', (10875, 10881), True, 'import tensorflow as tf\n'), ((10903, 10928), 'tensorflow.identity', 'tf.identity', (['teacher_loss'], {}), '(teacher_loss)\n', (10914, 10928), True, 'import tensorflow as tf\n'), ((10955, 10983), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['teacher_loss'], {}), '(teacher_loss)\n', (10969, 10983), True, 'import tensorflow as tf\n'), ((2004, 2044), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-0.0)', '(0.0)'], {}), '(-0.0, 0.0)\n', (2033, 2044), True, 'import tensorflow as tf\n'), ((2214, 2250), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)', 'tf.int32'], {}), '(0, tf.int32)\n', (2237, 2250), True, 'import tensorflow as tf\n'), ((2420, 2448), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (2443, 2448), True, 'import tensorflow as tf\n'), ((2614, 2650), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)', 'tf.int32'], {}), '(0, tf.int32)\n', (2637, 2650), True, 'import tensorflow as tf\n'), ((2869, 2909), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', (['(0)', '(0.01)'], {}), '(0, 0.01)\n', (2900, 2909), True, 'import tensorflow as tf\n'), ((3250, 3260), 'tensorflow.group', 'tf.group', ([], {}), '()\n', (3258, 3260), True, 'import tensorflow as tf\n'), ((4032, 4066), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['normalized_query'], {}), '(normalized_query)\n', (4048, 4066), True, 'import tensorflow as tf\n'), ((4756, 4796), 'tensorflow.zeros', 'tf.zeros', (['[batch_size]'], {'dtype': 'tf.float32'}), '([batch_size], dtype=tf.float32)\n', (4764, 4796), True, 'import tensorflow as tf\n'), ((6682, 6749), 
'tensorflow.gather', 'tf.gather', (['self.mem_keys', 'hint_pool_idxs'], {'name': '"""my_mem_keys_gather"""'}), "(self.mem_keys, hint_pool_idxs, name='my_mem_keys_gather')\n", (6691, 6749), True, 'import tensorflow as tf\n'), ((6829, 6864), 'tensorflow.expand_dims', 'tf.expand_dims', (['normalized_query', '(1)'], {}), '(normalized_query, 1)\n', (6843, 6864), True, 'import tensorflow as tf\n'), ((7471, 7498), 'numpy.log', 'np.log', (['(0.2 * self.choose_k)'], {}), '(0.2 * self.choose_k)\n', (7477, 7498), True, 'import numpy as np\n'), ((7930, 7963), 'tensorflow.reshape', 'tf.reshape', (['no_teacher_idxs', '[-1]'], {}), '(no_teacher_idxs, [-1])\n', (7940, 7963), True, 'import tensorflow as tf\n'), ((7824, 7844), 'tensorflow.range', 'tf.range', (['batch_size'], {}), '(batch_size)\n', (7832, 7844), True, 'import tensorflow as tf\n'), ((8160, 8194), 'tensorflow.expand_dims', 'tf.expand_dims', (['intended_output', '(1)'], {}), '(intended_output, 1)\n', (8174, 8194), True, 'import tensorflow as tf\n'), ((8617, 8637), 'tensorflow.range', 'tf.range', (['batch_size'], {}), '(batch_size)\n', (8625, 8637), True, 'import tensorflow as tf\n'), ((8750, 8781), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['teacher_hints', '(1)'], {}), '(teacher_hints, 1)\n', (8763, 8781), True, 'import tensorflow as tf\n'), ((6461, 6500), 'tensorflow.expand_dims', 'tf.expand_dims', (['most_recent_hint_idx', '(1)'], {}), '(most_recent_hint_idx, 1)\n', (6475, 6500), True, 'import tensorflow as tf\n')] |
import serial
import struct
import time
import numpy as np
from ahrs.filters import Madgwick
from cube import CubeRenderer
sync = b'\xEF\xBE\xAD\xDE'
def reset(ser):
    """Send the device-reset frame: sync marker, length 1, command 0x00."""
    for chunk in (sync, b'\x01\x00', b'\x00'):
        ser.write(chunk)
def begin(ser):
    """Send the start-streaming frame: sync, length 3, command 0x10, 50 ms period.

    The little-endian uint16 length field counts the command byte plus its
    payload, so exactly 1 + 2 = 3 bytes must follow it.
    """
    ser.write(sync)
    ser.write(b'\x03\x00')  # payload length = 3 (cmd + 2-byte period)
    ser.write(b'\x10')      # "begin streaming" command
    # BUG FIX: the period was written as b'\x32\0x00', which is the five
    # bytes 0x32, NUL, 'x', '0', '0' -- inconsistent with the declared
    # length of 3. The intended value is a little-endian uint16 of 50 (ms).
    ser.write(b'\x32\x00')  # 50 ms period
def receive(ser):
    """Block until a framed packet arrives; return [command_byte, payload].

    Scans the serial stream byte-by-byte for the 4-byte sync marker, then
    reads a little-endian uint16 length followed by 1 command byte and
    (length - 1) payload bytes.
    """
    recognized = False
    pattern = [0, 0, 0, 0]  # sliding 4-byte window over the stream
    while not recognized:
        pattern.append(ser.read(1))  # sync pattern
        pattern.pop(0)
        recognized = all(pattern[i] == sync[i:i + 1] for i in range(4))
    rcvd = ser.read(2)
    # Renamed from `len`, which shadowed the builtin.
    length = struct.unpack('<H', rcvd)[0]  # packet length (cmd + payload)
    cmd = ser.read(1)
    data = ser.read(length - 1)
    return [cmd, data]
def main():
    """Connect to the IMU on COM4, stream sensor packets, and render attitude.

    Runs forever; relies on the module-level reset/begin/receive helpers and
    the third-party Madgwick filter and CubeRenderer.
    """
    # Retry until the serial port opens (no back-off delay between attempts).
    connecting = True
    while connecting:
        try:
            ser = serial.Serial('COM4')
            connecting = False
        except serial.serialutil.SerialException:
            print("Connection to COM4 timed out, retrying...")
    reset(ser)
    print("resetting")
    time.sleep(3)  # give the device time to come back up after the reset
    begin(ser)
    # q_last = np.array([0.7071, -0.7071, 0.0, 0.0])
    # Identity quaternion (w, x, y, z) as the initial attitude estimate.
    q_last = np.array([1., 0., 0., 0.])
    # Dt = 0.05 s matches the 50 ms streaming period requested in begin().
    madgwick = Madgwick(Dt = 0.05, q0 = q_last)
    renderer = CubeRenderer()
    while True:
        [cmd, data] = receive(ser)
        if cmd == b'\x10':  # sensor-sample packet
            raw = struct.unpack('<fffffffff', data) # 3 float vectors, 9 floats
            mag = raw[0:3]  # currently unused (IMU-only filter update below)
            acc = raw[3:6]
            gyro = raw[6:9]
            # q_last = madgwick.updateMARG(q_last, gyr=gyro, acc=acc, mag=mag)
            q_last = madgwick.updateIMU(q_last, gyr=gyro, acc=acc)
            print(q_last)
            renderer.render(q_last)
if __name__ == '__main__':
    main()
"time.sleep",
"numpy.array",
"struct.unpack",
"serial.Serial",
"cube.CubeRenderer",
"ahrs.filters.Madgwick"
] | [((1049, 1062), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1059, 1062), False, 'import time\n'), ((1142, 1172), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0, 0.0])\n', (1150, 1172), True, 'import numpy as np\n'), ((1182, 1210), 'ahrs.filters.Madgwick', 'Madgwick', ([], {'Dt': '(0.05)', 'q0': 'q_last'}), '(Dt=0.05, q0=q_last)\n', (1190, 1210), False, 'from ahrs.filters import Madgwick\n'), ((1231, 1245), 'cube.CubeRenderer', 'CubeRenderer', ([], {}), '()\n', (1243, 1245), False, 'from cube import CubeRenderer\n'), ((660, 685), 'struct.unpack', 'struct.unpack', (['"""<H"""', 'rcvd'], {}), "('<H', rcvd)\n", (673, 685), False, 'import struct\n'), ((859, 880), 'serial.Serial', 'serial.Serial', (['"""COM4"""'], {}), "('COM4')\n", (872, 880), False, 'import serial\n'), ((1329, 1362), 'struct.unpack', 'struct.unpack', (['"""<fffffffff"""', 'data'], {}), "('<fffffffff', data)\n", (1342, 1362), False, 'import struct\n')] |
import numpy as np
import time
import gym
def execute(env, policy, gamma=1.0, render=False):
    """Roll out one episode following `policy`; return the discounted return."""
    state = env.reset()
    total_return = 0
    step = 0
    finished = False
    while not finished:
        if render:
            env.render()
        state, reward, finished, _ = env.step(int(policy[state]))
        total_return += gamma ** step * reward
        step += 1
    return total_return
def evaluatePolicy(env, policy, gamma=1.0, n=100, render=False):
    """Average the discounted return of `policy` over `n` episodes."""
    scores = []
    for _ in range(n):
        scores.append(execute(env, policy, gamma=gamma, render=render))
    return np.mean(scores)
def extractPolicy(env, v, gamma=1.0):
    """Build the greedy policy with respect to the state-value function `v`."""
    nS, nA = env.env.nS, env.env.nA
    policy = np.zeros(nS)
    for s in range(nS):
        # One-step look-ahead: expected return of each action from state s.
        q = np.array([
            sum(p * (r + gamma * v[s2]) for p, s2, r, _ in env.env.P[s][a])
            for a in range(nA)
        ])
        policy[s] = np.argmax(q)
    return policy
def CalcPolicyValue(env, policy, gamma=1.0):
    """Iterative policy evaluation: value of `policy` to within 1e-10."""
    eps = 1e-10
    value = np.zeros(env.env.nS)
    while True:
        old_value = np.copy(value)
        for s in range(env.env.nS):
            chosen = policy[s]
            # Bellman backup for the fixed policy action in state s.
            value[s] = sum(p * (r + gamma * old_value[s2])
                           for p, s2, r, _ in env.env.P[s][chosen])
        if np.sum(np.fabs(old_value - value)) <= eps:
            return value
def policyIteration(env, gamma=1.0):
    """Classic policy iteration; return the converged (greedy-stable) policy.

    Starts from a random policy and alternates policy evaluation and greedy
    improvement for at most 100 rounds.

    BUG FIX: the original body re-assigned ``gamma = 1.0`` before the loop,
    silently discarding the caller-supplied discount factor.
    """
    policy = np.random.choice(env.env.nA, size=(env.env.nS))
    maxIterations = 100
    for i in range(maxIterations):
        oldPolicyValue = CalcPolicyValue(env, policy, gamma)
        newPolicy = extractPolicy(env, oldPolicyValue, gamma)
        # A fixed point of greedy improvement is an optimal policy.
        if np.all(policy == newPolicy):
            print ('Policy Iteration converged at %d.' %(i+1))
            break
        policy = newPolicy
    return policy
if __name__ == '__main__':
    # Solve FrozenLake-v0 by policy iteration and report the mean score.
    env_name = 'FrozenLake-v0'
    env = gym.make(env_name)
    start= time.time()
    optimalPolicy = policyIteration(env, gamma=1.0)
    # NOTE(review): evaluatePolicy returns the mean score (a scalar), so the
    # np.max below is a no-op kept only for the print format.
    scores = evaluatePolicy(env, optimalPolicy, gamma =1.0)
    end = time.time()
    print("Best score = %0.2f. Time taken = %4.4f seconds" %(np.max(scores) , end - start))
"numpy.mean",
"numpy.copy",
"numpy.fabs",
"numpy.random.choice",
"numpy.argmax",
"numpy.max",
"numpy.zeros",
"numpy.all",
"time.time",
"gym.make"
] | [((570, 585), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (577, 585), True, 'import numpy as np\n'), ((638, 658), 'numpy.zeros', 'np.zeros', (['env.env.nS'], {}), '(env.env.nS)\n', (646, 658), True, 'import numpy as np\n'), ((963, 983), 'numpy.zeros', 'np.zeros', (['env.env.nS'], {}), '(env.env.nS)\n', (971, 983), True, 'import numpy as np\n'), ((1394, 1439), 'numpy.random.choice', 'np.random.choice', (['env.env.nA'], {'size': 'env.env.nS'}), '(env.env.nA, size=env.env.nS)\n', (1410, 1439), True, 'import numpy as np\n'), ((1876, 1894), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (1884, 1894), False, 'import gym\n'), ((1906, 1917), 'time.time', 'time.time', ([], {}), '()\n', (1915, 1917), False, 'import time\n'), ((2040, 2051), 'time.time', 'time.time', ([], {}), '()\n', (2049, 2051), False, 'import time\n'), ((706, 726), 'numpy.zeros', 'np.zeros', (['env.env.nA'], {}), '(env.env.nA)\n', (714, 726), True, 'import numpy as np\n'), ((871, 886), 'numpy.argmax', 'np.argmax', (['q_sa'], {}), '(q_sa)\n', (880, 886), True, 'import numpy as np\n'), ((1040, 1054), 'numpy.copy', 'np.copy', (['value'], {}), '(value)\n', (1047, 1054), True, 'import numpy as np\n'), ((1651, 1678), 'numpy.all', 'np.all', (['(policy == newPolicy)'], {}), '(policy == newPolicy)\n', (1657, 1678), True, 'import numpy as np\n'), ((1268, 1298), 'numpy.fabs', 'np.fabs', (['(previousValue - value)'], {}), '(previousValue - value)\n', (1275, 1298), True, 'import numpy as np\n'), ((2113, 2127), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (2119, 2127), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 11 17:10:49 2020
@author: <NAME>
In this code a Hamiltonian Neural Network is designed and employed
to solve a system of four differential equations obtained by Hamilton's
equations for the Hamiltonian of Henon-Heiles chaotic dynamical.
"""
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import grad
import matplotlib.pyplot as plt
import time
import copy
from os import path
import sys
from utils_HHsystem import symEuler, HH_exact, HHsolution, energy ,saveData
dtype=torch.float
# %matplotlib inline
plt. close('all')
# Define the sin() activation function
class mySin(torch.nn.Module):
    """Element-wise sine activation (periodic activations suit oscillatory ODEs)."""
    @staticmethod
    def forward(x):
        # Renamed the parameter from `input`, which shadowed the builtin.
        return torch.sin(x)
#####################################
# Hamiltonian Neural Network (HNN) class
####################################
# Calculate the derivatice with auto-differention
def dfx(x, f):
    """Derivative df/dx via autograd; the graph is kept for higher-order terms."""
    weights = torch.ones(x.shape, dtype=dtype)
    (df_dx,) = grad([f], [x], grad_outputs=weights, create_graph=True)
    return df_dx
def perturbPoints(grid, t0, tf, sig=0.5):
    """Stochastically jitter the training grid, reflecting points into [t0, tf].

    The third grid point is pinned to -1 before the reflections, matching the
    original sampling scheme; the returned tensor does not require grad.
    """
    spacing = grid[1] - grid[0]
    t = grid + spacing * torch.randn_like(grid) * sig
    t.data[2] = torch.ones(1, 1) * (-1)
    # Reflect out-of-range points back inside the interval (in this order).
    below = t < t0
    t.data[below] = t0 - t.data[below]
    above = t > tf
    t.data[above] = 2 * tf - t.data[above]
    t.requires_grad = False
    return t
def parametricSolutions(t, nn, X0):
    """Trial solutions that satisfy the initial conditions exactly.

    Each coordinate is written as X(t) = X(t0) + f(t) * N(t) with
    f(t) = 1 - exp(-(t - t0)), so f(t0) = 0 pins the initial state.
    (The alternative choice f = t - t0 works as well.)
    """
    t0, x0, y0, px0, py0, _ = X0[0], X0[1], X0[2], X0[3], X0[4], X0[5]
    nets = nn(t)  # the four network outputs N1..N4
    f = 1 - torch.exp(-(t - t0))
    x_hat, y_hat, px_hat, py_hat = [ic + f * N
                                    for ic, N in zip((x0, y0, px0, py0), nets)]
    return x_hat, y_hat, px_hat, py_hat
def hamEqs_Loss(t, x, y, px, py, lam):
    """Mean-squared residual of Hamilton's equations for Henon-Heiles.

    The equations of motion are xdot = px, ydot = py,
    pxdot = -x - 2*lam*x*y, pydot = -y - lam*(x^2 - y^2).
    """
    residuals = [
        dfx(t, x) - px,
        dfx(t, y) - py,
        dfx(t, px) + x + 2. * lam * x * y,
        dfx(t, py) + y + lam * (x.pow(2) - y.pow(2)),
    ]
    return sum(r.pow(2).mean() for r in residuals)
def hamiltonian(x, y, px, py, lam):
    """Henon-Heiles Hamiltonian H = K + V (kinetic plus potential energy)."""
    kinetic = 0.5 * (px ** 2 + py ** 2)
    potential = 0.5 * (x ** 2 + y ** 2) + lam * (x ** 2 * y - y ** 3 / 3)
    return kinetic + potential
def hamiltonian_Loss(t,x,y,px,py,lam):
    # Define the loss function as the time derivative of the hamiltonian
    """Penalize dH/dt != 0 along the trajectory (energy-conservation loss).

    Rebuilds H from (x, y, px, py), differentiates it w.r.t. each phase-space
    coordinate with autograd, then chains with the time derivatives to form
    dH/dt = Hx*xdot + Hy*ydot + Hpx*pxdot + Hpy*pydot; returns mean(dH/dt^2).
    """
    xd,yd,pxd,pyd= dfx(t,x),dfx(t,y),dfx(t,px),dfx(t,py)
    ham = 0.5*(px.pow(2)+py.pow(2)+x.pow(2)+y.pow(2))+lam*(x.pow(2)*y-y.pow(3)/3)
    # Partial derivatives of H w.r.t. each coordinate (graphs kept so the
    # total loss remains differentiable w.r.t. the network parameters).
    hx = grad([ham], [x], grad_outputs=torch.ones(x.shape, dtype=dtype), create_graph=True)[0]
    hy = grad([ham], [y], grad_outputs=torch.ones(y.shape, dtype=dtype), create_graph=True)[0]
    hpx = grad([ham], [px], grad_outputs=torch.ones(px.shape, dtype=dtype), create_graph=True)[0]
    hpy = grad([ham], [py], grad_outputs=torch.ones(py.shape, dtype=dtype), create_graph=True)[0]
    # Chain rule: total time derivative of H along the trajectory.
    ht = hx*xd + hy*yd + hpx*pxd + hpy*pyd
    L = (ht.pow(2)).mean()
    return L
# NETWORK ARCHITECTURE
# A two hidden layer NN, 1 input & two output
class odeNet_HH_MM(torch.nn.Module):
    """Fully-connected net: 1 input (time) -> 4 outputs (x, y, px, py).

    Two hidden layers of width D_hid with a sine activation. Attribute names
    (Lin_1, Lin_2, Lin_out, actF) are kept so saved checkpoints still load.
    """
    def __init__(self, D_hid=10):
        super(odeNet_HH_MM, self).__init__()
        # Sine activation; a sigmoid was tried as an alternative.
        self.actF = mySin()
        self.Lin_1 = torch.nn.Linear(1, D_hid)
        self.Lin_2 = torch.nn.Linear(D_hid, D_hid)
        self.Lin_out = torch.nn.Linear(D_hid, 4)
    def forward(self, t):
        h = self.actF(self.Lin_1(t))
        h = self.actF(self.Lin_2(h))
        out = self.Lin_out(h)
        # Split the four output columns into column vectors.
        xN = out[:, 0].reshape(-1, 1)
        yN = out[:, 1].reshape(-1, 1)
        pxN = out[:, 2].reshape(-1, 1)
        pyN = out[:, 3].reshape(-1, 1)
        return xN, yN, pxN, pyN
# Train the NN
def run_odeNet_HH_MM(X0, tf, neurons, epochs, n_train,lr, PATH= "models/model_HH", loadWeights=False,
                     minLoss=1e-3):
    """Train the Hamiltonian network on [t0, tf] and return the best model.

    Args:
        X0: [t0, x0, y0, px0, py0, lam] initial state plus nonlinearity.
        tf: final time of the training interval (NOT TensorFlow).
        neurons, epochs, n_train, lr: hidden width, training iterations,
            number of collocation points, Adam learning rate.
        PATH: checkpoint file; loaded when loadWeights is True and it exists.
        minLoss: early-stopping threshold on the total loss.

    Returns:
        (best model, total-loss history, wall-clock seconds, energy-loss history)
    """
    fc0 = odeNet_HH_MM(neurons)
    fc1 = copy.deepcopy(fc0) # fc1 is a deepcopy of the network with the lowest training loss
    # optimizer
    betas = [0.999, 0.9999]
    optimizer = optim.Adam(fc0.parameters(), lr=lr, betas=betas)
    Loss_history = []; Llim = 1
    Loss_erg_history= []
    t0=X0[0];
    x0, y0, px0, py0, lam = X0[1], X0[2], X0[3], X0[4], X0[5]
    # Initial energy that should be conserved along the trajectory
    ham0 = hamiltonian(x0,y0,px0,py0,lam)
    grid = torch.linspace(t0, tf, n_train).reshape(-1,1)
    ## LOADING WEIGHTS PART if PATH file exists and loadWeights=True
    if path.exists(PATH) and loadWeights==True:
        checkpoint = torch.load(PATH)
        fc0.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        tt = checkpoint['epoch']
        Ltot = checkpoint['loss']
        fc0.train(); # or model.eval
    ## TRAINING ITERATION
    TeP0 = time.time()
    for tt in range(epochs):
        # Perturbing the evaluation points & forcing t[0]=t0
        # t=perturbPoints(grid,t0,tf,sig=.03*tf)
        t=perturbPoints(grid,t0,tf,sig= 0.3*tf)
        t.requires_grad = True
        # Network solutions
        x,y,px,py =parametricSolutions(t,fc0,X0)
        # LOSS FUNCTION
        # Loss function defined by Hamilton Eqs.
        Ltot = hamEqs_Loss(t,x,y,px,py,lam)
        # ENERGY REGULARIZATION: penalize drift away from the initial energy
        ham = hamiltonian(x,y,px,py,lam)
        L_erg = .5*( ( ham - ham0).pow(2) ).mean()
        Ltot=Ltot+ L_erg
        # OPTIMIZER
        Ltot.backward(retain_graph=False); #True
        optimizer.step()
        loss = Ltot.data.numpy()
        loss_erg=L_erg.data.numpy()
        optimizer.zero_grad()
        # keep the loss function history
        Loss_history.append(loss)
        Loss_erg_history.append(loss_erg)
        #Keep the best model (lowest loss) by using a deep copy
        if tt > 0.8*epochs and Ltot < Llim:
            fc1 = copy.deepcopy(fc0)
            Llim=Ltot
        # break the training after a thresold of accuracy
        if Ltot < minLoss :
            fc1 = copy.deepcopy(fc0)
            print('Reach minimum requested loss')
            break
    TePf = time.time()
    runTime = TePf - TeP0
    # Persist the best weights together with the last optimizer/epoch state.
    torch.save({
    'epoch': tt,
    'model_state_dict': fc1.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
    'loss': Ltot,
    }, PATH)
    return fc1, Loss_history, runTime, Loss_erg_history
###
def trainModel(X0, t_max, neurons, epochs, n_train, lr, loadWeights=True, minLoss=1e-6, showLoss=True, PATH ='models/'):
    """Train the Hamiltonian network, save the loss history, and optionally plot it.

    NOTE(review): the PATH parameter is accepted but never forwarded to
    run_odeNet_HH_MM, which always uses its own default checkpoint path.
    """
    model,loss,runTime, loss_erg = run_odeNet_HH_MM(X0, t_max, neurons, epochs, n_train,lr, loadWeights=loadWeights, minLoss=minLoss)
    # Persist the per-epoch total loss for later inspection.
    np.savetxt('data/loss.txt',loss)
    if showLoss==True :
        print('Training time (minutes):', runTime/60)
        print('Training Loss: ', loss[-1] )
        # log-log plot of the total loss and the energy-penalty component
        plt.figure()
        plt.loglog(loss,'-b',alpha=0.975, label='Total loss');
        plt.loglog(loss_erg,'-r',alpha=0.75, label='Energy penalty');
        plt.legend()
        plt.tight_layout()
        plt.ylabel('Loss');plt.xlabel('t')
        plt.savefig('HenonHeiles_loss.png')
def loadModel(PATH="models/model_HH"):
    """Load trained network weights from PATH; abort if no checkpoint exists."""
    if not path.exists(PATH):
        # No checkpoint on disk: nothing to load, so stop the program.
        print('Warning: There is not any trained model. Terminate')
        sys.exit()
    net = odeNet_HH_MM(neurons)
    checkpoint = torch.load(PATH)
    net.load_state_dict(checkpoint['model_state_dict'])
    net.train()  # or net.eval()
    return net
############
# Set the initial state. lam controls the nonlinearity
# State layout: (t0, x0, y0, px0, py0, lam) — positions, momenta, coupling.
t0, x0, y0, px0, py0, lam = 0, 0.3,-0.3, 0.3, 0.15, 1;
X0 = [t0, x0, y0, px0, py0, lam]
# Run first a short time prediction.
# Then load the model and train for longer time
# ## SHORT TIME
# t_max, N = 6*np.pi, 500;
# print(t_max * 0.069, ' Lyapunov times prediction'); dt = t_max/N;
# n_train, neurons, epochs, lr = N, 80, int(2e4 ), 8e-3
# trainModel(X0, t_max, neurons, epochs, n_train, lr, loadWeights=False, minLoss=1e-8, showLoss=True)
## LONG TIME: use loadWeights=True
# Integrate up to 12*pi with N time points; reuse the short-run weights.
t_max, N = 12*np.pi, 500;
print(t_max * 0.069, ' Lyapunov times prediction'); dt = t_max/N;
# N training points, 80 hidden neurons, 5e4 epochs, learning rate 5e-3.
n_train, neurons, epochs, lr = N, 80, int(5e4 ), 5e-3
# TRAIN THE NETWORK.
trainModel(X0, t_max, neurons, epochs, n_train, lr, loadWeights=True, minLoss=1e-8, showLoss=True)
# Reload the best checkpoint saved during training.
model = loadModel()
#####################################
# TEST THE PREDICTED SOLUTIONS
#######################################3
#
# Evaluate the trained network on a fresh time grid of the same span.
nTest = N ; t_max_test = 1.0*t_max
tTest = torch.linspace(t0,t_max_test,nTest)
tTest = tTest.reshape(-1,1);
# Gradients w.r.t. t are required because the parametric solution
# differentiates the network output inside parametricSolutions.
tTest.requires_grad=True
t_net = tTest.detach().numpy()
x,y,px,py =parametricSolutions(tTest,model,X0)
# Convert network tensors to plain numpy arrays for plotting/analysis.
x=x.data.numpy(); y=y.data.numpy()
px=px.data.numpy(); py=py.data.numpy()
E = energy(x, y, px, py, lam)
# ####################
# Scipy solver
######################
# Reference ("ground truth") trajectory from a numerical integrator.
t_num = np.linspace(t0, t_max_test, N)
E0, E_ex = HH_exact(N,x0, y0, px0, py0, lam)
x_num, y_num, px_num, py_num = HHsolution(N,t_num, x0, y0, px0, py0, lam)
E_num = energy(x_num, y_num, px_num, py_num, lam)
# ###################
# # Symplectic Euler
# # ####################
# Symplectic Euler baseline with the same number of steps as training points.
Ns = n_train -1;
E_s, x_s, y_s, px_s, py_s, t_s = symEuler(Ns, x0,y0, px0,py0,t0,t_max_test,lam)
# # 10 times more time points
Ns10 = 10*n_train ;
T0 = time.time()
E_s10, x_s10, y_s10, px_s10, py_s10, t_s10 = symEuler(Ns10, x0,y0, px0,py0,t0,t_max_test,lam)
runTimeEuler = time.time() - T0
print('Euler runtime is ', runTimeEuler/60)
################
# Make the plots
#################
# Figure for trajectories: x(t), p(t), energy in time E(t),
# and phase space trajectory p(x)
lineW = 2 # Line thickness
plt.figure(figsize=(10,8))
# Panel 1: x(t) for all four methods.
plt.subplot(2,2,1)
plt.plot(t_num,x_num,'-g',linewidth=lineW, label='Ground truth');
plt.plot(t_net, x,'--b', label='Neural Net');
plt.plot(t_s,x_s,':k',linewidth=lineW, label='Symplectic Euler');
plt.plot(t_s10,x_s10,'-.r',linewidth=lineW, label='Symplectic Euler x 10 points');
plt.ylabel('x');plt.xlabel('t')
plt.legend()
# Panel 2: energy conservation E(t).
plt.subplot(2,2,2)
plt.plot(t_num,E_ex,'-g',linewidth=lineW);
plt.plot(t_net, E,'--b')
plt.plot(t_s,E_s,':k',linewidth=lineW);
plt.plot(t_s10,E_s10,'-.r',linewidth=lineW);
plt.ylabel('E');plt.xlabel('t')
# NOTE(review): limits are (1.1*E0, 0.9*E0) — ascending only when E0 < 0;
# confirm the sign of E0 for these initial conditions.
plt.ylim([1.1*E0,0.9*E0])
# Panel 3: momentum px(t).
plt.subplot(2,2,3)
plt.plot(t_num,px_num,'-g',linewidth=lineW);
plt.plot(t_net, px,'--b')
plt.plot(t_s,px_s,':k',linewidth=lineW);
plt.plot(t_s10,px_s10,'-.r',linewidth=lineW);
plt.ylabel('$p_x$');plt.xlabel('t')
# Panel 4: configuration-space trajectory y(x).
plt.subplot(2,2,4)
plt.plot(x_num,y_num,'-g',linewidth=lineW);
plt.plot(x, y,'--b')
plt.plot(x_s,y_s,'--k',linewidth=lineW);
plt.plot(x_s10, y_s10,'-.r',linewidth=lineW);
plt.ylabel('y');plt.xlabel('x');
plt.tight_layout()
plt.savefig('HenonHeiles_trajectories.png')
# calculate the errors for the solutions obtained by network
# NOTE(review): dx_num and dp_num are identically zero and never used below
# — candidates for removal.
dx_num =x_num-x_num; dp_num=px_num-px_num
# Deviation of the network solution from the numerical reference.
dx = x_num - x[:,0]; dpx = px_num - px[:,0]
dy = y_num- y[:,0]; dpy = py_num - py[:,0]
# # calculate the errors for the solutions obtained by Euler
# Re-solve on the Euler time grids so errors are compared point-by-point.
x_numN, y_numN, px_numN, py_numN = HHsolution(Ns,t_s, x0, y0, px0, py0, lam)
dx_s = x_numN - x_s; dpx_s = px_numN - px_s
dy_s = y_numN - y_s; dpy_s = py_numN - py_s
x_numN, y_numN, px_numN, py_numN = HHsolution(Ns10,t_s10, x0, y0, px0, py0, lam)
dx_s10 = x_numN - x_s10; dpx_s10 = px_numN - px_s10
dy_s10 = y_numN - y_s10; dpy_s10 = py_numN - py_s10
# Error figure: per-coordinate errors in time and in phase space.
plt.figure(figsize=(10,8))
plt.subplot(2,2,1)
plt.plot(t_net,dx, 'b', label='Neural Net')
plt.plot(t_s, dx_s, ':k', label='Symplectic Euler')
plt.plot(t_s10, dx_s10, '-.r', label='Symplectic Euler x 10')
plt.ylabel('$\delta_x$');plt.xlabel('t')
plt.legend()
plt.subplot(2,2,2)
plt.plot(dx,dpx,'b')
plt.plot(dx_s, dpx_s, ':k')
plt.plot(dx_s10, dpx_s10, '-.r')
plt.ylabel('$\delta_{p_x}$'); plt.xlabel('$\delta_x$');
plt.subplot(2,2,3)
plt.plot(t_net,dy, 'b')
plt.plot(t_s, dy_s, ':k')
plt.plot(t_s10, dy_s10, '-.r')
plt.ylabel('$\delta_y$');plt.xlabel('t')
plt.subplot(2,2,4)
plt.plot(dy,dpy,'b')
plt.plot(dy_s, dpy_s, ':k')
plt.plot(dy_s10, dpy_s10, '-.r')
plt.ylabel('$\delta_{p_y}$'); plt.xlabel('$\delta_y$');
plt.tight_layout()
plt.savefig('HenonHeiles_trajectories_error.png')
# saveData('data/', t_net, x, y, px,py, E)
# saveData('data/Euler10/', t_s10, x_s10, y_s10, px_s10,py_s10, E_s10)
# saveData('data/solver/', t_num, x_num, y_num, px_num, py_num, E_num)
# np.savetxt('data/'+"dx.txt",dx)
# np.savetxt('data/'+"dy.txt",dy)
# np.savetxt('data/'+"dpx.txt",dpx)
# np.savetxt('data/'+"dpy.txt",dpy)
# np.savetxt('data/Euler10/'+"dx.txt",dx_s10)
# np.savetxt('data/Euler10/'+"dy.txt",dy_s10)
# np.savetxt('data/Euler10/'+"dpx.txt",dpx_s10)
# np.savetxt('data/Euler10/'+"dpy.txt",dpy_s10)
# saveData('data/Euler200/', t_s10, x_s10, y_s10, px_s10,py_s10, E_s10)
# np.savetxt('data/Euler200/'+"dx.txt",dx_s10)
# np.savetxt('data/Euler200/'+"dy.txt",dy_s10)
# np.savetxt('data/Euler200/'+"dpx.txt",dpx_s10)
# np.savetxt('data/Euler200/'+"dpy.txt",dpy_s10)
| [
"utils_HHsystem.HHsolution",
"matplotlib.pyplot.ylabel",
"torch.sin",
"torch.exp",
"copy.deepcopy",
"sys.exit",
"utils_HHsystem.HH_exact",
"os.path.exists",
"utils_HHsystem.symEuler",
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",... | [((611, 627), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (620, 627), True, 'import matplotlib.pyplot as plt\n'), ((8927, 8964), 'torch.linspace', 'torch.linspace', (['t0', 't_max_test', 'nTest'], {}), '(t0, t_max_test, nTest)\n', (8941, 8964), False, 'import torch\n'), ((9177, 9202), 'utils_HHsystem.energy', 'energy', (['x', 'y', 'px', 'py', 'lam'], {}), '(x, y, px, py, lam)\n', (9183, 9202), False, 'from utils_HHsystem import symEuler, HH_exact, HHsolution, energy, saveData\n'), ((9276, 9306), 'numpy.linspace', 'np.linspace', (['t0', 't_max_test', 'N'], {}), '(t0, t_max_test, N)\n', (9287, 9306), True, 'import numpy as np\n'), ((9318, 9352), 'utils_HHsystem.HH_exact', 'HH_exact', (['N', 'x0', 'y0', 'px0', 'py0', 'lam'], {}), '(N, x0, y0, px0, py0, lam)\n', (9326, 9352), False, 'from utils_HHsystem import symEuler, HH_exact, HHsolution, energy, saveData\n'), ((9383, 9426), 'utils_HHsystem.HHsolution', 'HHsolution', (['N', 't_num', 'x0', 'y0', 'px0', 'py0', 'lam'], {}), '(N, t_num, x0, y0, px0, py0, lam)\n', (9393, 9426), False, 'from utils_HHsystem import symEuler, HH_exact, HHsolution, energy, saveData\n'), ((9434, 9475), 'utils_HHsystem.energy', 'energy', (['x_num', 'y_num', 'px_num', 'py_num', 'lam'], {}), '(x_num, y_num, px_num, py_num, lam)\n', (9440, 9475), False, 'from utils_HHsystem import symEuler, HH_exact, HHsolution, energy, saveData\n'), ((9598, 9649), 'utils_HHsystem.symEuler', 'symEuler', (['Ns', 'x0', 'y0', 'px0', 'py0', 't0', 't_max_test', 'lam'], {}), '(Ns, x0, y0, px0, py0, t0, t_max_test, lam)\n', (9606, 9649), False, 'from utils_HHsystem import symEuler, HH_exact, HHsolution, energy, saveData\n'), ((9703, 9714), 'time.time', 'time.time', ([], {}), '()\n', (9712, 9714), False, 'import time\n'), ((9760, 9813), 'utils_HHsystem.symEuler', 'symEuler', (['Ns10', 'x0', 'y0', 'px0', 'py0', 't0', 't_max_test', 'lam'], {}), '(Ns10, x0, y0, px0, py0, t0, t_max_test, lam)\n', (9768, 9813), 
False, 'from utils_HHsystem import symEuler, HH_exact, HHsolution, energy, saveData\n'), ((10072, 10099), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (10082, 10099), True, 'import matplotlib.pyplot as plt\n'), ((10099, 10119), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (10110, 10119), True, 'import matplotlib.pyplot as plt\n'), ((10118, 10185), 'matplotlib.pyplot.plot', 'plt.plot', (['t_num', 'x_num', '"""-g"""'], {'linewidth': 'lineW', 'label': '"""Ground truth"""'}), "(t_num, x_num, '-g', linewidth=lineW, label='Ground truth')\n", (10126, 10185), True, 'import matplotlib.pyplot as plt\n'), ((10185, 10230), 'matplotlib.pyplot.plot', 'plt.plot', (['t_net', 'x', '"""--b"""'], {'label': '"""Neural Net"""'}), "(t_net, x, '--b', label='Neural Net')\n", (10193, 10230), True, 'import matplotlib.pyplot as plt\n'), ((10232, 10299), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s', 'x_s', '""":k"""'], {'linewidth': 'lineW', 'label': '"""Symplectic Euler"""'}), "(t_s, x_s, ':k', linewidth=lineW, label='Symplectic Euler')\n", (10240, 10299), True, 'import matplotlib.pyplot as plt\n'), ((10299, 10388), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s10', 'x_s10', '"""-.r"""'], {'linewidth': 'lineW', 'label': '"""Symplectic Euler x 10 points"""'}), "(t_s10, x_s10, '-.r', linewidth=lineW, label=\n 'Symplectic Euler x 10 points')\n", (10307, 10388), True, 'import matplotlib.pyplot as plt\n'), ((10383, 10398), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x"""'], {}), "('x')\n", (10393, 10398), True, 'import matplotlib.pyplot as plt\n'), ((10399, 10414), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (10409, 10414), True, 'import matplotlib.pyplot as plt\n'), ((10415, 10427), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10425, 10427), True, 'import matplotlib.pyplot as plt\n'), ((10429, 10449), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', 
'(2)', '(2)'], {}), '(2, 2, 2)\n', (10440, 10449), True, 'import matplotlib.pyplot as plt\n'), ((10448, 10492), 'matplotlib.pyplot.plot', 'plt.plot', (['t_num', 'E_ex', '"""-g"""'], {'linewidth': 'lineW'}), "(t_num, E_ex, '-g', linewidth=lineW)\n", (10456, 10492), True, 'import matplotlib.pyplot as plt\n'), ((10492, 10517), 'matplotlib.pyplot.plot', 'plt.plot', (['t_net', 'E', '"""--b"""'], {}), "(t_net, E, '--b')\n", (10500, 10517), True, 'import matplotlib.pyplot as plt\n'), ((10517, 10558), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s', 'E_s', '""":k"""'], {'linewidth': 'lineW'}), "(t_s, E_s, ':k', linewidth=lineW)\n", (10525, 10558), True, 'import matplotlib.pyplot as plt\n'), ((10558, 10604), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s10', 'E_s10', '"""-.r"""'], {'linewidth': 'lineW'}), "(t_s10, E_s10, '-.r', linewidth=lineW)\n", (10566, 10604), True, 'import matplotlib.pyplot as plt\n'), ((10604, 10619), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""E"""'], {}), "('E')\n", (10614, 10619), True, 'import matplotlib.pyplot as plt\n'), ((10620, 10635), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (10630, 10635), True, 'import matplotlib.pyplot as plt\n'), ((10636, 10666), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[1.1 * E0, 0.9 * E0]'], {}), '([1.1 * E0, 0.9 * E0])\n', (10644, 10666), True, 'import matplotlib.pyplot as plt\n'), ((10663, 10683), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (10674, 10683), True, 'import matplotlib.pyplot as plt\n'), ((10682, 10728), 'matplotlib.pyplot.plot', 'plt.plot', (['t_num', 'px_num', '"""-g"""'], {'linewidth': 'lineW'}), "(t_num, px_num, '-g', linewidth=lineW)\n", (10690, 10728), True, 'import matplotlib.pyplot as plt\n'), ((10728, 10754), 'matplotlib.pyplot.plot', 'plt.plot', (['t_net', 'px', '"""--b"""'], {}), "(t_net, px, '--b')\n", (10736, 10754), True, 'import matplotlib.pyplot as plt\n'), ((10754, 10796), 'matplotlib.pyplot.plot', 'plt.plot', 
(['t_s', 'px_s', '""":k"""'], {'linewidth': 'lineW'}), "(t_s, px_s, ':k', linewidth=lineW)\n", (10762, 10796), True, 'import matplotlib.pyplot as plt\n'), ((10796, 10843), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s10', 'px_s10', '"""-.r"""'], {'linewidth': 'lineW'}), "(t_s10, px_s10, '-.r', linewidth=lineW)\n", (10804, 10843), True, 'import matplotlib.pyplot as plt\n'), ((10843, 10862), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$p_x$"""'], {}), "('$p_x$')\n", (10853, 10862), True, 'import matplotlib.pyplot as plt\n'), ((10863, 10878), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (10873, 10878), True, 'import matplotlib.pyplot as plt\n'), ((10880, 10900), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (10891, 10900), True, 'import matplotlib.pyplot as plt\n'), ((10899, 10944), 'matplotlib.pyplot.plot', 'plt.plot', (['x_num', 'y_num', '"""-g"""'], {'linewidth': 'lineW'}), "(x_num, y_num, '-g', linewidth=lineW)\n", (10907, 10944), True, 'import matplotlib.pyplot as plt\n'), ((10944, 10965), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""--b"""'], {}), "(x, y, '--b')\n", (10952, 10965), True, 'import matplotlib.pyplot as plt\n'), ((10965, 11007), 'matplotlib.pyplot.plot', 'plt.plot', (['x_s', 'y_s', '"""--k"""'], {'linewidth': 'lineW'}), "(x_s, y_s, '--k', linewidth=lineW)\n", (10973, 11007), True, 'import matplotlib.pyplot as plt\n'), ((11007, 11053), 'matplotlib.pyplot.plot', 'plt.plot', (['x_s10', 'y_s10', '"""-.r"""'], {'linewidth': 'lineW'}), "(x_s10, y_s10, '-.r', linewidth=lineW)\n", (11015, 11053), True, 'import matplotlib.pyplot as plt\n'), ((11054, 11069), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (11064, 11069), True, 'import matplotlib.pyplot as plt\n'), ((11070, 11085), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (11080, 11085), True, 'import matplotlib.pyplot as plt\n'), ((11088, 11106), 
'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11104, 11106), True, 'import matplotlib.pyplot as plt\n'), ((11107, 11150), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""HenonHeiles_trajectories.png"""'], {}), "('HenonHeiles_trajectories.png')\n", (11118, 11150), True, 'import matplotlib.pyplot as plt\n'), ((11464, 11506), 'utils_HHsystem.HHsolution', 'HHsolution', (['Ns', 't_s', 'x0', 'y0', 'px0', 'py0', 'lam'], {}), '(Ns, t_s, x0, y0, px0, py0, lam)\n', (11474, 11506), False, 'from utils_HHsystem import symEuler, HH_exact, HHsolution, energy, saveData\n'), ((11647, 11693), 'utils_HHsystem.HHsolution', 'HHsolution', (['Ns10', 't_s10', 'x0', 'y0', 'px0', 'py0', 'lam'], {}), '(Ns10, t_s10, x0, y0, px0, py0, lam)\n', (11657, 11693), False, 'from utils_HHsystem import symEuler, HH_exact, HHsolution, energy, saveData\n'), ((11816, 11843), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (11826, 11843), True, 'import matplotlib.pyplot as plt\n'), ((11845, 11865), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (11856, 11865), True, 'import matplotlib.pyplot as plt\n'), ((11864, 11908), 'matplotlib.pyplot.plot', 'plt.plot', (['t_net', 'dx', '"""b"""'], {'label': '"""Neural Net"""'}), "(t_net, dx, 'b', label='Neural Net')\n", (11872, 11908), True, 'import matplotlib.pyplot as plt\n'), ((11908, 11959), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s', 'dx_s', '""":k"""'], {'label': '"""Symplectic Euler"""'}), "(t_s, dx_s, ':k', label='Symplectic Euler')\n", (11916, 11959), True, 'import matplotlib.pyplot as plt\n'), ((11960, 12021), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s10', 'dx_s10', '"""-.r"""'], {'label': '"""Symplectic Euler x 10"""'}), "(t_s10, dx_s10, '-.r', label='Symplectic Euler x 10')\n", (11968, 12021), True, 'import matplotlib.pyplot as plt\n'), ((12022, 12047), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\delta_x$"""'], {}), 
"('$\\\\delta_x$')\n", (12032, 12047), True, 'import matplotlib.pyplot as plt\n'), ((12047, 12062), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (12057, 12062), True, 'import matplotlib.pyplot as plt\n'), ((12063, 12075), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12073, 12075), True, 'import matplotlib.pyplot as plt\n'), ((12079, 12099), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (12090, 12099), True, 'import matplotlib.pyplot as plt\n'), ((12098, 12120), 'matplotlib.pyplot.plot', 'plt.plot', (['dx', 'dpx', '"""b"""'], {}), "(dx, dpx, 'b')\n", (12106, 12120), True, 'import matplotlib.pyplot as plt\n'), ((12119, 12146), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_s', 'dpx_s', '""":k"""'], {}), "(dx_s, dpx_s, ':k')\n", (12127, 12146), True, 'import matplotlib.pyplot as plt\n'), ((12147, 12179), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_s10', 'dpx_s10', '"""-.r"""'], {}), "(dx_s10, dpx_s10, '-.r')\n", (12155, 12179), True, 'import matplotlib.pyplot as plt\n'), ((12180, 12209), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\delta_{p_x}$"""'], {}), "('$\\\\delta_{p_x}$')\n", (12190, 12209), True, 'import matplotlib.pyplot as plt\n'), ((12210, 12235), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\delta_x$"""'], {}), "('$\\\\delta_x$')\n", (12220, 12235), True, 'import matplotlib.pyplot as plt\n'), ((12238, 12258), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (12249, 12258), True, 'import matplotlib.pyplot as plt\n'), ((12257, 12281), 'matplotlib.pyplot.plot', 'plt.plot', (['t_net', 'dy', '"""b"""'], {}), "(t_net, dy, 'b')\n", (12265, 12281), True, 'import matplotlib.pyplot as plt\n'), ((12281, 12306), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s', 'dy_s', '""":k"""'], {}), "(t_s, dy_s, ':k')\n", (12289, 12306), True, 'import matplotlib.pyplot as plt\n'), ((12307, 12337), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s10', 
'dy_s10', '"""-.r"""'], {}), "(t_s10, dy_s10, '-.r')\n", (12315, 12337), True, 'import matplotlib.pyplot as plt\n'), ((12338, 12363), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\delta_y$"""'], {}), "('$\\\\delta_y$')\n", (12348, 12363), True, 'import matplotlib.pyplot as plt\n'), ((12363, 12378), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (12373, 12378), True, 'import matplotlib.pyplot as plt\n'), ((12380, 12400), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (12391, 12400), True, 'import matplotlib.pyplot as plt\n'), ((12399, 12421), 'matplotlib.pyplot.plot', 'plt.plot', (['dy', 'dpy', '"""b"""'], {}), "(dy, dpy, 'b')\n", (12407, 12421), True, 'import matplotlib.pyplot as plt\n'), ((12420, 12447), 'matplotlib.pyplot.plot', 'plt.plot', (['dy_s', 'dpy_s', '""":k"""'], {}), "(dy_s, dpy_s, ':k')\n", (12428, 12447), True, 'import matplotlib.pyplot as plt\n'), ((12448, 12480), 'matplotlib.pyplot.plot', 'plt.plot', (['dy_s10', 'dpy_s10', '"""-.r"""'], {}), "(dy_s10, dpy_s10, '-.r')\n", (12456, 12480), True, 'import matplotlib.pyplot as plt\n'), ((12481, 12510), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\delta_{p_y}$"""'], {}), "('$\\\\delta_{p_y}$')\n", (12491, 12510), True, 'import matplotlib.pyplot as plt\n'), ((12511, 12536), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\delta_y$"""'], {}), "('$\\\\delta_y$')\n", (12521, 12536), True, 'import matplotlib.pyplot as plt\n'), ((12538, 12556), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12554, 12556), True, 'import matplotlib.pyplot as plt\n'), ((12557, 12606), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""HenonHeiles_trajectories_error.png"""'], {}), "('HenonHeiles_trajectories_error.png')\n", (12568, 12606), True, 'import matplotlib.pyplot as plt\n'), ((4370, 4388), 'copy.deepcopy', 'copy.deepcopy', (['fc0'], {}), '(fc0)\n', (4383, 4388), False, 'import copy\n'), ((5315, 5326), 'time.time', 
'time.time', ([], {}), '()\n', (5324, 5326), False, 'import time\n'), ((6565, 6576), 'time.time', 'time.time', ([], {}), '()\n', (6574, 6576), False, 'import time\n'), ((7102, 7135), 'numpy.savetxt', 'np.savetxt', (['"""data/loss.txt"""', 'loss'], {}), "('data/loss.txt', loss)\n", (7112, 7135), True, 'import numpy as np\n'), ((7609, 7626), 'os.path.exists', 'path.exists', (['PATH'], {}), '(PATH)\n', (7620, 7626), False, 'from os import path\n'), ((9824, 9835), 'time.time', 'time.time', ([], {}), '()\n', (9833, 9835), False, 'import time\n'), ((757, 773), 'torch.sin', 'torch.sin', (['input'], {}), '(input)\n', (766, 773), False, 'import torch\n'), ((1334, 1350), 'torch.ones', 'torch.ones', (['(1)', '(1)'], {}), '(1, 1)\n', (1344, 1350), False, 'import torch\n'), ((1748, 1762), 'torch.exp', 'torch.exp', (['(-dt)'], {}), '(-dt)\n', (1757, 1762), False, 'import torch\n'), ((3651, 3676), 'torch.nn.Linear', 'torch.nn.Linear', (['(1)', 'D_hid'], {}), '(1, D_hid)\n', (3666, 3676), False, 'import torch\n'), ((3700, 3729), 'torch.nn.Linear', 'torch.nn.Linear', (['D_hid', 'D_hid'], {}), '(D_hid, D_hid)\n', (3715, 3729), False, 'import torch\n'), ((3753, 3778), 'torch.nn.Linear', 'torch.nn.Linear', (['D_hid', '(4)'], {}), '(D_hid, 4)\n', (3768, 3778), False, 'import torch\n'), ((4955, 4972), 'os.path.exists', 'path.exists', (['PATH'], {}), '(PATH)\n', (4966, 4972), False, 'from os import path\n'), ((5017, 5033), 'torch.load', 'torch.load', (['PATH'], {}), '(PATH)\n', (5027, 5033), False, 'import torch\n'), ((7271, 7283), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7281, 7283), True, 'import matplotlib.pyplot as plt\n'), ((7292, 7347), 'matplotlib.pyplot.loglog', 'plt.loglog', (['loss', '"""-b"""'], {'alpha': '(0.975)', 'label': '"""Total loss"""'}), "(loss, '-b', alpha=0.975, label='Total loss')\n", (7302, 7347), True, 'import matplotlib.pyplot as plt\n'), ((7355, 7417), 'matplotlib.pyplot.loglog', 'plt.loglog', (['loss_erg', '"""-r"""'], {'alpha': '(0.75)', 
'label': '"""Energy penalty"""'}), "(loss_erg, '-r', alpha=0.75, label='Energy penalty')\n", (7365, 7417), True, 'import matplotlib.pyplot as plt\n'), ((7425, 7437), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7435, 7437), True, 'import matplotlib.pyplot as plt\n'), ((7446, 7464), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7462, 7464), True, 'import matplotlib.pyplot as plt\n'), ((7473, 7491), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (7483, 7491), True, 'import matplotlib.pyplot as plt\n'), ((7492, 7507), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (7502, 7507), True, 'import matplotlib.pyplot as plt\n'), ((7521, 7556), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""HenonHeiles_loss.png"""'], {}), "('HenonHeiles_loss.png')\n", (7532, 7556), True, 'import matplotlib.pyplot as plt\n'), ((7685, 7701), 'torch.load', 'torch.load', (['PATH'], {}), '(PATH)\n', (7695, 7701), False, 'import torch\n'), ((7885, 7895), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7893, 7895), False, 'import sys\n'), ((1270, 1292), 'torch.randn_like', 'torch.randn_like', (['grid'], {}), '(grid)\n', (1286, 1292), False, 'import torch\n'), ((4822, 4853), 'torch.linspace', 'torch.linspace', (['t0', 'tf', 'n_train'], {}), '(t0, tf, n_train)\n', (4836, 4853), False, 'import torch\n'), ((6324, 6342), 'copy.deepcopy', 'copy.deepcopy', (['fc0'], {}), '(fc0)\n', (6337, 6342), False, 'import copy\n'), ((6464, 6482), 'copy.deepcopy', 'copy.deepcopy', (['fc0'], {}), '(fc0)\n', (6477, 6482), False, 'import copy\n'), ((1007, 1039), 'torch.ones', 'torch.ones', (['x.shape'], {'dtype': 'dtype'}), '(x.shape, dtype=dtype)\n', (1017, 1039), False, 'import torch\n'), ((2872, 2904), 'torch.ones', 'torch.ones', (['x.shape'], {'dtype': 'dtype'}), '(x.shape, dtype=dtype)\n', (2882, 2904), False, 'import torch\n'), ((2968, 3000), 'torch.ones', 'torch.ones', (['y.shape'], {'dtype': 'dtype'}), '(y.shape, 
dtype=dtype)\n', (2978, 3000), False, 'import torch\n'), ((3065, 3098), 'torch.ones', 'torch.ones', (['px.shape'], {'dtype': 'dtype'}), '(px.shape, dtype=dtype)\n', (3075, 3098), False, 'import torch\n'), ((3163, 3196), 'torch.ones', 'torch.ones', (['py.shape'], {'dtype': 'dtype'}), '(py.shape, dtype=dtype)\n', (3173, 3196), False, 'import torch\n')] |
from dataclasses import dataclass
from typing import Tuple, List
import ase.data
import numpy as np
from networkx import Graph
from rdkit import Chem
import graphdg.parse.tools as tools
from graphdg.parse.extended_graph import mol_to_extended_graph
from graphdg.tools import GLOBALS, NODES, EDGES, SENDERS, RECEIVERS, to_one_hot
# Elements
# Chemical symbols with atomic numbers 1..9; each maps to a one-hot vector.
SYMBOLS = ase.data.chemical_symbols[1:10]
SYMBOL_TO_OH = {symbol: to_one_hot(index=index, num_classes=len(SYMBOLS)) for index, symbol in enumerate(SYMBOLS)}
# Rings
# Ring-membership features cover ring sizes 3..MAX_RING_SIZE.
MAX_RING_SIZE = 9
RING_SIZES = range(3, MAX_RING_SIZE + 1)
# All-zero ring indicator for edges that are not part of any ring.
NOT_IN_RING = tuple(0 for _ in RING_SIZES)
# Edges
# Extended-graph edge kinds (one-hot encoded); kind 1 denotes a real bond
# (see get_edge_infos below).
EDGE_KINDS = [1, 2, 3]
EDGE_KIND_TO_OH = {
    edge_kind: to_one_hot(index=index, num_classes=len(EDGE_KINDS))
    for index, edge_kind in enumerate(EDGE_KINDS)
}
# Bond type
# RDKit bond types supported by the featurizer, one-hot encoded.
BOND_TYPES = [
    Chem.BondType.ZERO, Chem.BondType.SINGLE, Chem.BondType.DOUBLE, Chem.BondType.TRIPLE, Chem.BondType.AROMATIC
]
BOND_TYPE_TO_OH = {
    bond_type: to_one_hot(index=index, num_classes=len(BOND_TYPES))
    for index, bond_type in enumerate(BOND_TYPES)
}
# Stereo
# Cis/trans (E/Z) stereochemistry labels, one-hot encoded.
STEREO_TYPES = [Chem.BondStereo.STEREONONE, Chem.BondStereo.STEREOANY, Chem.BondStereo.STEREOE, Chem.BondStereo.STEREOZ]
STEREO_TYPE_TO_OH = {
    stereo_type: to_one_hot(index=index, num_classes=len(STEREO_TYPES))
    for index, stereo_type in enumerate(STEREO_TYPES)
}
# Chirality
# Tetrahedral chirality tags for atoms, one-hot encoded.
CHI_TAGS = [Chem.CHI_UNSPECIFIED, Chem.CHI_TETRAHEDRAL_CW, Chem.CHI_TETRAHEDRAL_CCW]
CHI_TAGS_TO_OH = {chi_tag: to_one_hot(index=index, num_classes=len(CHI_TAGS)) for index, chi_tag in enumerate(CHI_TAGS)}
@dataclass
class NodeInfo:
    """Per-atom attributes used to build graph node features."""
    symbol: str      # chemical symbol, key into SYMBOL_TO_OH
    chiral_tag: int  # chirality tag, key into CHI_TAGS_TO_OH

    def to_features(self) -> List:
        """Concatenate the one-hot encodings of element and chirality."""
        element_oh = SYMBOL_TO_OH[self.symbol]
        chirality_oh = CHI_TAGS_TO_OH[self.chiral_tag]
        return element_oh + chirality_oh
@dataclass
class EdgeInfo:
    """Per-edge attributes (distance target + categorical features)."""
    distance: float            # interatomic distance (regression target)
    atom_ids: Tuple[int, int]  # (sender, receiver) atom indices
    kind: int                  # extended-graph edge kind
    stereo: int = Chem.BondStereo.STEREONONE
    bond_type: int = Chem.BondType.ZERO
    is_aromatic: bool = False
    is_conjugated: bool = False
    is_in_ring_size: Tuple[int, ...] = NOT_IN_RING

    def to_features(self) -> Tuple[Tuple, List[int], float]:
        """Return (atom_ids, feature vector, distance target)."""
        flags = [int(self.is_aromatic), int(self.is_conjugated)]
        feature_vector = (
            EDGE_KIND_TO_OH[self.kind]
            + STEREO_TYPE_TO_OH[self.stereo]
            + BOND_TYPE_TO_OH[self.bond_type]
            + flags
            + list(self.is_in_ring_size)
        )
        return self.atom_ids, feature_vector, self.distance
def get_node_infos(molecule: Chem.Mol) -> List[NodeInfo]:
    """Build one NodeInfo (element symbol + chirality tag) per atom."""
    infos = []
    for atom in molecule.GetAtoms():
        element = ase.data.chemical_symbols[atom.GetAtomicNum()]
        infos.append(NodeInfo(symbol=element, chiral_tag=atom.GetChiralTag()))
    return infos
def get_edge_infos(molecule: Chem.Mol, graph: Graph):
    """Create an EdgeInfo for every edge of the extended graph.

    Edges of kind 1 correspond to chemical bonds and carry the full bond
    annotation (stereo, bond type, aromaticity, conjugation, ring sizes);
    all other kinds record only distance, endpoints, and kind.
    """
    infos = []
    for source, sink in graph.edges:
        kind = graph.edges[(source, sink)]['kind']
        base = dict(
            distance=tools.get_atom_distance(molecule, source, sink),
            atom_ids=(source, sink),
            kind=kind,
        )
        if kind == 1:
            bond = molecule.GetBondBetweenAtoms(source, sink)
            infos.append(EdgeInfo(
                stereo=bond.GetStereo(),
                bond_type=bond.GetBondType(),
                is_aromatic=bond.GetIsAromatic(),
                is_conjugated=bond.GetIsConjugated(),
                is_in_ring_size=tuple(int(bond.IsInRingSize(size)) for size in RING_SIZES),
                **base,
            ))
        else:
            infos.append(EdgeInfo(**base))
    return infos
def get_feats_and_targets(node_infos: List[NodeInfo], edge_infos: List[EdgeInfo]) -> Tuple[dict, np.ndarray]:
    """Convert node/edge info objects into a graph feature dict and targets.

    Each undirected edge is expanded into two directed edges (forward and
    reverse) that share the same feature vector and distance target.

    Returns:
        (feats, targets): feats is a dict keyed by GLOBALS/NODES/EDGES/
        SENDERS/RECEIVERS; targets has shape (2 * n_edges, 1).
    """
    nodes = [node_info.to_features() for node_info in node_infos]
    edges = []
    senders = []
    receivers = []
    targets = []
    for edge_info in edge_infos:
        (sender, receiver), edge_feats, distance = edge_info.to_features()
        # Forward direction
        edges.append(edge_feats)
        senders.append(sender)
        receivers.append(receiver)
        targets.append([distance])
        # Reverse direction (same features and target)
        edges.append(edge_feats)
        senders.append(receiver)
        receivers.append(sender)
        targets.append([distance])
    assert (len(edges) == len(senders) == len(receivers) == len(targets))
    # FIX: np.float / np.int were deprecated in NumPy 1.20 and removed in
    # NumPy 1.24; the builtin float/int give the same dtypes (float64/int64).
    feats = {
        GLOBALS: np.array([], dtype=float),
        NODES: np.array(nodes, dtype=float),
        EDGES: np.array(edges, dtype=float),
        SENDERS: np.array(senders, dtype=int),
        RECEIVERS: np.array(receivers, dtype=int),
    }
    targets = np.array(targets, dtype=float)
    return feats, targets
def get_info_tuple(molecule: Chem.Mol, seed: int) -> Tuple[List[NodeInfo], List[EdgeInfo]]:
    """Featurize one molecule: node infos plus edge infos of its extended graph."""
    node_infos = get_node_infos(molecule)
    extended = mol_to_extended_graph(molecule, seed=seed)
    return node_infos, get_edge_infos(molecule=molecule, graph=extended)
def get_dataset(molecules: List[Chem.Mol], seed: int, count: int) -> List[Tuple[dict, np.ndarray]]:
    """Generate `count` featurized (feats, targets) samples per molecule.

    The graph-building seed is offset by molecule index and sample index,
    matching the original seeding scheme.
    """
    samples = []
    for mol_id, mol in enumerate(molecules):
        for sample_idx in range(count):
            nodes, edges = get_info_tuple(mol, seed=seed + mol_id + sample_idx)
            samples.append(get_feats_and_targets(node_infos=nodes, edge_infos=edges))
    return samples
| [
"graphdg.parse.tools.get_atom_distance",
"numpy.array",
"graphdg.parse.extended_graph.mol_to_extended_graph"
] | [((4616, 4649), 'numpy.array', 'np.array', (['targets'], {'dtype': 'np.float'}), '(targets, dtype=np.float)\n', (4624, 4649), True, 'import numpy as np\n'), ((4826, 4868), 'graphdg.parse.extended_graph.mol_to_extended_graph', 'mol_to_extended_graph', (['molecule'], {'seed': 'seed'}), '(molecule, seed=seed)\n', (4847, 4868), False, 'from graphdg.parse.extended_graph import mol_to_extended_graph\n'), ((4365, 4393), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (4373, 4393), True, 'import numpy as np\n'), ((4410, 4441), 'numpy.array', 'np.array', (['nodes'], {'dtype': 'np.float'}), '(nodes, dtype=np.float)\n', (4418, 4441), True, 'import numpy as np\n'), ((4458, 4489), 'numpy.array', 'np.array', (['edges'], {'dtype': 'np.float'}), '(edges, dtype=np.float)\n', (4466, 4489), True, 'import numpy as np\n'), ((4508, 4539), 'numpy.array', 'np.array', (['senders'], {'dtype': 'np.int'}), '(senders, dtype=np.int)\n', (4516, 4539), True, 'import numpy as np\n'), ((4560, 4593), 'numpy.array', 'np.array', (['receivers'], {'dtype': 'np.int'}), '(receivers, dtype=np.int)\n', (4568, 4593), True, 'import numpy as np\n'), ((2918, 2965), 'graphdg.parse.tools.get_atom_distance', 'tools.get_atom_distance', (['molecule', 'source', 'sink'], {}), '(molecule, source, sink)\n', (2941, 2965), True, 'import graphdg.parse.tools as tools\n'), ((3405, 3452), 'graphdg.parse.tools.get_atom_distance', 'tools.get_atom_distance', (['molecule', 'source', 'sink'], {}), '(molecule, source, sink)\n', (3428, 3452), True, 'import graphdg.parse.tools as tools\n')] |
from numpy import genfromtxt
import numpy as np
import matplotlib.pyplot as plt
import sys
# Log CSV to analyze; can be overridden by the first CLI argument.
filepath = 'dummy.csv'
if len(sys.argv) >= 2:
    filepath = sys.argv[1]
print(filepath)
# Parse the log: column 0 is a timestamp (ms), columns 1/2 are person
# counts, and columns 3/8/13/18 are per-lane boolean "active" flags.
log_data = genfromtxt(filepath,
                 delimiter=',',
                 skip_header=1,
                 converters = {
                     1: lambda s: int(s or 0), # tracked persons
                     2: lambda s: int(s or 0), # players
                     3: lambda s: 'true' in str(s), # lane 1
                     8: lambda s: 'true' in str(s), # lane 2
                     13: lambda s: 'true' in str(s), # lane 3
                     18: lambda s: 'true' in str(s), # lane 4
                 })
def calc_dates(data):
    """Return the timestamp column (first field of every log row) as an array."""
    return np.array([row[0] for row in data])
# Timestamps relative to the first entry, converted from ms to minutes.
dates = (calc_dates(log_data) - log_data[0][0]) / 1000 / 60
# Tracked-but-not-playing persons per entry (column 1 minus column 2).
person_delta = list(map(lambda x: x[1] - x[2], log_data))
def calc_out_of_bound(entry):
    """Sum the distances by which all lane coordinates fall outside [0, 1]."""
    # Column indices of the four coordinate values for each of the four lanes.
    coordinate_columns = (4, 5, 6, 7,
                          9, 10, 11, 12,
                          14, 15, 16, 17,
                          19, 20, 21, 22)
    total = 0
    for idx in coordinate_columns:
        c = entry[idx]
        if c > 1:
            total += c - 1       # overshoot above the upper bound
        elif c < 0:
            total += -c          # undershoot below the lower bound
    return total
def calc_lane_time(data):
    """Accumulate, per lane, the active time in seconds.

    A lane's flag (columns 3/8/13/18) being true on an entry attributes the
    elapsed time since the previous entry to that lane.
    """
    lane_columns = (3, 8, 13, 18)
    totals = np.array([0, 0, 0, 0])
    prev_time = data[0][0]
    for entry in data:
        elapsed = entry[0] - prev_time
        for lane, col in enumerate(lane_columns):
            if entry[col]:
                totals[lane] += elapsed
        prev_time = entry[0]
    return totals / 1000  # timestamps are milliseconds
def calc_player_count(data):
    """Count, per log entry, how many of the four lanes (cols 3/8/13/18) are active."""
    counts = [sum(1 for col in (3, 8, 13, 18) if entry[col]) for entry in data]
    return np.array(counts)
lanes = ["Drums", "Bass", "Melody", "Tone"]
print(calc_lane_time(log_data))
out_of_range = list(map(calc_out_of_bound, log_data))
plt.subplots_adjust(wspace=None, hspace=0.5)
plt.subplot(2,2,1)
plt.plot(dates, out_of_range)
plt.xlabel('time (min)')
plt.ylabel('difference')
plt.title("Hands out of Lane")
plt.legend()
plt.subplot(2,2,2)
plt.step(dates, person_delta)
plt.xlabel('time (min)')
plt.ylabel('tracked but not playing')
plt.yticks(np.arange(min(person_delta), max(person_delta)+1, 1.0))
plt.title("Player Tracking")
plt.legend()
plt.subplot(2,2,3)
plt.bar(lanes, calc_lane_time(log_data))
plt.xlabel('lane')
plt.ylabel('time (s)')
plt.title("Time in Lane")
plt.legend()
plt.subplot(2,2,4)
plt.step(dates, calc_player_count(log_data),label='player count')
plt.plot(dates, np.ones(len(dates)) * calc_player_count(log_data).mean(), label='mean')
plt.xlabel('time (min)')
plt.ylabel('players')
plt.title("Player Count")
plt.legend()
plt.show() | [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.step",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
] | [((2442, 2486), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': 'None', 'hspace': '(0.5)'}), '(wspace=None, hspace=0.5)\n', (2461, 2486), True, 'import matplotlib.pyplot as plt\n'), ((2488, 2508), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (2499, 2508), True, 'import matplotlib.pyplot as plt\n'), ((2507, 2536), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'out_of_range'], {}), '(dates, out_of_range)\n', (2515, 2536), True, 'import matplotlib.pyplot as plt\n'), ((2537, 2561), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (min)"""'], {}), "('time (min)')\n", (2547, 2561), True, 'import matplotlib.pyplot as plt\n'), ((2562, 2586), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""difference"""'], {}), "('difference')\n", (2572, 2586), True, 'import matplotlib.pyplot as plt\n'), ((2587, 2617), 'matplotlib.pyplot.title', 'plt.title', (['"""Hands out of Lane"""'], {}), "('Hands out of Lane')\n", (2596, 2617), True, 'import matplotlib.pyplot as plt\n'), ((2618, 2630), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2628, 2630), True, 'import matplotlib.pyplot as plt\n'), ((2632, 2652), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (2643, 2652), True, 'import matplotlib.pyplot as plt\n'), ((2651, 2680), 'matplotlib.pyplot.step', 'plt.step', (['dates', 'person_delta'], {}), '(dates, person_delta)\n', (2659, 2680), True, 'import matplotlib.pyplot as plt\n'), ((2681, 2705), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (min)"""'], {}), "('time (min)')\n", (2691, 2705), True, 'import matplotlib.pyplot as plt\n'), ((2706, 2743), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""tracked but not playing"""'], {}), "('tracked but not playing')\n", (2716, 2743), True, 'import matplotlib.pyplot as plt\n'), ((2811, 2839), 'matplotlib.pyplot.title', 'plt.title', (['"""Player Tracking"""'], {}), "('Player Tracking')\n", (2820, 2839), True, 
'import matplotlib.pyplot as plt\n'), ((2840, 2852), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2850, 2852), True, 'import matplotlib.pyplot as plt\n'), ((2854, 2874), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (2865, 2874), True, 'import matplotlib.pyplot as plt\n'), ((2914, 2932), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""lane"""'], {}), "('lane')\n", (2924, 2932), True, 'import matplotlib.pyplot as plt\n'), ((2933, 2955), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""time (s)"""'], {}), "('time (s)')\n", (2943, 2955), True, 'import matplotlib.pyplot as plt\n'), ((2956, 2981), 'matplotlib.pyplot.title', 'plt.title', (['"""Time in Lane"""'], {}), "('Time in Lane')\n", (2965, 2981), True, 'import matplotlib.pyplot as plt\n'), ((2982, 2994), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2992, 2994), True, 'import matplotlib.pyplot as plt\n'), ((2996, 3016), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (3007, 3016), True, 'import matplotlib.pyplot as plt\n'), ((3169, 3193), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (min)"""'], {}), "('time (min)')\n", (3179, 3193), True, 'import matplotlib.pyplot as plt\n'), ((3194, 3215), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""players"""'], {}), "('players')\n", (3204, 3215), True, 'import matplotlib.pyplot as plt\n'), ((3216, 3241), 'matplotlib.pyplot.title', 'plt.title', (['"""Player Count"""'], {}), "('Player Count')\n", (3225, 3241), True, 'import matplotlib.pyplot as plt\n'), ((3242, 3254), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3252, 3254), True, 'import matplotlib.pyplot as plt\n'), ((3256, 3266), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3264, 3266), True, 'import matplotlib.pyplot as plt\n'), ((879, 894), 'numpy.array', 'np.array', (['dates'], {}), '(dates)\n', (887, 894), True, 'import numpy as np\n'), ((1549, 1571), 'numpy.array', 
'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (1557, 1571), True, 'import numpy as np\n'), ((2290, 2307), 'numpy.array', 'np.array', (['playing'], {}), '(playing)\n', (2298, 2307), True, 'import numpy as np\n')] |
# Actual movies.py from codecademy
import pandas as pd
from random import shuffle, seed
import numpy as np
seed(100)
df = pd.read_csv("movies.csv")
df = df.dropna()
good_movies = df.loc[df['imdb_score'] >= 7]
bad_movies = df.loc[df['imdb_score'] < 7]
def min_max_normalize(lst):
    """Linearly rescale lst so its minimum maps to 0.0 and its maximum to 1.0."""
    lo, hi = min(lst), max(lst)
    span = hi - lo
    return [(v - lo) / span for v in lst]
x_good = good_movies["budget"]
y_good = good_movies["duration"]
z_good = good_movies['title_year']
x_bad = bad_movies["budget"]
y_bad = bad_movies["duration"]
z_bad = bad_movies['title_year']
data = [x_good, y_good, z_good, x_bad, y_bad, z_bad]
arrays_data = []
for d in data:
norm_d = min_max_normalize(d)
arrays_data.append(np.array(norm_d))
good_class = list(zip(arrays_data[0].flatten(), arrays_data[1].flatten(), arrays_data[2].flatten(),(np.array(([1] * len(arrays_data[0])))) ))
bad_class = list(zip(arrays_data[3].flatten(), arrays_data[4].flatten(), arrays_data[5].flatten(),(np.array(([0] * len(arrays_data[0])))) ))
dataset = good_class + bad_class
shuffle(dataset)
movie_dataset = []
labels = []
for movie in dataset:
movie_dataset.append(movie[:-1])
labels.append(movie[-1])
| [
"numpy.array",
"random.shuffle",
"random.seed",
"pandas.read_csv"
] | [((108, 117), 'random.seed', 'seed', (['(100)'], {}), '(100)\n', (112, 117), False, 'from random import shuffle, seed\n'), ((124, 149), 'pandas.read_csv', 'pd.read_csv', (['"""movies.csv"""'], {}), "('movies.csv')\n", (135, 149), True, 'import pandas as pd\n'), ((1145, 1161), 'random.shuffle', 'shuffle', (['dataset'], {}), '(dataset)\n', (1152, 1161), False, 'from random import shuffle, seed\n'), ((808, 824), 'numpy.array', 'np.array', (['norm_d'], {}), '(norm_d)\n', (816, 824), True, 'import numpy as np\n')] |
#
# Aggregation of changing ROS topic values over time. Supports the following use cases:
#
# 1) Latching infrequent values (with expiration)
# 2) Aggregating values over a moving window, with tolerance
#
import rospy
class LatchedValue:
    # Record of a single latched topic value plus the ROS time it was captured.
    def __init__(self, topic, max_age, value):
        """Capture `value` for `topic`; `max_age` (seconds, or None) bounds its lifetime."""
        self.topic = topic
        self.max_age = max_age
        self.value = value
        self.time_acquired = rospy.get_rostime()  # used by LatchMap for expiry checks
class LatchMap:
    """ Keep track of latched values for a set of topics. Each latched value
    is optionally defined to expire after a set time (max_age, in seconds).
    """

    def __init__(self):
        self.values = {}

    def latch_value(self, topic, value, max_age=None):
        """Store (or replace) the latched value for `topic`."""
        self.values[topic] = LatchedValue(topic, max_age, value)

    def get_value(self, topic):
        """Return the latched value for `topic`, or None if absent or expired."""
        if topic in self.values:
            lvalue = self.values[topic]
            if lvalue.max_age is None:
                # No expiration configured for this topic.
                # (The original's `if not lvalue.time_acquired` check was dead:
                # time_acquired is always set in LatchedValue.__init__.)
                return lvalue.value
            # BUG FIX: age is now - acquired. The original computed
            # acquired - now, which is negative, so d.secs > max_age was
            # never true and latched values never expired.
            d = rospy.Time.now() - lvalue.time_acquired
            if d.secs > lvalue.max_age:
                rospy.loginfo("Latched value for %s is too old (%d secs)", topic, d.secs)
                return None
            return lvalue.value
        return None
from sklearn.cluster import DBSCAN
import numpy as np
def centroid(arr):
    """Return the mean (x, y) of an (N, 2) array of points as a tuple."""
    n_points = arr.shape[0]
    x_total = arr[:, 0].sum()
    y_total = arr[:, 1].sum()
    return x_total / n_points, y_total / n_points
class SpatialAggregation:
    """ Re-cluster points using DBSCAN any time a new point is added
    """
    def __init__(self, values=None):
        # BUG FIX: the original default `values=[]` is evaluated once at class
        # definition time, so every instance created without an argument shared
        # (and mutated) the same list.
        self.values = [] if values is None else values
        self.centroids = None
        self.mean_dists = None
        self.largest_size = None
        self.largest_centroid = None
    def add_value(self, coord):
        """Append an (x, y) point and re-run DBSCAN over all points so far."""
        # Add value
        self.values.append(coord)
        if len(self.values) < 2: return
        # Rerun clustering
        values = np.array(self.values)
        db = DBSCAN(eps=1, min_samples=3).fit(values)
        core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
        core_samples_mask[db.core_sample_indices_] = True
        labels = db.labels_
        self.labels = labels
        # Number of clusters in labels, ignoring noise if present.
        n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
        print("num clusters: %d" % n_clusters_)
        if n_clusters_>0:
            # Per-cluster sizes, centroids and mean point-to-centroid distance.
            self.cluster_sizes = np.array([len(np.nonzero(labels == i)[0]) for i in range(n_clusters_)])
            k = self.cluster_sizes.argmax(axis=0)
            self.centroids = [centroid(values[(labels == i)]) for i in range(n_clusters_)]
            self.mean_dists = [np.mean([np.sqrt((x-self.centroids[i][0])**2+(y-self.centroids[i][1])**2) \
                               for (x, y) in values[labels == i]]) for i in range(n_clusters_)]
            for i, c in enumerate(self.centroids):
                print("Cluster %d - Centroid: (%2.4f,%2.4f) - Mean Distance: %2.4f" \
                      % (i,c[0],c[1],self.mean_dists[i]))
            self.largest_size = self.cluster_sizes[k]
            self.largest_centroid = self.centroids[k]
            print("Largest cluster is %d with centroid %s and %s points" \
                  % (k, self.largest_centroid, self.largest_size))
            # NOTE(review): labels.astype(bool) keeps points with label != 0,
            # i.e. it drops cluster 0 but keeps noise (-1) — confirm intended.
            self.points = values[labels.astype(bool)]
| [
"numpy.sqrt",
"rospy.get_rostime",
"rospy.Time.now",
"numpy.sum",
"numpy.array",
"numpy.nonzero",
"numpy.zeros_like",
"rospy.loginfo",
"sklearn.cluster.DBSCAN"
] | [((1419, 1436), 'numpy.sum', 'np.sum', (['arr[:, 0]'], {}), '(arr[:, 0])\n', (1425, 1436), True, 'import numpy as np\n'), ((1449, 1466), 'numpy.sum', 'np.sum', (['arr[:, 1]'], {}), '(arr[:, 1])\n', (1455, 1466), True, 'import numpy as np\n'), ((404, 423), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (421, 423), False, 'import rospy\n'), ((1983, 2004), 'numpy.array', 'np.array', (['self.values'], {}), '(self.values)\n', (1991, 2004), True, 'import numpy as np\n'), ((2087, 2124), 'numpy.zeros_like', 'np.zeros_like', (['db.labels_'], {'dtype': 'bool'}), '(db.labels_, dtype=bool)\n', (2100, 2124), True, 'import numpy as np\n'), ((1037, 1053), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (1051, 1053), False, 'import rospy\n'), ((1129, 1202), 'rospy.loginfo', 'rospy.loginfo', (['"""Latched value for %s is too old (%d secs)"""', 'topic', 'd.secs'], {}), "('Latched value for %s is too old (%d secs)', topic, d.secs)\n", (1142, 1202), False, 'import rospy\n'), ((2018, 2046), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': '(1)', 'min_samples': '(3)'}), '(eps=1, min_samples=3)\n', (2024, 2046), False, 'from sklearn.cluster import DBSCAN\n'), ((2738, 2812), 'numpy.sqrt', 'np.sqrt', (['((x - self.centroids[i][0]) ** 2 + (y - self.centroids[i][1]) ** 2)'], {}), '((x - self.centroids[i][0]) ** 2 + (y - self.centroids[i][1]) ** 2)\n', (2745, 2812), True, 'import numpy as np\n'), ((2498, 2521), 'numpy.nonzero', 'np.nonzero', (['(labels == i)'], {}), '(labels == i)\n', (2508, 2521), True, 'import numpy as np\n')] |
# -----------------------------------------------------------------------------
# Copyright (c) <NAME> 2015, all rights reserved.
#
# Created: 2015-10-4
# Version: 0.1
# Purpose: Main Python3 code for running a wireless sensor network gateway node
# on a Raspberry Pi 2 with 4 cores
#
# TODO: Implementation of conifg files? Or Argparse? If needed
# - argparse for serial port selection?
#
# This software is provided under the GNU GPLv2
# WITHOUT ANY WARRANTY OF ANY KIND, EXPRESS OR IMPLIED.
# If no license was provided, see <http://www.gnu.org/licenses/>
# -----------------------------------------------------------------------------
# ENVIRONMENT
# -----------------------------------------------------------------------------
# standards
import os
import time
from datetime import datetime
import json
import numpy as np
from pathlib import Path
import logging
import argparse
np.set_printoptions(suppress = True) # to suppres scientific writing
# MPI
from mpi4py import MPI
# -----------------------------------------------------------------------------
# MPI SETUP
# To allow for parralel procesing with communication capabilities
# Communication is actually not needed, but could be
# -----------------------------------------------------------------------------
def enum(*sequential, **named):
    """Fake an enumerated type: enum('A', 'B') yields A=0, B=1; keyword
    arguments supply explicit values.
    http://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python
    """
    members = {name: idx for idx, name in enumerate(sequential)}
    members.update(named)
    return type('Enum', (), members)
# define MPI tags
tags = enum('GO', 'ERROR', 'EXIT')
# communiction
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
status = MPI.Status()
# -----------------------------------------------------------------------------
# command line args and logging
parser = argparse.ArgumentParser()
parser.add_argument('--url', required=True, help='URL of the SOS')
parser.add_argument('--log', help='defines the log level', choices=['DEBUG', 'INFO', 'WARNING'], default='WARNING')
parser.add_argument('--usb', help='serial usb port', default='/dev/ttyUSB0')
commandLineArgs = vars(parser.parse_args())
logging.basicConfig(filename='./log/log', format='%(asctime)s %(levelname)s: %(message)s', level=commandLineArgs['log'])
# -----------------------------------------------------------------------------
# Rank 0 : THE QUEEN - Dumps XBee frames
# Rank 1 : THE GUARD - Handles errors & requests. Does some data insertion
# Rank 2+: WORKER BEES - Data insertion
# -----------------------------------------------------------------------------
if rank == 0:
    # XBee
    import serial
    from xbee import XBee, ZigBee
    # One spool directory for the guard plus one per worker.
    paths = [Path('./temp/guard/')]
    for i in range(2,size):
        paths.append(Path('./temp/worker{0}/'.format(size-i)))
    for path in paths:
        try:
            path.mkdir(parents=True)
        except OSError:
            # dir already exists (or is otherwise unmakeable) — best effort
            continue
    # The guard dir is not a worker spool; only worker dirs stay in `paths`.
    paths.remove(Path('./temp/guard'))
    # BUG FIX: the original also did paths.remove(Path('./log')), but './log'
    # was never added to `paths`, so list.remove raised ValueError on every
    # start-up. Dropped.
    # --------------------------------------------------------------------------
    # FUNCTION dumps
    # dumps the recieved dictionary as a json to a given
    def dumping(obj):
        """Dump a received XBee 'rx' frame as JSON into a worker (or guard) dir."""
        if obj['id'] != 'rx':
            return
        # make ints from buffer, json cant store bytes...
        for key, value in obj.items():
            if type(value) == bytes:
                obj[key] = np.frombuffer(value, np.uint8).tolist()
        stamp = datetime.utcnow().strftime('%Y%m%d-%H%M%s') + '.json'
        # is it normal data?
        if(obj['rf_data'][0] == 1):
            # round-robin over the worker spool dirs
            path = paths.pop()
            with path.joinpath(stamp).open('w') as f:
                json.dump(obj, f)
            paths.insert(0, path)
        else:
            # error/request frames go to the guard
            err_path = Path('./temp/guard/' + stamp)
            with err_path.open('w') as f:
                json.dump(obj, f)
    # init xbee
    ser = serial.Serial(commandLineArgs['usb'], 115200)
    xbee = ZigBee(ser, escaped=True, callback=dumping)
    # Go
    # BUG FIX: logging.INFO is the level constant (an int), not a callable;
    # the original logging.INFO('Queen active') raised TypeError here.
    logging.info('Queen active')
    while True:
        try:
            # Poll for replies from guard/workers, forward the tag over XBee.
            while not comm.Iprobe(source = MPI.ANY_SOURCE, tag = MPI.ANY_TAG):
                time.sleep(0.001)
            data = comm.recv(source = MPI.ANY_SOURCE, tag = MPI.ANY_TAG, status = status)
            tag = status.Get_tag()
            xbee.tx(dest_addr_long=bytes(data[0]), dest_addr = bytes(data[1]), data=bytes([tag]))
            ser.flush()
        except KeyboardInterrupt:
            break
    # close it, otherwise you get errors when shutting down
    xbee.halt()
    ser.close()
    logging.info('Stopped')
    logging.info('====================================')
elif rank == 1:
    from guard import guard
    guard_bee = guard(commandLineArgs['url'], comm)
    logging.debug('Guard running')
    guard_bee.routine()
elif rank > 1:
    number = comm.Get_size() - rank
    import worker
    worker_bee = worker.bee(number)
    logging.debug("Worker #{0} Ready!".format(number))
    worker_bee.routine()
| [
"logging.basicConfig",
"logging.debug",
"argparse.ArgumentParser",
"pathlib.Path",
"datetime.datetime.utcnow",
"json.dump",
"logging.info",
"time.sleep",
"logging.INFO",
"mpi4py.MPI.Status",
"serial.Serial",
"guard.guard",
"numpy.frombuffer",
"xbee.ZigBee",
"worker.bee",
"numpy.set_pri... | [((911, 945), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (930, 945), True, 'import numpy as np\n'), ((1728, 1740), 'mpi4py.MPI.Status', 'MPI.Status', ([], {}), '()\n', (1738, 1740), False, 'from mpi4py import MPI\n'), ((1862, 1887), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1885, 1887), False, 'import argparse\n'), ((2194, 2319), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""./log/log"""', 'format': '"""%(asctime)s %(levelname)s: %(message)s"""', 'level': "commandLineArgs['log']"}), "(filename='./log/log', format=\n '%(asctime)s %(levelname)s: %(message)s', level=commandLineArgs['log'])\n", (2213, 2319), False, 'import logging\n'), ((3951, 3996), 'serial.Serial', 'serial.Serial', (["commandLineArgs['usb']", '(115200)'], {}), "(commandLineArgs['usb'], 115200)\n", (3964, 3996), False, 'import serial\n'), ((4008, 4051), 'xbee.ZigBee', 'ZigBee', (['ser'], {'escaped': '(True)', 'callback': 'dumping'}), '(ser, escaped=True, callback=dumping)\n', (4014, 4051), False, 'from xbee import XBee, ZigBee\n'), ((4065, 4093), 'logging.INFO', 'logging.INFO', (['"""Queen active"""'], {}), "('Queen active')\n", (4077, 4093), False, 'import logging\n'), ((4633, 4656), 'logging.info', 'logging.info', (['"""Stopped"""'], {}), "('Stopped')\n", (4645, 4656), False, 'import logging\n'), ((4661, 4713), 'logging.info', 'logging.info', (['"""===================================="""'], {}), "('====================================')\n", (4673, 4713), False, 'import logging\n'), ((2759, 2780), 'pathlib.Path', 'Path', (['"""./temp/guard/"""'], {}), "('./temp/guard/')\n", (2763, 2780), False, 'from pathlib import Path\n'), ((3003, 3023), 'pathlib.Path', 'Path', (['"""./temp/guard"""'], {}), "('./temp/guard')\n", (3007, 3023), False, 'from pathlib import Path\n'), ((3042, 3055), 'pathlib.Path', 'Path', (['"""./log"""'], {}), "('./log')\n", (3046, 3055), False, 'from 
pathlib import Path\n'), ((4775, 4810), 'guard.guard', 'guard', (["commandLineArgs['url']", 'comm'], {}), "(commandLineArgs['url'], comm)\n", (4780, 4810), False, 'from guard import guard\n'), ((4815, 4845), 'logging.debug', 'logging.debug', (['"""Guard running"""'], {}), "('Guard running')\n", (4828, 4845), False, 'import logging\n'), ((3816, 3845), 'pathlib.Path', 'Path', (["('./temp/guard/' + stamp)"], {}), "('./temp/guard/' + stamp)\n", (3820, 3845), False, 'from pathlib import Path\n'), ((4957, 4975), 'worker.bee', 'worker.bee', (['number'], {}), '(number)\n', (4967, 4975), False, 'import worker\n'), ((3727, 3744), 'json.dump', 'json.dump', (['obj', 'f'], {}), '(obj, f)\n', (3736, 3744), False, 'import json\n'), ((3904, 3921), 'json.dump', 'json.dump', (['obj', 'f'], {}), '(obj, f)\n', (3913, 3921), False, 'import json\n'), ((4218, 4235), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (4228, 4235), False, 'import time\n'), ((3506, 3523), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (3521, 3523), False, 'from datetime import datetime\n'), ((3449, 3479), 'numpy.frombuffer', 'np.frombuffer', (['value', 'np.uint8'], {}), '(value, np.uint8)\n', (3462, 3479), True, 'import numpy as np\n')] |
from sigmoid import sigmoid
import numpy as np
def sigmoidGradient(z):
    #SIGMOIDGRADIENT returns the gradient of the sigmoid function
    #evaluated at z
    #   g = SIGMOIDGRADIENT(z) computes the gradient of the sigmoid function
    #   evaluated at z. Works for scalars, vectors and matrices alike: the
    #   gradient is returned elementwise.
    # BUG FIX: the original transposed (1 - sigmoid(z)) before multiplying.
    # For a non-square 2-D z that raises a broadcasting error, and for a
    # square z it silently multiplies by the *transposed* matrix, giving
    # wrong values. The sigmoid gradient is simply s * (1 - s) elementwise,
    # which numpy broadcasting handles for every shape (and plain scalars).
    s = sigmoid(z)
    g = s * (1 - s)
    return g
"sigmoid.sigmoid",
"numpy.ones"
] | [((752, 762), 'sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (759, 762), False, 'from sigmoid import sigmoid\n'), ((984, 994), 'sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (991, 994), False, 'from sigmoid import sigmoid\n'), ((885, 895), 'sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (892, 895), False, 'from sigmoid import sigmoid\n'), ((1002, 1012), 'sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (1009, 1012), False, 'from sigmoid import sigmoid\n'), ((777, 810), 'numpy.ones', 'np.ones', (['(z.shape[0], z.shape[1])'], {}), '((z.shape[0], z.shape[1]))\n', (784, 810), True, 'import numpy as np\n'), ((811, 821), 'sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (818, 821), False, 'from sigmoid import sigmoid\n'), ((910, 929), 'numpy.ones', 'np.ones', (['z.shape[0]'], {}), '(z.shape[0])\n', (917, 929), True, 'import numpy as np\n'), ((932, 942), 'sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (939, 942), False, 'from sigmoid import sigmoid\n')] |
import numpy as np
import mapping
# Ohmic spectral density parameters:
alpha = 1
s = 1
omega_c = 1
nof_coefficients = 5
# Discretization and chain mapping type:
disc_type = 'gk_quad'
mapping_type = 'lan_bath'
# Discretization accuracy parameter:
ncap = 1000000
# Domain of the spectral density. Must choose manual cutoff value here
domain = [0.0, 25]
# Ohmic spectral density J(w) = alpha * omega_c * (w/omega_c)^s * exp(-w/omega_c)
J = lambda x: alpha * omega_c * (x / omega_c) ** s * np.exp(-(x/omega_c))
# Reference chain coefficients obtained directly from the discretized J.
c0_ref, omega_ref, t_ref, info = \
    mapping.chain.get(J, domain, nof_coefficients, ncap=ncap, disc_type=disc_type, mapping_type=mapping_type,
                      interval_type='lin')
print('Residual from the tridiagonalization: ', info['res'])
# Map to the star coefficients
gamma, xi, info = \
    mapping.convert_chain_to_star(c0_ref, omega_ref, t_ref, get_trafo=True)
# Map back to the chain coefficients
c0, omega, t, info = \
    mapping.convert_star_to_chain_lan(gamma, xi, get_trafo=True)
# Relative round-trip errors: tiny values confirm the two mappings agree.
print('Residuals of the back and forth mapping: ')
print('Of the system-bath coupling: ', np.abs((c0 - c0_ref) / c0_ref))
print('Of the bath energies: ', np.max(np.abs((omega - omega_ref)/omega_ref)))
print('Of the bath-bath couplings: ', np.max(np.abs((t - t_ref)/t_ref))) | [
"numpy.abs",
"numpy.exp",
"mapping.chain.get",
"mapping.convert_star_to_chain_lan",
"mapping.convert_chain_to_star"
] | [((472, 603), 'mapping.chain.get', 'mapping.chain.get', (['J', 'domain', 'nof_coefficients'], {'ncap': 'ncap', 'disc_type': 'disc_type', 'mapping_type': 'mapping_type', 'interval_type': '"""lin"""'}), "(J, domain, nof_coefficients, ncap=ncap, disc_type=\n disc_type, mapping_type=mapping_type, interval_type='lin')\n", (489, 603), False, 'import mapping\n'), ((739, 810), 'mapping.convert_chain_to_star', 'mapping.convert_chain_to_star', (['c0_ref', 'omega_ref', 't_ref'], {'get_trafo': '(True)'}), '(c0_ref, omega_ref, t_ref, get_trafo=True)\n', (768, 810), False, 'import mapping\n'), ((877, 937), 'mapping.convert_star_to_chain_lan', 'mapping.convert_star_to_chain_lan', (['gamma', 'xi'], {'get_trafo': '(True)'}), '(gamma, xi, get_trafo=True)\n', (910, 937), False, 'import mapping\n'), ((1030, 1060), 'numpy.abs', 'np.abs', (['((c0 - c0_ref) / c0_ref)'], {}), '((c0 - c0_ref) / c0_ref)\n', (1036, 1060), True, 'import numpy as np\n'), ((411, 433), 'numpy.exp', 'np.exp', (['(-(x / omega_c))'], {}), '(-(x / omega_c))\n', (417, 433), True, 'import numpy as np\n'), ((1101, 1140), 'numpy.abs', 'np.abs', (['((omega - omega_ref) / omega_ref)'], {}), '((omega - omega_ref) / omega_ref)\n', (1107, 1140), True, 'import numpy as np\n'), ((1186, 1213), 'numpy.abs', 'np.abs', (['((t - t_ref) / t_ref)'], {}), '((t - t_ref) / t_ref)\n', (1192, 1213), True, 'import numpy as np\n')] |
import cv2 as cv
import numpy as np
import numpy as np
import math
import imutils
import os
beta = 0.75
class BodyPart():
    """One rectangular part of an articulated body model.

    Pose is (x, y, theta, l, w): an anchor point, an orientation in degrees,
    and the rectangle's length and width. Parts form a tree via
    parent/children.
    """
    def __init__(self, name='part'):
        self.name = name
        self.x = 0
        self.y = 0
        self.theta = 0
        self.l = 0
        self.w = 0
        self.children = []
        self.parent = None
        # Rectangle corners; filled in by setCorners().
        self.left_upper_corner = None
        self.right_upper_corner = None
        self.left_lower_corner = None
        self.right_lower_corner = None
        self.priority = 0
        self.area = 0
        self.Si = 0 # intersection of synthesized body part with foreground
        self.visited = 0 # number of time this body part was updated
    def setData(self, x, y, theta, l, w):
        """Set the whole pose at once, refreshing area and corners."""
        self.x = x
        self.y = y
        self.theta = theta
        self.l = l
        self.w = w
        self.area = l * w
        self.setCorners()
    def updateValue(self, indx, lamda):
        """Perturb one pose component by `lamda`.

        indx selects the component: 0=x, 1=y, 2=theta, 3=l, 4(+)=w.
        Positional/size deltas are truncated to int; l/w updates refresh area.
        """
        if indx == 0:
            self.x += int(lamda)
        elif indx == 1:
            self.y += int(lamda)
        elif indx == 2:
            self.theta += lamda
        elif indx == 3:
            self.l += int(lamda)
            self.area = self.l * self.w
        else:
            self.w += int(lamda)
            self.area = self.l * self.w
        self.setCorners()
    def addChildren(self, children):
        """Append every part in `children` to this part's child list."""
        for child in children:
            self.children.append(child)
    def setParent(self, parent):
        self.parent = parent
    def getData(self):
        """Return the pose tuple (x, y, theta, l, w)."""
        return (self.x, self.y, self.theta, self.l, self.w)
    def setCorners(self):
        """Recompute the four rectangle corners from the current pose.

        Only the torso's (x, y) is treated as the rectangle center
        (center=True); every other part is anchored at an end point.
        The get_*_corner helpers are defined elsewhere in this module.
        """
        if self.name == 'Torso':
            center = True
        else:
            center = False
        self.left_upper_corner = get_left_upper_corner(self.x, self.y, self.theta, self.l, self.w, center)
        self.right_upper_corner = get_right_upper_corner(self.x, self.y, self.theta, self.l, self.w, center)
        self.left_lower_corner = get_left_lower_corner(self.x, self.y, self.theta, self.l, self.w, center)
        self.right_lower_corner = get_right_lower_corner(self.x, self.y, self.theta, self.l, self.w, center)
# input : frame , initial background with no human in it
# output: binary image
def segmentation(frame, background, threshold=35):
    """Foreground mask by background subtraction.

    Converts both images to grayscale if needed, takes their absolute
    difference, and binarizes it: pixels differing by more than `threshold`
    become 255, the rest 0.

    Generalization: the hard-coded threshold 35 is now a parameter with the
    same default, so callers are unaffected.
    """
    if len(frame.shape) > 2:
        frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    if len(background.shape) > 2:
        background = cv.cvtColor(background, cv.COLOR_BGR2GRAY)
    diff = cv.absdiff(frame, background)
    diff[diff > threshold] = 255
    diff[diff <= threshold] = 0
    return diff
def get_body_height(fore):
    """Vertical extent (in rows) between the first and last row containing
    a 255 foreground pixel; 0 if no such row exists."""
    foreground_rows = np.where((fore == 255).any(axis=1))[0]
    if foreground_rows.size == 0:
        return 0
    return int(foreground_rows[-1] - foreground_rows[0])
def get_torso_center(foreImage):
    """Torso center = the pixel deepest inside the foreground, i.e. the
    location of the distance-transform maximum. Returns (x, y)."""
    dist_map = cv.distanceTransform(foreImage, cv.DIST_L2, 5)
    ys, xs = np.where(dist_map == dist_map.max())
    return (xs[0], ys[0])
####################length of torso#############################
def get_torso_length(foreImage, lBody):
    """Sample a torso length from N(0.33 * lBody, 0.001 * lBody**2).

    `foreImage` is accepted for interface symmetry but unused.
    """
    mean_frac = .33
    var_frac = .001
    return np.random.normal(mean_frac * lBody, np.sqrt(var_frac * (lBody**2)))
##################################################################
####################width of torso#############################
def get_torso_width(foreImage, wBody):
    """Sample a torso width from N(1.0 * wBody, 0.001 * wBody**2).

    `foreImage` is accepted for interface symmetry but unused.
    """
    mean_frac = 1
    var_frac = .001
    return np.random.normal(mean_frac * wBody, np.sqrt(var_frac * (wBody**2)))
##################################################################
def get_torso_angle(foreImage):
    """Estimate the torso angle from two distance-transform maxima.

    NOTE(review): the unconditional `return 360` below makes the whole angle
    computation dead work and the commented-out `return abs(90 - theta)`
    unreachable — the estimate is currently disabled and a sentinel 360 is
    always returned. Confirm whether this is intentional.
    """
    fore = foreImage.copy()
    # get horizontal histogram
    num_rows = foreImage.shape[0]
    # First reference point: the deepest pixel inside the foreground.
    distMap= cv.distanceTransform(foreImage, cv.DIST_L2, 5)
    (yFirst, xFirst) = np.where(distMap == np.amax(distMap))
    xFirst = int(xFirst[0])
    yFirst = int(yFirst[0])
    # Second reference point: deepest pixel in the region below the first.
    cropped_image = fore[min(yFirst + 5, num_rows - 1):, ]
    distMap= cv.distanceTransform(cropped_image, cv.DIST_L2, 5)
    (ySecond, xSecond) = np.where(distMap == np.amax(distMap))
    xSecond = int(xSecond[0])
    ySecond = int(ySecond[0]) + min(yFirst + 5, num_rows - 1)
    # Too close vertically: retry with the region above the first point.
    if abs(ySecond - yFirst) < 30:
        cropped_image = fore[0:max(yFirst - 5, 0), ]
        distMap = cv.distanceTransform(cropped_image, cv.DIST_L2, 5)
        if not distMap is None:
            (ySecond, xSecond) = np.where(distMap == np.amax(distMap))
            xSecond = int(xSecond[0])
            ySecond = int(ySecond[0])
    # Angle of the line through the two reference points, in degrees.
    deltaY = ySecond - yFirst
    deltaX = xSecond - xFirst
    if deltaX != 0:
        theta = np.arctan(deltaY/deltaX) * 180.0 / np.pi
    else:
        theta = 90.0
    return 360
    #return abs(90 - theta)
def get_torso_model(image_R,face,img):
    """Sample the initial torso parameters (x, y, theta, l, w).

    `img` is the binary foreground mask; `image_R` and `face` feed get_TFH
    (defined elsewhere in this module), which presumably places the torso
    relative to the detected face — verify against its definition.
    """
    lBody = get_body_height(img)
    wBody = 0.17 * lBody  # body width as a fixed fraction of body height
    l = get_torso_length(img, lBody)
    w = get_torso_width(img, wBody)
    x,y= get_TFH(image_R,face,l)
    theta = get_torso_angle(img)
    torso_data = (x, y, theta, l, w)
    return torso_data
def get_right_upper_arm_model(torso_center_x, torso_center_y, torso_theta,torso_height, torso_w):
    """Sample the right-upper-arm pose anchored at the torso's upper-right corner.

    Returns (x, y, theta, height, width); the anchor y is pushed down by half
    the sampled arm width so the arm rectangle overlaps the shoulder.
    """
    height = np.random.normal(.55 * torso_height, .02)
    width = np.random.normal(.2 * torso_w, .1)
    (top_right_x, top_right_y) = get_right_upper_corner(torso_center_x, torso_center_y, torso_theta, torso_height, torso_w, True)
    top_right_y = top_right_y + (.5 * width)
    # Cleanup: the original assigned sigma_x = 1 but never used it — the
    # anchor point is deterministic here.
    right_x = top_right_x
    right_y = top_right_y
    theta = np.random.normal(45, 10)
    return right_x, right_y, theta, height, width
def get_left_upper_arm_model(torso_center_x, torso_center_y,torso_theta, torso_height, torso_w):
    """Sample the left-upper-arm pose anchored at the torso's upper-left corner.

    Returns (x, y, theta, height, width); the anchor y is pushed down by half
    the sampled arm width so the arm rectangle overlaps the shoulder.
    """
    height = np.random.normal(.55 * torso_height, .02)
    width = np.random.normal(.2 * torso_w, .1)
    (top_left_x, top_left_y) = get_left_upper_corner(torso_center_x, torso_center_y, torso_theta, torso_height, torso_w, True)
    top_left_y = top_left_y + (.5 * width)
    # Cleanup: the original assigned sigma_x = 3 but never used it — the
    # anchor point is deterministic here.
    left_x = top_left_x
    left_y = top_left_y
    theta = np.random.normal(125, 10)
    return left_x, left_y, theta, height, width
def get_right_lower_arm_model(end_x, end_y, torso_height, torso_w):
    """Sample the right-lower-arm pose anchored (noisily) at the upper arm's
    end point. Returns (x, y, theta, height, width); draw order is
    height, width, x, y, theta."""
    arm_len = np.random.normal(0.55 * torso_height, 0.02)
    arm_wid = np.random.normal(0.2 * torso_w, 0.1)
    anchor_sigma = 1
    x = np.random.normal(end_x, anchor_sigma)
    y = np.random.normal(end_y, anchor_sigma)
    angle = np.random.normal(45, 10)
    return x, y, angle, arm_len, arm_wid
def get_left_lower_arm_model(end_x, end_y, torso_height, torso_w):
    """Sample the left-lower-arm pose anchored (noisily) at the upper arm's
    end point. Returns (x, y, theta, height, width); draw order is
    height, width, x, y, theta."""
    arm_len = np.random.normal(0.55 * torso_height, 0.02)
    arm_wid = np.random.normal(0.2 * torso_w, 0.1)
    anchor_sigma = 3
    x = np.random.normal(end_x, anchor_sigma)
    y = np.random.normal(end_y, anchor_sigma)
    angle = np.random.normal(125, 10)
    return x, y, angle, arm_len, arm_wid
def get_left_upper_leg_model(torso_center_x,torso_center_y,torso_theta,torso_height,torso_w):
    """Sample the left-upper-leg pose hanging from the torso's lower-left
    corner; anchor x is shifted inward by half the sampled leg width.
    Returns (x, y, theta, height, width)."""
    leg_len = np.random.normal(0.7 * torso_height, 0.01)
    leg_wid = np.random.normal(0.35 * torso_w, 0.1)
    anchor_x, anchor_y = get_left_lower_corner(
        torso_center_x, torso_center_y, torso_theta, torso_height, torso_w, True)
    anchor_x += 0.5 * leg_wid
    # scale 0 keeps the anchor exact but preserves the random-draw sequence
    x = np.random.normal(anchor_x, 0)
    y = np.random.normal(anchor_y, 0)
    angle = np.random.normal(100, 10)
    return x, y, angle, leg_len, leg_wid
def get_right_upper_leg_model(torso_center_x, torso_center_y,torso_theta, torso_height, torso_w):
    """Sample the right-upper-leg pose hanging from the torso's lower-right
    corner; anchor x is shifted inward by half the sampled leg width.
    Returns (x, y, theta, height, width)."""
    leg_len = np.random.normal(0.7 * torso_height, 0.01)
    leg_wid = np.random.normal(0.34 * torso_w, 0.1)
    anchor_x, anchor_y = get_right_lower_corner(
        torso_center_x, torso_center_y, torso_theta, torso_height, torso_w, True)
    anchor_x -= 0.5 * leg_wid
    # scale 0 keeps the anchor exact but preserves the random-draw sequence
    x = np.random.normal(anchor_x, 0)
    y = np.random.normal(anchor_y, 0)
    angle = np.random.normal(80, 10)
    return x, y, angle, leg_len, leg_wid
def get_left_lower_leg_model(end_x, end_y, torso_height, torso_w):
    """Sample a left-lower-leg rectangle anchored at the knee (end_x, end_y).

    Returns (x, y, theta, height, width) with theta ~ N(110, 10) degrees.
    """
    height = np.random.normal(0.7 * torso_height, 0.01)
    width = np.random.normal(0.35 * torso_w, 0.1)
    # sigma 0: the anchor is used verbatim while keeping the np.random call order.
    left_x = np.random.normal(end_x, 0)
    left_y = np.random.normal(end_y, 0)
    theta = np.random.normal(110, 10)
    return left_x, left_y, theta, height, width
def get_right_lower_leg_model(end_x, end_y, torso_height, torso_w):
    """Sample a right-lower-leg rectangle anchored at the knee (end_x, end_y).

    Returns (x, y, theta, height, width) with theta ~ N(70, 10) degrees.
    """
    height = np.random.normal(0.7 * torso_height, 0.01)
    width = np.random.normal(0.34 * torso_w, 0.1)
    # sigma 0: the anchor is used verbatim while keeping the np.random call order.
    right_x = np.random.normal(end_x, 0)
    right_y = np.random.normal(end_y, 0)
    theta = np.random.normal(70, 10)
    return right_x, right_y, theta, height, width
def get_head_model(torso_center_x, torso_center_y, torso_height, torso_w):
    """Sample a head rectangle sitting on the torso's top edge.

    Returns (x, y, theta, height, width) with theta ~ N(270, 5) degrees.
    The anchor itself is deterministic: centred horizontally at the top edge.
    """
    height = np.random.normal(0.35 * torso_height, 0.1)
    width = np.random.normal(0.5 * torso_w, 0.1)
    top_x = torso_center_x
    top_y = torso_center_y - 0.5 * torso_height
    theta = np.random.normal(270, 5)
    return top_x, top_y, theta, height, width
def get_body_data(torso_center_x, torso_center_y, torso_theta, torso_height, torso_w):
    """Sample the full set of body-part rectangles relative to a fitted torso.

    Each *_data tuple is (x, y, theta, height, width) as produced by the
    matching get_*_model() sampler.  Lower limbs are anchored at the far end
    of their upper limb's segment (computed from the sampled angle/length),
    so the call order below matters: every sampler consumes np.random draws
    and later parts depend on earlier results.

    Returns a 9-tuple of data tuples:
    (head, left_upper_leg, right_upper_leg, left_lower_leg, right_lower_leg,
     left_upper_arm, right_upper_arm, left_lower_arm, right_lower_arm).
    """
    # --- upper legs, anchored at the torso's lower corners ---
    xll, yll, thetall, hll, wll = get_left_upper_leg_model(torso_center_x, torso_center_y, torso_theta,torso_height, torso_w)
    left_upper_leg_data = (xll, yll, thetall, hll, wll)
    # Knee position = far end of the upper-leg segment.
    endy_left_top_leg = yll + (hll * math.sin(math.radians(thetall)))
    endx_left_top_leg = xll + (hll * math.cos(math.radians(thetall)))
    xrl, yrl, thetarl, hrl, wrl = get_right_upper_leg_model(torso_center_x, torso_center_y,torso_theta, torso_height, torso_w)
    right_upper_leg_data = (xrl, yrl, thetarl, hrl, wrl)
    endy_right_top_leg = yrl + (hrl * math.sin(math.radians(thetarl)))
    endx_right_top_leg = xrl + (hrl * math.cos(math.radians(thetarl)))
    # --- lower legs, anchored at the knees ---
    xlll, ylll, thetalll, hlll, wlll = get_left_lower_leg_model(endx_left_top_leg, endy_left_top_leg, torso_height, torso_w)
    left_lower_leg_data = (xlll, ylll, thetalll, hlll, wlll)
    xrll, yrll, thetarll, hrll, wrll = get_right_lower_leg_model(endx_right_top_leg, endy_right_top_leg, torso_height, torso_w)
    right_lower_leg_data = (xrll, yrll, thetarll, hrll, wrll)
    # --- upper arms, anchored at the torso's upper corners ---
    xla, yla, thetala, hla, wla = get_left_upper_arm_model(torso_center_x, torso_center_y,torso_theta, torso_height, torso_w)
    left_upper_arm_data = (xla, yla, thetala, hla, wla)
    # Elbow position = far end of the upper-arm segment.
    endy_left_top_arm = yla + (hla * math.sin(math.radians(thetala)))
    endx_left_top_arm = xla + (hla * math.cos(math.radians(thetala)))
    xra, yra, thetara, hra, wra = get_right_upper_arm_model(torso_center_x, torso_center_y,torso_theta, torso_height, torso_w)
    right_upper_arm_data = (xra, yra, thetara, hra, wra)
    endy_right_top_arm = yra + (hra * math.sin(math.radians(thetara)))
    endx_right_top_arm = xra + (hra * math.cos(math.radians(thetara)))
    # --- lower arms, anchored at the elbows ---
    # NOTE(review): the variable names are swapped relative to the samplers
    # (xrla... holds the LEFT lower arm); the assembled tuples are consistent.
    xrla, yrla, thetarla, hrla, wrla = get_left_lower_arm_model(endx_left_top_arm, endy_left_top_arm, torso_height, torso_w)
    left_lower_arm_data = (xrla, yrla, thetarla, hrla, wrla)
    xlla, ylla, thetalla, hlla, wlla = get_right_lower_arm_model(endx_right_top_arm, endy_right_top_arm, torso_height, torso_w)
    right_lower_arm_data = (xlla, ylla, thetalla, hlla, wlla)
    # --- head, anchored on the torso's top edge ---
    x, y, theta, h, w = get_head_model(torso_center_x, torso_center_y, torso_height, torso_w)
    head_data = (x, y, theta, h, w)
    return head_data, left_upper_leg_data, right_upper_leg_data, left_lower_leg_data, right_lower_leg_data, left_upper_arm_data, right_upper_arm_data, left_lower_arm_data, right_lower_arm_data
def get_body_tree():
    """Build the articulated body-part tree rooted at the torso.

    Lower limbs are children of their upper limbs; the head and the four
    upper limbs are children of the torso.  Returns the ten BodyPart nodes:
    (torso, head, left/right upper arm, left/right upper leg,
     left/right lower arm, left/right lower leg).
    """
    torso = BodyPart('Torso')
    head = BodyPart('Head')
    left_upper_arm = BodyPart('Left Upper Arm')
    right_upper_arm = BodyPart('Right Upper Arm')
    left_upper_leg = BodyPart('Left Upper Leg')
    right_upper_leg = BodyPart('Right Upper Leg')
    left_lower_arm = BodyPart('Left Lower Arm')
    right_lower_arm = BodyPart('Right Lower Arm')
    left_lower_leg = BodyPart('Left Lower Leg')
    right_lower_leg = BodyPart('Right Lower Leg')
    # Attach each lower limb under its upper limb (same call order as before:
    # all setParent calls first, then all addChildren calls).
    limb_pairs = ((left_upper_arm, left_lower_arm),
                  (right_upper_arm, right_lower_arm),
                  (left_upper_leg, left_lower_leg),
                  (right_upper_leg, right_lower_leg))
    for upper, lower in limb_pairs:
        lower.setParent(upper)
    for upper, lower in limb_pairs:
        upper.addChildren([lower])
    # The torso is the root; head and all four upper limbs hang off it.
    torso_children = [head, left_upper_arm, right_upper_arm, left_upper_leg, right_upper_leg]
    for child in torso_children:
        child.setParent(torso)
    torso.addChildren(torso_children)
    return torso, head, left_upper_arm, right_upper_arm, left_upper_leg, right_upper_leg, left_lower_arm, right_lower_arm, left_lower_leg, right_lower_leg
def get_left_upper_corner(x, y, theta, l, w, center=True):
    """Integer (x, y) of the upper-left corner of a rotated l-by-w rectangle.

    When ``center`` is True, (x, y) is the rectangle centre.  Otherwise it is
    the segment start point: the call moves half-way along the segment to the
    centre and recurses with the angle re-expressed as theta - 90.
    """
    rad = math.radians(theta)
    s = math.sin(rad)
    c = math.cos(rad)
    if not center:
        cx = x + 0.5 * l * c
        cy = y + 0.5 * l * s
        return get_left_upper_corner(cx, cy, theta - 90, l, w)
    corner_x = int(x + l / 2.0 * s - w / 2.0 * c)
    corner_y = int(y - l / 2.0 * c - w / 2.0 * s)
    return (corner_x, corner_y)
def get_right_upper_corner(x, y, theta, l, w, center=True):
    """Integer (x, y) of the upper-right corner of a rotated l-by-w rectangle.

    When ``center`` is True, (x, y) is the rectangle centre.  Otherwise it is
    the segment start point: the call moves half-way along the segment to the
    centre and recurses with the angle re-expressed as theta - 90.
    """
    rad = math.radians(theta)
    s = math.sin(rad)
    c = math.cos(rad)
    if not center:
        cx = x + 0.5 * l * c
        cy = y + 0.5 * l * s
        return get_right_upper_corner(cx, cy, theta - 90, l, w)
    corner_x = int(x + l / 2.0 * s + w / 2.0 * c)
    corner_y = int(y - l / 2.0 * c + w / 2.0 * s)
    return (corner_x, corner_y)
def get_left_lower_corner(x, y, theta, l, w, center=True):
    """Integer (x, y) of the lower-left corner of a rotated l-by-w rectangle.

    When ``center`` is True, (x, y) is the rectangle centre.  Otherwise it is
    the segment start point: the call moves half-way along the segment to the
    centre and recurses with the angle re-expressed as theta - 90.
    """
    rad = math.radians(theta)
    s = math.sin(rad)
    c = math.cos(rad)
    if not center:
        cx = x + 0.5 * l * c
        cy = y + 0.5 * l * s
        return get_left_lower_corner(cx, cy, theta - 90, l, w)
    corner_x = int(x - l / 2.0 * s - w / 2.0 * c)
    corner_y = int(y + l / 2.0 * c - w / 2.0 * s)
    return (corner_x, corner_y)
def get_right_lower_corner(x, y, theta, l, w, center=True):
    """Integer (x, y) of the lower-right corner of a rotated l-by-w rectangle.

    When ``center`` is True, (x, y) is the rectangle centre.  Otherwise it is
    the segment start point: the call moves half-way along the segment to the
    centre and recurses with the angle re-expressed as theta - 90.
    """
    rad = math.radians(theta)
    s = math.sin(rad)
    c = math.cos(rad)
    if not center:
        cx = x + 0.5 * l * c
        cy = y + 0.5 * l * s
        return get_right_lower_corner(cx, cy, theta - 90, l, w)
    corner_x = int(x - l / 2.0 * s + w / 2.0 * c)
    corner_y = int(y + l / 2.0 * c + w / 2.0 * s)
    return (corner_x, corner_y)
def draw_rectangle(img, left_upper_corner, right_upper_corner, left_lower_corner, right_lower_corner):
    """Draw the four edges of a (possibly rotated) rectangle onto ``img``.

    Grayscale input is first converted to a 3-channel image so the colored
    lines (color (255, 0, 0), thickness 2) are visible.  Returns the image.
    """
    if len(img.shape) == 2:
        image = cv.cvtColor(img, cv.COLOR_GRAY2RGB)
    else:
        image = img
    # Each corner pair below is one edge of the rectangle.
    edges = ((left_upper_corner, right_upper_corner),
             (left_upper_corner, left_lower_corner),
             (right_upper_corner, right_lower_corner),
             (left_lower_corner, right_lower_corner))
    for start, end in edges:
        image = cv.line(image, start, end, color=(255, 0, 0), thickness=2)
    return image
def draw_bounding_lines(img, data):
    """Draw the rotated bounding rectangle of a body part onto ``img``.

    ``data`` is the part's (x, y, theta, l, w) tuple; the four rotated corners
    are derived from it and the edges are drawn via draw_rectangle(), which
    draws the same four lines the original inline code drew.  Returns the
    image with the lines drawn.
    """
    return draw_rectangle(img,
                          get_left_upper_corner(*data),
                          get_right_upper_corner(*data),
                          get_left_lower_corner(*data),
                          get_right_lower_corner(*data))
def face_detection(image, foreground_image):
    """Run Haar-cascade frontal-face detection on ``image``.

    A rectangle is drawn on ``image`` (in place) around every detection.
    Returns the raw result of detectMultiScale — an iterable of (x, y, w, h)
    boxes (callers compare against () for the no-detection case).
    ``foreground_image`` is accepted for interface compatibility but unused.
    """
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    cascade = cv.CascadeClassifier("frontface_info.xml")
    faces = cascade.detectMultiScale(gray,
                                    scaleFactor=1.1,
                                    minNeighbors=3,
                                    minSize=(30, 30),
                                    flags=cv.CASCADE_SCALE_IMAGE)
    for (x, y, w, h) in faces:
        # Outline each detected face directly on the input image.
        cv.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
    return faces
def fix_torso(img, torso):
    """Shift the torso box upward when too much foreground lies above the shoulders.

    ``img`` is a binary foreground mask (white = 255).  The ratio of white
    pixels above the torso's top edge to foreground pixels inside the torso
    box is computed; when it exceeds 0.5 the torso's y centre is moved up so
    roughly 9% of the torso's mass remains above the shoulders.
    Mutates ``torso`` in place via setData(); returns None.

    Assumes the corner coordinates lie inside ``img`` (the original per-pixel
    loops would have raised IndexError otherwise).
    """
    x, y, theta, l, w = torso.getData()
    (x_left_upper_corner, y_left_upper_corner) = torso.left_upper_corner
    (x_right_upper_corner, y_right_upper_corner) = torso.right_upper_corner
    (x_left_lower_corner, y_left_lower_corner) = torso.left_lower_corner
    (x_right_lower_corner, y_right_lower_corner) = torso.right_lower_corner
    min_y = min(y_left_upper_corner, y_right_upper_corner)
    max_y = max(y_left_lower_corner, y_right_lower_corner)
    min_x = min(x_left_upper_corner, x_left_lower_corner)
    max_x = max(x_right_upper_corner, x_right_lower_corner)
    # Vectorized replacements for the original per-pixel Python loops:
    # rows strictly above the shoulder line count exact-255 pixels ...
    white_pixels_above_shoulders = int(np.count_nonzero(img[:min_y] == 255))
    # ... while inside the torso box any non-zero pixel counts, matching the
    # original bool(img[i, j]) test.
    white_pixels_in_torso = int(np.count_nonzero(img[min_y:max_y + 1, min_x:max_x + 1]))
    if white_pixels_in_torso == 0:
        # Degenerate box / empty mask: the original divided by zero here.
        return
    ratio = white_pixels_above_shoulders * 1.0 / white_pixels_in_torso
    if ratio > 0.5:
        # Target: keep only ~9% of the torso mass above the shoulder line.
        right_num_pixels = 0.09 * white_pixels_in_torso
        ratio = right_num_pixels * 1.0 / white_pixels_above_shoulders
        y = int((1 - ratio) * y)
    torso.setData(x, y, theta, l, w)
def get_values_within_box(img, points):
    """Return the pixel values of ``img`` lying inside the polygon ``points``.

    ``points`` is a sequence of (x, y) vertices.  A filled polygon mask is
    rendered and the values under it are returned as a 1-D array (fancy
    indexing copies, so ``img`` is never modified).

    Removed from the original: a BGR copy with the polygon outline drawn on
    it, created purely for visualization and never used or returned.
    """
    # define polygon points
    points = np.array([points], dtype=np.int32)
    # create mask for polygon
    mask = np.zeros_like(img)
    cv.fillPoly(mask, [points], (255))
    # get values corresponding to where the mask is white
    return img[np.where(mask == 255)]
def create_mask_body_part(img, points):
    """Return a mask of ``img``'s shape with the polygon ``points`` filled with 255."""
    polygon = np.array([points], dtype=np.int32)
    mask = np.zeros_like(img)
    cv.fillPoly(mask, [polygon], (255))
    return mask
def get_intersection_with_body_parts(img, index, body_parts_list):
    """Count pixels where body part ``index`` overlaps any other body part.

    Every part is rasterised to a filled-polygon mask from its four rotated
    corners; overlaps with the main part are summed over all other parts, so
    a pixel covered by several parts contributes once per part.
    """
    main = body_parts_list[index]
    main_mask = create_mask_body_part(
        img, [main.left_upper_corner, main.right_upper_corner,
              main.right_lower_corner, main.left_lower_corner])
    overlap = 0
    for i, other in enumerate(body_parts_list):
        if i == index:
            continue
        other_mask = create_mask_body_part(
            img, [other.left_upper_corner, other.right_upper_corner,
                  other.right_lower_corner, other.left_lower_corner])
        overlap += np.sum(np.logical_and(other_mask, main_mask))
    return overlap
def update_importance(img, index, body_parts_list, weight):
    """Recompute the priority score of body part ``index``.

    ``img`` is the binary foreground mask.  Side effects on the part:
    ``Si`` is set to the number of white (255) pixels inside the part's
    rotated box, and ``priority`` is set to an importance score that grows
    with (a) the fraction of black pixels inside the box, (b) white pixels
    in the straight bounding box but outside the part, and (c) overlap with
    other parts; upper limbs get a 2.2x boost.

    NOTE(review): raises ZeroDivisionError if the part's polygon covers no
    pixels (all_pixels == 0) — presumed impossible for fitted parts; verify.
    """
    body_part = body_parts_list[index]
    lu = body_part.left_upper_corner
    ru = body_part.right_upper_corner
    ll = body_part.left_lower_corner
    rl = body_part.right_lower_corner
    # Pixel values inside the part's rotated box.
    values = get_values_within_box(img, [lu, ru, rl, ll])
    black_pixels = len(values[values == 0])
    white_pixels = len(values[values == 255])
    body_part.Si = white_pixels
    all_pixels = len(values)
    # get straight (axis-aligned) bounding box of the rotated corners
    x_min = min(lu[0], ru[0], ll[0], rl[0])
    x_max = max(lu[0], ru[0], ll[0], rl[0])
    y_min = min(lu[1], ru[1], ll[1], rl[1])
    y_max = max(lu[1], ru[1], ll[1], rl[1])
    rect_points = [(x_min, y_min), (x_min, y_max), (x_max, y_max), (x_max, y_min)]
    rect_values = get_values_within_box(img, rect_points)
    # number of white pixels in bounding box, and not in body part
    minus_white_pixels = len(rect_values[rect_values == 255]) - white_pixels
    intersection = get_intersection_with_body_parts(img, index, body_parts_list)
    importance = (black_pixels / all_pixels) + (weight * ((minus_white_pixels * (all_pixels + intersection)) / (all_pixels**2)))
    # if body_part.name == 'Torso':
    #     importance *= 1.7
    # Upper limbs are weighted more heavily than other parts.
    if body_part.name == 'Left Upper Arm' or body_part.name == 'Right Upper Arm' or body_part.name == 'Left Upper Leg' or body_part.name == 'Right Upper Leg':
        importance *= 2.2
    body_part.priority = importance
def update_all_priorities(img, body_parts_list, w):
    """Recompute the priority of every body part, in list-index order."""
    for part_index in range(len(body_parts_list)):
        update_importance(img, part_index, body_parts_list, w)
def update_child(bp):
    """Re-anchor ``bp``'s children after ``bp``'s pose parameters changed.

    For the torso, each of its five children (head and the four upper limbs)
    is re-attached to the appropriate torso corner/edge.  For any other part,
    its single child (the lower limb) is moved to the far end of the limb
    segment.  Only the child's (x, y) anchor changes; its angle and size are
    preserved via setData().
    """
    if bp.name == 'Torso':
        for i in range(0, 5):
            child_bp = bp.children[i]
            if child_bp.name == 'Head':
                # Head sits centred on the torso's top edge.
                x = bp.x
                y = bp.y - (.5 * bp.l)
            elif child_bp.name == 'Left Upper Arm':
                (x, y) = get_left_upper_corner(bp.x, bp.y, bp.theta, bp.l, bp.w, True)
                y = y + (.5 * child_bp.w)
            elif child_bp.name == 'Right Upper Arm':
                (x, y) = get_right_upper_corner(bp.x, bp.y, bp.theta, bp.l, bp.w, True)
                y = y + (.5 * child_bp.w)
            elif child_bp.name == 'Left Upper Leg':
                (x, y) = get_left_lower_corner(bp.x, bp.y, bp.theta, bp.l, bp.w, True)
                x = x + (.5 * child_bp.w)
            else:
                # Right Upper Leg.
                (x, y) = get_right_lower_corner(bp.x, bp.y, bp.theta, bp.l, bp.w, True)
                x = x - (.5 * child_bp.w)
            child_bp.setData(x, y, child_bp.theta, child_bp.l, child_bp.w)
    else:
        # Upper limb: the child (lower limb) starts where this segment ends.
        y = bp.y + (bp.l * math.sin(math.radians(bp.theta)))
        x = bp.x + (bp.l * math.cos(math.radians(bp.theta)))
        # Dead branch removed: the original tested bp.name == 'head'
        # (lowercase, never produced by get_body_tree) and only did `pass`.
        child_bp = bp.children[0]
        child_bp.setData(x, y, child_bp.theta, child_bp.l, child_bp.w)
def update_parent(bp):
    """Re-anchor ``bp``'s parent so the parent's segment ends at ``bp``.

    The parent's (x, y) is moved back along its own direction by its length;
    its angle and size are preserved via setData().
    """
    parent = bp.parent
    angle = math.radians(parent.theta)
    y = bp.y - parent.l * math.sin(angle)
    x = bp.x - parent.l * math.cos(angle)
    parent.setData(x, y, parent.theta, parent.l, parent.w)
def total_overlap(img, body_parts_list):
    """Sum the pairwise pixel overlap between all distinct body parts.

    Each part is rasterised to a filled-polygon mask from its four rotated
    corners; every unordered pair of distinct parts contributes the size of
    its mask intersection.

    Bug fix: the original inner loop was ``for j in range(1, len(...))``,
    which (a) compared a part with itself whenever i >= 1, adding the part's
    whole area as fake "overlap", and (b) counted most pairs twice while
    never pairing anything with part 0 as ``j``.  The loop now visits each
    unordered pair exactly once.
    """
    intersection = 0
    for i in range(0, len(body_parts_list)):
        part_i = body_parts_list[i]
        # Hoisted: rasterise part i once instead of once per inner iteration.
        mask_i = create_mask_body_part(
            img, [part_i.left_upper_corner, part_i.right_upper_corner,
                  part_i.right_lower_corner, part_i.left_lower_corner])
        for j in range(i + 1, len(body_parts_list)):
            part_j = body_parts_list[j]
            mask_j = create_mask_body_part(
                img, [part_j.left_upper_corner, part_j.right_upper_corner,
                      part_j.right_lower_corner, part_j.left_lower_corner])
            intersection += np.sum(np.logical_and(mask_i, mask_j))
    return intersection
def get_posterior_probability(img, foreground_area, beta, body_parts_list):
    """Posterior score exp((Si - beta * So) / Su) for the current pose.

    Si: total matched (Si) pixels summed over all parts; Su: union of the
    pose area and the foreground; So: total pairwise overlap between parts,
    penalised by ``beta``.
    """
    matched = 0
    pose_area = 0
    for part in body_parts_list:
        matched += part.Si
        pose_area += part.area
    union = pose_area + foreground_area - matched
    overlap = total_overlap(img, body_parts_list)
    return np.exp((matched - beta * overlap) * 1.0 / union)
def rotate_image(img, angle):
    """Rotate ``img`` by the negated ``angle`` via imutils.rotate_bound,
    which expands the canvas so no part of the image is clipped."""
    return imutils.rotate_bound(img, -angle)
def draw_all(img, body_parts_list):
    """Draw the rotated bounding rectangle of every body part onto ``img``.

    Returns the image with all rectangles drawn.
    """
    image = img
    for part in body_parts_list:
        image = draw_rectangle(image,
                               part.left_upper_corner,
                               part.right_upper_corner,
                               part.left_lower_corner,
                               part.right_lower_corner)
    return image
def draw_video_frame(img, body_parts_list, i, save_path):
    """Render the skeleton for frame ``i`` and save it under ``save_path``.

    White (255) pixels of ``img`` are blanked first (``img`` is modified in
    place); the file name is the zero-padded 7-digit frame index plus .jpg.
    """
    img[img == 255] = 0
    skeleton = create_skeleton(img, body_parts_list)
    frame_name = save_path + '/' + str(i).zfill(7) + '.jpg'
    print("Saving frame", frame_name)
    cv.imwrite(frame_name, skeleton)
def create_skeleton(image,body_model):
    """Render the fitted pose as a stick-figure skeleton on a white canvas.

    ``image`` is used only for its shape (must be 3-channel); ``body_model``
    is the list of fitted body parts.  Every joint is appended to ``points``
    and ``positions[role]`` records the index of that joint's point so the
    joints can be wired up afterwards by connect_points().  Lower limbs
    contribute two points each: the limb anchor and the mid-point of the
    limb's bottom edge (hand / foot).  Returns the rendered image.
    """
    points = []
    # Role -> point-index table; see connect_points() for how it is consumed.
    positions = np.zeros(14) #head,torso,left_arm_up,left_arm_mid,left_arm_down,right_arm_up,right_arm_mid,right_arm_down,left_leg_mid,left_leg_down,right_leg_up,right_leg_mid,right_leg_down
    new_image = np.zeros((image.shape[0],image.shape[1],image.shape[2]))
    black_pixels = np.where(
        (new_image[:, :, 0] == 0) &
        (new_image[:, :, 1] == 0) &
        (new_image[:, :, 2] == 0))
    # set those pixels to white (blank canvas)
    new_image[black_pixels] = [255, 255, 255]
    indx = 0
    for i in range(0,len(body_model)):
        if body_model[i].name =='Head':
            new_image = cv.circle(new_image, (int(body_model[i].x),int(body_model[i].y)), 3, (0, 0, 255), 3)
            points.append((int(body_model[i].x),int(body_model[i].y)))
            positions[0] = indx
            indx = indx+1
        elif body_model[i].name == 'Torso':
            new_image = cv.circle(new_image, (int(body_model[i].x),int(body_model[i].y)), 3, (0, 0, 255), 3)
            points.append((int(body_model[i].x),int(body_model[i].y)))
            positions[1] = indx
            indx = indx+1
        elif body_model[i].name == 'Left Upper Arm':
            new_image = cv.circle(new_image, (int(body_model[i].x),int(body_model[i].y)), 3, (0, 128, 255), 3)
            points.append((int(body_model[i].x),int(body_model[i].y)))
            positions[2] = indx
            indx = indx+1
        elif body_model[i].name == 'Left Lower Arm':
            new_image = cv.circle(new_image, (int(body_model[i].x),int(body_model[i].y)), 3, (0, 128, 255), 3)
            # Hand = mid-point of the lower arm's bottom edge.
            left_hand = (int((body_model[i].left_lower_corner[0]+body_model[i].right_lower_corner[0])/2),int((body_model[i].left_lower_corner[1]+body_model[i].right_lower_corner[1])/2))
            positions[3] = indx
            positions[4] = indx+1
            indx = indx+2
            new_image = cv.circle(new_image, left_hand, 3, (153,204,255), 3)
            points.append((int(body_model[i].x),int(body_model[i].y)))
            points.append(left_hand)
        elif body_model[i].name == 'Right Upper Arm':
            new_image = cv.circle(new_image, (int(body_model[i].x),int(body_model[i].y)), 3, (51,255,255), 3)
            points.append((int(body_model[i].x),int(body_model[i].y)))
            positions[5] = indx
            indx = indx+1
        elif body_model[i].name == 'Right Lower Arm':
            new_image = cv.circle(new_image, (int(body_model[i].x),int(body_model[i].y)), 3, (51,255,255), 3)
            right_hand = (int((body_model[i].left_lower_corner[0]+body_model[i].right_lower_corner[0])/2),int((body_model[i].left_lower_corner[1]+body_model[i].right_lower_corner[1])/2))
            new_image = cv.circle(new_image, right_hand, 3, (51,255,255), 3)
            points.append((int(body_model[i].x),int(body_model[i].y)))
            points.append(right_hand)
            positions[6] = indx
            positions[7] = indx+1
            indx = indx+2
        elif body_model[i].name == 'Left Upper Leg':
            new_image = cv.circle(new_image, (int(body_model[i].x),int(body_model[i].y)), 3, (255,51 , 51), 3)
            points.append((int(body_model[i].x),int(body_model[i].y)))
            positions[8] = indx
            indx = indx+1
        elif body_model[i].name == 'Left Lower Leg':
            new_image = cv.circle(new_image, (int(body_model[i].x),int(body_model[i].y)), 3, (255,178 , 102), 3)
            # Foot = mid-point of the lower leg's bottom edge.
            left_leg = (int((body_model[i].left_lower_corner[0]+body_model[i].right_lower_corner[0])/2),int((body_model[i].left_lower_corner[1]+body_model[i].right_lower_corner[1])/2))
            new_image = cv.circle(new_image, left_leg, 3, (255,178 , 102), 3)
            points.append((int(body_model[i].x),int(body_model[i].y)))
            points.append(left_leg)
            positions[9] = indx
            positions[10] = indx+1
            indx = indx+2
        elif body_model[i].name == 'Right Upper Leg':
            new_image = cv.circle(new_image, (int(body_model[i].x),int(body_model[i].y)), 3, (51,255,153), 3)
            points.append((int(body_model[i].x),int(body_model[i].y)))
            positions[11] = indx
            indx = indx+1
        elif body_model[i].name == 'Right Lower Leg':
            new_image = cv.circle(new_image, (int(body_model[i].x),int(body_model[i].y)), 3, (51,255,51), 3)
            right_leg = (int((body_model[i].left_lower_corner[0]+body_model[i].right_lower_corner[0])/2),int((body_model[i].left_lower_corner[1]+body_model[i].right_lower_corner[1])/2))
            new_image = cv.circle(new_image, right_leg, 3, (51,255,51), 3)
            points.append((int(body_model[i].x),int(body_model[i].y)))
            points.append(right_leg)
            positions[12] = indx
            positions[13] = indx+1
            indx = indx+2
    # Hip point: mid-point between the two upper-leg anchors; appended last
    # (becomes points[14], referenced directly by connect_points()).
    middle_leg = (int((points[int(positions[11])][0]+points[int(positions[8])][0])/2),int((points[int(positions[11])][1]+points[int(positions[8])][1])/2))
    points.append(middle_leg)
    new_image = connect_points(new_image,positions,points)
    #new_image = cv.cvtColor(new_image, cv.COLOR_BGR2RGB)
    # show_image(new_image)
    return new_image
def connect_points(new_image,positions,points):
    """Draw the skeleton bones between the joints collected by create_skeleton().

    ``positions[role]`` maps a joint role to its index in ``points`` (see the
    role list in create_skeleton); ``points[14]`` is the hip mid-point that
    create_skeleton appends last.  Returns the image with all bones drawn.
    """
    # Head (0) and the two shoulder joints (2, 5) connect to the torso (1).
    torso_conn = [0,2,5]
    for i in torso_conn:
        new_image = cv.line(new_image, points[int(positions[i])], points[int(positions[1])], (0,0,255), 3)
    # Limb chains: each start role i connects i->i+1 (upper->mid) and
    # i+1->i+2 (mid->end); colors are indexed by the same roles.
    l_a = [2,5,8,11]
    color = [0,0,(0, 128, 255),(153,204,255),(153,204,255),(51,255,255),(51,255,255),(51,255,255),(255,51 , 51),(255,178 , 102),(255,178 , 102),(51,255,153),(51,255,51),(51,255,51)]
    for i in l_a:
        new_image = cv.line(new_image, points[int(positions[i])], points[int(positions[i+1])], color[i], 3)
        new_image = cv.line(new_image, points[int(positions[i+1])], points[int(positions[i+2])], color[i+1], 3)
    # Hip mid-point (points[14]) ties both legs and the torso together.
    new_image = cv.line(new_image, points[14], points[int(positions[8])], (0,0,255), 3)
    new_image = cv.line(new_image, points[14], points[int(positions[11])], (0,0,255), 3)
    new_image = cv.line(new_image, points[14], points[int(positions[1])], (0,0,255), 3)
    return new_image
def init_visited(body_parts_list):
    """Reset the visit counter of every body part to zero."""
    for part in body_parts_list:
        part.visited = 0
def change_value(body_part):
    """Perturb one pose parameter of ``body_part`` by Gaussian noise.

    The torso picks a random parameter index in {0, 1, 2}; every other part
    always perturbs index 2.  The noise scale comes from a per-index table
    that differs for the torso.  Calls body_part.updateValue(index, eps) and
    returns (index, eps).
    """
    #index = np.random.randint(0, 5)
    is_torso = body_part.name == 'Torso'
    if is_torso:
        index = np.random.randint(0, 3)
    else:
        index = 2
    sigma_table = [7, 5, 4, 2, 2] if is_torso else [7, 5, 10, 2, 2]
    eps = np.random.normal(0, sigma_table[index])
    body_part.updateValue(index, eps)
    return index, eps
def get_TFH(image, face, height):
    """Estimate the torso centre from a face box and a torso height.

    ``face`` is an (x, y, w, h) box.  The face centre, the shoulder line
    (bottom of the face box) and the derived torso centre are each marked on
    ``image`` with a small circle.  Returns the torso centre as (x, y).
    """
    fx, fy, fw, fh = face[0], face[1], face[2], face[3]
    face_center = (int(fx + fw / 2), int(fy + fh / 2))
    image = cv.circle(image, face_center, 3, (255, 0, 0), 3)
    # Shoulder line sits at the bottom edge of the face box.
    shoulder = round(fy + fh)
    image = cv.circle(image, (int(face_center[0]), int(shoulder)), 3, (255, 0, 0), 3)
    # Torso centre: half the torso height below the shoulder line.
    torso_center = (int(face_center[0]), int(round(shoulder + height / 2)))
    image = cv.circle(image, torso_center, 3, (255, 0, 0), 3)
    return torso_center[0], torso_center[1]
def initial_pose(image_R, foreground_image, faces, foreground_area):
    """Build and score the initial pose hypothesis for the anchor frame.

    Fits the torso from the first detected face, samples every other body
    part relative to it (stochastic: consumes np.random draws), draws the
    boxes on ``foreground_image``, and scores the pose.  Relies on the
    module-level global ``beta`` (defined elsewhere in this file).

    Returns (body_parts_list, bp_priority_based, posterior_prob) where
    bp_priority_based is the same parts sorted by descending priority.
    """
    torso, head, left_upper_arm, right_upper_arm, left_upper_leg, right_upper_leg, left_lower_arm, right_lower_arm, left_lower_leg, right_lower_leg = get_body_tree()
    # Fit the torso to the face detection, then read back the stored data.
    torso_data = get_torso_model(image_R,faces[0],foreground_image)
    torso.setData(*torso_data)
    torso_data = torso.getData()
    # Sample all remaining parts relative to the fitted torso.
    head_data, left_upper_leg_data, right_upper_leg_data, left_lower_leg_data, right_lower_leg_data, left_upper_arm_data, right_upper_arm_data, left_lower_arm_data, right_lower_arm_data = get_body_data(*torso_data)
    head.setData(*head_data)
    left_upper_arm.setData(*left_upper_arm_data)
    right_upper_arm.setData(*right_upper_arm_data)
    left_upper_leg.setData(*left_upper_leg_data)
    right_upper_leg.setData(*right_upper_leg_data)
    left_lower_arm.setData(*left_lower_arm_data)
    right_lower_arm.setData(*right_lower_arm_data)
    left_lower_leg.setData(*left_lower_leg_data)
    right_lower_leg.setData(*right_lower_leg_data)
    body_parts_list = [torso, left_upper_arm, left_lower_arm, right_upper_arm, right_lower_arm, left_upper_leg, left_lower_leg, right_upper_leg, right_lower_leg, head]
    draw_all(foreground_image, body_parts_list)
    # Importance weight used by update_importance().
    w = 0.2
    update_all_priorities(foreground_image, body_parts_list, w)
    bp_priority_based = body_parts_list.copy()
    bp_priority_based = sorted(bp_priority_based, reverse=True, key=lambda x: x.priority)
    # NOTE: `beta` is a module-level global defined outside this view.
    posterior_prob = get_posterior_probability(foreground_image, foreground_area, beta, body_parts_list)
    return body_parts_list,bp_priority_based,posterior_prob
def build_pose(image_R, foreground_image, body_parts_list, bp_priority_based, posterior_prob, step, frame_num, foreground_area, w, save_path):
    """Refine the pose by stochastic hill-climbing on the posterior score.

    Iterates over parts (first the 9 non-head parts in list order, then by
    descending priority), proposing random perturbations via change_value()
    and keeping a move only when it raises the posterior; rejected moves are
    undone by applying the inverse perturbation.  ``step`` == 1 (the anchor
    frame) enables more iterations and an extra +/-45-degree kick for stuck
    high-priority parts.  Draws the result and saves a skeleton frame.
    Relies on the module-level global ``beta``.

    Returns the updated (body_parts_list, bp_priority_based, posterior_prob).

    NOTE(review): inside the "stuck part" kick, `j` is used before this
    call's first change_value() assignment — it only works because `j` leaks
    from a previous loop iteration; verify it cannot be unbound on entry.
    """
    if(step ==1):
        limit = 20
    else:
        limit=15
    bp = 1
    update_all_priorities(foreground_image, body_parts_list, w)
    posterior_prob = get_posterior_probability(foreground_image, foreground_area, beta, body_parts_list)
    bp_priority_based = sorted(bp_priority_based, reverse=True, key=lambda x: x.priority)
    init_visited(body_parts_list)
    for i in range(limit):
        # First pass visits each part in list order; afterwards always take
        # the current highest-priority part, stopping when priorities drop.
        if i < 9:
            bp = body_parts_list[i]
        else:
            bp = bp_priority_based[0]
            if bp_priority_based[0].priority < 0.71:
                break
        bp.visited += 1
        for k in range(20):
            posterior_prob =get_posterior_probability(foreground_image, foreground_area, beta, body_parts_list)
            cur_priority = bp.priority
            cur_post = get_posterior_probability(foreground_image, foreground_area, beta, body_parts_list)
            # Anchor-frame only: kick a stuck, repeatedly-visited part by
            # +/-45 degrees on the last-perturbed parameter and keep the
            # better of the two orientations.
            if(((bp.priority > 1 and bp.visited>3) or (bp.priority > 0.95 and bp.visited>6) ) and step ==1 ):
                bp.updateValue(j,45)
                update_all_priorities(foreground_image, body_parts_list, w)
                temp_posterior = get_posterior_probability(foreground_image, foreground_area, beta, body_parts_list)
                bp.updateValue(j,-90)
                update_all_priorities(foreground_image, body_parts_list, w)
                new_posterior = get_posterior_probability(foreground_image, foreground_area, beta, body_parts_list)
                if(new_posterior < temp_posterior and cur_priority>1.3):
                    bp.updateValue(j,90)
                    update_all_priorities(foreground_image, body_parts_list, w)
                elif(cur_priority<1.3 and cur_post > new_posterior and cur_post>temp_posterior ):
                    bp.updateValue(j,-45)
                    update_all_priorities(foreground_image, body_parts_list, w)
            # Propose a random perturbation; parts with children must
            # re-anchor them after every parameter change.
            j, diff = change_value(bp)
            if bp.name == 'Torso' or bp.name == 'Left Upper Arm' or bp.name == 'Right Upper Arm' or bp.name == 'Left Upper Leg' or bp.name == 'Right Upper Leg':
                update_child(bp)
            update_all_priorities(foreground_image, body_parts_list, w)
            new_posterior = get_posterior_probability(foreground_image, foreground_area, beta, body_parts_list)
            if new_posterior <= posterior_prob:
                # Try the opposite direction (-2*diff reverses and mirrors
                # the move); if that is also worse, undo back to the start.
                bp.updateValue(j, -2 * diff)
                if bp.name == 'Torso' or bp.name == 'Left Upper Arm' or bp.name == 'Right Upper Arm' or bp.name == 'Left Upper Leg' or bp.name == 'Right Upper Leg':
                    update_child(bp)
                update_all_priorities(foreground_image, body_parts_list, w)
                new_posterior = get_posterior_probability(foreground_image, foreground_area, beta, body_parts_list)
                if new_posterior < posterior_prob:
                    bp.updateValue(j, diff)
                    if bp.name == 'Torso' or bp.name == 'Left Upper Arm' or bp.name == 'Right Upper Arm' or bp.name == 'Left Upper Leg' or bp.name == 'Right Upper Leg':
                        update_child(bp)
                    update_all_priorities(foreground_image, body_parts_list, w)
                else:
                    posterior_prob = new_posterior
            else:
                posterior_prob = new_posterior
                if bp.name == 'Torso' or bp.name == 'Left Upper Arm' or bp.name == 'Right Upper Arm' or bp.name == 'Left Upper Leg' or bp.name == 'Right Upper Leg':
                    update_child(bp)
        draw_all(foreground_image, body_parts_list)
        update_all_priorities(foreground_image, body_parts_list, w)
        bp_priority_based = sorted(bp_priority_based, reverse=True, key=lambda x: x.priority)
    draw_all(foreground_image, body_parts_list)
    best_body_parts_list = body_parts_list.copy()
    image = draw_all(foreground_image, best_body_parts_list)
    draw_video_frame(image, body_parts_list, frame_num, save_path)
    return body_parts_list, bp_priority_based, posterior_prob
def complete_video(frames_names_list, foreground_names_list, prev_frames, body_parts_list, bp_priority_based, posterior_prob, main_number, foreground_area, w, save_path):
    """Propagate the anchor-frame pose to the rest of the video.

    The pose fitted at frame ``main_number`` is first refined backwards over
    the already-loaded ``prev_frames`` (main_number-1 down to 0), then the
    anchor state is restored and refined forwards over the remaining frames
    (loaded from disk).  Each step calls build_pose() with step=2, which
    also writes a skeleton image per frame into ``save_path``.
    """
    frames_count = len(frames_names_list)
    # Snapshot the anchor-frame state so the forward pass can restart from it
    # after the backward pass has mutated body_parts_list in place.
    main_body_parts_list = body_parts_list.copy()
    main_bp_priority_based = bp_priority_based.copy()
    main_posterior_prob = posterior_prob.copy()
    for i in range(main_number - 1, -1, -1):
        frame = prev_frames[i]
        foreground_image = cv.imread(foreground_names_list[i])
        foreground_image = cv.cvtColor(foreground_image, cv.COLOR_RGB2GRAY)
        body_parts_list, bp_priority_based, posterior_prob = build_pose(frame, foreground_image, body_parts_list, bp_priority_based, posterior_prob, 2, i, foreground_area, w, save_path)
    # Restore the anchor state before walking forwards.
    body_parts_list = main_body_parts_list.copy()
    bp_priority_based = main_bp_priority_based.copy()
    posterior_prob = main_posterior_prob.copy()
    for i in range(main_number+1, frames_count):
        frame = cv.imread(frames_names_list[i])
        foreground_image = cv.imread(foreground_names_list[i])
        foreground_image = cv.cvtColor(foreground_image, cv.COLOR_RGB2GRAY)
        body_parts_list, bp_priority_based, posterior_prob = build_pose(frame, foreground_image, body_parts_list, bp_priority_based, posterior_prob, 2, i, foreground_area, w, save_path)
def get_poses(frames_path, segmented_frames_path, poses_path):
    """Estimate a pose for every frame of a video and save skeleton images.

    Frames are scanned in order until one with a detectable face is found;
    that frame anchors the initial pose, which build_pose() refines and
    complete_video() propagates backwards and forwards through the video.
    Skeleton frames are written into ``poses_path`` (created if missing).

    Raises RuntimeError when no frame contains a detectable face (the
    original fell through to a NameError in that case).
    """
    if not os.path.exists(poses_path):
        os.makedirs(poses_path)
    frames_names = sorted(os.listdir(frames_path))
    segmented_frames_names = sorted(os.listdir(segmented_frames_path))
    frames_count = len(segmented_frames_names)
    frames_full_names = [frames_path + '/' + frames_names[i] for i in range(frames_count)]
    segmented_full_names = [segmented_frames_path + '/' + segmented_frames_names[i] for i in range(frames_count)]
    prev_frames = []
    main_number = None
    for i in range(frames_count):
        frame = cv.imread(frames_full_names[i])
        prev_frames.append(frame)
        print("processing ", frames_path, '/', frames_names[i], sep='')
        foreground_image = cv.imread(segmented_full_names[i], cv.IMREAD_COLOR)
        foreground_area = np.count_nonzero(foreground_image[foreground_image == 255])
        faces = face_detection(frame, foreground_image)
        # len() handles both the empty tuple and an ndarray of boxes; the
        # original compared an ndarray against (), which relies on deprecated
        # NumPy comparison semantics.
        if len(faces) > 0:
            main_number = i
            image_R = frame.copy()
            foreground_image = cv.cvtColor(foreground_image, cv.COLOR_RGB2GRAY)
            break
    if main_number is None:
        raise RuntimeError('no face detected in any frame; cannot initialise pose')
    # The original duplicated the entire pose initialisation inline here and
    # then immediately overwrote every result with initial_pose(); that
    # redundant (and RNG-consuming) duplicate has been removed.
    w = 0.2
    body_parts_list, bp_priority_based, posterior_prob = initial_pose(image_R, foreground_image, faces, foreground_area)
    body_parts_list, bp_priority_based, posterior_prob = build_pose(image_R, foreground_image, body_parts_list, bp_priority_based, posterior_prob, 1, main_number, foreground_area, w, poses_path)
    complete_video(frames_full_names, segmented_full_names, prev_frames, body_parts_list, bp_priority_based, posterior_prob, main_number, foreground_area, w, poses_path)
| [
"cv2.rectangle",
"numpy.sqrt",
"numpy.count_nonzero",
"numpy.array",
"cv2.CascadeClassifier",
"os.path.exists",
"os.listdir",
"numpy.where",
"cv2.line",
"numpy.exp",
"cv2.distanceTransform",
"numpy.arctan",
"numpy.random.normal",
"cv2.fillPoly",
"numpy.amax",
"cv2.polylines",
"numpy.... | [((2234, 2263), 'cv2.absdiff', 'cv.absdiff', (['frame', 'background'], {}), '(frame, background)\n', (2244, 2263), True, 'import cv2 as cv\n'), ((2797, 2843), 'cv2.distanceTransform', 'cv.distanceTransform', (['foreImage', 'cv.DIST_L2', '(5)'], {}), '(foreImage, cv.DIST_L2, 5)\n', (2817, 2843), True, 'import cv2 as cv\n'), ((3120, 3151), 'numpy.sqrt', 'np.sqrt', (['(varLTorso * lBody ** 2)'], {}), '(varLTorso * lBody ** 2)\n', (3127, 3151), True, 'import numpy as np\n'), ((3163, 3190), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma'], {}), '(mu, sigma)\n', (3179, 3190), True, 'import numpy as np\n'), ((3451, 3482), 'numpy.sqrt', 'np.sqrt', (['(varWTorso * wBody ** 2)'], {}), '(varWTorso * wBody ** 2)\n', (3458, 3482), True, 'import numpy as np\n'), ((3494, 3521), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma'], {}), '(mu, sigma)\n', (3510, 3521), True, 'import numpy as np\n'), ((3737, 3783), 'cv2.distanceTransform', 'cv.distanceTransform', (['foreImage', 'cv.DIST_L2', '(5)'], {}), '(foreImage, cv.DIST_L2, 5)\n', (3757, 3783), True, 'import cv2 as cv\n'), ((3965, 4015), 'cv2.distanceTransform', 'cv.distanceTransform', (['cropped_image', 'cv.DIST_L2', '(5)'], {}), '(cropped_image, cv.DIST_L2, 5)\n', (3985, 4015), True, 'import cv2 as cv\n'), ((5124, 5163), 'numpy.random.normal', 'np.random.normal', (['meanHeight', 'varHeight'], {}), '(meanHeight, varHeight)\n', (5140, 5163), True, 'import numpy as np\n'), ((5220, 5249), 'numpy.random.normal', 'np.random.normal', (['meanW', 'varW'], {}), '(meanW, varW)\n', (5236, 5249), True, 'import numpy as np\n'), ((5513, 5537), 'numpy.random.normal', 'np.random.normal', (['(45)', '(10)'], {}), '(45, 10)\n', (5529, 5537), True, 'import numpy as np\n'), ((5760, 5799), 'numpy.random.normal', 'np.random.normal', (['meanHeight', 'varHeight'], {}), '(meanHeight, varHeight)\n', (5776, 5799), True, 'import numpy as np\n'), ((5856, 5885), 'numpy.random.normal', 'np.random.normal', (['meanW', 'varW'], 
{}), '(meanW, varW)\n', (5872, 5885), True, 'import numpy as np\n'), ((6138, 6163), 'numpy.random.normal', 'np.random.normal', (['(125)', '(10)'], {}), '(125, 10)\n', (6154, 6163), True, 'import numpy as np\n'), ((6356, 6395), 'numpy.random.normal', 'np.random.normal', (['meanHeight', 'varHeight'], {}), '(meanHeight, varHeight)\n', (6372, 6395), True, 'import numpy as np\n'), ((6452, 6481), 'numpy.random.normal', 'np.random.normal', (['meanW', 'varW'], {}), '(meanW, varW)\n', (6468, 6481), True, 'import numpy as np\n'), ((6565, 6603), 'numpy.random.normal', 'np.random.normal', (['top_right_x', 'sigma_x'], {}), '(top_right_x, sigma_x)\n', (6581, 6603), True, 'import numpy as np\n'), ((6618, 6656), 'numpy.random.normal', 'np.random.normal', (['top_right_y', 'sigma_x'], {}), '(top_right_y, sigma_x)\n', (6634, 6656), True, 'import numpy as np\n'), ((6674, 6698), 'numpy.random.normal', 'np.random.normal', (['(45)', '(10)'], {}), '(45, 10)\n', (6690, 6698), True, 'import numpy as np\n'), ((6892, 6931), 'numpy.random.normal', 'np.random.normal', (['meanHeight', 'varHeight'], {}), '(meanHeight, varHeight)\n', (6908, 6931), True, 'import numpy as np\n'), ((6988, 7017), 'numpy.random.normal', 'np.random.normal', (['meanW', 'varW'], {}), '(meanW, varW)\n', (7004, 7017), True, 'import numpy as np\n'), ((7098, 7135), 'numpy.random.normal', 'np.random.normal', (['top_left_x', 'sigma_x'], {}), '(top_left_x, sigma_x)\n', (7114, 7135), True, 'import numpy as np\n'), ((7149, 7186), 'numpy.random.normal', 'np.random.normal', (['top_left_y', 'sigma_x'], {}), '(top_left_y, sigma_x)\n', (7165, 7186), True, 'import numpy as np\n'), ((7204, 7229), 'numpy.random.normal', 'np.random.normal', (['(125)', '(10)'], {}), '(125, 10)\n', (7220, 7229), True, 'import numpy as np\n'), ((7446, 7485), 'numpy.random.normal', 'np.random.normal', (['meanHeight', 'varHeight'], {}), '(meanHeight, varHeight)\n', (7462, 7485), True, 'import numpy as np\n'), ((7543, 7572), 'numpy.random.normal', 
'np.random.normal', (['meanW', 'varW'], {}), '(meanW, varW)\n', (7559, 7572), True, 'import numpy as np\n'), ((7796, 7836), 'numpy.random.normal', 'np.random.normal', (['bottom_left_x', 'sigma_x'], {}), '(bottom_left_x, sigma_x)\n', (7812, 7836), True, 'import numpy as np\n'), ((7850, 7890), 'numpy.random.normal', 'np.random.normal', (['bottom_left_y', 'sigma_x'], {}), '(bottom_left_y, sigma_x)\n', (7866, 7890), True, 'import numpy as np\n'), ((7908, 7933), 'numpy.random.normal', 'np.random.normal', (['(100)', '(10)'], {}), '(100, 10)\n', (7924, 7933), True, 'import numpy as np\n'), ((8155, 8194), 'numpy.random.normal', 'np.random.normal', (['meanHeight', 'varHeight'], {}), '(meanHeight, varHeight)\n', (8171, 8194), True, 'import numpy as np\n'), ((8251, 8280), 'numpy.random.normal', 'np.random.normal', (['meanW', 'varW'], {}), '(meanW, varW)\n', (8267, 8280), True, 'import numpy as np\n'), ((8490, 8528), 'numpy.random.normal', 'np.random.normal', (['top_right_x', 'sigma_x'], {}), '(top_right_x, sigma_x)\n', (8506, 8528), True, 'import numpy as np\n'), ((8543, 8581), 'numpy.random.normal', 'np.random.normal', (['top_right_y', 'sigma_x'], {}), '(top_right_y, sigma_x)\n', (8559, 8581), True, 'import numpy as np\n'), ((8599, 8623), 'numpy.random.normal', 'np.random.normal', (['(80)', '(10)'], {}), '(80, 10)\n', (8615, 8623), True, 'import numpy as np\n'), ((8816, 8855), 'numpy.random.normal', 'np.random.normal', (['meanHeight', 'varHeight'], {}), '(meanHeight, varHeight)\n', (8832, 8855), True, 'import numpy as np\n'), ((8911, 8940), 'numpy.random.normal', 'np.random.normal', (['meanW', 'varW'], {}), '(meanW, varW)\n', (8927, 8940), True, 'import numpy as np\n'), ((9027, 9067), 'numpy.random.normal', 'np.random.normal', (['bottom_left_x', 'sigma_x'], {}), '(bottom_left_x, sigma_x)\n', (9043, 9067), True, 'import numpy as np\n'), ((9081, 9121), 'numpy.random.normal', 'np.random.normal', (['bottom_left_y', 'sigma_x'], {}), '(bottom_left_y, sigma_x)\n', (9097, 9121), 
True, 'import numpy as np\n'), ((9139, 9164), 'numpy.random.normal', 'np.random.normal', (['(110)', '(10)'], {}), '(110, 10)\n', (9155, 9164), True, 'import numpy as np\n'), ((9356, 9395), 'numpy.random.normal', 'np.random.normal', (['meanHeight', 'varHeight'], {}), '(meanHeight, varHeight)\n', (9372, 9395), True, 'import numpy as np\n'), ((9452, 9481), 'numpy.random.normal', 'np.random.normal', (['meanW', 'varW'], {}), '(meanW, varW)\n', (9468, 9481), True, 'import numpy as np\n'), ((9565, 9603), 'numpy.random.normal', 'np.random.normal', (['top_right_x', 'sigma_x'], {}), '(top_right_x, sigma_x)\n', (9581, 9603), True, 'import numpy as np\n'), ((9618, 9656), 'numpy.random.normal', 'np.random.normal', (['top_right_y', 'sigma_x'], {}), '(top_right_y, sigma_x)\n', (9634, 9656), True, 'import numpy as np\n'), ((9674, 9698), 'numpy.random.normal', 'np.random.normal', (['(70)', '(10)'], {}), '(70, 10)\n', (9690, 9698), True, 'import numpy as np\n'), ((9899, 9938), 'numpy.random.normal', 'np.random.normal', (['meanHeight', 'varHeight'], {}), '(meanHeight, varHeight)\n', (9915, 9938), True, 'import numpy as np\n'), ((9993, 10022), 'numpy.random.normal', 'np.random.normal', (['meanW', 'varW'], {}), '(meanW, varW)\n', (10009, 10022), True, 'import numpy as np\n'), ((10117, 10141), 'numpy.random.normal', 'np.random.normal', (['(270)', '(5)'], {}), '(270, 5)\n', (10133, 10141), True, 'import numpy as np\n'), ((16947, 17093), 'cv2.line', 'cv.line', (['image', '(x_left_upper_corner, y_left_upper_corner)', '(x_right_upper_corner, y_right_upper_corner)'], {'color': '(255, 0, 0)', 'thickness': '(2)'}), '(image, (x_left_upper_corner, y_left_upper_corner), (\n x_right_upper_corner, y_right_upper_corner), color=(255, 0, 0), thickness=2\n )\n', (16954, 17093), True, 'import cv2 as cv\n'), ((17092, 17231), 'cv2.line', 'cv.line', (['image', '(x_left_upper_corner, y_left_upper_corner)', '(x_left_lower_corner, y_left_lower_corner)'], {'color': '(255, 0, 0)', 'thickness': '(2)'}), '(image, 
(x_left_upper_corner, y_left_upper_corner), (\n x_left_lower_corner, y_left_lower_corner), color=(255, 0, 0), thickness=2)\n', (17099, 17231), True, 'import cv2 as cv\n'), ((17235, 17383), 'cv2.line', 'cv.line', (['image', '(x_right_upper_corner, y_right_upper_corner)', '(x_right_lower_corner, y_right_lower_corner)'], {'color': '(255, 0, 0)', 'thickness': '(2)'}), '(image, (x_right_upper_corner, y_right_upper_corner), (\n x_right_lower_corner, y_right_lower_corner), color=(255, 0, 0), thickness=2\n )\n', (17242, 17383), True, 'import cv2 as cv\n'), ((17382, 17528), 'cv2.line', 'cv.line', (['image', '(x_left_lower_corner, y_left_lower_corner)', '(x_right_lower_corner, y_right_lower_corner)'], {'color': '(255, 0, 0)', 'thickness': '(2)'}), '(image, (x_left_lower_corner, y_left_lower_corner), (\n x_right_lower_corner, y_right_lower_corner), color=(255, 0, 0), thickness=2\n )\n', (17389, 17528), True, 'import cv2 as cv\n'), ((18161, 18307), 'cv2.line', 'cv.line', (['image', '(x_left_upper_corner, y_left_upper_corner)', '(x_right_upper_corner, y_right_upper_corner)'], {'color': '(255, 0, 0)', 'thickness': '(2)'}), '(image, (x_left_upper_corner, y_left_upper_corner), (\n x_right_upper_corner, y_right_upper_corner), color=(255, 0, 0), thickness=2\n )\n', (18168, 18307), True, 'import cv2 as cv\n'), ((18306, 18445), 'cv2.line', 'cv.line', (['image', '(x_left_upper_corner, y_left_upper_corner)', '(x_left_lower_corner, y_left_lower_corner)'], {'color': '(255, 0, 0)', 'thickness': '(2)'}), '(image, (x_left_upper_corner, y_left_upper_corner), (\n x_left_lower_corner, y_left_lower_corner), color=(255, 0, 0), thickness=2)\n', (18313, 18445), True, 'import cv2 as cv\n'), ((18449, 18597), 'cv2.line', 'cv.line', (['image', '(x_right_upper_corner, y_right_upper_corner)', '(x_right_lower_corner, y_right_lower_corner)'], {'color': '(255, 0, 0)', 'thickness': '(2)'}), '(image, (x_right_upper_corner, y_right_upper_corner), (\n x_right_lower_corner, y_right_lower_corner), color=(255, 0, 
0), thickness=2\n )\n', (18456, 18597), True, 'import cv2 as cv\n'), ((18596, 18742), 'cv2.line', 'cv.line', (['image', '(x_left_lower_corner, y_left_lower_corner)', '(x_right_lower_corner, y_right_lower_corner)'], {'color': '(255, 0, 0)', 'thickness': '(2)'}), '(image, (x_left_lower_corner, y_left_lower_corner), (\n x_right_lower_corner, y_right_lower_corner), color=(255, 0, 0), thickness=2\n )\n', (18603, 18742), True, 'import cv2 as cv\n'), ((18804, 18841), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (18815, 18841), True, 'import cv2 as cv\n'), ((18858, 18900), 'cv2.CascadeClassifier', 'cv.CascadeClassifier', (['"""frontface_info.xml"""'], {}), "('frontface_info.xml')\n", (18878, 18900), True, 'import cv2 as cv\n'), ((20503, 20537), 'numpy.array', 'np.array', (['[points]'], {'dtype': 'np.int32'}), '([points], dtype=np.int32)\n', (20511, 20537), True, 'import numpy as np\n'), ((20611, 20647), 'cv2.cvtColor', 'cv.cvtColor', (['gray', 'cv.COLOR_GRAY2BGR'], {}), '(gray, cv.COLOR_GRAY2BGR)\n', (20622, 20647), True, 'import cv2 as cv\n'), ((20650, 20704), 'cv2.polylines', 'cv.polylines', (['img_poly', '[points]', '(True)', '(0, 0, 255)', '(1)'], {}), '(img_poly, [points], True, (0, 0, 255), 1)\n', (20662, 20704), True, 'import cv2 as cv\n'), ((20741, 20760), 'numpy.zeros_like', 'np.zeros_like', (['gray'], {}), '(gray)\n', (20754, 20760), True, 'import numpy as np\n'), ((20763, 20795), 'cv2.fillPoly', 'cv.fillPoly', (['mask', '[points]', '(255)'], {}), '(mask, [points], 255)\n', (20774, 20795), True, 'import cv2 as cv\n'), ((20980, 21014), 'numpy.array', 'np.array', (['[points]'], {'dtype': 'np.int32'}), '([points], dtype=np.int32)\n', (20988, 21014), True, 'import numpy as np\n'), ((21027, 21045), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (21040, 21045), True, 'import numpy as np\n'), ((21048, 21080), 'cv2.fillPoly', 'cv.fillPoly', (['mask', '[points]', '(255)'], {}), '(mask, [points], 
255)\n', (21059, 21080), True, 'import cv2 as cv\n'), ((25762, 25797), 'numpy.exp', 'np.exp', (['((Si - beta * So) * 1.0 / Su)'], {}), '((Si - beta * So) * 1.0 / Su)\n', (25768, 25797), True, 'import numpy as np\n'), ((25839, 25872), 'imutils.rotate_bound', 'imutils.rotate_bound', (['img', '(-angle)'], {}), '(img, -angle)\n', (25859, 25872), False, 'import imutils\n'), ((26418, 26448), 'cv2.imwrite', 'cv.imwrite', (['name', 'img_skeleton'], {}), '(name, img_skeleton)\n', (26428, 26448), True, 'import cv2 as cv\n'), ((26518, 26530), 'numpy.zeros', 'np.zeros', (['(14)'], {}), '(14)\n', (26526, 26530), True, 'import numpy as np\n'), ((26707, 26765), 'numpy.zeros', 'np.zeros', (['(image.shape[0], image.shape[1], image.shape[2])'], {}), '((image.shape[0], image.shape[1], image.shape[2]))\n', (26715, 26765), True, 'import numpy as np\n'), ((26781, 26877), 'numpy.where', 'np.where', (['((new_image[:, :, 0] == 0) & (new_image[:, :, 1] == 0) & (new_image[:, :, 2\n ] == 0))'], {}), '((new_image[:, :, 0] == 0) & (new_image[:, :, 1] == 0) & (new_image\n [:, :, 2] == 0))\n', (26789, 26877), True, 'import numpy as np\n'), ((32712, 32746), 'numpy.random.normal', 'np.random.normal', (['(0)', 'lamdas[index]'], {}), '(0, lamdas[index])\n', (32728, 32746), True, 'import numpy as np\n'), ((32927, 32975), 'cv2.circle', 'cv.circle', (['image', 'face_center', '(3)', '(255, 0, 0)', '(3)'], {}), '(image, face_center, 3, (255, 0, 0), 3)\n', (32936, 32975), True, 'import cv2 as cv\n'), ((33182, 33231), 'cv2.circle', 'cv.circle', (['image', 'torso_center', '(3)', '(255, 0, 0)', '(3)'], {}), '(image, torso_center, 3, (255, 0, 0), 3)\n', (33191, 33231), True, 'import cv2 as cv\n'), ((40126, 40149), 'os.listdir', 'os.listdir', (['frames_path'], {}), '(frames_path)\n', (40136, 40149), False, 'import os\n'), ((40218, 40251), 'os.listdir', 'os.listdir', (['segmented_frames_path'], {}), '(segmented_frames_path)\n', (40228, 40251), False, 'import os\n'), ((2089, 2126), 'cv2.cvtColor', 'cv.cvtColor', 
(['frame', 'cv.COLOR_BGR2GRAY'], {}), '(frame, cv.COLOR_BGR2GRAY)\n', (2100, 2126), True, 'import cv2 as cv\n'), ((2180, 2222), 'cv2.cvtColor', 'cv.cvtColor', (['background', 'cv.COLOR_BGR2GRAY'], {}), '(background, cv.COLOR_BGR2GRAY)\n', (2191, 2222), True, 'import cv2 as cv\n'), ((2454, 2466), 'numpy.argmax', 'np.argmax', (['r'], {}), '(r)\n', (2463, 2466), True, 'import numpy as np\n'), ((2621, 2633), 'numpy.argmax', 'np.argmax', (['r'], {}), '(r)\n', (2630, 2633), True, 'import numpy as np\n'), ((4264, 4314), 'cv2.distanceTransform', 'cv.distanceTransform', (['cropped_image', 'cv.DIST_L2', '(5)'], {}), '(cropped_image, cv.DIST_L2, 5)\n', (4284, 4314), True, 'import cv2 as cv\n'), ((16877, 16912), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_GRAY2RGB'], {}), '(img, cv.COLOR_GRAY2RGB)\n', (16888, 16912), True, 'import cv2 as cv\n'), ((18091, 18126), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_GRAY2RGB'], {}), '(img, cv.COLOR_GRAY2RGB)\n', (18102, 18126), True, 'import cv2 as cv\n'), ((19138, 19197), 'cv2.rectangle', 'cv.rectangle', (['image', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(image, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (19150, 19197), True, 'import cv2 as cv\n'), ((19936, 19962), 'numpy.count_nonzero', 'np.count_nonzero', (['(r == 255)'], {}), '(r == 255)\n', (19952, 19962), True, 'import numpy as np\n'), ((20887, 20908), 'numpy.where', 'np.where', (['(mask == 255)'], {}), '(mask == 255)\n', (20895, 20908), True, 'import numpy as np\n'), ((32601, 32624), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (32618, 32624), True, 'import numpy as np\n'), ((39133, 39168), 'cv2.imread', 'cv.imread', (['foreground_names_list[i]'], {}), '(foreground_names_list[i])\n', (39142, 39168), True, 'import cv2 as cv\n'), ((39192, 39240), 'cv2.cvtColor', 'cv.cvtColor', (['foreground_image', 'cv.COLOR_RGB2GRAY'], {}), '(foreground_image, cv.COLOR_RGB2GRAY)\n', (39203, 39240), True, 'import cv2 as cv\n'), ((39631, 
39662), 'cv2.imread', 'cv.imread', (['frames_names_list[i]'], {}), '(frames_names_list[i])\n', (39640, 39662), True, 'import cv2 as cv\n'), ((39686, 39721), 'cv2.imread', 'cv.imread', (['foreground_names_list[i]'], {}), '(foreground_names_list[i])\n', (39695, 39721), True, 'import cv2 as cv\n'), ((39745, 39793), 'cv2.cvtColor', 'cv.cvtColor', (['foreground_image', 'cv.COLOR_RGB2GRAY'], {}), '(foreground_image, cv.COLOR_RGB2GRAY)\n', (39756, 39793), True, 'import cv2 as cv\n'), ((40050, 40076), 'os.path.exists', 'os.path.exists', (['poses_path'], {}), '(poses_path)\n', (40064, 40076), False, 'import os\n'), ((40082, 40105), 'os.makedirs', 'os.makedirs', (['poses_path'], {}), '(poses_path)\n', (40093, 40105), False, 'import os\n'), ((40628, 40659), 'cv2.imread', 'cv.imread', (['frames_full_names[i]'], {}), '(frames_full_names[i])\n', (40637, 40659), True, 'import cv2 as cv\n'), ((40781, 40832), 'cv2.imread', 'cv.imread', (['segmented_full_names[i]', 'cv.IMREAD_COLOR'], {}), '(segmented_full_names[i], cv.IMREAD_COLOR)\n', (40790, 40832), True, 'import cv2 as cv\n'), ((40855, 40914), 'numpy.count_nonzero', 'np.count_nonzero', (['foreground_image[foreground_image == 255]'], {}), '(foreground_image[foreground_image == 255])\n', (40871, 40914), True, 'import numpy as np\n'), ((2887, 2903), 'numpy.amax', 'np.amax', (['distMap'], {}), '(distMap)\n', (2894, 2903), True, 'import numpy as np\n'), ((3825, 3841), 'numpy.amax', 'np.amax', (['distMap'], {}), '(distMap)\n', (3832, 3841), True, 'import numpy as np\n'), ((4059, 4075), 'numpy.amax', 'np.amax', (['distMap'], {}), '(distMap)\n', (4066, 4075), True, 'import numpy as np\n'), ((41069, 41117), 'cv2.cvtColor', 'cv.cvtColor', (['foreground_image', 'cv.COLOR_RGB2GRAY'], {}), '(foreground_image, cv.COLOR_RGB2GRAY)\n', (41080, 41117), True, 'import cv2 as cv\n'), ((4564, 4590), 'numpy.arctan', 'np.arctan', (['(deltaY / deltaX)'], {}), '(deltaY / deltaX)\n', (4573, 4590), True, 'import numpy as np\n'), ((10598, 10619), 
'math.radians', 'math.radians', (['thetall'], {}), '(thetall)\n', (10610, 10619), False, 'import math\n'), ((10668, 10689), 'math.radians', 'math.radians', (['thetall'], {}), '(thetall)\n', (10680, 10689), False, 'import math\n'), ((10923, 10944), 'math.radians', 'math.radians', (['thetarl'], {}), '(thetarl)\n', (10935, 10944), False, 'import math\n'), ((10994, 11015), 'math.radians', 'math.radians', (['thetarl'], {}), '(thetarl)\n', (11006, 11015), False, 'import math\n'), ((11798, 11819), 'math.radians', 'math.radians', (['thetala'], {}), '(thetala)\n', (11810, 11819), False, 'import math\n'), ((11868, 11889), 'math.radians', 'math.radians', (['thetala'], {}), '(thetala)\n', (11880, 11889), False, 'import math\n'), ((12128, 12149), 'math.radians', 'math.radians', (['thetara'], {}), '(thetara)\n', (12140, 12149), False, 'import math\n'), ((12199, 12220), 'math.radians', 'math.radians', (['thetara'], {}), '(thetara)\n', (12211, 12220), False, 'import math\n'), ((21770, 21801), 'numpy.logical_and', 'np.logical_and', (['part', 'main_part'], {}), '(part, main_part)\n', (21784, 21801), True, 'import numpy as np\n'), ((24643, 24672), 'math.radians', 'math.radians', (['parent_bp.theta'], {}), '(parent_bp.theta)\n', (24655, 24672), False, 'import math\n'), ((24712, 24741), 'math.radians', 'math.radians', (['parent_bp.theta'], {}), '(parent_bp.theta)\n', (24724, 24741), False, 'import math\n'), ((25438, 25465), 'numpy.logical_and', 'np.logical_and', (['part', 'part2'], {}), '(part, part2)\n', (25452, 25465), True, 'import numpy as np\n'), ((4390, 4406), 'numpy.amax', 'np.amax', (['distMap'], {}), '(distMap)\n', (4397, 4406), True, 'import numpy as np\n'), ((14672, 14691), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (14684, 14691), False, 'import math\n'), ((14727, 14746), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (14739, 14746), False, 'import math\n'), ((15205, 15224), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', 
(15217, 15224), False, 'import math\n'), ((15260, 15279), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (15272, 15279), False, 'import math\n'), ((15738, 15757), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (15750, 15757), False, 'import math\n'), ((15793, 15812), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (15805, 15812), False, 'import math\n'), ((16271, 16290), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (16283, 16290), False, 'import math\n'), ((16326, 16345), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (16338, 16345), False, 'import math\n'), ((24339, 24361), 'math.radians', 'math.radians', (['bp.theta'], {}), '(bp.theta)\n', (24351, 24361), False, 'import math\n'), ((24396, 24418), 'math.radians', 'math.radians', (['bp.theta'], {}), '(bp.theta)\n', (24408, 24418), False, 'import math\n'), ((14496, 14515), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (14508, 14515), False, 'import math\n'), ((14609, 14628), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (14621, 14628), False, 'import math\n'), ((15028, 15047), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (15040, 15047), False, 'import math\n'), ((15142, 15161), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (15154, 15161), False, 'import math\n'), ((15562, 15581), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (15574, 15581), False, 'import math\n'), ((15675, 15694), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (15687, 15694), False, 'import math\n'), ((16094, 16113), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (16106, 16113), False, 'import math\n'), ((16208, 16227), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (16220, 16227), False, 'import math\n'), ((28257, 28311), 'cv2.circle', 'cv.circle', (['new_image', 'left_hand', '(3)', '(153, 204, 255)', '(3)'], {}), '(new_image, left_hand, 3, (153, 
204, 255), 3)\n', (28266, 28311), True, 'import cv2 as cv\n'), ((14456, 14475), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (14468, 14475), False, 'import math\n'), ((14569, 14588), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (14581, 14588), False, 'import math\n'), ((14988, 15007), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (15000, 15007), False, 'import math\n'), ((15102, 15121), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (15114, 15121), False, 'import math\n'), ((15522, 15541), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (15534, 15541), False, 'import math\n'), ((15635, 15654), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (15647, 15654), False, 'import math\n'), ((16054, 16073), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (16066, 16073), False, 'import math\n'), ((16168, 16187), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (16180, 16187), False, 'import math\n'), ((29057, 29111), 'cv2.circle', 'cv.circle', (['new_image', 'right_hand', '(3)', '(51, 255, 255)', '(3)'], {}), '(new_image, right_hand, 3, (51, 255, 255), 3)\n', (29066, 29111), True, 'import cv2 as cv\n'), ((29938, 29991), 'cv2.circle', 'cv.circle', (['new_image', 'left_leg', '(3)', '(255, 178, 102)', '(3)'], {}), '(new_image, left_leg, 3, (255, 178, 102), 3)\n', (29947, 29991), True, 'import cv2 as cv\n'), ((30818, 30870), 'cv2.circle', 'cv.circle', (['new_image', 'right_leg', '(3)', '(51, 255, 51)', '(3)'], {}), '(new_image, right_leg, 3, (51, 255, 51), 3)\n', (30827, 30870), True, 'import cv2 as cv\n')] |
#! /usr/bin/env python
import os
import pdb
import time
import yaml
import json
import random
import shutil
import argparse
import numpy as np
from collections import defaultdict
# torch
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import AverageMeter
from solvers import Solver
__all__ = ['BaselineSolver']
def euclidean_dist(x, y):
    """Pairwise Euclidean distance between the rows of two numpy matrices.

    Uses the identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2<a, b>; F.relu
    clamps tiny negative values caused by floating-point cancellation
    before the square root.  Returns a (len(x), len(y)) CUDA tensor.
    """
    xt = torch.from_numpy(x).cuda()
    yt = torch.from_numpy(y).cuda()
    x_sq = torch.sum(xt ** 2, 1).unsqueeze(1)
    y_sq = torch.sum(yt ** 2, 1).unsqueeze(1).transpose(0, 1)
    sq_dist = x_sq + y_sq - 2 * torch.matmul(xt, yt.transpose(0, 1))
    return torch.sqrt(F.relu(sq_dist))
def cosine_dist(x, y):
    """Pairwise cosine distance (1 - cosine similarity) between rows of x and y.

    Inputs are numpy matrices; the result is a (len(x), len(y)) CUDA tensor.
    """
    xt = torch.from_numpy(x).cuda()
    yt = torch.from_numpy(y).cuda()
    sim = F.cosine_similarity(xt[:, None, :], yt[None, :, :], 2)
    return 1 - sim
def de_diag(acc, each_angle=False):
    """Average a (view x view) accuracy matrix, excluding identical-view cases.

    Args:
        acc: square array of per-(probe-view, gallery-view) accuracies.
        each_angle: if True, return the per-probe-view vector instead of
            a single scalar average.
    """
    n_views = acc.shape[0]
    off_diag = acc - np.diag(np.diag(acc))
    per_view = np.sum(off_diag, 1) / (n_views - 1)
    if each_angle:
        return per_view
    return np.mean(per_view)
class BaselineSolver(Solver):
    """Baseline train/test solver for gait recognition (CASIA / OUMVLP).

    Relies on the base ``Solver`` for the builders (``build_data``,
    ``build_model``, ``build_optimizer``, ``build_loss``), checkpointing
    (``load``/``save``/``save_checkpoint``/``load_checkpoint``), logging
    (``print_log``, ``writer``) and the loss wrapper (``self.loss``).
    """

    def train(self):
        """Run the training loop until ``cfg.num_iter`` iterations.

        Tests every ``cfg.test_interval`` iterations, logs every
        ``cfg.log_interval`` iterations, and keeps the best checkpoint.
        """
        self.build_data()
        self.build_model()
        self.build_optimizer()
        self.build_loss()
        start_time = time.time()
        self.iter = 0

        # Print out configurations
        self.print_log('{} samples in train set'.format(
            len(self.trainloader.dataset)))
        self.print_log('{} samples in test set'.format(
            len(self.testloader.dataset)))
        if self.cfg.print_model:
            self.print_log('Architecture:\n{}'.format(self.model))
            num_params = sum(p.numel() for p in self.model.parameters()
                             if p.requires_grad)
            self.print_log('Parameters: {}'.format(num_params))
        self.print_log('Configurations:\n{}\n'.format(
            json.dumps(vars(self.cfg), indent=4)))

        # Load from previous checkpoints
        self.load()

        self.best_acc, self.best_iter = [0], -1
        meters = defaultdict(lambda: AverageMeter())
        end = time.time()
        for seq, view, seq_type, label in self.trainloader:
            self.model.train()
            meters['dataTime'].update(time.time() - end)
            end = time.time()
            lr = self.lr_scheduler.step(self.iter)
            self.iter += 1

            seq, label = seq.float().cuda(), label.long().cuda()
            feature = self.model(seq)
            loss, loss_num = self.loss(feature, label)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # record loss
            meters['modelTime'].update(time.time() - end)
            # BUGFIX: .item() detaches the scalar; the original stored the
            # live CUDA loss tensor in the meter, keeping device memory
            # alive across iterations.
            meters['loss'].update(loss.item())
            meters['lossNum'].update(loss_num)

            # show log info
            if self.iter % self.cfg.log_interval == 0:
                self.print_log('Iter: {}/{}'.format(self.iter, self.cfg.num_iter) +
                               ' - Data: {:.0f}s'.format(meters['dataTime'].sum) +
                               ' - Model: {:.0f}s'.format(meters['modelTime'].sum) +
                               ' - Lr: {:.2e}'.format(lr) +
                               ' - Loss: {:.2f}'.format(meters['loss'].avg) +
                               ' - Num: {:.2e}'.format(meters['lossNum'].avg))
                for i in ['loss', 'lossNum']:
                    self.writer.add_scalar('train/{}'.format(i), meters[i].avg,
                                           self.iter)
                for m in meters.values():
                    m.reset()

            # save checkpoints
            self.save()

            # test
            if self.iter % self.cfg.test_interval == 0:
                acc = self._test()
                self.collect(acc)
                # show distributions of weights and grads
                self.show_info()

            # End
            if self.iter == self.cfg.num_iter:
                self.print_log('\nBest Acc: {}'.format(self.best_acc) +
                               '\nIter: {}'.format(self.best_iter) +
                               '\nDir: {}'.format(self.work_dir) +
                               '\nTime: {}'.format(
                                   self._convert_time(time.time() - start_time)))
                return
            end = time.time()

    def show_info(self, with_weight=True, with_grad=True):
        """Write per-parameter weight and gradient histograms to TensorBoard."""
        if with_weight:
            for name, param in self.model.named_parameters():
                w = param.data.cpu().numpy()
                self.writer.add_histogram('weight_info/{}'.format(name), w,
                                          self.iter)
        if with_grad:
            for name, param in self.model.named_parameters():
                if param.grad is not None:
                    w = param.grad.cpu().numpy()
                    self.writer.add_histogram('grad_info/{}'.format(name), w,
                                              self.iter)

    def collect(self, acc):
        """Track the best mean accuracy; checkpoint whenever it improves.

        Returns the result of ``save_checkpoint`` on improvement, else None.
        """
        acc_avg = sum(acc) / len(acc)
        best_avg = sum(self.best_acc) / len(self.best_acc)
        if acc_avg > best_avg:
            self.best_acc = acc
            self.best_iter = self.iter
            # save the best
            path = os.path.join(self.work_dir, self.cfg.save_name + '.pth.tar')
            return self.save_checkpoint(path)

    def test(self):
        """Evaluate a pretrained model; requires ``cfg.pretrained``."""
        self.build_data()
        self.build_model()
        if self.cfg.pretrained is None:
            raise ValueError('Please appoint --pretrained.')
        self.iter = self.load_checkpoint(self.cfg.pretrained, optim=False)
        return self._test()

    def _test(self):
        """Extract features for the whole test set and compute accuracies."""
        self.model.eval()
        feature_list = list()
        view_list = list()
        seq_type_list = list()
        label_list = list()
        # BUGFIX: run feature extraction under no_grad — the original built
        # autograd graphs for every test batch, wasting GPU memory.
        with torch.no_grad():
            for i, x in enumerate(self.testloader):
                seq, view, seq_type, label = x
                seq = seq.float().cuda()
                feature = self.model(seq)
                n = feature.size(0)
                feature_list.append(feature.view(n, -1).data.cpu().numpy())
                view_list += view
                seq_type_list += seq_type
                label_list.append(label.item())
        acc = self._compute_accuracy(feature_list, view_list, seq_type_list,
                                     label_list)
        if len(acc) > 1:
            # CASIA: one scalar per walking condition (NM / BG / CL).
            self.writer.add_scalar('test/accNM', acc[0], self.iter)
            self.writer.add_scalar('test/accBG', acc[1], self.iter)
            self.writer.add_scalar('test/accCL', acc[2], self.iter)
        else:
            self.writer.add_scalar('test/acc', acc[0], self.iter)
        return acc

    def _compute_accuracy(self, feature, view, seq_type, label,
                          metric='euclidean'):
        """Compute rank-1..5 accuracy for every (probe view, gallery view) pair.

        Args:
            feature: list of per-sample feature arrays (concatenated below).
            view: per-sample view-angle ids (parallel to ``feature``).
            seq_type: per-sample walking-condition ids (e.g. 'nm-05').
            label: per-sample subject ids.
            metric: 'euclidean' or 'cosine'.

        Returns:
            List of per-condition rank-1 accuracies, identical-view cases
            excluded (3 entries for CASIA, 1 for OUMVLP).
        """
        _metrics = {'euclidean': euclidean_dist, 'cosine': cosine_dist}
        dist_metric = _metrics[metric]

        feature = np.concatenate(feature, 0)
        label = np.array(label)
        view_list = list(set(view))
        view_list.sort()
        view_num = len(view_list)

        # Which walking conditions act as probe vs. gallery per dataset.
        probe_seq_dict = {'CASIA': [['nm-05', 'nm-06'], ['bg-01', 'bg-02'], ['cl-01', 'cl-02']],
                          'OUMVLP': [['00']]}
        gallery_seq_dict = {'CASIA': [['nm-01', 'nm-02', 'nm-03', 'nm-04']],
                            'OUMVLP': [['01']]}
        num_rank = 5
        dataset = 'CASIA' if 'CASIA' in self.cfg.dataset else 'OUMVLP'
        acc = np.zeros([len(probe_seq_dict[dataset]), view_num, view_num, num_rank])
        for (p, probe_seq) in enumerate(probe_seq_dict[dataset]):
            for gallery_seq in gallery_seq_dict[dataset]:
                for (v1, probe_view) in enumerate(view_list):
                    for (v2, gallery_view) in enumerate(view_list):
                        gseq_mask = np.isin(seq_type, gallery_seq) & np.isin(view, [gallery_view])
                        gallery_x = feature[gseq_mask, :]
                        gallery_y = label[gseq_mask]

                        pseq_mask = np.isin(seq_type, probe_seq) & np.isin(view, [probe_view])
                        probe_x = feature[pseq_mask, :]
                        probe_y = label[pseq_mask]

                        dist = dist_metric(probe_x, gallery_x)
                        idx = dist.sort(1)[1].cpu().numpy()
                        # rank-k hit: true label appears among the k nearest
                        # gallery samples.
                        out = np.reshape(probe_y, [-1, 1])
                        out = np.cumsum(out == gallery_y[idx[:, 0:num_rank]], 1)
                        out = np.sum(out > 0, 0)
                        out = np.round(out * 100 / dist.shape[0], 2)
                        acc[p, v1, v2, :] = out

        if dataset == 'CASIA':
            # Print rank-1 accuracy of the best model, e.g.
            # ===Rank-1 (Include identical-view cases)===
            # NM: 95.405, BG: 88.284, CL: 72.041
            self.print_log('===Rank-1 (Include identical-view cases)===')
            self.print_log('NM: %.3f,\tBG: %.3f,\tCL: %.3f' % (
                np.mean(acc[0, :, :, 0]),
                np.mean(acc[1, :, :, 0]),
                np.mean(acc[2, :, :, 0])))
            self.print_log('===Rank-1 (Exclude identical-view cases)===')
            acc0 = de_diag(acc[0, :, :, 0])
            acc1 = de_diag(acc[1, :, :, 0])
            acc2 = de_diag(acc[2, :, :, 0])
            self.print_log('NM: %.3f,\tBG: %.3f,\tCL: %.3f' % (
                acc0, acc1, acc2))
            # BUGFIX: the original called np.printoptions(...) without
            # `with`, which is a no-op (it only returns a context manager);
            # the per-angle lines below already format explicitly, so the
            # call is dropped.
            self.print_log('===Rank-1 of each angle (Exclude identical-view cases)===')
            s = '[' + ', '.join(['{:.3f}' for _ in range(view_num)]) + ']'
            self.print_log('NM: ' + s.format(*de_diag(acc[0, :, :, 0], True)))
            self.print_log('BG: ' + s.format(*de_diag(acc[1, :, :, 0], True)))
            self.print_log('CL: ' + s.format(*de_diag(acc[2, :, :, 0], True)))
            return [acc0, acc1, acc2]
        elif dataset == 'OUMVLP':
            self.print_log('===Rank-1 (Include identical-view cases)===')
            self.print_log('{:.3f}'.format(np.mean(acc[0, :, :, 0])))
            self.print_log('===Rank-1 (Exclude identical-view cases)===')
            self.print_log('{:.3f}'.format(de_diag(acc[0, :, :, 0])))
            # BUGFIX: no-op np.printoptions call dropped (see CASIA branch).
            self.print_log('===Rank-1 of each angle (Exclude identical-view cases)===')
            s = '[' + ', '.join(['{:.3f}' for _ in range(view_num)]) + ']'
            self.print_log(s.format(*de_diag(acc[0, :, :, 0], True)))
            return [de_diag(acc[0, :, :, 0])]
| [
"numpy.mean",
"numpy.reshape",
"torch.nn.functional.cosine_similarity",
"os.path.join",
"torch.from_numpy",
"numpy.diag",
"numpy.isin",
"numpy.array",
"numpy.sum",
"torch.sum",
"numpy.concatenate",
"torch.nn.functional.relu",
"utils.AverageMeter",
"numpy.cumsum",
"time.time",
"numpy.ro... | [((611, 623), 'torch.nn.functional.relu', 'F.relu', (['dist'], {}), '(dist)\n', (617, 623), True, 'import torch.nn.functional as F\n'), ((750, 802), 'torch.nn.functional.cosine_similarity', 'F.cosine_similarity', (['x[:, None, :]', 'y[None, :, :]', '(2)'], {}), '(x[:, None, :], y[None, :, :], 2)\n', (769, 802), True, 'import torch.nn.functional as F\n'), ((1009, 1024), 'numpy.mean', 'np.mean', (['result'], {}), '(result)\n', (1016, 1024), True, 'import numpy as np\n'), ((1228, 1239), 'time.time', 'time.time', ([], {}), '()\n', (1237, 1239), False, 'import time\n'), ((2039, 2050), 'time.time', 'time.time', ([], {}), '()\n', (2048, 2050), False, 'import time\n'), ((6710, 6736), 'numpy.concatenate', 'np.concatenate', (['feature', '(0)'], {}), '(feature, 0)\n', (6724, 6736), True, 'import numpy as np\n'), ((6753, 6768), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (6761, 6768), True, 'import numpy as np\n'), ((380, 399), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (396, 399), False, 'import torch\n'), ((415, 434), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (431, 434), False, 'import torch\n'), ((673, 692), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (689, 692), False, 'import torch\n'), ((708, 727), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (724, 727), False, 'import torch\n'), ((2217, 2228), 'time.time', 'time.time', ([], {}), '()\n', (2226, 2228), False, 'import time\n'), ((4247, 4258), 'time.time', 'time.time', ([], {}), '()\n', (4256, 4258), False, 'import time\n'), ((5079, 5139), 'os.path.join', 'os.path.join', (['self.work_dir', "(self.cfg.save_name + '.pth.tar')"], {}), "(self.work_dir, self.cfg.save_name + '.pth.tar')\n", (5091, 5139), False, 'import os\n'), ((10209, 10256), 'numpy.printoptions', 'np.printoptions', ([], {'precision': '(2)', 'floatmode': '"""fixed"""'}), "(precision=2, floatmode='fixed')\n", (10224, 10256), True, 'import numpy as np\n'), ((2008, 
2022), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2020, 2022), False, 'from utils import AverageMeter\n'), ((11033, 11080), 'numpy.printoptions', 'np.printoptions', ([], {'precision': '(2)', 'floatmode': '"""fixed"""'}), "(precision=2, floatmode='fixed')\n", (11048, 11080), True, 'import numpy as np\n'), ((453, 473), 'torch.sum', 'torch.sum', (['(x ** 2)', '(1)'], {}), '(x ** 2, 1)\n', (462, 473), False, 'import torch\n'), ((934, 946), 'numpy.diag', 'np.diag', (['acc'], {}), '(acc)\n', (941, 946), True, 'import numpy as np\n'), ((2180, 2191), 'time.time', 'time.time', ([], {}), '()\n', (2189, 2191), False, 'import time\n'), ((2636, 2647), 'time.time', 'time.time', ([], {}), '()\n', (2645, 2647), False, 'import time\n'), ((8168, 8196), 'numpy.reshape', 'np.reshape', (['probe_y', '[-1, 1]'], {}), '(probe_y, [-1, 1])\n', (8178, 8196), True, 'import numpy as np\n'), ((8227, 8277), 'numpy.cumsum', 'np.cumsum', (['(out == gallery_y[idx[:, 0:num_rank]])', '(1)'], {}), '(out == gallery_y[idx[:, 0:num_rank]], 1)\n', (8236, 8277), True, 'import numpy as np\n'), ((8308, 8326), 'numpy.sum', 'np.sum', (['(out > 0)', '(0)'], {}), '(out > 0, 0)\n', (8314, 8326), True, 'import numpy as np\n'), ((8357, 8395), 'numpy.round', 'np.round', (['(out * 100 / dist.shape[0])', '(2)'], {}), '(out * 100 / dist.shape[0], 2)\n', (8365, 8395), True, 'import numpy as np\n'), ((9051, 9075), 'numpy.mean', 'np.mean', (['acc[0, :, :, 0]'], {}), '(acc[0, :, :, 0])\n', (9058, 9075), True, 'import numpy as np\n'), ((9093, 9117), 'numpy.mean', 'np.mean', (['acc[1, :, :, 0]'], {}), '(acc[1, :, :, 0])\n', (9100, 9117), True, 'import numpy as np\n'), ((9135, 9159), 'numpy.mean', 'np.mean', (['acc[2, :, :, 0]'], {}), '(acc[2, :, :, 0])\n', (9142, 9159), True, 'import numpy as np\n'), ((10848, 10872), 'numpy.mean', 'np.mean', (['acc[0, :, :, 0]'], {}), '(acc[0, :, :, 0])\n', (10855, 10872), True, 'import numpy as np\n'), ((489, 509), 'torch.sum', 'torch.sum', (['(y ** 2)', '(1)'], {}), '(y ** 2, 
1)\n', (498, 509), False, 'import torch\n'), ((7636, 7666), 'numpy.isin', 'np.isin', (['seq_type', 'gallery_seq'], {}), '(seq_type, gallery_seq)\n', (7643, 7666), True, 'import numpy as np\n'), ((7669, 7698), 'numpy.isin', 'np.isin', (['view', '[gallery_view]'], {}), '(view, [gallery_view])\n', (7676, 7698), True, 'import numpy as np\n'), ((7847, 7875), 'numpy.isin', 'np.isin', (['seq_type', 'probe_seq'], {}), '(seq_type, probe_seq)\n', (7854, 7875), True, 'import numpy as np\n'), ((7878, 7905), 'numpy.isin', 'np.isin', (['view', '[probe_view]'], {}), '(view, [probe_view])\n', (7885, 7905), True, 'import numpy as np\n'), ((4178, 4189), 'time.time', 'time.time', ([], {}), '()\n', (4187, 4189), False, 'import time\n')] |
import torch
import os
import datetime
import pickle
import dill
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch import optim
from random import choices
from scipy.stats import entropy, boxcox
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
from Networks import Encoder
from Networks import DecoderGRUCover, DecoderSumCover
from Evaluation import EvaluationUtil
from Parameters import Params
# Module-level hyperparameter/config singleton; methods below read
# params.MEDICATION_INDEX, params.TRAIN_RATIO, params.TEST_RATIO, params.device.
params = Params()
class ModelTraining:
    """Train and evaluate an encoder/decoder medication-recommendation model.

    Patient records are loaded from a pickle file; vocabularies for diagnoses,
    procedures and medications come from a dill file. Two decoder variants are
    supported ('gru_cover' and 'sum_cover'); the latter additionally returns a
    coverage loss that is folded into the training objective.
    """

    def __init__(self, device, patient_records_file, voc_file, ehr_matrix_file):
        """Load vocabularies and the EHR co-occurrence matrix.

        device: torch device used for loss tensors and model placement.
        patient_records_file: pickle file of patient records (read in train()).
        voc_file: dill file holding 'diag_voc', 'pro_voc', 'med_voc' objects,
            each exposing a word2idx mapping.
        ehr_matrix_file: dill file with the EHR matrix passed to the decoders.
        """
        self.device = device
        self.patient_records_file = patient_records_file
        self.voc_file = voc_file
        self.ehr_matrix_file = ehr_matrix_file
        voc = dill.load(open(self.voc_file, 'rb'))
        self.diag_voc = voc['diag_voc']
        self.pro_voc = voc['pro_voc']
        self.med_voc = voc['med_voc']
        # Vocabulary sizes double as the one-hot/multi-hot dimensions below.
        self.diagnose_count = len(self.diag_voc.word2idx)
        self.procedure_count = len(self.pro_voc.word2idx)
        self.medication_count = len(self.med_voc.word2idx)
        self.ehr_matrix = dill.load(open(self.ehr_matrix_file, 'rb'))
        self.evaluate_utils = EvaluationUtil()

    def loss_function(self, target_medications, predict_medications, proportion_bce, proportion_multi,
                      coverage_loss=0.0, proportion_coverage=0.0):
        """Weighted sum of BCE-with-logits, multilabel margin, and coverage losses.

        target_medications: iterable of ground-truth medication indices for one
            admission.
        predict_medications: raw decoder logits over the medication vocabulary.
        proportion_bce / proportion_multi / proportion_coverage: mixing weights;
            the coverage term is skipped entirely when proportion_coverage == 0.
        """
        # Multi-hot target for binary cross-entropy.
        loss_bce_target = np.zeros((1, self.medication_count))
        loss_bce_target[:, target_medications] = 1
        # multilabel_margin_loss expects target indices packed at the front of
        # the row, padded with -1 (see torch.nn.functional docs).
        loss_multi_target = np.full((1, self.medication_count), -1)
        for idx, item in enumerate(target_medications):
            loss_multi_target[0][idx] = item
        loss_bce = F.binary_cross_entropy_with_logits(predict_medications,
                                                    torch.FloatTensor(loss_bce_target).to(self.device))
        loss_multi = F.multilabel_margin_loss(torch.sigmoid(predict_medications),
                                                torch.LongTensor(loss_multi_target).to(self.device))
        loss = proportion_bce * loss_bce + proportion_multi * loss_multi
        if proportion_coverage != 0:
            loss = loss + proportion_coverage * coverage_loss
        return loss

    def get_performance_on_testset(self, encoder, decoder, patient_records, coverage_type):
        """Evaluate on every admission of every test patient.

        For each admission, the encoder consumes the patient's history up to and
        including that admission; predictions are thresholded at 0.5 to obtain
        the recommended medication set.

        Returns (jaccard, precision, recall, f1, prauc) averaged over all
        admissions.
        """
        jaccard_avg, precision_avg, recall_avg, f1_avg, prauc_avg = [], [], [], [], []
        count = 0
        for patient in patient_records:
            for idx, adm in enumerate(patient):
                count += 1
                # History up to and including the current admission.
                current_records = patient[:idx + 1]
                query, memory_keys, memory_values = encoder(current_records)
                # 'gru_cover' decoder returns logits only; 'sum_cover' also
                # returns a coverage loss, ignored at evaluation time.
                if coverage_type == 'gru_cover':
                    predict_output = decoder(query, memory_keys, memory_values)
                else:
                    predict_output, _ = decoder(query, memory_keys, memory_values)
                target_medications = adm[params.MEDICATION_INDEX]
                target_multi_hot = np.zeros(self.medication_count)
                target_multi_hot[target_medications] = 1
                predict_prob = torch.sigmoid(predict_output).detach().cpu().numpy()[0]
                predict_multi_hot = predict_prob.copy()
                # Guard against NaNs in the prediction: treat the whole
                # prediction as "no medications" rather than propagating NaN.
                index_nan = np.argwhere(np.isnan(predict_multi_hot))
                if index_nan.shape[0] != 0:
                    predict_multi_hot = np.zeros_like(predict_multi_hot)
                # Threshold probabilities at 0.5 to get the recommended set.
                predict_multi_hot[predict_multi_hot >= 0.5] = 1
                predict_multi_hot[predict_multi_hot < 0.5] = 0
                predict_medications = list(np.where(predict_multi_hot == 1)[0])
                jaccard = self.evaluate_utils.metric_jaccard_similarity(predict_medications, target_medications)
                precision = self.evaluate_utils.metric_precision(predict_medications, target_medications)
                recall = self.evaluate_utils.metric_recall(predict_medications, target_medications)
                f1 = self.evaluate_utils.metric_f1(precision, recall)
                # PR-AUC is computed on raw probabilities, not the thresholded set.
                prauc = self.evaluate_utils.precision_auc(predict_prob, target_multi_hot)
                jaccard_avg.append(jaccard)
                precision_avg.append(precision)
                recall_avg.append(recall)
                f1_avg.append(f1)
                prauc_avg.append(prauc)
        # Per-admission metrics are averaged over the whole test set.
        jaccard_avg = np.mean(np.array(jaccard_avg))
        precision_avg = np.mean(np.array(precision_avg))
        recall_avg = np.mean(np.array(recall_avg))
        f1_avg = np.mean(np.array(f1_avg))
        prauc_avg = np.mean(np.array(prauc_avg))
        return jaccard_avg, precision_avg, recall_avg, f1_avg, prauc_avg

    def trainIters(self, encoder, decoder, encoder_optimizer, decoder_optimizer, coverage_type, patient_records_train,
                   patient_records_test, save_model_path, n_epoch, print_every_iteration=100, save_every_epoch=5,
                   trained_epoch=0, trained_iteration=0):
        """Run the training loop for n_epoch epochs.

        One optimizer step is taken per admission. After every epoch the model
        is evaluated on the test set, both LR schedulers step on the test F1,
        and (every save_every_epoch epochs) a checkpoint is written. Progress
        is printed and appended to medrec_loss.log under save_model_path.

        trained_epoch / trained_iteration: counters carried over when resuming
        from a checkpoint; training continues from trained_epoch + 1.
        """
        start_epoch = trained_epoch + 1
        trained_n_iteration = trained_iteration
        if not os.path.exists(save_model_path):
            os.makedirs(save_model_path)
        # Append mode so resumed runs extend the existing log.
        log_file = open(os.path.join(save_model_path, 'medrec_loss.log'), 'a+')
        # mode='max' because the schedulers step on test F1 (higher is better).
        encoder_lr_scheduler = ReduceLROnPlateau(encoder_optimizer, mode='max', patience=5, factor=0.1)
        decoder_lr_scheduler = ReduceLROnPlateau(decoder_optimizer, mode='max', patience=5, factor=0.1)
        for epoch in range(start_epoch, start_epoch + n_epoch):
            print_loss = []
            iteration = 0
            for patient in patient_records_train:
                for idx, adm in enumerate(patient):
                    trained_n_iteration += 1
                    iteration += 1
                    current_records = patient[:idx + 1]
                    target_medications = adm[params.MEDICATION_INDEX]
                    encoder_optimizer.zero_grad()
                    decoder_optimizer.zero_grad()
                    query, memory_keys, memory_values = encoder(current_records)
                    # Loss mixing: 0.8 BCE + 0.1 multilabel margin
                    # (+ 0.1 coverage for the sum_cover decoder).
                    if coverage_type == 'gru_cover':
                        predict_output = decoder(query, memory_keys, memory_values)
                        loss = self.loss_function(target_medications, predict_output, 0.8, 0.1)
                        print_loss.append(loss.item())
                    else:  # sum_cover
                        predict_output, coverage_loss = decoder(query, memory_keys, memory_values)
                        loss = self.loss_function(target_medications, predict_output, 0.8, 0.1, coverage_loss, 0.1)
                        print_loss.append(loss.item())
                    loss.backward()
                    encoder_optimizer.step()
                    decoder_optimizer.step()
                    if iteration % print_every_iteration == 0:
                        # Report the running mean loss, then reset the window.
                        print_loss_avg = np.mean(np.array(print_loss))
                        print_loss = []
                        print(
                            'epoch: {}; time: {}; Iteration: {}; train loss: {:.4f}'.format(
                                epoch, datetime.datetime.now(), trained_n_iteration, print_loss_avg))
                        log_file.write(
                            'epoch: {}; time: {}; Iteration: {}; train loss: {:.4f}\n'.format(
                                epoch, datetime.datetime.now(), trained_n_iteration, print_loss_avg))
            # End-of-epoch evaluation on the held-out test set.
            encoder.eval()
            decoder.eval()
            jaccard_avg, precision_avg, recall_avg, f1_avg, prauc_avg = self.get_performance_on_testset(encoder,
                                                                                                        decoder,
                                                                                                        patient_records_test,
                                                                                                        coverage_type)
            encoder.train()
            decoder.train()
            print(
                'epoch: {}; time: {}; Iteration: {}; jaccard_test: {:.4f}; precision_test: {:.4f}; recall_test: {:.4f}; f1_test: {:.4f}; prauc_test: {:.4f}'.format(
                    epoch, datetime.datetime.now(), trained_n_iteration, jaccard_avg, precision_avg, recall_avg, f1_avg,
                    prauc_avg))
            log_file.write(
                'epoch: {}; time: {}; Iteration: {}; jaccard_test: {:.4f}; precision_test: {:.4f}; recall_test: {:.4f}; f1_test: {:.4f}; prauc_test: {:.4f}\n'.format(
                    epoch, datetime.datetime.now(), trained_n_iteration, jaccard_avg, precision_avg, recall_avg, f1_avg,
                    prauc_avg))
            encoder_lr_scheduler.step(f1_avg)
            decoder_lr_scheduler.step(f1_avg)
            if epoch % save_every_epoch == 0:
                # Checkpoint format matches what train() expects when resuming.
                torch.save(
                    {'medrec_epoch': epoch,
                     'medrec_iteration': trained_n_iteration,
                     'encoder': encoder.state_dict(),
                     'decoder': decoder.state_dict(),
                     'encoder_optimizer': encoder_optimizer.state_dict(),
                     'decoder_optimizer': decoder_optimizer.state_dict()},
                    os.path.join(save_model_path,
                                 'medrec_{}_{}_{:.4f}.checkpoint'.format(epoch, trained_n_iteration, f1_avg)))
        log_file.close()

    def train(self, input_size, hidden_size, encoder_n_layers, encoder_embedding_dropout_rate,
              encoder_gru_dropout_rate, encoder_learning_rate, decoder_type, decoder_dropout_rate, decoder_hop_count,
              regular_hop_count, attn_type_kv, attn_type_embedding, least_adm_count, select_adm_count, coverage_dim,
              decoder_learning_rate, save_model_dir='data/model', n_epoch=50, print_every_iteration=100,
              save_every_epoch=1, load_model_name=None):
        """Build models/optimizers, split data, and launch trainIters().

        decoder_type selects the architecture ('gru_cover' or 'sum_cover');
        anything else prints an error and returns without training.
        load_model_name: optional checkpoint path; when given, model and
            optimizer state plus epoch/iteration counters are restored from it.
        Checkpoints are written under a directory derived from the
        hyperparameters: save_model_dir/<structure>/<parameters>.
        """
        print('initializing >>>')
        if load_model_name:
            print('load model from checkpoint file: ', load_model_name)
            checkpoint = torch.load(load_model_name)
        encoder = Encoder(self.device, input_size, hidden_size, self.diagnose_count,
                          self.procedure_count, encoder_n_layers, encoder_embedding_dropout_rate,
                          encoder_gru_dropout_rate)
        if decoder_type == 'gru_cover':
            decoder = DecoderGRUCover(params.device, hidden_size, self.medication_count,
                                      decoder_dropout_rate, least_adm_count, decoder_hop_count,
                                      coverage_dim, attn_type_kv, attn_type_embedding,
                                      regular_hop_count, self.ehr_matrix)
            coverage_type = 'gru_cover'
        elif decoder_type == 'sum_cover':
            decoder = DecoderSumCover(params.device, hidden_size, self.medication_count,
                                      decoder_dropout_rate, decoder_hop_count, attn_type_kv,
                                      attn_type_embedding, least_adm_count, select_adm_count,
                                      regular_hop_count, self.ehr_matrix)
            coverage_type = 'sum_cover'
        else:
            print('wrong decoder type, choose from gru_cover and sum_cover')
            return
        if load_model_name:
            # Restore weights before moving the models to the target device.
            encoder_sd = checkpoint['encoder']
            decoder_sd = checkpoint['decoder']
            encoder.load_state_dict(encoder_sd)
            decoder.load_state_dict(decoder_sd)
        encoder = encoder.to(self.device)
        decoder = decoder.to(self.device)
        encoder.train()
        decoder.train()
        print('build optimizer >>>')
        encoder_optimizer = optim.Adam(encoder.parameters(), lr=encoder_learning_rate)
        decoder_optimizer = optim.Adam(decoder.parameters(), lr=decoder_learning_rate)
        if load_model_name:
            encoder_optimizer_sd = checkpoint['encoder_optimizer']
            decoder_optimizer_sd = checkpoint['decoder_optimizer']
            encoder_optimizer.load_state_dict(encoder_optimizer_sd)
            decoder_optimizer.load_state_dict(decoder_optimizer_sd)
        print('start training >>>')
        # Sequential (non-shuffled) split: first TRAIN_RATIO for training, the
        # next TEST_RATIO slice for testing.
        patient_records = pd.read_pickle(self.patient_records_file)
        split_point = int(len(patient_records) * params.TRAIN_RATIO)
        test_count = int(len(patient_records) * params.TEST_RATIO)
        patient_records_train = patient_records[:split_point]
        patient_records_test = patient_records[split_point:split_point + test_count]
        medrec_trained_epoch = 0
        medrec_trained_iteration = 0
        if load_model_name:
            medrec_trained_n_epoch_sd = checkpoint['medrec_epoch']
            medrec_trained_n_iteration_sd = checkpoint['medrec_iteration']
            medrec_trained_epoch = medrec_trained_n_epoch_sd
            medrec_trained_iteration = medrec_trained_n_iteration_sd
        # Encode the hyperparameter configuration into the checkpoint path so
        # different runs never overwrite each other.
        save_model_structure = str(encoder_n_layers) + '_' + str(input_size) + '_' + str(hidden_size)
        save_model_parameters = str(encoder_embedding_dropout_rate) + '_' + str(encoder_gru_dropout_rate) + '_' + str(
            decoder_dropout_rate) + '_' + attn_type_kv + '_' + attn_type_embedding + '_' + str(
            decoder_hop_count) + '_' + str(regular_hop_count)
        save_model_path = os.path.join(save_model_dir, save_model_structure, save_model_parameters)
        self.trainIters(encoder, decoder, encoder_optimizer, decoder_optimizer, coverage_type, patient_records_train,
                        patient_records_test, save_model_path, n_epoch, print_every_iteration, save_every_epoch,
                        medrec_trained_epoch, medrec_trained_iteration)
| [
"torch.LongTensor",
"numpy.array",
"Networks.DecoderSumCover",
"pandas.read_pickle",
"Parameters.Params",
"os.path.exists",
"numpy.where",
"Networks.DecoderGRUCover",
"Evaluation.EvaluationUtil",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"Networks.Encoder",
"numpy.isnan",
"os.makedirs",
... | [((616, 624), 'Parameters.Params', 'Params', ([], {}), '()\n', (622, 624), False, 'from Parameters import Params\n'), ((1360, 1376), 'Evaluation.EvaluationUtil', 'EvaluationUtil', ([], {}), '()\n', (1374, 1376), False, 'from Evaluation import EvaluationUtil\n'), ((1578, 1614), 'numpy.zeros', 'np.zeros', (['(1, self.medication_count)'], {}), '((1, self.medication_count))\n', (1586, 1614), True, 'import numpy as np\n'), ((1696, 1735), 'numpy.full', 'np.full', (['(1, self.medication_count)', '(-1)'], {}), '((1, self.medication_count), -1)\n', (1703, 1735), True, 'import numpy as np\n'), ((5470, 5542), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', (['encoder_optimizer'], {'mode': '"""max"""', 'patience': '(5)', 'factor': '(0.1)'}), "(encoder_optimizer, mode='max', patience=5, factor=0.1)\n", (5487, 5542), False, 'from torch.optim.lr_scheduler import ReduceLROnPlateau\n'), ((5575, 5647), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', (['decoder_optimizer'], {'mode': '"""max"""', 'patience': '(5)', 'factor': '(0.1)'}), "(decoder_optimizer, mode='max', patience=5, factor=0.1)\n", (5592, 5647), False, 'from torch.optim.lr_scheduler import ReduceLROnPlateau\n'), ((10385, 10558), 'Networks.Encoder', 'Encoder', (['self.device', 'input_size', 'hidden_size', 'self.diagnose_count', 'self.procedure_count', 'encoder_n_layers', 'encoder_embedding_dropout_rate', 'encoder_gru_dropout_rate'], {}), '(self.device, input_size, hidden_size, self.diagnose_count, self.\n procedure_count, encoder_n_layers, encoder_embedding_dropout_rate,\n encoder_gru_dropout_rate)\n', (10392, 10558), False, 'from Networks import Encoder\n'), ((12539, 12580), 'pandas.read_pickle', 'pd.read_pickle', (['self.patient_records_file'], {}), '(self.patient_records_file)\n', (12553, 12580), True, 'import pandas as pd\n'), ((13661, 13734), 'os.path.join', 'os.path.join', (['save_model_dir', 'save_model_structure', 'save_model_parameters'], {}), '(save_model_dir, 
save_model_structure, save_model_parameters)\n', (13673, 13734), False, 'import os\n'), ((2071, 2105), 'torch.sigmoid', 'torch.sigmoid', (['predict_medications'], {}), '(predict_medications)\n', (2084, 2105), False, 'import torch\n'), ((4577, 4598), 'numpy.array', 'np.array', (['jaccard_avg'], {}), '(jaccard_avg)\n', (4585, 4598), True, 'import numpy as np\n'), ((4633, 4656), 'numpy.array', 'np.array', (['precision_avg'], {}), '(precision_avg)\n', (4641, 4656), True, 'import numpy as np\n'), ((4688, 4708), 'numpy.array', 'np.array', (['recall_avg'], {}), '(recall_avg)\n', (4696, 4708), True, 'import numpy as np\n'), ((4736, 4752), 'numpy.array', 'np.array', (['f1_avg'], {}), '(f1_avg)\n', (4744, 4752), True, 'import numpy as np\n'), ((4783, 4802), 'numpy.array', 'np.array', (['prauc_avg'], {}), '(prauc_avg)\n', (4791, 4802), True, 'import numpy as np\n'), ((5282, 5313), 'os.path.exists', 'os.path.exists', (['save_model_path'], {}), '(save_model_path)\n', (5296, 5313), False, 'import os\n'), ((5328, 5356), 'os.makedirs', 'os.makedirs', (['save_model_path'], {}), '(save_model_path)\n', (5339, 5356), False, 'import os\n'), ((5382, 5430), 'os.path.join', 'os.path.join', (['save_model_path', '"""medrec_loss.log"""'], {}), "(save_model_path, 'medrec_loss.log')\n", (5394, 5430), False, 'import os\n'), ((10336, 10363), 'torch.load', 'torch.load', (['load_model_name'], {}), '(load_model_name)\n', (10346, 10363), False, 'import torch\n'), ((10670, 10887), 'Networks.DecoderGRUCover', 'DecoderGRUCover', (['params.device', 'hidden_size', 'self.medication_count', 'decoder_dropout_rate', 'least_adm_count', 'decoder_hop_count', 'coverage_dim', 'attn_type_kv', 'attn_type_embedding', 'regular_hop_count', 'self.ehr_matrix'], {}), '(params.device, hidden_size, self.medication_count,\n decoder_dropout_rate, least_adm_count, decoder_hop_count, coverage_dim,\n attn_type_kv, attn_type_embedding, regular_hop_count, self.ehr_matrix)\n', (10685, 10887), False, 'from Networks import 
DecoderGRUCover, DecoderSumCover\n'), ((3205, 3236), 'numpy.zeros', 'np.zeros', (['self.medication_count'], {}), '(self.medication_count)\n', (3213, 3236), True, 'import numpy as np\n'), ((11106, 11331), 'Networks.DecoderSumCover', 'DecoderSumCover', (['params.device', 'hidden_size', 'self.medication_count', 'decoder_dropout_rate', 'decoder_hop_count', 'attn_type_kv', 'attn_type_embedding', 'least_adm_count', 'select_adm_count', 'regular_hop_count', 'self.ehr_matrix'], {}), '(params.device, hidden_size, self.medication_count,\n decoder_dropout_rate, decoder_hop_count, attn_type_kv,\n attn_type_embedding, least_adm_count, select_adm_count,\n regular_hop_count, self.ehr_matrix)\n', (11121, 11331), False, 'from Networks import DecoderGRUCover, DecoderSumCover\n'), ((1972, 2006), 'torch.FloatTensor', 'torch.FloatTensor', (['loss_bce_target'], {}), '(loss_bce_target)\n', (1989, 2006), False, 'import torch\n'), ((2154, 2189), 'torch.LongTensor', 'torch.LongTensor', (['loss_multi_target'], {}), '(loss_multi_target)\n', (2170, 2189), False, 'import torch\n'), ((3483, 3510), 'numpy.isnan', 'np.isnan', (['predict_multi_hot'], {}), '(predict_multi_hot)\n', (3491, 3510), True, 'import numpy as np\n'), ((3598, 3630), 'numpy.zeros_like', 'np.zeros_like', (['predict_multi_hot'], {}), '(predict_multi_hot)\n', (3611, 3630), True, 'import numpy as np\n'), ((8461, 8484), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8482, 8484), False, 'import datetime\n'), ((8813, 8836), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8834, 8836), False, 'import datetime\n'), ((3806, 3838), 'numpy.where', 'np.where', (['(predict_multi_hot == 1)'], {}), '(predict_multi_hot == 1)\n', (3814, 3838), True, 'import numpy as np\n'), ((7121, 7141), 'numpy.array', 'np.array', (['print_loss'], {}), '(print_loss)\n', (7129, 7141), True, 'import numpy as np\n'), ((7350, 7373), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7371, 7373), False, 
'import datetime\n'), ((7591, 7614), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7612, 7614), False, 'import datetime\n'), ((3327, 3356), 'torch.sigmoid', 'torch.sigmoid', (['predict_output'], {}), '(predict_output)\n', (3340, 3356), False, 'import torch\n')] |
"""
A script which times the training time of our graph2vec
reimplementations using both disk memory dataset loaders
and ram memory dataset loaders.
"""
import os
import time
import numpy as np
from geometric2dr.decomposition.weisfeiler_lehman_patterns import wl_corpus
from geometric2dr.embedding_methods.pvdbow_trainer import Trainer, InMemoryTrainer
from geometric2dr.embedding_methods.classify import cross_val_accuracy
import geometric2dr.embedding_methods.utils as utils
# Input data paths
dataset = "MUTAG"
corpus_data_dir = "data/" + dataset
wl_depth = 2
min_count = 0
emb_dimension = 128
batch_size = 1024
epochs = 100
initial_lr = 0.1
# Learn embeddings
graph_files = utils.get_files(corpus_data_dir, ".gexf", max_files=0)
wl_corpus(graph_files, wl_depth)
extension = ".wld" + str(wl_depth) # Extension of the graph document
output_embedding_fh = "runtime_analysis_embeddings"
# Load from disk trainer
hd_times = []
for _ in range(10):
trainer = Trainer(corpus_dir=corpus_data_dir, extension=extension, max_files=0, output_fh=output_embedding_fh,
emb_dimension=emb_dimension, batch_size=batch_size, epochs=epochs, initial_lr=initial_lr,
min_count=min_count)
start_time = time.time()
trainer.train()
end_time = (time.time() - start_time)
hd_times.append(end_time)
mean_hd_time = np.mean(hd_times)
std_hd_time = np.std(hd_times)
# Use memory trainer
memory_times = []
for _ in range(10):
trainer = InMemoryTrainer(corpus_dir=corpus_data_dir, extension=extension, max_files=0, output_fh=output_embedding_fh,
emb_dimension=emb_dimension, batch_size=batch_size, epochs=epochs, initial_lr=initial_lr,
min_count=min_count)
start_time = time.time()
trainer.train()
end_time = (time.time() - start_time)
memory_times.append(end_time)
mean_mem_time = np.mean(memory_times)
std_mem_time = np.std(memory_times)
# print("Hard Drive Geo2DR Graph2Vec mean time: %.4f standard dev: %.4f " % (mean_hd_time, std_hd_time))
print("In Memory Geo2DR Graph2Vec mean time: %.4f standard dev: %.4f " % (mean_mem_time, std_mem_time))
# Anonymous Walk Embeddings
import os
import time
import numpy as np
import geometric2dr.embedding_methods.utils as utils
from geometric2dr.decomposition.anonymous_walk_patterns import awe_corpus
from geometric2dr.embedding_methods.classify import cross_val_accuracy
from geometric2dr.embedding_methods.pvdm_trainer import PVDM_Trainer # Note use of PVDM
aw_length = 10
label_setting = "nodes" # AWE is quite nice and versatile allowing for different node-label/edge-label settings
# Input data paths
dataset = "MUTAG"
corpus_data_dir = "data/" + dataset
# Desired output paths
output_embedding_fh = "AWE_Embeddings.json"
#######
# Step 1 Create corpus data for neural language model
# We keep permanent files for sake of deeper post studies and testing
#######
graph_files = utils.get_files(corpus_data_dir, ".gexf", max_files=0)
memory_times = []
for _ in range(10):
awe_corpus(corpus_data_dir, aw_length, label_setting, saving_graph_docs=True)
extension = ".awe_" + str(aw_length) + "_" + label_setting
######
# Step 2 Train a neural language model to learn distributed representations
# of the graphs directly or of its substructures. Here we learn it directly
# for an example of the latter check out the DGK models.
######
trainer = PVDM_Trainer(corpus_dir=corpus_data_dir, extension=extension, max_files=0, window_size=16, output_fh=output_embedding_fh,
emb_dimension=128, batch_size=100, epochs=100, initial_lr=0.1, min_count=0)
start_time = time.time()
trainer.train()
end_time = (time.time() - start_time)
memory_times.append(end_time)
mean_mem_time = np.mean(memory_times)
std_mem_time = np.std(memory_times)
print("In Memory Geo2DR AWE-DD mean time: %.4f standard dev: %.4f " % (mean_mem_time, std_mem_time))
import os
import time
import numpy as np
import geometric2dr.embedding_methods.utils as utils
from geometric2dr.decomposition.weisfeiler_lehman_patterns import wl_corpus
from geometric2dr.embedding_methods.skipgram_trainer import InMemoryTrainer
# DGK-WL
# Input data paths
dataset = "MUTAG"
corpus_data_dir = "data/" + dataset
# Desired output paths for subgraph embeddings
output_embedding_fh = "WL_Subgraph_Embeddings.json"
# WL decomposition hyperparameters
wl_depth = 2
############
# Step 1
# Run the decomposition algorithm to get subgraph patterns across the graphs of MUTAG
############
graph_files = utils.get_files(corpus_data_dir, ".gexf", max_files=0)
corpus, vocabulary, prob_map, num_graphs, graph_map = wl_corpus(graph_files, wl_depth)
extension = ".wld" + str(wl_depth) # Extension of the graph document
############
# Step 2
# Train a skipgram (w. Negative Sampling) model to learn distributed representations of the subgraph patterns
############
memory_times = []
for _ in range(10):
trainer = InMemoryTrainer(corpus_dir=corpus_data_dir, extension=extension, max_files=0, window_size=10, output_fh=output_embedding_fh,
emb_dimension=32, batch_size=1280, epochs=100, initial_lr=0.1, min_count=1)
start_time = time.time()
trainer.train()
end_time = (time.time() - start_time)
memory_times.append(end_time)
mean_mem_time = np.mean(memory_times)
std_mem_time = np.std(memory_times)
print("In Memory Geo2DR DGK-WL mean time: %.4f standard dev: %.4f " % (mean_mem_time, std_mem_time))
# DGK-SP
import os
import time
import numpy as np
import geometric2dr.embedding_methods.utils as utils
from geometric2dr.decomposition.shortest_path_patterns import sp_corpus
from geometric2dr.embedding_methods.skipgram_trainer import InMemoryTrainer
# Input data paths
dataset = "MUTAG"
corpus_data_dir = "data/" + dataset
# Desired output paths for subgraph embeddings
output_embedding_fh = "SPP_Subgraph_Embeddings.json"
############
# Step 1
# Run the decomposition algorithm to get subgraph patterns across the graphs of MUTAG
############
graph_files = utils.get_files(corpus_data_dir, ".gexf", max_files=0)
corpus, vocabulary, prob_map, num_graphs, graph_map = sp_corpus(corpus_data_dir) # will produce .spp files
extension = ".spp"
############
# Step 2
# Train a skipgram (w. Negative Sampling) model to learn distributed representations of the subgraph patterns
############
memory_times = []
for _ in range(10):
trainer = InMemoryTrainer(corpus_dir=corpus_data_dir, extension=extension, max_files=0, window_size=10, output_fh=output_embedding_fh,
emb_dimension=32, batch_size=128, epochs=100, initial_lr=0.1,
min_count=1)
start_time = time.time()
trainer.train()
end_time = (time.time() - start_time)
memory_times.append(end_time)
mean_mem_time = np.mean(memory_times)
std_mem_time = np.std(memory_times)
print("In Memory Geo2DR DGK-SP mean time: %.4f standard dev: %.4f " % (mean_mem_time, std_mem_time))
# # DGK-GK
import os
import time
import numpy as np
import geometric2dr.embedding_methods.utils as utils
from geometric2dr.decomposition.graphlet_patterns import graphlet_corpus
from geometric2dr.embedding_methods.skipgram_trainer import Trainer, InMemoryTrainer
# Input data paths
dataset = "MUTAG"
corpus_data_dir = "data/" + dataset
# Desired output paths for subgraph embeddings
output_embedding_fh = "Graphlet_Subgraph_Embeddings.json"
# Graphlet decomposition hyperparameters
num_graphlet = 7 # size of the graphlets to extract
sample_size = 100 # number of graphlets samples to extract
############
# Step 1
# Run the decomposition algorithm to get subgraph patterns across the graphs of MUTAG
############
graph_files = utils.get_files(corpus_data_dir, ".gexf", max_files=0)
corpus, vocabulary, prob_map, num_graphs, graph_map = graphlet_corpus(corpus_data_dir, num_graphlet, sample_size)
extension = ".graphlet_ng_"+str(num_graphlet)+"_ss_"+str(sample_size)
############
# Step 2
# Train a skipgram (w. Negative Sampling) model to learn distributed representations of the subgraph patterns
############
memory_times = []
for _ in range(10):
trainer = InMemoryTrainer(corpus_dir=corpus_data_dir, extension=extension, max_files=0, window_size=10, output_fh=output_embedding_fh,
emb_dimension=32, batch_size=128, epochs=100, initial_lr=0.1,
min_count=0)
start_time = time.time()
trainer.train()
end_time = (time.time() - start_time)
memory_times.append(end_time)
mean_mem_time = np.mean(memory_times)
std_mem_time = np.std(memory_times)
print("In Memory Geo2DR DGK-GRAPHLET mean time: %.4f standard dev: %.4f " % (mean_mem_time, std_mem_time))
| [
"numpy.mean",
"geometric2dr.decomposition.shortest_path_patterns.sp_corpus",
"geometric2dr.decomposition.weisfeiler_lehman_patterns.wl_corpus",
"geometric2dr.embedding_methods.utils.get_files",
"geometric2dr.embedding_methods.skipgram_trainer.Trainer",
"geometric2dr.embedding_methods.pvdm_trainer.PVDM_Tra... | [((686, 740), 'geometric2dr.embedding_methods.utils.get_files', 'utils.get_files', (['corpus_data_dir', '""".gexf"""'], {'max_files': '(0)'}), "(corpus_data_dir, '.gexf', max_files=0)\n", (701, 740), True, 'import geometric2dr.embedding_methods.utils as utils\n'), ((741, 773), 'geometric2dr.decomposition.weisfeiler_lehman_patterns.wl_corpus', 'wl_corpus', (['graph_files', 'wl_depth'], {}), '(graph_files, wl_depth)\n', (750, 773), False, 'from geometric2dr.decomposition.weisfeiler_lehman_patterns import wl_corpus\n'), ((1342, 1359), 'numpy.mean', 'np.mean', (['hd_times'], {}), '(hd_times)\n', (1349, 1359), True, 'import numpy as np\n'), ((1374, 1390), 'numpy.std', 'np.std', (['hd_times'], {}), '(hd_times)\n', (1380, 1390), True, 'import numpy as np\n'), ((1851, 1872), 'numpy.mean', 'np.mean', (['memory_times'], {}), '(memory_times)\n', (1858, 1872), True, 'import numpy as np\n'), ((1888, 1908), 'numpy.std', 'np.std', (['memory_times'], {}), '(memory_times)\n', (1894, 1908), True, 'import numpy as np\n'), ((2904, 2958), 'geometric2dr.embedding_methods.utils.get_files', 'utils.get_files', (['corpus_data_dir', '""".gexf"""'], {'max_files': '(0)'}), "(corpus_data_dir, '.gexf', max_files=0)\n", (2919, 2958), True, 'import geometric2dr.embedding_methods.utils as utils\n'), ((3729, 3750), 'numpy.mean', 'np.mean', (['memory_times'], {}), '(memory_times)\n', (3736, 3750), True, 'import numpy as np\n'), ((3766, 3786), 'numpy.std', 'np.std', (['memory_times'], {}), '(memory_times)\n', (3772, 3786), True, 'import numpy as np\n'), ((4507, 4561), 'geometric2dr.embedding_methods.utils.get_files', 'utils.get_files', (['corpus_data_dir', '""".gexf"""'], {'max_files': '(0)'}), "(corpus_data_dir, '.gexf', max_files=0)\n", (4522, 4561), True, 'import geometric2dr.embedding_methods.utils as utils\n'), ((4616, 4648), 'geometric2dr.decomposition.weisfeiler_lehman_patterns.wl_corpus', 'wl_corpus', (['graph_files', 'wl_depth'], {}), 
'(graph_files, wl_depth)\n', (4625, 4648), False, 'from geometric2dr.decomposition.weisfeiler_lehman_patterns import wl_corpus\n'), ((5251, 5272), 'numpy.mean', 'np.mean', (['memory_times'], {}), '(memory_times)\n', (5258, 5272), True, 'import numpy as np\n'), ((5288, 5308), 'numpy.std', 'np.std', (['memory_times'], {}), '(memory_times)\n', (5294, 5308), True, 'import numpy as np\n'), ((5976, 6030), 'geometric2dr.embedding_methods.utils.get_files', 'utils.get_files', (['corpus_data_dir', '""".gexf"""'], {'max_files': '(0)'}), "(corpus_data_dir, '.gexf', max_files=0)\n", (5991, 6030), True, 'import geometric2dr.embedding_methods.utils as utils\n'), ((6085, 6111), 'geometric2dr.decomposition.shortest_path_patterns.sp_corpus', 'sp_corpus', (['corpus_data_dir'], {}), '(corpus_data_dir)\n', (6094, 6111), False, 'from geometric2dr.decomposition.shortest_path_patterns import sp_corpus\n'), ((6693, 6714), 'numpy.mean', 'np.mean', (['memory_times'], {}), '(memory_times)\n', (6700, 6714), True, 'import numpy as np\n'), ((6730, 6750), 'numpy.std', 'np.std', (['memory_times'], {}), '(memory_times)\n', (6736, 6750), True, 'import numpy as np\n'), ((7589, 7643), 'geometric2dr.embedding_methods.utils.get_files', 'utils.get_files', (['corpus_data_dir', '""".gexf"""'], {'max_files': '(0)'}), "(corpus_data_dir, '.gexf', max_files=0)\n", (7604, 7643), True, 'import geometric2dr.embedding_methods.utils as utils\n'), ((7698, 7757), 'geometric2dr.decomposition.graphlet_patterns.graphlet_corpus', 'graphlet_corpus', (['corpus_data_dir', 'num_graphlet', 'sample_size'], {}), '(corpus_data_dir, num_graphlet, sample_size)\n', (7713, 7757), False, 'from geometric2dr.decomposition.graphlet_patterns import graphlet_corpus\n'), ((8367, 8388), 'numpy.mean', 'np.mean', (['memory_times'], {}), '(memory_times)\n', (8374, 8388), True, 'import numpy as np\n'), ((8404, 8424), 'numpy.std', 'np.std', (['memory_times'], {}), '(memory_times)\n', (8410, 8424), True, 'import numpy as np\n'), ((967, 1187), 
'geometric2dr.embedding_methods.skipgram_trainer.Trainer', 'Trainer', ([], {'corpus_dir': 'corpus_data_dir', 'extension': 'extension', 'max_files': '(0)', 'output_fh': 'output_embedding_fh', 'emb_dimension': 'emb_dimension', 'batch_size': 'batch_size', 'epochs': 'epochs', 'initial_lr': 'initial_lr', 'min_count': 'min_count'}), '(corpus_dir=corpus_data_dir, extension=extension, max_files=0,\n output_fh=output_embedding_fh, emb_dimension=emb_dimension, batch_size=\n batch_size, epochs=epochs, initial_lr=initial_lr, min_count=min_count)\n', (974, 1187), False, 'from geometric2dr.embedding_methods.skipgram_trainer import Trainer, InMemoryTrainer\n'), ((1231, 1242), 'time.time', 'time.time', ([], {}), '()\n', (1240, 1242), False, 'import time\n'), ((1463, 1696), 'geometric2dr.embedding_methods.skipgram_trainer.InMemoryTrainer', 'InMemoryTrainer', ([], {'corpus_dir': 'corpus_data_dir', 'extension': 'extension', 'max_files': '(0)', 'output_fh': 'output_embedding_fh', 'emb_dimension': 'emb_dimension', 'batch_size': 'batch_size', 'epochs': 'epochs', 'initial_lr': 'initial_lr', 'min_count': 'min_count'}), '(corpus_dir=corpus_data_dir, extension=extension, max_files=\n 0, output_fh=output_embedding_fh, emb_dimension=emb_dimension,\n batch_size=batch_size, epochs=epochs, initial_lr=initial_lr, min_count=\n min_count)\n', (1478, 1696), False, 'from geometric2dr.embedding_methods.skipgram_trainer import Trainer, InMemoryTrainer\n'), ((1735, 1746), 'time.time', 'time.time', ([], {}), '()\n', (1744, 1746), False, 'import time\n'), ((2999, 3076), 'geometric2dr.decomposition.anonymous_walk_patterns.awe_corpus', 'awe_corpus', (['corpus_data_dir', 'aw_length', 'label_setting'], {'saving_graph_docs': '(True)'}), '(corpus_data_dir, aw_length, label_setting, saving_graph_docs=True)\n', (3009, 3076), False, 'from geometric2dr.decomposition.anonymous_walk_patterns import awe_corpus\n'), ((3382, 3587), 'geometric2dr.embedding_methods.pvdm_trainer.PVDM_Trainer', 'PVDM_Trainer', ([], 
{'corpus_dir': 'corpus_data_dir', 'extension': 'extension', 'max_files': '(0)', 'window_size': '(16)', 'output_fh': 'output_embedding_fh', 'emb_dimension': '(128)', 'batch_size': '(100)', 'epochs': '(100)', 'initial_lr': '(0.1)', 'min_count': '(0)'}), '(corpus_dir=corpus_data_dir, extension=extension, max_files=0,\n window_size=16, output_fh=output_embedding_fh, emb_dimension=128,\n batch_size=100, epochs=100, initial_lr=0.1, min_count=0)\n', (3394, 3587), False, 'from geometric2dr.embedding_methods.pvdm_trainer import PVDM_Trainer\n'), ((3613, 3624), 'time.time', 'time.time', ([], {}), '()\n', (3622, 3624), False, 'import time\n'), ((4913, 5122), 'geometric2dr.embedding_methods.skipgram_trainer.InMemoryTrainer', 'InMemoryTrainer', ([], {'corpus_dir': 'corpus_data_dir', 'extension': 'extension', 'max_files': '(0)', 'window_size': '(10)', 'output_fh': 'output_embedding_fh', 'emb_dimension': '(32)', 'batch_size': '(1280)', 'epochs': '(100)', 'initial_lr': '(0.1)', 'min_count': '(1)'}), '(corpus_dir=corpus_data_dir, extension=extension, max_files=\n 0, window_size=10, output_fh=output_embedding_fh, emb_dimension=32,\n batch_size=1280, epochs=100, initial_lr=0.1, min_count=1)\n', (4928, 5122), False, 'from geometric2dr.embedding_methods.skipgram_trainer import Trainer, InMemoryTrainer\n'), ((5135, 5146), 'time.time', 'time.time', ([], {}), '()\n', (5144, 5146), False, 'import time\n'), ((6351, 6559), 'geometric2dr.embedding_methods.skipgram_trainer.InMemoryTrainer', 'InMemoryTrainer', ([], {'corpus_dir': 'corpus_data_dir', 'extension': 'extension', 'max_files': '(0)', 'window_size': '(10)', 'output_fh': 'output_embedding_fh', 'emb_dimension': '(32)', 'batch_size': '(128)', 'epochs': '(100)', 'initial_lr': '(0.1)', 'min_count': '(1)'}), '(corpus_dir=corpus_data_dir, extension=extension, max_files=\n 0, window_size=10, output_fh=output_embedding_fh, emb_dimension=32,\n batch_size=128, epochs=100, initial_lr=0.1, min_count=1)\n', (6366, 6559), False, 'from 
geometric2dr.embedding_methods.skipgram_trainer import Trainer, InMemoryTrainer\n'), ((6577, 6588), 'time.time', 'time.time', ([], {}), '()\n', (6586, 6588), False, 'import time\n'), ((8023, 8231), 'geometric2dr.embedding_methods.skipgram_trainer.InMemoryTrainer', 'InMemoryTrainer', ([], {'corpus_dir': 'corpus_data_dir', 'extension': 'extension', 'max_files': '(0)', 'window_size': '(10)', 'output_fh': 'output_embedding_fh', 'emb_dimension': '(32)', 'batch_size': '(128)', 'epochs': '(100)', 'initial_lr': '(0.1)', 'min_count': '(0)'}), '(corpus_dir=corpus_data_dir, extension=extension, max_files=\n 0, window_size=10, output_fh=output_embedding_fh, emb_dimension=32,\n batch_size=128, epochs=100, initial_lr=0.1, min_count=0)\n', (8038, 8231), False, 'from geometric2dr.embedding_methods.skipgram_trainer import Trainer, InMemoryTrainer\n'), ((8251, 8262), 'time.time', 'time.time', ([], {}), '()\n', (8260, 8262), False, 'import time\n'), ((1273, 1284), 'time.time', 'time.time', ([], {}), '()\n', (1282, 1284), False, 'import time\n'), ((1777, 1788), 'time.time', 'time.time', ([], {}), '()\n', (1786, 1788), False, 'import time\n'), ((3655, 3666), 'time.time', 'time.time', ([], {}), '()\n', (3664, 3666), False, 'import time\n'), ((5177, 5188), 'time.time', 'time.time', ([], {}), '()\n', (5186, 5188), False, 'import time\n'), ((6619, 6630), 'time.time', 'time.time', ([], {}), '()\n', (6628, 6630), False, 'import time\n'), ((8293, 8304), 'time.time', 'time.time', ([], {}), '()\n', (8302, 8304), False, 'import time\n')] |
"""Functions for using Gaussian Processes."""
import logging
from typing import Callable, Tuple
import numpy as np
def zero_mean_initialise(x: np.ndarray, kernel_fun: Callable, noise: float = 0.0) -> Tuple[np.ndarray, np.ndarray]:
    """Initialise a zero mean GP using the provided kernel function.
    Parameters
    ----------
    x: ndarray
        List of x points
    kernel_fun: function
        Kernel function, like those provided by the kernel_functions module.
    noise: float
        Observation-noise variance added to the diagonal of the covariance
        matrix (jitter). Defaults to 0.0, i.e. a noise-free prior.
    Returns
    -------
    tuple of ndarray
        The mean vector and the covariance matrix.
    """
    # Lazy %-style args so the message is only formatted when DEBUG is enabled.
    logging.debug("x shape: %s", x.shape)
    mean_vector = np.zeros(x.shape[0])  # zero-mean prior
    logging.debug("mean vector (initial) shape: %s", mean_vector.shape)
    covariance_matrix = kernel_fun(x, x)  # prior covariance from the kernel
    # Add noise on the diagonal; also improves numerical conditioning.
    covariance_matrix += noise * np.identity(covariance_matrix.shape[0])
    logging.debug("x shape (after kernel call): %s", x.shape)
    logging.debug("covariance matrix shape: %s", covariance_matrix.shape)
    return mean_vector, covariance_matrix
def sample_function(mean_vector, covariance_matrix) -> np.ndarray:
    """Draw a single function sample from a GP.
    Parameters
    ----------
    mean_vector: ndarray
        Mean vector of the GP
    covariance_matrix: ndarray
        Covariance matrix of the GP
    Returns
    -------
    ndarray
        One realisation drawn from the GP with the given mean and
        covariance; it can serve as a ground-truth function.
    """
    drawn = np.random.multivariate_normal(mean_vector, covariance_matrix)
    return drawn
def regression_update(x: np.ndarray,
                      kernel_fun: Callable[[np.ndarray, np.ndarray], np.ndarray],
                      x_data: np.ndarray,
                      y_data: np.ndarray,
                      noise: float = 0.0):
    """Update the GP with the given data
    Parameters
    ----------
    x: List of x points
    kernel_fun: Kernel function to be called, takes 2 vectors and returns the corresponding kernel matrix
    x_data: x points for which we have data
    y_data: y points for which we have data
    noise: amount of noise over the feedback
    Returns
    -------
    Updated mean and covariance of the GP
    """
    query_points = np.array(x)
    # Cross-covariance column vectors k(x*, X) for every query point x*.
    k_list = [
        np.array([[kernel_fun(query, observed)[0, 0] for observed in x_data]]).T
        for query in query_points
    ]
    # Gram matrix of the observed inputs, regularised by the noise term.
    # noinspection PyPep8Naming
    K = kernel_fun(x_data, x_data)
    K = K + noise * np.identity(K.shape[0])
    # Prior variances k(x*, x*) at each query point.
    k_new_list = [np.array(kernel_fun(query, query)) for query in query_points]
    # Pseudo-inverse sidesteps numerical issues with near-singular K.
    inv_K = np.linalg.pinv(K)
    # Posterior predictive mean and variance at each query point.
    updated_mean = np.array([k.T @ inv_K @ y_data for k in k_list]).flatten()
    updated_variance = np.array([
        k_new - k.T @ inv_K @ k
        for k, k_new in zip(k_list, k_new_list)
    ]).flatten()
    return updated_mean, updated_variance
| [
"numpy.identity",
"numpy.linalg.pinv",
"numpy.random.multivariate_normal",
"numpy.array",
"numpy.zeros"
] | [((642, 662), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (650, 662), True, 'import numpy as np\n'), ((1527, 1588), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean_vector', 'covariance_matrix'], {}), '(mean_vector, covariance_matrix)\n', (1556, 1588), True, 'import numpy as np\n'), ((2671, 2688), 'numpy.linalg.pinv', 'np.linalg.pinv', (['K'], {}), '(K)\n', (2685, 2688), True, 'import numpy as np\n'), ((903, 942), 'numpy.identity', 'np.identity', (['covariance_matrix.shape[0]'], {}), '(covariance_matrix.shape[0])\n', (914, 942), True, 'import numpy as np\n'), ((2516, 2539), 'numpy.identity', 'np.identity', (['K.shape[0]'], {}), '(K.shape[0])\n', (2527, 2539), True, 'import numpy as np\n'), ((2393, 2404), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2401, 2404), True, 'import numpy as np\n'), ((2598, 2609), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2606, 2609), True, 'import numpy as np\n')] |
"""Module with embedding visualization tools."""
from multiprocessing import cpu_count
from typing import Dict, List, Tuple, Union
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ddd_subplots import subplots as subplots_3d
from ensmallen_graph import EnsmallenGraph # pylint: disable=no-name-in-module
from matplotlib.axes import Axes
from matplotlib.collections import Collection
from matplotlib.colors import ListedColormap, LogNorm
from matplotlib.figure import Figure
from matplotlib.legend_handler import HandlerBase, HandlerTuple
from sanitize_ml_labels import sanitize_ml_labels
from sklearn.decomposition import PCA
from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit
from sklearn.preprocessing import RobustScaler
from ..transformers import GraphTransformer, NodeTransformer
class GraphVisualization:
    """Tools to visualize the graph embeddings."""
    # Defaults forwarded to matplotlib scatter calls: small, semi-transparent
    # markers read well when plotting many embedded points.
    DEFAULT_SCATTER_KWARGS = dict(
        s=8,
        alpha=0.8
    )
    # Defaults used when this class creates its own figure/axes.
    DEFAULT_SUBPLOT_KWARGS = dict(
        figsize=(6, 6),
        dpi=200
    )
def __init__(
self,
graph: EnsmallenGraph,
decomposition_method: str = "TSNE",
scaler_method: "Scaler" = RobustScaler,
n_components: int = 2,
node_embedding_method: str = None,
edge_embedding_method: str = "Hadamard",
subsample_points: int = 20_000,
random_state: int = 42
):
"""Create new GraphVisualization object.
Parameters
--------------------------
graph: EnsmallenGraph,
The graph to visualize.
decomposition_method: str = "TSNE",
The decomposition method to use.
The supported methods are TSNE and PCA.
scaler_method: "Scaler" = RobustScaler,
The scaler object to use to normalize the embedding.
By default we use a Robust Scaler.
Pass None to not use any scaler.
n_components: int = 2,
Number of components to reduce the image to.
Currently, we only support 2D decompositions but we plan
to add support for also 3D decompositions.
node_embedding_method: str = None,
Name of the node embedding method used.
If provided, it is added to the images titles.
edge_embedding_method: str = "Hadamard",
Edge embedding method.
Can either be 'Hadamard', 'Sum', 'Average', 'L1', 'AbsoluteL1', 'L2' or 'Concatenate'.
subsample_points: int = 20_000,
Number of points to subsample.
Some graphs have a number of nodes and edges in the millions.
Using non-CUDA versions of TSNE, the dimensionality reduction
procedure can take a considerable amount of time.
For this porpose, we include the possibility to subsample the
points to the given number.
The subsampling is done in a way that takes into consideration
the node types and/or edge types (the subsampling is applied
separately to the two different sets) by using a Stratified Shuffle
Split if there are node types or edge types.
Otherwise, a normal train test split is used.
If None is given, no subsampling is executed.
random_state: int = 42,
The random state to reproduce the visualizations.
Raises
---------------------------
ValueError,
If the target decomposition size is not supported.
ModuleNotFoundError,
If TSNE decomposition has been required and no module supporting
it is installed.
"""
self._graph = graph
self._graph_transformer = GraphTransformer(
method=edge_embedding_method
)
self._node_transformer = NodeTransformer()
self._node_embedding_method = node_embedding_method
self._node_mapping = self._node_embedding = self._edge_embedding = None
self._subsampled_node_ids = None
self._subsampled_edge_ids = None
self._subsample_points = subsample_points
self._random_state = random_state
if n_components not in {2, 3}:
raise ValueError(
"We currently only support 2D and 3D decomposition visualization."
)
self._n_components = n_components
self._scaler_method = None if scaler_method is None else scaler_method()
if decomposition_method == "TSNE":
try:
# We try to use CUDA tsne if available, but this does not
# currently support 3D decomposition. If the user has required a
# 3D decomposition we need to switch to the MulticoreTSNE version.
if n_components != 2:
raise ModuleNotFoundError()
from tsnecuda import TSNE as CUDATSNE # pylint: disable=import-error,import-outside-toplevel
self._decomposition_method = CUDATSNE(
n_components=2,
random_seed=random_state,
verbose=True
)
except ModuleNotFoundError:
try:
from MulticoreTSNE import \
MulticoreTSNE # pylint: disable=import-outside-toplevel
self._decomposition_method = MulticoreTSNE(
n_components=n_components,
n_jobs=cpu_count(),
random_state=random_state,
verbose=True
)
except ModuleNotFoundError:
try:
from sklearn.manifold import \
TSNE # pylint: disable=import-outside-toplevel
self._decomposition_method = TSNE(
n_components=n_components,
n_jobs=cpu_count(),
random_state=random_state,
verbose=True
)
except:
raise ModuleNotFoundError(
"You do not have installed a supported TSNE "
"decomposition algorithm. Depending on your use case, "
"we suggest you install tsne-cuda if your graph is "
"very big (in the millions of nodes) if you have access "
"to a compatible GPU system.\n"
"Alternatively, we suggest (and support) MulticoreTSNE, "
"which tends to be easier to install, and is significantly "
"faster than the Sklearn implementation.\n"
"Alternatively, we suggest (and support) MulticoreTSNE, "
"which tends to be easier to install, and is significantly "
"faster than the Sklearn implementation.\n"
"If you intend to do 3D decompositions, "
"remember that tsne-cuda, at the time of writing, "
"does not support them."
)
elif decomposition_method == "PCA":
self._decomposition_method = PCA(
n_components=n_components,
random_state=random_state
)
else:
raise ValueError(
"We currently only support PCA and TSNE decomposition methods."
)
def decompose(self, X: np.ndarray) -> np.ndarray:
"""Return requested decomposition of given array.
Parameters
-----------------------
X: np.ndarray,
The data to embed.
Raises
-----------------------
ValueError,
If the given vector has less components than the required
decomposition target.
Returns
-----------------------
The obtained decomposition.
"""
if X.shape[1] == self._n_components:
return X
if X.shape[1] < self._n_components:
raise ValueError(
"The vector to decompose has less components than "
"the decomposition target."
)
return self._decomposition_method.fit_transform(X)
def _shuffle(
self,
*args: List[Union[np.ndarray, pd.DataFrame, None]]
) -> List[np.ndarray]:
"""Return given arrays shuffled synchronously.
The reason to shuffle the points is mainly that this avoids for
'fake' clusters to appear simply by stacking the points by class
artifically according to how the points are sorted.
Parameters
------------------------
*args: List[Union[np.ndarray, pd.DataFrame, None]],
The lists to shuffle.
Returns
------------------------
Shuffled data using given random state.
"""
index = np.arange(args[0].shape[0])
random_state = np.random.RandomState( # pylint: disable=no-member
seed=self._random_state
)
random_state.shuffle(index)
return [
arg[index] if isinstance(arg, np.ndarray)
else arg.iloc[index] if isinstance(arg, pd.DataFrame)
else None
for arg in args
]
def _set_legend(
self,
axes: Axes,
labels: List[str],
handles: List[HandlerBase],
legend_title: str
):
"""Set the legend with the given values and handles transparency.
Parameters
----------------------------
axes: Axes,
The axes on which to put the legend.
labels: List[str],
Labels to put in the legend.
handles: List,
Handles to display in the legend (the curresponding matplotlib
objects).
legend_title: str,
Title for the legend.
"""
legend = axes.legend(
handles=handles,
labels=labels,
loc='best',
title=legend_title,
**(
dict(handler_map={tuple: HandlerTuple(ndivide=None)})
if isinstance(handles[0], tuple)
else {}
)
)
# Setting alpha level in the legend to avoid having a transparent
# legend scatter dots.
for legend_handle in legend.legendHandles:
legend_handle._legmarker.set_alpha( # pylint: disable=protected-access
1
)
def fit_transform_nodes(
self,
node_embedding: pd.DataFrame
):
"""Executes fitting for plotting node embeddings.
Parameters
-------------------------
node_embedding: pd.DataFrame,
Embedding of the graph nodes.
"""
# Retrieve the nodes
node_names = np.array(self._graph.get_node_names())
# If necessary, we proceed with the subsampling
if self._subsample_points is not None and self._graph.get_nodes_number() > self._subsample_points:
# If there are node types, we use a stratified
# node sampling so that all the nodes types may be displayed.
if self._graph.has_node_types() and all(
count > 1
for count in self._graph.get_node_type_counts().values()
):
Splitter = StratifiedShuffleSplit
else:
# Otherwise there is no need to stratify.
Splitter = ShuffleSplit
# We compute the indices
self._subsampled_node_ids, _ = next(Splitter(
n_splits=1,
train_size=self._subsample_points,
random_state=self._random_state
).split(node_names, self._graph.get_node_types()))
# And sample the nodes
node_names = node_names[self._subsampled_node_ids]
if self._scaler_method is not None:
node_embedding = pd.DataFrame(
self._scaler_method.fit_transform(node_embedding),
columns=node_embedding.columns,
index=node_embedding.index,
)
self._node_transformer.fit(node_embedding)
self._node_embedding = pd.DataFrame(
self.decompose(
self._node_transformer.transform(node_names)
),
index=node_names
)
def fit_transform_edges(self, node_embedding: np.ndarray):
"""Executes fitting for plotting edge embeddings.
Parameters
-------------------------
node_embedding: np.ndarray,
Embedding obtained from SkipGram, CBOW or GloVe.
"""
# Retrieve the edges
edge_names = np.array(self._graph.get_edge_names())
# If necessary, we proceed with the subsampling
if self._subsample_points is not None and self._graph.get_edges_number() > self._subsample_points:
# If there are edge types, we use a stratified
# edge sampling so that all the edges types may be displayed.
if self._graph.has_edge_types() and all(
count > 1
for count in self._graph.get_edge_type_counts().values()
):
Splitter = StratifiedShuffleSplit
else:
# Otherwise there is no need to stratify.
Splitter = ShuffleSplit
# We compute the indices
self._subsampled_edge_ids, _ = next(Splitter(
n_splits=1,
train_size=self._subsample_points,
random_state=self._random_state
).split(edge_names, self._graph.get_edge_types()))
# And sample the edges
edge_names = edge_names[self._subsampled_edge_ids]
if self._scaler_method is not None:
node_embedding = pd.DataFrame(
self._scaler_method.fit_transform(node_embedding),
columns=node_embedding.columns,
index=node_embedding.index,
)
self._graph_transformer.fit(node_embedding)
self._edge_embedding = pd.DataFrame(
self.decompose(
self._graph_transformer.transform(edge_names),
),
index=edge_names
)
    def _plot_scatter(
        self,
        title: str,
        points: np.ndarray,
        colors: List[int] = None,
        edgecolors: List[int] = None,
        labels: List[str] = None,
        legend_title: str = "",
        figure: Figure = None,
        axes: Axes = None,
        scatter_kwargs: Dict = None,
        train_indices: np.ndarray = None,
        test_indices: np.ndarray = None,
        train_marker: str = "o",
        test_marker: str = "X",
        **kwargs
    ) -> Tuple[Figure, Axes, Tuple[Collection]]:
        """Plot nodes of provided graph.
        Parameters
        ------------------------------
        title: str,
            Title to use for the plot.
        points: np.ndarray,
            Points to plot.
        colors: List[int] = None,
            List of the colors to use for the scatter plot.
        edgecolors: List[int] = None,
            List of the edge colors to use for the scatter plot.
        labels: List[str] = None,
            Labels for the different colors.
        legend_title: str = "",
            Title for the legend.
        figure: Figure = None,
            Figure to use to plot. If None, a new one is created using the
            provided kwargs.
        axes: Axes = None,
            Axes to use to plot. If None, a new one is created using the
            provided kwargs.
        scatter_kwargs: Dict = None,
            Kwargs to pass to the scatter plot call.
        train_indices: np.ndarray = None,
            Indices to draw using the training marker.
            If None, all points are drawn using the training marker.
        test_indices: np.ndarray = None,
            Indices to draw using the test marker.
            If None, while providing the train indices, we only plot the
            training points.
        train_marker: str = "o",
            The marker to use to draw the training points.
        test_marker: str = "X",
            The marker to use to draw the test points.
        **kwargs: Dict,
            Arguments to pass to the subplots.
        Raises
        ------------------------------
        ValueError,
            If given train and test indices overlap.
        Returns
        ------------------------------
        Figure and Axis of the plot, followed by tuple of collections.
        """
        # A point cannot be both a training and a test point.
        if train_indices is not None and test_indices is not None:
            if np.isin(train_indices, test_indices).any():
                raise ValueError(
                    "The train and test indices overlap."
                )
        # Create figure/axes if the caller did not provide them; 3D plots
        # go through the ddd_subplots helper instead of plain matplotlib.
        if figure is None or axes is None:
            if self._n_components == 2:
                figure, axes = plt.subplots(**{
                    **GraphVisualization.DEFAULT_SUBPLOT_KWARGS,
                    **kwargs
                })
            else:
                figure, axes = subplots_3d(**{
                    **GraphVisualization.DEFAULT_SUBPLOT_KWARGS,
                    **kwargs
                })
        # Merge scatter kwargs: class defaults, then an edge-line width that
        # depends on whether edge colors were requested, then caller overrides.
        scatter_kwargs = {
            **GraphVisualization.DEFAULT_SCATTER_KWARGS,
            **(
                dict(linewidths=0)
                if edgecolors is None
                else dict(linewidths=0.5)
            ),
            **({} if scatter_kwargs is None else scatter_kwargs),
        }
        # Per-point role mask: 0 = plain, 1 = training point, 2 = test point.
        train_test_mask = np.zeros((points.shape[0]))
        if train_indices is not None:
            train_test_mask[train_indices] = 1
        if test_indices is not None:
            train_test_mask[test_indices] = 2
        # Shuffle all arrays with the same permutation so no 'fake' clusters
        # appear from the original point ordering.
        # NOTE(review): _shuffle maps non-ndarray/non-DataFrame args to None,
        # so colors/edgecolors are presumably always numpy arrays here —
        # verify against the callers.
        points, colors, edgecolors, train_test_mask = self._shuffle(
            points,
            colors,
            edgecolors,
            train_test_mask
        )
        legend_elements = []
        collections = []
        # Default qualitative palette (up to 10 distinct classes).
        color_names = np.array([
            "tab:blue",
            "tab:orange",
            "tab:green",
            "tab:red",
            "tab:purple",
            "tab:brown",
            "tab:pink",
            "tab:gray",
            "tab:olive",
            "tab:cyan",
        ])
        if colors is not None:
            # Caller-provided cmap wins; otherwise build one sized to the
            # number of classes actually present.
            cmap = scatter_kwargs.pop(
                "cmap",
                ListedColormap(color_names[:int(colors.max() + 1)])
            )
            # No train/test split: one scatter call covers every point.
            if train_indices is None and test_indices is None:
                scatter = axes.scatter(
                    *points.T,
                    c=colors,
                    edgecolors=None if edgecolors is None else cmap(edgecolors),
                    marker=train_marker,
                    cmap=cmap,
                    **scatter_kwargs
                )
                collections.append(scatter)
                legend_elements += scatter.legend_elements()[0]
            # Training points drawn with the train marker.
            if train_indices is not None:
                train_mask = train_test_mask == 1
                train_scatter = axes.scatter(
                    *points[train_mask].T,
                    c=colors[train_mask],
                    edgecolors=None if edgecolors is None else cmap(
                        edgecolors[train_mask]
                    ),
                    marker=train_marker,
                    cmap=cmap,
                    **scatter_kwargs
                )
                collections.append(train_scatter)
                legend_elements.append(train_scatter.legend_elements()[0])
            # Test points drawn with the test marker.
            if test_indices is not None:
                test_mask = train_test_mask == 2
                test_scatter = axes.scatter(
                    *points[test_mask].T,
                    c=colors[test_mask],
                    edgecolors=None if edgecolors is None else cmap(
                        edgecolors[test_mask]),
                    marker=test_marker,
                    cmap=cmap,
                    **scatter_kwargs
                )
                collections.append(test_scatter)
                legend_elements.append(test_scatter.legend_elements()[0])
        # Invisible placeholder used to pad legend entries for classes that
        # appear in only one of the train/test splits.
        rectangle_to_fill_legend = matplotlib.patches.Rectangle(
            (0, 0), 1, 1,
            fill=False,
            edgecolor='none',
            visible=False
        )
        if all(
            e is not None
            for e in (colors, train_indices, test_indices, labels)
        ):
            # Pair up the train/test legend handles per class so each class
            # gets a single entry showing both markers; missing splits are
            # padded with the invisible rectangle.
            unique_train_colors = np.unique(colors[train_mask])
            unique_test_colors = np.unique(colors[test_mask])
            new_legend_elements = []
            train_element_index = 0
            test_element_index = 0
            for color in np.unique(colors):
                new_tuple = []
                if color in unique_train_colors:
                    new_tuple.append(legend_elements[0][train_element_index])
                    train_element_index += 1
                else:
                    new_tuple.append(rectangle_to_fill_legend)
                if color in unique_test_colors:
                    new_tuple.append(legend_elements[1][test_element_index])
                    test_element_index += 1
                else:
                    new_tuple.append(rectangle_to_fill_legend)
                new_legend_elements.append(tuple(new_tuple))
            legend_elements = new_legend_elements
        if labels is not None:
            self._set_legend(
                axes,
                labels,
                legend_elements,
                legend_title
            )
        # Axis ticks carry no meaning for 2D embeddings: hide them.
        if self._n_components == 2:
            axes.set_axis_off()
        # Compose the title: base title, graph name, then (if known) the
        # node embedding method.
        title = "{} - {}".format(
            title,
            self._graph.get_name(),
        )
        if self._node_embedding_method is not None:
            title = "{} - {}".format(
                title,
                self._node_embedding_method
            )
        axes.set_title(title)
        figure.tight_layout()
        return figure, axes, collections
def _plot_types(
self,
title: str,
points: np.ndarray,
types: List[int],
type_labels: List[str],
legend_title: str,
predictions: List[int] = None,
k: int = 9,
figure: Figure = None,
axes: Axes = None,
scatter_kwargs: Dict = None,
other_label: str = "Other",
train_indices: np.ndarray = None,
test_indices: np.ndarray = None,
train_marker: str = "o",
test_marker: str = "X",
**kwargs
) -> Tuple[Figure, Axes]:
"""Plot common node types of provided graph.
Parameters
------------------------------
title: str,
Title to use for the plot.
points: np.ndarray,
Points to plot.
types: List[int],
Types of the provided points.
type_labels: List[str],
List of the labels for the provided types.
legend_title: str,
Title for the legend.
predictions: List[int] = None,
List of the labels predicted.
If None, no prediction is visualized.
k: int = 9,
Number of node types to visualize.
figure: Figure = None,
Figure to use to plot. If None, a new one is created using the
provided kwargs.
axes: Axes = None,
Axes to use to plot. If None, a new one is created using the
provided kwargs.
scatter_kwargs: Dict = None,
Kwargs to pass to the scatter plot call.
other_label: str = "Other",
Label to use for edges below the top k threshold.
train_indices: np.ndarray = None,
Indices to draw using the training marker.
If None, all points are drawn using the training marker.
test_indices: np.ndarray = None,
Indices to draw using the test marker.
If None, while providing the train indices,
train_marker: str = "o",
The marker to use to draw the training points.
test_marker: str = "X",
The marker to use to draw the test points.
**kwargs: Dict,
Arguments to pass to the subplots.
Raises
------------------------------
ValueError,
If edge fitting was not yet executed.
ValueError,
If given k is greater than maximum supported value (10).
ValueError,
If the number of given type labels does not match the number
of given type counts.
Returns
------------------------------
Figure and Axis of the plot.
"""
if k > 9:
raise ValueError(
"Values of k greater than 9 are not supported!"
)
# if not isinstance(types, np.ndarray):
# raise ValueError(
# "Expecting types to be a numpy array."
# )
types = np.array(types)
number_of_types = np.unique(types).size
type_labels = np.array(sanitize_ml_labels(list(type_labels)))
counts = np.bincount(types, minlength=number_of_types)
top_counts = [
index
for index, _ in sorted(
enumerate(zip(counts, type_labels)),
key=lambda x: x[1],
reverse=True
)[:k]
]
type_labels = list(type_labels[top_counts])
for i, element_type in enumerate(types):
if element_type not in top_counts:
types[i] = k
else:
types[i] = top_counts.index(element_type)
if predictions is not None:
predictions = predictions.copy()
for i, element_type in enumerate(predictions):
if element_type not in top_counts:
predictions[i] = k
else:
predictions[i] = top_counts.index(element_type)
if k < number_of_types:
type_labels.append(other_label)
figure, axis, _ = self._plot_scatter(
title=title,
points=points,
colors=types,
edgecolors=predictions,
labels=type_labels,
legend_title=legend_title,
figure=figure,
axes=axes,
scatter_kwargs=scatter_kwargs,
train_indices=train_indices,
test_indices=test_indices,
train_marker=train_marker,
test_marker=test_marker,
**kwargs
)
return figure, axis
def plot_nodes(
self,
figure: Figure = None,
axes: Axes = None,
scatter_kwargs: Dict = None,
train_indices: np.ndarray = None,
test_indices: np.ndarray = None,
train_marker: str = "o",
test_marker: str = "X",
**kwargs: Dict
) -> Tuple[Figure, Axes]:
"""Plot nodes of provided graph.
Parameters
------------------------------
figure: Figure = None,
Figure to use to plot. If None, a new one is created using the
provided kwargs.
axes: Axes = None,
Axes to use to plot. If None, a new one is created using the
provided kwargs.
scatter_kwargs: Dict = None,
Kwargs to pass to the scatter plot call.
train_indices: np.ndarray = None,
Indices to draw using the training marker.
If None, all points are drawn using the training marker.
test_indices: np.ndarray = None,
Indices to draw using the test marker.
If None, while providing the train indices,
train_marker: str = "o",
The marker to use to draw the training points.
test_marker: str = "X",
The marker to use to draw the test points.
**kwargs: Dict,
Arguments to pass to the subplots.
Raises
------------------------------
ValueError,
If edge fitting was not yet executed.
Returns
------------------------------
Figure and Axis of the plot.
"""
if self._node_embedding is None:
raise ValueError(
"Node fitting must be executed before plot."
)
figure, axis, _ = self._plot_scatter(
"Nodes embedding",
self._node_embedding,
figure=figure,
axes=axes,
scatter_kwargs=scatter_kwargs,
train_indices=train_indices,
test_indices=test_indices,
train_marker=train_marker,
test_marker=test_marker,
**kwargs
)
return figure, axis
def plot_edges(
self,
figure: Figure = None,
axes: Axes = None,
scatter_kwargs: Dict = None,
train_indices: np.ndarray = None,
test_indices: np.ndarray = None,
train_marker: str = "o",
test_marker: str = "X",
**kwargs: Dict
) -> Tuple[Figure, Axes]:
"""Plot edge embedding of provided graph.
Parameters
------------------------------
figure: Figure = None,
Figure to use to plot. If None, a new one is created using the
provided kwargs.
axes: Axes = None,
Axes to use to plot. If None, a new one is created using the
provided kwargs.
scatter_kwargs: Dict = None,
Kwargs to pass to the scatter plot call.
train_indices: np.ndarray = None,
Indices to draw using the training marker.
If None, all points are drawn using the training marker.
test_indices: np.ndarray = None,
Indices to draw using the test marker.
If None, while providing the train indices,
train_marker: str = "o",
The marker to use to draw the training points.
test_marker: str = "X",
The marker to use to draw the test points.
**kwargs: Dict,
Arguments to pass to the subplots.
Raises
------------------------------
ValueError,
If edge fitting was not yet executed.
Returns
------------------------------
Figure and Axis of the plot.
"""
if self._edge_embedding is None:
raise ValueError(
"Edge fitting must be executed before plot."
)
figure, axis, _ = self._plot_scatter(
"Edges embedding",
self._edge_embedding,
figure=figure,
axes=axes,
scatter_kwargs=scatter_kwargs,
train_indices=train_indices,
test_indices=test_indices,
train_marker=train_marker,
test_marker=test_marker,
**kwargs
)
return figure, axis
def plot_node_types(
self,
node_type_predictions: List[int] = None,
k: int = 9,
figure: Figure = None,
axes: Axes = None,
scatter_kwargs: Dict = None,
legend_title: str = "Node types",
other_label: str = "Other",
train_indices: np.ndarray = None,
test_indices: np.ndarray = None,
train_marker: str = "o",
test_marker: str = "X",
**kwargs
) -> Tuple[Figure, Axes]:
"""Plot common node types of provided graph.
Parameters
------------------------------
node_type_predictions: List[int] = None,
Predictions of the node types.
k: int = 9,
Number of node types to visualize.
figure: Figure = None,
Figure to use to plot. If None, a new one is created using the
provided kwargs.
axes: Axes = None,
Axes to use to plot. If None, a new one is created using the
provided kwargs.
scatter_kwargs: Dict = None,
Kwargs to pass to the scatter plot call.
other_label: str = "Other",
Label to use for edges below the top k threshold.
train_indices: np.ndarray = None,
Indices to draw using the training marker.
If None, all points are drawn using the training marker.
test_indices: np.ndarray = None,
Indices to draw using the test marker.
If None, while providing the train indices,
train_marker: str = "o",
The marker to use to draw the training points.
test_marker: str = "X",
The marker to use to draw the test points.
**kwargs: Dict,
Arguments to pass to the subplots.
Raises
------------------------------
ValueError,
If edge fitting was not yet executed.
ValueError,
If given k is greater than maximum supported value (10).
Returns
------------------------------
Figure and Axis of the plot.
"""
if not self._graph.has_node_types():
raise ValueError(
"The graph does not have node types!"
)
if self._node_embedding is None:
raise ValueError(
"Node fitting must be executed before plot."
)
node_types = self._graph.get_node_types()
if self._subsampled_node_ids is not None:
node_types = node_types[self._subsampled_node_ids]
return self._plot_types(
"Node types",
self._node_embedding.values,
types=node_types,
type_labels=np.array(self._graph.get_node_type_names()),
legend_title=legend_title,
predictions=node_type_predictions,
k=k,
figure=figure,
axes=axes,
scatter_kwargs=scatter_kwargs,
other_label=other_label,
train_indices=train_indices,
test_indices=test_indices,
train_marker=train_marker,
test_marker=test_marker,
**kwargs
)
def plot_connected_components(
self,
k: int = 9,
figure: Figure = None,
axes: Axes = None,
scatter_kwargs: Dict = None,
other_label: str = "Other",
legend_title: str = "Component sizes",
train_indices: np.ndarray = None,
test_indices: np.ndarray = None,
train_marker: str = "o",
test_marker: str = "X",
**kwargs
) -> Tuple[Figure, Axes]:
"""Plot common node types of provided graph.
Parameters
------------------------------
k: int = 9,
Number of components to visualize.
figure: Figure = None,
Figure to use to plot. If None, a new one is created using the
provided kwargs.
axes: Axes = None,
Axes to use to plot. If None, a new one is created using the
provided kwargs.
scatter_kwargs: Dict = None,
Kwargs to pass to the scatter plot call.
other_label: str = "Other",
Label to use for edges below the top k threshold.
legend_title: str = "Component sizes",
Title for the legend.
train_indices: np.ndarray = None,
Indices to draw using the training marker.
If None, all points are drawn using the training marker.
test_indices: np.ndarray = None,
Indices to draw using the test marker.
If None, while providing the train indices,
train_marker: str = "o",
The marker to use to draw the training points.
test_marker: str = "X",
The marker to use to draw the test points.
**kwargs: Dict,
Arguments to pass to the subplots.
Raises
------------------------------
ValueError,
If edge fitting was not yet executed.
ValueError,
If given k is greater than maximum supported value (10).
Returns
------------------------------
Figure and Axis of the plot.
"""
if self._node_embedding is None:
raise ValueError(
"Node fitting must be executed before plot."
)
components, components_number, _, _ = self._graph.connected_components()
sizes = np.bincount(components, minlength=components_number)
if self._subsampled_node_ids is not None:
components = components[self._subsampled_node_ids]
return self._plot_types(
"Components",
self._node_embedding.values,
types=components,
type_labels=np.array([
"Size {}".format(size)
for size in sizes
]),
legend_title=legend_title,
k=k,
figure=figure,
axes=axes,
scatter_kwargs=scatter_kwargs,
other_label=other_label,
train_indices=train_indices,
test_indices=test_indices,
train_marker=train_marker,
test_marker=test_marker,
**kwargs
)
def plot_node_degrees(
self,
figure: Figure = None,
axes: Axes = None,
scatter_kwargs: Dict = None,
train_indices: np.ndarray = None,
test_indices: np.ndarray = None,
train_marker: str = "o",
test_marker: str = "X",
use_log_scale: bool = True,
**kwargs: Dict
):
"""Plot node degrees heatmap.
Parameters
------------------------------
figure: Figure = None,
Figure to use to plot. If None, a new one is created using the
provided kwargs.
axes: Axes = None,
Axes to use to plot. If None, a new one is created using the
provided kwargs.
scatter_kwargs: Dict = None,
Kwargs to pass to the scatter plot call.
train_indices: np.ndarray = None,
Indices to draw using the training marker.
If None, all points are drawn using the training marker.
test_indices: np.ndarray = None,
Indices to draw using the test marker.
If None, while providing the train indices,
train_marker: str = "o",
The marker to use to draw the training points.
test_marker: str = "X",
The marker to use to draw the test points.
use_log_scale: bool = True,
Whether to use log scale.
**kwargs: Dict,
Additional kwargs for the subplots.
Raises
------------------------------
ValueError,
If edge fitting was not yet executed.
Returns
------------------------------
Figure and Axis of the plot.
"""
if self._node_embedding is None:
raise ValueError(
"Node fitting must be executed before plot."
)
degrees = self._graph.degrees()
if self._subsampled_node_ids is not None:
degrees = degrees[self._subsampled_node_ids]
figure, axes, scatter = self._plot_scatter(
"Node degrees",
self._node_embedding.values,
colors=degrees,
figure=figure,
axes=axes,
scatter_kwargs={
**({} if scatter_kwargs is None else scatter_kwargs),
"cmap": plt.cm.get_cmap('RdYlBu'),
**({"norm": LogNorm()} if use_log_scale else {})
},
train_indices=train_indices,
test_indices=test_indices,
train_marker=train_marker,
test_marker=test_marker,
**kwargs
)
color_bar = figure.colorbar(scatter[0], ax=axes)
color_bar.set_alpha(1)
color_bar.draw_all()
return figure, axes
def plot_edge_types(
self,
edge_type_predictions: List[int] = None,
k: int = 9,
figure: Figure = None,
axes: Axes = None,
scatter_kwargs: Dict = None,
other_label: str = "Other",
legend_title: str = "Edge types",
train_indices: np.ndarray = None,
test_indices: np.ndarray = None,
train_marker: str = "o",
test_marker: str = "X",
**kwargs: Dict
):
"""Plot common edge types of provided graph.
Parameters
------------------------------
edge_type_predictions: List[int] = None,
Predictions of the edge types.
k: int = 9,
Number of edge types to visualize.
figure: Figure = None,
Figure to use to plot. If None, a new one is created using the
provided kwargs.
axes: Axes = None,
Axes to use to plot. If None, a new one is created using the
provided kwargs.
scatter_kwargs: Dict = None,
Kwargs to pass to the scatter plot call.
other_label: str = "Other",
Label to use for edges below the top k threshold.
legend_title: str = "Edge types",
Title for the legend.
train_indices: np.ndarray = None,
Indices to draw using the training marker.
If None, all points are drawn using the training marker.
test_indices: np.ndarray = None,
Indices to draw using the test marker.
If None, while providing the train indices,
train_marker: str = "o",
The marker to use to draw the training points.
test_marker: str = "X",
The marker to use to draw the test points.
**kwargs: Dict,
Additional kwargs for the subplots.
Raises
------------------------------
ValueError,
If the graph does not have edge types.
ValueError,
If edge fitting was not yet executed.
ValueError,
If given k is greater than maximum supported value (10).
Returns
------------------------------
Figure and Axis of the plot.
"""
if not self._graph.has_edge_types():
raise ValueError(
"The graph does not have edge types!"
)
if self._edge_embedding is None:
raise ValueError(
"Edge fitting was not yet executed!"
)
edge_types = self._graph.get_edge_types()
if self._subsampled_node_ids is not None:
edge_types = edge_types[self._subsampled_edge_ids]
return self._plot_types(
"Edge types",
self._edge_embedding.values,
types=edge_types,
type_labels=np.array(self._graph.get_edge_type_names()),
legend_title=legend_title,
predictions=edge_type_predictions,
k=k,
figure=figure,
axes=axes,
scatter_kwargs=scatter_kwargs,
other_label=other_label,
train_indices=train_indices,
test_indices=test_indices,
train_marker=train_marker,
test_marker=test_marker,
**kwargs
)
def plot_edge_weights(
self,
figure: Figure = None,
axes: Axes = None,
scatter_kwargs: Dict = None,
train_indices: np.ndarray = None,
test_indices: np.ndarray = None,
train_marker: str = "o",
test_marker: str = "X",
**kwargs: Dict
):
"""Plot common edge types of provided graph.
Parameters
------------------------------
figure: Figure = None,
Figure to use to plot. If None, a new one is created using the
provided kwargs.
axes: Axes = None,
Axes to use to plot. If None, a new one is created using the
provided kwargs.
scatter_kwargs: Dict = None,
Kwargs to pass to the scatter plot call.
train_indices: np.ndarray = None,
Indices to draw using the training marker.
If None, all points are drawn using the training marker.
test_indices: np.ndarray = None,
Indices to draw using the test marker.
If None, while providing the train indices,
train_marker: str = "o",
The marker to use to draw the training points.
test_marker: str = "X",
The marker to use to draw the test points.
**kwargs: Dict,
Additional kwargs for the subplots.
Raises
------------------------------
ValueError,
If edge fitting was not yet executed.
Returns
------------------------------
Figure and Axis of the plot.
"""
if not self._graph.has_weights():
raise ValueError(
"The graph does not have edge weights!"
)
if self._edge_embedding is None:
raise ValueError(
"Edge fitting must be executed before plot."
)
weights = self._graph.get_weights()
if self._subsampled_node_ids is not None:
weights = weights[self._subsampled_node_ids]
figure, axes, scatter = self._plot_scatter(
"Edge weights",
self._node_embedding.values,
colors=weights,
figure=figure,
axes=axes,
scatter_kwargs={
**({} if scatter_kwargs is None else scatter_kwargs),
"cmap": plt.cm.get_cmap('RdYlBu')
},
train_indices=train_indices,
test_indices=test_indices,
train_marker=train_marker,
test_marker=test_marker,
**kwargs
)
color_bar = figure.colorbar(scatter[0], ax=axes)
color_bar.set_alpha(1)
color_bar.draw_all()
return figure, axes
| [
"matplotlib.legend_handler.HandlerTuple",
"matplotlib.patches.Rectangle",
"numpy.unique",
"sklearn.decomposition.PCA",
"tsnecuda.TSNE",
"numpy.isin",
"ddd_subplots.subplots",
"multiprocessing.cpu_count",
"numpy.array",
"numpy.zeros",
"numpy.random.RandomState",
"matplotlib.pyplot.cm.get_cmap",... | [((9087, 9114), 'numpy.arange', 'np.arange', (['args[0].shape[0]'], {}), '(args[0].shape[0])\n', (9096, 9114), True, 'import numpy as np\n'), ((9138, 9184), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'self._random_state'}), '(seed=self._random_state)\n', (9159, 9184), True, 'import numpy as np\n'), ((17775, 17800), 'numpy.zeros', 'np.zeros', (['points.shape[0]'], {}), '(points.shape[0])\n', (17783, 17800), True, 'import numpy as np\n'), ((18223, 18363), 'numpy.array', 'np.array', (["['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple',\n 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan']"], {}), "(['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple',\n 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan'])\n", (18231, 18363), True, 'import numpy as np\n'), ((20233, 20324), 'matplotlib.patches.Rectangle', 'matplotlib.patches.Rectangle', (['(0, 0)', '(1)', '(1)'], {'fill': '(False)', 'edgecolor': '"""none"""', 'visible': '(False)'}), "((0, 0), 1, 1, fill=False, edgecolor='none',\n visible=False)\n", (20261, 20324), False, 'import matplotlib\n'), ((25014, 25029), 'numpy.array', 'np.array', (['types'], {}), '(types)\n', (25022, 25029), True, 'import numpy as np\n'), ((25167, 25212), 'numpy.bincount', 'np.bincount', (['types'], {'minlength': 'number_of_types'}), '(types, minlength=number_of_types)\n', (25178, 25212), True, 'import numpy as np\n'), ((36374, 36426), 'numpy.bincount', 'np.bincount', (['components'], {'minlength': 'components_number'}), '(components, minlength=components_number)\n', (36385, 36426), True, 'import numpy as np\n'), ((20534, 20563), 'numpy.unique', 'np.unique', (['colors[train_mask]'], {}), '(colors[train_mask])\n', (20543, 20563), True, 'import numpy as np\n'), ((20597, 20625), 'numpy.unique', 'np.unique', (['colors[test_mask]'], {}), '(colors[test_mask])\n', (20606, 20625), True, 'import numpy as np\n'), ((20759, 20776), 'numpy.unique', 
'np.unique', (['colors'], {}), '(colors)\n', (20768, 20776), True, 'import numpy as np\n'), ((25057, 25073), 'numpy.unique', 'np.unique', (['types'], {}), '(types)\n', (25066, 25073), True, 'import numpy as np\n'), ((5021, 5085), 'tsnecuda.TSNE', 'CUDATSNE', ([], {'n_components': '(2)', 'random_seed': 'random_state', 'verbose': '(True)'}), '(n_components=2, random_seed=random_state, verbose=True)\n', (5029, 5085), True, 'from tsnecuda import TSNE as CUDATSNE\n'), ((7379, 7436), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_components', 'random_state': 'random_state'}), '(n_components=n_components, random_state=random_state)\n', (7382, 7436), False, 'from sklearn.decomposition import PCA\n'), ((17133, 17204), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '(**{**GraphVisualization.DEFAULT_SUBPLOT_KWARGS, **kwargs})\n', (17145, 17204), True, 'import matplotlib.pyplot as plt\n'), ((17312, 17382), 'ddd_subplots.subplots', 'subplots_3d', ([], {}), '(**{**GraphVisualization.DEFAULT_SUBPLOT_KWARGS, **kwargs})\n', (17323, 17382), True, 'from ddd_subplots import subplots as subplots_3d\n'), ((16864, 16900), 'numpy.isin', 'np.isin', (['train_indices', 'test_indices'], {}), '(train_indices, test_indices)\n', (16871, 16900), True, 'import numpy as np\n'), ((39458, 39483), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""RdYlBu"""'], {}), "('RdYlBu')\n", (39473, 39483), True, 'import matplotlib.pyplot as plt\n'), ((45519, 45544), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""RdYlBu"""'], {}), "('RdYlBu')\n", (45534, 45544), True, 'import matplotlib.pyplot as plt\n'), ((39513, 39522), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {}), '()\n', (39520, 39522), False, 'from matplotlib.colors import ListedColormap, LogNorm\n'), ((5500, 5511), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (5509, 5511), False, 'from multiprocessing import cpu_count\n'), ((10281, 10307), 'matplotlib.legend_handler.HandlerTuple', 'HandlerTuple', 
([], {'ndivide': 'None'}), '(ndivide=None)\n', (10293, 10307), False, 'from matplotlib.legend_handler import HandlerBase, HandlerTuple\n'), ((5972, 5983), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (5981, 5983), False, 'from multiprocessing import cpu_count\n')] |
import math
from typing import Tuple
import numpy as np
from PIL import Image
# region Shift Hue
def rgb_to_hsv(rgb):
    """Vectorized RGB(A) -> HSV(A) conversion.

    :param rgb: array of shape (..., 3) or (..., 4); any integer/float dtype.
    :return: float array of the same shape; hue in [0, 1), saturation in
        [0, 1], value in the input's value range. A fourth (alpha) channel,
        when present, is copied through unchanged.
    """
    rgb = rgb.astype('float')
    hsv = np.zeros_like(rgb)
    # Copies the alpha channel through when present; a no-op for plain RGB.
    # (The previous explicit `if rgb.shape[2] == 4` re-copy was redundant.)
    hsv[..., 3:] = rgb[..., 3:]
    r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
    maxc = np.max(rgb[..., :3], axis=-1)
    minc = np.min(rgb[..., :3], axis=-1)
    hsv[..., 2] = maxc
    # Gray pixels (maxc == minc) keep saturation 0 and hue 0; the mask also
    # protects every division below from dividing by zero.
    mask = maxc != minc
    hsv[mask, 1] = (maxc - minc)[mask] / maxc[mask]
    rc = np.zeros_like(r)
    gc = np.zeros_like(g)
    bc = np.zeros_like(b)
    rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask]
    gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask]
    bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask]
    # Pick the hue sector from whichever channel is the maximum.
    hsv[..., 0] = np.select(
        [r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc)
    hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0
    return hsv
# This isn't required by anything, but it does help
# me learn about how numpy works above this
def rgb_to_hsv2(r, g, b):
    """Scalar RGB -> HSV conversion (reference implementation for the
    vectorized version above).

    :param r: red component.
    :param g: green component.
    :param b: blue component.
    :return: (hue, saturation, value) with hue in [0, 1).
    """
    largest = max(r, g, b)
    smallest = min(r, g, b)
    value = largest
    # Gray: no chroma, hue is undefined and reported as 0.
    if smallest == largest:
        return 0.0, 0.0, value
    spread = largest - smallest
    saturation = spread / largest
    red_dist = (largest - r) / spread
    green_dist = (largest - g) / spread
    blue_dist = (largest - b) / spread
    # Hue sector depends on which channel dominates.
    if largest == r:
        hue = blue_dist - green_dist
    elif largest == g:
        hue = 2.0 + red_dist - blue_dist
    else:
        hue = 4.0 + green_dist - red_dist
    return (hue / 6.0) % 1.0, saturation, value
def hsv_to_rgb(hsv):
    """Vectorized HSV(A) -> RGB(A) conversion.

    :param hsv: float array of shape (..., 3) or (..., 4); hue in [0, 1),
        saturation in [0, 1], value in [0, 255].
    :return: uint8 array of the same shape. A fourth (alpha) channel, when
        present, is copied through unchanged.
    """
    rgb = np.empty_like(hsv)
    # Copies the alpha channel through when present; a no-op for plain HSV.
    # (The previous explicit `if hsv.shape[2] == 4` re-copy was redundant.)
    rgb[..., 3:] = hsv[..., 3:]
    h, s, v = hsv[..., 0], hsv[..., 1], hsv[..., 2]
    # Standard sector decomposition: i selects one of the six hue sectors,
    # f is the position inside the sector.
    i = (h * 6.0).astype('uint8')
    f = (h * 6.0) - i
    p = v * (1.0 - s)
    q = v * (1.0 - s * f)
    t = v * (1.0 - s * (1.0 - f))
    i = i % 6
    conditions = [s == 0.0, i == 1, i == 2, i == 3, i == 4, i == 5]
    rgb[..., 0] = np.select(conditions, [v, q, p, p, t, v], default=v)
    rgb[..., 1] = np.select(conditions, [v, v, v, q, p, p], default=t)
    rgb[..., 2] = np.select(conditions, [v, p, t, v, v, q], default=p)
    return rgb.astype('uint8')
def shift_hsv_raw(arr, hue: float = None, saturation: float = None, value: float = None):
    '''
    Shifts HSV by (amount).

    :param arr: RGB(A) pixel array.
    :param hue: hue rotation in degrees (wraps around), or None for no change.
    :param saturation: additive saturation shift, or None for no change.
    :param value: additive value (brightness) shift, or None for no change.
    :return: the shifted pixels converted back to an RGB(A) uint8 array.
    '''
    hsv = rgb_to_hsv(arr)
    # `is not None` replaces the previous `!= None`: identity comparison is
    # the idiomatic sentinel test and is safe with array-typed arguments.
    if hue is not None:
        hsv[..., 0] = (hsv[..., 0] - hue / 360.0) % 1.0
    if saturation is not None:
        hsv[..., 1] += saturation
    if value is not None:
        hsv[..., 2] += value
    rgb = hsv_to_rgb(hsv)
    return rgb
def shift_hsv(img: Image, hue: float = None, saturation: float = None, value: float = None, mode = 'RGBA') -> Image:
    '''
    Shift the hue/saturation/value of a PIL image.

    :param img: source image.
    :param hue: hue rotation in degrees, or None for no change.
    :param saturation: additive saturation shift, or None for no change.
    :param value: additive value shift, or None for no change.
    :param mode: PIL mode used to rebuild the output image.
    :return: a new, shifted PIL image.
    '''
    pixels = np.array(img)
    shifted_pixels = shift_hsv_raw(pixels, hue, saturation, value)
    return Image.fromarray(shifted_pixels, mode)
# endregion
# region Color thief
class cached_property(object):
    """Decorator that converts a method with a single ``self`` argument
    into a property cached on the instance: the wrapped function runs once,
    and its result is stored in the instance ``__dict__`` under the same
    name, shadowing the descriptor on subsequent accesses.
    """
    def __init__(self, func):
        self.func = func
    def __get__(self, instance, type):
        # Class-level access (instance is None) returns the descriptor
        # itself, mirroring functools.cached_property; previously this
        # raised AttributeError on None.__dict__.
        if instance is None:
            return self
        res = instance.__dict__[self.func.__name__] = self.func(instance)
        return res
def get_palette(image, color_count=10, quality=10):
    """Build a color palette for an image.

    Similar colors are clustered with the modified median cut algorithm
    (see :class:`MMCQ`).

    :param image: source PIL image.
    :param color_count: number of palette colors to produce.
    :param quality: sampling step; only every ``quality``-th pixel is read.
    :return: list of (r, g, b) palette colors.
    """
    image = image.convert('RGBA')
    width, height = image.size
    pixels = image.getdata()
    valid_pixels = []
    for index in range(0, width * height, quality):
        r, g, b, a = pixels[index]
        # Mostly-transparent pixels carry no color information.
        if a < 125:
            continue
        # Near-white pixels are treated as background and skipped.
        if r > 250 and g > 250 and b > 250:
            continue
        valid_pixels.append((r, g, b))
    # Cluster the sampled pixels with the median cut quantizer.
    cmap = MMCQ.quantize(valid_pixels, color_count)
    return cmap.palette
def get_color(image, quality=10):
    """Return the single dominant color of the image as an (r, g, b) triple."""
    # A small 5-color palette is enough; the first entry is the dominant one.
    return get_palette(image, 5, quality)[0]
class MMCQ(object):
    """Basic Python port of the MMCQ (modified median cut quantization)
    algorithm from the Leptonica library (http://www.leptonica.com/).
    """
    # Significant bits kept per channel: 24-bit RGB is reduced to a
    # 5-bits-per-channel histogram (32 x 32 x 32 buckets).
    SIGBITS = 5
    RSHIFT = 8 - SIGBITS
    # Safety bound on the number of median-cut iterations.
    MAX_ITERATION = 1000
    # Fraction of the requested colors produced while sorting boxes by
    # population only; the remainder is produced sorting by population*volume.
    FRACT_BY_POPULATIONS = 0.75
    @staticmethod
    def get_color_index(r, g, b):
        # Packs three quantized (SIGBITS-bit) channel values into one
        # integer histogram key.
        return (r << (2 * MMCQ.SIGBITS)) + (g << MMCQ.SIGBITS) + b
    @staticmethod
    def get_histo(pixels):
        """histo (1-d array, giving the number of pixels in each quantized
        region of color space)
        """
        histo = dict()
        for pixel in pixels:
            # Quantize each 8-bit channel down to SIGBITS bits.
            rval = pixel[0] >> MMCQ.RSHIFT
            gval = pixel[1] >> MMCQ.RSHIFT
            bval = pixel[2] >> MMCQ.RSHIFT
            index = MMCQ.get_color_index(rval, gval, bval)
            histo[index] = histo.setdefault(index, 0) + 1
        return histo
    @staticmethod
    def vbox_from_pixels(pixels, histo):
        # Computes the tight bounding box (in quantized color space) of all
        # the given pixels.
        rmin = 1000000
        rmax = 0
        gmin = 1000000
        gmax = 0
        bmin = 1000000
        bmax = 0
        for pixel in pixels:
            rval = pixel[0] >> MMCQ.RSHIFT
            gval = pixel[1] >> MMCQ.RSHIFT
            bval = pixel[2] >> MMCQ.RSHIFT
            rmin = min(rval, rmin)
            rmax = max(rval, rmax)
            gmin = min(gval, gmin)
            gmax = max(gval, gmax)
            bmin = min(bval, bmin)
            bmax = max(bval, bmax)
        return VBox(rmin, rmax, gmin, gmax, bmin, bmax, histo)
    @staticmethod
    def median_cut_apply(histo, vbox):
        # Splits a vbox in two along its longest color axis at the median of
        # the pixel population.  Returns (vbox1, vbox2); vbox2 may be None
        # when no split was possible.
        if not vbox.count:
            return (None, None)
        rw = vbox.r2 - vbox.r1 + 1
        gw = vbox.g2 - vbox.g1 + 1
        bw = vbox.b2 - vbox.b1 + 1
        maxw = max([rw, gw, bw])
        # only one pixel, no split
        if vbox.count == 1:
            return (vbox.copy, None)
        # Find the partial sum arrays along the selected axis:
        # partialsum[i] = number of pixels at or below plane i,
        # lookaheadsum[i] = number of pixels above plane i.
        total = 0
        sum_ = 0
        partialsum = {}
        lookaheadsum = {}
        do_cut_color = None
        if maxw == rw:
            do_cut_color = 'r'
            for i in range(vbox.r1, vbox.r2+1):
                sum_ = 0
                for j in range(vbox.g1, vbox.g2+1):
                    for k in range(vbox.b1, vbox.b2+1):
                        index = MMCQ.get_color_index(i, j, k)
                        sum_ += histo.get(index, 0)
                total += sum_
                partialsum[i] = total
        elif maxw == gw:
            do_cut_color = 'g'
            for i in range(vbox.g1, vbox.g2+1):
                sum_ = 0
                for j in range(vbox.r1, vbox.r2+1):
                    for k in range(vbox.b1, vbox.b2+1):
                        index = MMCQ.get_color_index(j, i, k)
                        sum_ += histo.get(index, 0)
                total += sum_
                partialsum[i] = total
        else:  # maxw == bw
            do_cut_color = 'b'
            for i in range(vbox.b1, vbox.b2+1):
                sum_ = 0
                for j in range(vbox.r1, vbox.r2+1):
                    for k in range(vbox.g1, vbox.g2+1):
                        index = MMCQ.get_color_index(j, k, i)
                        sum_ += histo.get(index, 0)
                total += sum_
                partialsum[i] = total
        for i, d in partialsum.items():
            lookaheadsum[i] = total - d
        # determine the cut planes: e.g. 'r' axis uses attributes r1 / r2
        dim1 = do_cut_color + '1'
        dim2 = do_cut_color + '2'
        dim1_val = getattr(vbox, dim1)
        dim2_val = getattr(vbox, dim2)
        for i in range(dim1_val, dim2_val+1):
            if partialsum[i] > (total / 2):
                # i is the median plane; bias the actual cut towards the
                # larger side so neither half degenerates.
                vbox1 = vbox.copy
                vbox2 = vbox.copy
                left = i - dim1_val
                right = dim2_val - i
                if left <= right:
                    d2 = min([dim2_val - 1, int(i + right / 2)])
                else:
                    d2 = max([dim1_val, int(i - 1 - left / 2)])
                # avoid 0-count boxes
                while not partialsum.get(d2, False):
                    d2 += 1
                count2 = lookaheadsum.get(d2)
                while not count2 and partialsum.get(d2-1, False):
                    d2 -= 1
                    count2 = lookaheadsum.get(d2)
                # set dimensions of the two halves: vbox1 ends at d2,
                # vbox2 starts right after it.
                setattr(vbox1, dim2, d2)
                setattr(vbox2, dim1, getattr(vbox1, dim2) + 1)
                return (vbox1, vbox2)
        return (None, None)
    @staticmethod
    def quantize(pixels, max_color):
        """Quantize.

        :param pixels: a list of pixel in the form (r, g, b)
        :param max_color: max number of colors
        :return: a :class:`CMap` holding the quantized palette.
        """
        if not pixels:
            raise Exception('Empty pixels when quantize.')
        if max_color < 2 or max_color > 256:
            raise Exception('Wrong number of max colors when quantize.')
        histo = MMCQ.get_histo(pixels)
        # check that we aren't below maxcolors already
        # NOTE(review): this branch is unimplemented (pass) — even when the
        # histogram already holds few enough colors, the full median cut
        # below still runs.
        if len(histo) <= max_color:
            # generate the new colors from the histo and return
            pass
        # get the beginning vbox from the colors
        vbox = MMCQ.vbox_from_pixels(pixels, histo)
        pq = PQueue(lambda x: x.count)
        pq.push(vbox)
        # inner function to do the iteration: repeatedly pop the "largest"
        # box (per the queue's sort key) and split it until `target` boxes
        # exist or the iteration budget is exhausted.
        def iter_(lh, target):
            n_color = 1
            n_iter = 0
            while n_iter < MMCQ.MAX_ITERATION:
                vbox = lh.pop()
                if not vbox.count:  # just put it back
                    lh.push(vbox)
                    n_iter += 1
                    continue
                # do the cut
                vbox1, vbox2 = MMCQ.median_cut_apply(histo, vbox)
                if not vbox1:
                    raise Exception("vbox1 not defined; shouldn't happen!")
                lh.push(vbox1)
                if vbox2:  # vbox2 can be null
                    lh.push(vbox2)
                    n_color += 1
                if n_color >= target:
                    return
                if n_iter > MMCQ.MAX_ITERATION:
                    return
                n_iter += 1
        # first set of colors, sorted by population
        iter_(pq, MMCQ.FRACT_BY_POPULATIONS * max_color)
        # Re-sort by the product of pixel occupancy times the size in
        # color space.
        pq2 = PQueue(lambda x: x.count * x.volume)
        while pq.size():
            pq2.push(pq.pop())
        # next set - generate the median cuts using the (npix * vol) sorting.
        iter_(pq2, max_color - pq2.size())
        # calculate the actual colors
        cmap = CMap()
        while pq2.size():
            cmap.push(pq2.pop())
        return cmap
class VBox(object):
    """3d color space box.

    Bounds (r1..r2, g1..g2, b1..b2) are inclusive and expressed in the
    quantized (SIGBITS-bit) color space; ``histo`` maps packed color
    indices to pixel counts and is shared between boxes.
    """
    def __init__(self, r1, r2, g1, g2, b1, b2, histo):
        self.r1 = r1
        self.r2 = r2
        self.g1 = g1
        self.g2 = g2
        self.b1 = b1
        self.b2 = b2
        self.histo = histo
    @cached_property
    def volume(self):
        # Number of quantized color cells covered by the box (inclusive
        # bounds, hence the +1 on each axis).  Cached: bounds are expected
        # not to change after this is first read.
        sub_r = self.r2 - self.r1
        sub_g = self.g2 - self.g1
        sub_b = self.b2 - self.b1
        return (sub_r + 1) * (sub_g + 1) * (sub_b + 1)
    @property
    def copy(self):
        # Fresh VBox with the same bounds; the histogram is shared, not
        # copied.
        return VBox(self.r1, self.r2, self.g1, self.g2,
                    self.b1, self.b2, self.histo)
    @cached_property
    def avg(self):
        # Population-weighted average color of the box, scaled back up to
        # 8-bit range (`mult` undoes the SIGBITS quantization; +0.5 centers
        # each cell).
        ntot = 0
        mult = 1 << (8 - MMCQ.SIGBITS)
        r_sum = 0
        g_sum = 0
        b_sum = 0
        for i in range(self.r1, self.r2 + 1):
            for j in range(self.g1, self.g2 + 1):
                for k in range(self.b1, self.b2 + 1):
                    histoindex = MMCQ.get_color_index(i, j, k)
                    hval = self.histo.get(histoindex, 0)
                    ntot += hval
                    r_sum += hval * (i + 0.5) * mult
                    g_sum += hval * (j + 0.5) * mult
                    b_sum += hval * (k + 0.5) * mult
        if ntot:
            r_avg = int(r_sum / ntot)
            g_avg = int(g_sum / ntot)
            b_avg = int(b_sum / ntot)
        else:
            # Empty box: fall back to the geometric center of the box.
            r_avg = int(mult * (self.r1 + self.r2 + 1) / 2)
            g_avg = int(mult * (self.g1 + self.g2 + 1) / 2)
            b_avg = int(mult * (self.b1 + self.b2 + 1) / 2)
        return r_avg, g_avg, b_avg
    def contains(self, pixel):
        # True when the (8-bit) pixel falls inside this box once quantized.
        rval = pixel[0] >> MMCQ.RSHIFT
        gval = pixel[1] >> MMCQ.RSHIFT
        bval = pixel[2] >> MMCQ.RSHIFT
        return all([
            rval >= self.r1,
            rval <= self.r2,
            gval >= self.g1,
            gval <= self.g2,
            bval >= self.b1,
            bval <= self.b2,
        ])
    @cached_property
    def count(self):
        # Total pixel population of the box according to the histogram.
        # Cached, so it reflects the bounds at first access.
        npix = 0
        for i in range(self.r1, self.r2 + 1):
            for j in range(self.g1, self.g2 + 1):
                for k in range(self.b1, self.b2 + 1):
                    index = MMCQ.get_color_index(i, j, k)
                    npix += self.histo.get(index, 0)
        return npix
class CMap(object):
    """Color map: the palette produced by the quantizer, kept as a queue of
    vboxes ordered by population * volume."""
    def __init__(self):
        self.vboxes = PQueue(lambda x: x['vbox'].count * x['vbox'].volume)
    @property
    def palette(self):
        """The palette as a list of (r, g, b) average colors."""
        return self.vboxes.map(lambda x: x['color'])
    def push(self, vbox):
        # Each entry pairs the box with its pre-computed average color.
        self.vboxes.push({
            'vbox': vbox,
            'color': vbox.avg,
        })
    def size(self):
        return self.vboxes.size()
    def nearest(self, color):
        """Return the palette color with the smallest Euclidean distance
        to ``color``."""
        best_distance = None
        best_color = None
        for index in range(self.vboxes.size()):
            candidate = self.vboxes.peek(index)
            distance = math.sqrt(
                math.pow(color[0] - candidate['color'][0], 2) +
                math.pow(color[1] - candidate['color'][1], 2) +
                math.pow(color[2] - candidate['color'][2], 2)
            )
            if best_distance is None or distance < best_distance:
                best_distance = distance
                best_color = candidate['color']
        return best_color
    def map(self, color):
        """Map ``color`` to the palette color of the box containing it,
        falling back to the nearest palette color."""
        for index in range(self.vboxes.size()):
            candidate = self.vboxes.peek(index)
            if candidate['vbox'].contains(color):
                return candidate['color']
        return self.nearest(color)
class PQueue(object):
    """Simple priority queue backed by a lazily sorted list.

    Items are kept unsorted until the first read; ``pop`` then removes the
    item with the *largest* sort key (list sorted ascending, popped from
    the end).
    """
    def __init__(self, sort_key):
        self.sort_key = sort_key
        self.contents = []
        self._sorted = False
    def sort(self):
        # Ascending sort by key; flag avoids re-sorting until the next push.
        self.contents.sort(key=self.sort_key)
        self._sorted = True
    def push(self, o):
        self.contents.append(o)
        self._sorted = False
    def peek(self, index=None):
        """Return (without removing) the item at ``index`` in sorted order;
        defaults to the last (largest-key) item."""
        if not self._sorted:
            self.sort()
        if index is None:
            return self.contents[-1]
        return self.contents[index]
    def pop(self):
        """Remove and return the item with the largest sort key."""
        if not self._sorted:
            self.sort()
        return self.contents.pop()
    def size(self):
        return len(self.contents)
    def map(self, f):
        """Return ``f`` applied to every item, in current storage order."""
        return [f(item) for item in self.contents]
# endregion
# region Duotone
def duotone(img: Image, light_color: Tuple[int, int, int], dark_color: Tuple[int, int, int], contrast: float = 0.5) -> Image:
    '''
    Map an image onto a two-color gradient.

    Pixel luminosity (Rec. 601 weights) picks the blend position: bright
    pixels map to light_color, dark pixels to dark_color.

    :param img: source image; converted to RGB internally.
    :param light_color: RGB color used for the brightest pixels.
    :param dark_color: RGB color used for the darkest pixels.
    :param contrast: 0.5 leaves luminosity untouched; other values stretch
        it around the midpoint before blending.
    :return: the recolored PIL image.
    '''
    img = img.convert('RGB')
    rgb = np.array(img)
    out = np.zeros_like(rgb)
    r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
    if contrast != 0.5:
        contrast_norm = (1 + contrast - 0.5)
        # BUGFIX: the stretched channels are in normalized [0, 1] space, but
        # were previously clipped to [0, 255] (a no-op) and left normalized,
        # so the luminosity ratio below collapsed to ~0 and the output was
        # almost entirely dark_color whenever contrast != 0.5. Clip to
        # [0, 1] and scale back to the 8-bit range instead.
        r = np.clip((r / 255.0 - 0.5) * contrast_norm + 0.5, 0.0, 1.0) * 255.0
        g = np.clip((g / 255.0 - 0.5) * contrast_norm + 0.5, 0.0, 1.0) * 255.0
        b = np.clip((b / 255.0 - 0.5) * contrast_norm + 0.5, 0.0, 1.0) * 255.0
    # Rec. 601 luma, floored to an integer as in the original duotone-py.
    average = np.floor(0.299 * r + 0.587 * g + 0.114 * b)
    ratio = average / 255.0
    out[..., 0] = np.floor(light_color[0] * ratio + dark_color[0] * (1 - ratio))
    out[..., 1] = np.floor(light_color[1] * ratio + dark_color[1] * (1 - ratio))
    out[..., 2] = np.floor(light_color[2] * ratio + dark_color[2] * (1 - ratio))
    return Image.fromarray(out)
# endregion
| [
"numpy.clip",
"PIL.Image.fromarray",
"numpy.select",
"math.pow",
"numpy.floor",
"numpy.max",
"numpy.array",
"numpy.empty_like",
"numpy.min",
"numpy.zeros_like"
] | [((160, 178), 'numpy.zeros_like', 'np.zeros_like', (['rgb'], {}), '(rgb)\n', (173, 178), True, 'import numpy as np\n'), ((334, 363), 'numpy.max', 'np.max', (['rgb[..., :3]'], {'axis': '(-1)'}), '(rgb[..., :3], axis=-1)\n', (340, 363), True, 'import numpy as np\n'), ((375, 404), 'numpy.min', 'np.min', (['rgb[..., :3]'], {'axis': '(-1)'}), '(rgb[..., :3], axis=-1)\n', (381, 404), True, 'import numpy as np\n'), ((513, 529), 'numpy.zeros_like', 'np.zeros_like', (['r'], {}), '(r)\n', (526, 529), True, 'import numpy as np\n'), ((539, 555), 'numpy.zeros_like', 'np.zeros_like', (['g'], {}), '(g)\n', (552, 555), True, 'import numpy as np\n'), ((565, 581), 'numpy.zeros_like', 'np.zeros_like', (['b'], {}), '(b)\n', (578, 581), True, 'import numpy as np\n'), ((762, 848), 'numpy.select', 'np.select', (['[r == maxc, g == maxc]', '[bc - gc, 2.0 + rc - bc]'], {'default': '(4.0 + gc - rc)'}), '([r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 +\n gc - rc)\n', (771, 848), True, 'import numpy as np\n'), ((1451, 1469), 'numpy.empty_like', 'np.empty_like', (['hsv'], {}), '(hsv)\n', (1464, 1469), True, 'import numpy as np\n'), ((1852, 1904), 'numpy.select', 'np.select', (['conditions', '[v, q, p, p, t, v]'], {'default': 'v'}), '(conditions, [v, q, p, p, t, v], default=v)\n', (1861, 1904), True, 'import numpy as np\n'), ((1923, 1975), 'numpy.select', 'np.select', (['conditions', '[v, v, v, q, p, p]'], {'default': 't'}), '(conditions, [v, v, v, q, p, p], default=t)\n', (1932, 1975), True, 'import numpy as np\n'), ((1994, 2046), 'numpy.select', 'np.select', (['conditions', '[v, p, t, v, v, q]'], {'default': 'p'}), '(conditions, [v, p, t, v, v, q], default=p)\n', (2003, 2046), True, 'import numpy as np\n'), ((2623, 2636), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2631, 2636), True, 'import numpy as np\n'), ((2712, 2746), 'PIL.Image.fromarray', 'Image.fromarray', (['shifted_arr', 'mode'], {}), '(shifted_arr, mode)\n', (2727, 2746), False, 'from PIL import 
Image\n'), ((15074, 15087), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (15082, 15087), True, 'import numpy as np\n'), ((15098, 15116), 'numpy.zeros_like', 'np.zeros_like', (['rgb'], {}), '(rgb)\n', (15111, 15116), True, 'import numpy as np\n'), ((15668, 15711), 'numpy.floor', 'np.floor', (['(0.299 * r + 0.587 * g + 0.114 * b)'], {}), '(0.299 * r + 0.587 * g + 0.114 * b)\n', (15676, 15711), True, 'import numpy as np\n'), ((15759, 15821), 'numpy.floor', 'np.floor', (['(light_color[0] * ratio + dark_color[0] * (1 - ratio))'], {}), '(light_color[0] * ratio + dark_color[0] * (1 - ratio))\n', (15767, 15821), True, 'import numpy as np\n'), ((15840, 15902), 'numpy.floor', 'np.floor', (['(light_color[1] * ratio + dark_color[1] * (1 - ratio))'], {}), '(light_color[1] * ratio + dark_color[1] * (1 - ratio))\n', (15848, 15902), True, 'import numpy as np\n'), ((15921, 15983), 'numpy.floor', 'np.floor', (['(light_color[2] * ratio + dark_color[2] * (1 - ratio))'], {}), '(light_color[2] * ratio + dark_color[2] * (1 - ratio))\n', (15929, 15983), True, 'import numpy as np\n'), ((15996, 16016), 'PIL.Image.fromarray', 'Image.fromarray', (['out'], {}), '(out)\n', (16011, 16016), False, 'from PIL import Image\n'), ((15289, 15349), 'numpy.clip', 'np.clip', (['((r / 255.0 - 0.5) * contrast_norm + 0.5)', '(0.0)', '(255.0)'], {}), '((r / 255.0 - 0.5) * contrast_norm + 0.5, 0.0, 255.0)\n', (15296, 15349), True, 'import numpy as np\n'), ((15362, 15422), 'numpy.clip', 'np.clip', (['((g / 255.0 - 0.5) * contrast_norm + 0.5)', '(0.0)', '(255.0)'], {}), '((g / 255.0 - 0.5) * contrast_norm + 0.5, 0.0, 255.0)\n', (15369, 15422), True, 'import numpy as np\n'), ((15435, 15495), 'numpy.clip', 'np.clip', (['((b / 255.0 - 0.5) * contrast_norm + 0.5)', '(0.0)', '(255.0)'], {}), '((b / 255.0 - 0.5) * contrast_norm + 0.5, 0.0, 255.0)\n', (15442, 15495), True, 'import numpy as np\n'), ((13690, 13730), 'math.pow', 'math.pow', (["(color[2] - vbox['color'][2])", '(2)'], {}), "(color[2] - 
vbox['color'][2], 2)\n", (13698, 13730), False, 'import math\n'), ((13572, 13612), 'math.pow', 'math.pow', (["(color[0] - vbox['color'][0])", '(2)'], {}), "(color[0] - vbox['color'][0], 2)\n", (13580, 13612), False, 'import math\n'), ((13631, 13671), 'math.pow', 'math.pow', (["(color[1] - vbox['color'][1])", '(2)'], {}), "(color[1] - vbox['color'][1], 2)\n", (13639, 13671), False, 'import math\n')] |
import cv2
import copy
import xxhash
import numpy as np
import imgui
import OpenGL.GL as gl
from .static_vars import *
from timeit import default_timer as timer
from . import imgui_ext
import math
from typing import *
from dataclasses import dataclass
_start = timer()  # module import timestamp; not read anywhere in the code visible here
# Feature flags: approximate (sampled) image hashing, and GPU allocation logging.
USE_FAST_HASH = True
LOG_GPU_USAGE = False
"""
Some type synonyms in order to make the code easier to understand
"""
TextureId = int  # an OpenGL texture id
Image_RGB = np.ndarray  # denotes a RGB image
Image_AnyType = np.ndarray  # denotes any image contained in a np.ndarray
ImageAddress = int  # the image memory address, used as a GPU-cache key
def _is_close(a: float, b: float) -> bool:
return math.fabs(a - b) < 1E-6
# noinspection PyShadowingNames
class ImageAdjustments:
    """Brightness/contrast style adjustment: out = (image + delta) * factor."""
    factor: float
    delta: float

    def __init__(self, factor: float = 1., delta: float = 0.):
        self.factor = factor
        self.delta = delta

    def is_none(self):
        """True when this adjustment is the identity (factor 1, delta 0)."""
        return _is_close(self.factor, 1.) and _is_close(self.delta, 0.)

    def adjust(self, image):
        """Apply the adjustment; the input dtype is preserved."""
        if self.is_none():
            return image
        shifted_scaled = (image + self.delta) * self.factor
        return shifted_scaled.astype(image.dtype)

    def __hash__(self):
        return hash((self.factor, self.delta))

    def __eq__(self, other):
        return (self.factor, self.delta) == (other.factor, other.delta)
def _hash_image(image):
    """
    Hash an image, either approximately or exactly.

    - imgui_cv.USE_FAST_HASH is True: hash 100 pseudo-randomly selected
      pixels (fixed seed, so a given image always hashes identically)
    - otherwise: hash the whole buffer with xxhash (fast full hash)

    :param image: numpy array
    :return: hash
    """
    if not USE_FAST_HASH:
        # cf https://stackoverflow.com/questions/16589791/most-efficient-property-to-hash-for-numpy-array
        hasher = xxhash.xxh64()
        hasher.update(image)
        digest = hasher.intdigest()
        hasher.reset()
        return digest
    sample_rng = np.random.RandomState(89)
    sample_indices = sample_rng.randint(low=0, high=image.size, size=100)
    sampled = image.flat[sample_indices]
    return hash(tuple(sampled.data))
class ImageAndAdjustments:
    """
    An image bundled with the adjustments to apply before display.

    Hashing combines the adjustment hash with the (possibly sampled) image
    hash, making instances usable as cheap GPU-upload cache keys.
    """
    image: Image_AnyType
    # Fixed: this annotation previously read `image_adjustment` (singular)
    # while __init__ assigns `image_adjustments`.
    image_adjustments: ImageAdjustments

    def __init__(self, image, image_adjustments):
        self.image = image
        self.image_adjustments = image_adjustments

    def adjusted_image(self):
        """Return the image with the adjustments applied."""
        return self.image_adjustments.adjust(self.image)

    def __hash__(self):
        hash_adjust = hash(self.image_adjustments)
        hash_image = _hash_image(self.image)
        result = hash((hash_adjust, hash_image))
        return result

    def __eq__(self, other):
        """
        For performance reasons, the __eq__ operator is made to take only the hash into account.
        @see _image_to_texture()
        """
        hash1 = hash(self)
        hash2 = hash(other)
        return hash1 == hash2
class SizePixel:
    """A width/height pair expressed in integer pixels."""
    width: int
    height: int

    def __init__(self, width=0, height=0):
        self.width = int(width)
        self.height = int(height)

    @staticmethod
    def from_image(image):
        """Build the native size of a numpy image (shape is (height, width, ...))."""
        size = SizePixel()
        size.width = image.shape[1]
        size.height = image.shape[0]
        return size

    def as_tuple_width_height(self):
        return (self.width, self.height)
# ALL_TEXTURES (defined below) maps image addresses to their GPU-resident
# textures plus the time they were last displayed.
TimeSecond = float  # wall-clock seconds, as returned by timeit.default_timer
NB_GEN_TEXTURES = 0  # running count of OpenGL textures allocated (debug statistic)
def _generate_texture_id() -> TextureId:
    """Allocate a fresh OpenGL texture id, optionally logging the running count."""
    texture_id = gl.glGenTextures(1)
    if LOG_GPU_USAGE:
        global NB_GEN_TEXTURES
        NB_GEN_TEXTURES += 1
        print(f"NB_GEN_TEXTURES = {NB_GEN_TEXTURES}")
    return texture_id
@dataclass
class ImageStoredOnGpu:
    """Cache entry: an uploaded image, its GPU texture id and its last access time."""
    image_and_adjustments: ImageAndAdjustments
    texture_id: TextureId
    time_last_access: TimeSecond = -10000.

    def __init__(self, image_and_adjustments: ImageAndAdjustments, time_last_access):
        # The hand-written __init__ takes precedence over the dataclass-generated
        # one; it allocates a texture id at construction time.
        self.texture_id = _generate_texture_id()
        self.image_and_adjustments = image_and_adjustments
        self.time_last_access = time_last_access
# Cache of every image currently uploaded to the GPU, keyed by the image's
# memory address (id() of the array, or a caller-supplied address).
AllTexturesDict = Dict[ImageAddress, ImageStoredOnGpu]
ALL_TEXTURES: AllTexturesDict = {}
def _to_rgb_image(img: Image_AnyType) -> Image_RGB:
    """
    Convert a supported image into a 3-channel uint8 image.

    Supported inputs:
      - 1 channel, uint8 or float32/float64 (floats assumed in [0, 1])
      - 3 channels, uint8 (returned unchanged)
      - 4 channels, uint8 (alpha channel dropped)

    :raises ValueError: for any other dtype/channel combination.
        (Previously some unsupported inputs silently returned None,
        inconsistent with the ValueError already raised for non-uint8
        multi-channel images.)
    """
    if len(img.shape) >= 3:
        channels = img.shape[2]
    else:
        channels = 1
    if channels == 1:
        if img.dtype == np.uint8:
            return cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        if img.dtype in [np.float32, np.float64]:
            img_grey = np.uint8(img * 255.)
            return cv2.cvtColor(img_grey, cv2.COLOR_GRAY2BGR)
        raise ValueError("imgui_cv: unsupported dtype for single channel image: {}".format(img.dtype))
    if channels == 3:
        if not img.dtype == np.uint8:
            raise ValueError("imgui_cv does only support uint8 images with multiple channels")
        return img
    if channels == 4:
        if not img.dtype == np.uint8:
            raise ValueError("imgui_cv does only support uint8 images with multiple channels")
        # we do not handle alpha very well...
        return cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
    raise ValueError("imgui_cv: unsupported channel count: {}".format(channels))
# Running count of texture uploads performed (printed when LOG_GPU_USAGE is set).
NB_REFRESH_TEXTURES = 0
def _image_rgb_to_texture_impl(img_rgb: Image_RGB, texture_id: TextureId):
    """
    Performs the actual transfer to the gpu and returns a texture_id

    :param img_rgb: 3-channel uint8 image; bytes are read in BGR order (GL_BGR)
    :param texture_id: pre-allocated OpenGL texture to (re)fill
    :return: the same texture_id
    """
    # inspired from https://www.programcreek.com/python/example/95539/OpenGL.GL.glPixelStorei (example 3)
    if LOG_GPU_USAGE:
        global NB_REFRESH_TEXTURES
        NB_REFRESH_TEXTURES = NB_REFRESH_TEXTURES + 1
        print(f"NB_REFRESH_TEXTURES = {NB_REFRESH_TEXTURES}")
    width = img_rgb.shape[1]
    height = img_rgb.shape[0]
    # Rows are tightly packed (1-byte alignment) so arbitrary widths upload correctly.
    gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
    gl.glBindTexture(gl.GL_TEXTURE_2D, texture_id)
    gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
    # Upload: BGR source bytes, stored internally as RGB.
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGB, width, height, 0, gl.GL_BGR, gl.GL_UNSIGNED_BYTE, img_rgb)
    # Unbind to avoid leaking state into later GL calls.
    gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
    return texture_id
def _image_to_texture(
        image_and_adjustments: ImageAndAdjustments,
        always_refresh: bool,
        linked_user_image_address: ImageAddress
        ):
    """
    Upload an image to the GPU (when needed) and return its texture id.

    Some GPUs choke when too many textures are transferred, so a cache
    (ALL_TEXTURES), keyed by the image memory address, avoids redundant
    uploads; @see _hash_image() and ImageAndAdjustments.__eq__() for the
    cheap comparison machinery.

    :param image_and_adjustments: the image plus its display adjustments
    :param always_refresh: force a re-upload even on a cache hit
    :param linked_user_image_address: caller-supplied cache key; 0 means
        "use id() of the image array"
    :return: texture_id
    """
    now = timer()
    if linked_user_image_address != 0:
        cache_key = linked_user_image_address
    else:
        cache_key = id(image_and_adjustments.image)
    needs_upload = always_refresh
    if cache_key not in ALL_TEXTURES:
        ALL_TEXTURES[cache_key] = ImageStoredOnGpu(image_and_adjustments, now)
        needs_upload = True
    gpu_entry: ImageStoredOnGpu = ALL_TEXTURES[cache_key]
    gpu_entry.time_last_access = now
    if needs_upload:
        # Deep-copy so the cached entry cannot alias the caller's mutable image.
        snapshot = copy.deepcopy(image_and_adjustments)
        img_rgb = _to_rgb_image(snapshot.adjusted_image())
        _image_rgb_to_texture_impl(img_rgb, gpu_entry.texture_id)
    return gpu_entry.texture_id
def _clear_all_cv_textures(max_age_seconds: float = 0.3):
    """
    Drop GPU textures that were not displayed recently.

    Rebuilds the ALL_TEXTURES cache, keeping entries accessed within
    *max_age_seconds* and deleting the OpenGL textures of the rest.

    :param max_age_seconds: retention window (previously hard-coded to 0.3 s).
    """
    global ALL_TEXTURES
    all_textures_updated = {}
    textures_to_delete = []
    now = timer()
    for image_address, image_stored_on_gpu in ALL_TEXTURES.items():
        age_seconds = now - image_stored_on_gpu.time_last_access
        if age_seconds < max_age_seconds:
            all_textures_updated[image_address] = image_stored_on_gpu
        else:
            textures_to_delete.append(image_stored_on_gpu.texture_id)
    ALL_TEXTURES = all_textures_updated
    if len(textures_to_delete) > 0:
        gl.glDeleteTextures(textures_to_delete)
    # print("Delete {0} old texture(s), len={1}".format(len(textures_to_delete), len(ALL_TEXTURES)))
def _image_viewport_size(image, width=None, height=None):
    """
    Compute the on-screen size for *image*.

    Both dimensions given: use them as-is. Only one given: derive the
    other from the image aspect ratio. Neither: use the native size.
    """
    native_w = image.shape[1]
    native_h = image.shape[0]
    if width is not None and height is not None:
        return SizePixel(width, height)
    if width is not None:
        return SizePixel(width, round(native_h / native_w * width))
    if height is not None:
        return SizePixel(round(native_w / native_h * height), height)
    return SizePixel.from_image(image)
@static_vars(
    zoomed_status={},
    zoom_click_times={},
    last_shown_image=None)
def _image_impl(
        image_and_ajustments,
        width=None, height=None, title="",
        always_refresh=False,
        linked_user_image_address: ImageAddress = 0
        ):
    """
    Display an image as a click-to-zoom imgui button.

    Clicking toggles between the requested viewport size and the image's
    native size (debounced by 0.3 s). Per-image zoom state is kept in the
    function's static dicts, keyed by a unique label derived from *title*.

    :return: the mouse position over the image, or None when outside.
    """
    statics = _image_impl.statics
    statics.last_shown_image = image_and_ajustments
    zoom_key = imgui_ext.make_unique_label(title)
    # First time this image/title is shown: initialise its zoom state.
    # (A second, identical membership guard used to follow the viewport
    # computation below; it was unreachable after this block and was removed.)
    if zoom_key not in statics.zoomed_status:
        statics.zoom_click_times[zoom_key] = 0
        statics.zoomed_status[zoom_key] = False
    if statics.zoomed_status[zoom_key]:
        viewport_size = SizePixel.from_image(image_and_ajustments.image)
    else:
        viewport_size = _image_viewport_size(image_and_ajustments.image, width, height)
    texture_id = _image_to_texture(
        image_and_ajustments,
        always_refresh=always_refresh,
        linked_user_image_address=linked_user_image_address
    )
    if title == "":
        imgui.image_button(texture_id, viewport_size.width, viewport_size.height, frame_padding=0)
        is_mouse_hovering = imgui.is_item_hovered()
    else:
        imgui.begin_group()
        imgui.image_button(texture_id, viewport_size.width, viewport_size.height, frame_padding=0)
        is_mouse_hovering = imgui.is_item_hovered()
        imgui.text(title)
        imgui.end_group()
    # Toggle zoom on click; the 0.3 s debounce keeps one click from toggling twice.
    if is_mouse_hovering and imgui.get_io().mouse_down[0]:
        last_time = statics.zoom_click_times[zoom_key]
        now = timer()
        if now - last_time > 0.3:
            statics.zoomed_status[zoom_key] = not statics.zoomed_status[zoom_key]
            statics.zoom_click_times[zoom_key] = now
    return mouse_position_last_image()
def image(
        img,
        width=None,
        height=None,
        title="",
        image_adjustments=None,
        always_refresh = False,
        linked_user_image_address: ImageAddress = 0
        ):
    """Public entry point: show *img* as a zoomable imgui widget.

    :return: mouse position in image coordinates, or None when outside.
    """
    adjustments = image_adjustments if image_adjustments is not None else ImageAdjustments()
    bundle = ImageAndAdjustments(img, adjustments)
    return _image_impl(
        bundle,
        width=width, height=height,
        title=title,
        always_refresh=always_refresh,
        linked_user_image_address=linked_user_image_address
    )
def _is_in_image(pixel, image_shape):
# type : (imgui.Vec2, shape) -> Bool
w = image_shape[1]
h = image_shape[0]
x = pixel.x
y = pixel.y
return x >= 0 and x < w and y >= 0 and y < h
def _is_in_last_image(pixel):
    """True when *pixel* falls inside the most recently displayed image."""
    shape = _image_impl.statics.last_shown_image.image.shape
    return _is_in_image(pixel, shape)
def mouse_position_last_image():
    """
    Mouse position relative to the last displayed image, in image
    coordinates; None when the mouse is outside the image.
    """
    io = imgui.get_io()
    rect_min = imgui.get_item_rect_min()
    relative = imgui.Vec2(io.mouse_pos.x - rect_min.x, io.mouse_pos.y - rect_min.y)
    return relative if _is_in_last_image(relative) else None
def is_mouse_hovering_last_image():
    """
    Whether the mouse is currently over the last displayed image.

    Only works if the image was presented in its original size, since the
    position is compared against the raw image shape.
    """
    if not imgui.is_item_hovered_rect():
        return False
    # Idiom: the former `if mouse is None: return False / else: return True`
    # collapses to a single boolean expression.
    return mouse_position_last_image() is not None
def image_explorer(image, width=None, height=None, title="", zoom_key="", hide_buttons=False,
                   image_adjustments=None,
                   always_refresh = False
                   ):
    """
    Show an image inside an interactive zoom explorer widget.

    :param image: opencv / np image.
    :param width: optional display width
    :param height: optional display height
    :param title: an optional title
    :param zoom_key: Set the same zoom_key for two image if you want to link their zoom settings
    :param hide_buttons: hide the explorer's buttons
    :param image_adjustments: optional display adjustments (brightness/contrast)
    :param always_refresh: force a texture re-upload every frame
    :return: mouse location in image coordinates (None if the mouse is outside of the image)
    """
    from ._imgui_cv_zoom import image_explorer_autostore_zoominfo
    if image_adjustments is None:
        image_adjustments = ImageAdjustments()
    viewport_size = _image_viewport_size(image, width, height)
    imgui.begin_group()
    mouse_location = image_explorer_autostore_zoominfo(
        image,
        viewport_size,
        title,
        zoom_key,
        image_adjustments,
        hide_buttons=hide_buttons,
        always_refresh=always_refresh
    )
    imgui.end_group()
    return mouse_location
| [
"numpy.uint8",
"imgui.is_item_hovered_rect",
"copy.deepcopy",
"OpenGL.GL.glTexImage2D",
"numpy.random.RandomState",
"imgui.get_io",
"imgui.get_item_rect_min",
"OpenGL.GL.glGenTextures",
"imgui.end_group",
"math.fabs",
"OpenGL.GL.glBindTexture",
"OpenGL.GL.glDeleteTextures",
"cv2.cvtColor",
... | [((262, 269), 'timeit.default_timer', 'timer', ([], {}), '()\n', (267, 269), True, 'from timeit import default_timer as timer\n'), ((3490, 3509), 'OpenGL.GL.glGenTextures', 'gl.glGenTextures', (['(1)'], {}), '(1)\n', (3506, 3509), True, 'import OpenGL.GL as gl\n'), ((5598, 5641), 'OpenGL.GL.glPixelStorei', 'gl.glPixelStorei', (['gl.GL_UNPACK_ALIGNMENT', '(1)'], {}), '(gl.GL_UNPACK_ALIGNMENT, 1)\n', (5614, 5641), True, 'import OpenGL.GL as gl\n'), ((5646, 5692), 'OpenGL.GL.glBindTexture', 'gl.glBindTexture', (['gl.GL_TEXTURE_2D', 'texture_id'], {}), '(gl.GL_TEXTURE_2D, texture_id)\n', (5662, 5692), True, 'import OpenGL.GL as gl\n'), ((5697, 5740), 'OpenGL.GL.glPixelStorei', 'gl.glPixelStorei', (['gl.GL_UNPACK_ALIGNMENT', '(1)'], {}), '(gl.GL_UNPACK_ALIGNMENT, 1)\n', (5713, 5740), True, 'import OpenGL.GL as gl\n'), ((5745, 5821), 'OpenGL.GL.glTexParameteri', 'gl.glTexParameteri', (['gl.GL_TEXTURE_2D', 'gl.GL_TEXTURE_MAG_FILTER', 'gl.GL_LINEAR'], {}), '(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)\n', (5763, 5821), True, 'import OpenGL.GL as gl\n'), ((5826, 5902), 'OpenGL.GL.glTexParameteri', 'gl.glTexParameteri', (['gl.GL_TEXTURE_2D', 'gl.GL_TEXTURE_MIN_FILTER', 'gl.GL_LINEAR'], {}), '(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)\n', (5844, 5902), True, 'import OpenGL.GL as gl\n'), ((5907, 6017), 'OpenGL.GL.glTexImage2D', 'gl.glTexImage2D', (['gl.GL_TEXTURE_2D', '(0)', 'gl.GL_RGB', 'width', 'height', '(0)', 'gl.GL_BGR', 'gl.GL_UNSIGNED_BYTE', 'img_rgb'], {}), '(gl.GL_TEXTURE_2D, 0, gl.GL_RGB, width, height, 0, gl.GL_BGR,\n gl.GL_UNSIGNED_BYTE, img_rgb)\n', (5922, 6017), True, 'import OpenGL.GL as gl\n'), ((6018, 6055), 'OpenGL.GL.glBindTexture', 'gl.glBindTexture', (['gl.GL_TEXTURE_2D', '(0)'], {}), '(gl.GL_TEXTURE_2D, 0)\n', (6034, 6055), True, 'import OpenGL.GL as gl\n'), ((6743, 6750), 'timeit.default_timer', 'timer', ([], {}), '()\n', (6748, 6750), True, 'from timeit import default_timer as timer\n'), ((7713, 7720), 
'timeit.default_timer', 'timer', ([], {}), '()\n', (7718, 7720), True, 'from timeit import default_timer as timer\n'), ((11600, 11614), 'imgui.get_io', 'imgui.get_io', ([], {}), '()\n', (11612, 11614), False, 'import imgui\n'), ((11655, 11680), 'imgui.get_item_rect_min', 'imgui.get_item_rect_min', ([], {}), '()\n', (11678, 11680), False, 'import imgui\n'), ((11702, 11756), 'imgui.Vec2', 'imgui.Vec2', (['(mouse.x - rect_min.x)', '(mouse.y - rect_min.y)'], {}), '(mouse.x - rect_min.x, mouse.y - rect_min.y)\n', (11712, 11756), False, 'import imgui\n'), ((12926, 12945), 'imgui.begin_group', 'imgui.begin_group', ([], {}), '()\n', (12943, 12945), False, 'import imgui\n'), ((13204, 13221), 'imgui.end_group', 'imgui.end_group', ([], {}), '()\n', (13219, 13221), False, 'import imgui\n'), ((665, 681), 'math.fabs', 'math.fabs', (['(a - b)'], {}), '(a - b)\n', (674, 681), False, 'import math\n'), ((1691, 1716), 'numpy.random.RandomState', 'np.random.RandomState', (['(89)'], {}), '(89)\n', (1712, 1716), True, 'import numpy as np\n'), ((1994, 2008), 'xxhash.xxh64', 'xxhash.xxh64', ([], {}), '()\n', (2006, 2008), False, 'import xxhash\n'), ((7321, 7357), 'copy.deepcopy', 'copy.deepcopy', (['image_and_adjustments'], {}), '(image_and_adjustments)\n', (7334, 7357), False, 'import copy\n'), ((8122, 8161), 'OpenGL.GL.glDeleteTextures', 'gl.glDeleteTextures', (['textures_to_delete'], {}), '(textures_to_delete)\n', (8141, 8161), True, 'import OpenGL.GL as gl\n'), ((9694, 9701), 'timeit.default_timer', 'timer', ([], {}), '()\n', (9699, 9701), True, 'from timeit import default_timer as timer\n'), ((9908, 10002), 'imgui.image_button', 'imgui.image_button', (['texture_id', 'viewport_size.width', 'viewport_size.height'], {'frame_padding': '(0)'}), '(texture_id, viewport_size.width, viewport_size.height,\n frame_padding=0)\n', (9926, 10002), False, 'import imgui\n'), ((10027, 10050), 'imgui.is_item_hovered', 'imgui.is_item_hovered', ([], {}), '()\n', (10048, 10050), False, 'import imgui\n'), 
((10069, 10088), 'imgui.begin_group', 'imgui.begin_group', ([], {}), '()\n', (10086, 10088), False, 'import imgui\n'), ((10097, 10191), 'imgui.image_button', 'imgui.image_button', (['texture_id', 'viewport_size.width', 'viewport_size.height'], {'frame_padding': '(0)'}), '(texture_id, viewport_size.width, viewport_size.height,\n frame_padding=0)\n', (10115, 10191), False, 'import imgui\n'), ((10216, 10239), 'imgui.is_item_hovered', 'imgui.is_item_hovered', ([], {}), '()\n', (10237, 10239), False, 'import imgui\n'), ((10248, 10265), 'imgui.text', 'imgui.text', (['title'], {}), '(title)\n', (10258, 10265), False, 'import imgui\n'), ((10274, 10291), 'imgui.end_group', 'imgui.end_group', ([], {}), '()\n', (10289, 10291), False, 'import imgui\n'), ((10421, 10428), 'timeit.default_timer', 'timer', ([], {}), '()\n', (10426, 10428), True, 'from timeit import default_timer as timer\n'), ((11974, 12002), 'imgui.is_item_hovered_rect', 'imgui.is_item_hovered_rect', ([], {}), '()\n', (12000, 12002), False, 'import imgui\n'), ((4414, 4451), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2BGR'], {}), '(img, cv2.COLOR_GRAY2BGR)\n', (4426, 4451), False, 'import cv2\n'), ((4527, 4548), 'numpy.uint8', 'np.uint8', (['(img * 255.0)'], {}), '(img * 255.0)\n', (4535, 4548), True, 'import numpy as np\n'), ((4570, 4612), 'cv2.cvtColor', 'cv2.cvtColor', (['img_grey', 'cv2.COLOR_GRAY2BGR'], {}), '(img_grey, cv2.COLOR_GRAY2BGR)\n', (4582, 4612), False, 'import cv2\n'), ((5013, 5050), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGRA2BGR'], {}), '(img, cv2.COLOR_BGRA2BGR)\n', (5025, 5050), False, 'import cv2\n'), ((10322, 10336), 'imgui.get_io', 'imgui.get_io', ([], {}), '()\n', (10334, 10336), False, 'import imgui\n')] |
from behave import *
from hamcrest import *
import numpy
@given('a matrix')
def step_impl(context):
    """Store a 3x3 matrix holding the integers 2..10 on the scenario context."""
    context.matrix = numpy.arange(2, 11).reshape((3, 3))
@when('project the agent')
def step_impl(context):
    # Project the 3x3 utility matrix through the agent's util manager and
    # keep the result for the @then assertions.
    # NOTE(review): context.dpop_1 is presumably created by an earlier
    # background/given step not shown here — confirm.
    context.result = context.dpop_1.util_manager.project(context.matrix)
@then('result is a matrix has one less dimension')
def step_impl(context):
    # Projection of the 3x3 matrix must collapse one axis, yielding shape (3,).
    assert_that(context.result.shape, equal_to((3,)))
@then('result contain optimal utility for agent value chosen')
def step_impl(context):
    # ndenumerate over the 1-D result yields tuple indices like (0,), so
    # context.matrix[:, index] selects the corresponding column; each result
    # entry is expected to be that column's minimum utility.
    for index, value in numpy.ndenumerate(context.result):
        assert_that(value, equal_to(min(context.matrix[:, index])))
| [
"numpy.ndenumerate",
"numpy.arange"
] | [((528, 561), 'numpy.ndenumerate', 'numpy.ndenumerate', (['context.result'], {}), '(context.result)\n', (545, 561), False, 'import numpy\n'), ((124, 143), 'numpy.arange', 'numpy.arange', (['(2)', '(11)'], {}), '(2, 11)\n', (136, 143), False, 'import numpy\n')] |
"""
The :mod:`sklearnext.metrics.regressio` contains
various metrics for regression tasks.
"""
# Author: <NAME> <<EMAIL>>
# Licence: MIT
import numpy as np
from sklearn.metrics.regression import _check_reg_targets, check_consistent_length
from sklearn.externals.six import string_types
def weighted_mean_squared_error(y_true,
                                y_pred,
                                sample_weight=None,
                                multioutput='uniform_average',
                                asymmetry_factor=0.5):
    """Asymmetrically weighted mean squared error regression loss.

    Squared errors are weighted by ``|1 - asymmetry_factor|`` where the
    prediction exceeds the target (overprediction) and by
    ``|asymmetry_factor|`` otherwise, then doubled and averaged.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.
    multioutput : string in ['raw_values', 'uniform_average']
        or array-like of shape (n_outputs)
        'raw_values' returns one error per output; 'uniform_average'
        averages them uniformly; an array gives per-output weights.
    asymmetry_factor : float
        Asymmetry factor between underprediction and overprediction in
        the range [0.0, 1.0]; 0.5 is the balanced case.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (best is 0.0), or an array of
        values, one per individual target.
    """
    _, y_true, y_pred, multioutput = _check_reg_targets(y_true, y_pred, multioutput)
    check_consistent_length(y_true, y_pred, sample_weight)
    # Overpredictions (y_true < y_pred) get weight |1 - alpha|, others |alpha|.
    asym_weights = abs((y_true < y_pred) - asymmetry_factor)
    per_output = 2 * np.average(asym_weights * (y_true - y_pred) ** 2,
                               axis=0, weights=sample_weight)
    if isinstance(multioutput, string_types):
        if multioutput == 'raw_values':
            return per_output
        if multioutput == 'uniform_average':
            multioutput = None
    return np.average(per_output, weights=multioutput)
| [
"sklearn.metrics.regression._check_reg_targets",
"sklearn.metrics.regression.check_consistent_length",
"numpy.average"
] | [((1784, 1831), 'sklearn.metrics.regression._check_reg_targets', '_check_reg_targets', (['y_true', 'y_pred', 'multioutput'], {}), '(y_true, y_pred, multioutput)\n', (1802, 1831), False, 'from sklearn.metrics.regression import _check_reg_targets, check_consistent_length\n'), ((1836, 1890), 'sklearn.metrics.regression.check_consistent_length', 'check_consistent_length', (['y_true', 'y_pred', 'sample_weight'], {}), '(y_true, y_pred, sample_weight)\n', (1859, 1890), False, 'from sklearn.metrics.regression import _check_reg_targets, check_consistent_length\n'), ((2254, 2300), 'numpy.average', 'np.average', (['output_errors'], {'weights': 'multioutput'}), '(output_errors, weights=multioutput)\n', (2264, 2300), True, 'import numpy as np\n'), ((1970, 2045), 'numpy.average', 'np.average', (['(weights * (y_true - y_pred) ** 2)'], {'axis': '(0)', 'weights': 'sample_weight'}), '(weights * (y_true - y_pred) ** 2, axis=0, weights=sample_weight)\n', (1980, 2045), True, 'import numpy as np\n')] |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from timeit import default_timer as timer
class RPRidge:
    """
    Ridge regression fitted through random-projection sketches.

    Supports one-shot sketched solves (``fit_classical``,
    ``fit_hessian_sketch``) and iterated Hessian-sketch refinement
    (``iterate_*``), with optional wall-clock instrumentation
    (``*_timing`` variants).

    Parameters
    ----------
    rp_dim : int
        Target dimension of the random projection.
    rp_mode : str
        Sketch type used by ``_get_sketch``: 'Gaussian' or 'SJLT'.
        NOTE(review): the default 'Sparse' is not recognised by
        ``_get_sketch`` (it raises NotImplementedError) — confirm the
        intended default.
    gamma : float
        Ridge regularisation strength.
    """
    def __init__(self,rp_dim:int,rp_mode='Sparse',gamma=1.0):
        self.rp_dim = rp_dim
        self.rp_mode = rp_mode
        self.gamma = gamma

    def iterate_single(self,X,y,iterations=10,seed=100):
        '''
        Iterated ridge fit using a single sketch: the sketched Hessian is
        built and pseudo-inverted once, then reused as a preconditioner
        for every Newton-style step.

        Returns (final weights squeezed to 1-D, (d, iterations) array of
        all iterates).
        '''
        d = X.shape[1]
        w = np.zeros((d,1),dtype=float)
        all_w = np.zeros((d,iterations))
        XTy = (X.T@y).reshape(-1,1)
        # Build the sketched Hessian H = (SA)'(SA) + gamma*I once and invert it.
        SA = self._get_sketch(X,seed)
        H = SA.T@SA + (self.gamma)*np.eye(d)
        H_inv = np.linalg.pinv(H)
        for it in range(iterations):
            # Exact ridge gradient, stepped through the sketched inverse Hessian.
            grad = X.T@(X@w) + self.gamma*w - XTy
            w += - H_inv@grad
            all_w[:,it] = np.squeeze(w)
        return np.squeeze(w), all_w

    def iterate_multiple(self,X,y,iterations=10):
        '''
        Iterated ridge fit drawing a fresh sketch (seed 100*iteration)
        every step, re-forming and re-inverting the sketched Hessian each
        time.
        '''
        d = X.shape[1]
        w = np.zeros((d,1),dtype=float)
        all_w = np.zeros((d,iterations))
        XTy = (X.T@y).reshape(-1,1)
        for it in range(iterations):
            SA = self._get_sketch(X,seed=100*it)
            H = SA.T@SA + (self.gamma)*np.eye(d)
            H_inv = np.linalg.pinv(H)
            grad = X.T@(X@w) + self.gamma*w - XTy
            w += - H_inv@grad
            all_w[:,it] = np.squeeze(w)
        return np.squeeze(w), all_w

    def _naive_hessian_update(self, sketched_data, vec):
        """
        Naively performs the hessian update by evaluating the approximate Hessian
        and inverting.  Used when the sketch dimension is >= d.
        """
        H = sketched_data.T@sketched_data + (self.gamma)*np.eye(sketched_data.shape[1])
        H_inv = np.linalg.pinv(H)
        return H_inv@vec

    def _linear_solve_hessian_update(self, sketched_data, vec):
        """
        Implements the hessian_inv@gradient efficiently by using Woodbury:
        the (rp_dim x rp_dim) system K is solved instead of the (d x d) one.
        """
        K = sketched_data@sketched_data.T / self.gamma + np.eye(sketched_data.shape[0])
        u = (1/self.gamma)*vec - (sketched_data.T / self.gamma**2)@(np.linalg.solve(K,sketched_data@vec))
        return u

    def iterate_multiple_timing(self, X, y, iterations=10):
        """
        Fits the iterated ridge model with a new sketch every iteration
        (seed 10*iteration), recording wall-clock timings.

        Returns (weights, iterates, measurables).  measurables['all_times']
        holds cumulative elapsed time per iteration; measurables['sketch time']
        the total time spent sketching.
        NOTE(review): the 'gradients', 'updates' and 'sketch' entries are
        allocated but never filled in — confirm whether this is intended.
        """
        # * Initialisation not timed
        d = X.shape[1]
        w = np.zeros((d,1),dtype=float)
        all_w = np.zeros((d,iterations))
        # Woodbury solve is cheaper when the sketch is smaller than d.
        if self.rp_dim < d:
            update_method = self._linear_solve_hessian_update
        else:
            update_method = self._naive_hessian_update
        measurables = {
            'sketch time' : None,
            'all_times' : np.zeros(iterations+1,dtype=float),
            'gradients' : np.zeros((d,iterations),dtype=float),
            'updates' : np.zeros((d,iterations),dtype=float),
            'sketch' : None
        }
        SKETCH_TIME = 0.0
        TIMER_START = timer()
        XTy = (X.T@y).reshape(-1,1)
        for it in range(iterations):
            # * Timing the sketch separately
            SKETCH_TIMER_START = timer()
            SA = self._get_sketch(X,seed=10*it)
            SKETCH_TIME += timer() - SKETCH_TIMER_START
            # * Into grad updates
            grad = X.T@(X@w) + self.gamma*w - XTy
            update = update_method(SA, grad)
            w += - update
            all_w[:,it] = np.squeeze(w)
            measurables['all_times'][it+1] = timer() - TIMER_START
        measurables['sketch time'] = SKETCH_TIME
        return np.squeeze(w), all_w, measurables

    def iterate_single_timing(self, X, y, iterations=10, seed=100):
        """
        Single-sketch variant of iterate_multiple_timing: the sketch is
        drawn once up front (its cost reported in 'sketch time') and then
        reused for every iteration.
        NOTE(review): as above, 'gradients'/'updates'/'sketch' entries are
        never populated.
        """
        # * Initialisation not timed
        d = X.shape[1]
        w = np.zeros((d,1),dtype=float)
        all_w = np.zeros((d,iterations))
        if self.rp_dim < d:
            update_method = self._linear_solve_hessian_update
        else:
            update_method = self._naive_hessian_update
        measurables = {
            'sketch time' : None,
            'all_times' : np.zeros(iterations+1,dtype=float),
            'gradients' : np.zeros((d,iterations),dtype=float),
            'updates' : np.zeros((d,iterations),dtype=float),
            'sketch' : None
        }
        TIMER_START = timer()
        XTy = (X.T@y).reshape(-1,1)
        SA = self._get_sketch(X,seed)
        SKETCH_TIME = timer() - TIMER_START
        for it in range(iterations):
            grad = X.T@(X@w) + self.gamma*w - XTy
            update = update_method(SA, grad)
            w += - update
            all_w[:,it] = np.squeeze(w)
            measurables['all_times'][it+1] = timer() - TIMER_START
        measurables['sketch time'] = SKETCH_TIME
        return np.squeeze(w), all_w, measurables

    def _get_sketch(self,data,seed=10):
        '''
        Dispatch to the configured sketch: 'Gaussian' (dense) or 'SJLT'
        (sparse, with 10 nonzeros per column).  Any other rp_mode raises
        NotImplementedError.
        '''
        if self.rp_mode == 'Gaussian':
            return self._gaussian_projection(data,seed)
        elif self.rp_mode == 'SJLT':
            return self._sparse_projection(data,10,seed)
        else:
            raise NotImplementedError

    def fit_classical(self,X,y):
        '''
        "Classical" sketched ridge: sketch [X, y] jointly, then solve the
        sketched normal equations.  Stores the sketched regularised
        Hessian in self.H and the coefficients in self.classical_coef_.
        '''
        d = X.shape[1]
        data = np.c_[X,y]
        S_data = self._get_sketch(data)
        SX = S_data[:,:-1]
        Sy = S_data[:,-1]
        H_est = SX.T@SX + self.gamma*np.eye(d)
        self.H = H_est
        self.classical_coef_ = np.linalg.solve(H_est,SX.T@Sy)

    def fit_hessian_sketch(self,X,y):
        '''
        Hessian-sketch ridge: only the Hessian is sketched; the exact
        right-hand side X'y is kept.  Stores the coefficients in
        self.hessian_coef_.
        '''
        d = X.shape[1]
        SX = self._get_sketch(X)
        H_est = SX.T@SX + self.gamma*np.eye(d)
        self.hessian_coef_ = np.linalg.solve(H_est,X.T@y)

    def get_classical_bias(self,X,w0):
        '''
        Bias of the classical sketched estimate w.r.t. true weights w0:
        gamma * ||H^{-1} w0||.  Requires fit_classical (uses self.H).
        '''
        return (self.gamma)*np.linalg.norm(np.linalg.pinv(self.H)@w0)

    def get_classical_variance(self,X):
        '''
        Returns the variance term: ||S.T@S@A H_gamma^{-1}||_F^2, using the
        last sketch matrix cached by the projection helpers.
        '''
        S = self.sketch_mat
        return np.linalg.norm( S.T@(S@(X@np.linalg.pinv(self.H))) ,ord='fro')**2

    def get_hessian_sketch_bias(self,X,w0):
        '''
        Bias of the Hessian sketch method: H^{-1}(X'X w0) - w0.
        NOTE(review): uses self.H, which only fit_classical sets — confirm
        this is meant to be called after fit_classical.
        '''
        return np.linalg.pinv(self.H)@(X.T@(X@w0)) - w0

    def get_hessian_sketch_variance(self,X):
        '''
        Returns the variance term: ||A H_gamma^{-1}||_F^2.
        NOTE(review): also relies on self.H from fit_classical.
        '''
        return np.linalg.norm(X@np.linalg.pinv(self.H),ord='fro')**2

    def _sparse_projection(self,mat,sparsity=1,random_seed=10):
        """
        Sparse Johnson-Lindenstrauss transform (Kane & Nelson): each input
        column gets `sparsity` random +/-1 entries; the returned product is
        scaled by 1/sqrt(sparsity).
        NOTE(review): unlike _gaussian_projection, `random_seed` is never
        used here, so results are not reproducible — confirm whether a
        np.random.seed(random_seed) call was intended.
        NOTE(review): self.sketch_mat stores the *unscaled* sketch, while
        the returned product includes the 1/sqrt(sparsity) factor.
        """
        [n,_] = mat.shape
        sketch = np.zeros((self.rp_dim ,n),dtype=float)
        for i in range(n):
            nnz_loc = np.random.choice(self.rp_dim ,size=sparsity,replace=False)
            nnz_sign = np.random.choice([-1,1],size=sparsity,replace=True)
            sketch[nnz_loc,i] = nnz_sign
        self.sketch_mat = sketch
        return (1./np.sqrt(sparsity))*sketch@mat

    def _gaussian_projection(self,mat,random_seed=10):
        """
        Dense Gaussian random projection: S has i.i.d. N(0, 1/rp_dim)
        entries (global numpy RNG is seeded with random_seed).  S is cached
        in self.sketch_mat and S @ mat is returned.
        """
        [n,_] = mat.shape
        np.random.seed(random_seed)
        S = np.random.randn(self.rp_dim,n) / np.sqrt(self.rp_dim)
        self.sketch_mat = S
        return S@mat
| [
"numpy.eye",
"numpy.linalg.solve",
"numpy.sqrt",
"numpy.linalg.pinv",
"numpy.random.choice",
"timeit.default_timer",
"numpy.squeeze",
"numpy.zeros",
"numpy.random.seed",
"numpy.random.randn"
] | [((1197, 1226), 'numpy.zeros', 'np.zeros', (['(d, 1)'], {'dtype': 'float'}), '((d, 1), dtype=float)\n', (1205, 1226), True, 'import numpy as np\n'), ((1241, 1266), 'numpy.zeros', 'np.zeros', (['(d, iterations)'], {}), '((d, iterations))\n', (1249, 1266), True, 'import numpy as np\n'), ((1431, 1448), 'numpy.linalg.pinv', 'np.linalg.pinv', (['H'], {}), '(H)\n', (1445, 1448), True, 'import numpy as np\n'), ((1799, 1828), 'numpy.zeros', 'np.zeros', (['(d, 1)'], {'dtype': 'float'}), '((d, 1), dtype=float)\n', (1807, 1828), True, 'import numpy as np\n'), ((1843, 1868), 'numpy.zeros', 'np.zeros', (['(d, iterations)'], {}), '((d, iterations))\n', (1851, 1868), True, 'import numpy as np\n'), ((2534, 2551), 'numpy.linalg.pinv', 'np.linalg.pinv', (['H'], {}), '(H)\n', (2548, 2551), True, 'import numpy as np\n'), ((3185, 3214), 'numpy.zeros', 'np.zeros', (['(d, 1)'], {'dtype': 'float'}), '((d, 1), dtype=float)\n', (3193, 3214), True, 'import numpy as np\n'), ((3229, 3254), 'numpy.zeros', 'np.zeros', (['(d, iterations)'], {}), '((d, iterations))\n', (3237, 3254), True, 'import numpy as np\n'), ((3740, 3747), 'timeit.default_timer', 'timer', ([], {}), '()\n', (3745, 3747), True, 'from timeit import default_timer as timer\n'), ((4623, 4652), 'numpy.zeros', 'np.zeros', (['(d, 1)'], {'dtype': 'float'}), '((d, 1), dtype=float)\n', (4631, 4652), True, 'import numpy as np\n'), ((4667, 4692), 'numpy.zeros', 'np.zeros', (['(d, iterations)'], {}), '((d, iterations))\n', (4675, 4692), True, 'import numpy as np\n'), ((5159, 5166), 'timeit.default_timer', 'timer', ([], {}), '()\n', (5164, 5166), True, 'from timeit import default_timer as timer\n'), ((6512, 6545), 'numpy.linalg.solve', 'np.linalg.solve', (['H_est', '(SX.T @ Sy)'], {}), '(H_est, SX.T @ Sy)\n', (6527, 6545), True, 'import numpy as np\n'), ((6920, 6951), 'numpy.linalg.solve', 'np.linalg.solve', (['H_est', '(X.T @ y)'], {}), '(H_est, X.T @ y)\n', (6935, 6951), True, 'import numpy as np\n'), ((8141, 8180), 'numpy.zeros', 
'np.zeros', (['(self.rp_dim, n)'], {'dtype': 'float'}), '((self.rp_dim, n), dtype=float)\n', (8149, 8180), True, 'import numpy as np\n'), ((8655, 8682), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (8669, 8682), True, 'import numpy as np\n'), ((1593, 1606), 'numpy.squeeze', 'np.squeeze', (['w'], {}), '(w)\n', (1603, 1606), True, 'import numpy as np\n'), ((1622, 1635), 'numpy.squeeze', 'np.squeeze', (['w'], {}), '(w)\n', (1632, 1635), True, 'import numpy as np\n'), ((2068, 2085), 'numpy.linalg.pinv', 'np.linalg.pinv', (['H'], {}), '(H)\n', (2082, 2085), True, 'import numpy as np\n'), ((2192, 2205), 'numpy.squeeze', 'np.squeeze', (['w'], {}), '(w)\n', (2202, 2205), True, 'import numpy as np\n'), ((2221, 2234), 'numpy.squeeze', 'np.squeeze', (['w'], {}), '(w)\n', (2231, 2234), True, 'import numpy as np\n'), ((2802, 2832), 'numpy.eye', 'np.eye', (['sketched_data.shape[0]'], {}), '(sketched_data.shape[0])\n', (2808, 2832), True, 'import numpy as np\n'), ((3492, 3529), 'numpy.zeros', 'np.zeros', (['(iterations + 1)'], {'dtype': 'float'}), '(iterations + 1, dtype=float)\n', (3500, 3529), True, 'import numpy as np\n'), ((3552, 3590), 'numpy.zeros', 'np.zeros', (['(d, iterations)'], {'dtype': 'float'}), '((d, iterations), dtype=float)\n', (3560, 3590), True, 'import numpy as np\n'), ((3614, 3652), 'numpy.zeros', 'np.zeros', (['(d, iterations)'], {'dtype': 'float'}), '((d, iterations), dtype=float)\n', (3622, 3652), True, 'import numpy as np\n'), ((3899, 3906), 'timeit.default_timer', 'timer', ([], {}), '()\n', (3904, 3906), True, 'from timeit import default_timer as timer\n'), ((4206, 4219), 'numpy.squeeze', 'np.squeeze', (['w'], {}), '(w)\n', (4216, 4219), True, 'import numpy as np\n'), ((4351, 4364), 'numpy.squeeze', 'np.squeeze', (['w'], {}), '(w)\n', (4361, 4364), True, 'import numpy as np\n'), ((4930, 4967), 'numpy.zeros', 'np.zeros', (['(iterations + 1)'], {'dtype': 'float'}), '(iterations + 1, dtype=float)\n', (4938, 4967), True, 
'import numpy as np\n'), ((4990, 5028), 'numpy.zeros', 'np.zeros', (['(d, iterations)'], {'dtype': 'float'}), '((d, iterations), dtype=float)\n', (4998, 5028), True, 'import numpy as np\n'), ((5052, 5090), 'numpy.zeros', 'np.zeros', (['(d, iterations)'], {'dtype': 'float'}), '((d, iterations), dtype=float)\n', (5060, 5090), True, 'import numpy as np\n'), ((5263, 5270), 'timeit.default_timer', 'timer', ([], {}), '()\n', (5268, 5270), True, 'from timeit import default_timer as timer\n'), ((5470, 5483), 'numpy.squeeze', 'np.squeeze', (['w'], {}), '(w)\n', (5480, 5483), True, 'import numpy as np\n'), ((5615, 5628), 'numpy.squeeze', 'np.squeeze', (['w'], {}), '(w)\n', (5625, 5628), True, 'import numpy as np\n'), ((8229, 8288), 'numpy.random.choice', 'np.random.choice', (['self.rp_dim'], {'size': 'sparsity', 'replace': '(False)'}), '(self.rp_dim, size=sparsity, replace=False)\n', (8245, 8288), True, 'import numpy as np\n'), ((8311, 8365), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {'size': 'sparsity', 'replace': '(True)'}), '([-1, 1], size=sparsity, replace=True)\n', (8327, 8365), True, 'import numpy as np\n'), ((8695, 8726), 'numpy.random.randn', 'np.random.randn', (['self.rp_dim', 'n'], {}), '(self.rp_dim, n)\n', (8710, 8726), True, 'import numpy as np\n'), ((8728, 8748), 'numpy.sqrt', 'np.sqrt', (['self.rp_dim'], {}), '(self.rp_dim)\n', (8735, 8748), True, 'import numpy as np\n'), ((1405, 1414), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (1411, 1414), True, 'import numpy as np\n'), ((2487, 2517), 'numpy.eye', 'np.eye', (['sketched_data.shape[1]'], {}), '(sketched_data.shape[1])\n', (2493, 2517), True, 'import numpy as np\n'), ((2901, 2940), 'numpy.linalg.solve', 'np.linalg.solve', (['K', '(sketched_data @ vec)'], {}), '(K, sketched_data @ vec)\n', (2916, 2940), True, 'import numpy as np\n'), ((3982, 3989), 'timeit.default_timer', 'timer', ([], {}), '()\n', (3987, 3989), True, 'from timeit import default_timer as timer\n'), ((4265, 4272), 
'timeit.default_timer', 'timer', ([], {}), '()\n', (4270, 4272), True, 'from timeit import default_timer as timer\n'), ((5529, 5536), 'timeit.default_timer', 'timer', ([], {}), '()\n', (5534, 5536), True, 'from timeit import default_timer as timer\n'), ((6448, 6457), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (6454, 6457), True, 'import numpy as np\n'), ((6881, 6890), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (6887, 6890), True, 'import numpy as np\n'), ((7692, 7714), 'numpy.linalg.pinv', 'np.linalg.pinv', (['self.H'], {}), '(self.H)\n', (7706, 7714), True, 'import numpy as np\n'), ((2038, 2047), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (2044, 2047), True, 'import numpy as np\n'), ((7101, 7123), 'numpy.linalg.pinv', 'np.linalg.pinv', (['self.H'], {}), '(self.H)\n', (7115, 7123), True, 'import numpy as np\n'), ((7893, 7915), 'numpy.linalg.pinv', 'np.linalg.pinv', (['self.H'], {}), '(self.H)\n', (7907, 7915), True, 'import numpy as np\n'), ((8456, 8473), 'numpy.sqrt', 'np.sqrt', (['sparsity'], {}), '(sparsity)\n', (8463, 8473), True, 'import numpy as np\n'), ((7498, 7520), 'numpy.linalg.pinv', 'np.linalg.pinv', (['self.H'], {}), '(self.H)\n', (7512, 7520), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# donut_plot_with_subgroups_from_dataframe.py
__author__ = "<NAME>" #fomightez on GitHub
__license__ = "MIT"
__version__ = "0.1.0"
# donut_plot_with_subgroups_from_dataframe.py by <NAME>
# ver 0.1
#
#*******************************************************************************
# Verified compatible with both Python 2.7 and Python 3.7; written initially in
# Python 3.
#
#
# PURPOSE: Takes a dataframe, and some information about columns in the
# dataframe and makes a donut plot similar to the one at
# https://python-graph-gallery.com/163-donut-plot-with-subgroups/. The plot is
# a breakdown of the main groups to subgroups with the main groups in an outer
# ring of the donut plot and the subgroups on the inner ring.
#
# The dataframe can be pickled, tab-separated form, or comma-separated form. The
# script will use the extension to decide how to read it in, and so use `.pkl`
# for saving pickled dataframes or `.tsv` or `.csv` for the tab- or comma-
# separated text versions, respectively. If using inside Jupyter or IPython, you
# can use the main function of the script,
# `donut_plot_with_subgroups_from_dataframe()` and when
# calling it supply the dataframe in memory to avoid needing a file
# intermediate.
#
#
#
#
#
#
# Based on `donut_plot_with_total_summary_and_subgroups_from_dataframe.py`
# but made simpler by removing the plot of the total states and just having
# the plot reminiscent of the one at
# https://python-graph-gallery.com/163-donut-plot-with-subgroups/ made from a
# dataframe / tabular text.
#
#
#
#
# Dependencies beyond the mostly standard libraries/modules:
#
#
#
#
# VERSION HISTORY:
# v.0.1. basic working version
#
# To do:
# -
#
#
#
#
# TO RUN:
# Examples,
# Enter on the command line of your terminal, the line
#-----------------------------------
# python donut_plot_with_subgroups_from_dataframe.py data.tsv groups_col subgroups_col
#-----------------------------------
# Issue `donut_plot_with_subgroups_from_dataframe.py -h` for
# details.
#
#
#
#
#
# To use this after importing/pasting or loading into a cell in a Jupyter
# notebook, specify at least the dataframe (or dataframe file) and columns:
# from donut_plot_with_subgroups_from_dataframe import donut_plot_with_subgroups_from_dataframe
# donut_plot_with_subgroups_from_dataframe(df_file="data.tsv",groups_col="status",subgroups_col="subtype");
#
#
#
'''
CURRENT ACTUAL CODE FOR RUNNING/TESTING IN A NOTEBOOK WHEN IMPORTED/LOADED OR
PASTED IN ANOTHER CELL:
from donut_plot_with_subgroups_from_dataframe import donut_plot_with_subgroups_from_dataframe
donut_plot_with_subgroups_from_dataframe(df_file="data.tsv",groups_col="Manufacturer",subgroups_col="In_Stock");
'''
#
#
#*******************************************************************************
#
#*******************************************************************************
##################################
# USER ADJUSTABLE VALUES #
##################################
#
plot_figure_size = (7,8) # width by height written as `(width,height)`;
# If you change this to substantial degree, you may also want to
# adjust text size settings below and possibly turn off plot titles using
# `include_title=False`in favor of adding your own in post-processing.
outer_ring_radius = 1.3 # radius of the outer ring of the donut plot
inner_ring_radius = outer_ring_radius-0.3 # radius of the inner ring of donut
outer_ring_width=0.3 # thickness of the outer (main groups) ring
inner_ring_width=0.4 # thickness of the inner (subgroups) ring
include_title = True # set to False to suppress the plot title
plot_title = "BREAKDOWN" # title text displayed above the plot when enabled
title_text_size = 20 # font size for title above plot
plot_text_size = 14 # font size for text in the plot
large_img_size = (14,15) # size to be used with `--large_image` `flag. Width
# by height written as `(width,height)`
light_color_for_last_in_subgroup = True # Set this to False to reverse the
# order of the subgroup coloring.
save_plot_name_prefix = "donut_plot" # basename used for the saved image file
#
#*******************************************************************************
#**********************END USER ADJUSTABLE VARIABLES****************************
#*******************************************************************************
#*******************************************************************************
###DO NOT EDIT BELOW HERE - ENTER VALUES ABOVE###
import sys
import os
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
###---------------------------HELPER FUNCTIONS--------------------------------###
def generate_output_file_name(save_plot_name_prefix):
    '''
    Build the name of the output image file by appending the `.png`
    extension to the supplied prefix.

    Example: calling with "donut_plot" returns "donut_plot.png".
    '''
    return "{}.png".format(save_plot_name_prefix)
def extract_dataframe(file_name):
    '''
    Load a pandas dataframe from `file_name`, choosing the parser from
    the file extension (case-insensitive): `.pkl` for a pickled
    dataframe, `.tsv` for tab-separated text, and `.csv` for
    comma-separated text. (JSON not handled yet.)

    Writes an error to stderr and exits for any other extension.
    Returns a pandas dataframe object.
    '''
    extension = Path(file_name).suffix.lower()
    if extension == ".pkl":
        return pd.read_pickle(file_name)
    if extension == ".tsv":
        return pd.read_csv(file_name, sep='\t')
    if extension == ".csv":
        return pd.read_csv(file_name)
    sys.stderr.write("\n**ERROR** Cannot determine how dataframe is stored "
        "in '{}'.\nChange the file name extension in the input file to be "
        "`.pkl`, `.tsv`, or `.csv` to indicate\nif dataframe stored "
        "pickled, stored as tab-separated text, or stored as\n"
        "comma-separated text."
        ".\n**EXITING !!**.\n".format(file_name))
    sys.exit(1)
def sequential_color_maps_generator():
    '''
    Generator yielding a never-ending supply of sequential colormaps.

    Order of supply: first the ColorBrewer-style sequential maps that
    ship with matplotlib (see
    https://matplotlib.org/tutorials/colors/colormaps.html), then a set
    of hand-picked seaborn light palettes, and finally, once those are
    exhausted, random light palettes built from random RGB triples.
    '''
    brewer_sequential = ["Blues", "Reds","Greens","Oranges",
        "Purples"] #"Greys" looks bad because white is least
    handpicked_colors = ["teal", "fuchsia", "darkslateblue", "sage",
                         "darkviolet", "crimson", "darkgoldenrod",
                         "dodgerblue", "maroon", "darkolivegreen",
                         "darkturquoise", "royalblue", "chocolate"]
    np.random.seed(42)
    # `plt.get_cmap` use based on
    # https://matplotlib.org/tutorials/colors/colormaps.html
    for name in brewer_sequential:
        yield plt.get_cmap(name)
    for name in handpicked_colors:
        try:
            yield sns.light_palette(name, as_cmap=True)
        except ValueError:
            # not a standard matplotlib color name; try the xkcd color set
            yield sns.light_palette(name, as_cmap=True, input="xkcd")
    while True:
        # random RGB triple, based on
        # https://stackoverflow.com/a/48793922/8508004
        rgb = tuple(np.random.random(size=3))
        yield sns.light_palette(rgb, input="rgb", as_cmap=True)
def is_number(s):
    '''
    Check whether ``s`` can be interpreted as a number: castable to
    float, or a unicode numeral (e.g. a vulgar fraction) recognised by
    ``unicodedata.numeric``.

    Returns True or False and never raises. (Previously a non-string,
    non-numeric input such as ``None`` raised TypeError from ``float``;
    that is now caught and reported as False, matching the second
    branch which already caught TypeError.)

    fixed from https://www.pythoncentral.io/how-to-check-if-a-string-is-a-number-in-python-including-unicode/
    later noted similar code is at https://code-maven.com/slides/python-programming/is-number
    '''
    try:
        float(s)
        return True
    except (TypeError, ValueError):
        pass
    try:
        import unicodedata
        unicodedata.numeric(s)
        return True
    except (TypeError, ValueError):
        pass
    return False
def cast_to_number(s):
    '''
    Cast a string to a float or integer.

    Tries casting to float first and, if that succeeds, then tries
    casting the string to an integer so whole-number strings come back
    as ints. Falls back to ``unicodedata.numeric`` for unicode
    numerals. Returns a float, int, or False when no conversion works.

    (Previously a non-string, non-numeric input such as ``None`` raised
    TypeError from ``float``; it now returns False, consistent with
    ``is_number``.)

    based on fixed code from https://www.pythoncentral.io/how-to-check-if-a-string-is-a-number-in-python-including-unicode/
    '''
    try:
        number = float(s)
        try:
            number = int(s)
            return number
        except ValueError:
            pass
        return number
    except (TypeError, ValueError):
        pass
    try:
        import unicodedata
        num = unicodedata.numeric(s)
        return num
    except (TypeError, ValueError):
        pass
    return False
def f7(seq):
    '''
    Return a list of the unique elements of `seq` with their first-seen
    order preserved (a plain `set` would lose the ordering).
    from https://stackoverflow.com/a/480227/8508004
    '''
    seen = set()
    uniques = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            uniques.append(item)
    return uniques
###--------------------------END OF HELPER FUNCTIONS--------------------------###
###--------------------------END OF HELPER FUNCTIONS--------------------------###
#*******************************************************************************
###------------------------'main' function of script--------------------------##
def donut_plot_with_subgroups_from_dataframe(
    df_file=None, df=None, groups_col=None, subgroups_col=None,
    save_image=False, save_vg=False, include_percent_in_grp_label=True,
    include_total_in_grp_label=True, hilolist = None,
    sort_on_subgroup_name=False, advance_color_increments=0,
    include_title=include_title, plot_title=plot_title):
    '''
    Takes the following:
    - name of a dataframe file (string) or a dataframe
    - text of name of column to use as main group data in the outer ring
    - text of name of column to use in subgroupings for the inner ring
    - Whether you want an image saved or not. If no image file saved, it tries
    to return a plot figure object.
    - optionally, for when `save_image=True`, whether you want to save the plot
    image as vector graphics
    - Optionally including the percent of total for each group in the plot
    label.
    - Optionally including the total amount for each group in the plot label.
    - optionally, a list to use as the high to low intensity degree for coloring
    the subgroups can be specified.
    - optionally, to use subgroup name in sorting subgroups displayed in the
    inner ring of the plot. This needs to be set to `True` to get arrangement of
    subgroups in inner ring like in the example
    https://python-graph-gallery.com/163-donut-plot-with-subgroups/
    - optionally, how many cycles you want the sequential color palette
    generator to advance through its first colors.
    - optionally, whether you want to include plot title
    - optionally, whether you want to set plot title to anything other than
    default; it is disregarded if `include_title=False`.
    Returns:
    A plot, meant for when using in Jupyter or IPython. Not triggered when
    called from command line.
    Generates:
    Depending on how called it can also generate a plot image. This is meant to
    be default for the command line; however, it can be included when calling
    main function in Jupyter or IPython.
    Main function of script.
    Takes a dataframe either as a file or passed directly along some information
    about columns in the dataframe and makes a donut plot. The plot is a
    breakdown of the main groups to subgroups with the main groups in an outer
    ring of the donut plot and the subgroups on the inner ring. The style sought
    is seen at https://python-graph-gallery.com/163-donut-plot-with-subgroups/ .
    If `save_image` is True it saves an image of the plot (png by default). If
    `save_image` is False it returns a plot object. The latter being meant for
    when using the script in Jupyter notebook.
    Additional options are noted under `Takes the following` above.
    '''
    if df is None:
        #read in dataframe from file since none provided in memory
        assert df_file != None, ("If no dataframe is provided, a file with the "
            "contents of the dataframe as pickled, tab-separated text, or "
            "comma-separated text must be provided and the name of that file "
            "specified when calling the script.")
        # use file extension to decide how to parse dataframe file.
        df = extract_dataframe(df_file)
    # Prepare derivatives of the dataframe that may be needed for delineating
    # the plotting data
    # Overall subgroup tallies across all groups (not currently plotted).
    tc = df[subgroups_col].value_counts()
    total_state_names = tc.index.tolist()
    total_state_size = tc.tolist()
    grouped = df.groupby(groups_col)
    # use `value_counts()` on each group to get the count and name of each state
    list_o_subgroup_names_l = []
    list_o_subgroup_size_l = []
    for name,group in grouped:
        dfc = group[subgroups_col].value_counts()
        if sort_on_subgroup_name:
            # sort by subgroup label (not count) to match the arrangement in
            # https://python-graph-gallery.com/163-donut-plot-with-subgroups/
            dfc = group[subgroups_col].value_counts().sort_index()
        list_o_subgroup_names_l.append(dfc.index.tolist())
        list_o_subgroup_size_l.append(dfc.tolist())
    # Delineate data for the plot:
    group_names= grouped.size().index.tolist()
    group_size= grouped.size().tolist() #len of each groupby grouping
    '''
    list_o_subgroup_names_l=[group[subgroups_col].tolist(
        ) for name, group in grouped]
    # flatten that list of lists
    subgroup_names=[i for sublt in list_o_subgroup_names_l for i in sublt]
    '''
    # flatten each list of lists made above to get the list needed
    subgroup_names=[i for sublt in list_o_subgroup_names_l for i in sublt]
    subgroup_size=[i for sublt in list_o_subgroup_size_l for i in sublt]
    assert len(subgroup_size) == len(subgroup_names)
    # Create colors generator and colors
    colormp = sequential_color_maps_generator()
    # NOTE: side-effect-only comprehension; consumes the first
    # `advance_color_increments` colormaps from the generator.
    [next(colormp) for g in range(advance_color_increments)]#advance prior to
    # use, if initial skips specified
    colorm_per_grp=[next(colormp) for g in group_names]
    # Create a switch system for the labels
    # Keyed on (include_percent, include_total) so the right label format is
    # picked with a single dict lookup below.
    ip_it_grp_label = {
        (True,True):["{} ({:.1%} [{}])".format(
            x,y/len(df),y) for x, y in zip(group_names, group_size)],
        (True,False):["{} ({:.1%})".format(
            x,y/len(df)) for x, y in zip(group_names, group_size)],
        (False,True):["{} [{}]".format(
            x,y) for x, y in zip(group_names, group_size)],
        (False,False):["{}".format(
            x) for x, y in zip(group_names, group_size)]}
    #Set up for plot.
    fig, ax = plt.subplots(figsize=plot_figure_size)
    ax.axis('equal')
    ### First Ring (outside)
    ### This will be the main groups
    labels_with_grp_sz = ip_it_grp_label[(
        include_percent_in_grp_label,include_total_in_grp_label)]
    mypie, _ = plt.pie(
        group_size, radius=outer_ring_radius, labels=labels_with_grp_sz,
        textprops={'fontsize': plot_text_size},
        colors=[colormp(0.63) for colormp in colorm_per_grp] )
    plt.setp( mypie, width=outer_ring_width, edgecolor='white')
    ### Second Ring (Inside)
    ### This will be the subgroup counting for each group
    list_sub_grp_colors_l = []
    subgroups_represented = f7(df[subgroups_col].tolist())
    #int_degree = [0.6,0.2]
    if hilolist:
        # NOTE(review): the two string lines after the assert are dangling
        # expressions, not part of the assert message -- only "The list
        # provided " is shown on failure. Parenthesizing the message looks
        # intended; TODO confirm.
        assert len(hilolist) == len(subgroups_represented), "The list provided "
        "to specify the intensity degree must include all subgroups. Subgroups "
        "are: '{}'.format(subgroups_represented)"
        subgroups_represented = hilolist
    else:
        # Provide feedback on what is being used as high to low intensity list
        # so user can adjust; using `if __name__ == "__main__"` to customize
        # note depending if script called from command line.
        sys.stderr.write("Note: No list to specify high to low intensity "
            "coloring "
            "provided, and so using '{}',\nwhere leftmost identifer corresponds "
            "to most intense and rightmost is least.\n".format(
            ",".join(str(i) for i in subgroups_represented))) # because subgroups
        # could be integers as in example from
        # https://python-graph-gallery.com/163-donut-plot-with-subgroups/, best
        # to have conversion to string,
        if __name__ == "__main__":
            sys.stderr.write("Look into adding use of the `--hilolist` option "
                "to specify the order.\n\n")
        else:
            sys.stderr.write("Provide a Python list as `hilolist` when calling "
                "the function to specify the order of intensity.\n\n")
    # assign intensity degree settings for each subgroup so consistent among
    # other groups
    int_degree = np.linspace(0.6, 0.2, num=len(subgroups_represented))
    if not light_color_for_last_in_subgroup:
        # NOTE(review): np.linspace returns an ndarray, which has no
        # `.reverse()` method -- this branch would raise AttributeError;
        # `int_degree = int_degree[::-1]` looks intended. TODO confirm.
        int_degree.reverse()
    # determine colors for each subgroup before `plt.pie` step
    for idx,subgroups_l in enumerate(list_o_subgroup_names_l):
        cm = colorm_per_grp[idx]
        grp_colors = [cm(int_degree[subgroups_represented.index(
            sgrp)]) for sgrp in subgroups_l]
        list_sub_grp_colors_l.append(grp_colors)
    # flatten that list
    sub_grp_colors = [i for sublt in list_sub_grp_colors_l for i in sublt]
    mypie2, _ = plt.pie(
        subgroup_size, radius=inner_ring_radius, labels=subgroup_names,
        textprops={'fontsize': plot_text_size}, labeldistance=0.7,
        colors=sub_grp_colors)
    plt.setp( mypie2, width=inner_ring_width, edgecolor='white')
    plt.margins(0,0)
    if include_title:
        plt.title(plot_title, size = title_text_size)
    # Reporting and Saving
    #--------------------------------------------------------------------
    if save_image:
        if plot_figure_size == large_img_size:
            # distinguish the large-image output from the default-size one
            output_file_name = generate_output_file_name(
                save_plot_name_prefix+ "_larger")
        else:
            output_file_name = generate_output_file_name(save_plot_name_prefix)
        if save_vg:
            plt.savefig(output_file_name[:-4]+".svg",
            orientation='landscape') # FOR VECTOR GRAPHICS; useful if merging
            # into Adobe Illustrator. Based on
            # https://neuroscience.telenczuk.pl/?p=331 ; I think ReportLab also
            # outputs SVG?
            sys.stderr.write("\nPlot image saved to: {}\n".format(
                output_file_name[:-4]+".svg"))
        else:
            # save png
            plt.savefig(output_file_name)
            sys.stderr.write("\nPlot image saved to: {}\n".format(
                output_file_name))
    else:
        sys.stderr.write("Plot figure object returned.")
        return ax
###--------------------------END OF MAIN FUNCTION----------------------------###
###--------------------------END OF MAIN FUNCTION----------------------------###
#*******************************************************************************
###------------------------'main' section of script---------------------------##
def main():
    """ Main entry point of the script """
    # The actual work lives in a separately named function so that it can be
    # called directly (with a distinguishing name) inside Jupyter notebooks,
    # where `main()` may get reassigned by other imported/pasted scripts.
    #
    # Optional settings parsed from the command line (into module-level
    # names) are collected into a kwargs dict and passed through; building
    # the dict keeps adding more options later easy. See
    # https://www.saltycrane.com/blog/2008/01/how-to-use-args-and-kwargs-in-python/#calling-a-function
    # plus https://stackoverflow.com/a/28986876/8508004 and
    # https://stackoverflow.com/a/1496355/8508004 for related help.
    kwargs = {
        'save_image': True,
        'save_vg': save_vg,
        'include_percent_in_grp_label': include_percent_in_grp_label,
        'include_total_in_grp_label': include_total_in_grp_label,
        'hilolist': hilolist,
        'sort_on_subgroup_name': sort_on_subgroup_name,
        'advance_color_increments': advance_color_increments,
    }
    donut_plot_with_subgroups_from_dataframe(
        df_file=args.df_file, groups_col=args.groups_col,
        subgroups_col=args.subgroups_col, **kwargs)
if __name__ == "__main__":
    ###-----------------for parsing command line arguments-------------------###
    import argparse
    parser = argparse.ArgumentParser(prog='donut_plot_with_subgroups_from_dataframe.py',
        description="donut_plot_with_subgroups_from_dataframe.py \
        takes a dataframe, and some information about columns in the dataframe \
        and makes a donut plot. The inner ring is a breakdown of the \
        subgroupings per each group in the outer ring of the plot.\
        **** Script by <NAME> \
        (fomightez @ github) ***")
    parser.add_argument("df_file", help="Name of file containing the \
        dataframe. Whether it is in the form of a pickled dataframe, \
        tab-separated text, or comma-separated text needs to be indicated by \
        the file extension. So `.pkl`, `.tsv`, or `.csv` for the file \
        extension. \
        ", metavar="DF_FILE")
    parser.add_argument("groups_col", help="Text indicating column in \
        dataframe to use as main group data in the outer ring of the plot.\
        ", metavar="GROUPS")
    parser.add_argument("subgroups_col", help="Text indicating column in \
        dataframe to use as subgroupings for the inner ring.\
        ", metavar="SUBGROUPS")
    parser.add_argument("-li", "--large_image",help=
        "add this flag to make the image saved larger than the default of \
        `{}`".format(
        plot_figure_size),action="store_true")
    #removed reporting exact size of larger one (see code below) because found
    #inexplicably it results in a size different than one set size using
    #`figure.set_size_inches()`, and rather not confuse things in demo notebook
    #by using same numbers and seeing slightly different results. (I suspect
    #it is a subtle glitch resulting in slightly different output via the two
    #size setting approaches.)
    '''
    parser.add_argument("-li", "--large_image",help=
        "add this flag to make the image saved larger than the default of \
        `{}`. Adding this flag will set the saved file size to `{}`.".format(
        plot_figure_size,large_img_size),action="store_true")
    '''
    parser.add_argument("-lopg", "--leave_off_percent_in_group",help=
        "add this flag to not display the percent of the total for each group.\
        ",action="store_true")
    parser.add_argument("-lotg", "--leave_off_total_in_group",help=
        "add this flag to not display the total amount for each group.\
        ",action="store_true")
    parser.add_argument("-svg", "--save_vg",help=
        "add this flag to save as vector graphics \
        (**RECOMMENDED FOR PUBLICATION***) instead of default png. Not default \
        or saved alongside `.png` version normally because not as easy to deal \
        with as typical image file. ",
        action="store_true")
    parser.add_argument("-ssn", "--sort_on_subgroup_name",help=
        "add this flag to sort the subgroups display in the inner ring based \
        on the subgroup name like in example at \
        https://python-graph-gallery.com/163-donut-plot-with-subgroups/. ",
        action="store_true")
    parser.add_argument('-hll', '--hilolist', action='store', type=str,
        help="This flag is used to specify that you want to control the order \
        of the subgroups to range from being dark to light in the degree of \
        color intensity in the plot because the default result does not \
        suffice. Follow the flag with an order listing, high intensity to low, \
        of the subgroup identifiers separated by \
        commas, without spaces or quotes. For example `-hll yes,maybe,no`. \
        When the script is run the identifiers and default order used will be \
        indicated so that you'll have the identifiers at hand when running \
        again.\
        ")# based on https://stackoverflow.com/a/24866869/8508004
    parser.add_argument('-ac', '--advance_color', action='store', type=int,
        default= '0', help="**FOR ADVANCED USE.** Allows for advancing the \
        color palette iterator a specified number of times. The idea is it \
        allows skipping a specified amount of the initial colors to help \
        'customize' the set of colors in the plot, if needed. Supply \
        the number to advance after the flag on the command line. For example, \
        `-ac 4`. If that doesn't allow dialing in a good set of colors, and \
        you know Python, you can edit the `list_of_other_good_sequences`.")
    #I would also like trigger help to display if no arguments provided because
    # need at least one for url
    if len(sys.argv)==1:    #from http://stackoverflow.com/questions/4042452/display-help-message-with-python-argparse-when-script-is-called-without-any-argu
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    # Translate the parsed flags into the module-level names that main()
    # (and donut_plot_with_subgroups_from_dataframe) read.
    save_vg = args.save_vg
    include_percent_in_grp_label= not args.leave_off_percent_in_group
    include_total_in_grp_label= not args.leave_off_total_in_group
    if args.large_image:
        plot_figure_size = large_img_size
    hilolist = args.hilolist
    #process to a python list if it exists
    if hilolist:
        hilolist = args.hilolist.split(',')
        #if they happen to be integers or floats, convert so will match type in
        # dataframe
        if all([is_number(s) for s in hilolist]):
            hilolist = [cast_to_number(s) for s in hilolist]
            # make sure all float if any are float, because line above will
            # cast to integer if possible
            if any(isinstance(x, float) for x in hilolist):
                hilolist = [float(x) for x in hilolist]
    sort_on_subgroup_name = args.sort_on_subgroup_name
    advance_color_increments = args.advance_color
    main()
#*******************************************************************************
###-***********************END MAIN PORTION OF SCRIPT***********************-###
#******************************************************************************* | [
"pandas.read_pickle",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.random.random",
"seaborn.light_palette",
"matplotlib.pyplot.pie",
"pathlib2.Path",
"sys.stderr.write",
"numpy.random.seed",
"sys.exit",
"matplotlib.pyplot.title",... | [((7384, 7402), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (7398, 7402), True, 'import numpy as np\n'), ((15572, 15610), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'plot_figure_size'}), '(figsize=plot_figure_size)\n', (15584, 15610), True, 'import matplotlib.pyplot as plt\n'), ((16022, 16080), 'matplotlib.pyplot.setp', 'plt.setp', (['mypie'], {'width': 'outer_ring_width', 'edgecolor': '"""white"""'}), "(mypie, width=outer_ring_width, edgecolor='white')\n", (16030, 16080), True, 'import matplotlib.pyplot as plt\n'), ((18291, 18453), 'matplotlib.pyplot.pie', 'plt.pie', (['subgroup_size'], {'radius': 'inner_ring_radius', 'labels': 'subgroup_names', 'textprops': "{'fontsize': plot_text_size}", 'labeldistance': '(0.7)', 'colors': 'sub_grp_colors'}), "(subgroup_size, radius=inner_ring_radius, labels=subgroup_names,\n textprops={'fontsize': plot_text_size}, labeldistance=0.7, colors=\n sub_grp_colors)\n", (18298, 18453), True, 'import matplotlib.pyplot as plt\n'), ((18476, 18535), 'matplotlib.pyplot.setp', 'plt.setp', (['mypie2'], {'width': 'inner_ring_width', 'edgecolor': '"""white"""'}), "(mypie2, width=inner_ring_width, edgecolor='white')\n", (18484, 18535), True, 'import matplotlib.pyplot as plt\n'), ((18541, 18558), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0)'], {}), '(0, 0)\n', (18552, 18558), True, 'import matplotlib.pyplot as plt\n'), ((21417, 21844), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""donut_plot_with_subgroups_from_dataframe.py"""', 'description': '"""donut_plot_with_subgroups_from_dataframe.py takes a dataframe, and some information about columns in the dataframe and makes a donut plot. The inner ring is a breakdown of the subgroupings per each group in the outer ring of the plot. 
**** Script by <NAME> (fomightez @ github) ***"""'}), "(prog='donut_plot_with_subgroups_from_dataframe.py',\n description=\n 'donut_plot_with_subgroups_from_dataframe.py takes a dataframe, and some information about columns in the dataframe and makes a donut plot. The inner ring is a breakdown of the subgroupings per each group in the outer ring of the plot. **** Script by <NAME> (fomightez @ github) ***'\n )\n", (21440, 21844), False, 'import argparse\n'), ((5397, 5412), 'pathlib2.Path', 'Path', (['file_name'], {}), '(file_name)\n', (5401, 5412), False, 'from pathlib2 import Path\n'), ((5471, 5496), 'pandas.read_pickle', 'pd.read_pickle', (['file_name'], {}), '(file_name)\n', (5485, 5496), True, 'import pandas as pd\n'), ((8480, 8502), 'unicodedata.numeric', 'unicodedata.numeric', (['s'], {}), '(s)\n', (8499, 8502), False, 'import unicodedata\n'), ((9460, 9482), 'unicodedata.numeric', 'unicodedata.numeric', (['s'], {}), '(s)\n', (9479, 9482), False, 'import unicodedata\n'), ((18588, 18631), 'matplotlib.pyplot.title', 'plt.title', (['plot_title'], {'size': 'title_text_size'}), '(plot_title, size=title_text_size)\n', (18597, 18631), True, 'import matplotlib.pyplot as plt\n'), ((19628, 19676), 'sys.stderr.write', 'sys.stderr.write', (['"""Plot figure object returned."""'], {}), "('Plot figure object returned.')\n", (19644, 19676), False, 'import sys\n'), ((26109, 26120), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (26117, 26120), False, 'import sys\n'), ((5550, 5582), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'sep': '"""\t"""'}), "(file_name, sep='\\t')\n", (5561, 5582), True, 'import pandas as pd\n'), ((7461, 7483), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['col_name'], {}), '(col_name)\n', (7473, 7483), True, 'import matplotlib.pyplot as plt\n'), ((7950, 7999), 'seaborn.light_palette', 'sns.light_palette', (['rgb'], {'input': '"""rgb"""', 'as_cmap': '(True)'}), "(rgb, input='rgb', as_cmap=True)\n", (7967, 7999), True, 'import seaborn as sns\n'), 
((17337, 17440), 'sys.stderr.write', 'sys.stderr.write', (['"""Look into adding use of the `--hilolist` option to specify the order.\n\n"""'], {}), "(\n 'Look into adding use of the `--hilolist` option to specify the order.\\n\\n'\n )\n", (17353, 17440), False, 'import sys\n'), ((17476, 17608), 'sys.stderr.write', 'sys.stderr.write', (['"""Provide a Python list as `hilolist` when calling the function to specify the order of intensity.\n\n"""'], {}), '(\n """Provide a Python list as `hilolist` when calling the function to specify the order of intensity.\n\n"""\n )\n', (17492, 17608), False, 'import sys\n'), ((19037, 19105), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_file_name[:-4] + '.svg')"], {'orientation': '"""landscape"""'}), "(output_file_name[:-4] + '.svg', orientation='landscape')\n", (19048, 19105), True, 'import matplotlib.pyplot as plt\n'), ((19478, 19507), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_file_name'], {}), '(output_file_name)\n', (19489, 19507), True, 'import matplotlib.pyplot as plt\n'), ((5636, 5658), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {}), '(file_name)\n', (5647, 5658), True, 'import pandas as pd\n'), ((6070, 6081), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6078, 6081), False, 'import sys\n'), ((7659, 7700), 'seaborn.light_palette', 'sns.light_palette', (['col_name'], {'as_cmap': '(True)'}), '(col_name, as_cmap=True)\n', (7676, 7700), True, 'import seaborn as sns\n'), ((7838, 7862), 'numpy.random.random', 'np.random.random', ([], {'size': '(3)'}), '(size=3)\n', (7854, 7862), True, 'import numpy as np\n'), ((7746, 7801), 'seaborn.light_palette', 'sns.light_palette', (['col_name'], {'as_cmap': '(True)', 'input': '"""xkcd"""'}), "(col_name, as_cmap=True, input='xkcd')\n", (7763, 7801), True, 'import seaborn as sns\n')] |
'''Utils for gene expression
'''
import os, sys
import json
import hashlib
import math
from collections import OrderedDict
import h5py
import requests
import numpy as np
import pandas as pd
import scipy.sparse as sp
from scipy import io
from sklearn.preprocessing import scale
from bson.codec_options import CodecOptions
from .geo_meta import *
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def load_archs4_read_counts(organism='human', gsms=[]):
    '''Load read counts from the local ARCHS4 h5 file for a list of GSMs.

    Parameters
    ----------
    organism : str
        Selects the `<organism>_matrix.h5` file next to this package.
    gsms : list of str
        GEO sample accessions to extract.

    Returns
    -------
    pandas.DataFrame
        Genes x samples read-count matrix with all-zero genes dropped.

    Raises
    ------
    RuntimeError
        If none of the requested GSMs is present in the h5 file.
    '''
    fn = os.path.abspath(os.path.join(SCRIPT_DIR, '../data/%s_matrix.h5' % organism))
    # Use a context manager so the h5 handle is closed deterministically
    # (the original leaked the open file).
    with h5py.File(fn, 'r') as f:
        mat = f['data']['expression']
        all_gsms = f['meta']['Sample_geo_accession']
        sample_mask = np.in1d(all_gsms, gsms)
        if sample_mask.sum() == 0:
            raise RuntimeError('None of the gsms %s exists in the h5 file %s_matrix.h5' %
                (gsms, organism))
        sample_ids = all_gsms[sample_mask]
        genes = f['meta']['genes']
        # to prevent MongoDB error ('.' is illegal in field names).
        # list(...) is required on Python 3: a lazy map object cannot be
        # used as a pandas Index.
        genes = list(map(lambda x: x.replace('.', '_'), genes))
        # Retrieve gene by sample matrix (read while the file is open)
        expr_df = pd.DataFrame(mat[sample_mask, :].T, index=genes, columns=sample_ids)
    # Filter out non-expressed genes
    expr_df = expr_df.loc[expr_df.sum(axis=1) > 0, :]
    # Filter out samples with very low read counts
    # valid_sample_mask = expr_df.sum(axis=0) > 100
    # expr_df = expr_df.loc[:, valid_sample_mask]
    return expr_df
def process_sparse_expr_mat(mat, barcodes=None, genes=None):
    """Turn a sparse (n_samples, n_genes) matrix into dense QC/expression frames.

    Returns ``(expr_df, meta_df)``: ``expr_df`` is a genes x samples dense
    frame with never-expressed genes dropped and duplicated gene symbols
    summed; ``meta_df`` holds per-sample read and expressed-gene counts.
    """
    # Keep only genes detected in at least one cell.
    per_gene_hits = np.asarray((mat > 0).sum(axis=0)).ravel()
    keep = per_gene_hits > 0
    mat = mat[:, keep]
    # Per-sample QC statistics.
    meta_df = pd.DataFrame(
        {
            'n_reads': np.asarray(mat.sum(axis=1)).ravel(),
            'n_expressed_genes': np.asarray((mat > 0).sum(axis=1)).ravel(),
        },
        index=barcodes,
    )
    expr_df = pd.DataFrame(mat.toarray().T, index=genes[keep], columns=barcodes)
    expr_df.index.name = 'gene'
    # Collapse duplicated gene symbols by summation.
    expr_df = expr_df.reset_index().groupby('gene').agg(np.sum)
    return expr_df, meta_df
def parse_10x_h5(filename):
    """Parse a 10x Genomics HDF5 file into (expr_df, meta_df).

    The file stores a CSC-style triplet (data, indices, indptr) under a
    single top-level group; the matrix is rebuilt as a
    (n_samples, n_genes) CSR matrix and handed to process_sparse_expr_mat.
    """
    with h5py.File(filename, 'r') as f:
        # list(...) is required on Python 3, where h5py's keys() view is
        # not subscriptable (the original `f.keys()[0]` raises TypeError).
        group_name = list(f.keys())[0]
        group = f[group_name]
        # gene_ids = group['genes']
        genes = group['gene_names'][:]
        barcodes = group['barcodes'][:]
        n_genes, n_samples = len(genes), len(barcodes)
        # Construct a sparse matrix object (copies data out of the file,
        # so it remains valid after the file is closed).
        mat = sp.csr_matrix((group['data'], group['indices'], group['indptr']),
            shape=(n_samples, n_genes))
    expr_df, meta_df = process_sparse_expr_mat(mat, barcodes, genes)
    return expr_df, meta_df
def parse_10x_mtx(mtx_fn, genes_fn, barcodes_fn):
    # Parse the .mtx, genes.tsv, barcodes.tsv files from 10x Genomics.
    # The MatrixMarket file is genes x barcodes; it is transposed below so
    # process_sparse_expr_mat receives (n_samples, n_genes).
    # Returns (expr_df, meta_df) as produced by process_sparse_expr_mat.
    mat = io.mmread(mtx_fn)
    # assume the 2nd column are gene symbols
    genes = pd.read_csv(genes_fn, sep='\t', header=None)[1]
    # barcodes.tsv has a single unnamed column of cell barcodes
    barcodes = pd.read_csv(barcodes_fn, sep='\t', names=['barcodes'])['barcodes']
    # Sanity check: matrix dimensions must match the annotation files.
    if mat.shape != (genes.shape[0], barcodes.shape[0]):
        raise ValueError('The shape of the expression matrix (%d, %d) is inconsistent with \
            the number of genes (%d) and the number of barcodes (%d)' % \
            (mat.shape[0], mat.shape[1], genes.shape[0], barcodes.shape[0]))
    # .tocsr() because mmread returns a COO matrix, which cannot be sliced.
    expr_df, meta_df = process_sparse_expr_mat(mat.tocsr().T, barcodes, genes)
    return expr_df, meta_df
def compute_CPMs(expr_df, CPM_cutoff=0.3, at_least_in_persent_samples=1):
    '''Normalize read counts to counts-per-million and drop lowly expressed genes.

    A gene is kept when its CPM exceeds ``CPM_cutoff`` in more than
    ``at_least_in_persent_samples`` percent of the samples (rounded up).
    '''
    # Minimum number of samples a gene must pass the cutoff in.
    min_samples = int(math.ceil(at_least_in_persent_samples / 100. * expr_df.shape[1]))
    # Scale each column to a library size of one million.
    cpm_df = expr_df.mul(1e6).div(expr_df.sum(axis=0), axis=1)
    keep = (cpm_df > CPM_cutoff).sum(axis=1) > min_samples
    return cpm_df.loc[keep, :]
def log10_and_zscore(expr_df):
    """Log10-transform (pseudo-count 1) then z-score each gene (row-wise)."""
    logged = np.log10(expr_df + 1.)
    if isinstance(logged, pd.DataFrame):
        # Population standard deviation (ddof=0) per row.
        return logged.apply(lambda row: (row - row.mean()) / row.std(ddof=0), axis=1)
    if isinstance(logged, np.ndarray):
        # sklearn's scale standardizes along axis=1 (rows) here.
        return scale(logged, axis=1)
    return logged
def post_genes_to_enrichr(genes, description):
    """POST a gene list to the Enrichr API.

    Returns the Enrichr ``userListId`` on success, or None when the HTTP
    request fails.
    """
    payload = {
        'list': (None, '\n'.join(genes)),
        'description': description
    }
    resp = requests.post('https://amp.pharm.mssm.edu/Enrichr/addList', files=payload)
    if resp.ok:
        return json.loads(resp.text)['userListId']
    return None
class GeneExpressionDataset(object):
    """A gene-by-sample expression matrix with MongoDB persistence helpers.

    ``df`` holds genes as rows and samples as columns; values may be
    CPMs/RPKMs/FPKMs/TPMs or z-scores.  Dataset-level documents live in the
    ``dataset`` collection and per-gene expression vectors in ``expression``.
    """
    coll = 'dataset'  # MongoDB collection for dataset-level documents
    coll_expr = 'expression'  # MongoDB collection for per-gene expression rows

    def __init__(self, df, enrichment_results=None, visualizations=None, meta=None):
        """Wrap `df` and derive sample ids, genes and a content-hash id.

        Defaults are None-guarded instead of mutable literals so instances
        never share list/dict state accidentally.
        """
        if enrichment_results is None:
            enrichment_results = []
        if visualizations is None:
            visualizations = []
        if meta is None:
            meta = {}
        self.df = df  # df could be CPMs/RPKMs/FPKMs/TPMs or z-scores.
        # assert not self.is_zscored()
        self.avg_expression = df.mean(axis=1)
        self.sample_ids = df.columns.tolist()
        self.genes = df.index
        self.enrichment_results = enrichment_results
        self.visualizations = visualizations
        self.meta = meta
        self.meta_df = pd.DataFrame(meta.get('meta_df', {}), index=self.sample_ids)
        # Content-addressed id: identical matrices hash to the same dataset id.
        self.id = hashlib.md5(self.df.values.tobytes()).hexdigest()
        # Alternative: hash both meta and id
        # df_hash = hashlib.md5(self.df.values.tobytes()).hexdigest()
        # meta_hash = hashlib.md5(self.meta_df .values.tobytes()).hexdigest()
        # self.id = hashlib.md5(df_hash+meta_hash).hexdigest()

    def log10_and_zscore(self):
        # Delegates to the module-level helper; mutates self.df in place.
        self.df = log10_and_zscore(self.df)

    def is_zscored(self):
        # Heuristic: raw counts/CPMs are non-negative, z-scores are not.
        return self.df.min().min() < 0

    def DEGs_posted(self, db, etype='genewise-z'):
        '''Whether DEGs (from etype) has been POSTed to Enrichr.'''
        result = False
        if hasattr(self, 'd_sample_userListId'):
            if etype in self.d_sample_userListId:
                result = True
        else:
            doc = db[self.coll].find_one({'id': self.id},
                {'d_sample_userListId':True, '_id':False})
            if doc:
                if len(doc.get('d_sample_userListId', {})) > 0:
                    d_sample_userListId = doc['d_sample_userListId'].get(etype, {})
                    if len(d_sample_userListId) > 0 and None not in d_sample_userListId.values():
                        # make sure the userListIds does not contain None
                        result = True
        return result

    def identify_DEGs_genewise_z(self, cutoff=2.33):
        """Boolean genes x samples frame of up-DEGs by gene-wise z-score.

        z-scores self.df in place first if it is not already z-scored.
        """
        if not self.is_zscored():
            self.log10_and_zscore()
        up_DEGs_df = self.df > cutoff
        return up_DEGs_df

    def identify_DEGs_samplewise_z(self, cutoff=2.33):
        """Boolean genes x samples frame of up-DEGs by sample-wise z-score."""
        assert not self.is_zscored()
        zscore_df = self.df.apply(lambda x: (x-x.mean())/x.std(ddof=0), axis=0)
        up_DEGs_df = zscore_df > cutoff
        return up_DEGs_df

    def identify_DEGs_from_background(self, cutoff=2.33, genes_meta=None):
        """Up-DEGs by z-score against background mean/std from `genes_meta`.

        `genes_meta` must be indexed by gene with 'mean_cpm'/'std_cpm' columns.
        """
        assert not self.is_zscored()
        df = self.df.copy()
        # Filter out genes not in genes_meta
        df = df.loc[df.index.isin(genes_meta.index)]
        genes, samples = df.index, df.columns
        gene_means = genes_meta.loc[df.index, 'mean_cpm'].values
        gene_stds = genes_meta.loc[df.index, 'std_cpm'].values
        # Compute gene-wise z-scores
        df = (df.values - gene_means.reshape(-1, 1)) / gene_stds.reshape(-1, 1)
        up_DEGs_mat = df > cutoff
        up_DEGs_df = pd.DataFrame(up_DEGs_mat, index=genes, columns=samples)
        return up_DEGs_df

    def identify_DEGs(self, cutoff=2.33, etype='genewise-z', genes_meta=None):
        """Dispatch to one of the DEG identification strategies by `etype`."""
        if etype == 'genewise-z':
            up_DEGs_df = self.identify_DEGs_genewise_z(cutoff)
        elif etype == 'samplewise-z':
            up_DEGs_df = self.identify_DEGs_samplewise_z(cutoff)
        else:
            up_DEGs_df = self.identify_DEGs_from_background(cutoff, genes_meta)
        return up_DEGs_df

    def post_DEGs_to_Enrichr(self, db, cutoff=2.33, etype='genewise-z', genes_meta=None):
        """POST per-sample up-gene lists to Enrichr, or load cached ids.

        Returns {etype: {sample_id: userListId}} and caches it on self.
        """
        try:
            if not self.DEGs_posted(db, etype):
                up_DEGs_df = self.identify_DEGs(cutoff, etype, genes_meta)
                d_sample_userListId = OrderedDict()
                for sample_id in self.sample_ids:
                    up_genes = self.genes[np.where(up_DEGs_df[sample_id])[0]].tolist()
                    user_list_id = None
                    # Only POST lists that are minimally informative.
                    if len(up_genes) > 10:
                        user_list_id = post_genes_to_enrichr(up_genes, '%s up' % sample_id)
                    d_sample_userListId[sample_id] = user_list_id
                # nest
                d_sample_userListId = {etype: d_sample_userListId}
            else:
                doc = db.get_collection(self.coll, codec_options=CodecOptions(OrderedDict))\
                    .find_one({'id': self.id},
                    {'d_sample_userListId.%s'%etype:True, '_id':False},
                    )
                d_sample_userListId = doc['d_sample_userListId']
            self.d_sample_userListId = d_sample_userListId
            return d_sample_userListId
        except Exception as e:
            print(e)
            # BUG FIX: the original called the undefined name `throw(e)`,
            # which masked the real error with a NameError.
            raise

    def save_DEGs(self, db, etype='genewise-z'):
        """Persist d_sample_userListId for `etype` onto the existing doc."""
        # Save d_sample_userListId to the existing doc in the db
        if hasattr(self, 'd_sample_userListId'):
            d_sample_userListId = self.d_sample_userListId
            # update_one for consistency with start()/finish() and because
            # Collection.update is deprecated in pymongo 3 (removed in 4).
            db[self.coll].update_one({'id': self.id}, {'$set':
                {'d_sample_userListId.%s'% etype: d_sample_userListId[etype]}})

    def save(self, db):
        """Insert the dataset doc and per-gene expression docs; return _id."""
        if hasattr(self, 'd_sample_userListId'):
            d_sample_userListId = self.d_sample_userListId
        else:
            d_sample_userListId = OrderedDict()
        doc = {
            'id': self.id,
            'meta': self.meta,
            'sample_ids': self.sample_ids,
            'genes': self.genes.tolist(),
            'avg_expression': self.avg_expression.tolist(),
            'd_sample_userListId': d_sample_userListId
        }
        insert_result = db[self.coll].insert_one(doc)
        gene_expression_docs = [
            {'dataset_id': self.id, 'gene': gene, 'values': values.tolist()} for gene, values in self.df.iterrows()
        ]
        # insert_many: Collection.insert is deprecated (removed in pymongo 4).
        _ = db[self.coll_expr].insert_many(gene_expression_docs)
        return insert_result.inserted_id

    def exists(self, db):
        """True if a dataset document with this id is already stored."""
        doc = db[self.coll].find_one({'id': self.id})
        return doc is not None

    def start(self, db):
        # Set the started flag to True and update the doc in db.
        # BUG FIX: the original `self.started = True,` stored the tuple (True,).
        self.started = True
        db[self.coll].update_one({'id': self.id},
            {'$set': {'started': True}})

    def finish(self, db):
        # Set the done flag to True and update the doc in db
        self.done = True
        db[self.coll].update_one({'id': self.id},
            {'$set': {'done': True}})

    @classmethod
    def load(cls, dataset_id, db, meta_only=False):
        '''Load from the database.'''
        doc = db[cls.coll].find_one({'id': dataset_id}, {'_id':False})
        if doc is None:
            obj = None
        else:
            if meta_only:
                # fake a df with the right index/columns but no values
                df = pd.DataFrame(index=doc['genes'], columns=doc['sample_ids'])
            else:
                # retrieve gene expression from expression collection
                expressions = db[cls.coll_expr].find({'dataset_id': dataset_id},
                    {'_id':False, 'gene':True, 'values':True})
                df = pd.DataFrame({expr['gene']: expr['values'] for expr in expressions}).transpose()
                df.columns = doc['sample_ids']
            obj = cls(df, meta=doc['meta'])
            obj.id = dataset_id
            obj.started = doc.get('started', False)
            obj.done = doc.get('done', False)
        return obj

    @classmethod
    def load_meta(cls, dataset_id, db, gene_set_libraries='KEGG_2016,ARCHS4_Cell-lines'):
        '''Only load number of samples and number of genes, for fast response under /progress endpoint.
        '''
        cur = db[cls.coll].aggregate([
            {'$match': {'id': dataset_id} },
            {'$group': {
                '_id': '$id',
                'id': {'$first': '$id'},
                'n_genes': {'$sum': {'$size': '$genes'} },
                'n_samples': {'$sum': {'$size': '$sample_ids'} },
                'started': {'$first': '$started'},
                'done': {'$first': '$done'},
            } }
        ])
        gene_set_libraries = set(gene_set_libraries.split(","))
        visualized = [i["name"] for i in db['vis'].find({'dataset_id': dataset_id},{"name":True})]
        enriched = [i["gene_set_library"] for i in db['enrichr'].find({'dataset_id': dataset_id},{"gene_set_library": True})]
        try:
            doc = cur.next()
            # "done" means both required visualizations and all requested
            # enrichment libraries have been computed.
            done = True
            vis = set(["PCA", "tSNE"])
            if len(vis.intersection(visualized)) < len(vis):
                done = False
            elif len(gene_set_libraries.intersection(enriched)) < len(gene_set_libraries):
                done = False
            doc["done"] = done
        except StopIteration: # dataset doesn't exist
            doc = None
        return doc

    @classmethod
    def query_gene(cls, dataset_id, query_string, db):
        '''Given a query string for gene symbols, return matched
        gene symbols and their avg_expression.'''
        doc = db[cls.coll].find_one({'id': dataset_id},
            {'genes':True, 'avg_expression':True, '_id':False})
        genes_df = pd.DataFrame(doc)
        mask = genes_df.genes.str.contains(query_string, case=False)
        return genes_df.loc[mask].rename(columns={'genes': 'gene'})

    @classmethod
    def get_gene_expr(cls, dataset_id, gene, db):
        """Return {gene: values} for one gene of one dataset."""
        doc = db[cls.coll_expr].find_one({'dataset_id': dataset_id, 'gene': gene},
            {'values': True, '_id':False})
        return {gene: doc['values']}

    @classmethod
    def remove_all(cls, dataset_id, db):
        '''Remove all enrichment, visualization, dataset related to the dataset.'''
        db[cls.coll].delete_one({'id': dataset_id})
        db[cls.coll_expr].delete_many({'dataset_id': dataset_id})
        db['enrichr'].delete_many({'dataset_id': dataset_id})
        db['enrichr_temp'].delete_many({'dataset_id': dataset_id})
        db['vis'].delete_many({'dataset_id': dataset_id})
class GEODataset(GeneExpressionDataset):
    """Gene expression dataset backed by a GEO series (GSE).

    Sample metadata is fetched from GEO via the ``GSE`` helper; read counts
    come from the local ARCHS4 h5 matrix for the given organism.
    """
    def __init__(self, gse_id, organism='human', meta_doc=None, meta_only=False, expression_kwargs={}):
        # NOTE(review): mutable default `expression_kwargs={}` is shared
        # across calls; it is only read here, but confirm no caller mutates it.
        self.id = gse_id
        self.organism = organism
        if meta_doc is None:
            # retrieve meta from GEO using the GSE class
            meta_doc = self.retrieve_meta()
        self.sample_ids = meta_doc['sample_id']
        if not meta_only:
            # retrieve the expression matrix from the h5 file
            df = self.retrieve_expression(expression_kwargs=expression_kwargs)
        else:
            df = pd.DataFrame(index=[], columns=self.sample_ids)
        # order/subset the samples
        meta_df = pd.DataFrame(meta_doc['meta_df'], index=self.sample_ids)
        meta_df = meta_df.loc[df.columns]
        self.sample_ids = df.columns.tolist()
        # keep only metadata columns that actually vary across samples
        meta_df = meta_df.loc[:, meta_df.nunique() > 1]
        # update meta_doc
        meta_doc['meta_df'] = meta_df.to_dict(orient='list')
        meta_doc['sample_id'] = self.sample_ids
        GeneExpressionDataset.__init__(self, df, meta=meta_doc)
        # Parent __init__ overwrote self.id with a content hash; restore the GSE id.
        self.id = gse_id
        # self.meta = meta_doc
        # self.meta_df = pd.DataFrame(meta_doc['meta_df'])\
        # 	.set_index('Sample_geo_accession')
    def retrieve_expression(self, expression_kwargs={}):
        '''Retrieve gene expression from the h5 file'''
        # CPM-normalize right away; expression_kwargs are compute_CPMs kwargs.
        df= load_archs4_read_counts(organism=self.organism, gsms=self.sample_ids)
        df = compute_CPMs(df, **expression_kwargs)
        return df
    def save(self, db):
        # Persist the dataset document plus per-gene expression documents;
        # also saves the associated GSE record when one is attached.
        if hasattr(self, 'd_sample_userListId'):
            d_sample_userListId = self.d_sample_userListId
        else:
            d_sample_userListId = OrderedDict()
        doc = {
            'id': self.id,
            'organism': self.organism,
            'meta': self.meta,
            'sample_ids': self.sample_ids,
            'genes': self.genes.tolist(),
            'avg_expression': self.avg_expression.tolist(),
            'd_sample_userListId': d_sample_userListId
        }
        insert_result = db[self.coll].insert_one(doc)
        gene_expression_docs = [
            {'dataset_id': self.id, 'gene':gene, 'values': values.tolist()} for gene, values in self.df.iterrows()
        ]
        # NOTE(review): Collection.insert is deprecated in pymongo 3 and
        # removed in pymongo 4; insert_many is the modern equivalent.
        _ = db[self.coll_expr].insert(gene_expression_docs)
        if hasattr(self, 'gse'):
            self.gse.save(db)
        return insert_result.inserted_id
    def retrieve_meta(self):
        '''Retrieve metadata from GEO through the GSE class'''
        gse = GSE(self.id)
        gse.retrieve()
        meta_df = gse.construct_sample_meta_df()
        # keep the GSE object so save() can persist it as well
        self.gse = gse
        # # order/subset the samples
        # meta_df = meta_df.loc[df.columns]
        # meta_df = meta_df.loc[:, meta_df.nunique() > 1]
        meta_doc = gse.meta
        meta_doc['meta_df'] = meta_df.to_dict(orient='list')
        return meta_doc
    def load_series_meta(self, db):
        '''Load series-level meta from `geo` collection.'''
        self.series = GSE.load(self.id, db).meta
    def purge(self, db):
        '''Remove every documents about this object across collections.'''
        db['gsm'].delete_many({'geo_accession': {'$in': self.sample_ids}})
        db['geo'].delete_one({'geo_accession': self.id})
        db['dataset'].delete_one({'id': self.id})
        for coll in ['expression', 'enrichr', 'enrichr_temp', 'vis', 'preds']:
            db[coll].delete_many({'dataset_id': self.id})
    @classmethod
    def load(cls, gse_id, db, meta_only=False):
        '''Load from h5 file.'''
        # Dataset-level doc supplies sample order; fresh meta comes from GSE.
        projection = {'_id':False, 'meta':False} # not use the meta field in 'dataset' collection
        doc = db[cls.coll].find_one({'id': gse_id}, projection)
        gse = GSE.load(gse_id, db)
        meta_df = gse.construct_sample_meta_df()
        # order/subset the samples
        meta_df = meta_df.loc[doc['sample_ids']]
        meta_df = meta_df.loc[:, meta_df.nunique() > 1]
        meta_doc = gse.meta
        meta_doc['meta_df'] = meta_df.to_dict(orient='list')
        meta_doc['sample_id'] = meta_df.index.tolist()
        obj = cls(doc['id'], organism=doc['organism'], meta_doc=meta_doc, meta_only=meta_only)
        return obj
| [
"json.loads",
"numpy.log10",
"requests.post",
"math.ceil",
"pandas.read_csv",
"collections.OrderedDict",
"numpy.where",
"numpy.in1d",
"scipy.io.mmread",
"os.path.join",
"h5py.File",
"os.path.realpath",
"bson.codec_options.CodecOptions",
"pandas.DataFrame",
"scipy.sparse.csr_matrix",
"s... | [((376, 402), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (392, 402), False, 'import os, sys\n'), ((611, 629), 'h5py.File', 'h5py.File', (['fn', '"""r"""'], {}), "(fn, 'r')\n", (620, 629), False, 'import h5py\n'), ((723, 746), 'numpy.in1d', 'np.in1d', (['all_gsms', 'gsms'], {}), '(all_gsms, gsms)\n', (730, 746), True, 'import numpy as np\n'), ((1867, 1961), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_reads': n_reads, 'n_expressed_genes': n_expressed_genes}"], {'index': 'barcodes'}), "({'n_reads': n_reads, 'n_expressed_genes': n_expressed_genes},\n index=barcodes)\n", (1879, 1961), True, 'import pandas as pd\n'), ((2907, 2924), 'scipy.io.mmread', 'io.mmread', (['mtx_fn'], {}), '(mtx_fn)\n', (2916, 2924), False, 'from scipy import io\n'), ((3978, 4001), 'numpy.log10', 'np.log10', (['(expr_df + 1.0)'], {}), '(expr_df + 1.0)\n', (3986, 4001), True, 'import numpy as np\n'), ((4363, 4437), 'requests.post', 'requests.post', (['"""https://amp.pharm.mssm.edu/Enrichr/addList"""'], {'files': 'payload'}), "('https://amp.pharm.mssm.edu/Enrichr/addList', files=payload)\n", (4376, 4437), False, 'import requests\n'), ((545, 604), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', "('../data/%s_matrix.h5' % organism)"], {}), "(SCRIPT_DIR, '../data/%s_matrix.h5' % organism)\n", (557, 604), False, 'import os, sys\n'), ((1079, 1147), 'pandas.DataFrame', 'pd.DataFrame', (['mat[sample_mask, :].T'], {'index': 'genes', 'columns': 'sample_ids'}), '(mat[sample_mask, :].T, index=genes, columns=sample_ids)\n', (1091, 1147), True, 'import pandas as pd\n'), ((2295, 2319), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (2304, 2319), False, 'import h5py\n'), ((2592, 2690), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (["(group['data'], group['indices'], group['indptr'])"], {'shape': '(n_samples, n_genes)'}), "((group['data'], group['indices'], group['indptr']), shape=(\n n_samples, n_genes))\n", (2605, 2690), True, 'import 
scipy.sparse as sp\n'), ((2976, 3020), 'pandas.read_csv', 'pd.read_csv', (['genes_fn'], {'sep': '"""\t"""', 'header': 'None'}), "(genes_fn, sep='\\t', header=None)\n", (2987, 3020), True, 'import pandas as pd\n'), ((3036, 3090), 'pandas.read_csv', 'pd.read_csv', (['barcodes_fn'], {'sep': '"""\t"""', 'names': "['barcodes']"}), "(barcodes_fn, sep='\\t', names=['barcodes'])\n", (3047, 3090), True, 'import pandas as pd\n'), ((3658, 3716), 'math.ceil', 'math.ceil', (['(at_least_in_persent_samples / 100.0 * n_samples)'], {}), '(at_least_in_persent_samples / 100.0 * n_samples)\n', (3667, 3716), False, 'import math\n'), ((4485, 4506), 'json.loads', 'json.loads', (['resp.text'], {}), '(resp.text)\n', (4495, 4506), False, 'import json\n'), ((7069, 7124), 'pandas.DataFrame', 'pd.DataFrame', (['up_DEGs_mat'], {'index': 'genes', 'columns': 'samples'}), '(up_DEGs_mat, index=genes, columns=samples)\n', (7081, 7124), True, 'import pandas as pd\n'), ((12064, 12081), 'pandas.DataFrame', 'pd.DataFrame', (['doc'], {}), '(doc)\n', (12076, 12081), True, 'import pandas as pd\n'), ((13438, 13494), 'pandas.DataFrame', 'pd.DataFrame', (["meta_doc['meta_df']"], {'index': 'self.sample_ids'}), "(meta_doc['meta_df'], index=self.sample_ids)\n", (13450, 13494), True, 'import pandas as pd\n'), ((4163, 4185), 'sklearn.preprocessing.scale', 'scale', (['expr_df'], {'axis': '(1)'}), '(expr_df, axis=1)\n', (4168, 4185), False, 'from sklearn.preprocessing import scale\n'), ((8923, 8936), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8934, 8936), False, 'from collections import OrderedDict\n'), ((13348, 13395), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[]', 'columns': 'self.sample_ids'}), '(index=[], columns=self.sample_ids)\n', (13360, 13395), True, 'import pandas as pd\n'), ((14328, 14341), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (14339, 14341), False, 'from collections import OrderedDict\n'), ((7715, 7728), 'collections.OrderedDict', 'OrderedDict', ([], 
{}), '()\n', (7726, 7728), False, 'from collections import OrderedDict\n'), ((10121, 10180), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': "doc['genes']", 'columns': "doc['sample_ids']"}), "(index=doc['genes'], columns=doc['sample_ids'])\n", (10133, 10180), True, 'import pandas as pd\n'), ((10375, 10443), 'pandas.DataFrame', 'pd.DataFrame', (["{expr['gene']: expr['values'] for expr in expressions}"], {}), "({expr['gene']: expr['values'] for expr in expressions})\n", (10387, 10443), True, 'import pandas as pd\n'), ((8146, 8171), 'bson.codec_options.CodecOptions', 'CodecOptions', (['OrderedDict'], {}), '(OrderedDict)\n', (8158, 8171), False, 'from bson.codec_options import CodecOptions\n'), ((7794, 7825), 'numpy.where', 'np.where', (['up_DEGs_df[sample_id]'], {}), '(up_DEGs_df[sample_id])\n', (7802, 7825), True, 'import numpy as np\n')] |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
# from arch.api.utils import log_utils
# LOGGER = log_utils.getLogger()
class Gradient:
    """Abstract base for gradient/loss computation over weighted instances."""
    def compute(self, values, coef, intercept, fit_intercept):
        raise NotImplementedError("Method not implemented")

    def compute_loss(self, X, Y, coef, intercept):
        raise NotImplementedError("Method not implemented")

    def load_data(self, data_instance):
        """Materialize a batch of weighted feature rows and {+1, -1} labels.

        ``data_instance`` yields ``(key, instance)`` pairs where each
        instance carries ``weight``, ``features`` and ``label`` attributes.
        """
        # LOGGER.debug("In load_data of gradient function")
        # Consume the iterable exactly once before building both arrays.
        pairs = list(data_instance)
        features = [inst.weight * inst.features for _, inst in pairs]
        # Remap labels: 1 stays +1, everything else becomes -1.
        labels = [[1] if inst.label == 1 else [-1] for _, inst in pairs]
        return np.array(features), np.array(labels)
| [
"numpy.array"
] | [((1393, 1404), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1401, 1404), True, 'import numpy as np\n'), ((1417, 1428), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (1425, 1428), True, 'import numpy as np\n')] |
import numpy as np
# sigmoid function
def my_sigmoid(w, x):
    """Logistic function of w.T x evaluated for every row of x."""
    z = w.T.dot(x.T)
    return 1 / (1 + np.exp(-z))
# loss function (logistic-regression negative log-likelihood)
def obj_fun(w, x, y):
    """Cross-entropy loss summed over samples, with labels y in {0, 1}."""
    p = my_sigmoid(w, x)
    y_row = y.reshape(1, -1)
    log_likelihood = y_row * np.log(p) + (1 - y_row) * np.log(1 - p)
    return np.sum(-log_likelihood)
# function computing the stochastic gradient
def my_Stgrad(w,x,y):
return (my_sigmoid(w,x) - y)*x.T | [
"numpy.sum"
] | [((241, 253), 'numpy.sum', 'np.sum', (['(-tmp)'], {}), '(-tmp)\n', (247, 253), True, 'import numpy as np\n')] |
"""
Run transfer learning on FashionProductImages dataset. This will first fine-tune
a chosen model from the ImageNet model zoo on classifying the 20 most common
product and then in a second pass will fine-tune the network further on the
remaining, less common, product classes.
"""
import os
import random
import time
import numpy as np
from functools import partial
from datetime import datetime
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torch.nn.parallel
from few_shot_learning.models import AdaptiveHeadClassifier
from few_shot_learning.datasets import FashionProductImages, \
FashionProductImagesSmall
from few_shot_learning.sampler import get_train_and_val_sampler
from few_shot_learning.utils import AverageMeter, ProgressMeter, \
allocate_model, accuracy, allocate_inputs, save_checkpoint, save_results
from config import DATA_PATH
best_acc1 = 0
def transfer(
        data_dir=DATA_PATH,
        architecture='resnet18',
        num_workers=4,
        epochs=100,
        batch_size=64,
        learning_rate=1e-3,
        learning_rate_tr=None,
        optimizer_cls=torch.optim.Adam,
        print_freq=10,
        seed=None,
        gpu=None,
        dtype=None,
        distributed=False,
        log_dir='~/few-shot-learning/logs',
        model_dir='~/few-shot-learning/models',
        date_prefix=False,
        small_dataset=False
):
    """Two-stage transfer learning on the FashionProductImages dataset.

    Stage 1 fine-tunes an ImageNet-pretrained `architecture` on the 20 most
    common product classes ("top"); stage 2 switches the classifier head
    (`model.set_active(1)`) and fine-tunes further on the remaining classes
    ("bottom"). Checkpoints are written under `model_dir` and metric curves
    under `log_dir`, optionally inside a date-stamped subdirectory when
    `date_prefix` is True. `learning_rate_tr` defaults to `learning_rate`
    when None.
    """
    log_dir = os.path.expanduser(log_dir)
    model_dir = os.path.expanduser(model_dir)
    if date_prefix:
        date = datetime.now().strftime(r"%y_%m_%d_%H%M")
        log_dir = os.path.join(log_dir, date)
        model_dir = os.path.join(model_dir, date)
    # os.mkdir only creates the leaf directory; parents must already exist
    if not os.path.isdir(log_dir):
        os.mkdir(log_dir)
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)
    if seed is not None:
        random.seed(seed)
        torch.manual_seed(seed)
    # make partial functions for allocation of model and inputs
    _allocate_model = partial(allocate_model, dtype, distributed, gpu,
                              architecture)
    _allocate_inputs = partial(allocate_inputs, dtype, distributed, gpu)
    # TODO not_implemented
    # optionally resume from a checkpoint
    # if resume:
    #     restore_model(model, optimizer, gpu, model_dir)
    # TODO not_implemented
    # if evaluate:
    #     validate(val_loader, model, criterion, device, print_freq)
    #     return
    # ----------------------------------------------------------------------- #
    # Data loading
    # ----------------------------------------------------------------------- #
    # Imagenet-specific normalization
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # image dimension resize depending on dataset
    resize = (80, 60) if small_dataset else (400, 300)
    # augmentation (crop/jitter/flip) only on the training split
    data_transforms = {
        'train': transforms.Compose([
            # transforms.Resize(resize),
            transforms.RandomResizedCrop(resize, scale=(0.8, 1.0)),
            transforms.ColorJitter(),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize
        ]),
        'val': transforms.Compose([
            transforms.Resize(resize),
            transforms.ToTensor(),
            normalize
        ]),
        'test': transforms.Compose([
            transforms.Resize(resize),
            transforms.ToTensor(),
            normalize
        ]),
    }
    # prepare dictionary of all datasets. There are datasets for train, test,
    # and validation for both the top20 and bottom (transfer) classes
    dataset = FashionProductImages if not small_dataset \
        else FashionProductImagesSmall
    data = {
        classes: {
            split: dataset(
                data_dir,
                split='train' if split in ['train', 'val'] else 'test',
                classes=classes,
                transform=data_transforms[split]
            ) for split in ["train", "test", "val"]
        } for classes in ["top", "bottom"]
    }
    # ending _ft is for initial fine-tuning with top20 classes
    trainset_ft = data['top']['train']
    valset_ft = data['top']['val']
    testset_ft = data['top']['test']
    # train and val sampler
    train_sampler_ft, train_indices_ft, val_sampler_ft, val_indices_ft = \
        get_train_and_val_sampler(trainset_ft,
                                  train_size=0.9,
                                  balanced_training=True)
    train_loader_ft = torch.utils.data.DataLoader(
        trainset_ft, batch_size=batch_size, num_workers=num_workers,
        sampler=train_sampler_ft, pin_memory=True
    )
    val_loader_ft = torch.utils.data.DataLoader(
        valset_ft, batch_size=batch_size, num_workers=num_workers,
        sampler=val_sampler_ft, pin_memory=True
    )
    test_loader_ft = torch.utils.data.DataLoader(
        testset_ft, batch_size=batch_size, num_workers=num_workers,
        shuffle=False, pin_memory=True
    )
    # ending _tr for transfer
    trainset_tr = data['bottom']['train']
    valset_tr = data['bottom']['val']
    testset_tr = data['bottom']['test']
    # can't stratify along classes since some have only one sample
    # TODO: make sure there is at least one sample of every class in the
    #  training set?
    train_sampler_tr, train_indices_tr, val_sampler_tr, val_indices_tr = \
        get_train_and_val_sampler(trainset_tr, balanced_training=True,
                                  stratify=False)
    # NOTE(review): unlike the _ft loaders above, the _tr train/val loaders
    # omit pin_memory=True — confirm whether that asymmetry is intentional.
    train_loader_tr = torch.utils.data.DataLoader(
        trainset_tr, batch_size=batch_size, num_workers=num_workers,
        sampler=train_sampler_tr
    )
    val_loader_tr = torch.utils.data.DataLoader(
        valset_tr, batch_size=batch_size, num_workers=num_workers,
        sampler=val_sampler_tr
    )
    test_loader_tr = torch.utils.data.DataLoader(
        testset_tr, batch_size=batch_size, num_workers=num_workers,
        shuffle=False, pin_memory=True
    )
    # ----------------------------------------------------------------------- #
    # Create model and optimizer for initial fine-tuning
    # ----------------------------------------------------------------------- #
    print("=> using pre-trained model '{}'".format(architecture))
    # one output head per stage: [top20 classes, transfer classes]
    out_features = [trainset_ft.n_classes, trainset_tr.n_classes]
    model = AdaptiveHeadClassifier(out_features, architecture=architecture)
    model = _allocate_model(model)
    # define loss function (criterion) and optimizer
    # TODO: different devices
    # NOTE(review): .cuda() is hard-coded here, so this fails on CPU-only
    # hosts even though gpu/dtype/distributed parameters exist — confirm.
    criterion = nn.CrossEntropyLoss().cuda()
    # TODO: optimizer args
    optimizer_ft = optimizer_cls(model.parameters(), learning_rate)
    # TODO parameters as function arguments
    lr_scheduler_ft = torch.optim.lr_scheduler.StepLR(optimizer_ft,
                                                    step_size=5,
                                                    gamma=0.7)
    # ----------------------------------------------------------------------- #
    # Training: fine-tune
    # ----------------------------------------------------------------------- #
    print("=> Running {} epochs of fine-tuning (top20)".format(epochs))
    train_model(train_loader_ft, val_loader_ft, model, criterion, optimizer_ft,
                lr_scheduler_ft, epochs, print_freq, _allocate_inputs,
                model_prefix="finetuning",
                log_dir=os.path.expanduser(log_dir),
                model_dir=os.path.expanduser(model_dir))
    # NOTE(review): test metrics are computed but never returned or logged
    # from this function — confirm whether that is intended.
    _, test_top1_ft, test_top5_ft = validate(test_loader_ft, model, criterion,
                                             print_freq, _allocate_inputs)
    # ----------------------------------------------------------------------- #
    # Create optimizer for transfer learning
    # ----------------------------------------------------------------------- #
    # change the active head
    try:
        model.set_active(1)
    except AttributeError:
        # model is wrapped (e.g. nn.DataParallel); reach through .module
        model.module.set_active(1)
    # start a new learning rate scheduler and optimizer
    learning_rate_tr = learning_rate if learning_rate_tr is None \
        else learning_rate_tr
    optimizer_tr = optimizer_cls(model.parameters(), lr=learning_rate_tr)
    lr_scheduler_tr = torch.optim.lr_scheduler.StepLR(optimizer_tr,
                                                    step_size=5,
                                                    gamma=0.7)
    # ----------------------------------------------------------------------- #
    # Training: transfer
    # ----------------------------------------------------------------------- #
    print("=> Running {} epochs of transfer learning".format(epochs))
    train_model(train_loader_tr, val_loader_tr, model, criterion, optimizer_tr,
                lr_scheduler_tr, epochs, print_freq, _allocate_inputs,
                model_prefix="transfer",
                log_dir=os.path.expanduser(log_dir),
                model_dir=os.path.expanduser(model_dir))
    _, test_top1_tr, test_top5_tr = validate(test_loader_tr,
                                             model, criterion,
                                             print_freq, _allocate_inputs)
def train_model(
    train_loader,
    val_loader,
    model,
    criterion,
    optimizer,
    lr_scheduler,
    epochs,
    print_freq,
    allocate_inputs,
    model_prefix,
    log_dir,
    model_dir
):
    """Train for `epochs` epochs, checkpoint after every epoch, track the
    weights with the best validation top-1 accuracy, restore them at the
    end, and save per-epoch metric curves to `log_dir`.
    """
    def unwrapped_state_dict():
        # nn.DataParallel hides the real module behind `.module`
        try:
            return model.module.state_dict()
        except AttributeError:
            return model.state_dict()

    metric_names = ("train_loss", "train_acc1", "train_acc5",
                    "val_loss", "val_acc1", "val_acc5")
    history = {name: np.zeros(epochs) for name in metric_names}
    best_acc1 = 0.0
    best_state_dict = None
    for epoch in range(epochs):
        # one pass over the training data
        train_loss, train_top1, train_top5 = train_epoch(
            train_loader, model, criterion, optimizer, lr_scheduler,
            epoch, print_freq, allocate_inputs)
        # evaluate on the held-out split
        val_loss, top1, top5 = validate(val_loader, model, criterion,
                                        print_freq, allocate_inputs)
        # remember the best acc@1 seen so far
        acc1 = top1.avg
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        checkpoint_state_dict = unwrapped_state_dict()
        if is_best:
            best_state_dict = unwrapped_state_dict()
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': checkpoint_state_dict,
            'best_acc1': best_acc1,
            'optimizer': optimizer.state_dict()
        }, is_best, model_dir=model_dir, prefix=model_prefix)
        epoch_metrics = (train_loss, train_top1, train_top5,
                         val_loss, top1, top5)
        for name, meter in zip(metric_names, epoch_metrics):
            history[name][epoch] = meter.avg
    # restore the best-performing weights before returning
    model.load_state_dict(best_state_dict)
    save_results(history, dir=log_dir, prefix=model_prefix)
def train_epoch(train_loader, model, criterion, optimizer, scheduler, epoch,
                print_freq, allocate_inputs):
    """Run one training epoch and return the (loss, acc@1, acc@5) meters."""
    # progress bookkeeping
    time_meter = AverageMeter('Time', ':6.3f')
    load_meter = AverageMeter('Data', ':6.3f')
    loss_meter = AverageMeter('Loss', ':.4e')
    acc1_meter = AverageMeter('Acc@1', ':6.2f')
    acc5_meter = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(train_loader),
        [loss_meter, acc1_meter, acc5_meter, time_meter, load_meter],
        prefix="Epoch: [{}]".format(epoch))

    # enable dropout / batch-norm updates
    model.train()

    tick = time.time()
    for step, (images, target) in enumerate(train_loader):
        # how long we waited on the data pipeline
        load_meter.update(time.time() - tick)
        images, target = allocate_inputs(images, target)

        # forward pass and loss
        output = model(images)
        loss = criterion(output, target)

        # record accuracy and loss, weighted by batch size
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        batch = images.size(0)
        loss_meter.update(loss.item(), batch)
        acc1_meter.update(acc1[0], batch)
        acc5_meter.update(acc5[0], batch)

        # backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # total time for this iteration
        time_meter.update(time.time() - tick)
        tick = time.time()
        if step % print_freq == 0:
            progress.display(step)

    # anneal learning rate once per epoch
    scheduler.step()
    return loss_meter, acc1_meter, acc5_meter
def validate(val_loader, model, criterion, print_freq, allocate_inputs):
    """Evaluate `model` over `val_loader` and return (loss, top1, top5) meters.

    Runs with the model in eval mode under torch.no_grad(); displays running
    progress every `print_freq` batches and prints a final accuracy summary.
    """
    # batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(val_loader),
        [losses, top1, top5], # [batch_time]
        prefix='Test: ')
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        # end = time.time()
        for i, (images, target) in enumerate(val_loader):
            images, target = allocate_inputs(images, target)
            # compute output
            output = model(images)
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))
            # measure elapsed time
            # batch_time.update(time.time() - end)
            # end = time.time()
            if i % print_freq == 0:
                progress.display(i)
        # TODO: this should also be done with the ProgressMeter
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    return losses, top1, top5 | [
"few_shot_learning.utils.allocate_inputs",
"torch.nn.CrossEntropyLoss",
"few_shot_learning.utils.AverageMeter",
"torchvision.transforms.ColorJitter",
"few_shot_learning.utils.accuracy",
"few_shot_learning.utils.save_results",
"os.path.isdir",
"os.mkdir",
"torchvision.transforms.ToTensor",
"torchvi... | [((1526, 1553), 'os.path.expanduser', 'os.path.expanduser', (['log_dir'], {}), '(log_dir)\n', (1544, 1553), False, 'import os\n'), ((1570, 1599), 'os.path.expanduser', 'os.path.expanduser', (['model_dir'], {}), '(model_dir)\n', (1588, 1599), False, 'import os\n'), ((2072, 2134), 'functools.partial', 'partial', (['allocate_model', 'dtype', 'distributed', 'gpu', 'architecture'], {}), '(allocate_model, dtype, distributed, gpu, architecture)\n', (2079, 2134), False, 'from functools import partial\n'), ((2189, 2238), 'functools.partial', 'partial', (['allocate_inputs', 'dtype', 'distributed', 'gpu'], {}), '(allocate_inputs, dtype, distributed, gpu)\n', (2196, 2238), False, 'from functools import partial\n'), ((2749, 2824), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (2769, 2824), True, 'import torchvision.transforms as transforms\n'), ((4464, 4542), 'few_shot_learning.sampler.get_train_and_val_sampler', 'get_train_and_val_sampler', (['trainset_ft'], {'train_size': '(0.9)', 'balanced_training': '(True)'}), '(trainset_ft, train_size=0.9, balanced_training=True)\n', (4489, 4542), False, 'from few_shot_learning.sampler import get_train_and_val_sampler\n'), ((4634, 4770), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset_ft'], {'batch_size': 'batch_size', 'num_workers': 'num_workers', 'sampler': 'train_sampler_ft', 'pin_memory': '(True)'}), '(trainset_ft, batch_size=batch_size, num_workers\n =num_workers, sampler=train_sampler_ft, pin_memory=True)\n', (4661, 4770), False, 'import torch\n'), ((4809, 4941), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valset_ft'], {'batch_size': 'batch_size', 'num_workers': 'num_workers', 'sampler': 'val_sampler_ft', 'pin_memory': '(True)'}), '(valset_ft, batch_size=batch_size, num_workers=\n num_workers, sampler=val_sampler_ft, 
pin_memory=True)\n', (4836, 4941), False, 'import torch\n'), ((4981, 5105), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset_ft'], {'batch_size': 'batch_size', 'num_workers': 'num_workers', 'shuffle': '(False)', 'pin_memory': '(True)'}), '(testset_ft, batch_size=batch_size, num_workers=\n num_workers, shuffle=False, pin_memory=True)\n', (5008, 5105), False, 'import torch\n'), ((5519, 5597), 'few_shot_learning.sampler.get_train_and_val_sampler', 'get_train_and_val_sampler', (['trainset_tr'], {'balanced_training': '(True)', 'stratify': '(False)'}), '(trainset_tr, balanced_training=True, stratify=False)\n', (5544, 5597), False, 'from few_shot_learning.sampler import get_train_and_val_sampler\n'), ((5655, 5774), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset_tr'], {'batch_size': 'batch_size', 'num_workers': 'num_workers', 'sampler': 'train_sampler_tr'}), '(trainset_tr, batch_size=batch_size, num_workers\n =num_workers, sampler=train_sampler_tr)\n', (5682, 5774), False, 'import torch\n'), ((5812, 5927), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valset_tr'], {'batch_size': 'batch_size', 'num_workers': 'num_workers', 'sampler': 'val_sampler_tr'}), '(valset_tr, batch_size=batch_size, num_workers=\n num_workers, sampler=val_sampler_tr)\n', (5839, 5927), False, 'import torch\n'), ((5966, 6090), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset_tr'], {'batch_size': 'batch_size', 'num_workers': 'num_workers', 'shuffle': '(False)', 'pin_memory': '(True)'}), '(testset_tr, batch_size=batch_size, num_workers=\n num_workers, shuffle=False, pin_memory=True)\n', (5993, 6090), False, 'import torch\n'), ((6470, 6533), 'few_shot_learning.models.AdaptiveHeadClassifier', 'AdaptiveHeadClassifier', (['out_features'], {'architecture': 'architecture'}), '(out_features, architecture=architecture)\n', (6492, 6533), False, 'from few_shot_learning.models import AdaptiveHeadClassifier\n'), ((6861, 
6930), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer_ft'], {'step_size': '(5)', 'gamma': '(0.7)'}), '(optimizer_ft, step_size=5, gamma=0.7)\n', (6892, 6930), False, 'import torch\n'), ((8344, 8413), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer_tr'], {'step_size': '(5)', 'gamma': '(0.7)'}), '(optimizer_tr, step_size=5, gamma=0.7)\n', (8375, 8413), False, 'import torch\n'), ((11524, 11579), 'few_shot_learning.utils.save_results', 'save_results', (['results'], {'dir': 'log_dir', 'prefix': 'model_prefix'}), '(results, dir=log_dir, prefix=model_prefix)\n', (11536, 11579), False, 'from few_shot_learning.utils import AverageMeter, ProgressMeter, allocate_model, accuracy, allocate_inputs, save_checkpoint, save_results\n'), ((11748, 11777), 'few_shot_learning.utils.AverageMeter', 'AverageMeter', (['"""Time"""', '""":6.3f"""'], {}), "('Time', ':6.3f')\n", (11760, 11777), False, 'from few_shot_learning.utils import AverageMeter, ProgressMeter, allocate_model, accuracy, allocate_inputs, save_checkpoint, save_results\n'), ((11794, 11823), 'few_shot_learning.utils.AverageMeter', 'AverageMeter', (['"""Data"""', '""":6.3f"""'], {}), "('Data', ':6.3f')\n", (11806, 11823), False, 'from few_shot_learning.utils import AverageMeter, ProgressMeter, allocate_model, accuracy, allocate_inputs, save_checkpoint, save_results\n'), ((11837, 11865), 'few_shot_learning.utils.AverageMeter', 'AverageMeter', (['"""Loss"""', '""":.4e"""'], {}), "('Loss', ':.4e')\n", (11849, 11865), False, 'from few_shot_learning.utils import AverageMeter, ProgressMeter, allocate_model, accuracy, allocate_inputs, save_checkpoint, save_results\n'), ((11877, 11907), 'few_shot_learning.utils.AverageMeter', 'AverageMeter', (['"""Acc@1"""', '""":6.2f"""'], {}), "('Acc@1', ':6.2f')\n", (11889, 11907), False, 'from few_shot_learning.utils import AverageMeter, ProgressMeter, allocate_model, accuracy, allocate_inputs, save_checkpoint, save_results\n'), 
((11919, 11949), 'few_shot_learning.utils.AverageMeter', 'AverageMeter', (['"""Acc@5"""', '""":6.2f"""'], {}), "('Acc@5', ':6.2f')\n", (11931, 11949), False, 'from few_shot_learning.utils import AverageMeter, ProgressMeter, allocate_model, accuracy, allocate_inputs, save_checkpoint, save_results\n'), ((12163, 12174), 'time.time', 'time.time', ([], {}), '()\n', (12172, 12174), False, 'import time\n'), ((13241, 13269), 'few_shot_learning.utils.AverageMeter', 'AverageMeter', (['"""Loss"""', '""":.4e"""'], {}), "('Loss', ':.4e')\n", (13253, 13269), False, 'from few_shot_learning.utils import AverageMeter, ProgressMeter, allocate_model, accuracy, allocate_inputs, save_checkpoint, save_results\n'), ((13281, 13311), 'few_shot_learning.utils.AverageMeter', 'AverageMeter', (['"""Acc@1"""', '""":6.2f"""'], {}), "('Acc@1', ':6.2f')\n", (13293, 13311), False, 'from few_shot_learning.utils import AverageMeter, ProgressMeter, allocate_model, accuracy, allocate_inputs, save_checkpoint, save_results\n'), ((13323, 13353), 'few_shot_learning.utils.AverageMeter', 'AverageMeter', (['"""Acc@5"""', '""":6.2f"""'], {}), "('Acc@5', ':6.2f')\n", (13335, 13353), False, 'from few_shot_learning.utils import AverageMeter, ProgressMeter, allocate_model, accuracy, allocate_inputs, save_checkpoint, save_results\n'), ((1695, 1722), 'os.path.join', 'os.path.join', (['log_dir', 'date'], {}), '(log_dir, date)\n', (1707, 1722), False, 'import os\n'), ((1743, 1772), 'os.path.join', 'os.path.join', (['model_dir', 'date'], {}), '(model_dir, date)\n', (1755, 1772), False, 'import os\n'), ((1785, 1807), 'os.path.isdir', 'os.path.isdir', (['log_dir'], {}), '(log_dir)\n', (1798, 1807), False, 'import os\n'), ((1817, 1834), 'os.mkdir', 'os.mkdir', (['log_dir'], {}), '(log_dir)\n', (1825, 1834), False, 'import os\n'), ((1847, 1871), 'os.path.isdir', 'os.path.isdir', (['model_dir'], {}), '(model_dir)\n', (1860, 1871), False, 'import os\n'), ((1881, 1900), 'os.mkdir', 'os.mkdir', (['model_dir'], {}), 
'(model_dir)\n', (1889, 1900), False, 'import os\n'), ((1935, 1952), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1946, 1952), False, 'import random\n'), ((1961, 1984), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1978, 1984), False, 'import torch\n'), ((9685, 9701), 'numpy.zeros', 'np.zeros', (['epochs'], {}), '(epochs)\n', (9693, 9701), True, 'import numpy as np\n'), ((12339, 12370), 'few_shot_learning.utils.allocate_inputs', 'allocate_inputs', (['images', 'target'], {}), '(images, target)\n', (12354, 12370), False, 'from few_shot_learning.utils import AverageMeter, ProgressMeter, allocate_model, accuracy, allocate_inputs, save_checkpoint, save_results\n'), ((12544, 12581), 'few_shot_learning.utils.accuracy', 'accuracy', (['output', 'target'], {'topk': '(1, 5)'}), '(output, target, topk=(1, 5))\n', (12552, 12581), False, 'from few_shot_learning.utils import AverageMeter, ProgressMeter, allocate_model, accuracy, allocate_inputs, save_checkpoint, save_results\n'), ((12947, 12958), 'time.time', 'time.time', ([], {}), '()\n', (12956, 12958), False, 'import time\n'), ((13537, 13552), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13550, 13552), False, 'import torch\n'), ((6669, 6690), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (6688, 6690), True, 'import torch.nn as nn\n'), ((7517, 7544), 'os.path.expanduser', 'os.path.expanduser', (['log_dir'], {}), '(log_dir)\n', (7535, 7544), False, 'import os\n'), ((7572, 7601), 'os.path.expanduser', 'os.path.expanduser', (['model_dir'], {}), '(model_dir)\n', (7590, 7601), False, 'import os\n'), ((8995, 9022), 'os.path.expanduser', 'os.path.expanduser', (['log_dir'], {}), '(log_dir)\n', (9013, 9022), False, 'import os\n'), ((9050, 9079), 'os.path.expanduser', 'os.path.expanduser', (['model_dir'], {}), '(model_dir)\n', (9068, 9079), False, 'import os\n'), ((13669, 13700), 'few_shot_learning.utils.allocate_inputs', 'allocate_inputs', (['images', 'target'], {}), 
'(images, target)\n', (13684, 13700), False, 'from few_shot_learning.utils import AverageMeter, ProgressMeter, allocate_model, accuracy, allocate_inputs, save_checkpoint, save_results\n'), ((13884, 13921), 'few_shot_learning.utils.accuracy', 'accuracy', (['output', 'target'], {'topk': '(1, 5)'}), '(output, target, topk=(1, 5))\n', (13892, 13921), False, 'from few_shot_learning.utils import AverageMeter, ProgressMeter, allocate_model, accuracy, allocate_inputs, save_checkpoint, save_results\n'), ((1635, 1649), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1647, 1649), False, 'from datetime import datetime\n'), ((3084, 3138), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['resize'], {'scale': '(0.8, 1.0)'}), '(resize, scale=(0.8, 1.0))\n', (3112, 3138), True, 'import torchvision.transforms as transforms\n'), ((3152, 3176), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {}), '()\n', (3174, 3176), True, 'import torchvision.transforms as transforms\n'), ((3190, 3223), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (3221, 3223), True, 'import torchvision.transforms as transforms\n'), ((3237, 3258), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3256, 3258), True, 'import torchvision.transforms as transforms\n'), ((3342, 3367), 'torchvision.transforms.Resize', 'transforms.Resize', (['resize'], {}), '(resize)\n', (3359, 3367), True, 'import torchvision.transforms as transforms\n'), ((3381, 3402), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3400, 3402), True, 'import torchvision.transforms as transforms\n'), ((3487, 3512), 'torchvision.transforms.Resize', 'transforms.Resize', (['resize'], {}), '(resize)\n', (3504, 3512), True, 'import torchvision.transforms as transforms\n'), ((3526, 3547), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3545, 3547), True, 'import 
torchvision.transforms as transforms\n'), ((12292, 12303), 'time.time', 'time.time', ([], {}), '()\n', (12301, 12303), False, 'import time\n'), ((12910, 12921), 'time.time', 'time.time', ([], {}), '()\n', (12919, 12921), False, 'import time\n')] |
import os
import copy
import numpy as np
import jigsawpy
def case_3_(src_path, dst_path):
    """DEMO-3: build a multi-resolution spacing function with local
    refinement along coastlines and shallow ridges (150KM global, 67KM
    background, 33KM minimum adaptive resolution), then apply a
    gradient limiter via marche.
    """
    opts = jigsawpy.jigsaw_jig_t()
    topo = jigsawpy.jigsaw_msh_t()
    geom = jigsawpy.jigsaw_msh_t()
    hraw = jigsawpy.jigsaw_msh_t()
    hlim = jigsawpy.jigsaw_msh_t()

    # file locations handed to JIGSAW
    opts.geom_file = os.path.join(dst_path, "geom.msh")
    opts.jcfg_file = os.path.join(dst_path, "opts.jig")
    opts.hfun_file = os.path.join(dst_path, "spac.msh")

    # geometry: sphere with radius 6371 (geom units)
    geom.mshID = "ellipsoid-mesh"
    geom.radii = np.full(3, 6.371E+003, dtype=geom.REALS_t)
    jigsawpy.savemsh(opts.geom_file, geom)

    # spacing pattern derived from the topography grid (degrees -> radians)
    jigsawpy.loadmsh(os.path.join(src_path, "topo.msh"), topo)

    hraw.mshID = "ellipsoid-grid"
    hraw.radii = geom.radii
    hraw.xgrid = topo.xgrid * np.pi / 180.
    hraw.ygrid = topo.ygrid * np.pi / 180.

    global_h = +150.                    # global spacing
    adapt_h = +33.                      # adapt. spacing
    arctic_h = +67.                     # arctic spacing

    # sqrt(depth) scaling, bounded to [adapt_h, arctic_h]
    depth = np.maximum(-topo.value, 0.0)
    hraw.value = np.sqrt(depth)
    hraw.value = np.maximum(hraw.value, adapt_h)
    hraw.value = np.minimum(hraw.value, arctic_h)

    # everything south of 40N keeps the coarse global spacing
    south = hraw.ygrid < 40. * np.pi / 180.
    hraw.value[south] = global_h

    # gradient limiter on |dH/dx| (shallow copy shares the grid arrays)
    hlim = copy.copy(hraw)
    hlim.slope = np.full(topo.value.shape, +0.050, dtype=hlim.REALS_t)
    jigsawpy.savemsh(opts.hfun_file, hlim)
    jigsawpy.cmd.marche(opts, hlim)

    # export both the raw and limited spacing for Paraview
    print("Saving to ../cache/case_3a.vtk")
    jigsawpy.savevtk(os.path.join(dst_path, "case_3a.vtk"), hraw)

    print("Saving to ../cache/case_3b.vtk")
    jigsawpy.savevtk(os.path.join(dst_path, "case_3b.vtk"), hlim)

    return
| [
"jigsawpy.savemsh",
"numpy.minimum",
"os.path.join",
"jigsawpy.cmd.marche",
"copy.copy",
"jigsawpy.jigsaw_jig_t",
"jigsawpy.jigsaw_msh_t",
"numpy.full",
"numpy.maximum"
] | [((323, 346), 'jigsawpy.jigsaw_jig_t', 'jigsawpy.jigsaw_jig_t', ([], {}), '()\n', (344, 346), False, 'import jigsawpy\n'), ((359, 382), 'jigsawpy.jigsaw_msh_t', 'jigsawpy.jigsaw_msh_t', ([], {}), '()\n', (380, 382), False, 'import jigsawpy\n'), ((395, 418), 'jigsawpy.jigsaw_msh_t', 'jigsawpy.jigsaw_msh_t', ([], {}), '()\n', (416, 418), False, 'import jigsawpy\n'), ((431, 454), 'jigsawpy.jigsaw_msh_t', 'jigsawpy.jigsaw_msh_t', ([], {}), '()\n', (452, 454), False, 'import jigsawpy\n'), ((466, 489), 'jigsawpy.jigsaw_msh_t', 'jigsawpy.jigsaw_msh_t', ([], {}), '()\n', (487, 489), False, 'import jigsawpy\n'), ((584, 618), 'os.path.join', 'os.path.join', (['dst_path', '"""geom.msh"""'], {}), "(dst_path, 'geom.msh')\n", (596, 618), False, 'import os\n'), ((651, 685), 'os.path.join', 'os.path.join', (['dst_path', '"""opts.jig"""'], {}), "(dst_path, 'opts.jig')\n", (663, 685), False, 'import os\n'), ((718, 752), 'os.path.join', 'os.path.join', (['dst_path', '"""spac.msh"""'], {}), "(dst_path, 'spac.msh')\n", (730, 752), False, 'import os\n'), ((867, 905), 'numpy.full', 'np.full', (['(3)', '(6371.0)'], {'dtype': 'geom.REALS_t'}), '(3, 6371.0, dtype=geom.REALS_t)\n', (874, 905), True, 'import numpy as np\n'), ((924, 962), 'jigsawpy.savemsh', 'jigsawpy.savemsh', (['opts.geom_file', 'geom'], {}), '(opts.geom_file, geom)\n', (940, 962), False, 'import jigsawpy\n'), ((1513, 1541), 'numpy.maximum', 'np.maximum', (['hraw.value', 'hfn2'], {}), '(hraw.value, hfn2)\n', (1523, 1541), True, 'import numpy as np\n'), ((1569, 1597), 'numpy.minimum', 'np.minimum', (['hraw.value', 'hfn3'], {}), '(hraw.value, hfn3)\n', (1579, 1597), True, 'import numpy as np\n'), ((1745, 1760), 'copy.copy', 'copy.copy', (['hraw'], {}), '(hraw)\n', (1754, 1760), False, 'import copy\n'), ((1779, 1831), 'numpy.full', 'np.full', (['topo.value.shape', '(+0.05)'], {'dtype': 'hlim.REALS_t'}), '(topo.value.shape, +0.05, dtype=hlim.REALS_t)\n', (1786, 1831), True, 'import numpy as np\n'), ((1886, 1924), 
'jigsawpy.savemsh', 'jigsawpy.savemsh', (['opts.hfun_file', 'hlim'], {}), '(opts.hfun_file, hlim)\n', (1902, 1924), False, 'import jigsawpy\n'), ((1930, 1961), 'jigsawpy.cmd.marche', 'jigsawpy.cmd.marche', (['opts', 'hlim'], {}), '(opts, hlim)\n', (1949, 1961), False, 'import jigsawpy\n'), ((1047, 1081), 'os.path.join', 'os.path.join', (['src_path', '"""topo.msh"""'], {}), "(src_path, 'topo.msh')\n", (1059, 1081), False, 'import os\n'), ((1455, 1483), 'numpy.maximum', 'np.maximum', (['(-topo.value)', '(0.0)'], {}), '(-topo.value, 0.0)\n', (1465, 1483), True, 'import numpy as np\n'), ((2091, 2128), 'os.path.join', 'os.path.join', (['dst_path', '"""case_3a.vtk"""'], {}), "(dst_path, 'case_3a.vtk')\n", (2103, 2128), False, 'import os\n'), ((2212, 2249), 'os.path.join', 'os.path.join', (['dst_path', '"""case_3b.vtk"""'], {}), "(dst_path, 'case_3b.vtk')\n", (2224, 2249), False, 'import os\n')] |
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from torch.nn import functional as F
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import polyak_update
from stable_baselines3.bear.policies import VariationalAutoEncoder, BearPolicy
from stable_baselines3.common.preprocessing import get_action_dim, get_flattened_obs_dim
class BEAR(OffPolicyAlgorithm):
"""
BEAR (Bootstrapping Error Accumulation Reduction)
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: learning rate for adam optimizer,
the same learning rate will be used for all networks (Q-Values, Actor and Value function)
it can be a function of the current progress remaining (from 1 to 0)
:param buffer_size: size of the replay buffer
:param learning_starts: how many steps of the model to collect transitions for before learning starts
:param batch_size: Minibatch size for each gradient update
:param tau: the soft update coefficient ("Polyak update", between 0 and 1)
:param gamma: the discount factor
:param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
like ``(5, "step")`` or ``(2, "episode")``.
:param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
Set to ``-1`` means to do as many gradient steps as steps done in the environment
during the rollout.
:param action_noise: the action noise type (None by default), this can help
for hard exploration problem. Cf common.noise for the different action noise type.
:param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).
If ``None``, it will be automatically selected.
:param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
at a cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
:param policy_delay: Policy and target networks will only be updated once every policy_delay steps
per training steps. The Q values will be updated policy_delay more often (update every training step).
:param target_policy_noise: Standard deviation of Gaussian noise added to target policy
(smoothing noise)
:param target_noise_clip: Limit for absolute value of target policy smoothing noise.
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
    def __init__(
        self,
        policy: Union[str, Type[BearPolicy]],
        env: Union[GymEnv, str],
        learning_rate: Union[float, Schedule] = 1e-3,
        buffer_size: int = 1_000_000,  # 1e6
        learning_starts: int = 100,
        batch_size: int = 100,
        tau: float = 0.005,
        gamma: float = 0.99,
        train_freq: Union[int, Tuple[int, str]] = (1, "episode"),
        gradient_steps: int = -1,
        action_noise: Optional[ActionNoise] = None,
        replay_buffer_class: Optional[ReplayBuffer] = None,
        replay_buffer_kwargs: Optional[Dict[str, Any]] = None,
        optimize_memory_usage: bool = False,
        policy_delay: int = 2,
        target_policy_noise: float = 0.2,
        target_noise_clip: float = 0.5,
        tensorboard_log: Optional[str] = None,
        create_eval_env: bool = False,
        policy_kwargs: Optional[Dict[str, Any]] = None,
        verbose: int = 0,
        seed: Optional[int] = None,
        device: Union[th.device, str] = "auto",
        _init_setup_model: bool = True,
        without_exploration: bool = False,
        gumbel_ensemble: bool = False,
        gumbel_temperature: float = 0.5,
        lagrange_coef: Union[str, float] = "auto",
        lagrange_thresh: float = 0.05,
        n_sampling: int = 10,
        mmd_sigma: float = 20.0,
        delta_conf: float = 0.1,
        warmup_step: int = -1,
    ):
        # Common off-policy machinery (replay buffer, schedules, env wrapping)
        # is handled by the base class; only continuous (Box) action spaces
        # are supported.
        super(BEAR, self).__init__(
            policy,
            env,
            BearPolicy,
            learning_rate,
            buffer_size,
            learning_starts,
            batch_size,
            tau,
            gamma,
            train_freq,
            gradient_steps,
            action_noise=action_noise,
            replay_buffer_class=replay_buffer_class,
            replay_buffer_kwargs=replay_buffer_kwargs,
            policy_kwargs=policy_kwargs,
            tensorboard_log=tensorboard_log,
            verbose=verbose,
            device=device,
            create_eval_env=create_eval_env,
            seed=seed,
            sde_support=False,
            optimize_memory_usage=optimize_memory_usage,
            supported_action_spaces=(gym.spaces.Box),
            without_exploration=without_exploration,
            gumbel_ensemble=gumbel_ensemble,
            gumbel_temperature=gumbel_temperature
        )
        # TD3-style delayed policy updates and target-policy smoothing noise
        self.policy_delay = policy_delay
        self.target_noise_clip = target_noise_clip
        self.target_policy_noise = target_policy_noise
        if _init_setup_model:
            self._setup_model()
        # Add for BEAR
        state_dim = get_flattened_obs_dim(self.observation_space)
        action_dim = get_action_dim(self.action_space)
        # Autoencoder: used to select the next state action
        self.autoencoder = VariationalAutoEncoder(
            state_dim,
            action_dim,
            100,  # presumably the VAE latent/hidden size — TODO confirm
            self.action_space.high[0],
            self.device
        ).to(self.device)
        self.ae_optimizer = th.optim.Adam(self.autoencoder.parameters(), lr=1e-4)
        # MMD-constraint hyperparameters (see class docstring)
        self.n_sampling = n_sampling
        self.mmd_sigma = mmd_sigma
        self.delta_conf = delta_conf
        self.warmup_step = warmup_step
        self.lagrange_thresh = lagrange_thresh
        self.lagrange_coef = lagrange_coef
        self.lagrange_coef_optimizer = None
        if isinstance(self.lagrange_coef, str) and self.lagrange_coef.startswith("auto"):
            # Default initial value of lagrange coef when learned
            init_value = 1.0
            # Optimize in log-space so the coefficient stays positive
            self.log_lagrange_coef = th.log(th.ones(1, device=self.device) * init_value).requires_grad_(True)
            self.lagrange_coef_optimizer = th.optim.Adam([self.log_lagrange_coef], lr=self.lr_schedule(1))
        else:
            # Force conversion to float
            # this will throw an error if a malformed string (different from 'auto')
            # is passed
            self.lagrange_coef_tensor = th.tensor(float(self.lagrange_coef)).to(self.device)
            self.log_lagrange_coef = self.lagrange_coef_tensor.requires_grad_(False)
    def _setup_model(self) -> None:
        """Create the networks and optimizers, then alias actor/critic locally."""
        super(BEAR, self)._setup_model()
        self._create_aliases()
def _create_aliases(self) -> None:
self.actor = self.policy.actor
self.actor_target = self.policy.actor_target
self.critic = self.policy.critic
self.critic_target = self.policy.critic_target
def gaussian_mmd_loss(self, sample_1: th.Tensor, sample_2: th.Tensor) -> th.Tensor:
"""
sample_1: [batch, n, dim]
sample_2: [batch, m, dim] # In general, n = m and where n, m: number of samplings to compute mmd
"""
xx = sample_1.unsqueeze(2) - sample_1.unsqueeze(1) # [batch, n, n, dim]
xy = sample_1.unsqueeze(2) - sample_2.unsqueeze(1) # [batch, n, m, dim]
yy = sample_2.unsqueeze(2) - sample_2.unsqueeze(1) # [batch, m, m, dim]
k_xx = th.exp(-(xx ** 2) / (2 * self.mmd_sigma))
k_xy = th.exp(-(xy ** 2) / (2 * self.mmd_sigma))
k_yy = th.exp(-(yy ** 2) / (2 * self.mmd_sigma))
return k_xx.mean() - 2 * k_xy.mean() + k_yy.mean()
def laplacian_mmd_loss(self, sample_1: th.Tensor, sample_2: th.Tensor) -> th.Tensor:
"""
sample_1: [batch, n, dim]
sample_2: [batch, m, dim] # In general, n = m and where n, m: number of samplings to compute mmd
"""
diff_x_x = sample_1.unsqueeze(2) - sample_1.unsqueeze(1) # B x N x N x d
diff_x_x = th.mean((-(diff_x_x.pow(2)).sum(-1) / (2.0 * self.mmd_sigma)).exp(), dim=(1, 2))
diff_x_y = sample_1.unsqueeze(2) - sample_2.unsqueeze(1)
diff_x_y = th.mean((-(diff_x_y.pow(2)).sum(-1) / (2.0 * self.mmd_sigma)).exp(), dim=(1, 2))
diff_y_y = sample_2.unsqueeze(2) - sample_2.unsqueeze(1) # B x N x N x d
diff_y_y = th.mean((-(diff_y_y.pow(2)).sum(-1) / (2.0 * self.mmd_sigma)).exp(), dim=(1, 2))
overall_loss = (diff_x_x + diff_y_y - 2.0 * diff_x_y + 1e-6).sqrt()
return overall_loss.mean()
def train(self, gradient_steps: int, batch_size: int = 100) -> None:
# Switch to train mode (this affects batch norm / dropout)
self.policy.set_training_mode(True)
# Update learning rate according to lr schedule
self._update_learning_rate([self.actor.optimizer, self.critic.optimizer])
actor_losses, critic_losses = [], []
autoencoder_losses = []
mmd_losses = []
lagrange_coefs = []
if self.lagrange_coef_optimizer is not None:
lagrange_coef_losses = []
for _ in range(gradient_steps):
self._n_updates += 1
# Sample replay buffer
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
# Start: train autoencoder
reconstructed_action, mean, log_std = self.autoencoder(replay_data.observations, replay_data.actions)
std = th.exp(log_std)
ae_kl_loss = th.log(1 / std) + (std ** 2 + mean ** 2) / 2 - 0.5
autoencoder_loss = th.mean((reconstructed_action - replay_data.actions) ** 2) + th.mean(ae_kl_loss)
self.autoencoder.zero_grad()
autoencoder_loss.backward()
self.ae_optimizer.step()
autoencoder_losses.append(autoencoder_loss.item())
# End: train autoencoder
with th.no_grad():
# Select action according to policy and add clipped noise
tile_next_observations = th.repeat_interleave(replay_data.next_observations, repeats=10, dim=0)
tile_next_actions = self.actor(tile_next_observations)
noise = tile_next_actions.clone().data.normal_(0, self.target_policy_noise)
noise = noise.clamp(-self.target_noise_clip, self.target_noise_clip)
tile_next_actions = tile_next_actions + noise
next_q_values = th.cat(self.critic_target.repeated_forward(tile_next_observations, tile_next_actions, batch_size), dim=2)
n_qs = next_q_values.size(2)
next_q_values, _ = th.min(next_q_values, dim=2) # minimum over q_networks
next_q_values, _ = th.max(next_q_values, dim=1, keepdim=True) # maximum over samplings # [batch_size, 1]
target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
# Get current Q-values estimates for each critic network
current_q_values = self.critic(replay_data.observations, replay_data.actions)
# Compute critic loss
critic_loss = sum([F.mse_loss(current_q, target_q_values) for current_q in current_q_values]) / n_qs
critic_losses.append(critic_loss.item())
# Optimize the critics
self.critic.optimizer.zero_grad()
critic_loss.backward()
self.critic.optimizer.step()
# Delayed policy updates
if self._n_updates % self.policy_delay == 0:
# Compute actor loss
tile_current_observations = th.repeat_interleave(replay_data.observations, repeats=10, dim=0)
tile_current_actions = self.actor(tile_current_observations)
noise = tile_current_actions.clone().data.normal_(0, self.target_policy_noise)
noise = noise.clamp(-self.target_noise_clip, self.target_noise_clip)
tile_current_actions = tile_current_actions + noise
# current_q_values: [batch_size, n_repeates, n_qs]
current_q_values = th.cat(self.critic.repeated_forward(tile_current_observations, tile_current_actions, batch_size), dim=2)
# Compute mmd loss
vae_actions = self.autoencoder.decode(tile_current_observations, device=self.device).view(batch_size, 10, -1)
policy_actions = tile_current_actions.view(batch_size, 10, -1)
mmd_loss = self.laplacian_mmd_loss(vae_actions.detach(), policy_actions)
mmd_losses.append(mmd_loss.item())
log_lagrange_coef = self.log_lagrange_coef \
if self.lagrange_coef_optimizer is not None \
else self.lagrange_coef_tensor
if self.offline_round_step < self.warmup_step:
actor_loss = 100.0 * (mmd_loss - self.lagrange_thresh)
else:
actor_loss = -current_q_values.mean() \
+ th.exp(log_lagrange_coef) * (mmd_loss - self.lagrange_thresh)
actor_loss = actor_loss.mean()
actor_losses.append(actor_loss.item())
# Optimize the actor
self.actor.optimizer.zero_grad()
actor_loss.backward()
self.actor.optimizer.step()
# Optimize the lagrange coefficient if use auto regression
lagrange_coef_loss = None
if self.lagrange_coef_optimizer is not None:
lagrange_coef = th.exp(self.log_lagrange_coef.detach())
lagrange_coef_loss = -th.exp(self.log_lagrange_coef) * (mmd_loss.detach() - self.lagrange_thresh)
lagrange_coef_losses.append(lagrange_coef_loss.item())
self.log_lagrange_coef.data.clamp_(-5.0, 10.0)
else:
lagrange_coef = th.exp(log_lagrange_coef).detach()
lagrange_coefs.append(lagrange_coef.item())
if lagrange_coef_loss is not None:
self.lagrange_coef_optimizer.zero_grad()
lagrange_coef_loss.backward()
self.lagrange_coef_optimizer.step()
polyak_update(self.critic.parameters(), self.critic_target.parameters(), self.tau)
polyak_update(self.actor.parameters(), self.actor_target.parameters(), self.tau)
self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
if len(actor_losses) > 0:
self.logger.record("train/actor_loss", np.mean(actor_losses))
self.logger.record("train/critic_loss", np.mean(critic_losses))
self.logger.record("train/autoencoder_loss", np.mean(autoencoder_losses))
if len(mmd_losses) > 0:
self.logger.record("train/mmd_loss", np.mean(mmd_losses))
if len(lagrange_coefs) > 0:
self.logger.record("train/lagrange_coef", np.mean(lagrange_coefs))
if len(lagrange_coef_losses) > 0 :
self.logger.record("train/lagrange_loss", np.mean(lagrange_coef_losses))
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 4,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "BEAR",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> OffPolicyAlgorithm:
return super(BEAR, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
def _excluded_save_params(self) -> List[str]:
return super(BEAR, self)._excluded_save_params() + ["actor", "critic", "actor_target", "critic_target"]
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
state_dicts = ["policy", "actor.optimizer", "critic.optimizer"]
return state_dicts, []
| [
"numpy.mean",
"torch.nn.functional.mse_loss",
"torch.log",
"stable_baselines3.common.preprocessing.get_action_dim",
"torch.mean",
"torch.max",
"stable_baselines3.bear.policies.VariationalAutoEncoder",
"torch.exp",
"torch.min",
"torch.no_grad",
"torch.repeat_interleave",
"stable_baselines3.comm... | [((6173, 6218), 'stable_baselines3.common.preprocessing.get_flattened_obs_dim', 'get_flattened_obs_dim', (['self.observation_space'], {}), '(self.observation_space)\n', (6194, 6218), False, 'from stable_baselines3.common.preprocessing import get_action_dim, get_flattened_obs_dim\n'), ((6240, 6273), 'stable_baselines3.common.preprocessing.get_action_dim', 'get_action_dim', (['self.action_space'], {}), '(self.action_space)\n', (6254, 6273), False, 'from stable_baselines3.common.preprocessing import get_action_dim, get_flattened_obs_dim\n'), ((8513, 8552), 'torch.exp', 'th.exp', (['(-xx ** 2 / (2 * self.mmd_sigma))'], {}), '(-xx ** 2 / (2 * self.mmd_sigma))\n', (8519, 8552), True, 'import torch as th\n'), ((8570, 8609), 'torch.exp', 'th.exp', (['(-xy ** 2 / (2 * self.mmd_sigma))'], {}), '(-xy ** 2 / (2 * self.mmd_sigma))\n', (8576, 8609), True, 'import torch as th\n'), ((8627, 8666), 'torch.exp', 'th.exp', (['(-yy ** 2 / (2 * self.mmd_sigma))'], {}), '(-yy ** 2 / (2 * self.mmd_sigma))\n', (8633, 8666), True, 'import torch as th\n'), ((10548, 10563), 'torch.exp', 'th.exp', (['log_std'], {}), '(log_std)\n', (10554, 10563), True, 'import torch as th\n'), ((15794, 15816), 'numpy.mean', 'np.mean', (['critic_losses'], {}), '(critic_losses)\n', (15801, 15816), True, 'import numpy as np\n'), ((15871, 15898), 'numpy.mean', 'np.mean', (['autoencoder_losses'], {}), '(autoencoder_losses)\n', (15878, 15898), True, 'import numpy as np\n'), ((6361, 6456), 'stable_baselines3.bear.policies.VariationalAutoEncoder', 'VariationalAutoEncoder', (['state_dim', 'action_dim', '(100)', 'self.action_space.high[0]', 'self.device'], {}), '(state_dim, action_dim, 100, self.action_space.high[0\n ], self.device)\n', (6383, 6456), False, 'from stable_baselines3.bear.policies import VariationalAutoEncoder, BearPolicy\n'), ((10671, 10729), 'torch.mean', 'th.mean', (['((reconstructed_action - replay_data.actions) ** 2)'], {}), '((reconstructed_action - replay_data.actions) 
** 2)\n', (10678, 10729), True, 'import torch as th\n'), ((10732, 10751), 'torch.mean', 'th.mean', (['ae_kl_loss'], {}), '(ae_kl_loss)\n', (10739, 10751), True, 'import torch as th\n'), ((10989, 11001), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (10999, 11001), True, 'import torch as th\n'), ((11118, 11188), 'torch.repeat_interleave', 'th.repeat_interleave', (['replay_data.next_observations'], {'repeats': '(10)', 'dim': '(0)'}), '(replay_data.next_observations, repeats=10, dim=0)\n', (11138, 11188), True, 'import torch as th\n'), ((11720, 11748), 'torch.min', 'th.min', (['next_q_values'], {'dim': '(2)'}), '(next_q_values, dim=2)\n', (11726, 11748), True, 'import torch as th\n'), ((11814, 11856), 'torch.max', 'th.max', (['next_q_values'], {'dim': '(1)', 'keepdim': '(True)'}), '(next_q_values, dim=1, keepdim=True)\n', (11820, 11856), True, 'import torch as th\n'), ((12714, 12779), 'torch.repeat_interleave', 'th.repeat_interleave', (['replay_data.observations'], {'repeats': '(10)', 'dim': '(0)'}), '(replay_data.observations, repeats=10, dim=0)\n', (12734, 12779), True, 'import torch as th\n'), ((15723, 15744), 'numpy.mean', 'np.mean', (['actor_losses'], {}), '(actor_losses)\n', (15730, 15744), True, 'import numpy as np\n'), ((15981, 16000), 'numpy.mean', 'np.mean', (['mmd_losses'], {}), '(mmd_losses)\n', (15988, 16000), True, 'import numpy as np\n'), ((16092, 16115), 'numpy.mean', 'np.mean', (['lagrange_coefs'], {}), '(lagrange_coefs)\n', (16099, 16115), True, 'import numpy as np\n'), ((16214, 16243), 'numpy.mean', 'np.mean', (['lagrange_coef_losses'], {}), '(lagrange_coef_losses)\n', (16221, 16243), True, 'import numpy as np\n'), ((10589, 10604), 'torch.log', 'th.log', (['(1 / std)'], {}), '(1 / std)\n', (10595, 10604), True, 'import torch as th\n'), ((12245, 12283), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['current_q', 'target_q_values'], {}), '(current_q, target_q_values)\n', (12255, 12283), True, 'from torch.nn import functional as F\n'), ((7133, 
7163), 'torch.ones', 'th.ones', (['(1)'], {'device': 'self.device'}), '(1, device=self.device)\n', (7140, 7163), True, 'import torch as th\n'), ((14132, 14157), 'torch.exp', 'th.exp', (['log_lagrange_coef'], {}), '(log_lagrange_coef)\n', (14138, 14157), True, 'import torch as th\n'), ((14763, 14793), 'torch.exp', 'th.exp', (['self.log_lagrange_coef'], {}), '(self.log_lagrange_coef)\n', (14769, 14793), True, 'import torch as th\n'), ((15039, 15064), 'torch.exp', 'th.exp', (['log_lagrange_coef'], {}), '(log_lagrange_coef)\n', (15045, 15064), True, 'import torch as th\n')] |
"""
.. currentmodule:: clifford
========================================
clifford (:mod:`clifford`)
========================================
The Main Module. Provides two classes, Layout and MultiVector, and several helper functions to implement the algebras.
Classes
===============
.. autosummary::
:toctree: generated/
MultiVector
Layout
ConformalLayout
Frame
Functions
================
.. autosummary::
:toctree: generated/
Cl
conformalize
grade_obj
bases
randomMV
pretty
ugly
eps
"""
# Standard library imports.
from functools import reduce
import os
import itertools
from typing import List, Tuple, Set, Container, Dict, Optional
# Major library imports.
import numpy as np
from numpy import linalg
import numba
import sparse
from clifford.io import write_ga_file, read_ga_file # noqa: F401
from ._version import __version__ # noqa: F401
_eps = 1e-12  # float epsilon for float comparisons
_pretty = True  # pretty-print global
_print_precision = 5  # pretty printing precision on floats
try:
    # Honor numba's standard environment switch for disabling parallelism.
    NUMBA_DISABLE_PARALLEL = os.environ['NUMBA_DISABLE_PARALLEL']
except KeyError:
    # Variable unset: parallel compilation is enabled by default.
    NUMBA_PARALLEL = True
else:
    # NOTE(review): bool() of any non-empty string (including "0") is True,
    # so any set value disables parallelism — confirm this is intended.
    NUMBA_PARALLEL = not bool(NUMBA_DISABLE_PARALLEL)
def linear_operator_as_matrix(func, input_blades, output_blades):
    """
    Build the matrix of the linear operator ``func`` with respect to the
    given input and output blade bases.

    Column ``i`` holds the coefficients of ``func(input_blades[i])`` on
    each of the ``output_blades``.
    """
    n_out = len(output_blades)
    n_in = len(input_blades)
    mat = np.zeros((n_out, n_in))
    for col, blade in enumerate(input_blades):
        mat[:, col] = np.array([func(blade)[out] for out in output_blades])
    return mat
def get_adjoint_function(gradeList):
    '''
    Build a fast jitted adjoint function.

    The adjoint flips the sign of each blade coefficient by
    ``(-1)**(k*(k-1)//2)`` where ``k`` is the blade's grade.
    '''
    grade_arr = np.array(gradeList)
    sign_flips = np.power(-1, grade_arr * (grade_arr - 1) // 2)

    @numba.njit
    def adjoint_func(value):
        # the adjoint is just an elementwise sign flip per blade
        return sign_flips * value

    return adjoint_func
@numba.njit(parallel=NUMBA_PARALLEL, nogil=True)
def _numba_construct_tables(
    gradeList, linear_map_to_bitmap, bitmap_to_linear_map, signature
):
    """Numba kernel computing COO-style data for the product tables.

    Returns a (3, N*N) uint64 coordinate array plus, per (i, j) blade pair:
    the geometric-product sign value and boolean masks selecting the entries
    that belong to the inner, outer, and left-contraction products.
    """
    array_length = int(len(gradeList) * len(gradeList))
    indices = np.zeros((3, array_length), dtype=np.uint64)
    # views into the coordinate array: row of left blade, result blade, right blade
    k_list = indices[0, :]
    l_list = indices[1, :]
    m_list = indices[2, :]
    imt_prod_mask = np.zeros(array_length, dtype=np.bool_)
    omt_prod_mask = np.zeros(array_length, dtype=np.bool_)
    lcmt_prod_mask = np.zeros(array_length, dtype=np.bool_)
    # use as small a type as possible to minimize type promotion
    mult_table_vals = np.zeros(array_length, dtype=np.int8)
    for i, grade_list_i in enumerate(gradeList):
        blade_bitmap_i = linear_map_to_bitmap[i]
        for j, grade_list_j in enumerate(gradeList):
            blade_bitmap_j = linear_map_to_bitmap[j]
            # v = linear index of the product blade, mul = its sign
            v, mul = gmt_element(blade_bitmap_i, blade_bitmap_j, signature, bitmap_to_linear_map)
            list_ind = i * len(gradeList) + j
            k_list[list_ind] = i
            l_list[list_ind] = v
            m_list[list_ind] = j
            mult_table_vals[list_ind] = mul
            grade_list_idx = gradeList[v]
            # A_r . B_s = <A_r B_s>_|r-s|
            # if r, s != 0
            imt_prod_mask[list_ind] = imt_check(grade_list_idx, grade_list_i, grade_list_j)
            # A_r ^ B_s = <A_r B_s>_|r+s|
            omt_prod_mask[list_ind] = omt_check(grade_list_idx, grade_list_i, grade_list_j)
            # A_r _| B_s = <A_r B_s>_(s-r) if s-r >= 0
            lcmt_prod_mask[list_ind] = lcmt_check(grade_list_idx, grade_list_i, grade_list_j)
    return indices, mult_table_vals, imt_prod_mask, omt_prod_mask, lcmt_prod_mask
def construct_tables(
    gradeList, linear_map_to_bitmap, bitmap_to_linear_map, signature
) -> Tuple[sparse.COO, sparse.COO, sparse.COO, sparse.COO]:
    """
    Build the sparse product tables by delegating the heavy numeric work
    to the jitted ``_numba_construct_tables`` kernel and wrapping each
    result array as a ``sparse.COO`` over shared coordinates.
    """
    coords, *data_arrays = _numba_construct_tables(
        gradeList, linear_map_to_bitmap, bitmap_to_linear_map, signature
    )
    n = len(gradeList)
    tables = []
    for data in data_arrays:
        tables.append(
            sparse.COO(coords=coords, data=data, shape=(n, n, n), prune=True)
        )
    return tuple(tables)
def get_mult_function(mt: sparse.COO, gradeList,
                      grades_a=None, grades_b=None, filter_mask=None):
    '''
    Returns a function that implements the mult_table on two input multivectors.

    When ``grades_a``/``grades_b`` are given (and no explicit ``filter_mask``),
    the table is pre-filtered to entries whose operands have those grades.
    '''
    if filter_mask is None and grades_a is not None and grades_b is not None:
        # Derive a sparsity mask from the grades of the two operands.
        keep = np.zeros(mt.nnz, dtype=bool)
        left_idx, _, right_idx = mt.coords
        for nz in range(mt.nnz):
            if gradeList[left_idx[nz]] in grades_a and gradeList[right_idx[nz]] in grades_b:
                keep[nz] = 1
        filter_mask = sparse.COO(coords=mt.coords, data=keep, shape=mt.shape)
    if filter_mask is None:
        # No static sparsity information: exploit runtime zeros instead.
        return _get_mult_function_runtime_sparse(mt)
    # We can pass the sparse filter mask directly
    mt = sparse.where(filter_mask, mt, mt.dtype.type(0))
    return _get_mult_function(mt)
def _get_mult_function_result_type(a: numba.types.Type, b: numba.types.Type, mt: np.dtype):
    """Promoted numpy dtype for multiplying numba-typed values via table dtype ``mt``."""
    # scalars have no .dtype attribute; arrays do — unwrap either way
    operand_dtypes = [
        numba.numpy_support.as_dtype(getattr(t, 'dtype', t)) for t in (a, b)
    ]
    return np.result_type(operand_dtypes[0], mt, operand_dtypes[1])
def _get_mult_function(mt: sparse.COO):
    """
    Get a function similar to `` lambda a, b: np.einsum('i,ijk,k->j', a, mt, b)``

    Returns
    -------
    func : function (array_like (n_dims,), array_like (n_dims,)) -> array_like (n_dims,)
        A function that computes the appropriate multiplication
    """
    # unpack for numba
    dims = mt.shape[1]
    k_list, l_list, m_list = mt.coords
    mult_table_vals = mt.data
    @numba.generated_jit(nopython=True)
    def mv_mult(value, other_value):
        # this casting will be done at jit-time
        ret_dtype = _get_mult_function_result_type(value, other_value, mult_table_vals.dtype)
        mult_table_vals_t = mult_table_vals.astype(ret_dtype)
        # generated_jit: return a specialization closed over the promoted table
        def mult_inner(value, other_value):
            output = np.zeros(dims, dtype=ret_dtype)
            # accumulate value[k] * table * other_value[m] into output[l]
            for k, l, m, val in zip(k_list, l_list, m_list, mult_table_vals_t):
                output[l] += value[k] * val * other_value[m]
            return output
        return mult_inner
    return mv_mult
def _get_mult_function_runtime_sparse(mt: sparse.COO):
    """
    A variant of `_get_mult_function` that attempts to exploit runtime zeros

    The returned function avoids performing multiplications if vectors contain
    zeros.

    TODO: determine if this actually helps.
    """
    # unpack for numba
    dims = mt.shape[1]
    k_list, l_list, m_list = mt.coords
    mult_table_vals = mt.data
    @numba.generated_jit(nopython=True)
    def mv_mult(value, other_value):
        # this casting will be done at jit-time
        ret_dtype = _get_mult_function_result_type(value, other_value, mult_table_vals.dtype)
        mult_table_vals_t = mult_table_vals.astype(ret_dtype)
        def mult_inner(value, other_value):
            output = np.zeros(dims, dtype=ret_dtype)
            for ind, k in enumerate(k_list):
                v_val = value[k]
                # skip the whole table entry when either operand entry is zero
                if v_val != 0.0:
                    m = m_list[ind]
                    ov_val = other_value[m]
                    if ov_val != 0.0:
                        l = l_list[ind]
                        output[l] += v_val * mult_table_vals_t[ind] * ov_val
            return output
        return mult_inner
    return mv_mult
@numba.njit
def gmt_element(bitmap_a, bitmap_b, sig_array, bitmap_to_linear_mapping):
    """
    Element of the geometric multiplication table given blades a, b.

    Returns the linear index of the product blade and the sign picked up by
    reordering. The implementation follows ch. 19 of Dorst, Fontijne & Mann,
    "Geometric Algebra for Computer Science".
    """
    sign = canonical_reordering_sign(bitmap_a, bitmap_b, sig_array)
    # the product blade is the symmetric difference of the two bitmaps
    result_bitmap = bitmap_a ^ bitmap_b
    return bitmap_to_linear_mapping[result_bitmap], sign
@numba.njit
def imt_check(grade_list_idx, grade_list_i, grade_list_j):
    """
    A check used in imt (inner product) table generation:
    keep <A_r B_s>_|r-s| entries with both r and s non-zero.
    """
    if grade_list_i == 0 or grade_list_j == 0:
        return False
    return grade_list_idx == abs(grade_list_i - grade_list_j)
@numba.njit
def omt_check(grade_list_idx, grade_list_i, grade_list_j):
    """
    A check used in omt (outer product) table generation:
    keep the <A_r B_s>_(r+s) entries.
    """
    return grade_list_i + grade_list_j == grade_list_idx
@numba.njit
def lcmt_check(grade_list_idx, grade_list_i, grade_list_j):
    """
    A check used in lcmt (left contraction) table generation:
    keep the <A_r B_s>_(s-r) entries.
    """
    return grade_list_j - grade_list_i == grade_list_idx
@numba.njit
def grade_obj_func(objin_val, gradeList, threshold):
    """Return the modal (most frequently occupied) grade of a multivector value array."""
    counts = np.zeros(objin_val.shape)
    for idx in range(len(gradeList)):
        # count each coefficient whose magnitude exceeds the threshold
        if np.abs(objin_val[idx]) > threshold:
            counts[gradeList[idx]] += 1
    return np.argmax(counts)
def get_leftLaInv(mult_table, gradeList):
    """
    Get a function that returns the left-inverse of a multivector using a
    computational linear algebra method: solve ``M_left @ x == identity``
    where ``M_left`` is the matrix of left-multiplication by ``M``, i.e.
    find ``M**-1`` with ``M**-1 * M == 1``.
    """
    # unpack the sparse table so numba can close over plain arrays
    k_list, l_list, m_list = mult_table.coords
    mult_table_vals = mult_table.data
    n_dims = mult_table.shape[1]
    # the multivector 1: a single unit coefficient on the scalar blade
    identity = np.zeros((n_dims,))
    identity[gradeList.index(0)] = 1
    @numba.njit
    def leftLaInvJIT(value):
        intermed = _numba_val_get_left_gmt_matrix(value, k_list, l_list, m_list, mult_table_vals, n_dims)
        # a (near-)singular matrix means no left-inverse exists
        if abs(linalg.det(intermed)) < _eps:
            raise ValueError("multivector has no left-inverse")
        sol = linalg.solve(intermed, identity)
        return sol
    return leftLaInvJIT
def general_exp(x, max_order=15):
    """
    This implements the series expansion of e**mv where mv is a multivector
    The parameter order is the maximum order of the taylor series to use

    Uses scaling-and-squaring: x is divided by a power of two so its
    coefficients have magnitude < 1, the Taylor series is summed, and the
    result is repeatedly squared to undo the scaling.
    """
    result = 1.0
    if max_order == 0:
        return result
    # scale by power of 2 so that its norm is < 1
    max_val = int(np.max(np.abs(x.value)))
    scale = 1
    if max_val > 1:
        max_val <<= 1
        # scale becomes the smallest power of two exceeding max_val
        while max_val:
            max_val >>= 1
            scale <<= 1
    scaled = x * (1.0 / scale)
    # taylor approximation
    tmp = 1.0 + 0.0*x
    for i in range(1, max_order):
        # stop early once the terms are numerically negligible
        if np.any(np.abs(tmp.value) > _eps):
            tmp = tmp*scaled * (1.0 / i)
            result += tmp
        else:
            break
    # undo scaling
    while scale > 1:
        result *= result
        scale >>= 1
    return result
def grade_obj(objin, threshold=0.0000001):
    '''
    Returns the modal grade of a multivector
    '''
    grades_arr = np.asarray(objin.layout.gradeList)
    return grade_obj_func(objin.value, grades_arr, threshold)
def grades_present(objin: 'MultiVector', threshold=0.0000001) -> Set[int]:
    '''
    Returns all the grades of a multivector with coefficient magnitude bigger than threshold
    '''
    significant = abs(objin.value) > threshold
    found = set()
    for grade, keep in zip(objin.layout.gradeList, significant):
        if keep:
            found.add(grade)
    return found
@numba.njit
def count_set_bits(bitmap):
    """
    Counts the number of bits set to 1 in bitmap.

    Fix: removed the unused loop counter ``n`` that was incremented on every
    iteration but never read.
    """
    bmp = bitmap
    count = 0
    while bmp > 0:
        # inspect and strip one bit at a time from the low end
        if bmp & 1:
            count += 1
        bmp = bmp >> 1
    return count
@numba.njit
def canonical_reordering_sign_euclidean(bitmap_a, bitmap_b):
    """
    Computes the sign for the product of bitmap_a and bitmap_b
    assuming a euclidean metric
    """
    shifted = bitmap_a >> 1
    swaps = 0
    # count the basis-vector transpositions needed to reach canonical order
    while shifted != 0:
        swaps += count_set_bits(shifted & bitmap_b)
        shifted = shifted >> 1
    if swaps & 1:
        return -1
    return 1
@numba.njit
def canonical_reordering_sign(bitmap_a, bitmap_b, metric):
    """
    Computes the sign for the product of bitmap_a and bitmap_b
    given the supplied metric
    """
    sign = canonical_reordering_sign_euclidean(bitmap_a, bitmap_b)
    common = bitmap_a & bitmap_b
    basis_index = 0
    # each basis vector shared by both blades contributes its metric signature
    while common != 0:
        if common & 1:
            sign *= metric[basis_index]
        basis_index = basis_index + 1
        common = common >> 1
    return sign
def compute_reordering_sign_and_canonical_form(blade, metric, firstIdx):
    """
    Takes a tuple blade representation and converts it to a canonical
    tuple blade representation, returning the accumulated reordering sign
    together with the canonical blade tuple.
    """
    bitmap_out = 0
    s = 1
    for b in blade:
        # split into basis blades, which are always canonical
        bitmap_b = compute_bitmap_representation((b,), firstIdx)
        # accumulate the sign picked up by merging this basis blade in
        s *= canonical_reordering_sign(bitmap_out, bitmap_b, metric)
        bitmap_out ^= bitmap_b
    return s, compute_blade_representation(bitmap_out, firstIdx)
def compute_bitmap_representation(blade: Tuple[int, ...], firstIdx: int) -> int:
    """
    Takes a tuple blade representation and converts it to the
    bitmap representation
    """
    out = 0
    for basis_index in blade:
        # toggle the bit for this basis vector (indices offset by firstIdx)
        out ^= 1 << (basis_index - firstIdx)
    return out
def compute_blade_representation(bitmap: int, firstIdx: int) -> Tuple[int, ...]:
    """
    Takes a bitmap representation and converts it to the tuple
    blade representation
    """
    indices = []
    remaining = bitmap
    pos = firstIdx
    while remaining > 0:
        # each set bit corresponds to one basis vector in the blade
        if remaining & 1:
            indices.append(pos)
        remaining = remaining >> 1
        pos = pos + 1
    return tuple(indices)
# TODO: work out how to let numba use the COO objects directly
@numba.njit
def _numba_val_get_left_gmt_matrix(x, k_list, l_list, m_list, mult_table_vals, ndims):
    """Accumulate the dense ndims x ndims left-multiplication matrix for x."""
    out = np.zeros((ndims, ndims))
    for idx in range(len(k_list)):
        row = l_list[idx]
        col = m_list[idx]
        out[row, col] += mult_table_vals[idx] * x[k_list[idx]]
    return out
def val_get_left_gmt_matrix(mt: sparse.COO, x):
    """
    This produces the matrix X that performs left multiplication with x
    eg. X@b == (x*b).value
    """
    k_list, l_list, m_list = mt.coords
    # mt.shape[1] is the algebra dimension
    return _numba_val_get_left_gmt_matrix(
        x, k_list, l_list, m_list, mt.data, mt.shape[1]
    )
def val_get_right_gmt_matrix(mt: sparse.COO, x):
    """
    This produces the matrix X that performs right multiplication with x
    eg. X@b == (b*x).value
    """
    # right multiplication by x equals left multiplication in the transposed table
    return val_get_left_gmt_matrix(mt.T, x)
# TODO: Move this to the top once we remove circular imports
from ._layout import Layout # noqa: E402
from ._multivector import MultiVector # noqa: E402
from ._conformal_layout import ConformalLayout # noqa: E402
from ._mvarray import MVArray # noqa: E402
def array(obj):
    '''
    an array method like numpy.array(), but returns a MVArray

    Parameters
    -------------
    obj : MultiVector, list
        a MV or a list of MV's

    Examples
    ----------
    >>> import clifford as cf
    >>> from clifford import g3
    >>> import numpy as np
    >>> np.random.rand(10)*cf.array(g3.e12)
    '''
    # wrap a lone MultiVector in a list so MVArray always receives a sequence
    if not isinstance(obj, MultiVector):
        return MVArray(obj)
    return MVArray([obj])
class Frame(MVArray):
    '''
    A frame of vectors
    '''
    def __new__(cls, input_array):
        # a frame may only contain pure grade-1 multivectors (vectors)
        if not np.all([k.grades() == {1} for k in input_array]):
            raise TypeError('Frames must be made from vectors')
        obj = MVArray.__new__(cls, input_array)
        return obj
    def __array_finalize__(self, obj):
        # nothing extra to copy when numpy creates views/slices of a Frame
        if obj is None:
            return
    @property
    def En(self):
        '''
        Volume element for this frame

        En = e1^e2^...^en
        '''
        # NOTE(review): ``op`` is imported elsewhere in this module; given the
        # docstring it is presumably the outer product operator — confirm.
        return reduce(op, self)
    @property
    def inv(self):
        '''
        The inverse frame of self

        Returns
        ---------
        inv : `clifford.Frame`
        '''
        En = self.En
        # see D&L sec 4.3
        # reciprocal vector k: alternating sign times the outer product of
        # all frame vectors except the k-th, divided by the volume element
        vectors = [
            (-1)**(k)*reduce(op, np.hstack([self[:k], self[k+1:]]))*En.inv()
            for k in range(len(self))]
        return Frame(vectors)
    def is_innermorphic_to(self, other, eps=None):
        '''
        Is this frame `innermorhpic` to other?

        *innermorphic* means both frames share the same inner-product
        between corresponding vectors. This implies that the two frames
        are related by an orthogonal transform

        Parameters
        ------------
        other : `clifford.Frame`
            the other frame

        Returns
        ----------
        value : bool
        '''
        # make iterable `pairs` of all index combos, without repeat
        pairs = list(itertools.combinations(range(len(self)), 2))
        a, b = self, other
        if eps is None:
            eps = _eps
        # compare every pairwise inner product between the two frames
        return np.array([
            float((b[m]|b[n]) - (a[m]|a[n])) < eps
            for m, n in pairs
        ]).all()
class BladeMap(object):
    '''
    A Map Relating Blades in two different algebras

    Examples
    -----------
    >>> from clifford import Cl
    >>> # Dirac Algebra  `D`
    >>> D, D_blades = Cl(1, 3, firstIdx=0, names='d')
    >>> locals().update(D_blades)
    >>> # Pauli Algebra  `P`
    >>> P, P_blades = Cl(3, names='p')
    >>> locals().update(P_blades)
    >>> sta_split = BladeMap([(d01, p1),
                              (d02, p2),
                              (d03, p3),
                              (d12, p12),
                              (d23, p23),
                              (d13, p13)])
    '''
    def __init__(self, blades_map, map_scalars=True):
        # blades_map: list of (blade_in_algebra_1, blade_in_algebra_2) pairs
        self.blades_map = blades_map
        if map_scalars:
            # make scalars in each algebra map
            s1 = self.b1[0]._newMV(dtype=int)+1
            s2 = self.b2[0]._newMV(dtype=int)+1
            self.blades_map = [(s1, s2)] + self.blades_map
    @property
    def b1(self):
        # the blades of the first algebra
        return [k[0] for k in self.blades_map]
    @property
    def b2(self):
        # the blades of the second algebra
        return [k[1] for k in self.blades_map]
    @property
    def layout1(self):
        return self.b1[0].layout
    @property
    def layout2(self):
        return self.b2[0].layout
    def __call__(self, A):
        '''map an MV `A` according to blade_map'''
        # determine direction of map
        if A.layout == self.layout1:
            from_b = self.b1
            to_b = self.b2
        elif A.layout == self.layout2:
            from_b = self.b2
            to_b = self.b1
        else:
            raise ValueError('A doesnt belong to either Algebra in this Map')
        # create empty MV, and map values
        B = to_b[0]._newMV(dtype=int)
        for from_obj, to_obj in zip(from_b, to_b):
            # project A onto each source blade and emit on the target blade
            B += (sum(A.value*from_obj.value)*to_obj)
        return B
# copied from the itertools docs
def _powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1)
)
def elements(dims: int, firstIdx=0) -> List[Tuple[int, ...]]:
    """Return a list of tuples representing all 2**dims of blades
    in a dims-dimensional GA.

    Elements are sorted lexicographically.
    """
    basis_indices = range(firstIdx, firstIdx + dims)
    # all subsets of the basis indices, grouped by subset size
    return list(itertools.chain.from_iterable(
        itertools.combinations(basis_indices, size) for size in range(dims + 1)
    ))
def Cl(p=0, q=0, r=0, sig=None, names=None, firstIdx=1, mvClass=MultiVector):
    """Returns a Layout and basis blades for the geometric algebra Cl_p,q.

    The notation Cl_p,q means that the algebra is p+q dimensional, with
    the first p vectors with positive signature and the final q vectors
    negative.

    Cl(p, q=0, names=None, firstIdx=0) --> Layout, {'name': basisElement, ...}
    """
    # an explicit signature vector overrides the (p, q, r) counts
    if sig is not None:
        layout = Layout._from_sig(sig, firstIdx=firstIdx, names=names)
    else:
        layout = Layout._from_Cl(p, q, r, firstIdx=firstIdx, names=names)
    return layout, layout.bases(mvClass=mvClass)
def bases(layout, mvClass=MultiVector, grades: Optional[Container[int]] = None) -> Dict[str, MultiVector]:
    """Returns a dictionary mapping basis element names to their MultiVector
    instances, optionally for specific grades

    if you are lazy, you might do this to populate your namespace
    with the variables of a given layout.

    >>> locals().update(layout.blades())

    .. versionchanged:: 1.1.0
        This dictionary includes the scalar

    Parameters
    ----------
    layout :
        the layout whose basis blades are instantiated
    mvClass : type
        class used to wrap each coefficient vector
    grades : Container[int], optional
        if given, only blades whose grade is in `grades` are included
    """
    # fix: the accumulator was named `dict`, shadowing the builtin
    blades = {}
    for i in range(layout.gaDims):
        grade = layout.gradeList[i]
        if grades is not None and grade not in grades:
            continue
        # basis blade i is the unit coefficient vector along axis i
        v = np.zeros((layout.gaDims,), dtype=int)
        v[i] = 1
        blades[layout.names[i]] = mvClass(layout, v)
    return blades
def basis_vectors(layout):
    '''
    dictionary of basis vectors

    Equivalent to ``bases(layout, grades=[1])``: only the grade-1
    (vector) blades of `layout`, keyed by name.
    '''
    return bases(layout=layout, grades=[1])
def randomMV(
        layout, min=-2.0, max=2.0, grades=None, mvClass=MultiVector,
        uniform=None, n=1, normed=False):
    """n Random MultiVectors with given layout.

    Coefficients are between min and max, and if grades is a list of integers,
    only those grades will be non-zero.

    Examples
    --------
    >>>randomMV(layout, min=-2.0, max=2.0, grades=None, uniform=None, n=2)
    """
    if n > 1:
        # recurse to build a list of n single multivectors
        return [
            randomMV(layout=layout, min=min, max=max, grades=grades,
                     mvClass=mvClass, uniform=uniform, n=1, normed=normed)
            for _ in range(n)
        ]
    draw = np.random.uniform if uniform is None else uniform
    if grades is None:
        # dense: every coefficient is random
        mv = mvClass(layout, draw(min, max, (layout.gaDims,)))
    else:
        if isinstance(grades, int):
            grades = [grades]
        # sparse: only coefficients of the requested grades are random
        coeffs = np.zeros((layout.gaDims,))
        for i in range(layout.gaDims):
            if layout.gradeList[i] in grades:
                coeffs[i] = draw(min, max)
        mv = mvClass(layout, coeffs)
    return mv.normal() if normed else mv
def pretty(precision=None):
    """Makes repr(M) default to pretty-print.

    `precision` arg can be used to set the printed precision.

    Parameters
    -----------
    precision : int
        number of sig figs to print past decimal

    Examples
    ----------
    >>> pretty(5)
    """
    global _pretty
    _pretty = True
    if precision is not None:
        # also update the module-level print precision
        print_precision(precision)
def ugly():
    """Makes repr(M) default to eval-able representation.

    ugly()

    Inverse of :func:`pretty`; clears the module-level `_pretty` flag.
    """
    global _pretty
    _pretty = False
def eps(newEps=None):
    """Get/Set the epsilon for float comparisons.

    eps(newEps)

    Called without argument, returns the current module-level epsilon;
    otherwise sets it first and returns the new value.
    """
    global _eps
    if newEps is not None:
        _eps = newEps
    return _eps
def print_precision(newVal):
    """Set the epsilon for float comparisons.

    Parameters
    -----------
    newVal : int
        number of sig figs to print (see builtin `round`)

    Examples
    ----------
    >>> print_precision(5)
    """
    global _print_precision
    _print_precision = newVal
def gp(M, N):
    """
    Geometric product

    gp(M, N) = M * N

    M and N must be from the same layout

    This is useful in calculating series of products, with `reduce()`
    for example

    >>>Ms = [M1, M2, M3] # list of multivectors
    >>>reduce(gp, Ms) # == M1*M2*M3
    """
    return M*N
def ip(M, N):
    """
    Inner product function

    ip(M, N) = M | N

    M and N must be from the same layout

    Fix: this previously returned ``M ^ N`` (the outer product, duplicating
    :func:`op`), contradicting its own docstring; it now applies ``|`` as
    documented.
    """
    return M | N
def op(M, N):
    """
    Outer product function

    op(M, N) = M ^ N

    M and N must be from the same layout

    This is useful in calculating series of products, with `reduce()`
    for example

    >>>Ms = [M1, M2, M3] # list of multivectors
    >>>reduce(op, Ms) # == M1^M2^M3
    """
    return M ^ N
def conformalize(layout, added_sig=[1, -1], *, mvClass=MultiVector, **kwargs):
    '''
    Conformalize a Geometric Algebra

    Given the `Layout` for a GA of signature (p, q), this
    will produce a GA of signature (p+1, q+1), as well as
    return a new list of blades and some `stuff`. `stuff`
    is a dict containing the null basis blades, and some
    up/down functions for projecting in/out of the CGA.

    Parameters
    -------------
    layout: `clifford.Layout`
        layout of the GA to conformalize (the base)
    added_sig: list-like
        list of +1, -1 denoted the added signatures
    **kwargs :
        passed to Cl() used to generate conformal layout

    Returns
    ---------
    layout_c : :class:`ConformalLayout`
        layout of the base GA
    blades_c : dict
        blades for the CGA
    stuff: dict
        dict mapping the following members of :class:`ConformalLayout` by their
        names, for easy unpacking into the global namespace:

        .. autosummary::

            ~ConformalLayout.ep
            ~ConformalLayout.en
            ~ConformalLayout.eo
            ~ConformalLayout.einf
            ~ConformalLayout.E0
            ~ConformalLayout.I_base
            ~ConformalLayout.up
            ~ConformalLayout.down
            ~ConformalLayout.homo

    Examples
    ---------
    >>> from clifford import Cl, conformalize
    >>> G2, blades = Cl(2)
    >>> G2c, bladesc, stuff = conformalize(G2)
    >>> locals().update(bladesc)
    >>> locals().update(stuff)
    '''
    layout_c = ConformalLayout._from_base_layout(layout, added_sig, **kwargs)
    # expose the conformal helpers by name so callers can dump them
    # into their namespace via locals().update(stuff)
    exported = [
        "ep", "en", "eo", "einf", "E0",
        "up", "down", "homo", "I_base",
    ]
    stuff = {name: getattr(layout_c, name) for name in exported}
    return layout_c, layout_c.bases(mvClass=mvClass), stuff
# TODO: fix caching to work
# generate pre-defined algebras and cache them
# sigs = [(1, 1, 0), (2, 0, 0), (3, 1, 0), (3, 0, 0), (3, 2, 0), (4, 0, 0)]
# current_module = sys.modules[__name__]
# caching.build_or_read_cache_and_attach_submods(current_module, sigs=sigs)
| [
"numpy.abs",
"numpy.linalg.solve",
"numpy.power",
"functools.reduce",
"numpy.hstack",
"numpy.result_type",
"numba.njit",
"numpy.argmax",
"numpy.asarray",
"numpy.linalg.det",
"itertools.combinations",
"numpy.array",
"numpy.zeros",
"sparse.COO",
"numba.generated_jit"
] | [((2041, 2088), 'numba.njit', 'numba.njit', ([], {'parallel': 'NUMBA_PARALLEL', 'nogil': '(True)'}), '(parallel=NUMBA_PARALLEL, nogil=True)\n', (2051, 2088), False, 'import numba\n'), ((1568, 1595), 'numpy.zeros', 'np.zeros', (['(ndimout, ndimin)'], {}), '((ndimout, ndimin))\n', (1576, 1595), True, 'import numpy as np\n'), ((1843, 1862), 'numpy.array', 'np.array', (['gradeList'], {}), '(gradeList)\n', (1851, 1862), True, 'import numpy as np\n'), ((1875, 1915), 'numpy.power', 'np.power', (['(-1)', '(grades * (grades - 1) // 2)'], {}), '(-1, grades * (grades - 1) // 2)\n', (1883, 1915), True, 'import numpy as np\n'), ((2260, 2304), 'numpy.zeros', 'np.zeros', (['(3, array_length)'], {'dtype': 'np.uint64'}), '((3, array_length), dtype=np.uint64)\n', (2268, 2304), True, 'import numpy as np\n'), ((2407, 2445), 'numpy.zeros', 'np.zeros', (['array_length'], {'dtype': 'np.bool_'}), '(array_length, dtype=np.bool_)\n', (2415, 2445), True, 'import numpy as np\n'), ((2467, 2505), 'numpy.zeros', 'np.zeros', (['array_length'], {'dtype': 'np.bool_'}), '(array_length, dtype=np.bool_)\n', (2475, 2505), True, 'import numpy as np\n'), ((2528, 2566), 'numpy.zeros', 'np.zeros', (['array_length'], {'dtype': 'np.bool_'}), '(array_length, dtype=np.bool_)\n', (2536, 2566), True, 'import numpy as np\n'), ((2655, 2692), 'numpy.zeros', 'np.zeros', (['array_length'], {'dtype': 'np.int8'}), '(array_length, dtype=np.int8)\n', (2663, 2692), True, 'import numpy as np\n'), ((5471, 5501), 'numpy.result_type', 'np.result_type', (['a_dt', 'mt', 'b_dt'], {}), '(a_dt, mt, b_dt)\n', (5485, 5501), True, 'import numpy as np\n'), ((5941, 5975), 'numba.generated_jit', 'numba.generated_jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (5960, 5975), False, 'import numba\n'), ((6936, 6970), 'numba.generated_jit', 'numba.generated_jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (6955, 6970), False, 'import numba\n'), ((8981, 9006), 'numpy.zeros', 'np.zeros', (['objin_val.shape'], {}), 
'(objin_val.shape)\n', (8989, 9006), True, 'import numpy as np\n'), ((9150, 9178), 'numpy.argmax', 'np.argmax', (['modal_value_count'], {}), '(modal_value_count)\n', (9159, 9178), True, 'import numpy as np\n'), ((9534, 9553), 'numpy.zeros', 'np.zeros', (['(n_dims,)'], {}), '((n_dims,))\n', (9542, 9553), True, 'import numpy as np\n'), ((13821, 13845), 'numpy.zeros', 'np.zeros', (['(ndims, ndims)'], {}), '((ndims, ndims))\n', (13829, 13845), True, 'import numpy as np\n'), ((4652, 4680), 'numpy.zeros', 'np.zeros', (['mt.nnz'], {'dtype': 'bool'}), '(mt.nnz, dtype=bool)\n', (4660, 4680), True, 'import numpy as np\n'), ((4924, 4986), 'sparse.COO', 'sparse.COO', ([], {'coords': 'mt.coords', 'data': 'filter_mask', 'shape': 'mt.shape'}), '(coords=mt.coords, data=filter_mask, shape=mt.shape)\n', (4934, 4986), False, 'import sparse\n'), ((9866, 9898), 'numpy.linalg.solve', 'linalg.solve', (['intermed', 'identity'], {}), '(intermed, identity)\n', (9878, 9898), False, 'from numpy import linalg\n'), ((10927, 10961), 'numpy.asarray', 'np.asarray', (['objin.layout.gradeList'], {}), '(objin.layout.gradeList)\n', (10937, 10961), True, 'import numpy as np\n'), ((15874, 15890), 'functools.reduce', 'reduce', (['op', 'self'], {}), '(op, self)\n', (15880, 15890), False, 'from functools import reduce\n'), ((20726, 20763), 'numpy.zeros', 'np.zeros', (['(layout.gaDims,)'], {'dtype': 'int'}), '((layout.gaDims,), dtype=int)\n', (20734, 20763), True, 'import numpy as np\n'), ((21868, 21894), 'numpy.zeros', 'np.zeros', (['(layout.gaDims,)'], {}), '((layout.gaDims,))\n', (21876, 21894), True, 'import numpy as np\n'), ((4115, 4189), 'sparse.COO', 'sparse.COO', ([], {'coords': 'indices', 'data': 'arr', 'shape': '(dims, dims, dims)', 'prune': '(True)'}), '(coords=indices, data=arr, shape=(dims, dims, dims), prune=True)\n', (4125, 4189), False, 'import sparse\n'), ((6283, 6314), 'numpy.zeros', 'np.zeros', (['dims'], {'dtype': 'ret_dtype'}), '(dims, dtype=ret_dtype)\n', (6291, 6314), True, 'import 
numpy as np\n'), ((7278, 7309), 'numpy.zeros', 'np.zeros', (['dims'], {'dtype': 'ret_dtype'}), '(dims, dtype=ret_dtype)\n', (7286, 7309), True, 'import numpy as np\n'), ((9052, 9072), 'numpy.abs', 'np.abs', (['objin_val[n]'], {}), '(objin_val[n])\n', (9058, 9072), True, 'import numpy as np\n'), ((10283, 10298), 'numpy.abs', 'np.abs', (['x.value'], {}), '(x.value)\n', (10289, 10298), True, 'import numpy as np\n'), ((19124, 19152), 'itertools.combinations', 'itertools.combinations', (['s', 'r'], {}), '(s, r)\n', (19146, 19152), False, 'import itertools\n'), ((9758, 9778), 'numpy.linalg.det', 'linalg.det', (['intermed'], {}), '(intermed)\n', (9768, 9778), False, 'from numpy import linalg\n'), ((10552, 10569), 'numpy.abs', 'np.abs', (['tmp.value'], {}), '(tmp.value)\n', (10558, 10569), True, 'import numpy as np\n'), ((16150, 16185), 'numpy.hstack', 'np.hstack', (['[self[:k], self[k + 1:]]'], {}), '([self[:k], self[k + 1:]])\n', (16159, 16185), True, 'import numpy as np\n')] |
# coding: utf-8
# 我觉得变量太多的alpha factor我暂时搁置,以及我觉得rank函数比较奇怪,因为range太大了,是否需要设置一个窗口呢?
#
#
# ## Dropped Index:
# - Alpha30(要用到fama三因子)
# - Alpha75(要用到BENCHMARKINDEX)
# - Alpha143(要用到SELF函数)
# - Alpha149(要用到BENCHMARKINDEX)
# - Alpha181(要用到BENCHMARKINDEX)
# - Alpha182(要用到BENCHMARKINDEX)
### 对于:?较为复杂的表达式,我都先用一些中间变量存储中间运算的结果,以下均采用这一做法,不赘述
import numpy as np, pandas as pd, matplotlib.pyplot as plt
from scipy.stats.stats import pearsonr
from scipy.stats.stats import spearmanr
from BTC_Alpha_func import *
# Alpha#1: -1 * rolling correlation (window para_list[1]) between the rank of the
# volume log-change (lag para_list[0]) and the rank of the intraday return.
def Alpha1(para_list):
    return -1 * CORR(RANK(DELTA(LOG(VOLUME),para_list[0])), RANK((CLOSE-OPEN)/OPEN), para_list[1])
# Alpha#2: negative delta (lag para_list[0]) of the close-location value; NaNs filled with 0.
def Alpha2(para_list):
    return (-1 * DELTA((((CLOSE - LOW) - (HIGH - CLOSE)) / (HIGH - LOW)), para_list[0])).fillna(0)
# Alpha#3: rolling sum of the gap between CLOSE and a support/resistance level
# chosen by the bar's direction.
def Alpha3(para_list):
    # Fixes vs. the previous version:
    #  * the MIN(LOW, DELAY(CLOSE)) branch applies on up-bars (CLOSE > delayed
    #    close) per the reference GTJA Alpha#3 formula; it was gated on the
    #    negated condition, so up-bars contributed CLOSE unreduced and down-bars
    #    subtracted BOTH the MIN and the MAX terms.
    #  * the "price unchanged" mask used a hard-coded lag of 1 while every other
    #    term uses para_list[0]; made consistent.
    up = CLOSE > DELAY(CLOSE, para_list[0])
    cache = CLOSE - (up * MIN(LOW, DELAY(CLOSE, para_list[0]))
                     + (~up) * MAX(HIGH, DELAY(CLOSE, para_list[0])))
    return SUM((~(CLOSE == DELAY(CLOSE, para_list[0])) * cache), para_list[1])
# The literal 1 is kept here: Volume/mean(volume, window) is a meaningful ratio.
# Alpha#4: three-way regime signal built from moving-average bands and the volume ratio.
def Alpha4(para_list):
    # tail: the value of the branch after the second-to-last colon of the
    # original nested ternary (volume ratio <= 1 -> +1, else -(-1))
    tail = (((VOLUME / MEAN(VOLUME,para_list[0])) <= 1) * 1\
          - ~((VOLUME / MEAN(VOLUME,para_list[0])) <= 1) * (-1))
    # med: the middle conditional (after the first colon) of the nested ternary
    med = ((SUM(CLOSE, para_list[1]) / para_list[1]) < ((SUM(CLOSE, para_list[2]) / para_list[2]) - STD(CLOSE, para_list[2]))) * 1\
        + ~(((SUM(CLOSE, para_list[1]) / para_list[1]) < ((SUM(CLOSE, para_list[2]) / para_list[2]) - STD(CLOSE, para_list[2])))) * tail
    return (((SUM(CLOSE, para_list[2]) / para_list[2]) + STD(CLOSE, para_list[2])) < (SUM(CLOSE, para_list[1]) / para_list[1])) * (-1)\
         + ~(((SUM(CLOSE, para_list[2]) / para_list[2]) + STD(CLOSE, para_list[2])) < (SUM(CLOSE, para_list[1]) / para_list[1])) * med
# Alpha#5: negative rolling max of the correlation between volume and high time-series ranks.
def Alpha5(para_list):
    return (-1 * TSMAX(CORR(TSRANK(VOLUME, para_list[0]), TSRANK(HIGH, para_list[0]), para_list[0]), para_list[1]))
#here para_list[0] is a float between(0,1)
# Alpha#6: negative rank of the sign of the change of an OPEN/HIGH blend.
def Alpha6(para_list):
    return (RANK(SIGN(DELTA((((OPEN * para_list[0]) + (HIGH * (1.0-para_list[0])))), para_list[1])))* (-1))
# Alpha#7: rank combination of VWAP-CLOSE extremes scaled by the volume-change rank.
def Alpha7(para_list):
    return ((RANK(MAX((VWAP - CLOSE), para_list[0])) + RANK(MIN((VWAP - CLOSE), para_list[0]))) * RANK(DELTA(VOLUME, para_list[0])))
#here para_list[0] is a float between(0,1)
# Alpha#8: negative rank of the change of a midprice/VWAP blend.
def Alpha8(para_list):
    return RANK(DELTA(((((HIGH + LOW) / 2) * para_list[0]) + (VWAP * (1.0-para_list[0]))), para_list[1]) * -1)
# An assert guards every SMA call (SMA itself asserts too); not repeated below.
# Alpha#9: SMA of the volume-scaled midprice drift.
def Alpha9(para_list):
    assert para_list[2] <= para_list[1]
    return SMA(((HIGH+LOW)/2-(DELAY(HIGH,para_list[0])+DELAY(LOW,para_list[0]))/2)*(HIGH-LOW)/VOLUME,para_list[1],para_list[2])
# The exponent was originally a square; it is parameterized as para_list[2] here.
# Alpha#10: rank of the rolling max of downside volatility / powered close.
def Alpha10(para_list):
    return RANK(MAX((STD(RET, para_list[0]) * (RET < 0) + (CLOSE * (~(RET < 0)))**para_list[2], para_list[1])))
# Alpha#11: rolling sum of the volume-weighted close-location value.
def Alpha11(para_list):
    return SUM(((CLOSE-LOW)-(HIGH-CLOSE))/(HIGH-LOW)*VOLUME, para_list[0])
# Alpha#12: open-vs-mean-VWAP rank times negative |CLOSE-VWAP| rank.
def Alpha12(para_list):
    return (RANK((OPEN - (SUM(VWAP, para_list[0]) / para_list[0])))) * (-1 * (RANK(ABS((CLOSE - VWAP)))))
# The exponent was originally a square root; parameterized as para_list[0] here.
# Alpha#13: powered HIGH*LOW mean minus VWAP.
def Alpha13(para_list):
    return (((HIGH * LOW)**para_list[0]) - VWAP) # author note: this takes the mean of HIGH and LOW; left unoptimized
# Alpha#14: simple price momentum over para_list[0] bars.
def Alpha14(para_list):
    return CLOSE-DELAY(CLOSE, para_list[0])
# The literal 1.0 is kept (converts the ratio into a return).
# Alpha#15: overnight gap return.
def Alpha15(para_list):
    return OPEN/DELAY(CLOSE,para_list[0])-1.0
# Alpha#16: negative rolling max of the ranked volume/VWAP rank-correlation.
def Alpha16(para_list):
    return (-1 * TSMAX(RANK(CORR(RANK(VOLUME), RANK(VWAP), para_list[0])), para_list[0]))
# Alpha#17: VWAP-drawdown rank raised to the close-change power.
def Alpha17(para_list):
    return RANK((VWAP - MAX(VWAP, para_list[0])))**(DELTA(CLOSE, para_list[1]))
# Alpha#18: price ratio vs. para_list[0] bars ago.
def Alpha18(para_list):
    return CLOSE/DELAY(CLOSE,para_list[0])
# Alpha#19: asymmetric momentum, normalized by the delayed close on down moves.
def Alpha19(para_list):
    return (CLOSE <= DELAY(CLOSE,para_list[0])) * (CLOSE - DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0])\
         + (CLOSE > DELAY(CLOSE,para_list[0])) * (CLOSE - DELAY(CLOSE,para_list[0])/CLOSE)
# The literal 100.0 is kept: it expresses a percentage (same below).
# Alpha#20: percentage return over para_list[0] bars.
def Alpha20(para_list):
    return (CLOSE-DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0])*100.0
# Alpha#21: regression slope of the close moving average against time.
def Alpha21(para_list):
    return REGBETA(MEAN(CLOSE,para_list[0]),SEQUENCE(para_list[0]),para_list[0])
# Alpha#22: smoothed change of the close's deviation from its moving average.
def Alpha22(para_list):
    return MEAN((CLOSE-MEAN(CLOSE,para_list[0]))/MEAN(CLOSE,para_list[0])\
          -DELAY((CLOSE-MEAN(CLOSE,para_list[0]))/MEAN(CLOSE,para_list[0]),para_list[1]),para_list[2])
# Alpha#23: percentage of smoothed up-day volatility vs. total smoothed volatility.
def Alpha23(para_list):
    return SMA((CLOSE> DELAY(CLOSE,para_list[0]))*STD(CLOSE,para_list[1]),para_list[1],para_list[2])\
          /(SMA((CLOSE> DELAY(CLOSE,para_list[0]))*STD(CLOSE,para_list[1]),para_list[1],para_list[2])\
           +SMA((CLOSE<=DELAY(CLOSE,para_list[0]))*STD(CLOSE,para_list[1]),para_list[1],para_list[2]))*100.0
# Alpha#24: SMA-smoothed momentum.
def Alpha24(para_list):
    return SMA(CLOSE-DELAY(CLOSE,para_list[0]),para_list[0],para_list[1])
# Alpha#25: volume-decay-adjusted momentum rank scaled by recent-return rank.
def Alpha25(para_list):
    return ((-1 * RANK((DELTA(CLOSE,para_list[0]) * (1 - RANK(DECAYLINEAR((VOLUME / MEAN(VOLUME,para_list[1])), para_list[2])))))) * (1.0 + RANK(SUM(RET, para_list[3]))))
# Alpha#26: mean-reversion gap plus VWAP/delayed-close correlation.
def Alpha26(para_list):
    return (((SUM(CLOSE, para_list[0]) / para_list[0]) - CLOSE)) + ((CORR(VWAP, DELAY(CLOSE, para_list[1]), para_list[2])))
# Alpha#27: weighted moving average of two percentage-momentum horizons.
def Alpha27(para_list):
    return WMA((CLOSE-DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0])*100.0\
              +(CLOSE-DELAY(CLOSE,para_list[1]))/DELAY(CLOSE,para_list[1])*100.0,para_list[2])
# para_list[3] was originally 9; para_list[4] and para_list[5] were 3 and 2 respectively.
# Alpha#28: stochastic-oscillator style signal (K minus smoothed K), weights parameterized.
def Alpha28(para_list):
    return para_list[4]*SMA((CLOSE-TSMIN(LOW,para_list[0]))/(TSMAX(HIGH,para_list[0])-TSMIN(LOW,para_list[0]))*100,para_list[1],para_list[2])\
          -para_list[5]*SMA(SMA((CLOSE-TSMIN(LOW,para_list[0]))/(MAX( HIGH,para_list[3])-TSMAX(LOW,para_list[0]))*100,para_list[1],para_list[2]),para_list[1],para_list[2])
# Alpha#29: volume-weighted momentum ratio.
def Alpha29(para_list):
    return (CLOSE-DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0])*VOLUME
# Alpha#30: dropped (needs Fama three factors); placeholder, always zero.
def Alpha30(para_list):
    return CLOSE - CLOSE
# Alpha#31: percentage deviation of close from its moving average.
def Alpha31(para_list):
    return (CLOSE-MEAN(CLOSE,para_list[0]))/MEAN(CLOSE,para_list[0])*100.0
# Alpha#32: negative rolling sum of ranked high/volume rank-correlation.
def Alpha32(para_list):
    return (-1 * SUM(RANK(CORR(RANK(HIGH), RANK(VOLUME), para_list[0])), para_list[0]))
# Alpha#33: low-drawup term scaled by a return-spread rank and volume ts-rank.
def Alpha33(para_list):
    return ((((-1 * TSMIN(LOW, para_list[0])) + DELAY(TSMIN(LOW, para_list[0]), para_list[0])) * RANK(((SUM(RET, para_list[1]) - SUM(RET, para_list[2])) / (para_list[3]))))* TSRANK(VOLUME, para_list[0]))
# Alpha#34: moving average relative to the current close.
def Alpha34(para_list):
    return MEAN(CLOSE,para_list[0])/CLOSE
# Alpha#37: negative rank of the change in (open sum * return sum).
def Alpha37(para_list):
    return (- RANK(((SUM(OPEN, para_list[0]) * SUM(RET, para_list[0]))\
                  - DELAY((SUM(OPEN, para_list[0]) * SUM(RET, para_list[0])), para_list[1]))))
# Alpha#38: negative high-change, active only above the rolling mean high.
def Alpha38(para_list):
    return ((SUM(HIGH, para_list[0])/para_list[0]) < HIGH) * (-1.0 * DELTA(HIGH, para_list[1]))
# Alpha#39: decayed close-change rank minus decayed VWAP/volume correlation rank.
def Alpha39(para_list):
    return (-(RANK(DECAYLINEAR(DELTA((CLOSE), para_list[0]),para_list[1]))\
             -RANK(DECAYLINEAR(CORR(((VWAP * para_list[2]) + (OPEN * (1-para_list[2]))), SUM(MEAN(VOLUME,para_list[3]), para_list[4]), para_list[5]), para_list[6]))))
# Alpha#40: percentage ratio of up-day volume to down-day volume.
def Alpha40(para_list):
    return SUM((CLOSE > DELAY(CLOSE,para_list[0]))*VOLUME, para_list[1])\
          /SUM((CLOSE<= DELAY(CLOSE,para_list[0]))*VOLUME, para_list[1])*100.0
# Alpha#41: rank of the negated rolling max of the VWAP change.
def Alpha41(para_list):
    return (RANK(-MAX(DELTA((VWAP), para_list[0]), para_list[1])))
# Alpha#42: negative high-volatility rank times high/volume correlation.
def Alpha42(para_list):
    return ((-RANK(STD(HIGH, para_list[0]))) * CORR(HIGH, VOLUME, para_list[0]))
# Alpha#43: signed-volume accumulation (on-balance-volume style).
def Alpha43(para_list):
    return SUM(VOLUME * (CLOSE>DELAY(CLOSE,para_list[0]))\
              -VOLUME *(~(CLOSE>DELAY(CLOSE,para_list[0]))) * (CLOSE<DELAY(CLOSE,para_list[0])), para_list[1])
# Alpha#44: ts-ranked decayed low/volume correlation plus ts-ranked decayed VWAP change.
def Alpha44(para_list):
    return TSRANK(DECAYLINEAR(CORR(LOW, MEAN(VOLUME,para_list[0]), para_list[1]), para_list[2]), para_list[3])\
         + TSRANK(DECAYLINEAR(DELTA(VWAP, para_list[4]), para_list[5]), para_list[6])
# Alpha#45: blended-price change rank times VWAP/volume correlation rank.
def Alpha45(para_list):
    return RANK(DELTA(CLOSE * para_list[0] + OPEN * (1-para_list[0]), para_list[1]))\
         * RANK(CORR(VWAP, MEAN(VOLUME, para_list[2]), para_list[3]))
# The literal 4.0 is meaningful here: it averages the four window means.
# Alpha#46: average of four moving averages relative to the close.
def Alpha46(para_list):
    return (MEAN(CLOSE,para_list[0])\
          + MEAN(CLOSE,para_list[1])\
          + MEAN(CLOSE,para_list[2])\
          + MEAN(CLOSE,para_list[3]))/(4.0*CLOSE)
# Alpha#47: smoothed Williams %R style oscillator.
def Alpha47(para_list):
    return SMA((TSMAX(HIGH,para_list[0])-CLOSE)/(TSMAX(HIGH,para_list[0]) - TSMIN(LOW,para_list[0]))*100.0, para_list[1], para_list[2])
# Alpha#48: negative rank of summed momentum signs, volume-weighted.
def Alpha48(para_list):
    return (-(RANK(SIGN(CLOSE - DELAY(CLOSE, para_list[0]))\
                 + SIGN(DELAY(CLOSE, para_list[0]) - DELAY(CLOSE, para_list[1]))\
                 + SIGN(DELAY(CLOSE, para_list[1]) - DELAY(CLOSE, para_list[2])))\
            * SUM(VOLUME, para_list[1] + para_list[2])) / SUM(VOLUME, para_list[3]))
# Alpha#49: ratio of conditional true-range sums.
def Alpha49(para_list):
    dividend = SUM(MAX(ABS(HIGH-DELAY(HIGH,para_list[0])),ABS(LOW-DELAY(LOW,para_list[0]))),para_list[1])
    divisor = SUM(~((HIGH+LOW) >= (DELAY(HIGH,para_list[0]) + DELAY(LOW,para_list[0])))\
                 *MAX(ABS(HIGH-DELAY(HIGH,para_list[0])),ABS(LOW-DELAY(LOW,para_list[0]))),para_list[1])
    return divisor/dividend
# Alpha#50: difference of the downward and upward conditional true-range shares.
def Alpha50(para_list):
    subtend = SUM(~((HIGH+LOW) <= (DELAY(HIGH,para_list[0]) + DELAY(LOW,para_list[0])))\
                 *MAX(ABS(HIGH-DELAY(HIGH,para_list[0])),ABS(LOW-DELAY(LOW,para_list[0]))),para_list[1])\
             /(SUM(MAX(ABS(HIGH-DELAY(HIGH,para_list[0])),ABS(LOW-DELAY(LOW,para_list[0]))),para_list[1]))
    minuend = SUM(~((HIGH+LOW) >= (DELAY(HIGH,para_list[0]) + DELAY(LOW,para_list[0])))\
                 *MAX(ABS(HIGH-DELAY(HIGH,para_list[0])),ABS(LOW-DELAY(LOW,para_list[0]))),para_list[1])\
             /(SUM(MAX(ABS(HIGH-DELAY(HIGH,para_list[0])),ABS(LOW-DELAY(LOW,para_list[0]))),para_list[1]))
    return subtend - minuend
# Alpha#51: upward conditional true-range share (first half of Alpha#50).
def Alpha51(para_list):
    return SUM(~((HIGH+LOW) <= (DELAY(HIGH,para_list[0]) + DELAY(LOW,para_list[0])))\
              *MAX(ABS(HIGH-DELAY(HIGH,para_list[0])),ABS(LOW-DELAY(LOW,para_list[0]))),para_list[1])\
          /(SUM(MAX(ABS(HIGH-DELAY(HIGH,para_list[0])),ABS(LOW-DELAY(LOW,para_list[0]))),para_list[1]))
# Alpha#52: percentage ratio of gains above / losses below the delayed typical price.
def Alpha52(para_list):
    return SUM(MAX(0, HIGH-DELAY((HIGH+LOW+CLOSE)/3,para_list[0])), para_list[1])\
          /SUM(MAX(0, DELAY((HIGH+LOW+CLOSE)/3,para_list[0]) - LOW),para_list[1])* 100.0
# Alpha#53: percentage of up-days in the window.
def Alpha53(para_list):
    return COUNT(CLOSE>DELAY(CLOSE,para_list[0]),para_list[1])/para_list[1]*100.0
# Alpha#54: negative rank of intraday-range dispersion plus close/open correlation.
def Alpha54(para_list):
    return (-RANK((STD(ABS(CLOSE - OPEN), para_list[0]) + (CLOSE - OPEN)) + CORR(CLOSE, OPEN, para_list[0])))
# The /2 and /4 inside part_B1_value act like decay weights; replaced by two parameters.
# Alpha#55: true-range-normalized candle body, summed over the window
# (part_A = body, part_B = conditional true range, part_C = range cap).
def Alpha55(para_list):
    part_C_value = MAX(ABS(HIGH-DELAY(CLOSE,para_list[0])),\
                       ABS(LOW- DELAY(CLOSE,para_list[0])))
    part_A_value = (CLOSE+(CLOSE-OPEN)/2.0-DELAY(OPEN,para_list[0]))
    part_B1_cond = (ABS(HIGH-DELAY(CLOSE,para_list[0])) > ABS(LOW -DELAY(CLOSE,para_list[0])))\
                  &(ABS(HIGH-DELAY(CLOSE,para_list[0])) > ABS(HIGH-DELAY(LOW, para_list[0])))
    part_B2_cond = (ABS(LOW- DELAY(CLOSE,para_list[0])) > ABS(HIGH-DELAY(LOW, para_list[0])))\
                  &(ABS(LOW- DELAY(CLOSE,para_list[0])) > ABS(HIGH-DELAY(CLOSE,para_list[0])))
    part_B1_value= ABS(HIGH-DELAY(CLOSE,para_list[0])) + ABS(LOW -DELAY(CLOSE,para_list[0]))/para_list[1]\
                 + ABS(DELAY(CLOSE,para_list[0])-DELAY(OPEN, para_list[0]))/para_list[2]
    part_B2nvalue= (ABS(HIGH-DELAY(LOW ,para_list[0])) + ABS(DELAY(CLOSE,para_list[0])-DELAY(OPEN,para_list[0]))/para_list[2])
    part_B_value = (part_B1_cond | (~part_B1_cond) & part_B2_cond) * part_B1_value\
                 + ((~part_B1_cond) & (~part_B2_cond)) * part_B2nvalue
    return SUM(part_A_value/part_B_value*part_C_value, para_list[1])
# This signal returns a boolean series; the expression matches the reference formula.
def Alpha56(para_list):
    # Alpha#56 (boolean factor).
    # Fix: the parameter was declared as `paralist` while the body referenced
    # `para_list`, so every call raised NameError.
    return RANK((OPEN - TSMIN(OPEN, para_list[0]))) < RANK((RANK(CORR(SUM(((HIGH + LOW)/2.0), para_list[1]), SUM(MEAN(VOLUME,para_list[2]), para_list[3]), para_list[4]))**para_list[5]))
# Alpha#57: smoothed stochastic oscillator (un-scaled K).
def Alpha57(para_list):
    return SMA((CLOSE-TSMIN(LOW,para_list[0]))/(TSMAX(HIGH,para_list[0])-TSMIN(LOW,para_list[0])),para_list[1],para_list[2])
# Alpha#58: fraction of up-days in the window (Alpha#53 without the *100).
def Alpha58(para_list):
    return COUNT(CLOSE>DELAY(CLOSE,para_list[0]),para_list[1])/para_list[1]
# Alpha#59: summed gap-adjusted price moves.
# NOTE(review): in the last term the `~` wraps `CLOSE > DELAY(...) * MAX(...)`;
# by analogy with Alpha#3 the intended form is `~(CLOSE>DELAY(...)) * MAX(...)` —
# confirm the parenthesization against the reference formula.
def Alpha59(para_list):
    return SUM((CLOSE!=DELAY(CLOSE,para_list[0]))*CLOSE\
              - ((CLOSE>DELAY(CLOSE,para_list[0]))* MIN(LOW, DELAY(CLOSE,para_list[0]))\
               + ~(CLOSE>DELAY(CLOSE,para_list[0]) * MAX(HIGH,DELAY(CLOSE,para_list[0])))), para_list[1])
# Alpha#60: volume-weighted close-location value sum (same formula as Alpha#11).
def Alpha60(para_list):
    return SUM(((CLOSE-LOW)-(HIGH-CLOSE))/(HIGH-LOW)*VOLUME,para_list[0])
# Alpha#61: negative max of two decayed rank signals.
def Alpha61(para_list):
    return (-MAX(RANK(DECAYLINEAR(DELTA(VWAP,para_list[0]),para_list[1])),\
                 RANK(DECAYLINEAR(RANK(CORR(LOW,MEAN(VOLUME,para_list[2]), para_list[3])),para_list[4]))))
# Alpha#62: negative correlation of high with the volume rank.
def Alpha62(para_list):
    return (-CORR(HIGH, RANK(VOLUME), para_list[0]))
# Alpha#63: SMA of gains over SMA of absolute moves (RSI-style).
def Alpha63(para_list):
    return (SMA(MAX(CLOSE-DELAY(CLOSE,para_list[0]),0),para_list[1],para_list[2])\
           /SMA(ABS(CLOSE-DELAY(CLOSE,para_list[0])) ,para_list[1],para_list[2]))
# Alpha#64: negative max of decayed rank-correlation signals.
def Alpha64(para_list):
    return -MAX(RANK(DECAYLINEAR(CORR(RANK(VWAP), RANK(VOLUME), para_list[0]),para_list[0])),\
                RANK(DECAYLINEAR(MAX(CORR(RANK(CLOSE), RANK(MEAN(VOLUME,para_list[1])), para_list[0]), para_list[2]), para_list[3])))
# Alpha#65: moving average over close (same formula as Alpha#34).
def Alpha65(para_list):
    return MEAN(CLOSE,para_list[0])/CLOSE
# Alpha#66: deviation of close from its moving average (Alpha#31 without the *100).
def Alpha66(para_list):
    return (CLOSE-MEAN(CLOSE,para_list[0]))/MEAN(CLOSE,para_list[0])
# Alpha#67: SMA of gains over SMA of absolute moves (RSI-style).
def Alpha67(para_list):
    # Fix: the numerator called DELAY(CLOSE,) with the lag argument dropped
    # (the trailing comma made it a one-argument call); restored para_list[0]
    # to mirror the denominator and the sibling formulas Alpha63/Alpha79.
    return SMA(MAX(CLOSE-DELAY(CLOSE,para_list[0]),0),para_list[1],para_list[2])\
           /SMA(ABS(CLOSE-DELAY(CLOSE,para_list[0])),para_list[1],para_list[2])
# Alpha#68: SMA of the volume-scaled midprice drift (Alpha#9 with a parameterized divisor).
def Alpha68(para_list):
    return SMA(((HIGH+LOW)/2-(DELAY(HIGH,para_list[0])+DELAY(LOW,para_list[0]))/para_list[0])*(HIGH-LOW)/VOLUME,para_list[1],para_list[2])
# Alpha#69: normalized DTM/DBM imbalance; NaNs forward/back filled.
def Alpha69(para_list):
    cache= (SUM(DTM,para_list[0])>SUM(DBM,para_list[0])) * (SUM(DTM,para_list[0])- SUM(DBM,para_list[0]))/SUM(DTM,para_list[0]) +(~(SUM(DTM,para_list[0])>SUM(DBM,para_list[0])) & (SUM(DTM,para_list[0])!=SUM(DBM,para_list[0])) * (SUM(DTM,para_list[0])- SUM(DBM,para_list[0]))/SUM(DBM,para_list[0]))
    return cache.fillna(method='ffill').fillna(method='bfill')
# Alpha#70: rolling standard deviation of turnover.
def Alpha70(para_list):
    return STD(AMOUNT,para_list[0])
# Alpha#71: deviation of close from its moving average (same formula as Alpha#66).
def Alpha71(para_list):
    return (CLOSE-MEAN(CLOSE,para_list[0]))/MEAN(CLOSE,para_list[0])
# Alpha#72: smoothed Williams %R style oscillator (Alpha#47 without the *100).
def Alpha72(para_list):
    return SMA((TSMAX(HIGH,para_list[0])-CLOSE)/(TSMAX(HIGH,para_list[0])-TSMIN(LOW,para_list[0])),para_list[1],para_list[2])
# Alpha#73: doubly-decayed close/volume correlation ts-rank minus a decayed VWAP term.
# NOTE(review): the second term uses hard-coded windows (30, 4, 3) while the first
# is parameterized — confirm whether these should come from para_list as well.
def Alpha73(para_list):
    return (TSRANK(DECAYLINEAR(DECAYLINEAR(CORR(CLOSE, VOLUME,para_list[0]),para_list[1]),para_list[2]),para_list[3])-RANK(DECAYLINEAR(CORR(VWAP, MEAN(VOLUME,30),4),3))) * -1
#para_list[0] is a float between (0,1)
# Alpha#74: sum of two rank-correlation terms on a LOW/VWAP blend.
def Alpha74(para_list):
    return RANK(CORR(SUM(((LOW * para_list[0]) + VWAP*(1-para_list[0])), para_list[1]), SUM(MEAN(VOLUME,para_list[2]),para_list[1]), para_list[3])) + RANK(CORR(RANK(VWAP), RANK(VOLUME), para_list[4]))
# Alpha#75: dropped (needs BENCHMARKINDEX); placeholder, always zero.
def Alpha75(para_list):
    return CLOSE - CLOSE
# Alpha#76: coefficient of variation of the per-volume absolute return.
def Alpha76(para_list):
    return STD(ABS((CLOSE/DELAY(CLOSE,para_list[0])-1.0))/VOLUME,para_list[1])/MEAN(ABS((CLOSE/DELAY(CLOSE,para_list[0])-1.0))/VOLUME,para_list[1])
# Alpha#77: min of two decayed rank signals on the midprice.
def Alpha77(para_list):
    return MIN(RANK(DECAYLINEAR(((((HIGH + LOW) / 2) + HIGH) - (VWAP+HIGH)),para_list[0])),RANK(DECAYLINEAR(CORR(((HIGH + LOW) / 2), MEAN(VOLUME,para_list[1]),para_list[2]),para_list[3])))
#here para_list[1] is a float
# Alpha#78: CCI-style deviation of the typical price from its moving average.
def Alpha78(para_list):
    return ((HIGH+LOW+CLOSE)/3-MEAN((HIGH+LOW+CLOSE)/3,para_list[0]))/(para_list[1]*MEAN(ABS(CLOSE-MEAN((HIGH+LOW+CLOSE)/3,para_list[0])),para_list[0]))
# Alpha#79: SMA of gains over SMA of absolute moves (same formula as Alpha#63).
def Alpha79(para_list):
    return SMA(MAX(CLOSE-DELAY(CLOSE,para_list[0]),0),para_list[1],para_list[2])\
          /SMA(ABS(CLOSE-DELAY(CLOSE,para_list[0])) ,para_list[1],para_list[2])
# Alpha#80: volume momentum ratio.
def Alpha80(para_list):
    return (VOLUME-DELAY(VOLUME,para_list[0]))/DELAY(VOLUME,para_list[0])
# Alpha#81: SMA-smoothed volume.
def Alpha81(para_list):
    return SMA(VOLUME,para_list[0],para_list[1])
# Alpha#82: smoothed Williams %R style oscillator (same formula as Alpha#72).
def Alpha82(para_list):
    return SMA((TSMAX(HIGH,para_list[0])-CLOSE)/(TSMAX(HIGH,para_list[0])-TSMIN(LOW,para_list[0])),para_list[1],para_list[2])
# Alpha#83: negative rank of the high/volume rank covariance
# (COVIANCE is the covariance helper from BTC_Alpha_func).
def Alpha83(para_list):
    return (-RANK(COVIANCE(RANK(HIGH), RANK(VOLUME), para_list[0])))
# Alpha#84: signed-volume accumulation.
def Alpha84(para_list):
    return SUM((CLOSE>DELAY(CLOSE,para_list[0]))*VOLUME+\
               (~(CLOSE>DELAY(CLOSE,para_list[0]))&(CLOSE<DELAY(CLOSE,para_list[0])))*(-VOLUME),para_list[1])
# Alpha#85: volume-ratio ts-rank times negative momentum ts-rank.
def Alpha85(para_list):
    return TSRANK((VOLUME / MEAN(VOLUME,para_list[0])),para_list[0])\
         * TSRANK((-1 * DELTA(CLOSE, para_list[1])), para_list[2])
#para_list[0] is a float
# Alpha#86: three-way signal on the difference of two momentum slopes.
def Alpha86(para_list):
    return ( para_list[0] < (((DELAY(CLOSE, para_list[1]) - DELAY(CLOSE, para_list[2])) / para_list[2]) - ((DELAY(CLOSE, para_list[3]) - CLOSE) / para_list[3]))) *(-1.0)\
         + (~(para_list[0] < (((DELAY(CLOSE, para_list[1]) - DELAY(CLOSE, para_list[2])) / para_list[2]) - ((DELAY(CLOSE, para_list[3]) - CLOSE) / para_list[3]))))\
         * ((((( DELAY(CLOSE, para_list[1]) - DELAY(CLOSE, para_list[2])) / para_list[2]) - ((DELAY(CLOSE, para_list[3]) - CLOSE) / para_list[3])) < 0) * 1.0\
          + (~((((DELAY(CLOSE, para_list[1]) - DELAY(CLOSE, para_list[2])) / para_list[2]) - ((DELAY(CLOSE, para_list[3]) - CLOSE) / para_list[3])) < 0)) *(-1.0))
# LOW*0.9 + LOW*0.1 is just LOW; changed to HIGH*para_list[4] + LOW*(1-para_list[4]),
# so para_list[4] is a float between 0 and 1.
# Alpha#87: negative sum of a decayed VWAP-change rank and a ts-ranked candle-shape term.
def Alpha87(para_list):
    return (-(RANK(DECAYLINEAR(DELTA(VWAP, para_list[0]), para_list[1]))\
            + TSRANK(DECAYLINEAR((((LOW) - VWAP) / (OPEN - ((HIGH*para_list[4] + LOW*(1-para_list[4])) / 2))), para_list[2]), para_list[3])))
# Alpha#88: simple return over para_list[0] bars.
def Alpha88(para_list):
    return (CLOSE-DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0])
# Alpha#89: MACD-style difference of two SMAs minus its own SMA.
def Alpha89(para_list):
    return (SMA(CLOSE,para_list[0],para_list[3])\
           -SMA(CLOSE,para_list[1],para_list[4])\
           -SMA(SMA(CLOSE,para_list[0],para_list[3])\
           -SMA(CLOSE,para_list[1],para_list[4]),para_list[2],para_list[5]))
# Alpha#90: negative rank of the VWAP/volume rank-correlation.
def Alpha90(para_list):
    return (-RANK(CORR(RANK(VWAP), RANK(VOLUME), para_list[0])))
# Alpha#91: negative product of close-drawdown rank and volume/low correlation rank.
def Alpha91(para_list):
    return (-(RANK((CLOSE - MAX(CLOSE, para_list[0])))\
             *RANK(CORR((MEAN(VOLUME,para_list[1])), LOW, para_list[0]))))
#para_list[0] is a float between 0 and 1
# Alpha#92: negative max of a decayed blend-change rank and a ts-ranked correlation.
def Alpha92(para_list):
    return -MAX(RANK(DECAYLINEAR(DELTA(((CLOSE* para_list[0])+ (VWAP*(1-para_list[0]))),para_list[1]),para_list[2])),\
                TSRANK(DECAYLINEAR(ABS(CORR((MEAN(VOLUME,para_list[3])), CLOSE, para_list[4])), para_list[5]), para_list[6]))
# Alpha#93: summed downside gap on non-up opens.
def Alpha93(para_list):
    return SUM(~(OPEN>=DELAY(OPEN,para_list[0]))*MAX((OPEN-LOW),(OPEN-DELAY(OPEN,para_list[0]))),para_list[1])
# Alpha#94: signed-volume accumulation.
# NOTE(review): the first comparison reads `CLOSE>DELAY(...)*VOLUME` — by analogy
# with Alpha#84 the intended form is `(CLOSE>DELAY(...))*VOLUME`; confirm the parens.
def Alpha94(para_list):
    return SUM((CLOSE>DELAY(CLOSE,para_list[0])*VOLUME\
              + (~(CLOSE>DELAY(CLOSE,para_list[0])))*(-VOLUME)*(CLOSE<DELAY(CLOSE,para_list[0]))),para_list[1])
# Alpha#95: rolling standard deviation of turnover (same formula as Alpha#70).
def Alpha95(para_list):
    return STD(AMOUNT,para_list[0])
# Alpha#96: doubly-smoothed stochastic oscillator.
def Alpha96(para_list):
    return SMA(SMA((CLOSE-TSMIN(LOW,para_list[0]))/(TSMAX(HIGH,para_list[0])-TSMIN(LOW,para_list[0])),para_list[1],para_list[2]),para_list[3],para_list[4])
# Duplicate of Alpha#95 (on volume instead of amount).
# Alpha#97: rolling standard deviation of volume.
def Alpha97(para_list):
    return STD(VOLUME,para_list[0])
#para_list[2] is a float
# Alpha#98: regime-switching reversal: drawdown when trend is flat, else momentum.
def Alpha98(para_list):
    condition = ((DELTA((SUM(CLOSE, para_list[0]) / para_list[0]), para_list[0]) / DELAY(CLOSE, para_list[0])) <= para_list[2])
    return -(condition * ((CLOSE - TSMIN(CLOSE, para_list[0])))\
            +(~condition) * DELTA(CLOSE, para_list[1]))
# Alpha#99: negative rank of the close/volume rank covariance.
def Alpha99(para_list):
    return (-RANK(COVIANCE(RANK(CLOSE), RANK(VOLUME), para_list[0])))
# Duplicate of Alpha#95/#97.
# Alpha#100: rolling standard deviation of volume.
def Alpha100(para_list):
    return STD(VOLUME,para_list[0])
'''just return True & False, para_list[4] is a float between 0 and 1'''
# Alpha#101: boolean comparison of two rank-correlation signals.
def Alpha101(para_list):
    return (-(RANK(CORR(CLOSE, SUM(MEAN(VOLUME,para_list[0]), para_list[1]), para_list[2])) <
              RANK(CORR(RANK(((HIGH * para_list[4]) + (VWAP * (1-para_list[4])))), RANK(VOLUME), para_list[3]))))
# Alpha#102: SMA of volume gains over SMA of absolute volume moves (volume RSI).
def Alpha102(para_list):
    return SMA(MAX(VOLUME-DELAY(VOLUME,para_list[0]),0),para_list[1],para_list[2])\
          /SMA(ABS(VOLUME-DELAY(VOLUME,para_list[0])) ,para_list[1],para_list[2])
# Alpha#103: fraction of the window elapsed since the lowest low.
def Alpha103(para_list):
    return ((para_list[0]-LOWDAY(LOW,para_list[0]))/para_list[0])
# Alpha#104: negative change of high/volume correlation scaled by close-vol rank.
def Alpha104(para_list):
    return (-(DELTA(CORR(HIGH, VOLUME, para_list[0]), para_list[0]) * RANK(STD(CLOSE, para_list[1]))))
# Alpha#105: negative rank-correlation of open and volume.
def Alpha105(para_list):
    return (-1 * CORR(RANK(OPEN), RANK(VOLUME), para_list[0]))
# Alpha#106: simple momentum (same formula as Alpha#14).
def Alpha106(para_list):
    return CLOSE-DELAY(CLOSE,para_list[0])
# Alpha#107: negative product of three open-vs-delayed-price gap ranks.
def Alpha107(para_list):
    return -RANK(OPEN - DELAY(HIGH, para_list[0]))\
          * RANK(OPEN - DELAY(CLOSE, para_list[0]))\
          * RANK(OPEN - DELAY(LOW, para_list[0]))
# Alpha#108: negative power of the high-drawup rank by a VWAP/volume correlation rank.
def Alpha108(para_list):
    return (-(RANK((HIGH - MIN(HIGH, para_list[0])))**RANK(CORR((VWAP), (MEAN(VOLUME,para_list[1])), para_list[2]))))
# Alpha#109: smoothed range relative to its own double smoothing.
def Alpha109(para_list):
    return SMA(HIGH-LOW,para_list[0],para_list[1])/SMA(SMA(HIGH-LOW,para_list[0],para_list[1]),para_list[0],para_list[1])
# Alpha#110: ratio of summed gains above / losses below the delayed close.
def Alpha110(para_list):
    return SUM(MAX(0,HIGH-DELAY(CLOSE,para_list[0])),para_list[1])\
          /SUM(MAX(0,-LOW+DELAY(CLOSE,para_list[0])),para_list[1])
# Alpha#111: difference of two SMAs of the volume-weighted close-location value.
def Alpha111(para_list):
    return SMA(VOLUME*((CLOSE-LOW)-(HIGH-CLOSE))/(HIGH-LOW),para_list[0],para_list[2])\
          -SMA(VOLUME*((CLOSE-LOW)-(HIGH-CLOSE))/(HIGH-LOW),para_list[1],para_list[3])
# Alpha#112: normalized up-move vs. down-move balance (RSI-like on raw moves).
def Alpha112(para_list):
    return (SUM((CLOSE-DELAY(CLOSE,para_list[0])>0) * (CLOSE-DELAY(CLOSE,para_list[1])),para_list[2])\
           -SUM((CLOSE-DELAY(CLOSE,para_list[0])<0) * ABS(CLOSE-DELAY(CLOSE,para_list[1])),para_list[2]))\
          /(SUM((CLOSE-DELAY(CLOSE,para_list[0])>0) * (CLOSE-DELAY(CLOSE,para_list[1])),para_list[2])\
           +SUM((CLOSE-DELAY(CLOSE,para_list[0])<0) * ABS(CLOSE-DELAY(CLOSE,para_list[1])),para_list[2]))
# Alpha#113: negative product of delayed-close-mean rank, close/volume correlation,
# and a close-sum correlation rank.
def Alpha113(para_list):
    return -(RANK(SUM(DELAY(CLOSE, para_list[0]), para_list[1]) / para_list[1]) * CORR(CLOSE, VOLUME, para_list[2]))\
          * RANK(CORR(SUM( CLOSE, para_list[0]), SUM(CLOSE, para_list[1]), para_list[2]))
# Alpha#114: delayed normalized-range rank scaled by volume rank, over the current
# normalized range per unit of VWAP-close gap.
def Alpha114(para_list):
    return ((RANK(DELAY(((HIGH - LOW) / (SUM(CLOSE, para_list[0]) / para_list[0])), para_list[1])) * RANK(RANK(VOLUME)))
             / (((HIGH - LOW) / (SUM(CLOSE, para_list[0]) / para_list[0])) / (VWAP - CLOSE)))
#para_list[0] is a float between 0 and 1
# Alpha#115: blended-price/volume correlation rank raised to a ts-rank correlation rank.
def Alpha115(para_list):
    return RANK(CORR(((HIGH * para_list[0]) + (CLOSE * (1-para_list[0]))), MEAN(VOLUME, para_list[1]),para_list[2]))\
         **RANK(CORR(TSRANK(((HIGH + LOW) / 2), para_list[3]), TSRANK(VOLUME, para_list[4]), para_list[5]))
# Alpha#116: regression slope of close against time.
def Alpha116(para_list):
    return REGBETA(CLOSE,SEQUENCE(para_list[0]),para_list[0])
# Alpha#117: volume ts-rank scaled by inverted range and return ts-ranks.
def Alpha117(para_list):
    return ((TSRANK(VOLUME, para_list[0]) * (1 - TSRANK(((CLOSE + HIGH) - LOW), para_list[1])))* (1 - TSRANK(RET, para_list[0])))
# Alpha#118: ratio of summed upper to lower candle shadows.
def Alpha118(para_list):
    return SUM(HIGH-OPEN,para_list[0])/SUM(OPEN-LOW,para_list[0])
# Alpha#119: difference of two decayed rank signals.
def Alpha119(para_list):
    return (RANK(DECAYLINEAR(CORR(VWAP, SUM(MEAN(VOLUME,para_list[0]), para_list[1]), para_list[2]),para_list[3]))\
           -RANK(DECAYLINEAR(TSRANK(MIN(CORR(RANK(OPEN), RANK(MEAN(VOLUME,para_list[4])), para_list[5]), para_list[6]), para_list[7]), para_list[8])))
# Alpha#120: rank ratio of VWAP-close spread to VWAP+close sum.
def Alpha120(para_list):
    return (RANK((VWAP - CLOSE)) / RANK((VWAP + CLOSE)))
# Alpha#121: negative power of the VWAP-drawdown rank by a ts-ranked correlation.
def Alpha121(para_list):
    return -RANK(VWAP - MIN(VWAP, para_list[0]))**TSRANK(CORR(TSRANK(VWAP, para_list[1]), TSRANK(MEAN(VOLUME,para_list[2]), para_list[3]), para_list[4]), para_list[5])
# Alpha#122: growth rate of the triply-smoothed log close.
def Alpha122(para_list):
    return (SMA(SMA(SMA(np.log(CLOSE),para_list[0],para_list[1]),para_list[0],para_list[1]),para_list[0],para_list[1])\
           /DELAY(SMA(SMA(SMA(np.log(CLOSE),para_list[0],para_list[1]),para_list[0],para_list[1]),para_list[0],para_list[1]),para_list[2])) - 1.0
'''输出的是bool type'''
# Alpha#123: boolean comparison of two correlation ranks.
def Alpha123(para_list):
    return (-(RANK(CORR(SUM((HIGH + LOW) /2, para_list[0]), SUM(MEAN(VOLUME,para_list[1]), para_list[2]), para_list[3]))< RANK(CORR(LOW, VOLUME, para_list[4]))))
# Alpha#124: VWAP-close spread over a decayed close-max rank.
def Alpha124(para_list):
    return (CLOSE - VWAP) / DECAYLINEAR(RANK(TSMAX(CLOSE, para_list[0])),para_list[1])
# Alpha#125: ratio of two decayed rank signals.
def Alpha125(para_list):
    return (RANK(DECAYLINEAR(CORR((VWAP), MEAN(VOLUME,para_list[0]),para_list[1]), para_list[2]))\
           /RANK(DECAYLINEAR(DELTA(((CLOSE * 0.5) + (VWAP * 0.5)), para_list[3]), para_list[4])))
# Alpha#126: typical price (no parameters).
def Alpha126():
    return (CLOSE+HIGH+LOW)/3
# Originally squared then square-rooted; simplified to ABS here.
# Alpha#127: mean absolute drawdown relative to the rolling close max.
def Alpha127(para_list):
    return ABS(MEAN(((CLOSE-MAX(CLOSE,para_list[0]))/(MAX(CLOSE,para_list[0]))), para_list[0]))
# Alpha#128: money-flow-index style oscillator on the typical price.
def Alpha128(para_list):
    return 100-(100/(1+SUM(((HIGH+LOW+CLOSE)/3>DELAY((HIGH+LOW+CLOSE)/3,para_list[0]))*(HIGH+LOW+CLOSE)/3*VOLUME,para_list[1])/
                       SUM(((HIGH+LOW+CLOSE)/3<DELAY((HIGH+LOW+CLOSE)/3,para_list[0]))*(HIGH+LOW+CLOSE)/3*VOLUME,para_list[1])))
def Alpha129(para_list):
return SUM((CLOSE-DELAY(CLOSE,para_list[0])<0) * ABS(CLOSE-DELAY(CLOSE,para_list[0])),para_list[1])
def Alpha130(para_list):
return (RANK(DECAYLINEAR(CORR(((HIGH + LOW) / 2),MEAN(VOLUME,para_list[0]),para_list[1]),para_list[2]))\
/RANK(DECAYLINEAR(CORR(RANK(VWAP), RANK(VOLUME), para_list[3]),para_list[4])))
def Alpha131(para_list):
return (RANK(DELAY(VWAP, para_list[0]))**TSRANK(CORR(CLOSE,MEAN(VOLUME,para_list[1]), para_list[2]), para_list[2]))
def Alpha132(para_list):
return MEAN(AMOUNT,para_list[0])
def Alpha133(para_list):
return ((para_list[0]-HIGHDAY(HIGH,para_list[0]))/para_list[0])\
-((para_list[0]-LOWDAY(LOW ,para_list[0]))/para_list[0])
def Alpha134(para_list):
return (CLOSE-DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0])*VOLUME
def Alpha135(para_list):
return SMA(DELAY(CLOSE/DELAY(CLOSE,para_list[0]),para_list[1]),para_list[0],para_list[2])
def Alpha136(para_list):
return ((-RANK(DELTA(RET, para_list[0]))) * CORR(OPEN, VOLUME, para_list[1]))
# This is Alpha55 with the outermost SUM() removed -- equivalent to a rolling
# sum with window = 1, so it counts as a duplicated computation here.
def Alpha55(para_list):
    # True-range-style scale: largest excursion of HIGH / LOW from the
    # previous close (para_list[0] periods back).
    part_C_value = MAX(ABS(HIGH-DELAY(CLOSE,para_list[0])),\
                       ABS(LOW- DELAY(CLOSE,para_list[0])))
    # Gap of the mid-move price from the delayed open.
    part_A_value = (CLOSE+(CLOSE-OPEN)/2-DELAY(OPEN,para_list[0]))
    # Case 1: the high-side excursion dominates both the low-side excursion
    # and the high-to-delayed-low range.
    part_B1_cond = (ABS(HIGH-DELAY(CLOSE,para_list[0])) > ABS(LOW -DELAY(CLOSE,para_list[0])))\
                  &(ABS(HIGH-DELAY(CLOSE,para_list[0])) > ABS(HIGH-DELAY(LOW, para_list[0])))
    # Case 2: the low-side excursion dominates.
    part_B2_cond = (ABS(LOW- DELAY(CLOSE,para_list[0])) > ABS(HIGH-DELAY(LOW, para_list[0])))\
                  &(ABS(LOW- DELAY(CLOSE,para_list[0])) > ABS(HIGH-DELAY(CLOSE,para_list[0])))
    # Weighted range used in cases 1 and 2; para_list[1] / para_list[2]
    # are the divisor weights of the secondary terms.
    part_B1_value= ABS(HIGH-DELAY(CLOSE,para_list[0]))\
                 + ABS(LOW -DELAY(CLOSE,para_list[0]))/para_list[1]\
                 + ABS(DELAY(CLOSE,para_list[0])\
                      -DELAY(OPEN, para_list[0]))/para_list[2]
    '''
    part_B2pvalue= ABS(LOW -DELAY(CLOSE,1))\
                 + ABS(HIGH -DELAY(CLOSE,1))/2\
                 + ABS(DELAY(CLOSE,1)-DELAY(OPEN ,1))/4 #same of the previous one
    '''
    # Fallback weighted range when neither case 1 nor case 2 holds.
    part_B2nvalue= (ABS(HIGH-DELAY(LOW ,para_list[0])) + ABS(DELAY(CLOSE,para_list[0])-DELAY(OPEN,para_list[0]))/para_list[2])
    # Select the denominator by the boolean masks (element-wise mask algebra).
    part_B_value = (part_B1_cond | (~part_B1_cond) & part_B2_cond) * part_B1_value\
                 + ((~part_B1_cond) & (~part_B2_cond)) * part_B2nvalue
    return part_A_value/part_B_value*part_C_value
# ---------------------------------------------------------------------------
# Alpha factor formulas (Alpha138 - Alpha171), same conventions as above:
# market-data globals plus operator helpers defined elsewhere in this file,
# windows/weights taken positionally from ``para_list``.
# ---------------------------------------------------------------------------
#here para_list[0] is a float between 0 and 1
def Alpha138(para_list):
    return (-(RANK(DECAYLINEAR(DELTA((((LOW * para_list[0]) + (VWAP * (1-para_list[0])))), para_list[1]), para_list[2]))\
             -TSRANK(DECAYLINEAR(TSRANK(CORR(TSRANK(LOW, para_list[3]), TSRANK(MEAN(VOLUME,para_list[4]), para_list[5]),para_list[6]),para_list[7]),para_list[8]),para_list[9])))
def Alpha139(para_list):
    return (-CORR(OPEN, VOLUME, para_list[0]))
def Alpha140(para_list):
    return MIN(RANK(DECAYLINEAR(((RANK(OPEN) + RANK(LOW)) - (RANK(HIGH) + RANK(CLOSE))),para_list[0])),\
               TSRANK(DECAYLINEAR(CORR(TSRANK(CLOSE, para_list[1]), TSRANK(MEAN(VOLUME, para_list[2]),para_list[3]),para_list[4]),para_list[5]),para_list[5]))
def Alpha141(para_list):
    return (-RANK(CORR(RANK(HIGH), RANK(MEAN(VOLUME,para_list[0])), para_list[1])))
def Alpha142(para_list):
    return (((-RANK(TSRANK(CLOSE, para_list[0]))) * RANK(DELTA(DELTA(CLOSE,para_list[1]), para_list[1]))) * RANK(TSRANK((VOLUME/MEAN(VOLUME,para_list[2])), para_list[3])))
# Alpha143: the SELF function is not defined, so this returns a zero placeholder.
def Alpha143(para_list):
    return CLOSE - CLOSE
def Alpha144(para_list):
    return SUMIF(ABS(CLOSE/DELAY(CLOSE,para_list[0])-1)/AMOUNT,para_list[1],CLOSE<DELAY(CLOSE,para_list[0]))/COUNT(CLOSE<DELAY(CLOSE,para_list[0]),para_list[1])
def Alpha145(para_list):
    return (MEAN(VOLUME,para_list[0])-MEAN(VOLUME,para_list[1]))/MEAN(VOLUME,para_list[2])
# There is a square term inside; left unchanged.
def Alpha146(para_list):
    return MEAN((CLOSE-DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0])\
               -SMA((CLOSE-DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0]), para_list[1],para_list[4]),para_list[2])\
         * ((CLOSE-DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0])\
           -SMA((CLOSE-DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0]), para_list[1],para_list[4]))\
          /SMA(((CLOSE-DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0])\
               -((CLOSE-DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0])\
                -SMA(( CLOSE-DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0]),para_list[3],para_list[4])))**2,para_list[1],para_list[4])
def Alpha147(para_list):
    return REGBETA(MEAN(CLOSE,para_list[0]), SEQUENCE(para_list[0]), para_list[0])
'''这里返回的也是个bool'''
# (the note string above says: this one also returns a bool)
def Alpha148(para_list):
    return -(RANK(CORR((OPEN), SUM(MEAN(VOLUME,para_list[0]), para_list[1]), para_list[2])) < RANK((OPEN - TSMIN(OPEN, para_list[3]))))
# Alpha149: BANCHMARKCLOSE is not defined, so this index is left as a zero placeholder.
def Alpha149(para_list):
    return CLOSE - CLOSE
def Alpha150(para_list):
    return (CLOSE+HIGH+LOW)/3*VOLUME
def Alpha151(para_list):
    return SMA(CLOSE-DELAY(CLOSE,para_list[0]),para_list[0],para_list[1])
def Alpha152(para_list):
    return SMA(MEAN(DELAY(SMA(DELAY(CLOSE/DELAY(CLOSE,para_list[0]),para_list[1]),para_list[0],para_list[1]),para_list[0]),para_list[2])\
              -MEAN(DELAY(SMA(DELAY(CLOSE/DELAY(CLOSE,para_list[0]),para_list[1]),para_list[0],para_list[1]),para_list[0]),para_list[3]),para_list[0],para_list[1])
# The windows here are integer multiples of one another and are not treated
# as independent, so a single parameter describes all of them.
def Alpha153(para_list):
    return (MEAN(CLOSE,  para_list[0])\
           +MEAN(CLOSE,2*para_list[0])\
           +MEAN(CLOSE,4*para_list[0])\
           +MEAN(CLOSE,8*para_list[0]))/4
# This one also returns a bool.
def Alpha154(para_list):
    return (((VWAP - MIN(VWAP, para_list[0]))) < (CORR(VWAP, MEAN(VOLUME,para_list[1]), para_list[2])))
def Alpha155(para_list):
    # MACD-style construction on VOLUME: fast minus slow SMA, minus its own SMA.
    return SMA(VOLUME,para_list[0],para_list[3])\
          -SMA(VOLUME,para_list[1],para_list[4])\
          -SMA(\
               SMA(VOLUME,para_list[0],para_list[3])\
              -SMA(VOLUME,para_list[1],para_list[4]),\
               para_list[2],para_list[5])
#para_list[3] is a float between 0 and 1
def Alpha156(para_list):
    return -MAX(RANK(DECAYLINEAR(DELTA(VWAP, para_list[0]), para_list[1])),\
                RANK(DECAYLINEAR((-(DELTA(((OPEN * para_list[3]) + (LOW * (1-para_list[3]))), para_list[2])\
                                   /((OPEN * para_list[3]) + (LOW * (1-para_list[3]))))), para_list[1])))
def Alpha157(para_list):
    return (MIN(PROD(RANK(RANK(LOG(SUM(TSMIN(RANK(RANK(-RANK(DELTA((CLOSE - para_list[0]), para_list[1])))), para_list[2]), para_list[3])))), para_list[4]), para_list[5]) + TSRANK(DELAY((-RET), para_list[6]), para_list[7]))
def Alpha158(para_list):
    return ((HIGH-SMA(CLOSE,para_list[0],para_list[1]))-(LOW-SMA(CLOSE,para_list[0],para_list[1])))/CLOSE
def Alpha159(para_list):
    return (CLOSE-SUM(MIN(LOW, DELAY(CLOSE,para_list[3])),para_list[0]))\
          /SUM(MAX(HIGH,DELAY(CLOSE,para_list[3]))-MIN(LOW,DELAY(CLOSE,para_list[3])),para_list[0])*para_list[1]*para_list[2]\
          +(CLOSE-SUM(MIN(LOW, DELAY(CLOSE,para_list[3])),para_list[1]))\
          /SUM(MAX(HIGH,DELAY(CLOSE,para_list[3]))-MIN(LOW,DELAY(CLOSE,para_list[3])),para_list[1])*para_list[1]*para_list[2]\
          +(CLOSE-SUM(MIN(LOW, DELAY(CLOSE,para_list[3])),para_list[2]))\
          /SUM(MAX(HIGH,DELAY(CLOSE,para_list[3]))-MIN(LOW,DELAY(CLOSE,para_list[3])),para_list[2])*para_list[1]*para_list[2]\
          /(para_list[0]*para_list[1]+para_list[1]*para_list[2]+para_list[2]*para_list[0])
def Alpha160(para_list):
    return SMA((CLOSE<=DELAY(CLOSE,para_list[0]))*STD(CLOSE,para_list[1]),para_list[1],para_list[2])
def Alpha161(para_list):
    # NOTE(review): identical to Alpha175 below.
    return MEAN(MAX(MAX((HIGH-LOW),ABS(DELAY(CLOSE,para_list[0])-HIGH)),ABS(DELAY(CLOSE,para_list[0])-LOW)),para_list[1])
def Alpha162(para_list):
    # Normalised RSI: (RSI - rolling min) / (rolling max - rolling min).
    return (SMA(MAX(CLOSE-DELAY(CLOSE,para_list[0]),0),para_list[1],para_list[2])\
           /SMA(ABS(CLOSE-DELAY(CLOSE,para_list[0])) ,para_list[1],para_list[2])\
           -MIN(SMA(MAX(CLOSE-DELAY(CLOSE,para_list[0]),0),para_list[1],para_list[2])\
               /SMA(ABS(CLOSE-DELAY(CLOSE,para_list[0])) ,para_list[1],para_list[2]),para_list[1]))\
          /(MAX(SMA(MAX(CLOSE-DELAY(CLOSE,para_list[0]),0),para_list[1],para_list[2])
               /SMA(ABS(CLOSE-DELAY(CLOSE,para_list[0])) ,para_list[1],para_list[2]),para_list[1])\
           -MIN(SMA(MAX(CLOSE-DELAY(CLOSE,para_list[0]),0),para_list[1],para_list[2])\
               /SMA(ABS(CLOSE-DELAY(CLOSE,para_list[0])) ,para_list[1],para_list[2]),para_list[1]))
def Alpha163(para_list):
    return RANK(((((-RET) * MEAN(VOLUME,para_list[0])) * VWAP) * (HIGH - CLOSE)))
def Alpha164(para_list):
    # NOTE(review): relies on `~mask * 1` for the "else" branch -- with
    # numpy/pandas boolean operands ~ is element-wise NOT; confirm operand type.
    return SMA((((CLOSE>DELAY(CLOSE,para_list[0]))*1/(CLOSE-DELAY(CLOSE,para_list[0]))+ ~(CLOSE>DELAY(CLOSE,para_list[0]))*1)
               - MIN(((CLOSE>DELAY(CLOSE,para_list[0]))*1/(CLOSE-DELAY(CLOSE,para_list[0]))+ ~(CLOSE>DELAY(CLOSE,para_list[0]))*1),para_list[1]))/(HIGH-LOW),para_list[2],2)
def Alpha165(para_list):
    return SUMAC(CLOSE-MEAN(CLOSE,para_list[0]),para_list[0])\
         - SUMAC(CLOSE-MEAN(CLOSE,para_list[0]),para_list[0])/STD(CLOSE,para_list[0])
# Keep the **1.5 exponent; 120**120 would be far too large otherwise.
def Alpha166(para_list):
    return -para_list[0]*((para_list[1])**1.5)*SUM((CLOSE/DELAY(CLOSE,para_list[2])-MEAN(CLOSE/DELAY(CLOSE,para_list[3])-1,para_list[4])),para_list[5]) /((20-1)*(20-2)*((SUM((CLOSE/DELAY(CLOSE,1))**2,20))**1.5))
def Alpha167(para_list):
    return SUM((CLOSE-DELAY(CLOSE,para_list[0])>0)*(CLOSE-DELAY(CLOSE,para_list[0])),para_list[1])
def Alpha168(para_list):
    return (-VOLUME/MEAN(VOLUME,para_list[0]))
def Alpha169(para_list):
    return SMA(MEAN(DELAY(SMA(CLOSE-DELAY(CLOSE,para_list[0]),para_list[1],para_list[0]),para_list[5]),para_list[2])\
              -MEAN(DELAY(SMA(CLOSE-DELAY(CLOSE,para_list[0]),para_list[1],para_list[0]),para_list[5]),para_list[3]),para_list[4],para_list[5])
def Alpha170(para_list): # rank * rank - rank is still almost a rank
    return ((RANK((1 / CLOSE)) * VOLUME / MEAN(VOLUME, para_list[0]))* (HIGH * RANK(HIGH - CLOSE)) / (SUM(HIGH, para_list[1]) / para_list[1])) - RANK(VWAP - DELAY(VWAP, para_list[1]))
def Alpha171(para_list):
    return -((LOW - CLOSE) * (OPEN**para_list[0])) / ((CLOSE - HIGH) * (CLOSE**para_list[0]))
def Alpha172(para_list):
    """Directional-movement (DMI/ADX-style) oscillator.

    Mean over ``para_list[2]`` periods of the normalised difference between
    the summed negative and positive directional movements, each scaled by
    the summed true range ``TR``.

    Bug fix: the original conditions were written as ``LD>0 & LD>HD``.
    Because ``&`` binds tighter than comparison operators in Python, that
    parses as the chained comparison ``LD > (0 & LD) > HD`` -- not the
    intended logical AND of the two comparisons (and chained comparisons
    raise for array operands).  The parenthesised form below matches the
    corrected sibling implementation in Alpha186.
    """
    # Negative / positive directional movement, kept only where it dominates.
    dn_move = ((LD > 0) & (LD > HD)) * LD
    up_move = ((HD > 0) & (HD > LD)) * HD
    return MEAN(ABS(SUM(dn_move, para_list[0]) / SUM(TR, para_list[1])
                   -SUM(up_move, para_list[0]) / SUM(TR, para_list[1]))
                  /(SUM(dn_move, para_list[0]) / SUM(TR, para_list[1])
                   +SUM(up_move, para_list[0]) / SUM(TR, para_list[1])), para_list[2])
# The 3-2+1 weighting may be numerology; left unchanged.
def Alpha173(para_list):
    return 3*SMA(CLOSE,para_list[0],para_list[1])\
          -2*SMA(SMA(CLOSE,para_list[0],para_list[1]),para_list[0],para_list[1])\
           +SMA(SMA(SMA(np.log(CLOSE),para_list[0],para_list[1]),para_list[0],para_list[1]),para_list[0],para_list[1])
def Alpha174(para_list):
    # Smoothed volatility of up-days only (boolean mask times rolling std).
    return SMA((CLOSE>DELAY(CLOSE,para_list[0]))*STD(CLOSE,para_list[1]),para_list[1],para_list[2])
def Alpha175(para_list):
    # NOTE(review): identical to Alpha161 above.
    return MEAN(MAX(MAX((HIGH-LOW),ABS(DELAY(CLOSE,para_list[0])-HIGH)),ABS(DELAY(CLOSE,para_list[0])-LOW)),para_list[1])
def Alpha176(para_list):
    # Correlation of the stochastic-oscillator position with volume rank.
    return CORR(RANK((CLOSE - TSMIN(LOW, para_list[0])) / (TSMAX(HIGH, para_list[0]) - TSMIN(LOW,para_list[0]))), RANK(VOLUME), para_list[1])
def Alpha177(para_list):
    return ((para_list[0]-HIGHDAY(HIGH,para_list[0]))/para_list[0])
def Alpha178(para_list):
    # NOTE(review): identical to Alpha134 above.
    return (CLOSE-DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0])*VOLUME
def Alpha179(para_list):
    return (RANK(CORR(VWAP, VOLUME, para_list[0])) * RANK(CORR(RANK(LOW), RANK(MEAN(VOLUME,para_list[1])), para_list[2])))
def Alpha180(para_list):
    return (MEAN(VOLUME,para_list[0]) < VOLUME) * (-TSRANK(ABS(DELTA(CLOSE, para_list[1])), para_list[2])) * SIGN(DELTA(CLOSE, para_list[1]))\
         + ~(MEAN(VOLUME,para_list[0]) < VOLUME) * (-VOLUME)
#Alpha181 drop for the BENCHMARKINDEX
def Alpha181(para_list):
    return CLOSE - CLOSE
#Alpha182 drop for the BENCHMARKINDEX
def Alpha182(para_list):
    return CLOSE - CLOSE
def Alpha183(para_list):
    return MAX(SUMAC(CLOSE-MEAN(CLOSE,para_list[0]),para_list[0]),para_list[0])\
          -MIN(SUMAC(CLOSE-MEAN(CLOSE,para_list[0]),para_list[0]),para_list[0])/STD(CLOSE,para_list[0])
def Alpha184(para_list):
    return (RANK(CORR(DELAY((OPEN - CLOSE), para_list[0]), CLOSE, para_list[1])) + RANK((OPEN - CLOSE)))
# The **2 is also left unchanged.
def Alpha185(para_list):
    return RANK((-((1 - (OPEN / CLOSE))**2)))
def Alpha186(para_list):
    # DMI/ADX-style oscillator averaged with its own delayed value.
    # Note the explicit parentheses around each comparison before `&`
    # (cf. the precedence bug in Alpha172).
    return (MEAN(ABS(SUM(((LD>0) & (LD>HD))*LD,para_list[0])/SUM(TR,para_list[0])\
                    -SUM(((HD>0) & (HD>LD))*HD,para_list[0])/SUM(TR,para_list[0]))\
                   /(SUM(((LD>0) & (LD>HD))*LD,para_list[0])/SUM(TR,para_list[0])\
                    +SUM(((HD>0) & (HD>LD))*HD,para_list[0])/SUM(TR,para_list[0])),para_list[1])\
           +DELAY(MEAN(ABS(SUM(((LD>0) & (LD>HD))*LD,para_list[0])/SUM(TR,para_list[0])\
                          -SUM(((HD>0) & (HD>LD))*HD,para_list[0])/SUM(TR,para_list[0]))\
                         /(SUM(((LD>0) & (LD>HD))*LD,para_list[0])/SUM(TR,para_list[0])\
                          +SUM(((HD>0) & (HD>LD))*HD,para_list[0])/SUM(TR,para_list[0])),para_list[1]),para_list[1]))/2
def Alpha187(para_list):
    return SUM(~(OPEN<=DELAY(OPEN,para_list[0])) * MAX((HIGH-OPEN),(OPEN-DELAY(OPEN,para_list[0]))),para_list[1])
def Alpha188(para_list):
    return ((HIGH-LOW-SMA(HIGH-LOW,para_list[0],2))/SMA(HIGH-LOW,para_list[0],2))
def Alpha189(para_list):
    # Mean absolute deviation of CLOSE from its rolling mean.
    return MEAN(ABS(CLOSE-MEAN(CLOSE,para_list[0])),para_list[0])
''' Alpha190我很无奈。。。
def Alpha190(para_list):
    return
    LOG((COUNT(CLOSE/DELAY(CLOSE)-1>((CLOSE/DELAY(CLOSE,19))^(1/20)-1),20)-1)\
       *(SUMIF(((CLOSE/DELAY(CLOSE)-1-(CLOSE/DELAY(CLOSE,19))^(1/20)-1))^2,20,\
               CLOSE/DELAY(CLOSE)-1<(CLOSE/DELAY(CLOSE,19))^(1/20)1))\
      /((COUNT((CLOSE/DELAY(CLOSE)-1<(CLOSE/DELAY(CLOSE,19))^(1/20)-1),20))\
       *(SUMIF(( CLOSE/DELAY(CLOSE)-1-((CLOSE/DELAY(CLOSE,19))^(1/20)-1))^2,20,\
               CLOSE/DELAY(CLOSE)-1>(CLOSE/DELAY(CLOSE,19))^(1/20)-1))) )
'''
def Alpha191(para_list):
    return ((CORR(MEAN(VOLUME,para_list[0]), LOW, para_list[1]) + ((HIGH + LOW) / 2)) - CLOSE)
| [
"numpy.log"
] | [((36308, 36321), 'numpy.log', 'np.log', (['CLOSE'], {}), '(CLOSE)\n', (36314, 36321), True, 'import numpy as np, pandas as pd, matplotlib.pyplot as plt\n'), ((24007, 24020), 'numpy.log', 'np.log', (['CLOSE'], {}), '(CLOSE)\n', (24013, 24020), True, 'import numpy as np, pandas as pd, matplotlib.pyplot as plt\n'), ((24127, 24140), 'numpy.log', 'np.log', (['CLOSE'], {}), '(CLOSE)\n', (24133, 24140), True, 'import numpy as np, pandas as pd, matplotlib.pyplot as plt\n')] |
"""
District Cooling Network Calculations.
Calculate which technologies need to be activated to meet the cooling energy demand and determine the cost and emissions
that result from the activation of these cooling technologies.
"""
import numpy as np
import pandas as pd
from cea.constants import HOURS_IN_YEAR
from cea.optimization.constants import VCC_T_COOL_IN, ACH_T_IN_FROM_CHP_K
from cea.optimization.master import cost_model
from cea.optimization.slave.cooling_resource_activation import calc_vcc_CT_operation, cooling_resource_activator
from cea.technologies.storage_tank_pcm import Storage_tank_PCM
from cea.technologies.chiller_vapor_compression import VaporCompressionChiller
from cea.technologies.cogeneration import calc_cop_CCGT
from cea.technologies.chiller_absorption import AbsorptionChiller
from cea.technologies.supply_systems_database import SupplySystemsDatabase
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def district_cooling_network(locator,
                             config,
                             master_to_slave_variables,
                             network_features,
                             weather_features):
    """
    Computes the parameters for the cooling of the complete DCN, including:
    - cost for cooling energy supply
    - hourly cooling energy supply
    - hourly electricity generation (from trigen) and demand (for VCCs)
    - hourly combustion fuel demand (for trigen)
    - installed capacity of each cooling technology
    :param locator: paths to cea input files and results folders
    :param master_to_slave_variables: all the important information on the energy system configuration of an individual
                                     (buildings [connected, non-connected], heating technologies, cooling technologies,
                                     storage etc.)
    :param config: configurations of cea
    :param network_features: characteristic parameters (pumping energy, mass flow rate, thermal losses & piping cost)
                             of the district cooling/heating network
    :param weather_features: important environmental parameters (e.g. ambient & ground temperature)
    :type locator: cea.inputlocator.InputLocator class object
    :type master_to_slave_variables: cea.optimization.slave_data.SlaveData class object
    :type config: cea.config.Configuration class object
    :type network_features: cea.optimization.distribution.network_optimization_features.NetworkOptimizationFeatures
                            class object
    :type weather_features: cea.optimization.preprocessing.preprocessing_main.WeatherFeatures class object
    :return district_cooling_costs: costs of all district cooling energy technologies (investment and operational costs
                                    of generation, storage and network)
    :return district_cooling_generation_dispatch: hourly thermal energy supply by each component of the district
                                                  cooling energy system.
    :return district_cooling_electricity_requirements_dispatch: hourly electricity demand of each component of the
                                                                district cooling energy generation system.
    :return district_cooling_fuel_requirements_dispatch: hourly combustion fuel demand of each component of the
                                                         district cooling energy system (i.e. in the current setup only
                                                         natural gas demand of the CCGT of the trigeneration system)
    :return district_cooling_capacity_installed: capacity of each district-scale cooling technology installed
                                                 (corresponding to the given individual)
    :rtype district_cooling_costs: dict (27 x float)
    :rtype district_cooling_generation_dispatch: dict (15 x 8760-ndarray)
    :rtype district_cooling_electricity_requirements_dispatch: dict (6 x 8760-ndarray)
    :rtype district_cooling_fuel_requirements_dispatch: dict (1 x 8760-ndarray)
    :rtype district_cooling_capacity_installed: dict (9 x float)
    """
    if master_to_slave_variables.DCN_exists:
        print("DISTRICT COOLING OPERATION")
        # THERMAL STORAGE + NETWORK
        # Import Temperatures from Network Summary:
        Q_thermal_req_W, \
        T_district_cooling_return_K, \
        T_district_cooling_supply_K, \
        mdot_kgpers = calc_network_summary_DCN(master_to_slave_variables)
        # Initialize daily storage class
        # The yearly average ground temperature is used as the constant
        # ambient temperature around the PCM tank.
        T_ground_K = weather_features.ground_temp
        daily_storage = Storage_tank_PCM(activation=master_to_slave_variables.Storage_cooling_on,
                                         size_Wh=master_to_slave_variables.Storage_cooling_size_W,
                                         database_model_parameters= pd.read_excel(locator.get_database_conversion_systems(), sheet_name="TES"),
                                         T_ambient_K = np.average(T_ground_K),
                                         type_storage = config.optimization.cold_storage_type,
                                         debug = master_to_slave_variables.debug
                                         )
        # Import Data - cooling energy potential from water bodies
        # Only read the water-body potential if a water-source VCC is active;
        # the available heat is capped at the total installed WS-VCC capacity.
        if master_to_slave_variables.WS_BaseVCC_on == 1 or master_to_slave_variables.WS_PeakVCC_on == 1:
            water_body_potential = pd.read_csv(locator.get_water_body_potential())
            Q_therm_water_body = np.array(water_body_potential['QLake_kW']) * 1E3
            total_WS_VCC_installed = master_to_slave_variables.WS_BaseVCC_size_W + \
                                     master_to_slave_variables.WS_PeakVCC_size_W
            # TODO: the following line assumes that the thermal energy from the water body is used 1:1 by the VCC.
            # i.e. thermal_energy_in = thermal_energy_out for the VCC. Check if this assumption is correct.
            Q_therm_water_body_W = [x if x < total_WS_VCC_installed else total_WS_VCC_installed for x in
                                    Q_therm_water_body]
            T_source_average_water_body_K = np.array(water_body_potential['Ts_C']) + 273
        else:
            Q_therm_water_body_W = np.zeros(HOURS_IN_YEAR)
            T_source_average_water_body_K = np.zeros(HOURS_IN_YEAR)
        # get properties of technology used in this script
        absorption_chiller = AbsorptionChiller(
            pd.read_excel(locator.get_database_conversion_systems(), sheet_name="Absorption_chiller"), 'double')
        CCGT_prop = calc_cop_CCGT(master_to_slave_variables.NG_Trigen_ACH_size_W, ACH_T_IN_FROM_CHP_K, "NG")
        VC_chiller = VaporCompressionChiller(locator, scale='DISTRICT')
        # initialize variables
        # Hourly (8760) dispatch profiles, filled in the loop below.
        Q_Trigen_NG_gen_W = np.zeros(HOURS_IN_YEAR)
        Q_BaseVCC_WS_gen_W = np.zeros(HOURS_IN_YEAR)
        Q_PeakVCC_WS_gen_W = np.zeros(HOURS_IN_YEAR)
        Q_BaseVCC_AS_gen_W = np.zeros(HOURS_IN_YEAR)
        Q_PeakVCC_AS_gen_W = np.zeros(HOURS_IN_YEAR)
        Q_DailyStorage_content_W = np.zeros(HOURS_IN_YEAR)
        Q_DailyStorage_to_storage_W = np.zeros(HOURS_IN_YEAR)
        Q_DailyStorage_from_storage_W = np.zeros(HOURS_IN_YEAR)
        E_Trigen_NG_gen_W = np.zeros(HOURS_IN_YEAR)
        E_BaseVCC_AS_req_W = np.zeros(HOURS_IN_YEAR)
        E_PeakVCC_AS_req_W = np.zeros(HOURS_IN_YEAR)
        E_BaseVCC_WS_req_W = np.zeros(HOURS_IN_YEAR)
        E_PeakVCC_WS_req_W = np.zeros(HOURS_IN_YEAR)
        NG_Trigen_req_W = np.zeros(HOURS_IN_YEAR)
        Q_BackupVCC_AS_gen_W = np.zeros(HOURS_IN_YEAR)
        Q_Trigen_NG_gen_directload_W = np.zeros(HOURS_IN_YEAR)
        Q_BaseVCC_WS_gen_directload_W = np.zeros(HOURS_IN_YEAR)
        Q_PeakVCC_WS_gen_directload_W = np.zeros(HOURS_IN_YEAR)
        Q_BaseVCC_AS_gen_directload_W = np.zeros(HOURS_IN_YEAR)
        Q_PeakVCC_AS_gen_directload_W = np.zeros(HOURS_IN_YEAR)
        Q_BackupVCC_AS_directload_W = np.zeros(HOURS_IN_YEAR)
        for hour in range(HOURS_IN_YEAR):  # cooling supply for all buildings excluding cooling loads from data centers
            daily_storage.hour = hour
            if master_to_slave_variables.debug is True: print("\nHour {:.0f}".format(hour))
            if Q_thermal_req_W[hour] > 0.0:
                # only if there is a cooling load!
                # Activate the cooling technologies in merit order for this
                # hour; the storage object is updated and returned each call.
                daily_storage, \
                thermal_output, \
                electricity_output, \
                gas_output = cooling_resource_activator(Q_thermal_req_W[hour],
                                                        T_district_cooling_supply_K[hour],
                                                        T_district_cooling_return_K[hour],
                                                        Q_therm_water_body_W[hour],
                                                        T_source_average_water_body_K[hour],
                                                        T_ground_K[hour],
                                                        daily_storage,
                                                        absorption_chiller,
                                                        VC_chiller,
                                                        CCGT_prop,
                                                        master_to_slave_variables)
                # Unpack the hourly activation results into the year-long profiles.
                Q_DailyStorage_content_W[hour] = thermal_output['Qc_DailyStorage_content_W']
                Q_DailyStorage_to_storage_W[hour] = thermal_output['Qc_DailyStorage_to_storage_W']
                Q_DailyStorage_from_storage_W[hour] = thermal_output['Qc_DailyStorage_from_storage_W']
                Q_Trigen_NG_gen_directload_W[hour] = thermal_output['Qc_Trigen_NG_gen_directload_W']
                Q_BaseVCC_WS_gen_directload_W[hour] = thermal_output['Qc_BaseVCC_WS_gen_directload_W']
                Q_PeakVCC_WS_gen_directload_W[hour] = thermal_output['Qc_PeakVCC_WS_gen_directload_W']
                Q_BaseVCC_AS_gen_directload_W[hour] = thermal_output['Qc_BaseVCC_AS_gen_directload_W']
                Q_PeakVCC_AS_gen_directload_W[hour] = thermal_output['Qc_PeakVCC_AS_gen_directload_W']
                Q_BackupVCC_AS_directload_W[hour] = thermal_output['Qc_BackupVCC_AS_directload_W']
                Q_Trigen_NG_gen_W[hour] = thermal_output['Qc_Trigen_NG_gen_W']
                Q_BaseVCC_WS_gen_W[hour] = thermal_output['Qc_BaseVCC_WS_gen_W']
                Q_PeakVCC_WS_gen_W[hour] = thermal_output['Qc_PeakVCC_WS_gen_W']
                Q_BaseVCC_AS_gen_W[hour] = thermal_output['Qc_BaseVCC_AS_gen_W']
                Q_PeakVCC_AS_gen_W[hour] = thermal_output['Qc_PeakVCC_AS_gen_W']
                Q_BackupVCC_AS_gen_W[hour] = thermal_output['Qc_BackupVCC_AS_gen_W']
                E_BaseVCC_WS_req_W[hour] = electricity_output['E_BaseVCC_WS_req_W']
                E_PeakVCC_WS_req_W[hour] = electricity_output['E_PeakVCC_WS_req_W']
                E_BaseVCC_AS_req_W[hour] = electricity_output['E_BaseVCC_AS_req_W']
                E_PeakVCC_AS_req_W[hour] = electricity_output['E_PeakVCC_AS_req_W']
                E_Trigen_NG_gen_W[hour] = electricity_output['E_Trigen_NG_gen_W']
                NG_Trigen_req_W[hour] = gas_output['NG_Trigen_req_W']
        # calculate the electrical capacity as a function of the peak produced by the turbine
        master_to_slave_variables.NG_Trigen_CCGT_size_electrical_W = E_Trigen_NG_gen_W.max()
        # BACK-UP VCC - AIR SOURCE
        # The backup air-source VCC is sized to the peak unmet load and its
        # cooling-tower operation re-evaluated over the whole year.
        master_to_slave_variables.AS_BackupVCC_size_W = np.amax(Q_BackupVCC_AS_gen_W)
        size_chiller_CT = master_to_slave_variables.AS_BackupVCC_size_W
        if master_to_slave_variables.AS_BackupVCC_size_W != 0.0:
            master_to_slave_variables.AS_BackupVCC_on = 1
            Q_BackupVCC_AS_gen_W, \
            E_BackupVCC_AS_req_W = np.vectorize(calc_vcc_CT_operation)(Q_BackupVCC_AS_gen_W,
                                                                       T_district_cooling_return_K,
                                                                       T_district_cooling_supply_K,
                                                                       VCC_T_COOL_IN,
                                                                       size_chiller_CT,
                                                                       VC_chiller)
        else:
            E_BackupVCC_AS_req_W = np.zeros(HOURS_IN_YEAR)
        # CAPEX (ANNUAL, TOTAL) AND OPEX (FIXED, VAR, ANNUAL) GENERATION UNITS
        supply_systems = SupplySystemsDatabase(locator)
        mdotnMax_kgpers = np.amax(mdot_kgpers)
        performance_costs_generation, \
        district_cooling_capacity_installed \
            = cost_model.calc_generation_costs_capacity_installed_cooling(locator,
                                                                          master_to_slave_variables,
                                                                          supply_systems,
                                                                          mdotnMax_kgpers
                                                                          )
        # CAPEX (ANNUAL, TOTAL) AND OPEX (FIXED, VAR, ANNUAL) STORAGE UNITS
        performance_costs_storage = cost_model.calc_generation_costs_cooling_storage(master_to_slave_variables,
                                                                                     daily_storage
                                                                                     )
        # CAPEX (ANNUAL, TOTAL) AND OPEX (FIXED, VAR, ANNUAL) NETWORK
        performance_costs_network, \
        E_used_district_cooling_network_W = cost_model.calc_network_costs_cooling(locator,
                                                                                  master_to_slave_variables,
                                                                                  network_features,
                                                                                  "DC")
        # MERGE COSTS AND EMISSIONS IN ONE FILE
        performance = dict(performance_costs_generation, **performance_costs_storage)
        district_cooling_costs = dict(performance, **performance_costs_network)
    else:
        # No district cooling network in this individual: return all-zero
        # hourly profiles and empty cost/capacity dictionaries.
        Q_thermal_req_W = np.zeros(HOURS_IN_YEAR)
        Q_DailyStorage_from_storage_W = np.zeros(HOURS_IN_YEAR)
        Q_DailyStorage_content_W = np.zeros(HOURS_IN_YEAR)
        Q_DailyStorage_to_storage_W = np.zeros(HOURS_IN_YEAR)
        Q_Trigen_NG_gen_directload_W = np.zeros(HOURS_IN_YEAR)
        Q_BaseVCC_WS_gen_directload_W = np.zeros(HOURS_IN_YEAR)
        Q_PeakVCC_WS_gen_directload_W = np.zeros(HOURS_IN_YEAR)
        Q_BaseVCC_AS_gen_directload_W = np.zeros(HOURS_IN_YEAR)
        Q_PeakVCC_AS_gen_directload_W = np.zeros(HOURS_IN_YEAR)
        Q_BackupVCC_AS_directload_W = np.zeros(HOURS_IN_YEAR)
        Q_Trigen_NG_gen_W = np.zeros(HOURS_IN_YEAR)
        Q_BaseVCC_WS_gen_W = np.zeros(HOURS_IN_YEAR)
        Q_PeakVCC_WS_gen_W = np.zeros(HOURS_IN_YEAR)
        Q_BaseVCC_AS_gen_W = np.zeros(HOURS_IN_YEAR)
        Q_PeakVCC_AS_gen_W = np.zeros(HOURS_IN_YEAR)
        Q_BackupVCC_AS_gen_W = np.zeros(HOURS_IN_YEAR)
        E_Trigen_NG_gen_W = np.zeros(HOURS_IN_YEAR)
        E_used_district_cooling_network_W = np.zeros(HOURS_IN_YEAR)
        E_BaseVCC_WS_req_W = np.zeros(HOURS_IN_YEAR)
        E_PeakVCC_WS_req_W = np.zeros(HOURS_IN_YEAR)
        E_BaseVCC_AS_req_W = np.zeros(HOURS_IN_YEAR)
        E_PeakVCC_AS_req_W = np.zeros(HOURS_IN_YEAR)
        E_BackupVCC_AS_req_W = np.zeros(HOURS_IN_YEAR)
        NG_Trigen_req_W = np.zeros(HOURS_IN_YEAR)
        district_cooling_costs = {}
        district_cooling_capacity_installed = {}
    # SAVE
    district_cooling_generation_dispatch = {
        # demand of the network
        "Q_districtcooling_sys_req_W": Q_thermal_req_W,
        # ENERGY GENERATION TO DIRECT LOAD
        # from storage
        "Q_DailyStorage_content_W": Q_DailyStorage_content_W,
        "Q_DailyStorage_to_storage_W": Q_DailyStorage_to_storage_W,
        "Q_DailyStorage_gen_directload_W": Q_DailyStorage_from_storage_W,
        # cooling
        "Q_Trigen_NG_gen_directload_W": Q_Trigen_NG_gen_directload_W,
        "Q_BaseVCC_WS_gen_directload_W": Q_BaseVCC_WS_gen_directload_W,
        "Q_PeakVCC_WS_gen_directload_W": Q_PeakVCC_WS_gen_directload_W,
        "Q_BaseVCC_AS_gen_directload_W": Q_BaseVCC_AS_gen_directload_W,
        "Q_PeakVCC_AS_gen_directload_W": Q_PeakVCC_AS_gen_directload_W,
        "Q_BackupVCC_AS_directload_W": Q_BackupVCC_AS_directload_W,
        # ENERGY GENERATION TOTAL
        # cooling
        "Q_Trigen_NG_gen_W": Q_Trigen_NG_gen_W,
        "Q_BaseVCC_WS_gen_W": Q_BaseVCC_WS_gen_W,
        "Q_PeakVCC_WS_gen_W": Q_PeakVCC_WS_gen_W,
        "Q_BaseVCC_AS_gen_W": Q_BaseVCC_AS_gen_W,
        "Q_PeakVCC_AS_gen_W": Q_PeakVCC_AS_gen_W,
        "Q_BackupVCC_AS_W": Q_BackupVCC_AS_gen_W,
        # electricity
        "E_Trigen_NG_gen_W": E_Trigen_NG_gen_W
    }
    district_cooling_electricity_requirements_dispatch = {
        # ENERGY REQUIREMENTS
        # Electricity
        "E_DCN_req_W": E_used_district_cooling_network_W,
        "E_BaseVCC_WS_req_W": E_BaseVCC_WS_req_W,
        "E_PeakVCC_WS_req_W": E_PeakVCC_WS_req_W,
        "E_BaseVCC_AS_req_W": E_BaseVCC_AS_req_W,
        "E_PeakVCC_AS_req_W": E_PeakVCC_AS_req_W,
        "E_BackupVCC_AS_req_W": E_BackupVCC_AS_req_W,
    }
    district_cooling_fuel_requirements_dispatch = {
        # fuels
        "NG_Trigen_req_W": NG_Trigen_req_W
    }
    # PLOT RESULTS
    return district_cooling_costs, \
           district_cooling_generation_dispatch, \
           district_cooling_electricity_requirements_dispatch, \
           district_cooling_fuel_requirements_dispatch, \
           district_cooling_capacity_installed
def calc_network_summary_DCN(master_to_slave_vars):
    """Extract the hourly district-cooling load profile from the individual's
    network summary.

    When a district heating network exists and waste heat from data-centre
    servers is recovered, the data-centre cooling load is excluded from the
    DCN profile; otherwise the data-centre load is included.  NaNs in the
    summary table are treated as zero.

    :param master_to_slave_vars: energy-system configuration of the individual
        (provides ``DHN_exists``, ``WasteServersHeatRecovery`` and the
        ``DC_network_summary_individual`` DataFrame)
    :return: tuple ``(Q_cooling_req_W, T_re_K, T_sup_K, mdot_kgpers)`` of
        hourly numpy arrays (load, return temp, supply temp, mass flow).
    """
    network_df = master_to_slave_vars.DC_network_summary_individual.fillna(0)
    recover_server_heat = (master_to_slave_vars.DHN_exists
                           and master_to_slave_vars.WasteServersHeatRecovery == 1)
    if recover_server_heat:
        # Data-centre load handled via heat recovery -> exclude it here.
        cols = ('Q_DCNf_space_cooling_and_refrigeration_W',
                'T_DCNf_space_cooling_and_refrigeration_re_K',
                'T_DCNf_space_cooling_and_refrigeration_sup_K',
                'mdot_cool_space_cooling_and_refrigeration_netw_all_kgpers')
    else:
        # No heat recovery -> the DCN also serves the data-centre load.
        cols = ('Q_DCNf_space_cooling_data_center_and_refrigeration_W',
                'T_DCNf_space_cooling_data_center_and_refrigeration_re_K',
                'T_DCNf_space_cooling_data_center_and_refrigeration_sup_K',
                'mdot_cool_space_cooling_data_center_and_refrigeration_netw_all_kgpers')
    q_col, t_re_col, t_sup_col, mdot_col = cols
    return (network_df[q_col].values,
            network_df[t_re_col].values,
            network_df[t_sup_col].values,
            network_df[mdot_col].values)
| [
"numpy.amax",
"numpy.average",
"cea.technologies.chiller_vapor_compression.VaporCompressionChiller",
"cea.optimization.master.cost_model.calc_generation_costs_capacity_installed_cooling",
"cea.optimization.master.cost_model.calc_network_costs_cooling",
"numpy.array",
"numpy.zeros",
"cea.optimization.s... | [((6856, 6948), 'cea.technologies.cogeneration.calc_cop_CCGT', 'calc_cop_CCGT', (['master_to_slave_variables.NG_Trigen_ACH_size_W', 'ACH_T_IN_FROM_CHP_K', '"""NG"""'], {}), "(master_to_slave_variables.NG_Trigen_ACH_size_W,\n ACH_T_IN_FROM_CHP_K, 'NG')\n", (6869, 6948), False, 'from cea.technologies.cogeneration import calc_cop_CCGT\n'), ((6966, 7016), 'cea.technologies.chiller_vapor_compression.VaporCompressionChiller', 'VaporCompressionChiller', (['locator'], {'scale': '"""DISTRICT"""'}), "(locator, scale='DISTRICT')\n", (6989, 7016), False, 'from cea.technologies.chiller_vapor_compression import VaporCompressionChiller\n'), ((7078, 7101), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (7086, 7101), True, 'import numpy as np\n'), ((7131, 7154), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (7139, 7154), True, 'import numpy as np\n'), ((7184, 7207), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (7192, 7207), True, 'import numpy as np\n'), ((7237, 7260), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (7245, 7260), True, 'import numpy as np\n'), ((7290, 7313), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (7298, 7313), True, 'import numpy as np\n'), ((7349, 7372), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (7357, 7372), True, 'import numpy as np\n'), ((7411, 7434), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (7419, 7434), True, 'import numpy as np\n'), ((7475, 7498), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (7483, 7498), True, 'import numpy as np\n'), ((7528, 7551), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (7536, 7551), True, 'import numpy as np\n'), ((7581, 7604), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (7589, 7604), True, 'import numpy as np\n'), ((7634, 
7657), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (7642, 7657), True, 'import numpy as np\n'), ((7687, 7710), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (7695, 7710), True, 'import numpy as np\n'), ((7740, 7763), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (7748, 7763), True, 'import numpy as np\n'), ((7790, 7813), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (7798, 7813), True, 'import numpy as np\n'), ((7845, 7868), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (7853, 7868), True, 'import numpy as np\n'), ((7909, 7932), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (7917, 7932), True, 'import numpy as np\n'), ((7973, 7996), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (7981, 7996), True, 'import numpy as np\n'), ((8037, 8060), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (8045, 8060), True, 'import numpy as np\n'), ((8101, 8124), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (8109, 8124), True, 'import numpy as np\n'), ((8165, 8188), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (8173, 8188), True, 'import numpy as np\n'), ((8227, 8250), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (8235, 8250), True, 'import numpy as np\n'), ((11748, 11777), 'numpy.amax', 'np.amax', (['Q_BackupVCC_AS_gen_W'], {}), '(Q_BackupVCC_AS_gen_W)\n', (11755, 11777), True, 'import numpy as np\n'), ((12737, 12767), 'cea.technologies.supply_systems_database.SupplySystemsDatabase', 'SupplySystemsDatabase', (['locator'], {}), '(locator)\n', (12758, 12767), False, 'from cea.technologies.supply_systems_database import SupplySystemsDatabase\n'), ((12794, 12814), 'numpy.amax', 'np.amax', (['mdot_kgpers'], {}), '(mdot_kgpers)\n', (12801, 12814), True, 'import numpy as np\n'), ((12915, 13047), 
'cea.optimization.master.cost_model.calc_generation_costs_capacity_installed_cooling', 'cost_model.calc_generation_costs_capacity_installed_cooling', (['locator', 'master_to_slave_variables', 'supply_systems', 'mdotnMax_kgpers'], {}), '(locator,\n master_to_slave_variables, supply_systems, mdotnMax_kgpers)\n', (12974, 13047), False, 'from cea.optimization.master import cost_model\n'), ((13453, 13547), 'cea.optimization.master.cost_model.calc_generation_costs_cooling_storage', 'cost_model.calc_generation_costs_cooling_storage', (['master_to_slave_variables', 'daily_storage'], {}), '(master_to_slave_variables,\n daily_storage)\n', (13501, 13547), False, 'from cea.optimization.master import cost_model\n'), ((13867, 13968), 'cea.optimization.master.cost_model.calc_network_costs_cooling', 'cost_model.calc_network_costs_cooling', (['locator', 'master_to_slave_variables', 'network_features', '"""DC"""'], {}), "(locator, master_to_slave_variables,\n network_features, 'DC')\n", (13904, 13968), False, 'from cea.optimization.master import cost_model\n'), ((14462, 14485), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (14470, 14485), True, 'import numpy as np\n'), ((14526, 14549), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (14534, 14549), True, 'import numpy as np\n'), ((14585, 14608), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (14593, 14608), True, 'import numpy as np\n'), ((14647, 14670), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (14655, 14670), True, 'import numpy as np\n'), ((14710, 14733), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (14718, 14733), True, 'import numpy as np\n'), ((14774, 14797), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (14782, 14797), True, 'import numpy as np\n'), ((14838, 14861), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (14846, 14861), True, 
'import numpy as np\n'), ((14902, 14925), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (14910, 14925), True, 'import numpy as np\n'), ((14966, 14989), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (14974, 14989), True, 'import numpy as np\n'), ((15028, 15051), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (15036, 15051), True, 'import numpy as np\n'), ((15080, 15103), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (15088, 15103), True, 'import numpy as np\n'), ((15133, 15156), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (15141, 15156), True, 'import numpy as np\n'), ((15186, 15209), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (15194, 15209), True, 'import numpy as np\n'), ((15239, 15262), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (15247, 15262), True, 'import numpy as np\n'), ((15292, 15315), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (15300, 15315), True, 'import numpy as np\n'), ((15347, 15370), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (15355, 15370), True, 'import numpy as np\n'), ((15399, 15422), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (15407, 15422), True, 'import numpy as np\n'), ((15467, 15490), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (15475, 15490), True, 'import numpy as np\n'), ((15520, 15543), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (15528, 15543), True, 'import numpy as np\n'), ((15573, 15596), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (15581, 15596), True, 'import numpy as np\n'), ((15626, 15649), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (15634, 15649), True, 'import numpy as np\n'), ((15679, 15702), 'numpy.zeros', 'np.zeros', 
(['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (15687, 15702), True, 'import numpy as np\n'), ((15734, 15757), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (15742, 15757), True, 'import numpy as np\n'), ((15784, 15807), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (15792, 15807), True, 'import numpy as np\n'), ((6523, 6546), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (6531, 6546), True, 'import numpy as np\n'), ((6591, 6614), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (6599, 6614), True, 'import numpy as np\n'), ((12608, 12631), 'numpy.zeros', 'np.zeros', (['HOURS_IN_YEAR'], {}), '(HOURS_IN_YEAR)\n', (12616, 12631), True, 'import numpy as np\n'), ((5252, 5274), 'numpy.average', 'np.average', (['T_ground_K'], {}), '(T_ground_K)\n', (5262, 5274), True, 'import numpy as np\n'), ((5785, 5827), 'numpy.array', 'np.array', (["water_body_potential['QLake_kW']"], {}), "(water_body_potential['QLake_kW'])\n", (5793, 5827), True, 'import numpy as np\n'), ((6429, 6467), 'numpy.array', 'np.array', (["water_body_potential['Ts_C']"], {}), "(water_body_potential['Ts_C'])\n", (6437, 6467), True, 'import numpy as np\n'), ((8731, 9034), 'cea.optimization.slave.cooling_resource_activation.cooling_resource_activator', 'cooling_resource_activator', (['Q_thermal_req_W[hour]', 'T_district_cooling_supply_K[hour]', 'T_district_cooling_return_K[hour]', 'Q_therm_water_body_W[hour]', 'T_source_average_water_body_K[hour]', 'T_ground_K[hour]', 'daily_storage', 'absorption_chiller', 'VC_chiller', 'CCGT_prop', 'master_to_slave_variables'], {}), '(Q_thermal_req_W[hour],\n T_district_cooling_supply_K[hour], T_district_cooling_return_K[hour],\n Q_therm_water_body_W[hour], T_source_average_water_body_K[hour],\n T_ground_K[hour], daily_storage, absorption_chiller, VC_chiller,\n CCGT_prop, master_to_slave_variables)\n', (8757, 9034), False, 'from 
cea.optimization.slave.cooling_resource_activation import calc_vcc_CT_operation, cooling_resource_activator\n'), ((12044, 12079), 'numpy.vectorize', 'np.vectorize', (['calc_vcc_CT_operation'], {}), '(calc_vcc_CT_operation)\n', (12056, 12079), True, 'import numpy as np\n')] |
import panda3d as p3d
import numpy as np
from itertools import izip
from plyfile import PlyData, PlyElement, make2d as PlyMake2D # pip install plyfile
from renderer_util import compute_vertex_normals
class PLYNode(p3d.core.GeomNode):
    """Panda3D GeomNode built from a PLY file.

    Vertices (with optional per-vertex normals and colors) are packed into a
    GeomVertexData buffer; if the PLY file has faces the node renders a
    triangle mesh, otherwise a point cloud.

    NOTE: this file uses `xrange` and `itertools.izip` — Python 2 code.
    """
    # TODO (True): large point clouds will overrun the buffer; so, we'll have to
    # split up into smaller
    MAX_NUM_VERTICES = 2**32
    DEFAULT_MESH_COLOR = 180 # grayscale value
    def __init__(self, ply_file, name=""):
        """Read *ply_file* and populate this node's geometry.

        ply_file: path (or file object) accepted by ``PlyData.read``.
        name: name passed to the underlying GeomNode.
        """
        super(PLYNode, self).__init__(name)
        # load mesh
        mesh = PlyData.read(ply_file)
        v = mesh["vertex"] # easier-to-read reference to the vertex data
        self.num_vertices = v.count
        self.has_faces = ("face" in mesh and mesh["face"].count > 0)
        # Stack x/y/z into an (N, 3) float32 array; astype(copy=False) avoids
        # a copy when the PLY data is already float32.
        vertices = np.column_stack(
            [v["x"].astype(np.float32, copy=False),
             v["y"].astype(np.float32, copy=False),
             v["z"].astype(np.float32, copy=False)])
        # Extra per-vertex columns (normals, colors) collected here and
        # column-stacked onto `vertices` at the end.
        vertex_data = []
        self.has_normals = all(
            x in v._property_lookup for x in ["nx", "ny", "nz"])
        if self.has_normals:
            vertex_data += [
                v["nx"].astype(np.float32, copy=False),
                v["ny"].astype(np.float32, copy=False),
                v["nz"].astype(np.float32, copy=False)]
        if self.has_faces:
            #faces = np.array([f[0] for f in mesh["face"]])
            self.num_faces = mesh["face"].count
            # make2d turns the list-of-lists vertex indices into an (F, 3) array.
            faces = PlyMake2D(mesh["face"].data["vertex_indices"])
            # set up vertex normals from faces
            if not self.has_normals:
                vertex_data.append(compute_vertex_normals(vertices, faces))
                self.has_normals = True
        else:
            self.num_faces = 0
        self.has_colors = all(
            x in v._property_lookup for x in ["red", "green", "blue"])
        if self.has_colors:
            colors = np.empty((len(vertices), 4), dtype=np.uint8)
            # TODO (True): maybe check for all integer types?
            # uint8 colors are copied as-is; otherwise values are assumed to
            # be in [0, 1] and scaled to [0, 255].
            if v["red"].dtype == np.uint8:
                colors[:,0] = v["red"]
                colors[:,1] = v["green"]
                colors[:,2] = v["blue"]
            else:
                colors[:,0] = v["red"] * 255.
                colors[:,1] = v["green"] * 255.
                colors[:,2] = v["blue"] * 255.
            colors[:,3] = 255  # fully opaque alpha
            # alias the color uint8 values as a single float32 for convenience
            vertex_data.append(colors.view(np.float32))
        elif self.has_faces:
            # draw a colorless mesh as gray
            self.has_colors = True
            colors = np.empty((len(vertices), 4), dtype=np.uint8)
            colors[:] = np.array(
                (PLYNode.DEFAULT_MESH_COLOR,) * 3 + (255,), dtype=np.uint8)
            vertex_data.append(colors.view(np.float32))
        if vertex_data:
            # Interleave position + optional normal/color columns row-wise so
            # the raw bytes match the chosen GeomVertexFormat below.
            vertices = np.column_stack([vertices] + vertex_data)
        # set up data in chunks
        for i in xrange(0, len(vertices), PLYNode.MAX_NUM_VERTICES):
            stop = min(i + PLYNode.MAX_NUM_VERTICES, len(vertices))
            n = stop - i
            # Pick the vertex format matching which columns were packed above.
            if self.has_colors and self.has_normals:
                p3d_data_format = p3d.core.GeomVertexFormat().getV3n3c4()
            elif self.has_colors:
                p3d_data_format = p3d.core.GeomVertexFormat().getV3c4()
            elif self.has_normals:
                p3d_data_format = p3d.core.GeomVertexFormat().getV3n3()
            else:
                p3d_data_format = p3d.core.GeomVertexFormat().getV3()
            p3d_data = p3d.core.GeomVertexData(
                name, p3d_data_format, p3d.core.Geom.UHStatic)
            p3d_data.setNumRows(n)
            # Blit the raw interleaved bytes straight into the vertex buffer.
            p3d_data.modifyArray(0).modifyHandle().setData(
                vertices[i:stop].tostring())
            # add faces, if available
            if self.has_faces:
                p3d_primitives = p3d.core.GeomTriangles(p3d.core.Geom.UHStatic)
                p3d_primitives.setIndexType(p3d.core.GeomEnums.NTUint32)
                # Keep only triangles whose vertices all fall in this chunk.
                # NOTE(review): triangles straddling a chunk boundary are
                # dropped — only matters once MAX_NUM_VERTICES is exceeded.
                mask = np.all(faces >= i, axis=1) & np.all(faces < stop, axis=1)
                p3d_primitives.modifyVertices().modifyHandle().setData(
                    faces[mask].tostring())
            # otherwise, render a point cloud
            else:
                p3d_primitives = p3d.core.GeomPoints(p3d.core.Geom.UHStatic)
                p3d_primitives.addNextVertices(n)
            geom = p3d.core.Geom(p3d_data)
            geom.addPrimitive(p3d_primitives)
            self.addGeom(geom)
| [
"panda3d.core.Geom",
"panda3d.core.GeomTriangles",
"numpy.column_stack",
"numpy.array",
"panda3d.core.GeomPoints",
"renderer_util.compute_vertex_normals",
"panda3d.core.GeomVertexData",
"plyfile.make2d",
"plyfile.PlyData.read",
"numpy.all",
"panda3d.core.GeomVertexFormat"
] | [((546, 568), 'plyfile.PlyData.read', 'PlyData.read', (['ply_file'], {}), '(ply_file)\n', (558, 568), False, 'from plyfile import PlyData, PlyElement, make2d as PlyMake2D\n'), ((1448, 1494), 'plyfile.make2d', 'PlyMake2D', (["mesh['face'].data['vertex_indices']"], {}), "(mesh['face'].data['vertex_indices'])\n", (1457, 1494), True, 'from plyfile import PlyData, PlyElement, make2d as PlyMake2D\n'), ((2883, 2924), 'numpy.column_stack', 'np.column_stack', (['([vertices] + vertex_data)'], {}), '([vertices] + vertex_data)\n', (2898, 2924), True, 'import numpy as np\n'), ((3573, 3643), 'panda3d.core.GeomVertexData', 'p3d.core.GeomVertexData', (['name', 'p3d_data_format', 'p3d.core.Geom.UHStatic'], {}), '(name, p3d_data_format, p3d.core.Geom.UHStatic)\n', (3596, 3643), True, 'import panda3d as p3d\n'), ((4434, 4457), 'panda3d.core.Geom', 'p3d.core.Geom', (['p3d_data'], {}), '(p3d_data)\n', (4447, 4457), True, 'import panda3d as p3d\n'), ((2685, 2753), 'numpy.array', 'np.array', (['((PLYNode.DEFAULT_MESH_COLOR,) * 3 + (255,))'], {'dtype': 'np.uint8'}), '((PLYNode.DEFAULT_MESH_COLOR,) * 3 + (255,), dtype=np.uint8)\n', (2693, 2753), True, 'import numpy as np\n'), ((3905, 3951), 'panda3d.core.GeomTriangles', 'p3d.core.GeomTriangles', (['p3d.core.Geom.UHStatic'], {}), '(p3d.core.Geom.UHStatic)\n', (3927, 3951), True, 'import panda3d as p3d\n'), ((4320, 4363), 'panda3d.core.GeomPoints', 'p3d.core.GeomPoints', (['p3d.core.Geom.UHStatic'], {}), '(p3d.core.Geom.UHStatic)\n', (4339, 4363), True, 'import panda3d as p3d\n'), ((1615, 1654), 'renderer_util.compute_vertex_normals', 'compute_vertex_normals', (['vertices', 'faces'], {}), '(vertices, faces)\n', (1637, 1654), False, 'from renderer_util import compute_vertex_normals\n'), ((4048, 4074), 'numpy.all', 'np.all', (['(faces >= i)'], {'axis': '(1)'}), '(faces >= i, axis=1)\n', (4054, 4074), True, 'import numpy as np\n'), ((4077, 4105), 'numpy.all', 'np.all', (['(faces < stop)'], {'axis': '(1)'}), '(faces < stop, axis=1)\n', 
(4083, 4105), True, 'import numpy as np\n'), ((3208, 3235), 'panda3d.core.GeomVertexFormat', 'p3d.core.GeomVertexFormat', ([], {}), '()\n', (3233, 3235), True, 'import panda3d as p3d\n'), ((3316, 3343), 'panda3d.core.GeomVertexFormat', 'p3d.core.GeomVertexFormat', ([], {}), '()\n', (3341, 3343), True, 'import panda3d as p3d\n'), ((3423, 3450), 'panda3d.core.GeomVertexFormat', 'p3d.core.GeomVertexFormat', ([], {}), '()\n', (3448, 3450), True, 'import panda3d as p3d\n'), ((3513, 3540), 'panda3d.core.GeomVertexFormat', 'p3d.core.GeomVertexFormat', ([], {}), '()\n', (3538, 3540), True, 'import panda3d as p3d\n')] |
from keras.models import Sequential, load_model
from keras.layers import Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, BatchNormalization, Activation
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Adam, SGD
from keras.backend import clear_session
import numpy as np
import os
import logging
import time
import tempfile
from scipy import misc
from app.models.base import BaseModel
from app.utils import mkdir_p
from app import GENERATED_TILES_FOLDER
def make_untrainable(model):
    """Freeze *model* for training.

    Clears the ``trainable`` flag on the model itself and on each of its
    layers so the optimizer will not update its weights.
    """
    for component in [model] + list(model.layers):
        component.trainable = False
class BestDCGAN(BaseModel):
    """DCGAN for tile images.

    Keeps two copies of the discriminator: a trainable one (updated on real
    and generated batches) and a frozen one stacked behind the generator in
    ``self.model``; weights are copied from the trainable copy before each
    generator update.
    """
    EPOCHS = 1000          # training epochs over the image loader
    NOISE_SIZE = 100       # dimensionality of the generator's latent vector
    MAX_BATCH_SIZE = 128   # upper bound on minibatch size
    def _construct_model(self):
        """Build generator, both discriminator copies, and the stacked GAN."""
        self.trainable_discriminator = self._construct_discriminator()
        self.untrainable_discriminator = self._construct_discriminator()
        self.generator = self._construct_generator()
        self.model = self._construct_full(self.generator, self.untrainable_discriminator)
        self._compile()
    def _construct_from_file(self, filename):
        """Restore a previously saved stacked model from *filename*.

        The file is loaded twice: once for the frozen generator/discriminator
        stack, and once more to get an independent trainable discriminator copy.
        """
        model = load_model(filename)
        self.generator = model.layers[0]
        self.untrainable_discriminator = model.layers[1]
        make_untrainable(self.untrainable_discriminator)
        self.model = model
        model_copy = load_model(filename)
        self.trainable_discriminator = model_copy.layers[1]
        self._compile()
    def _construct_generator(self):
        """Return the generator: noise (NOISE_SIZE,) -> 64x64x3 tanh image."""
        model = Sequential()
        # Project noise to a 4x4x1024 feature map, then upsample x2 four times.
        model.add(Dense(input_dim=self.NOISE_SIZE, units=(4*4*1024)))
        model.add(Reshape((4, 4, 1024)))
        model.add(LeakyReLU(0.2))
        model.add(Conv2DTranspose(512, 5, strides=2, padding='same'))
        model.add(LeakyReLU(0.2))
        model.add(Conv2DTranspose(256, 5, strides=2, padding='same'))
        model.add(LeakyReLU(0.2))
        model.add(Conv2DTranspose(128, 5, strides=2, padding='same'))
        model.add(LeakyReLU(0.2))
        # tanh output keeps pixel values in [-1, 1], matching _load_batch scaling.
        model.add(Conv2DTranspose(3, 5, strides=2, padding='same', activation='tanh'))
        return model
    def _construct_discriminator(self):
        """Return the discriminator: image -> probability the image is real."""
        model = Sequential()
        # Four strided conv blocks downsample the image before the sigmoid head.
        model.add(Conv2D(64, 5, strides=2, padding='same', input_shape=self.image_size))
        model.add(LeakyReLU(0.2))
        model.add(Conv2D(128, 5, strides=2, padding='same'))
        model.add(BatchNormalization())
        model.add(LeakyReLU(0.2))
        model.add(Conv2D(256, 5, strides=2, padding='same'))
        model.add(BatchNormalization())
        model.add(LeakyReLU(0.2))
        model.add(Conv2D(512, 5, strides=2, padding='same'))
        model.add(BatchNormalization())
        model.add(LeakyReLU(0.2))
        model.add(Reshape((4*4*512,)))
        model.add(Dense(1, activation='sigmoid'))
        return model
    def _construct_full(self, generator, discriminator):
        """Stack generator + frozen discriminator into the end-to-end GAN."""
        make_untrainable(discriminator)
        model = Sequential()
        model.add(generator)
        model.add(discriminator)
        return model
    def _compile(self):
        """Compile both trainable models; discriminator uses a smaller LR."""
        self.trainable_discriminator.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.00001), metrics=['accuracy'])
        self.model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0001), metrics=['accuracy'])
    def _copy_weights(self):
        """Sync frozen discriminator weights from the trainable copy."""
        self.untrainable_discriminator.set_weights(self.trainable_discriminator.get_weights())
    def _generate_noise(self, num):
        """Return *num* latent vectors drawn from a standard normal."""
        return np.random.normal(0.0, 1.0, (num, self.NOISE_SIZE))
    def _generate_batch(self, num):
        """Generate *num* images from random noise (values in [-1, 1])."""
        return self.generator.predict(self._generate_noise(num))
    def generate_image(self):
        """Generate one image and rescale from [-1, 1] to uint8 [0, 255]."""
        return np.clip(((self._generate_batch(1)[0] + 1)*128.0), 0, 255).astype('uint8')
    def _load_batch(self, size):
        """Pull *size* real images from the loader, rescaled to [-1, 1].

        Assumes ``self.image_loader`` (provided by BaseModel — confirm) yields
        uint8-range images.
        """
        return np.array([(next(self.image_loader)/127.5) - 1 for _ in range(size)])
    def train(self):
        """Run adversarial training, saving sample images and checkpoints.

        Per batch: train the discriminator (alternating fake/real, whichever
        currently has the higher loss) until the combined loss drops below
        0.8, copy its weights to the frozen copy, then train the generator
        until its loss drops below the threshold. A sample image is written
        after every batch; the model is checkpointed every 1000 batches.
        """
        i = 0  # global batch counter across all epochs
        model_name = "best_dcgan-{}".format(time.time())
        folder = os.path.join(GENERATED_TILES_FOLDER, model_name)
        mkdir_p(folder)
        for epoch in range(self.EPOCHS):
            logging.info("=== Epoch {}".format(epoch))
            for batch_base in range(0, len(self.image_loader), self.MAX_BATCH_SIZE):
                i += 1
                # Last batch of an epoch may be smaller than MAX_BATCH_SIZE.
                batch_size = min(len(self.image_loader) - batch_base, self.MAX_BATCH_SIZE)
                logging.info("Training {} images".format(batch_size))
                g_loss = float('inf')
                r_loss = float('inf')
                # Train the discriminator until fake-loss + real-loss < 0.8,
                # always attacking whichever side is currently weaker.
                while g_loss + r_loss > 0.8:
                    if g_loss >= r_loss:
                        generated_images_batch_size = batch_size
                        generated_images_X = self._generate_batch(generated_images_batch_size)
                        generated_images_Y = np.array([0.0]*generated_images_batch_size)  # label: fake
                        gen_loss = self.trainable_discriminator.train_on_batch(generated_images_X, generated_images_Y)
                        logging.info("Discriminator gen. loss: {}".format(gen_loss))
                        g_loss = gen_loss[0]
                    else:
                        real_images_batch_size = batch_size
                        real_images_X = self._load_batch(real_images_batch_size)
                        real_images_Y = np.array([1.0]*real_images_batch_size)  # label: real
                        real_loss = self.trainable_discriminator.train_on_batch(real_images_X, real_images_Y)
                        logging.info("Discriminator real loss: {}".format(real_loss))
                        r_loss = real_loss[0]
                logging.info("Copying weights...")
                self._copy_weights()
                generator_loss = float('inf')
                # Looser threshold on the very first batch, tighter afterwards.
                while generator_loss > (15.0 if i == 1 else 4.0):
                    generator_batch_size = batch_size
                    generator_X = self._generate_noise(generator_batch_size)
                    generator_Y = np.array([1.0]*generator_batch_size)  # generator wants "real"
                    g_loss = self.model.train_on_batch(generator_X, generator_Y)
                    generator_loss = g_loss[0]
                    logging.info("Generator loss: {}".format(g_loss))
                logging.info("Generating image...")
                filename = os.path.join(folder, '{:06d}.png'.format(i))
                image = self.generate_image()
                misc.imsave(filename, image)
                # Also overwrite a fixed-name file for easy progress monitoring.
                misc.imsave(os.path.join(folder, '000000__current.png'), image)
                if i % 1000 == 0:
                    logging.info("=== Writing model to disk")
                    self.model.save("models/" + model_name + "-{}.h5".format(i))
        logging.info("=== Writing model to disk")
        self.model.save(model_name)
| [
"numpy.random.normal",
"keras.optimizers.Adam",
"keras.layers.Conv2D",
"keras.models.load_model",
"scipy.misc.imsave",
"os.path.join",
"logging.info",
"keras.layers.advanced_activations.LeakyReLU",
"app.utils.mkdir_p",
"keras.models.Sequential",
"numpy.array",
"keras.layers.Conv2DTranspose",
... | [((1072, 1092), 'keras.models.load_model', 'load_model', (['filename'], {}), '(filename)\n', (1082, 1092), False, 'from keras.models import Sequential, load_model\n'), ((1277, 1297), 'keras.models.load_model', 'load_model', (['filename'], {}), '(filename)\n', (1287, 1297), False, 'from keras.models import Sequential, load_model\n'), ((1421, 1433), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1431, 1433), False, 'from keras.models import Sequential, load_model\n'), ((2006, 2018), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2016, 2018), False, 'from keras.models import Sequential, load_model\n'), ((2705, 2717), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2715, 2717), False, 'from keras.models import Sequential, load_model\n'), ((3196, 3246), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', '(num, self.NOISE_SIZE)'], {}), '(0.0, 1.0, (num, self.NOISE_SIZE))\n', (3212, 3246), True, 'import numpy as np\n'), ((3665, 3713), 'os.path.join', 'os.path.join', (['GENERATED_TILES_FOLDER', 'model_name'], {}), '(GENERATED_TILES_FOLDER, model_name)\n', (3677, 3713), False, 'import os\n'), ((3718, 3733), 'app.utils.mkdir_p', 'mkdir_p', (['folder'], {}), '(folder)\n', (3725, 3733), False, 'from app.utils import mkdir_p\n'), ((1448, 1500), 'keras.layers.Dense', 'Dense', ([], {'input_dim': 'self.NOISE_SIZE', 'units': '(4 * 4 * 1024)'}), '(input_dim=self.NOISE_SIZE, units=4 * 4 * 1024)\n', (1453, 1500), False, 'from keras.layers import Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, BatchNormalization, Activation\n'), ((1514, 1535), 'keras.layers.Reshape', 'Reshape', (['(4, 4, 1024)'], {}), '((4, 4, 1024))\n', (1521, 1535), False, 'from keras.layers import Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, BatchNormalization, Activation\n'), ((1551, 1565), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1560, 1565), False, 'from keras.layers.advanced_activations import 
LeakyReLU\n'), ((1581, 1631), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(512)', '(5)'], {'strides': '(2)', 'padding': '"""same"""'}), "(512, 5, strides=2, padding='same')\n", (1596, 1631), False, 'from keras.layers import Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, BatchNormalization, Activation\n'), ((1647, 1661), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1656, 1661), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((1677, 1727), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(256)', '(5)'], {'strides': '(2)', 'padding': '"""same"""'}), "(256, 5, strides=2, padding='same')\n", (1692, 1727), False, 'from keras.layers import Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, BatchNormalization, Activation\n'), ((1743, 1757), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1752, 1757), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((1773, 1823), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(128)', '(5)'], {'strides': '(2)', 'padding': '"""same"""'}), "(128, 5, strides=2, padding='same')\n", (1788, 1823), False, 'from keras.layers import Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, BatchNormalization, Activation\n'), ((1839, 1853), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1848, 1853), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((1869, 1936), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(3)', '(5)'], {'strides': '(2)', 'padding': '"""same"""', 'activation': '"""tanh"""'}), "(3, 5, strides=2, padding='same', activation='tanh')\n", (1884, 1936), False, 'from keras.layers import Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, BatchNormalization, Activation\n'), ((2033, 2102), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(5)'], {'strides': '(2)', 'padding': '"""same"""', 'input_shape': 'self.image_size'}), "(64, 5, 
strides=2, padding='same', input_shape=self.image_size)\n", (2039, 2102), False, 'from keras.layers import Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, BatchNormalization, Activation\n'), ((2118, 2132), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2127, 2132), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((2148, 2189), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(5)'], {'strides': '(2)', 'padding': '"""same"""'}), "(128, 5, strides=2, padding='same')\n", (2154, 2189), False, 'from keras.layers import Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, BatchNormalization, Activation\n'), ((2205, 2225), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2223, 2225), False, 'from keras.layers import Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, BatchNormalization, Activation\n'), ((2241, 2255), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2250, 2255), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((2271, 2312), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(5)'], {'strides': '(2)', 'padding': '"""same"""'}), "(256, 5, strides=2, padding='same')\n", (2277, 2312), False, 'from keras.layers import Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, BatchNormalization, Activation\n'), ((2328, 2348), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2346, 2348), False, 'from keras.layers import Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, BatchNormalization, Activation\n'), ((2364, 2378), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2373, 2378), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((2394, 2435), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(5)'], {'strides': '(2)', 'padding': '"""same"""'}), "(512, 5, strides=2, padding='same')\n", (2400, 2435), False, 'from keras.layers import Dense, Reshape, Flatten, 
Conv2D, Conv2DTranspose, BatchNormalization, Activation\n'), ((2451, 2471), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2469, 2471), False, 'from keras.layers import Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, BatchNormalization, Activation\n'), ((2487, 2501), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2496, 2501), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((2517, 2540), 'keras.layers.Reshape', 'Reshape', (['(4 * 4 * 512,)'], {}), '((4 * 4 * 512,))\n', (2524, 2540), False, 'from keras.layers import Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, BatchNormalization, Activation\n'), ((2552, 2582), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (2557, 2582), False, 'from keras.layers import Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, BatchNormalization, Activation\n'), ((3639, 3650), 'time.time', 'time.time', ([], {}), '()\n', (3648, 3650), False, 'import time\n'), ((5945, 5986), 'logging.info', 'logging.info', (['"""=== Writing model to disk"""'], {}), "('=== Writing model to disk')\n", (5957, 5986), False, 'import logging\n'), ((2891, 2905), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(1e-05)'}), '(lr=1e-05)\n', (2895, 2905), False, 'from keras.optimizers import Adam, SGD\n'), ((2992, 3007), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (2996, 3007), False, 'from keras.optimizers import Adam, SGD\n'), ((5031, 5065), 'logging.info', 'logging.info', (['"""Copying weights..."""'], {}), "('Copying weights...')\n", (5043, 5065), False, 'import logging\n'), ((5541, 5576), 'logging.info', 'logging.info', (['"""Generating image..."""'], {}), "('Generating image...')\n", (5553, 5576), False, 'import logging\n'), ((5687, 5715), 'scipy.misc.imsave', 'misc.imsave', (['filename', 'image'], {}), '(filename, image)\n', (5698, 5715), False, 'from scipy import misc\n'), 
((5327, 5365), 'numpy.array', 'np.array', (['([1.0] * generator_batch_size)'], {}), '([1.0] * generator_batch_size)\n', (5335, 5365), True, 'import numpy as np\n'), ((5736, 5779), 'os.path.join', 'os.path.join', (['folder', '"""000000__current.png"""'], {}), "(folder, '000000__current.png')\n", (5748, 5779), False, 'import os\n'), ((5825, 5866), 'logging.info', 'logging.info', (['"""=== Writing model to disk"""'], {}), "('=== Writing model to disk')\n", (5837, 5866), False, 'import logging\n'), ((4359, 4404), 'numpy.array', 'np.array', (['([0.0] * generated_images_batch_size)'], {}), '([0.0] * generated_images_batch_size)\n', (4367, 4404), True, 'import numpy as np\n'), ((4777, 4817), 'numpy.array', 'np.array', (['([1.0] * real_images_batch_size)'], {}), '([1.0] * real_images_batch_size)\n', (4785, 4817), True, 'import numpy as np\n')] |
import os
import glob
import picamera
import cv2
import numpy as np
import importlib.util
from datetime import datetime
import videorecorder as vr
import time
from collections import Counter
# If using TPU, need to load a different library
# from tensorflow.lite.python.interpreter import Interpreter
def take_picture(path):
    """Capture a single timestamped JPEG with the Pi camera.

    Args:
        path: Directory to save the image into; defaults to
            ``/home/pi/Pictures`` when ``None``.

    The camera is always released, even if the capture fails; exceptions
    from the camera propagate to the caller.
    """
    if path is None:
        path = "/home/pi/Pictures"
    camera = picamera.PiCamera()
    try:
        camera.capture(os.path.join(path, "image_{0}.jpg".format(datetime.now().strftime('%m%d%Y%H%M%S'))))
        # Fix: the success message used to live in a ``finally`` block, so it
        # printed even when the capture raised. Report success only on success.
        print('Picture taken')
    finally:
        camera.close()
def record_video(path=None, cone_color='green', duration=5, runid=0):
    """Record a video of *duration* seconds into ``<path>/<cone_color>``.

    Args:
        path: Base output directory; defaults to ``/home/pi/Videos``.
        cone_color: Subdirectory name (the cone color being recorded).
        duration: Recording length in seconds.
        runid: Run identifier passed to the video recorder.
    """
    if path is None:
        path = "/home/pi/Videos"
    path = os.path.join(path, cone_color)
    try:
        recorder = vr.VideoRecorder(path, runid)
        print('Loaded Video Recorder')
        recorder.start_recording()
        time.sleep(duration)
        recorder.stop_recording()
        # Fix: the success message used to print from a ``finally`` block, so
        # it appeared even after a failure. Report success only on success.
        print('Video recorded')
    except Exception as exc:
        # Fix: was a bare ``except:`` which also swallowed KeyboardInterrupt
        # and SystemExit; catch Exception and include the cause in the message.
        print('Video Recording failed: {}'.format(exc))
class ObjectClassificationModel:
def __init__(self, model_dir, image_dir, graph_name='detect.tflite', min_conf_threshold=0.5, use_TPU=False):
self.model_dir = model_dir
self.image_dir = image_dir
self.min_conf_threshold = float(min_conf_threshold)
self.use_TPU = use_TPU
self._load_model(model_dir=model_dir, graph_name=graph_name)
    def _load_model(self, model_dir, graph_name):
        """Load labels and the TFLite interpreter, caching model metadata.

        Sets ``self.labels``, ``self.interpreter``, input/output tensor
        details, the expected input ``height``/``width``, and normalization
        constants for floating-point models.
        """
        CWD_PATH = os.getcwd()
        # Load model labels
        PATH_TO_LABELS = os.path.join(CWD_PATH, model_dir, 'labelmap.txt')
        with open(PATH_TO_LABELS, 'r') as f:
            labels = [line.strip() for line in f.readlines()]
        # COCO-style labelmaps start with a '???' placeholder row; drop it so
        # label indices line up with the model's class ids.
        if labels[0] == '???':
            del(labels[0])
        self.labels = labels
        # Prefer the full TensorFlow package when installed; otherwise fall
        # back to the standalone tflite_runtime interpreter.
        pkg = importlib.util.find_spec('tensorflow')
        if pkg is None:
            from tflite_runtime.interpreter import Interpreter
            if self.use_TPU:
                print('Loading tflite interpreter')
                from tflite_runtime.interpreter import load_delegate
        else:
            from tensorflow.lite.python.interpreter import Interpreter
            if self.use_TPU:
                print('Loading tflite interpreter')
                from tflite_runtime.interpreter import load_delegate
        # If using Edge TPU, assign filename for Edge TPU model
        if self.use_TPU:
            # If user has specified the name of the .tflite file, use that name, otherwise use default 'edgetpu.tflite'
            if (graph_name == 'detect.tflite'):
                graph_name = 'edgetpu.tflite'
        PATH_TO_CKPT = os.path.join(CWD_PATH, model_dir, graph_name)
        # Load the Tensorflow Lite model.
        # If using Edge TPU, use special load_delegate argument
        if self.use_TPU:
            self.interpreter = Interpreter(model_path=PATH_TO_CKPT,
                                           experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
            print(PATH_TO_CKPT)
        else:
            self.interpreter = Interpreter(model_path=PATH_TO_CKPT)
        self.interpreter.allocate_tensors()
        # Get model details
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
        # Input tensor shape is [1, height, width, channels].
        self.height = self.input_details[0]['shape'][1]
        self.width = self.input_details[0]['shape'][2]
        # Non-quantized (float32) models need input normalization in classify().
        self.floating_model = (self.input_details[0]['dtype'] == np.float32)
        self.input_mean = 127.5
        self.input_std = 127.5
def classify(self, image_dir):
images = glob.glob(image_dir + '/*')
classes_list = []
scores_list = []
for image_path in images:
print('Classifying: {}'.format(image_path))
# Load image and resize to expected shape [1xHxWx3]
image = cv2.imread(image_path)
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
imH, imW, _ = image.shape
image_resized = cv2.resize(image_rgb, (self.width, self.height))
input_data = np.expand_dims(image_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if self.floating_model:
input_data = (np.float32(input_data) - self.input_mean) / self.input_std
# Perform the actual detection by running the model with the image as input
self.interpreter.set_tensor(self.input_details[0]['index'],input_data)
self.interpreter.invoke()
# Retrieve detection results
# We are not using the boxes right now since we do not need to know
# where picture the object is, only that it is there.
# boxes = interpreter.get_tensor(output_details[0]['index'])[0] # Bounding box coordinates of detected objects
classes = self.interpreter.get_tensor(self.output_details[1]['index'])[0] # Class index of detected objects
scores = self.interpreter.get_tensor(self.output_details[2]['index'])[0] # Confidence of detected objects
classes_list.append(classes[scores > self.min_conf_threshold])
scores_list.append(scores[scores > self.min_conf_threshold])
objects_detected = {}
for classes in classes_list:
objects = set([self.labels[int(c)] for c in classes])
for obj in objects:
if obj in objects_detected.keys():
objects_detected[obj] += 1
else:
objects_detected[obj] = 1
return classes_list, scores_list, objects_detected
def classify_video(self, video_dir):
"""Function to detect objects in video file"""
#1. Get the list of all video files from the directory passed in
videos = glob.glob(video_dir + '/*')
#2. Check the number of *.avi files in the folder
num_videos = len(videos)
#3. Do not run classification if number of videos in the folder are more than 10 and alert
if num_videos > 10:
print('Found more than 10 videos in the directory: {}'.format(video_dir))
return
#4. For each video file
for video_file in videos:
video_name=os.path.basename(video_file)
print('Processing video: {}'.format(video_name))
#4.1 Open the video file
video = cv2.VideoCapture(video_file)
imW = video.get(cv2.CAP_PROP_FRAME_WIDTH)
imH = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
collect_labels = []
#4 .1.1 pass frame by frame to the detection model
index = 0
print('Min Threshold',self.min_conf_threshold)
while(video.isOpened()):
# Acquire frame and resize to expected shape [1xHxWx3]
ret, frame = video.read()
if frame is None:
break
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (self.width, self.height))
input_data = np.expand_dims(frame_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if self.floating_model:
input_data = (np.float32(input_data) - self.input_mean) / self.input_std
# Perform the actual detection by running the model with the image as input
self.interpreter.set_tensor(self.input_details[0]['index'],input_data)
self.interpreter.invoke()
# Retrieve detection results
boxes = self.interpreter.get_tensor(self.output_details[0]['index'])[0] # Bounding box coordinates of detected objects
classes = self.interpreter.get_tensor(self.output_details[1]['index'])[0] # Class index of detected objects
scores = self.interpreter.get_tensor(self.output_details[2]['index'])[0] # Confidence of detected objects
# Loop over all detections and draw detection box if confidence is above minimum threshold
for i in range(len(scores)):
if ((scores[i] > self.min_conf_threshold) and (scores[i] <= 1.0)):
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1,(boxes[i][0] * imH)))
xmin = int(max(1,(boxes[i][1] * imW)))
ymax = int(min(imH,(boxes[i][2] * imH)))
xmax = int(min(imW,(boxes[i][3] * imW)))
cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 4)
# Draw label
object_name = self.labels[int(classes[i])] # Look up object name from "labels" array using class index
label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
collect_labels.append(object_name)
index += 1
# All the results have been drawn on the frame, so it's time to display it.
cv2.imshow('Object detector', frame)
video.release()
#cv2.destroyAllWindows()
if len(collect_labels)>0:
most_common,num_most_common = Counter(collect_labels).most_common(1)[0]
max_object = most_common
else:
max_object = 'Nothing'
print('Maximum detected object :{}'.format(max_object))
print(Counter(collect_labels))
return max_object
class ConeClassificationModel:
    """TensorFlow Lite detector specialized for cone detection in images.

    Mirrors ObjectClassificationModel but defaults to the cone graph files
    and additionally returns bounding boxes plus per-label pixel coordinates
    for the last processed image.
    """

    def __init__(self, model_dir, image_dir, graph_name='cone_detect.tflite', min_conf_threshold=0.5, use_TPU=False):
        """
        Args:
            model_dir: directory (relative to the CWD) containing the .tflite
                graph and 'labelmap.txt'.
            image_dir: default directory of images to classify.
            graph_name: file name of the .tflite graph inside model_dir.
            min_conf_threshold: detections below this confidence are dropped.
            use_TPU: if True, load the Edge TPU delegate and the edgetpu graph.
        """
        self.model_dir = model_dir
        self.image_dir = image_dir
        self.min_conf_threshold = float(min_conf_threshold)
        self.use_TPU = use_TPU
        self._load_model(model_dir=model_dir, graph_name=graph_name)

    def _load_model(self, model_dir, graph_name):
        """Load labels and the TFLite interpreter; cache model I/O details."""
        CWD_PATH = os.getcwd()
        # Load model labels.
        PATH_TO_LABELS = os.path.join(CWD_PATH, model_dir, 'labelmap.txt')
        with open(PATH_TO_LABELS, 'r') as f:
            labels = [line.strip() for line in f.readlines()]
        # COCO-style label maps carry a '???' placeholder for class id 0.
        if labels[0] == '???':
            del labels[0]
        self.labels = labels
        # Prefer the full tensorflow package when installed; otherwise fall
        # back to the lightweight tflite_runtime package.
        pkg = importlib.util.find_spec('tensorflow')
        if pkg is None:
            from tflite_runtime.interpreter import Interpreter
            if self.use_TPU:
                print('Loading tflite interpreter')
                from tflite_runtime.interpreter import load_delegate
        else:
            from tensorflow.lite.python.interpreter import Interpreter
            if self.use_TPU:
                print('Loading tflite interpreter')
                # Fix: when tensorflow is installed, load_delegate must also
                # come from tensorflow -- tflite_runtime may not be present.
                from tensorflow.lite.python.interpreter import load_delegate
        # If using Edge TPU, switch to the Edge-TPU-compiled model file.
        if self.use_TPU:
            # If the user kept the default name, substitute the TPU default.
            if graph_name == 'cone_detect.tflite':
                graph_name = 'cone_edgetpu.tflite'
        PATH_TO_CKPT = os.path.join(CWD_PATH, model_dir, graph_name)
        # Load the TFLite model; the Edge TPU needs the libedgetpu delegate.
        if self.use_TPU:
            self.interpreter = Interpreter(model_path=PATH_TO_CKPT,
                                           experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
            print(PATH_TO_CKPT)
        else:
            self.interpreter = Interpreter(model_path=PATH_TO_CKPT)
        self.interpreter.allocate_tensors()
        # Cache model details used on every inference.
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
        self.height = self.input_details[0]['shape'][1]
        self.width = self.input_details[0]['shape'][2]
        self.floating_model = (self.input_details[0]['dtype'] == np.float32)
        # Standard SSD normalization constants for float models.
        self.input_mean = 127.5
        self.input_std = 127.5

    def classify(self, image_dir):
        """Detect cones in every .jpg under ``image_dir``.

        Returns:
            boxes_list, classes_list, scores_list: per-image detections above
                the confidence threshold.
            objects_detected: dict mapping label -> number of images in which
                that label was detected at least once.
            objects_dict: label -> [(xmin, ymin), (xmax, ymax), (imH, imW)]
                pixel coordinates, taken from the last processed image only.
        """
        images = glob.glob(image_dir + '/*.jpg')
        classes_list = []
        scores_list = []
        boxes_list = []
        for image_path in images:
            print('Classifying: {}'.format(image_path))
            # Load image and resize to the expected input shape [1xHxWx3].
            image = cv2.imread(image_path)
            image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            imH, imW, _ = image.shape
            image_resized = cv2.resize(image_rgb, (self.width, self.height))
            input_data = np.expand_dims(image_resized, axis=0)
            # Normalize pixel values when the model is non-quantized.
            if self.floating_model:
                input_data = (np.float32(input_data) - self.input_mean) / self.input_std
            # Run the detector on this image.
            self.interpreter.set_tensor(self.input_details[0]['index'], input_data)
            self.interpreter.invoke()
            boxes = self.interpreter.get_tensor(self.output_details[0]['index'])[0]
            classes = self.interpreter.get_tensor(self.output_details[1]['index'])[0]
            scores = self.interpreter.get_tensor(self.output_details[2]['index'])[0]
            keep = scores > self.min_conf_threshold
            boxes_list.append(boxes[keep])
            classes_list.append(classes[keep])
            scores_list.append(scores[keep])
        # NOTE(review): this drawing/coordinate pass only covers the LAST
        # image processed (it uses the loop's final boxes/scores/image) --
        # confirm this is intended before moving it inside the loop.
        objects_dict = {}
        if images:  # fix: avoid NameError on 'scores' when the folder is empty
            for i in range(len(scores)):
                if (scores[i] > self.min_conf_threshold) and (scores[i] <= 1.0):
                    # Clamp coordinates: the interpreter can return boxes
                    # outside the image bounds.
                    ymin = int(max(1, (boxes[i][0] * imH)))
                    xmin = int(max(1, (boxes[i][1] * imW)))
                    ymax = int(min(imH, (boxes[i][2] * imH)))
                    xmax = int(min(imW, (boxes[i][3] * imW)))
                    print((xmin, ymin), (xmax, ymax), (imH, imW))
                    cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (10, 255, 0), 2)
                    # Record pixel coordinates and draw the label.
                    object_name = self.labels[int(classes[i])]
                    objects_dict[object_name] = [(xmin, ymin), (xmax, ymax), (imH, imW)]
                    label = '%s: %d%%' % (object_name, int(scores[i]*100))  # Example: 'person: 72%'
                    labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
                    label_ymin = max(ymin, labelSize[1] + 10)  # keep label inside the window
                    cv2.rectangle(image, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED)
                    cv2.putText(image, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 2)
                    # Fix: the original re-appended classes/scores here for
                    # every detection, double-counting the last image in
                    # classes_list/scores_list and in objects_detected.
        # Count, per label, in how many images it appeared at least once.
        objects_detected = {}
        for classes in classes_list:
            for obj in set(self.labels[int(c)] for c in classes):
                objects_detected[obj] = objects_detected.get(obj, 0) + 1
        return boxes_list, classes_list, scores_list, objects_detected, objects_dict
if __name__ == '__main__':
    # Smoke test: run the detector over a sample picture directory.
    detector = ObjectClassificationModel('Sample_TFLite_model', '/home/pi/Pictures/scav_hunt')
    target_dir = os.path.join(detector.image_dir, 'archive/orange')
    classes, scores, objects = detector.classify(target_dir)
| [
"cv2.rectangle",
"time.sleep",
"cv2.imshow",
"glob.glob",
"numpy.float32",
"picamera.PiCamera",
"cv2.putText",
"cv2.cvtColor",
"cv2.getTextSize",
"cv2.resize",
"cv2.imread",
"os.path.join",
"videorecorder.VideoRecorder",
"os.getcwd",
"collections.Counter",
"datetime.datetime.now",
"t... | [((397, 416), 'picamera.PiCamera', 'picamera.PiCamera', ([], {}), '()\n', (414, 416), False, 'import picamera\n'), ((744, 774), 'os.path.join', 'os.path.join', (['path', 'cone_color'], {}), '(path, cone_color)\n', (756, 774), False, 'import os\n'), ((802, 831), 'videorecorder.VideoRecorder', 'vr.VideoRecorder', (['path', 'runid'], {}), '(path, runid)\n', (818, 831), True, 'import videorecorder as vr\n'), ((913, 933), 'time.sleep', 'time.sleep', (['duration'], {}), '(duration)\n', (923, 933), False, 'import time\n'), ((1515, 1526), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1524, 1526), False, 'import os\n'), ((1581, 1630), 'os.path.join', 'os.path.join', (['CWD_PATH', 'model_dir', '"""labelmap.txt"""'], {}), "(CWD_PATH, model_dir, 'labelmap.txt')\n", (1593, 1630), False, 'import os\n'), ((2679, 2724), 'os.path.join', 'os.path.join', (['CWD_PATH', 'model_dir', 'graph_name'], {}), '(CWD_PATH, model_dir, graph_name)\n', (2691, 2724), False, 'import os\n'), ((3655, 3682), 'glob.glob', 'glob.glob', (["(image_dir + '/*')"], {}), "(image_dir + '/*')\n", (3664, 3682), False, 'import glob\n'), ((5917, 5944), 'glob.glob', 'glob.glob', (["(video_dir + '/*')"], {}), "(video_dir + '/*')\n", (5926, 5944), False, 'import glob\n'), ((10930, 10941), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10939, 10941), False, 'import os\n'), ((10996, 11045), 'os.path.join', 'os.path.join', (['CWD_PATH', 'model_dir', '"""labelmap.txt"""'], {}), "(CWD_PATH, model_dir, 'labelmap.txt')\n", (11008, 11045), False, 'import os\n'), ((12129, 12174), 'os.path.join', 'os.path.join', (['CWD_PATH', 'model_dir', 'graph_name'], {}), '(CWD_PATH, model_dir, graph_name)\n', (12141, 12174), False, 'import os\n'), ((13161, 13192), 'glob.glob', 'glob.glob', (["(image_dir + '/*.jpg')"], {}), "(image_dir + '/*.jpg')\n", (13170, 13192), False, 'import glob\n'), ((17099, 17146), 'os.path.join', 'os.path.join', (['model.image_dir', '"""archive/orange"""'], {}), "(model.image_dir, 'archive/orange')\n", 
(17111, 17146), False, 'import os\n'), ((3107, 3143), 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_path': 'PATH_TO_CKPT'}), '(model_path=PATH_TO_CKPT)\n', (3118, 3143), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), ((3921, 3943), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (3931, 3943), False, 'import cv2\n'), ((3968, 4006), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (3980, 4006), False, 'import cv2\n'), ((4074, 4122), 'cv2.resize', 'cv2.resize', (['image_rgb', '(self.width, self.height)'], {}), '(image_rgb, (self.width, self.height))\n', (4084, 4122), False, 'import cv2\n'), ((4148, 4185), 'numpy.expand_dims', 'np.expand_dims', (['image_resized'], {'axis': '(0)'}), '(image_resized, axis=0)\n', (4162, 4185), True, 'import numpy as np\n'), ((6358, 6386), 'os.path.basename', 'os.path.basename', (['video_file'], {}), '(video_file)\n', (6374, 6386), False, 'import os\n'), ((6506, 6534), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_file'], {}), '(video_file)\n', (6522, 6534), False, 'import cv2\n'), ((12613, 12649), 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_path': 'PATH_TO_CKPT'}), '(model_path=PATH_TO_CKPT)\n', (12624, 12649), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), ((13441, 13463), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (13451, 13463), False, 'import cv2\n'), ((13488, 13526), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (13500, 13526), False, 'import cv2\n'), ((13594, 13642), 'cv2.resize', 'cv2.resize', (['image_rgb', '(self.width, self.height)'], {}), '(image_rgb, (self.width, self.height))\n', (13604, 13642), False, 'import cv2\n'), ((13668, 13705), 'numpy.expand_dims', 'np.expand_dims', (['image_resized'], {'axis': '(0)'}), '(image_resized, axis=0)\n', (13682, 13705), 
True, 'import numpy as np\n'), ((7059, 7097), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (7071, 7097), False, 'import cv2\n'), ((7130, 7178), 'cv2.resize', 'cv2.resize', (['frame_rgb', '(self.width, self.height)'], {}), '(frame_rgb, (self.width, self.height))\n', (7140, 7178), False, 'import cv2\n'), ((7208, 7245), 'numpy.expand_dims', 'np.expand_dims', (['frame_resized'], {'axis': '(0)'}), '(frame_resized, axis=0)\n', (7222, 7245), True, 'import numpy as np\n'), ((10015, 10051), 'cv2.imshow', 'cv2.imshow', (['"""Object detector"""', 'frame'], {}), "('Object detector', frame)\n", (10025, 10051), False, 'import cv2\n'), ((10427, 10450), 'collections.Counter', 'Counter', (['collect_labels'], {}), '(collect_labels)\n', (10434, 10450), False, 'from collections import Counter\n'), ((15407, 15472), 'cv2.rectangle', 'cv2.rectangle', (['image', '(xmin, ymin)', '(xmax, ymax)', '(10, 255, 0)', '(2)'], {}), '(image, (xmin, ymin), (xmax, ymax), (10, 255, 0), 2)\n', (15420, 15472), False, 'import cv2\n'), ((15855, 15911), 'cv2.getTextSize', 'cv2.getTextSize', (['label', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(2)'], {}), '(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)\n', (15870, 15911), False, 'import cv2\n'), ((16067, 16211), 'cv2.rectangle', 'cv2.rectangle', (['image', '(xmin, label_ymin - labelSize[1] - 10)', '(xmin + labelSize[0], label_ymin + baseLine - 10)', '(255, 255, 255)', 'cv2.FILLED'], {}), '(image, (xmin, label_ymin - labelSize[1] - 10), (xmin +\n labelSize[0], label_ymin + baseLine - 10), (255, 255, 255), cv2.FILLED)\n', (16080, 16211), False, 'import cv2\n'), ((16256, 16354), 'cv2.putText', 'cv2.putText', (['image', 'label', '(xmin, label_ymin - 7)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.8)', '(0, 0, 0)', '(2)'], {}), '(image, label, (xmin, label_ymin - 7), cv2.FONT_HERSHEY_SIMPLEX,\n 0.8, (0, 0, 0), 2)\n', (16267, 16354), False, 'import cv2\n'), ((2993, 3027), 'tflite_runtime.interpreter.load_delegate', 
'load_delegate', (['"""libedgetpu.so.1.0"""'], {}), "('libedgetpu.so.1.0')\n", (3006, 3027), False, 'from tflite_runtime.interpreter import load_delegate\n'), ((4349, 4371), 'numpy.float32', 'np.float32', (['input_data'], {}), '(input_data)\n', (4359, 4371), True, 'import numpy as np\n'), ((8918, 8983), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(xmin, ymin)', '(xmax, ymax)', '(10, 255, 0)', '(4)'], {}), '(frame, (xmin, ymin), (xmax, ymax), (10, 255, 0), 4)\n', (8931, 8983), False, 'import cv2\n'), ((9297, 9353), 'cv2.getTextSize', 'cv2.getTextSize', (['label', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(2)'], {}), '(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)\n', (9312, 9353), False, 'import cv2\n'), ((9517, 9661), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(xmin, label_ymin - labelSize[1] - 10)', '(xmin + labelSize[0], label_ymin + baseLine - 10)', '(255, 255, 255)', 'cv2.FILLED'], {}), '(frame, (xmin, label_ymin - labelSize[1] - 10), (xmin +\n labelSize[0], label_ymin + baseLine - 10), (255, 255, 255), cv2.FILLED)\n', (9530, 9661), False, 'import cv2\n'), ((9710, 9808), 'cv2.putText', 'cv2.putText', (['frame', 'label', '(xmin, label_ymin - 7)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(0, 0, 0)', '(2)'], {}), '(frame, label, (xmin, label_ymin - 7), cv2.FONT_HERSHEY_SIMPLEX,\n 0.7, (0, 0, 0), 2)\n', (9721, 9808), False, 'import cv2\n'), ((12499, 12533), 'tflite_runtime.interpreter.load_delegate', 'load_delegate', (['"""libedgetpu.so.1.0"""'], {}), "('libedgetpu.so.1.0')\n", (12512, 12533), False, 'from tflite_runtime.interpreter import load_delegate\n'), ((13869, 13891), 'numpy.float32', 'np.float32', (['input_data'], {}), '(input_data)\n', (13879, 13891), True, 'import numpy as np\n'), ((491, 505), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (503, 505), False, 'from datetime import datetime\n'), ((7437, 7459), 'numpy.float32', 'np.float32', (['input_data'], {}), '(input_data)\n', (7447, 7459), True, 'import numpy as np\n'), ((10201, 10224), 
'collections.Counter', 'Counter', (['collect_labels'], {}), '(collect_labels)\n', (10208, 10224), False, 'from collections import Counter\n')] |
from torch.utils.data import Dataset, DataLoader
import os
import json
import sys
import csv
import itertools
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
UTILS_DIR = os.path.abspath(os.path.join(BASE_DIR, '..', 'utils'))
sys.path.append(UTILS_DIR)
from data_helper import *
from coord_helper import *
from rotation_lib import angle_axis_from_quaternion, quaternion_from_angle_axis, quat2mat
# Seq2Seq dataset
######################################################
class ClassifierDataset(Dataset):
    """Point-cloud dataset of (object, hook) hanging attempts with 0/1 labels.

    Each item pairs an object point cloud (transformed to its final pose)
    with a hook point cloud, plus their labelled combination, where label 1
    marks poses from the successful collection results and label 0 marks
    poses from the negative collection.
    """

    def __init__(self, home_dir_data, data_list_dir, normal_channel=False, npoints=4096, balanced_sampling=True, split='train', with_wall=False, one_per_pair=False, use_partial_pc=False):
        """
        Args:
            home_dir_data: root directory holding geo_data, labels and the
                collection_result / collection_result_neg folders.
            data_list_dir: text file listing '<result_file_name>_<pose_idx>'.
            normal_channel: keep point normals (columns 3:6) for objects.
            npoints: number of points taken from each cloud.
            balanced_sampling: build a class-balanced weighted sampler
                (train split only, ignored when one_per_pair is set).
            split: 'train' or 'test'.
            with_wall: append the wall template cloud to combined clouds.
            one_per_pair: keep only one pose per (hook, object) pair and
                generate no negative examples.
            use_partial_pc: load partial (rendered) point clouds instead of
                the full ones.
        """
        self.home_dir_data = home_dir_data
        self.normal_channel = normal_channel
        self.npoints = npoints
        self.with_wall = with_wall
        self.one_per_pair = one_per_pair
        self.use_partial_pc = use_partial_pc
        print('USING PARTIAL PC', use_partial_pc)
        self.data_dir = os.path.join(home_dir_data, 'geo_data')
        self.labels_folder_dir = os.path.join(self.data_dir, 'labels')
        self.exclude_dir = os.path.join(home_dir_data, 'exclude')
        self.cp_result_folder_dir = os.path.join(self.home_dir_data, 'dataset_cp')
        if self.use_partial_pc:
            self.partial_pc_folder_dir = os.path.join(self.home_dir_data, 'geo_data_partial_cp_pad')
            self.partial_pc_dir = {}
        all_hook_name, all_hook_urdf, all_object_name, all_object_urdf = load_all_hooks_objects(self.data_dir, self.exclude_dir, self.labels_folder_dir, True, with_wall=with_wall)
        self.all_object_name = all_object_name
        self.all_hook_name = all_hook_name
        self.n_object = len(self.all_object_name)
        self.n_hook = len(self.all_hook_name)
        # Map each name to its urdf path and point-cloud numpy file.
        self.all_hook_dict = {name: {'urdf': urdf, 'pc': get_numpy_dir_from_urdf(urdf)} for name, urdf in zip(all_hook_name, all_hook_urdf)}
        self.all_object_dict = {name: {'urdf': urdf, 'pc': get_numpy_dir_from_urdf(urdf)} for name, urdf in zip(all_object_name, all_object_urdf)}
        self.pos_result_folder_dir = os.path.join(self.home_dir_data, 'collection_result')
        self.neg_result_folder_dir = os.path.join(self.home_dir_data, 'collection_result_neg')
        # Fix: close the list file instead of leaking the handle.
        with open(data_list_dir, 'r') as f:
            reader = f.read().splitlines()
        # Each entry: result_file_name + pose_idx + label + result text file.
        self.all_data = []
        self.all_result_file_names = set()
        # Positive examples, one per listed pose.
        for tmp in reader:
            result_file_name, pose_idx = '_'.join(tmp.split('_')[:-1]), tmp.split('_')[-1]
            hook_cat, hook_id, object_cat, object_id = decode_result_file_name(result_file_name)
            hook_name = '{}_{}'.format(hook_cat, str(hook_id))
            object_name = '{}_{}'.format(object_cat, str(object_id))
            # Skip pairs not present in the filtered hook/object catalogs.
            if not hook_name in self.all_hook_name:
                continue
            if not object_name in self.all_object_name:
                continue
            if result_file_name in self.all_result_file_names and self.one_per_pair:
                continue
            if self.use_partial_pc:
                partial_pc_o_dir = os.path.join(self.partial_pc_folder_dir, result_file_name + '_object_partial_pc_pad.npy')
                partial_pc_h_dir = os.path.join(self.partial_pc_folder_dir, result_file_name + '_hook_partial_pc_pad.npy')
                assert os.path.exists(partial_pc_h_dir)
                assert os.path.exists(partial_pc_o_dir)
                self.partial_pc_dir[result_file_name] = {
                    'hook': partial_pc_h_dir,
                    'object': partial_pc_o_dir,
                }
            self.all_result_file_names.add(result_file_name)
            self.all_data.append({
                'result_file_name': result_file_name,
                'hook_name': hook_name,
                'object_name': object_name,
                'pose_idx': int(pose_idx),
                'label': 1,
                'result_dir': os.path.join(self.pos_result_folder_dir, result_file_name + '.txt')
            })
        n_positive = len(self.all_data)
        # Negative examples: 10 poses per pair from the negative collection.
        if not one_per_pair:
            for result_file_name in self.all_result_file_names:
                hook_cat, hook_id, object_cat, object_id = decode_result_file_name(result_file_name)
                hook_name = '{}_{}'.format(hook_cat, str(hook_id))
                object_name = '{}_{}'.format(object_cat, str(object_id))
                for pose_idx in range(10):
                    self.all_data.append({
                        'result_file_name': result_file_name,
                        'hook_name': hook_name,
                        'object_name': object_name,
                        'pose_idx': int(pose_idx),
                        'label': 0,
                        'result_dir': os.path.join(self.neg_result_folder_dir, result_file_name + '.txt')
                    })
        n_negative = len(self.all_data) - n_positive
        self.sampler = None
        if not one_per_pair:
            if balanced_sampling and split == 'train':
                # Fix: torch is imported only under __main__ in this module;
                # import it here so library users do not hit a NameError.
                import torch
                # Weight each example inversely to its class frequency.
                w_pos = 1. * len(self.all_data) / n_positive
                w_neg = 1. * len(self.all_data) / n_negative
                weights = np.ones((len(self.all_data)))
                weights[:n_positive] *= w_pos
                weights[n_positive:] *= w_neg
                weights = torch.DoubleTensor(weights)
                self.sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
        print('n_positive', n_positive, 'n_negative', n_negative)
        if with_wall:
            # Wall template cloud, xyz only; path is relative to the CWD.
            self.pc_wall = np.load('../scripts/templates/wall/wall_small_pc.npy')[:, :3]

    def build_combined_pc(self, pc_o, pc_h, pose, pc_wall=None, hook_offset=None):
        """Return the object cloud (moved to ``pose``) stacked with the hook
        cloud, each tagged with a 4th column: 1 for object points, 0 for hook
        (and wall) points."""
        pc_o_end = np.dot(pc_o, quat2mat(pose[3:]).T) + pose[:3]
        pc_o_end_w_label = np.append(pc_o_end, np.ones((self.npoints, 1)), axis=1)
        pc_h_w_label = np.append(pc_h, np.zeros((self.npoints, 1)), axis=1)
        if not (pc_wall is None):
            # Wall points are expressed relative to the hook's wall offset.
            pc_wall_w_label = np.append(pc_wall - hook_offset, np.zeros((pc_wall.shape[0], 1)), axis=1)
        pc_combined = np.append(pc_o_end_w_label, pc_h_w_label, axis=0)
        if not (pc_wall is None):
            pc_combined = np.append(pc_combined, pc_wall_w_label, axis=0)
        return pc_combined

    def __getitem__(self, index):
        """Load one labelled sample: clouds, pose, urdf paths and label."""
        data_dict = self.all_data[index]
        result_file_name = data_dict['result_file_name']
        pc_urdf_o = self.all_object_dict[data_dict['object_name']]
        pc_urdf_h = self.all_hook_dict[data_dict['hook_name']]
        # Load the object point cloud (full or partial).
        if not self.use_partial_pc:
            point_set_o = np.load(pc_urdf_o['pc'])
        else:
            point_set_o = np.load(self.partial_pc_dir[result_file_name]['object'])
        point_set_o = point_set_o[0:self.npoints, :]
        if not self.normal_channel:
            point_set_o = point_set_o[:, 0:3]
        # Load the hook point cloud (normals are always kept for hooks).
        if not self.use_partial_pc:
            point_set_h = np.load(pc_urdf_h['pc'])
        else:
            point_set_h = np.load(self.partial_pc_dir[result_file_name]['hook'])
        point_set_h = point_set_h[0:self.npoints, :]
        pose_idx = data_dict['pose_idx']
        # Final object pose: translation (3) + quaternion (4) from the result
        # file; also expose it as translation + angle-axis.
        pose_o_end = load_result_file(data_dict['result_dir'])[pose_idx, -7:]
        pose_quaternion = pose_o_end[3:]
        pose_aa = angle_axis_from_quaternion(pose_quaternion)
        pose_transl_aa = np.zeros((6,))
        pose_transl_aa[:3] = pose_o_end[:3]
        pose_transl_aa[3:] = pose_aa
        pose_np = pose_transl_aa
        # Build the labelled combined cloud (object=1, hook/wall=0).
        pc_o_end = np.dot(point_set_o, quat2mat(pose_quaternion).T) + pose_o_end[:3]
        pc_o_end_w_label = np.append(pc_o_end[:, :3], np.ones((self.npoints, 1)), axis=1)
        pc_h_w_label = np.append(point_set_h[:, :3], np.zeros((self.npoints, 1)), axis=1)
        hook_offset = get_hook_wall_offset(pc_urdf_h['urdf'])
        if self.with_wall:
            pc_wall_w_label = np.append(self.pc_wall - hook_offset, np.zeros((self.pc_wall.shape[0], 1)), axis=1)
        pc_combined = np.append(pc_o_end_w_label, pc_h_w_label, axis=0)
        if self.with_wall:
            pc_combined = np.append(pc_combined, pc_wall_w_label, axis=0)
        return {
            'pc_o': point_set_o,
            'pc_o_end': pc_o_end,
            'pc_h': point_set_h,
            'pc_combined': pc_combined,
            'pose_o': pose_o_end,
            'urdf_o': pc_urdf_o['urdf'],
            'urdf_h': pc_urdf_h['urdf'],
            'label': data_dict['label'],
            'result_file_name': result_file_name
        }

    def __len__(self):
        """Number of (pose, label) examples."""
        return len(self.all_data)

    @staticmethod
    def pad_collate_fn_for_dict(batch):
        """Collate a list of __getitem__ dicts into stacked batch arrays.

        Returns a dict whose 'input*'/'output4' entries are numpy arrays
        stacked along a new batch axis; 'output1'..'output3' are placeholders
        kept None for interface compatibility with sibling datasets.
        """
        pc_o_batch = np.stack([d['pc_o'] for d in batch], axis=0)
        pc_h_batch = np.stack([d['pc_h'] for d in batch], axis=0)
        pc_combined_batch = np.stack([d['pc_combined'] for d in batch], axis=0)
        pose_o_batch = np.stack([d['pose_o'] for d in batch], axis=0)
        urdf_o_batch = [d['urdf_o'] for d in batch]
        urdf_h_batch = [d['urdf_h'] for d in batch]
        result_file_name_batch = [d['result_file_name'] for d in batch]
        label_batch = np.array([d['label'] for d in batch])
        return {
            'input1': pc_o_batch,  # (batch_size, 4096, 3 or 6) np.float64
            'input2': pc_h_batch,  # (batch_size, 4096, 3 or 6) np.float64
            'input3': pc_combined_batch,  # (batch_size, 4096*2, 4)
            'output1': None,
            'output2': None,
            'output3': None,
            'output4': pose_o_batch,  # (batch_size, 7) np.float64
            'urdf_o': urdf_o_batch,
            'urdf_h': urdf_h_batch,
            'label': label_batch,
            'result_file_name': result_file_name_batch
        }
if __name__ == "__main__":
    # Quick smoke test: build the train/test splits, pull a few samples, and
    # wire up the DataLoaders with the custom collate function.
    from torch.utils.data import DataLoader
    import torch
    torch.manual_seed(2)
    home_dir_data = '/home/yifanyou/hang'
    # home_dir_data = '/juno/downloads/new_hang'
    cp_result_folder_dir = os.path.join(home_dir_data, 'dataset_cp')
    train_list_dir = os.path.join(cp_result_folder_dir, 'labels', 'train_list.txt')
    test_list_dir = os.path.join(cp_result_folder_dir, 'labels', 'test_list.txt')
    train_set = ClassifierDataset(home_dir_data, train_list_dir, False, one_per_pair=True, use_partial_pc=True)
    test_set = ClassifierDataset(home_dir_data, test_list_dir, False, split='test', one_per_pair=True, use_partial_pc=True)
    print('len train', len(train_set))
    print('len test', len(test_set))
    # Touch a handful of samples to exercise __getitem__.
    for sample_idx in (0, 1, 2, 3, 4, 8):
        one_data = train_set[sample_idx]
    train_loader = DataLoader(train_set, batch_size=32, sampler=train_set.sampler,
                              num_workers=1, collate_fn=ClassifierDataset.pad_collate_fn_for_dict)
    test_loader = DataLoader(test_set, batch_size=32, shuffle=False,
                             num_workers=1, collate_fn=ClassifierDataset.pad_collate_fn_for_dict)
| [
"torch.manual_seed",
"os.path.exists",
"numpy.ones",
"rotation_lib.angle_axis_from_quaternion",
"os.path.join",
"numpy.append",
"numpy.stack",
"numpy.zeros",
"numpy.array",
"rotation_lib.quat2mat",
"torch.utils.data.DataLoader",
"os.path.abspath",
"numpy.load",
"sys.path.append",
"torch.... | [((250, 276), 'sys.path.append', 'sys.path.append', (['UTILS_DIR'], {}), '(UTILS_DIR)\n', (265, 276), False, 'import sys\n'), ((156, 181), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (171, 181), False, 'import os\n'), ((211, 248), 'os.path.join', 'os.path.join', (['BASE_DIR', '""".."""', '"""utils"""'], {}), "(BASE_DIR, '..', 'utils')\n", (223, 248), False, 'import os\n'), ((9742, 9762), 'torch.manual_seed', 'torch.manual_seed', (['(2)'], {}), '(2)\n', (9759, 9762), False, 'import torch\n'), ((9874, 9915), 'os.path.join', 'os.path.join', (['home_dir_data', '"""dataset_cp"""'], {}), "(home_dir_data, 'dataset_cp')\n", (9886, 9915), False, 'import os\n'), ((9935, 9997), 'os.path.join', 'os.path.join', (['cp_result_folder_dir', '"""labels"""', '"""train_list.txt"""'], {}), "(cp_result_folder_dir, 'labels', 'train_list.txt')\n", (9947, 9997), False, 'import os\n'), ((10015, 10076), 'os.path.join', 'os.path.join', (['cp_result_folder_dir', '"""labels"""', '"""test_list.txt"""'], {}), "(cp_result_folder_dir, 'labels', 'test_list.txt')\n", (10027, 10076), False, 'import os\n'), ((10599, 10736), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': '(32)', 'sampler': 'train_set.sampler', 'num_workers': '(1)', 'collate_fn': 'ClassifierDataset.pad_collate_fn_for_dict'}), '(train_set, batch_size=32, sampler=train_set.sampler, num_workers\n =1, collate_fn=ClassifierDataset.pad_collate_fn_for_dict)\n', (10609, 10736), False, 'from torch.utils.data import DataLoader\n'), ((10752, 10875), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': '(32)', 'shuffle': '(False)', 'num_workers': '(1)', 'collate_fn': 'ClassifierDataset.pad_collate_fn_for_dict'}), '(test_set, batch_size=32, shuffle=False, num_workers=1,\n collate_fn=ClassifierDataset.pad_collate_fn_for_dict)\n', (10762, 10875), False, 'from torch.utils.data import DataLoader\n'), ((982, 1021), 'os.path.join', 'os.path.join', 
(['home_dir_data', '"""geo_data"""'], {}), "(home_dir_data, 'geo_data')\n", (994, 1021), False, 'import os\n'), ((1049, 1086), 'os.path.join', 'os.path.join', (['self.data_dir', '"""labels"""'], {}), "(self.data_dir, 'labels')\n", (1061, 1086), False, 'import os\n'), ((1108, 1146), 'os.path.join', 'os.path.join', (['home_dir_data', '"""exclude"""'], {}), "(home_dir_data, 'exclude')\n", (1120, 1146), False, 'import os\n'), ((1177, 1223), 'os.path.join', 'os.path.join', (['self.home_dir_data', '"""dataset_cp"""'], {}), "(self.home_dir_data, 'dataset_cp')\n", (1189, 1223), False, 'import os\n'), ((2045, 2098), 'os.path.join', 'os.path.join', (['self.home_dir_data', '"""collection_result"""'], {}), "(self.home_dir_data, 'collection_result')\n", (2057, 2098), False, 'import os\n'), ((2130, 2187), 'os.path.join', 'os.path.join', (['self.home_dir_data', '"""collection_result_neg"""'], {}), "(self.home_dir_data, 'collection_result_neg')\n", (2142, 2187), False, 'import os\n'), ((6107, 6156), 'numpy.append', 'np.append', (['pc_o_end_w_label', 'pc_h_w_label'], {'axis': '(0)'}), '(pc_o_end_w_label, pc_h_w_label, axis=0)\n', (6116, 6156), True, 'import numpy as np\n'), ((7243, 7286), 'rotation_lib.angle_axis_from_quaternion', 'angle_axis_from_quaternion', (['pose_quaternion'], {}), '(pose_quaternion)\n', (7269, 7286), False, 'from rotation_lib import angle_axis_from_quaternion, quaternion_from_angle_axis, quat2mat\n'), ((7306, 7320), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (7314, 7320), True, 'import numpy as np\n'), ((7867, 7916), 'numpy.append', 'np.append', (['pc_o_end_w_label', 'pc_h_w_label'], {'axis': '(0)'}), '(pc_o_end_w_label, pc_h_w_label, axis=0)\n', (7876, 7916), True, 'import numpy as np\n'), ((8532, 8560), 'numpy.stack', 'np.stack', (['pc_o_batch'], {'axis': '(0)'}), '(pc_o_batch, axis=0)\n', (8540, 8560), True, 'import numpy as np\n'), ((8619, 8647), 'numpy.stack', 'np.stack', (['pc_h_batch'], {'axis': '(0)'}), '(pc_h_batch, axis=0)\n', (8627, 
8647), True, 'import numpy as np\n'), ((8727, 8762), 'numpy.stack', 'np.stack', (['pc_combined_batch'], {'axis': '(0)'}), '(pc_combined_batch, axis=0)\n', (8735, 8762), True, 'import numpy as np\n'), ((8827, 8857), 'numpy.stack', 'np.stack', (['pose_o_batch'], {'axis': '(0)'}), '(pose_o_batch, axis=0)\n', (8835, 8857), True, 'import numpy as np\n'), ((9037, 9074), 'numpy.array', 'np.array', (["[d['label'] for d in batch]"], {}), "([d['label'] for d in batch])\n", (9045, 9074), True, 'import numpy as np\n'), ((1283, 1342), 'os.path.join', 'os.path.join', (['self.home_dir_data', '"""geo_data_partial_cp_pad"""'], {}), "(self.home_dir_data, 'geo_data_partial_cp_pad')\n", (1295, 1342), False, 'import os\n'), ((5861, 5887), 'numpy.ones', 'np.ones', (['(self.npoints, 1)'], {}), '((self.npoints, 1))\n', (5868, 5887), True, 'import numpy as np\n'), ((5930, 5957), 'numpy.zeros', 'np.zeros', (['(self.npoints, 1)'], {}), '((self.npoints, 1))\n', (5938, 5957), True, 'import numpy as np\n'), ((6203, 6250), 'numpy.append', 'np.append', (['pc_combined', 'pc_wall_w_label'], {'axis': '(0)'}), '(pc_combined, pc_wall_w_label, axis=0)\n', (6212, 6250), True, 'import numpy as np\n'), ((6558, 6582), 'numpy.load', 'np.load', (["pc_urdf_o['pc']"], {}), "(pc_urdf_o['pc'])\n", (6565, 6582), True, 'import numpy as np\n'), ((6608, 6664), 'numpy.load', 'np.load', (["self.partial_pc_dir[result_file_name]['object']"], {}), "(self.partial_pc_dir[result_file_name]['object'])\n", (6615, 6664), True, 'import numpy as np\n'), ((6849, 6873), 'numpy.load', 'np.load', (["pc_urdf_h['pc']"], {}), "(pc_urdf_h['pc'])\n", (6856, 6873), True, 'import numpy as np\n'), ((6899, 6953), 'numpy.load', 'np.load', (["self.partial_pc_dir[result_file_name]['hook']"], {}), "(self.partial_pc_dir[result_file_name]['hook'])\n", (6906, 6953), True, 'import numpy as np\n'), ((7548, 7574), 'numpy.ones', 'np.ones', (['(self.npoints, 1)'], {}), '((self.npoints, 1))\n', (7555, 7574), True, 'import numpy as np\n'), ((7631, 7658), 
'numpy.zeros', 'np.zeros', (['(self.npoints, 1)'], {}), '((self.npoints, 1))\n', (7639, 7658), True, 'import numpy as np\n'), ((7956, 8003), 'numpy.append', 'np.append', (['pc_combined', 'pc_wall_w_label'], {'axis': '(0)'}), '(pc_combined, pc_wall_w_label, axis=0)\n', (7965, 8003), True, 'import numpy as np\n'), ((3103, 3196), 'os.path.join', 'os.path.join', (['self.partial_pc_folder_dir', "(result_file_name + '_object_partial_pc_pad.npy')"], {}), "(self.partial_pc_folder_dir, result_file_name +\n '_object_partial_pc_pad.npy')\n", (3115, 3196), False, 'import os\n'), ((3216, 3307), 'os.path.join', 'os.path.join', (['self.partial_pc_folder_dir', "(result_file_name + '_hook_partial_pc_pad.npy')"], {}), "(self.partial_pc_folder_dir, result_file_name +\n '_hook_partial_pc_pad.npy')\n", (3228, 3307), False, 'import os\n'), ((3315, 3347), 'os.path.exists', 'os.path.exists', (['partial_pc_h_dir'], {}), '(partial_pc_h_dir)\n', (3329, 3347), False, 'import os\n'), ((3360, 3392), 'os.path.exists', 'os.path.exists', (['partial_pc_o_dir'], {}), '(partial_pc_o_dir)\n', (3374, 3392), False, 'import os\n'), ((5400, 5427), 'torch.DoubleTensor', 'torch.DoubleTensor', (['weights'], {}), '(weights)\n', (5418, 5427), False, 'import torch\n'), ((5616, 5670), 'numpy.load', 'np.load', (['"""../scripts/templates/wall/wall_small_pc.npy"""'], {}), "('../scripts/templates/wall/wall_small_pc.npy')\n", (5623, 5670), True, 'import numpy as np\n'), ((6050, 6081), 'numpy.zeros', 'np.zeros', (['(pc_wall.shape[0], 1)'], {}), '((pc_wall.shape[0], 1))\n', (6058, 6081), True, 'import numpy as np\n'), ((7805, 7841), 'numpy.zeros', 'np.zeros', (['(self.pc_wall.shape[0], 1)'], {}), '((self.pc_wall.shape[0], 1))\n', (7813, 7841), True, 'import numpy as np\n'), ((4165, 4232), 'os.path.join', 'os.path.join', (['self.pos_result_folder_dir', "(result_file_name + '.txt')"], {}), "(self.pos_result_folder_dir, result_file_name + '.txt')\n", (4177, 4232), False, 'import os\n'), ((5787, 5805), 
'rotation_lib.quat2mat', 'quat2mat', (['pose[3:]'], {}), '(pose[3:])\n', (5795, 5805), False, 'from rotation_lib import angle_axis_from_quaternion, quaternion_from_angle_axis, quat2mat\n'), ((7451, 7476), 'rotation_lib.quat2mat', 'quat2mat', (['pose_quaternion'], {}), '(pose_quaternion)\n', (7459, 7476), False, 'from rotation_lib import angle_axis_from_quaternion, quaternion_from_angle_axis, quat2mat\n'), ((4905, 4972), 'os.path.join', 'os.path.join', (['self.neg_result_folder_dir', "(result_file_name + '.txt')"], {}), "(self.neg_result_folder_dir, result_file_name + '.txt')\n", (4917, 4972), False, 'import os\n')] |
"""
<NAME>
descriptor.py
Takes in a directory of sub-directories of images and produces a descriptor
file for all the images found in the sub-directories.
,:'/ _..._
// ( `""-.._.'
\| / 6\___
| 6 4
| /
\_ .--'
(_'---'`)
/ `'---`()
,' |
, .'` |
)\ _.-' ;
/ | .'` _ /
/` / .' '. , |
/ / / \ ; | |
| \ | | .| | |
\ `"| /.-' | | |
'-..-\ _.;.._ | |.;-.
\ <`.._ )) | .;-. ))
(__. ` ))-' \_ ))'
`'--"` `""`
I'll describe each image for you...okay that first one is gray...the
second one is also gray...the next one's gray...
"""
import os
import sys
import pickle
import cv2 as cv
import numpy as np
def read_train_dir(train_dir):
    """
    Collect images from the recognised background sub-directories.

    Scans train_dir for the five known background folders (grass, ocean,
    redcarpet, road, wheatfield) and loads every image inside each one
    that is present, via find_images().

    Returns a list of per-background image lists, ordered by the
    directory listing of train_dir.
    """
    backgrounds = {"grass", "ocean", "redcarpet", "road", "wheatfield"}
    return [find_images(train_dir + "/" + entry)
            for entry in os.listdir(train_dir)
            if entry in backgrounds]
def find_images(dir):
    """
    Load every JPEG image found directly inside *dir* (non-recursive).

    Returns a list of BGR image arrays as produced by ``cv.imread``.
    Prints a message and exits the program if an image cannot be decoded.
    """
    images = list()
    for fil in os.listdir(dir):
        if fil.lower().endswith('.jpeg'):
            # BUGFIX: cv.imread does not raise on a bad/unreadable file --
            # it returns None -- so the original `except cv.error` branch
            # could never trigger and a None was appended instead.
            img = cv.imread(dir + "/" + fil, 1)
            if img is None:
                print("{} malformed!".format(fil))
                sys.exit()
            images.append(img)
    return images
def get_3D_hist(sub_img):
    """
    Compute a 4x4x4 colour histogram for one sub-image.

    The image is flattened into a list of pixels and binned jointly
    along its three colour channels.
    """
    rows, cols = sub_img.shape[:2]
    bins = 4
    flat_pixels = sub_img.reshape(rows * cols, 3)
    histogram, _edges = np.histogramdd(flat_pixels, (bins, bins, bins))
    return histogram
def get_sub_imgs(img):
    """
    Split *img* into a 5x5 grid of equally sized sub-images.

    Block sizes are the image dimensions floor-divided by 5; remainder
    pixels on the right/bottom edges are discarded.

    Returns a (5, 5, block_h, block_w, 3) array indexed as
    sub_imgs[col, row].
    """
    height, width = img.shape[:2]
    grid = 4
    block_w = width // (grid + 1)
    block_h = height // (grid + 1)
    sub_imgs = np.empty((5, 5, block_h, block_w, 3))
    for col in range(grid + 1):
        x0 = col * block_w
        for row in range(grid + 1):
            y0 = row * block_h
            sub_imgs[col, row] = img[y0:y0 + block_h, x0:x0 + block_w]
    return sub_imgs
def get_desc_vec(sub_imgs):
    """
    Build the descriptor vector for one image from its 5x5 sub-images.

    Each 2x2 neighbourhood of sub-images is tiled into a single block
    (blocks overlap, since interior sub-images belong to several
    neighbourhoods), its 3-D colour histogram is computed, and all 16
    histograms are concatenated and flattened into one vector.
    """
    histograms = []
    for col in range(4):
        for row in range(4):
            top = np.hstack((sub_imgs[col, row], sub_imgs[col + 1, row]))
            bottom = np.hstack((sub_imgs[col, row + 1], sub_imgs[col + 1, row + 1]))
            block = np.vstack((top, bottom))
            histograms.append(get_3D_hist(block))
    # hstack concatenates the 4x4x4 histograms along axis 1, exactly as
    # the original incremental accumulation did, then flatten.
    return np.hstack(histograms).flatten()
if __name__ == "__main__":
    """
    Handle command line arguments
    """
    # Expect exactly one argument: the path of the training directory.
    if len(sys.argv) != 2:
        print("Usage: {} train_dir".format(sys.argv[0]))
        sys.exit()
    else:
        train_dir_name = sys.argv[1]
    train_dir = list()
    try:
        train_dir = read_train_dir(train_dir_name)
    except FileNotFoundError:
        print("{} not found!".format(train_dir_name))
        sys.exit()
    """
    Create outfile for descriptors
    Go through each image found in the sub-directories
    Get the descriptor vector for each and append it to list
    Dump the descriptor vectors into outfile
    """
    # Descriptors are pickled as one list into <train_dir>/desc.txt
    # (opened in binary mode for pickle).
    outfile = open("{}/desc.txt".format(train_dir_name), "wb")
    desc_vec_list = []
    for i in range(len(train_dir)):
        for img in train_dir[i]:
            subimgs = get_sub_imgs(img)
            desc_vec = get_desc_vec(subimgs)
            desc_vec_list.append(desc_vec)
    pickle.dump(desc_vec_list, outfile)
    outfile.close()
| [
"os.listdir",
"pickle.dump",
"numpy.histogramdd",
"numpy.hstack",
"numpy.empty",
"sys.exit",
"cv2.imread"
] | [((1863, 1884), 'os.listdir', 'os.listdir', (['train_dir'], {}), '(train_dir)\n', (1873, 1884), False, 'import os\n'), ((2230, 2245), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (2240, 2245), False, 'import os\n'), ((2738, 2771), 'numpy.histogramdd', 'np.histogramdd', (['pixels', '(t, t, t)'], {}), '(pixels, (t, t, t))\n', (2752, 2771), True, 'import numpy as np\n'), ((3137, 3170), 'numpy.empty', 'np.empty', (['(5, 5, del_h, del_w, 3)'], {}), '((5, 5, del_h, del_w, 3))\n', (3145, 3170), True, 'import numpy as np\n'), ((3793, 3804), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (3801, 3804), True, 'import numpy as np\n'), ((5206, 5241), 'pickle.dump', 'pickle.dump', (['desc_vec_list', 'outfile'], {}), '(desc_vec_list, outfile)\n', (5217, 5241), False, 'import pickle\n'), ((4461, 4471), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4469, 4471), False, 'import sys\n'), ((4695, 4705), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4703, 4705), False, 'import sys\n'), ((2328, 2357), 'cv2.imread', 'cv.imread', (["(dir + '/' + fil)", '(1)'], {}), "(dir + '/' + fil, 1)\n", (2337, 2357), True, 'import cv2 as cv\n'), ((2454, 2464), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2462, 2464), False, 'import sys\n'), ((3921, 3968), 'numpy.hstack', 'np.hstack', (['(sub_imgs[i, j], sub_imgs[i + 1, j])'], {}), '((sub_imgs[i, j], sub_imgs[i + 1, j]))\n', (3930, 3968), True, 'import numpy as np\n'), ((3983, 4038), 'numpy.hstack', 'np.hstack', (['(sub_imgs[i, j + 1], sub_imgs[i + 1, j + 1])'], {}), '((sub_imgs[i, j + 1], sub_imgs[i + 1, j + 1]))\n', (3992, 4038), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
A simple coin flipping example. The model is written in PyMC3.
Inspired by Stan's toy example.
Probability model
Prior: Beta
Likelihood: Bernoulli
Variational model
Likelihood: Mean-field Beta
"""
import edward as ed
import pymc3 as pm
import numpy as np
import theano
from edward.models import PyMC3Model, Variational, Beta
# Observed coin flips live in a theano shared variable so the data can
# be swapped without rebuilding the PyMC3 graph.
data_shared = theano.shared(np.zeros(1))
with pm.Model() as model:
    # Beta(1, 1) (uniform) prior on the coin bias; transform=None keeps
    # the variable on its natural [0, 1] support.
    beta = pm.Beta('beta', 1, 1, transform=None)
    out = pm.Bernoulli('data',
                       beta,
                       observed=data_shared)
data = ed.Data(np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1]))
m = PyMC3Model(model, data_shared)
# Mean-field variational family: a single Beta factor for the bias.
variational = Variational()
variational.add(Beta())
# Run mean-field variational inference for 10000 iterations.
inference = ed.MFVI(m, variational, data)
inference.run(n_iter=10000)
| [
"edward.MFVI",
"edward.models.Beta",
"pymc3.Beta",
"edward.models.PyMC3Model",
"numpy.array",
"numpy.zeros",
"pymc3.Model",
"edward.models.Variational",
"pymc3.Bernoulli"
] | [((650, 680), 'edward.models.PyMC3Model', 'PyMC3Model', (['model', 'data_shared'], {}), '(model, data_shared)\n', (660, 680), False, 'from edward.models import PyMC3Model, Variational, Beta\n'), ((695, 708), 'edward.models.Variational', 'Variational', ([], {}), '()\n', (706, 708), False, 'from edward.models import PyMC3Model, Variational, Beta\n'), ((746, 775), 'edward.MFVI', 'ed.MFVI', (['m', 'variational', 'data'], {}), '(m, variational, data)\n', (753, 775), True, 'import edward as ed\n'), ((394, 405), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (402, 405), True, 'import numpy as np\n'), ((413, 423), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (421, 423), True, 'import pymc3 as pm\n'), ((445, 482), 'pymc3.Beta', 'pm.Beta', (['"""beta"""', '(1)', '(1)'], {'transform': 'None'}), "('beta', 1, 1, transform=None)\n", (452, 482), True, 'import pymc3 as pm\n'), ((493, 541), 'pymc3.Bernoulli', 'pm.Bernoulli', (['"""data"""', 'beta'], {'observed': 'data_shared'}), "('data', beta, observed=data_shared)\n", (505, 541), True, 'import pymc3 as pm\n'), ((604, 644), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 0, 0, 0, 0, 0, 1]'], {}), '([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])\n', (612, 644), True, 'import numpy as np\n'), ((725, 731), 'edward.models.Beta', 'Beta', ([], {}), '()\n', (729, 731), False, 'from edward.models import PyMC3Model, Variational, Beta\n')] |
from core.base_environment import *
import numpy as np
from overrides import overrides
from pyhelper_fns import vis_utils
def str2action(cmd):
  """
  Map a WASD keyboard command to a 2-D control vector.

  Returns a (2,) numpy array, or None for unrecognised input.
  """
  key2ctrl = {
    'w': [0, 0.1],    # up
    'a': [-0.1, 0],   # left
    'd': [0.1, 0],    # right
    's': [0, -0.1],   # down
  }
  ctrl = key2ctrl.get(cmd.strip())
  if ctrl is None:
    return None
  return np.array(ctrl).reshape((2,))
class DiscreteActionFour(BaseDiscreteAction):
  """Discrete action space with four moves: up, left, right, down."""
  @overrides
  def num_actions(self):
    return 4
  def minval(self):
    return 0
  def maxval(self):
    return 3
  @overrides
  def process(self, action):
    """Translate a one-element discrete action into a (2,) control vector."""
    assert len(action) == 1
    assert action[0] in [0, 1, 2, 3]
    # Index matches the action encoding: 0 = up, 1 = left, 2 = right, 3 = down.
    lookup = ([0, 0.1], [-0.1, 0], [0.1, 0], [0, -0.1])
    return np.array(lookup[action[0]]).reshape((2,))
class ContinuousAction(BaseContinuousAction):
  """Two-dimensional continuous action space; controls pass through unmodified."""
  @overrides
  def action_dim(self):
    # Two degrees of freedom: x and y displacement.
    return 2
  @overrides
  def process(self, action):
    # No scaling or clipping: the raw action is used as the control signal.
    return action
class MoveTeleportSimulator(BaseSimulator):
  """
  2-D toy simulator: a manipulator moves on a plane and, once within
  `_manipulate_radius` of the object, "teleports" the object to its own
  position on every subsequent step. A 64x64 RGB image can be rendered
  with the object (red), goal (green) and manipulator (blue).
  """
  def __init__(self, **kwargs):
    super(MoveTeleportSimulator, self).__init__(**kwargs)
    # 2-D positions of the three entities, all initialised at the origin.
    self._pos = {}
    self._pos['manipulator'] = np.zeros((2,))
    self._pos['object'] = np.zeros((2,))
    self._pos['goal'] = np.zeros((2,))
    #Maximum and minimum locations of objects
    self._range_min = -1
    self._range_max = 1
    #Manipulate radius
    self._manipulate_radius = 0.2
    #Image size
    self._imSz = 64
    self._im = np.zeros((self._imSz, self._imSz, 3), dtype=np.uint8)
  def object_names(self):
    # Keys of the position dict: 'manipulator', 'object', 'goal'.
    return self._pos.keys()
  def _dist(self, x, y):
    # Euclidean distance between two 2-D points.
    dist = x - y
    dist = np.sqrt(np.sum(dist * dist))
    return dist
  def dist_manipulator_object(self):
    return self._dist(self._pos['manipulator'], self._pos['object'])
  def dist_object_goal(self):
    return self._dist(self._pos['object'], self._pos['goal'])
  def _clip(self, val):
    # Clamp a position into the world bounds [_range_min, _range_max].
    val = np.clip(val, self._range_min, self._range_max)
    return val
  @overrides
  def step(self, ctrl):
    # Move the manipulator by the control displacement, then drag the
    # object along if the manipulator is close enough to grab it.
    self._pos['manipulator'] += ctrl.reshape((2,))
    self._pos['manipulator'] = self._clip(self._pos['manipulator'])
    if self.dist_manipulator_object() < self._manipulate_radius:
      self._pos['object'] = self._pos['manipulator'].copy()
  def _get_bin(self, rng, coords):
    # Map world coordinates to pixel indices: the last bin edge that is
    # still <= the coordinate. Fails (IndexError) if a coordinate lies
    # below rng[0]; re-raised with a diagnostic print.
    try:
      x = np.where(rng <= coords[0])[0][-1]
      y = np.where(rng <= coords[1])[0][-1]
    except:
      print (coords)
      raise Exception('Something is incorrect')
    return x, y
  def _plot_object(self, coords, color='r'):
    # Paint a ~4x4 pixel square at (x, y) into one colour channel:
    # 'r' -> channel 0, 'g' -> channel 1, anything else -> channel 2.
    x, y = coords
    mnx, mxx = max(0, x - 2), min(self._imSz, x + 2)
    mny, mxy = max(0, y - 2), min(self._imSz, y + 2)
    if color == 'r':
      self._im[mny:mxy, mnx:mxx, 0] = 255
    elif color == 'g':
      self._im[mny:mxy, mnx:mxx, 1] = 255
    else:
      self._im[mny:mxy, mnx:mxx, 2] = 255
  @overrides
  def get_image(self):
    """Render and return a fresh RGB image of the current state."""
    imSz = self._imSz
    rng = np.linspace(self._range_min, self._range_max, imSz)
    g_x, g_y = self._get_bin(rng, self._pos['goal'])
    m_x, m_y = self._get_bin(rng, self._pos['manipulator'])
    o_x, o_y = self._get_bin(rng, self._pos['object'])
    # Redraw from scratch; manipulator is painted last so it shows on top.
    self._im = np.zeros((imSz, imSz, 3), dtype=np.uint8)
    self._plot_object((o_x, o_y), 'r')
    self._plot_object((g_x, g_y), 'g')
    self._plot_object((m_x, m_y), 'b')
    return self._im.copy()
  @overrides
  def _setup_renderer(self):
    self._canvas = vis_utils.MyAnimation(None, height=self._imSz, width=self._imSz)
  @overrides
  def render(self):
    self._canvas._display(self.get_image())
class InitFixed(BaseInitializer):
  """Deterministic initializer: fixed goal, object and manipulator positions."""
  @overrides
  def sample_env_init(self):
    # Object and manipulator start close together in the lower-left
    # quadrant, with the goal in the upper-right.
    self.simulator._pos['goal'] = np.array([0.5, 0.5])
    self.simulator._pos['object'] = np.array([-0.7, -0.5])
    self.simulator._pos['manipulator'] = np.array([-0.9, -0.6])
class InitRandom(BaseInitializer):
  """Initializer that samples every position uniformly within the world bounds."""
  @overrides
  def sample_env_init(self):
    range_mag = self.simulator._range_max - self.simulator._range_min
    for k in self.simulator._pos.keys():
      # Uniform sample in [_range_min, _range_max] per coordinate.
      self.simulator._pos[k] = range_mag * self.random.rand(2,) + \
                               self.simulator._range_min
class ObsState(BaseObservation):
  """
  Low-dimensional state observation: the (x, y) positions of the
  manipulator, object and goal concatenated into a 6-vector under 'feat'.
  """
  @overrides
  def ndim(self):
    dim = {}
    dim['feat'] = 6
    return dim
  @overrides
  def observation(self):
    obs = {}
    obs['feat'] = np.zeros((6,))
    for i, k in enumerate(self.simulator._pos.keys()):
      # BUGFIX: write into the feature vector. The original did
      # obs[2*i, 2*i + 2] = ..., which added stray tuple keys to the
      # dict and left obs['feat'] all zeros.
      obs['feat'][2 * i:2 * i + 2] = self.simulator._pos[k].copy()
    return obs
class ObsIm(BaseObservation):
  """Image observation: the rendered RGB view of the simulator."""
  @overrides
  def ndim(self):
    dim = {}
    # Height x width x RGB channels.
    dim['im'] = (self.simulator._imSz, self.simulator._imSz, 3)
    return dim
  @overrides
  def observation(self):
    obs = {}
    obs['im'] = self.simulator.get_image()
    return obs
class RewardSimple(BaseRewarder):
  """Sparse binary reward: 1 when the object lies within `radius` of the goal."""
  #The radius around the goal in which reward is provided to the agent.
  @property
  def radius(self):
    # BUGFIX: self.prms is a plain dict (see get_environment's rewPrms),
    # so hasattr(self.prms, 'radius') was always False and any configured
    # radius was silently ignored. Use a key lookup, matching the
    # subscript access the original already attempted.
    return self.prms['radius'] if 'radius' in self.prms else 0.2
  @overrides
  def get(self):
    if self.simulator.dist_object_goal() < self.radius:
      return 1
    else:
      return 0
def get_environment(initName='InitRandom', obsName='ObsIm', rewName='RewardSimple',
          actType='DiscreteActionFour', max_episode_length=100,
          initPrms={}, obsPrms={}, rewPrms={}, actPrms={}):
  """
  Factory that assembles a BaseEnvironment from component class names.

  The *Name/*Type strings are looked up in this module's globals(), so
  they must name classes defined in this file. The *Prms dicts are
  forwarded to the respective component constructors.
  """
  sim = MoveTeleportSimulator()
  initObj = globals()[initName](sim, initPrms)
  obsObj = globals()[obsName](sim, obsPrms)
  rewObj = globals()[rewName](sim, rewPrms)
  actObj = globals()[actType](actPrms)
  env = BaseEnvironment(sim, initObj, obsObj, rewObj, actObj,
          params={'max_episode_length':max_episode_length})
  return env
| [
"numpy.clip",
"pyhelper_fns.vis_utils.MyAnimation",
"numpy.where",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.sum"
] | [((1424, 1438), 'numpy.zeros', 'np.zeros', (['(2,)'], {}), '((2,))\n', (1432, 1438), True, 'import numpy as np\n'), ((1470, 1484), 'numpy.zeros', 'np.zeros', (['(2,)'], {}), '((2,))\n', (1478, 1484), True, 'import numpy as np\n'), ((1516, 1530), 'numpy.zeros', 'np.zeros', (['(2,)'], {}), '((2,))\n', (1524, 1530), True, 'import numpy as np\n'), ((1735, 1788), 'numpy.zeros', 'np.zeros', (['(self._imSz, self._imSz, 3)'], {'dtype': 'np.uint8'}), '((self._imSz, self._imSz, 3), dtype=np.uint8)\n', (1743, 1788), True, 'import numpy as np\n'), ((2188, 2234), 'numpy.clip', 'np.clip', (['val', 'self._range_min', 'self._range_max'], {}), '(val, self._range_min, self._range_max)\n', (2195, 2234), True, 'import numpy as np\n'), ((3202, 3253), 'numpy.linspace', 'np.linspace', (['self._range_min', 'self._range_max', 'imSz'], {}), '(self._range_min, self._range_max, imSz)\n', (3213, 3253), True, 'import numpy as np\n'), ((3437, 3478), 'numpy.zeros', 'np.zeros', (['(imSz, imSz, 3)'], {'dtype': 'np.uint8'}), '((imSz, imSz, 3), dtype=np.uint8)\n', (3445, 3478), True, 'import numpy as np\n'), ((3692, 3756), 'pyhelper_fns.vis_utils.MyAnimation', 'vis_utils.MyAnimation', (['None'], {'height': 'self._imSz', 'width': 'self._imSz'}), '(None, height=self._imSz, width=self._imSz)\n', (3713, 3756), False, 'from pyhelper_fns import vis_utils\n'), ((3949, 3969), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (3957, 3969), True, 'import numpy as np\n'), ((4006, 4028), 'numpy.array', 'np.array', (['[-0.7, -0.5]'], {}), '([-0.7, -0.5])\n', (4014, 4028), True, 'import numpy as np\n'), ((4070, 4092), 'numpy.array', 'np.array', (['[-0.9, -0.6]'], {}), '([-0.9, -0.6])\n', (4078, 4092), True, 'import numpy as np\n'), ((4598, 4612), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (4606, 4612), True, 'import numpy as np\n'), ((392, 406), 'numpy.array', 'np.array', (['ctrl'], {}), '(ctrl)\n', (400, 406), True, 'import numpy as np\n'), ((1912, 1931), 'numpy.sum', 'np.sum', 
(['(dist * dist)'], {}), '(dist * dist)\n', (1918, 1931), True, 'import numpy as np\n'), ((1030, 1044), 'numpy.array', 'np.array', (['ctrl'], {}), '(ctrl)\n', (1038, 1044), True, 'import numpy as np\n'), ((2591, 2617), 'numpy.where', 'np.where', (['(rng <= coords[0])'], {}), '(rng <= coords[0])\n', (2599, 2617), True, 'import numpy as np\n'), ((2635, 2661), 'numpy.where', 'np.where', (['(rng <= coords[1])'], {}), '(rng <= coords[1])\n', (2643, 2661), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 20 09:17:52 2019
@author: <NAME>, https://github.com/zhaofenqiang
Contact: <EMAIL>
"""
import numpy as np
from interp_numpy import resampleSphereSurf, bilinearResampleSphereSurfImg
# from utils import get_neighs_order
def get_rot_mat_zyz(z1, y2, z3):
    """
    Intrinsic Z-Y-Z rotation matrix: rotate by z3 about z first, then
    y2 about y, and lastly z1 about z (R = Rz(z1) @ Ry(y2) @ Rz(z3)).
    """
    c1, s1 = np.cos(z1), np.sin(z1)
    c2, s2 = np.cos(y2), np.sin(y2)
    c3, s3 = np.cos(z3), np.sin(z3)
    return np.array([
        [c1 * c2 * c3 - s1 * s3, -c1 * c2 * s3 - s1 * c3, c1 * s2],
        [s1 * c2 * c3 + c1 * s3, -s1 * c2 * s3 + c1 * c3, s1 * s2],
        [-s2 * c3, s2 * s3, c2]])
def get_rot_mat_zyx(z1, y2, x3):
    """
    Intrinsic Z-Y-X rotation matrix: rotate by x3 about x first, then
    y2 about y, and lastly z1 about z (R = Rz(z1) @ Ry(y2) @ Rx(x3)).
    """
    c1, s1 = np.cos(z1), np.sin(z1)
    c2, s2 = np.cos(y2), np.sin(y2)
    c3, s3 = np.cos(x3), np.sin(x3)
    return np.array([
        [c1 * c2, c1 * s2 * s3 - s1 * c3, s1 * s3 + c1 * c3 * s2],
        [c2 * s1, c1 * c3 + s1 * s2 * s3, c3 * s1 * s2 - c1 * s3],
        [-s2, c2 * s3, c2 * c3]])
def initialRigidAlign(moving, fixed, SearchWidth=64/180*(np.pi), numIntervals=16, minSearchWidth=8/180*(np.pi), moving_xyz=None, bi=True, fixed_img=None, verbose=False):
    """
    Coarse-to-fine exhaustive search for the rigid rotation (Z-Y-X Euler
    angles) that best aligns the `moving` feature map onto `fixed`.

    At each resolution level a numIntervals+1 cubed grid of angle
    triplets is evaluated; the grid is then re-centred on the best
    triplet and the search width halved, until it drops below
    minSearchWidth. Energy is the mean squared feature difference after
    resampling `fixed` at the rotated vertex positions.

    Returns (best_angles, prev_energy, curr_energy) where best_angles is
    a (3,) array of [z1, y2, x3], prev_energy is the energy of the
    identity rotation and curr_energy the best energy found.

    NOTE(review): only the bilinear-image path (bi=True with fixed_img)
    is implemented; bi=False raises NotImplementedError.
    """
    assert len(moving) == len(moving_xyz), "moving feature's size is not correct"
    # Sphere radius; assumes moving_xyz[:, 0] attains the radius -- TODO confirm.
    radius = np.amax(moving_xyz[:,0])
    if bi == False:
        neigh_orders = None
        fixed_xyz = None
        raise NotImplementedError('Not implemented.')
    Center1 = 0.
    bestCenter1 = 0.
    Center2 = 0.
    bestCenter2 = 0.
    Center3 = 0.
    bestCenter3 = 0.
    # numIntervals points become numIntervals+1 grid samples (odd count,
    # so the grid centre -- including 0 on the first level -- is sampled).
    numIntervals = numIntervals+1
    curr_energy = float('inf')
    while SearchWidth >= minSearchWidth:
        for alpha in np.linspace(Center1-SearchWidth, Center1+SearchWidth, num=numIntervals):
            for beta in np.linspace(Center2-SearchWidth, Center2+SearchWidth, num=numIntervals):
                for gamma in np.linspace(Center3-SearchWidth, Center3+SearchWidth, num=numIntervals):
                    # Rotate the moving vertices and resample the fixed
                    # features at the rotated positions.
                    curr_rot = get_rot_mat_zyx(alpha, beta, gamma)
                    curr_vertices = curr_rot.dot(np.transpose(moving_xyz))
                    curr_vertices = np.transpose(curr_vertices)
                    if bi:
                        feat_inter = bilinearResampleSphereSurfImg(curr_vertices, fixed_img, radius=radius)
                    else:
                        feat_inter = resampleSphereSurf(fixed_xyz, curr_vertices, fixed, neigh_orders=neigh_orders)
                    feat_inter = np.squeeze(feat_inter)
                    # Mean squared difference as alignment energy.
                    tmp_energy = np.mean((feat_inter - moving)**2)
#                    tmp_energy = 1-(((feat_inter - feat_inter.mean()) * (moving - moving.mean())).mean() / feat_inter.std() / moving.std())
                    # Record the energy of the identity rotation (the
                    # zero triplet is only on the grid at the first level).
                    if alpha == 0. and beta == 0. and gamma == 0. :
                        prev_energy = tmp_energy
                    if tmp_energy < curr_energy:
                        if verbose:
                            print('Rotate by', alpha, ',', beta, ', ', gamma)
                            print('current energy: ', curr_energy)
                        curr_energy = tmp_energy
                        bestCenter1 = alpha
                        bestCenter2 = beta
                        bestCenter3 = gamma
        # Re-centre on the best triplet and refine at half the width.
        Center1 = bestCenter1
        Center2 = bestCenter2
        Center3 = bestCenter3
        SearchWidth = SearchWidth/2.
    return np.array([bestCenter1, bestCenter2, bestCenter3]), prev_energy, curr_energy
| [
"numpy.mean",
"interp_numpy.bilinearResampleSphereSurfImg",
"interp_numpy.resampleSphereSurf",
"numpy.squeeze",
"numpy.array",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"numpy.transpose",
"numpy.amax"
] | [((1698, 1723), 'numpy.amax', 'np.amax', (['moving_xyz[:, 0]'], {}), '(moving_xyz[:, 0])\n', (1705, 1723), True, 'import numpy as np\n'), ((2101, 2176), 'numpy.linspace', 'np.linspace', (['(Center1 - SearchWidth)', '(Center1 + SearchWidth)'], {'num': 'numIntervals'}), '(Center1 - SearchWidth, Center1 + SearchWidth, num=numIntervals)\n', (2112, 2176), True, 'import numpy as np\n'), ((3894, 3943), 'numpy.array', 'np.array', (['[bestCenter1, bestCenter2, bestCenter3]'], {}), '([bestCenter1, bestCenter2, bestCenter3])\n', (3902, 3943), True, 'import numpy as np\n'), ((2198, 2273), 'numpy.linspace', 'np.linspace', (['(Center2 - SearchWidth)', '(Center2 + SearchWidth)'], {'num': 'numIntervals'}), '(Center2 - SearchWidth, Center2 + SearchWidth, num=numIntervals)\n', (2209, 2273), True, 'import numpy as np\n'), ((804, 814), 'numpy.cos', 'np.cos', (['y2'], {}), '(y2)\n', (810, 814), True, 'import numpy as np\n'), ((2300, 2375), 'numpy.linspace', 'np.linspace', (['(Center3 - SearchWidth)', '(Center3 + SearchWidth)'], {'num': 'numIntervals'}), '(Center3 - SearchWidth, Center3 + SearchWidth, num=numIntervals)\n', (2311, 2375), True, 'import numpy as np\n'), ((528, 538), 'numpy.cos', 'np.cos', (['z1'], {}), '(z1)\n', (534, 538), True, 'import numpy as np\n'), ((541, 551), 'numpy.sin', 'np.sin', (['y2'], {}), '(y2)\n', (547, 551), True, 'import numpy as np\n'), ((705, 715), 'numpy.sin', 'np.sin', (['z1'], {}), '(z1)\n', (711, 715), True, 'import numpy as np\n'), ((718, 728), 'numpy.sin', 'np.sin', (['y2'], {}), '(y2)\n', (724, 728), True, 'import numpy as np\n'), ((767, 777), 'numpy.cos', 'np.cos', (['z3'], {}), '(z3)\n', (773, 777), True, 'import numpy as np\n'), ((779, 789), 'numpy.sin', 'np.sin', (['y2'], {}), '(y2)\n', (785, 789), True, 'import numpy as np\n'), ((792, 802), 'numpy.sin', 'np.sin', (['z3'], {}), '(z3)\n', (798, 802), True, 'import numpy as np\n'), ((923, 933), 'numpy.cos', 'np.cos', (['z1'], {}), '(z1)\n', (929, 933), True, 'import numpy as np\n'), ((936, 
946), 'numpy.cos', 'np.cos', (['y2'], {}), '(y2)\n', (942, 946), True, 'import numpy as np\n'), ((1111, 1121), 'numpy.cos', 'np.cos', (['y2'], {}), '(y2)\n', (1117, 1121), True, 'import numpy as np\n'), ((1124, 1134), 'numpy.sin', 'np.sin', (['z1'], {}), '(z1)\n', (1130, 1134), True, 'import numpy as np\n'), ((1300, 1310), 'numpy.sin', 'np.sin', (['y2'], {}), '(y2)\n', (1306, 1310), True, 'import numpy as np\n'), ((1329, 1339), 'numpy.cos', 'np.cos', (['y2'], {}), '(y2)\n', (1335, 1339), True, 'import numpy as np\n'), ((1342, 1352), 'numpy.sin', 'np.sin', (['x3'], {}), '(x3)\n', (1348, 1352), True, 'import numpy as np\n'), ((1400, 1410), 'numpy.cos', 'np.cos', (['y2'], {}), '(y2)\n', (1406, 1410), True, 'import numpy as np\n'), ((1413, 1423), 'numpy.cos', 'np.cos', (['x3'], {}), '(x3)\n', (1419, 1423), True, 'import numpy as np\n'), ((2556, 2583), 'numpy.transpose', 'np.transpose', (['curr_vertices'], {}), '(curr_vertices)\n', (2568, 2583), True, 'import numpy as np\n'), ((2941, 2963), 'numpy.squeeze', 'np.squeeze', (['feat_inter'], {}), '(feat_inter)\n', (2951, 2963), True, 'import numpy as np\n'), ((2997, 3032), 'numpy.mean', 'np.mean', (['((feat_inter - moving) ** 2)'], {}), '((feat_inter - moving) ** 2)\n', (3004, 3032), True, 'import numpy as np\n'), ((425, 435), 'numpy.cos', 'np.cos', (['z3'], {}), '(z3)\n', (431, 435), True, 'import numpy as np\n'), ((438, 448), 'numpy.sin', 'np.sin', (['z1'], {}), '(z1)\n', (444, 448), True, 'import numpy as np\n'), ((451, 461), 'numpy.sin', 'np.sin', (['z3'], {}), '(z3)\n', (457, 461), True, 'import numpy as np\n'), ((490, 500), 'numpy.sin', 'np.sin', (['z3'], {}), '(z3)\n', (496, 500), True, 'import numpy as np\n'), ((503, 513), 'numpy.sin', 'np.sin', (['z1'], {}), '(z1)\n', (509, 513), True, 'import numpy as np\n'), ((516, 526), 'numpy.cos', 'np.cos', (['z3'], {}), '(z3)\n', (522, 526), True, 'import numpy as np\n'), ((576, 586), 'numpy.cos', 'np.cos', (['z1'], {}), '(z1)\n', (582, 586), True, 'import numpy as np\n'), 
((589, 599), 'numpy.sin', 'np.sin', (['z3'], {}), '(z3)\n', (595, 599), True, 'import numpy as np\n'), ((628, 638), 'numpy.cos', 'np.cos', (['z3'], {}), '(z3)\n', (634, 638), True, 'import numpy as np\n'), ((667, 677), 'numpy.sin', 'np.sin', (['z3'], {}), '(z3)\n', (673, 677), True, 'import numpy as np\n'), ((680, 690), 'numpy.cos', 'np.cos', (['z1'], {}), '(z1)\n', (686, 690), True, 'import numpy as np\n'), ((693, 703), 'numpy.cos', 'np.cos', (['z3'], {}), '(z3)\n', (699, 703), True, 'import numpy as np\n'), ((754, 764), 'numpy.sin', 'np.sin', (['y2'], {}), '(y2)\n', (760, 764), True, 'import numpy as np\n'), ((979, 989), 'numpy.sin', 'np.sin', (['x3'], {}), '(x3)\n', (985, 989), True, 'import numpy as np\n'), ((992, 1002), 'numpy.sin', 'np.sin', (['z1'], {}), '(z1)\n', (998, 1002), True, 'import numpy as np\n'), ((1005, 1015), 'numpy.cos', 'np.cos', (['x3'], {}), '(x3)\n', (1011, 1015), True, 'import numpy as np\n'), ((1024, 1034), 'numpy.sin', 'np.sin', (['z1'], {}), '(z1)\n', (1030, 1034), True, 'import numpy as np\n'), ((1037, 1047), 'numpy.sin', 'np.sin', (['x3'], {}), '(x3)\n', (1043, 1047), True, 'import numpy as np\n'), ((1076, 1086), 'numpy.sin', 'np.sin', (['y2'], {}), '(y2)\n', (1082, 1086), True, 'import numpy as np\n'), ((1141, 1151), 'numpy.cos', 'np.cos', (['z1'], {}), '(z1)\n', (1147, 1151), True, 'import numpy as np\n'), ((1154, 1164), 'numpy.cos', 'np.cos', (['x3'], {}), '(x3)\n', (1160, 1164), True, 'import numpy as np\n'), ((1193, 1203), 'numpy.sin', 'np.sin', (['x3'], {}), '(x3)\n', (1199, 1203), True, 'import numpy as np\n'), ((1238, 1248), 'numpy.sin', 'np.sin', (['y2'], {}), '(y2)\n', (1244, 1248), True, 'import numpy as np\n'), ((1251, 1261), 'numpy.cos', 'np.cos', (['z1'], {}), '(z1)\n', (1257, 1261), True, 'import numpy as np\n'), ((1264, 1274), 'numpy.sin', 'np.sin', (['x3'], {}), '(x3)\n', (1270, 1274), True, 'import numpy as np\n'), ((2494, 2518), 'numpy.transpose', 'np.transpose', (['moving_xyz'], {}), '(moving_xyz)\n', (2506, 2518), 
True, 'import numpy as np\n'), ((2653, 2723), 'interp_numpy.bilinearResampleSphereSurfImg', 'bilinearResampleSphereSurfImg', (['curr_vertices', 'fixed_img'], {'radius': 'radius'}), '(curr_vertices, fixed_img, radius=radius)\n', (2682, 2723), False, 'from interp_numpy import resampleSphereSurf, bilinearResampleSphereSurfImg\n'), ((2787, 2865), 'interp_numpy.resampleSphereSurf', 'resampleSphereSurf', (['fixed_xyz', 'curr_vertices', 'fixed'], {'neigh_orders': 'neigh_orders'}), '(fixed_xyz, curr_vertices, fixed, neigh_orders=neigh_orders)\n', (2805, 2865), False, 'from interp_numpy import resampleSphereSurf, bilinearResampleSphereSurfImg\n'), ((399, 409), 'numpy.cos', 'np.cos', (['z1'], {}), '(z1)\n', (405, 409), True, 'import numpy as np\n'), ((412, 422), 'numpy.cos', 'np.cos', (['y2'], {}), '(y2)\n', (418, 422), True, 'import numpy as np\n'), ((477, 487), 'numpy.cos', 'np.cos', (['y2'], {}), '(y2)\n', (483, 487), True, 'import numpy as np\n'), ((602, 612), 'numpy.sin', 'np.sin', (['z1'], {}), '(z1)\n', (608, 612), True, 'import numpy as np\n'), ((615, 625), 'numpy.cos', 'np.cos', (['y2'], {}), '(y2)\n', (621, 625), True, 'import numpy as np\n'), ((654, 664), 'numpy.cos', 'np.cos', (['y2'], {}), '(y2)\n', (660, 664), True, 'import numpy as np\n'), ((953, 963), 'numpy.cos', 'np.cos', (['z1'], {}), '(z1)\n', (959, 963), True, 'import numpy as np\n'), ((966, 976), 'numpy.sin', 'np.sin', (['y2'], {}), '(y2)\n', (972, 976), True, 'import numpy as np\n'), ((1050, 1060), 'numpy.cos', 'np.cos', (['z1'], {}), '(z1)\n', (1056, 1060), True, 'import numpy as np\n'), ((1063, 1073), 'numpy.cos', 'np.cos', (['x3'], {}), '(x3)\n', (1069, 1073), True, 'import numpy as np\n'), ((1167, 1177), 'numpy.sin', 'np.sin', (['z1'], {}), '(z1)\n', (1173, 1177), True, 'import numpy as np\n'), ((1180, 1190), 'numpy.sin', 'np.sin', (['y2'], {}), '(y2)\n', (1186, 1190), True, 'import numpy as np\n'), ((1212, 1222), 'numpy.cos', 'np.cos', (['x3'], {}), '(x3)\n', (1218, 1222), True, 'import numpy as 
np\n'), ((1225, 1235), 'numpy.sin', 'np.sin', (['z1'], {}), '(z1)\n', (1231, 1235), True, 'import numpy as np\n'), ((464, 474), 'numpy.cos', 'np.cos', (['z1'], {}), '(z1)\n', (470, 474), True, 'import numpy as np\n'), ((641, 651), 'numpy.sin', 'np.sin', (['z1'], {}), '(z1)\n', (647, 651), True, 'import numpy as np\n')] |
'''
MIT License
Optimal Testing and Containment Strategies for Universities in Mexico amid COVID-19
Copyright © 2021 Test and Contain. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,and <NAME>. https://www.testandcontain.com/
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import dash
import time, random, pandas as pd, json
from dash.dash import no_update
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from dash.dependencies import Input, Output, State, MATCH, ALL
from app import dash_app
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from preprocess import blank_fig, health_label
from preprocess import population, _, campuses
from layout import get_layout
import flask
import numpy as np
import os
from dash.exceptions import PreventUpdate
# Top-level page skeleton: a Location component tracking the URL plus an
# empty container that the display_page() callback fills per campus.
dash_app.layout = html.Div([
    dcc.Location(id='url', refresh=False),
    html.Div(id='page-content')
])
@dash_app.callback(
    Output('page-content', 'children'),
    [Input('url', 'pathname')])
def display_page(pathname):
    """Render the layout of the campus selected by the URL path.

    The root path '/' maps to 'campus1'; any other path '/<campus>' selects
    that campus. The selection is also persisted in the 'campus_cookie'
    cookie (always stored with a leading slash, e.g. '/campus1').
    """
    print ("Displaying", pathname)
    # Both branches of the original did the same thing modulo the campus
    # key, so resolve the key once and keep a single return path.
    key = "campus1" if pathname == '/' else pathname[1:]
    dash.callback_context.response.set_cookie('campus_cookie', '/' + key)
    campus = campuses[key]
    return get_layout(len(campus["categories"]), campus["no_solutions"],
                      campus["budget"], campus["buckets"], campus["d"],
                      campus["pi"], campus["p"], campus["categories"])
@dash_app.callback(
    [Output(component_id='location-label', component_property='children'),
     Output('campus_id', 'data')
     ],
    [
        Input('page-content', 'children')
    ]
)
def update_campus(page_content):
    """Resolve the active campus from the 'campus_cookie' request cookie.

    Returns (campus label, campus id), defaulting to 'campus1' when the
    cookie is not set.
    """
    # dict.get avoids the NameError the original raised when the cookie was
    # missing (campus_id_prev was only bound inside the membership check).
    campus_id_prev = flask.request.cookies.get('campus_cookie')
    if campus_id_prev is None:
        return campuses['campus1']['label'], "campus1"
    # Cookie values look like '/campus1'; strip the leading slash.
    key = campus_id_prev[1:]
    return campuses[key]['label'], key
def get_fig(solution, campus_id):
    """Generates figure from a solution row.

    Builds a two-row horizontal bar chart: the top subplot shows, per
    category, the number of unnecessarily self-isolating individuals
    (the 'Containment i' columns of the solution); the bottom subplot
    shows the number of prevented critical infections (the negated
    'Health' column, so the bar points in the positive direction).

    Args:
        solution: one row of the solutions dataframe; must contain the
            'Containment 1'..'Containment k' and 'Health' entries.
        campus_id: key into the module-level ``campuses`` dict.

    Returns:
        A plotly Figure of fixed height 300.
    """
    categories = campuses[campus_id]['categories']
    k = len(categories)
    # Row heights [k, 1]: the containment subplot grows with the number of
    # categories, the health subplot is a single bar.
    fig = make_subplots(rows=2, cols=1,
                        subplot_titles=[_("# Unnecessarily self-isolating individuals"),
                                        _("# Prevented critical infections")],
                        specs=[[{}], [{}]], shared_xaxes=False,
                        shared_yaxes=False, vertical_spacing=0.25, row_heights=[k, 1])
    # Create subfigures
    contain_labels = [f'Containment {i}' for i in range(1,k+1)]
    y_prev=[cat['name'] for cat in categories]
    x_prev=solution[contain_labels]
    # Truncate to plain ints for display in the y-axis labels.
    x_prev=np.trunc(x_prev).astype(int).tolist()
    contain_fig = go.Bar(
        x=x_prev,
        y=[y_prev[i]+"<br>("+str(x_prev[i])+") " for i in range(k)],
        marker=dict(color='purple',
                    line=dict(color='black', width=0)),
        orientation='h')
    # 'Health' is stored negated in the dataframe; flip the sign so the
    # prevented-infections bar is positive.
    x_prev = -solution[health_label]
    x_prev=np.trunc(x_prev).astype(int).tolist()
    y_prev = [population['name']]
    health_fig = go.Bar(
        x=x_prev,
        y=[y_prev[i]+"<br>("+str(x_prev[i])+") " for i in range(len(y_prev))],
        marker=dict(color=['orange'],
                    line=dict(color='black', width=0)),
        orientation='h')
    # Add subfigures to fig
    fig.append_trace(contain_fig, 1, 1)
    fig.append_trace(health_fig, 2, 1)
    # Fix the x-axis of health bar subplot to the total population size so
    # the bar length is comparable across solutions.
    fig.update_xaxes(range=[0, sum(cat['size'] for cat in categories)], row=2, col=1)
    fig.layout.update(margin=dict(l=0, r=10, t=20, b=0),
                      paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)',
                      showlegend=False)
    # Reversed y-axis keeps categories in their configured order, top-down.
    fig.update_yaxes(autorange='reversed')
    fig.layout.height = 300
    return fig
@dash_app.callback(
    [Output({'type': 'threshold', 'index': MATCH}, 'value'),
     Output({'type': 'threshold', 'index': MATCH}, 'max'),
     Output({'type': 'categories_size', 'index': MATCH}, 'children')],
    Input('campus_id','data'),
    State({'type': 'threshold', 'index': MATCH}, 'id')
)
def update_size_treshold(campus_id, id):
    """Initialise a category threshold input from the category size.

    Returns (value, max, size label); all three equal the category size,
    or all None while no campus is selected yet.
    """
    print("Running 'update_size_threshold'.")
    # Guard before touching `campuses`: the original dereferenced
    # campuses[campus_id] first, so its None branch was unreachable.
    if campus_id is None:
        return None, None, None
    categories = campuses[campus_id]['categories']
    i = int(id['index'])
    size = int(categories[i]['size'])
    return size, size, size
@dash_app.callback(
    [Output({'type': 'threshold_h'}, 'value'),
     Output({'type': 'threshold_h'}, 'max'),
     Output({'type': 'population_size'},'children')],
    Input('campus_id','data')
)
def update_size_threshold_Healt(campus_id):
    """Initialise the health threshold input from the campus population."""
    print("Running 'update_size_threshold_Healt'.", campus_id)
    if campus_id is None:
        # The original fell through here and returned the module-level
        # `population` dict (imported from preprocess) because the local
        # assignment was skipped; skip the update instead.
        raise PreventUpdate
    campus_population = campuses[campus_id]['population']
    return 0, campus_population, campus_population
@dash_app.callback(
    Output({'type': 'categories_name', 'index': MATCH}, 'children'),
    Output({'type': 'sol_name', 'index': MATCH}, 'children'),
    Input('campus_id','data'),
    State({'type': 'threshold', 'index': MATCH}, 'id')
)
def update_names(campus_id, id):
    """Fill both name labels of a category from the campus configuration."""
    print("Running 'update_names'.", campus_id)
    # Guard before indexing `campuses`: the original looked the campus up
    # first, making its `else` branch dead code — and that branch returned
    # a single None for a two-output callback anyway.
    if campus_id is None:
        return None, None
    categories = campuses[campus_id]['categories']
    i = int(id['index'])
    name = f"{categories[i]['name']}"
    return name, name
@dash_app.callback(
    Output({'type': 'percent', 'index': MATCH}, 'children'),
    Input({'type': 'threshold', 'index': MATCH}, 'value'),
    Input('campus_id','data'),
    State({'type': 'threshold', 'index': MATCH}, 'id')
)
def update_percentage(threshold, campus_id, id):
    """Update the percentage box corresponding to the threshold value that was updated."""
    print("Running 'update_percentage'.")
    categories = campuses[campus_id]['categories']
    idx = int(id['index'])
    # No threshold entered yet: show the full category.
    if threshold is None:
        return f"100%"
    size = int(categories[idx]['size'])
    # Empty categories would divide by zero; report 0% for them.
    share = 0 if size == 0 else int(threshold) * 100 / size
    return f"{round(share, 2)}%"
@dash_app.callback(
    Output({'type': 'percent_h'}, 'children'),
    Input({'type': 'threshold_h'}, 'value'),
    Input('campus_id','data')
)
def update_percentage_Healt(threshold, campus_id):
    """Update the percentage box corresponding to the threshold value that was updated."""
    print("Running 'update_percentage_Health'.")
    total = campuses[campus_id]['population']
    # No health threshold entered yet: show zero percent.
    if threshold is None:
        return f"0.0%"
    share = int(threshold) * 100 / int(total)
    return f"{round(share, 2)}%"
@dash_app.callback(
    Output("asked_no_solutions_store","data"),
    Output("loading-output", "children"),
    Input("asked_no_solutions_button", "n_clicks"),
    State("asked_no_solutions", "value"),
    State('campus_id','data')
)
def update_asked_solutions(n_clicks,asked_no_solutions, campus_id):
    """Run the Julia Pareto solver for the requested number of solutions.

    Writes data/<campus>.csv as a side effect and signals completion by
    storing "done" in the asked_no_solutions_store.
    """
    print("Running 'update_asked_solutions'.")
    if campus_id is None:
        print ("Default campus")
        campus_id = "campus1"
        return "done",""
    print (campus_id)
    # Build the argv list explicitly instead of interpolating values into a
    # shell string (os.system): asked_no_solutions comes from user input,
    # so shell=False avoids command injection.
    import subprocess
    subprocess.run(
        ["julia", "pareto/pareto.jl", f"data/{campus_id}.json",
         str(asked_no_solutions), f"data/{campus_id}.csv"],
        check=False,
    )
    print("From method")
    return "done",""
## NOTE: This method has problems when campuses have different numbers of categories.
@dash_app.callback(
    Output("bar-chart", "figure"),
    Output({'type': 'allocation', 'index': ALL}, 'children'),
    Output({'type': 'groupsize', 'index': ALL}, 'children'),
    Input("jsoned_solutions", "data"),
    Input("current-solution", "value"),
    State('campus_id','data'),
    State("solutions", "data"),
)
def update_displayed_solution(jsoned_solutions, sol_index, campus_id, solutions):
    """Updates the figure and the allocation/group size boxes when current_solution is modified.

    sol_index is 1-based; `solutions` maps it to a row index of the
    JSON-serialised dataframe stored in `jsoned_solutions`.
    """
    print("Running 'update_displayed_solution'.")
    k = len (campuses[campus_id]['categories'])
    # If sol_index is None, return None
    if sol_index is None:
        return blank_fig, (None,)*k, (None,)*k
    # If sol_index is not an int, do nothing.
    elif not isinstance(sol_index, int):
        return no_update, [no_update]*k, [no_update]*k
    # Load the solution from dataframe
    row_index = solutions[sol_index-1]
    jsoned_solutions = json.loads(jsoned_solutions)
    specific = jsoned_solutions['data'][row_index]
    # NOTE(review): the second positional argument of pd.DataFrame is
    # `index`, not `columns` — this builds a one-column frame whose row
    # index is the column names, and specific2[0] then selects that single
    # column as a Series keyed by label. Works, but fragile; confirm.
    specific2 = pd.DataFrame(specific, jsoned_solutions['columns'])
    # Get updated bar chart
    fig = get_fig(specific2[0], campus_id)
    # Get allocation and group sizes
    g_labels = [f'g{i}' for i in range(1,k+1)]
    t_labels = [f't{i}' for i in range(1,k+1)]
    t = list(specific2[0][t_labels])
    g = list(specific2[0][g_labels])
    # Return figure, allocation, and group sizes
    return fig, t, g
@dash_app.callback(
    Output("solutions", "data"),
    Output("threshold_vals", "data"),
    Output("threshold_h_val", "data"),
    Output("solution-num-sentence", "children"),
    Output("current-solution", "value"),
    Output("current-solution", "max"),
    Output("jsoned_solutions", "data"),
    Input({'type': 'threshold', 'index': ALL}, 'value'),
    Input({'type': 'threshold_h'}, 'value'),
    Input("asked_no_solutions_store", "data"),
    State('campus_id', 'data'),
    State("current-solution", "value")
)
def update_solution_set(thresholds, threshold_h, asked_no_solutions, campus_id, current_sol):
    """Updates the set of solutions stored when one of the thresholds changes."""
    print("Running 'update_solution_set'.")
    # Check that all thresholds are integers, otherwise do nothing.
    if not all(isinstance(x, int) for x in thresholds):
        # This callback declares seven outputs; the original returned only
        # six no_update sentinels here, which Dash rejects at runtime.
        return (no_update,) * 7
    sols, jsoned_solutions = get_solutions(thresholds, threshold_h, campus_id)
    num_sols = len(sols)
    # Keep the user's current pick when it is still in range, otherwise
    # pick a random solution (1-based), or None when nothing matches.
    if current_sol is not None and current_sol < num_sols:
        picked_sol = current_sol
    elif num_sols > 0:
        picked_sol = random.randint(1, num_sols)
    else:
        picked_sol = None
    if num_sols != 1:
        solutions_sentence = _("There are {} solutions that satisfy the thresholds.").format(num_sols)
    else:
        solutions_sentence = _("There is one solution that satisfies the thresholds.")
    return sols, thresholds, threshold_h, solutions_sentence, picked_sol, num_sols, jsoned_solutions
def get_solutions(thresholds, threshold_h, campus_id):
    """Return list of solutions (=indices of dataframe) that are not filtered out by thresholds.

    Reads the campus CSV, relabels its columns as g1..gk, t1..tk, 'Health',
    'Containment 1'..'Containment k', sorts ascending by 'Health', and
    returns (matching row indices, dataframe serialised with
    df.to_json(orient="split")).

    Raises:
        ValueError: if campus_id is None.
        Exception: if the CSV column count does not match the campus
            configuration (3*k + 1 columns expected).
    """
    if campus_id is None:
        # The original body dedented out of its `if campus_id is not None:`
        # guard and crashed with NameError on `df`; fail explicitly instead.
        raise ValueError("campus_id must not be None")
    print ("Reading file", campuses[campus_id]['file'])
    df = pd.read_csv(campuses[campus_id]['file'])
    k = len(campuses[campus_id]['categories'])
    if df.columns.size != 3*k+1:
        raise Exception("Data input has inconsistent number of categories!")
    g_labels = [f'g{i}' for i in range(1,k+1)]
    t_labels = [f't{i}' for i in range(1,k+1)]
    contain_labels = [f'Containment {i}' for i in range(1,k+1)]
    # Local rebinding intentionally shadows the imported `health_label`.
    health_label = ['Health']
    obj_labels = health_label + contain_labels
    col_labels = g_labels + t_labels + obj_labels
    df.columns = col_labels
    df = df.sort_values(by=['Health'], ignore_index=True)
    # A row survives when every containment value is within its threshold
    # and the (negated) health value meets the health threshold.
    contain_mask = (df[contain_labels] <= thresholds[:]).all(axis=1)
    health_mask = (-df[health_label] >= threshold_h).all(axis=1)
    mask = contain_mask & health_mask
    return list(mask[mask].index), df.to_json(orient="split")
@dash_app.callback(
    Output("solutions-row", "children"),
    Input('save-button', 'n_clicks'),
    State('campus_id','data'),
    State("current-solution", "value"),
    State("solutions", "data"),
    State("jsoned_solutions", "data"),
    State("solutions-row", "children")
)
def save_solution(n_clicks, campus_id, sol_index, solutions, jsoned_solutions, saved_solutions):
    """Saves the current figure and the allocations / group sizes when the save button is clicked.

    Appends one new column (static graph + allocation/group-size text) to
    the existing list of saved-solution children and returns the whole
    list.
    """
    print("Running 'save_solution'.")
    # If sol_index is not an int, do nothing.
    if not isinstance(sol_index, int):
        return no_update
    # sol_index is 1-based; map it to a row of the serialised dataframe.
    row_index = solutions[sol_index-1]
    jsoned_solutions = json.loads(jsoned_solutions)
    specific = jsoned_solutions['data'][row_index]
    # Same pd.DataFrame(data, index) construction as in
    # update_displayed_solution: one-column frame keyed by column names.
    specific2 = pd.DataFrame(specific, jsoned_solutions['columns'])
    k = len(campuses[campus_id]['categories'])
    # Get updated box-graph
    fig = get_fig(specific2[0], campus_id)
    # Get allocation and group sizes
    g_labels = [f'g{i}' for i in range(1,k+1)]
    t_labels = [f't{i}' for i in range(1,k+1)]
    t = list(specific2[0][t_labels])
    g = list(specific2[0][g_labels])
    # Get time at which solution is saved, to use as index
    timestamp = time.time()
    column = dbc.Col([
        dbc.Card([
            dcc.Graph(id={'type': 'saved-graph', 'index': sol_index},
                      figure=fig, config={'staticPlot': True}, className="mb-1"),
            html.Span(_("Allocation: {}.").format(t)),
            html.Span(_("Group sizes: {}.").format(g)),
        ], id={'type': 'saved_solution', 'index': timestamp}, className="p-3 mb-3"),
    ], width=6)
    saved_solutions.append(column)
    # Return solution column
    return saved_solutions
"json.loads",
"pandas.read_csv",
"numpy.trunc",
"dash.dependencies.Output",
"dash_core_components.Location",
"dash.dependencies.Input",
"dash.callback_context.response.set_cookie",
"preprocess._",
"pandas.DataFrame",
"dash.dependencies.State",
"time.time",
"random.randint",
"dash_html_compon... | [((1966, 2000), 'dash.dependencies.Output', 'Output', (['"""page-content"""', '"""children"""'], {}), "('page-content', 'children')\n", (1972, 2000), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((5528, 5554), 'dash.dependencies.Input', 'Input', (['"""campus_id"""', '"""data"""'], {}), "('campus_id', 'data')\n", (5533, 5554), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((5559, 5609), 'dash.dependencies.State', 'State', (["{'type': 'threshold', 'index': MATCH}", '"""id"""'], {}), "({'type': 'threshold', 'index': MATCH}, 'id')\n", (5564, 5609), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((6170, 6196), 'dash.dependencies.Input', 'Input', (['"""campus_id"""', '"""data"""'], {}), "('campus_id', 'data')\n", (6175, 6196), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((6459, 6522), 'dash.dependencies.Output', 'Output', (["{'type': 'categories_name', 'index': MATCH}", '"""children"""'], {}), "({'type': 'categories_name', 'index': MATCH}, 'children')\n", (6465, 6522), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((6528, 6584), 'dash.dependencies.Output', 'Output', (["{'type': 'sol_name', 'index': MATCH}", '"""children"""'], {}), "({'type': 'sol_name', 'index': MATCH}, 'children')\n", (6534, 6584), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((6590, 6616), 'dash.dependencies.Input', 'Input', (['"""campus_id"""', '"""data"""'], {}), "('campus_id', 'data')\n", (6595, 6616), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((6621, 6671), 'dash.dependencies.State', 'State', (["{'type': 'threshold', 'index': MATCH}", '"""id"""'], {}), "({'type': 'threshold', 'index': MATCH}, 'id')\n", (6626, 6671), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((7020, 7075), 'dash.dependencies.Output', 'Output', (["{'type': 
'percent', 'index': MATCH}", '"""children"""'], {}), "({'type': 'percent', 'index': MATCH}, 'children')\n", (7026, 7075), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((7081, 7134), 'dash.dependencies.Input', 'Input', (["{'type': 'threshold', 'index': MATCH}", '"""value"""'], {}), "({'type': 'threshold', 'index': MATCH}, 'value')\n", (7086, 7134), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((7140, 7166), 'dash.dependencies.Input', 'Input', (['"""campus_id"""', '"""data"""'], {}), "('campus_id', 'data')\n", (7145, 7166), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((7171, 7221), 'dash.dependencies.State', 'State', (["{'type': 'threshold', 'index': MATCH}", '"""id"""'], {}), "({'type': 'threshold', 'index': MATCH}, 'id')\n", (7176, 7221), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((7727, 7768), 'dash.dependencies.Output', 'Output', (["{'type': 'percent_h'}", '"""children"""'], {}), "({'type': 'percent_h'}, 'children')\n", (7733, 7768), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((7774, 7813), 'dash.dependencies.Input', 'Input', (["{'type': 'threshold_h'}", '"""value"""'], {}), "({'type': 'threshold_h'}, 'value')\n", (7779, 7813), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((7819, 7845), 'dash.dependencies.Input', 'Input', (['"""campus_id"""', '"""data"""'], {}), "('campus_id', 'data')\n", (7824, 7845), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((8293, 8335), 'dash.dependencies.Output', 'Output', (['"""asked_no_solutions_store"""', '"""data"""'], {}), "('asked_no_solutions_store', 'data')\n", (8299, 8335), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((8340, 8376), 'dash.dependencies.Output', 'Output', (['"""loading-output"""', '"""children"""'], {}), "('loading-output', 'children')\n", (8346, 8376), False, 
'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((8382, 8428), 'dash.dependencies.Input', 'Input', (['"""asked_no_solutions_button"""', '"""n_clicks"""'], {}), "('asked_no_solutions_button', 'n_clicks')\n", (8387, 8428), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((8434, 8470), 'dash.dependencies.State', 'State', (['"""asked_no_solutions"""', '"""value"""'], {}), "('asked_no_solutions', 'value')\n", (8439, 8470), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((8476, 8502), 'dash.dependencies.State', 'State', (['"""campus_id"""', '"""data"""'], {}), "('campus_id', 'data')\n", (8481, 8502), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((9977, 10005), 'json.loads', 'json.loads', (['jsoned_solutions'], {}), '(jsoned_solutions)\n', (9987, 10005), False, 'import time, random, pandas as pd, json\n'), ((10073, 10124), 'pandas.DataFrame', 'pd.DataFrame', (['specific', "jsoned_solutions['columns']"], {}), "(specific, jsoned_solutions['columns'])\n", (10085, 10124), True, 'import time, random, pandas as pd, json\n'), ((9038, 9067), 'dash.dependencies.Output', 'Output', (['"""bar-chart"""', '"""figure"""'], {}), "('bar-chart', 'figure')\n", (9044, 9067), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((9073, 9129), 'dash.dependencies.Output', 'Output', (["{'type': 'allocation', 'index': ALL}", '"""children"""'], {}), "({'type': 'allocation', 'index': ALL}, 'children')\n", (9079, 9129), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((9135, 9190), 'dash.dependencies.Output', 'Output', (["{'type': 'groupsize', 'index': ALL}", '"""children"""'], {}), "({'type': 'groupsize', 'index': ALL}, 'children')\n", (9141, 9190), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((9196, 9229), 'dash.dependencies.Input', 'Input', (['"""jsoned_solutions"""', '"""data"""'], {}), 
"('jsoned_solutions', 'data')\n", (9201, 9229), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((9235, 9269), 'dash.dependencies.Input', 'Input', (['"""current-solution"""', '"""value"""'], {}), "('current-solution', 'value')\n", (9240, 9269), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((9275, 9301), 'dash.dependencies.State', 'State', (['"""campus_id"""', '"""data"""'], {}), "('campus_id', 'data')\n", (9280, 9301), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((9306, 9332), 'dash.dependencies.State', 'State', (['"""solutions"""', '"""data"""'], {}), "('solutions', 'data')\n", (9311, 9332), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((10497, 10524), 'dash.dependencies.Output', 'Output', (['"""solutions"""', '"""data"""'], {}), "('solutions', 'data')\n", (10503, 10524), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((10530, 10562), 'dash.dependencies.Output', 'Output', (['"""threshold_vals"""', '"""data"""'], {}), "('threshold_vals', 'data')\n", (10536, 10562), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((10568, 10601), 'dash.dependencies.Output', 'Output', (['"""threshold_h_val"""', '"""data"""'], {}), "('threshold_h_val', 'data')\n", (10574, 10601), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((10607, 10650), 'dash.dependencies.Output', 'Output', (['"""solution-num-sentence"""', '"""children"""'], {}), "('solution-num-sentence', 'children')\n", (10613, 10650), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((10656, 10691), 'dash.dependencies.Output', 'Output', (['"""current-solution"""', '"""value"""'], {}), "('current-solution', 'value')\n", (10662, 10691), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((10697, 10730), 'dash.dependencies.Output', 'Output', (['"""current-solution"""', 
'"""max"""'], {}), "('current-solution', 'max')\n", (10703, 10730), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((10736, 10770), 'dash.dependencies.Output', 'Output', (['"""jsoned_solutions"""', '"""data"""'], {}), "('jsoned_solutions', 'data')\n", (10742, 10770), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((10776, 10827), 'dash.dependencies.Input', 'Input', (["{'type': 'threshold', 'index': ALL}", '"""value"""'], {}), "({'type': 'threshold', 'index': ALL}, 'value')\n", (10781, 10827), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((10833, 10872), 'dash.dependencies.Input', 'Input', (["{'type': 'threshold_h'}", '"""value"""'], {}), "({'type': 'threshold_h'}, 'value')\n", (10838, 10872), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((10878, 10919), 'dash.dependencies.Input', 'Input', (['"""asked_no_solutions_store"""', '"""data"""'], {}), "('asked_no_solutions_store', 'data')\n", (10883, 10919), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((10925, 10951), 'dash.dependencies.State', 'State', (['"""campus_id"""', '"""data"""'], {}), "('campus_id', 'data')\n", (10930, 10951), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((10957, 10991), 'dash.dependencies.State', 'State', (['"""current-solution"""', '"""value"""'], {}), "('current-solution', 'value')\n", (10962, 10991), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((13804, 13832), 'json.loads', 'json.loads', (['jsoned_solutions'], {}), '(jsoned_solutions)\n', (13814, 13832), False, 'import time, random, pandas as pd, json\n'), ((13900, 13951), 'pandas.DataFrame', 'pd.DataFrame', (['specific', "jsoned_solutions['columns']"], {}), "(specific, jsoned_solutions['columns'])\n", (13912, 13951), True, 'import time, random, pandas as pd, json\n'), ((14350, 14361), 'time.time', 'time.time', ([], {}), '()\n', 
(14359, 14361), False, 'import time, random, pandas as pd, json\n'), ((13137, 13172), 'dash.dependencies.Output', 'Output', (['"""solutions-row"""', '"""children"""'], {}), "('solutions-row', 'children')\n", (13143, 13172), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((13178, 13210), 'dash.dependencies.Input', 'Input', (['"""save-button"""', '"""n_clicks"""'], {}), "('save-button', 'n_clicks')\n", (13183, 13210), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((13216, 13242), 'dash.dependencies.State', 'State', (['"""campus_id"""', '"""data"""'], {}), "('campus_id', 'data')\n", (13221, 13242), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((13247, 13281), 'dash.dependencies.State', 'State', (['"""current-solution"""', '"""value"""'], {}), "('current-solution', 'value')\n", (13252, 13281), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((13287, 13313), 'dash.dependencies.State', 'State', (['"""solutions"""', '"""data"""'], {}), "('solutions', 'data')\n", (13292, 13313), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((13319, 13352), 'dash.dependencies.State', 'State', (['"""jsoned_solutions"""', '"""data"""'], {}), "('jsoned_solutions', 'data')\n", (13324, 13352), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((13358, 13392), 'dash.dependencies.State', 'State', (['"""solutions-row"""', '"""children"""'], {}), "('solutions-row', 'children')\n", (13363, 13392), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((1868, 1905), 'dash_core_components.Location', 'dcc.Location', ([], {'id': '"""url"""', 'refresh': '(False)'}), "(id='url', refresh=False)\n", (1880, 1905), True, 'import dash_core_components as dcc\n'), ((1911, 1938), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""page-content"""'}), "(id='page-content')\n", (1919, 1938), True, 'import 
dash_html_components as html\n'), ((2129, 2199), 'dash.callback_context.response.set_cookie', 'dash.callback_context.response.set_cookie', (['"""campus_cookie"""', '"""/campus1"""'], {}), "('campus_cookie', '/campus1')\n", (2170, 2199), False, 'import dash\n'), ((2498, 2566), 'dash.callback_context.response.set_cookie', 'dash.callback_context.response.set_cookie', (['"""campus_cookie"""', 'pathname'], {}), "('campus_cookie', pathname)\n", (2539, 2566), False, 'import dash\n'), ((2007, 2031), 'dash.dependencies.Input', 'Input', (['"""url"""', '"""pathname"""'], {}), "('url', 'pathname')\n", (2012, 2031), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((2915, 2983), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""location-label"""', 'component_property': '"""children"""'}), "(component_id='location-label', component_property='children')\n", (2921, 2983), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((2989, 3016), 'dash.dependencies.Output', 'Output', (['"""campus_id"""', '"""data"""'], {}), "('campus_id', 'data')\n", (2995, 3016), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((3037, 3070), 'dash.dependencies.Input', 'Input', (['"""page-content"""', '"""children"""'], {}), "('page-content', 'children')\n", (3042, 3070), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((5340, 5394), 'dash.dependencies.Output', 'Output', (["{'type': 'threshold', 'index': MATCH}", '"""value"""'], {}), "({'type': 'threshold', 'index': MATCH}, 'value')\n", (5346, 5394), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((5400, 5452), 'dash.dependencies.Output', 'Output', (["{'type': 'threshold', 'index': MATCH}", '"""max"""'], {}), "({'type': 'threshold', 'index': MATCH}, 'max')\n", (5406, 5452), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((5458, 5521), 'dash.dependencies.Output', 'Output', 
(["{'type': 'categories_size', 'index': MATCH}", '"""children"""'], {}), "({'type': 'categories_size', 'index': MATCH}, 'children')\n", (5464, 5521), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((6027, 6067), 'dash.dependencies.Output', 'Output', (["{'type': 'threshold_h'}", '"""value"""'], {}), "({'type': 'threshold_h'}, 'value')\n", (6033, 6067), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((6073, 6111), 'dash.dependencies.Output', 'Output', (["{'type': 'threshold_h'}", '"""max"""'], {}), "({'type': 'threshold_h'}, 'max')\n", (6079, 6111), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((6117, 6164), 'dash.dependencies.Output', 'Output', (["{'type': 'population_size'}", '"""children"""'], {}), "({'type': 'population_size'}, 'children')\n", (6123, 6164), False, 'from dash.dependencies import Input, Output, State, MATCH, ALL\n'), ((11848, 11905), 'preprocess._', '_', (['"""There is one solution that satisfies the thresholds."""'], {}), "('There is one solution that satisfies the thresholds.')\n", (11849, 11905), False, 'from preprocess import population, _, campuses\n'), ((12166, 12206), 'pandas.read_csv', 'pd.read_csv', (["campuses[campus_id]['file']"], {}), "(campuses[campus_id]['file'])\n", (12177, 12206), True, 'import time, random, pandas as pd, json\n'), ((11615, 11642), 'random.randint', 'random.randint', (['(1)', 'num_sols'], {}), '(1, num_sols)\n', (11629, 11642), False, 'import time, random, pandas as pd, json\n'), ((3641, 3688), 'preprocess._', '_', (['"""# Unnecessarily self-isolating individuals"""'], {}), "('# Unnecessarily self-isolating individuals')\n", (3642, 3688), False, 'from preprocess import population, _, campuses\n'), ((3730, 3766), 'preprocess._', '_', (['"""# Prevented critical infections"""'], {}), "('# Prevented critical infections')\n", (3731, 3766), False, 'from preprocess import population, _, campuses\n'), ((11735, 11791), 'preprocess._', 
'_', (['"""There are {} solutions that satisfy the thresholds."""'], {}), "('There are {} solutions that satisfy the thresholds.')\n", (11736, 11791), False, 'from preprocess import population, _, campuses\n'), ((4098, 4114), 'numpy.trunc', 'np.trunc', (['x_prev'], {}), '(x_prev)\n', (4106, 4114), True, 'import numpy as np\n'), ((4456, 4472), 'numpy.trunc', 'np.trunc', (['x_prev'], {}), '(x_prev)\n', (4464, 4472), True, 'import numpy as np\n'), ((14416, 14536), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': "{'type': 'saved-graph', 'index': sol_index}", 'figure': 'fig', 'config': "{'staticPlot': True}", 'className': '"""mb-1"""'}), "(id={'type': 'saved-graph', 'index': sol_index}, figure=fig,\n config={'staticPlot': True}, className='mb-1')\n", (14425, 14536), True, 'import dash_core_components as dcc\n'), ((14578, 14598), 'preprocess._', '_', (['"""Allocation: {}."""'], {}), "('Allocation: {}.')\n", (14579, 14598), False, 'from preprocess import population, _, campuses\n'), ((14633, 14654), 'preprocess._', '_', (['"""Group sizes: {}."""'], {}), "('Group sizes: {}.')\n", (14634, 14654), False, 'from preprocess import population, _, campuses\n')] |
# encoding = UTF-8
import numpy as np
import message_passing
import nn
import randomtest
import matplotlib.pyplot as plt
def draw_graph(result, k):
    """Scatter-plot the per-step values in `result` against step numbers 1..k+1."""
    steps = range(1, k + 2)
    # One dot per iteration step; s controls the marker size.
    plt.scatter(steps, result, s=10)
    # Chart title and axis labels.
    plt.title('AMP results for SVD random matrix', fontsize=10)
    plt.xlabel('Steps', fontsize=14)
    plt.ylabel('(x-x0)^2/N', fontsize=14)
    # Tick label size on both axes.
    plt.tick_params(axis='both', which='major', labelsize=14)
    # Fixed x-range, y-range clipped to the data maximum.
    plt.axis([0, 110, 0, max(result)])
    plt.show()
# setting constant variables
M = 200
N = 1000
d = 30
# initialize matrix A and x
A = np.zeros((M, N))
x = np.zeros(N)
x_sample = np.zeros([N, 10000])
y_sample = np.zeros([M, 10000])
# Read the measurement matrix and the sparse-signal sample files.
with open('data/AAA_Gauss_1000_1000.mtx', 'r') as f1:
    list1 = f1.readlines()
with open('data/XXX0samples_Gaussian_1000.dat', 'r') as f2:
    list2 = f2.readlines()
# NOTE(review): both files are already closed by the `with` blocks; these
# explicit close() calls are redundant no-ops.
f1.close()
f2.close()
# constructing Gaussian matrix A
# (the first two lines of the .mtx file are header lines, hence the offset)
for i in range(2, M+2):
    A[i-2] = list1[i].split()
# Normalise each column of A to unit Euclidean norm.
for j in range(N):
    A[:, j] = A[:, j] / np.sqrt(sum([k*k for k in A[:, j]]))
# Fill the d nonzero entries of x from lines 3..d+2 of the samples file.
for i in range(3, d+3):
    temp = list2[i].split()
    x[int(temp[0])-1] = float(temp[1])
# construct samples of the original vector x and calculate the compressed vector y
s = np.random.randint(4, 1003, [d, 10000])
for j in range(10000):
    for i in s[:, j]:
        temp = list2[i].split()
        # NOTE(review): here the index is int(temp[0]) while the loop above
        # used int(temp[0])-1 — possible off-by-one inconsistency; confirm.
        x_sample[int(temp[0]), j] = float(temp[1])
for i in range(10000):
    y_sample[:, i] = np.dot(A, x_sample[:, i])
# generating random matrix
A1 = np.random.random_sample((M, N))
# Normalise each row of A1 to unit Euclidean norm.
for i in range(M):
    A1[i, :] = A1[i, :] / np.sqrt(sum([k*k for k in A1[i, :]]))
# SVD of A; note this rebinds `s` (previously the sample-index matrix)
# to the singular values.
u, s, vT = np.linalg.svd(A, full_matrices=False)
sd = np.diag(s)
print(np.average(A[1]))
# x1, result1, k1 = message_passing.amp(A1, x)
# x2, result2, k2 = message_passing.vamp(A1, x)
# print(result1)
# draw_graph(result1, k1)
# nn.nn(A, x, M, N)
| [
"numpy.random.random_sample",
"matplotlib.pyplot.ylabel",
"numpy.average",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tick_params",
"numpy.diag",
"numpy.zeros",
"numpy.random.randint",
"numpy.dot",
"matplotlib.pyplot.scatter",
"numpy.linalg.svd",
"matplotlib.pyplot.title",
"matplotlib.py... | [((705, 721), 'numpy.zeros', 'np.zeros', (['(M, N)'], {}), '((M, N))\n', (713, 721), True, 'import numpy as np\n'), ((726, 737), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (734, 737), True, 'import numpy as np\n'), ((749, 769), 'numpy.zeros', 'np.zeros', (['[N, 10000]'], {}), '([N, 10000])\n', (757, 769), True, 'import numpy as np\n'), ((781, 801), 'numpy.zeros', 'np.zeros', (['[M, 10000]'], {}), '([M, 10000])\n', (789, 801), True, 'import numpy as np\n'), ((1341, 1379), 'numpy.random.randint', 'np.random.randint', (['(4)', '(1003)', '[d, 10000]'], {}), '(4, 1003, [d, 10000])\n', (1358, 1379), True, 'import numpy as np\n'), ((1612, 1643), 'numpy.random.random_sample', 'np.random.random_sample', (['(M, N)'], {}), '((M, N))\n', (1635, 1643), True, 'import numpy as np\n'), ((1739, 1776), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {'full_matrices': '(False)'}), '(A, full_matrices=False)\n', (1752, 1776), True, 'import numpy as np\n'), ((1782, 1792), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (1789, 1792), True, 'import numpy as np\n'), ((260, 297), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_values', 'y_values'], {'s': '(10)'}), '(x_values, y_values, s=10)\n', (271, 297), True, 'import matplotlib.pyplot as plt\n'), ((325, 384), 'matplotlib.pyplot.title', 'plt.title', (['"""AMP results for SVD random matrix"""'], {'fontsize': '(10)'}), "('AMP results for SVD random matrix', fontsize=10)\n", (334, 384), True, 'import matplotlib.pyplot as plt\n'), ((389, 421), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Steps"""'], {'fontsize': '(14)'}), "('Steps', fontsize=14)\n", (399, 421), True, 'import matplotlib.pyplot as plt\n'), ((426, 463), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""(x-x0)^2/N"""'], {'fontsize': '(14)'}), "('(x-x0)^2/N', fontsize=14)\n", (436, 463), True, 'import matplotlib.pyplot as plt\n'), ((485, 542), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""major"""', 
'labelsize': '(14)'}), "(axis='both', which='major', labelsize=14)\n", (500, 542), True, 'import matplotlib.pyplot as plt\n'), ((606, 616), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (614, 616), True, 'import matplotlib.pyplot as plt\n'), ((1553, 1578), 'numpy.dot', 'np.dot', (['A', 'x_sample[:, i]'], {}), '(A, x_sample[:, i])\n', (1559, 1578), True, 'import numpy as np\n'), ((1800, 1816), 'numpy.average', 'np.average', (['A[1]'], {}), '(A[1])\n', (1810, 1816), True, 'import numpy as np\n')] |
import itertools
import numpy as np
import numpy.testing as npt
import pytest
from quara.objects.composite_system import CompositeSystem
from quara.objects.composite_system_typical import generate_composite_system
from quara.objects.elemental_system import ElementalSystem
from quara.objects.matrix_basis import get_normalized_pauli_basis
from quara.objects.mprocess import MProcess
from quara.objects.povm import (
get_x_povm,
get_y_povm,
get_z_povm,
)
from quara.objects.qoperation_typical import (
generate_qoperation,
generate_qoperation_object,
)
from quara.objects.tester_typical import (
generate_tester_states,
generate_tester_povms,
)
from quara.protocol.qtomography.standard.linear_estimator import LinearEstimator
from quara.protocol.qtomography.standard.standard_qmpt import (
cqpt_to_cqmpt,
StandardQmpt,
)
from quara.protocol.qtomography.standard.loss_minimization_estimator import (
LossMinimizationEstimator,
)
from quara.loss_function.standard_qtomography_based_weighted_relative_entropy import (
StandardQTomographyBasedWeightedRelativeEntropy,
StandardQTomographyBasedWeightedRelativeEntropyOption,
)
from quara.loss_function.standard_qtomography_based_weighted_probability_based_squared_error import (
StandardQTomographyBasedWeightedProbabilityBasedSquaredError,
StandardQTomographyBasedWeightedProbabilityBasedSquaredErrorOption,
)
from quara.minimization_algorithm.projected_gradient_descent_backtracking import (
ProjectedGradientDescentBacktracking,
ProjectedGradientDescentBacktrackingOption,
)
class TestStandardQmpt:
    def test_testers(self):
        """The ``testers`` property should expose all tester states followed by all povms."""
        # Arrange: single-qubit tester objects and a true mprocess
        c_sys = generate_composite_system(mode="qubit", num=1)
        tester_states = [
            generate_qoperation_object(
                mode="state", object_name="state", name=n, c_sys=c_sys
            )
            for n in ("x0", "y0", "z0", "z1")
        ]
        tester_povms = [
            generate_qoperation_object(
                mode="povm", object_name="povm", name=n, c_sys=c_sys
            )
            for n in ("x", "y", "z")
        ]
        true_object = generate_qoperation(mode="mprocess", name="x-type1", c_sys=c_sys)

        # Act
        qmpt = StandardQmpt(
            states=tester_states,
            povms=tester_povms,
            num_outcomes=true_object.num_outcomes,
            on_para_eq_constraint=True,
            schedules="all",
        )

        # Assert: 4 tester states + 3 tester povms
        assert len(qmpt.testers) == 7

    def test_is_valid_experiment(self):
        """``is_valid_experiment`` must flag povms defined on a foreign composite system."""
        # Arrange: a well-formed single-qubit Qmpt
        c_sys = generate_composite_system(mode="qubit", num=1)
        tester_states = [
            generate_qoperation_object(
                mode="state", object_name="state", name=n, c_sys=c_sys
            )
            for n in ("x0", "y0", "z0", "z1")
        ]
        tester_povms = [
            generate_qoperation_object(
                mode="povm", object_name="povm", name=n, c_sys=c_sys
            )
            for n in ("x", "y", "z")
        ]
        true_object = generate_qoperation(mode="mprocess", name="x-type1", c_sys=c_sys)
        qmpt = StandardQmpt(
            states=tester_states,
            povms=tester_povms,
            num_outcomes=true_object.num_outcomes,
            on_para_eq_constraint=True,
            schedules="all",
        )

        # Untouched experiment is valid
        assert qmpt.is_valid_experiment() == True

        # Swap in povms built on *different* composite systems -> invalid
        e_sys0 = ElementalSystem(0, get_normalized_pauli_basis())
        c_sys0 = CompositeSystem([e_sys0])
        e_sys1 = ElementalSystem(1, get_normalized_pauli_basis())
        c_sys1 = CompositeSystem([e_sys1])
        qmpt.experiment.povms = [
            get_x_povm(c_sys1),
            get_y_povm(c_sys0),
            get_z_povm(c_sys0),
        ]
        assert qmpt.is_valid_experiment() == False
def test_cqpt_to_cqmpt():
    """``cqpt_to_cqmpt`` should embed QPT constraint coefficients into the QMPT system.

    The expected matrices are built programmatically instead of as literal
    tables: without the equality constraint the result is a block-diagonal
    repetition of ``c_qpt``; with the constraint the last block is eliminated
    through the completeness relation.
    """
    c_qpt = np.arange(1, 17)
    dim = 2
    m = 3

    # Case 1: on_para_eq_constraint=False
    # Act
    actual_a_qmpt, actual_b_qmpt = cqpt_to_cqmpt(
        c_qpt=c_qpt, dim=dim, m_mprocess=m, on_para_eq_constraint=False
    )

    # Assert: one copy of c_qpt per outcome along the block diagonal
    expected_a_qmpt = np.kron(np.eye(m), c_qpt)
    npt.assert_almost_equal(actual_a_qmpt, expected_a_qmpt, decimal=15)
    npt.assert_almost_equal(actual_b_qmpt, np.zeros(m), decimal=15)

    # Case 2: on_para_eq_constraint=True
    # Act
    actual_a_qmpt, actual_b_qmpt = cqpt_to_cqmpt(
        c_qpt=c_qpt, dim=dim, m_mprocess=m, on_para_eq_constraint=True
    )

    # Assert: the first m-1 blocks keep c_qpt; the last outcome is expressed
    # via completeness, flipping the sign of the first dim^2 coefficients.
    c_float = c_qpt.astype(float)
    n_tail = c_qpt.size - dim ** 2  # 12 trailing coefficients per block
    top_rows = np.hstack(
        [np.kron(np.eye(m - 1), c_float), np.zeros((m - 1, n_tail))]
    )
    head = np.concatenate([-c_float[: dim ** 2], np.zeros(n_tail)])
    bottom_row = np.concatenate([head, head, c_float[dim ** 2 :]])
    expected_a_qmpt = np.vstack([top_rows, bottom_row])
    npt.assert_almost_equal(actual_a_qmpt, expected_a_qmpt, decimal=15)
    npt.assert_almost_equal(actual_b_qmpt, np.array([0, 0, 1.0]), decimal=15)
def test_set_coeffs():
    """Shapes of ``calc_matA``/``calc_vecB`` must track the parameterization choice."""
    c_sys = generate_composite_system(mode="qubit", num=1, ids_esys=[1])
    # Tester objects
    tester_states = [
        generate_qoperation_object(
            mode="state", object_name="state", name=n, c_sys=c_sys
        )
        for n in ("x0", "y0", "z0", "z1")
    ]
    tester_povms = [
        generate_qoperation_object(
            mode="povm", object_name="povm", name=n, c_sys=c_sys
        )
        for n in ("x", "y", "z")
    ]

    # Case 1: on_para_eq_constraint = True -> constrained variable count (28)
    qmpt = StandardQmpt(
        tester_states,
        tester_povms,
        num_outcomes=2,
        on_para_eq_constraint=True,
        seed_data=7,
    )
    assert qmpt.calc_matA().shape == (48, 28)
    assert qmpt.calc_vecB().shape == (48,)

    # Case 2: on_para_eq_constraint = False -> full variable count (32)
    qmpt = StandardQmpt(
        tester_states,
        tester_povms,
        num_outcomes=2,
        on_para_eq_constraint=False,
        seed_data=7,
    )
    assert qmpt.calc_matA().shape == (48, 32)
    assert qmpt.calc_vecB().shape == (48,)
def calc_prob_dist_with_experiment(source_qmpt, qope):
    """Recompute probability distributions by splicing ``qope`` into a copy of the experiment.

    Used as an independent reference against ``StandardQmpt.calc_prob_dists``.
    """
    experiment = source_qmpt._experiment.copy()
    for schedule_index, _ in enumerate(experiment.schedules):
        # Each schedule points at one mprocess slot; overwrite it with qope.
        target = source_qmpt._get_target_index(experiment, schedule_index)
        experiment.mprocesses[target] = qope
    return experiment.calc_prob_dists()
@pytest.mark.parametrize("on_para_eq_constraint", [True, False])
def test_compare_prob_dist_1qubit(on_para_eq_constraint: bool):
    """Qmpt probability distributions must match those from the raw experiment (1 qubit)."""
    # Arrange: single-qubit tester objects
    c_sys = generate_composite_system(mode="qubit", num=1, ids_esys=[1])
    tester_states = [
        generate_qoperation_object(
            mode="state", object_name="state", name=n, c_sys=c_sys
        )
        for n in ("x0", "y0", "z0", "z1")
    ]
    tester_povms = [
        generate_qoperation_object(
            mode="povm", object_name="povm", name=n, c_sys=c_sys
        )
        for n in ("x", "y", "z")
    ]
    qmpt = StandardQmpt(
        tester_states,
        tester_povms,
        num_outcomes=2,
        on_para_eq_constraint=on_para_eq_constraint,
        seed_data=7,
    )
    true_object = generate_qoperation_object(
        mode="mprocess", object_name="mprocess", name="x-type1", c_sys=c_sys
    )
    if not on_para_eq_constraint:
        true_object = MProcess(
            hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys
        )

    # Act
    actual_list = qmpt.calc_prob_dists(true_object)

    # Assert: agree with the distributions computed directly from the experiment
    expected_list = calc_prob_dist_with_experiment(qmpt, true_object)
    for actual, expected in zip(actual_list, expected_list):
        npt.assert_almost_equal(actual, expected, decimal=15)
@pytest.mark.qmpt_twoqubit
@pytest.mark.parametrize("on_para_eq_constraint", [True, False])
def test_compare_prob_dist_2qubit(on_para_eq_constraint: bool):
    """Qmpt probability distributions must match those from the raw experiment (2 qubits)."""
    # Arrange: product testers over both qubits
    c_sys = generate_composite_system(mode="qubit", num=2, ids_esys=[1, 2])
    state_names = ["x0", "y0", "z0", "z1"]
    povm_names = ["x", "y", "z"]
    tester_states = [
        generate_qoperation_object(
            mode="state", object_name="state", name=f"{a}_{b}", c_sys=c_sys
        )
        for a, b in itertools.product(state_names, repeat=2)
    ]
    tester_povms = [
        generate_qoperation_object(
            mode="povm", object_name="povm", name=f"{a}_{b}", c_sys=c_sys
        )
        for a, b in itertools.product(povm_names, repeat=2)
    ]
    true_object = generate_qoperation_object(
        mode="mprocess", object_name="mprocess", name="x-type1_x-type1", c_sys=c_sys
    )
    if not on_para_eq_constraint:
        true_object = MProcess(
            hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys
        )
    qmpt = StandardQmpt(
        tester_states,
        tester_povms,
        num_outcomes=true_object.num_outcomes,  # 4
        on_para_eq_constraint=on_para_eq_constraint,
        seed_data=7,
    )

    # Act
    actual_list = qmpt.calc_prob_dists(true_object)

    # Assert (decimal=14: the comparison fails at decimal=15)
    expected_list = calc_prob_dist_with_experiment(qmpt, true_object)
    for actual, expected in zip(actual_list, expected_list):
        npt.assert_almost_equal(actual, expected, decimal=14)
@pytest.mark.qmpt_onequtrit
@pytest.mark.parametrize("on_para_eq_constraint", [True, False])
def test_compare_prob_dist_1qutrit(on_para_eq_constraint: bool):
    """Qmpt probability distributions must match those from the raw experiment (1 qutrit)."""
    # Arrange: qutrit tester objects
    c_sys = generate_composite_system(mode="qutrit", num=1, ids_esys=[1])
    state_names = ["01z0", "12z0", "02z1", "01x0", "01y0", "12x0", "12y0", "02x0", "02y0"]
    povm_names = ["01x3", "01y3", "z3", "12x3", "12y3", "02x3", "02y3"]
    tester_states = [
        generate_qoperation_object(
            mode="state", object_name="state", name=n, c_sys=c_sys
        )
        for n in state_names
    ]
    tester_povms = [
        generate_qoperation_object(
            mode="povm", object_name="povm", name=n, c_sys=c_sys
        )
        for n in povm_names
    ]
    qmpt = StandardQmpt(
        tester_states,
        tester_povms,
        num_outcomes=3,
        on_para_eq_constraint=on_para_eq_constraint,
        seed_data=7,
    )
    true_object = generate_qoperation_object(
        mode="mprocess", object_name="mprocess", name="z3-type1", c_sys=c_sys
    )
    if not on_para_eq_constraint:
        true_object = MProcess(
            hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys
        )

    # Act
    actual_list = qmpt.calc_prob_dists(true_object)

    # Assert: agree with the distributions computed directly from the experiment
    expected_list = calc_prob_dist_with_experiment(qmpt, true_object)
    for actual, expected in zip(actual_list, expected_list):
        npt.assert_almost_equal(actual, expected, decimal=15)
@pytest.mark.parametrize(
    ("true_object_name", "on_para_eq_constraint"),
    [("z-type1", True), ("z-type1", False)],
)
def test_calc_estimate_LinearEstimator_1qubit(
    true_object_name: str, on_para_eq_constraint: bool
):
    """Linear estimation on one qubit should recover the true mprocess exactly."""
    # Arrange: single-qubit tester objects
    c_sys = generate_composite_system(mode="qubit", num=1)
    tester_states = [
        generate_qoperation_object(
            mode="state", object_name="state", name=n, c_sys=c_sys
        )
        for n in ("x0", "y0", "z0", "z1")
    ]
    tester_povms = [
        generate_qoperation_object(
            mode="povm", object_name="povm", name=n, c_sys=c_sys
        )
        for n in ("x", "y", "z")
    ]
    true_object = generate_qoperation(
        mode="mprocess", name=true_object_name, c_sys=c_sys
    )
    if not on_para_eq_constraint:
        true_object = MProcess(
            hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys
        )
    qmpt = StandardQmpt(
        states=tester_states,
        povms=tester_povms,
        num_outcomes=true_object.num_outcomes,
        on_para_eq_constraint=on_para_eq_constraint,
        schedules="all",
    )
    # Noiseless empirical distributions (nominal 10 repetitions each)
    empi_dists = [(10, d) for d in qmpt.calc_prob_dists(true_object)]

    # Act
    result = LinearEstimator().calc_estimate(
        qtomography=qmpt, empi_dists=empi_dists, is_computation_time_required=True
    )
    estimated = result.estimated_qoperation

    # Assert
    for est_hs, true_hs in zip(estimated.hss, true_object.hss):
        npt.assert_almost_equal(est_hs, true_hs, decimal=15)
@pytest.mark.qmpt_twoqubit
@pytest.mark.parametrize(
    ("true_object_name", "on_para_eq_constraint"),
    [
        ("x-type1_x-type1", True),
        ("x-type1_x-type1", False),
        ("bell-type1", True),
        ("bell-type1", False),
    ],
)
def test_calc_estimate_LinearEstimator_2qubit(
    true_object_name: str, on_para_eq_constraint: bool
):
    """Linear estimation on two qubits should recover the true mprocess."""
    # Arrange: product testers over both qubits
    c_sys = generate_composite_system(mode="qubit", num=2)
    state_names = ["x0", "y0", "z0", "z1"]
    povm_names = ["x", "y", "z"]
    tester_states = [
        generate_qoperation_object(
            mode="state", object_name="state", name=f"{a}_{b}", c_sys=c_sys
        )
        for a, b in itertools.product(state_names, repeat=2)
    ]
    tester_povms = [
        generate_qoperation_object(
            mode="povm", object_name="povm", name=f"{a}_{b}", c_sys=c_sys
        )
        for a, b in itertools.product(povm_names, repeat=2)
    ]
    true_object = generate_qoperation(
        mode="mprocess", name=true_object_name, c_sys=c_sys
    )
    if not on_para_eq_constraint:
        true_object = MProcess(
            hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys
        )
    qmpt = StandardQmpt(
        states=tester_states,
        povms=tester_povms,
        num_outcomes=true_object.num_outcomes,
        on_para_eq_constraint=on_para_eq_constraint,
        schedules="all",
    )
    # Noiseless empirical distributions
    empi_dists = [(10, d) for d in qmpt.calc_prob_dists(true_object)]

    # Act
    result = LinearEstimator().calc_estimate(
        qtomography=qmpt, empi_dists=empi_dists, is_computation_time_required=True
    )
    estimated = result.estimated_qoperation

    # Assert
    for est_hs, true_hs in zip(estimated.hss, true_object.hss):
        npt.assert_almost_equal(est_hs, true_hs, decimal=14)
@pytest.mark.qmpt_onequtrit
@pytest.mark.parametrize(
    ("true_object_name", "on_para_eq_constraint"),
    [("z3-type1", True), ("z3-type1", False), ("z2-type1", True), ("z2-type1", False)],
)
def test_calc_estimate_LinearEstimator_1qutrit(
    true_object_name: str, on_para_eq_constraint: bool
):
    """Linear estimation on one qutrit should recover the true mprocess."""
    # Arrange: qutrit tester objects
    c_sys = generate_composite_system(mode="qutrit", num=1)
    state_names = ["01z0", "12z0", "02z1", "01x0", "01y0", "12x0", "12y0", "02x0", "02y0"]
    povm_names = ["01x3", "01y3", "z3", "12x3", "12y3", "02x3", "02y3"]
    tester_states = [
        generate_qoperation_object(
            mode="state", object_name="state", name=n, c_sys=c_sys
        )
        for n in state_names
    ]
    tester_povms = [
        generate_qoperation_object(
            mode="povm", object_name="povm", name=n, c_sys=c_sys
        )
        for n in povm_names
    ]
    true_object = generate_qoperation(
        mode="mprocess", name=true_object_name, c_sys=c_sys
    )
    if not on_para_eq_constraint:
        true_object = MProcess(
            hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys
        )
    qmpt = StandardQmpt(
        states=tester_states,
        povms=tester_povms,
        num_outcomes=true_object.num_outcomes,
        on_para_eq_constraint=on_para_eq_constraint,
        schedules="all",
    )
    # Noiseless empirical distributions
    empi_dists = [(10, d) for d in qmpt.calc_prob_dists(true_object)]

    # Act
    result = LinearEstimator().calc_estimate(
        qtomography=qmpt, empi_dists=empi_dists, is_computation_time_required=True
    )
    estimated = result.estimated_qoperation

    # Assert
    for est_hs, true_hs in zip(estimated.hss, true_object.hss):
        npt.assert_almost_equal(est_hs, true_hs, decimal=14)
@pytest.mark.parametrize(
    ("true_object_name", "on_para_eq_constraint"),
    [("z-type1", True), ("z-type1", False)],
)
def test_calc_estimate_MLE_1qubit(true_object_name: str, on_para_eq_constraint: bool):
    """Maximum-likelihood estimation on one qubit should recover the true mprocess."""
    # Arrange: single-qubit tester objects
    c_sys = generate_composite_system(mode="qubit", num=1)
    tester_states = [
        generate_qoperation_object(
            mode="state", object_name="state", name=n, c_sys=c_sys
        )
        for n in ("x0", "y0", "z0", "z1")
    ]
    tester_povms = [
        generate_qoperation_object(
            mode="povm", object_name="povm", name=n, c_sys=c_sys
        )
        for n in ("x", "y", "z")
    ]
    true_object = generate_qoperation(
        mode="mprocess", name=true_object_name, c_sys=c_sys
    )
    if not on_para_eq_constraint:
        true_object = MProcess(
            hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys
        )
    qmpt = StandardQmpt(
        states=tester_states,
        povms=tester_povms,
        num_outcomes=true_object.num_outcomes,
        on_para_eq_constraint=on_para_eq_constraint,
        schedules="all",
    )
    # Noiseless empirical distributions
    empi_dists = [(10, d) for d in qmpt.calc_prob_dists(true_object)]

    # Act: MLE via weighted relative entropy + projected gradient descent
    estimator = LossMinimizationEstimator()
    loss = StandardQTomographyBasedWeightedRelativeEntropy()
    loss_option = StandardQTomographyBasedWeightedRelativeEntropyOption("identity")
    algo = ProjectedGradientDescentBacktracking()
    algo_option = ProjectedGradientDescentBacktrackingOption(
        mode_stopping_criterion_gradient_descent="sum_absolute_difference_variable",
        num_history_stopping_criterion_gradient_descent=1,
        eps=1e-9,
    )
    result = estimator.calc_estimate(
        qtomography=qmpt,
        empi_dists=empi_dists,
        loss=loss,
        loss_option=loss_option,
        algo=algo,
        algo_option=algo_option,
        is_computation_time_required=True,
    )
    estimated = result.estimated_qoperation

    # Assert
    for est_hs, true_hs in zip(estimated.hss, true_object.hss):
        npt.assert_almost_equal(est_hs, true_hs, decimal=6)
@pytest.mark.qmpt_twoqubit
@pytest.mark.parametrize(
    ("true_object_name", "on_para_eq_constraint"),
    [
        ("x-type1_x-type1", True),
        ("x-type1_x-type1", False),
        ("bell-type1", True),
        ("bell-type1", False),
    ],
)
def test_calc_estimate_MLE_2qubit(true_object_name: str, on_para_eq_constraint: bool):
    """Maximum-likelihood estimation on two qubits should approximate the true mprocess."""
    # Arrange: product testers over both qubits
    c_sys = generate_composite_system(mode="qubit", num=2)
    state_names = ["x0", "y0", "z0", "z1"]
    povm_names = ["x", "y", "z"]
    tester_states = [
        generate_qoperation_object(
            mode="state", object_name="state", name=f"{a}_{b}", c_sys=c_sys
        )
        for a, b in itertools.product(state_names, repeat=2)
    ]
    tester_povms = [
        generate_qoperation_object(
            mode="povm", object_name="povm", name=f"{a}_{b}", c_sys=c_sys
        )
        for a, b in itertools.product(povm_names, repeat=2)
    ]
    true_object = generate_qoperation(
        mode="mprocess", name=true_object_name, c_sys=c_sys
    )
    if not on_para_eq_constraint:
        true_object = MProcess(
            hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys
        )
    qmpt = StandardQmpt(
        states=tester_states,
        povms=tester_povms,
        num_outcomes=true_object.num_outcomes,
        on_para_eq_constraint=on_para_eq_constraint,
        eps_proj_physical=1e-5,
        eps_truncate_imaginary_part=1e-12,
        schedules="all",
    )
    # Noiseless empirical distributions
    empi_dists = [(10, d) for d in qmpt.calc_prob_dists(true_object)]

    # Act: MLE via weighted relative entropy + projected gradient descent
    estimator = LossMinimizationEstimator()
    loss = StandardQTomographyBasedWeightedRelativeEntropy()
    loss_option = StandardQTomographyBasedWeightedRelativeEntropyOption("identity")
    algo = ProjectedGradientDescentBacktracking()
    algo_option = ProjectedGradientDescentBacktrackingOption(
        mode_stopping_criterion_gradient_descent="sum_absolute_difference_variable",
        num_history_stopping_criterion_gradient_descent=1,
        eps=1e-9,
    )
    result = estimator.calc_estimate(
        qtomography=qmpt,
        empi_dists=empi_dists,
        loss=loss,
        loss_option=loss_option,
        algo=algo,
        algo_option=algo_option,
        is_computation_time_required=True,
    )
    estimated = result.estimated_qoperation

    # Assert (loose tolerance: optimization on 2 qubits converges coarsely)
    for est_hs, true_hs in zip(estimated.hss, true_object.hss):
        npt.assert_almost_equal(est_hs, true_hs, decimal=1)
@pytest.mark.qmpt_onequtrit
@pytest.mark.parametrize(
    ("true_object_name", "on_para_eq_constraint"),
    [("z3-type1", True), ("z3-type1", False), ("z2-type1", True), ("z2-type1", False)],
)
def test_calc_estimate_MLE_1qutrit(true_object_name: str, on_para_eq_constraint: bool):
    """Maximum-likelihood estimation on one qutrit should approximate the true mprocess."""
    # Arrange: qutrit tester objects
    c_sys = generate_composite_system(mode="qutrit", num=1)
    state_names = ["01z0", "12z0", "02z1", "01x0", "01y0", "12x0", "12y0", "02x0", "02y0"]
    povm_names = ["01x3", "01y3", "z3", "12x3", "12y3", "02x3", "02y3"]
    tester_states = [
        generate_qoperation_object(
            mode="state", object_name="state", name=n, c_sys=c_sys
        )
        for n in state_names
    ]
    tester_povms = [
        generate_qoperation_object(
            mode="povm", object_name="povm", name=n, c_sys=c_sys
        )
        for n in povm_names
    ]
    true_object = generate_qoperation(
        mode="mprocess", name=true_object_name, c_sys=c_sys
    )
    if not on_para_eq_constraint:
        true_object = MProcess(
            hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys
        )
    qmpt = StandardQmpt(
        states=tester_states,
        povms=tester_povms,
        num_outcomes=true_object.num_outcomes,
        on_para_eq_constraint=on_para_eq_constraint,
        eps_proj_physical=1e-5,
        schedules="all",
    )
    # Noiseless empirical distributions
    empi_dists = [(10, d) for d in qmpt.calc_prob_dists(true_object)]

    # Act: MLE via weighted relative entropy + projected gradient descent
    estimator = LossMinimizationEstimator()
    loss = StandardQTomographyBasedWeightedRelativeEntropy()
    loss_option = StandardQTomographyBasedWeightedRelativeEntropyOption("identity")
    algo = ProjectedGradientDescentBacktracking()
    algo_option = ProjectedGradientDescentBacktrackingOption(
        mode_stopping_criterion_gradient_descent="sum_absolute_difference_variable",
        num_history_stopping_criterion_gradient_descent=1,
        eps=1e-9,
    )
    result = estimator.calc_estimate(
        qtomography=qmpt,
        empi_dists=empi_dists,
        loss=loss,
        loss_option=loss_option,
        algo=algo,
        algo_option=algo_option,
        is_computation_time_required=True,
    )
    estimated = result.estimated_qoperation

    # Assert (loose tolerance: qutrit optimization converges coarsely)
    for est_hs, true_hs in zip(estimated.hss, true_object.hss):
        npt.assert_almost_equal(est_hs, true_hs, decimal=1)
@pytest.mark.parametrize(
    ("true_object_name", "on_para_eq_constraint"),
    [("z-type1", True), ("z-type1", False)],
)
def test_calc_estimate_LSE_1qubit(true_object_name: str, on_para_eq_constraint: bool):
    """Least-squares estimation on one qubit should recover the true mprocess."""
    # Arrange: single-qubit tester objects
    c_sys = generate_composite_system(mode="qubit", num=1)
    tester_states = [
        generate_qoperation_object(
            mode="state", object_name="state", name=n, c_sys=c_sys
        )
        for n in ("x0", "y0", "z0", "z1")
    ]
    tester_povms = [
        generate_qoperation_object(
            mode="povm", object_name="povm", name=n, c_sys=c_sys
        )
        for n in ("x", "y", "z")
    ]
    true_object = generate_qoperation(
        mode="mprocess", name=true_object_name, c_sys=c_sys
    )
    if not on_para_eq_constraint:
        true_object = MProcess(
            hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys
        )
    qmpt = StandardQmpt(
        states=tester_states,
        povms=tester_povms,
        num_outcomes=true_object.num_outcomes,
        on_para_eq_constraint=on_para_eq_constraint,
        schedules="all",
    )
    # Noiseless empirical distributions
    empi_dists = [(10, d) for d in qmpt.calc_prob_dists(true_object)]

    # Act: weighted probability-based squared error + projected gradient descent
    estimator = LossMinimizationEstimator()
    loss = StandardQTomographyBasedWeightedProbabilityBasedSquaredError()
    loss_option = StandardQTomographyBasedWeightedProbabilityBasedSquaredErrorOption(
        "identity"
    )
    algo = ProjectedGradientDescentBacktracking()
    algo_option = ProjectedGradientDescentBacktrackingOption(
        mode_stopping_criterion_gradient_descent="sum_absolute_difference_variable",
        num_history_stopping_criterion_gradient_descent=1,
        eps=1e-9,
    )
    result = estimator.calc_estimate(
        qtomography=qmpt,
        empi_dists=empi_dists,
        loss=loss,
        loss_option=loss_option,
        algo=algo,
        algo_option=algo_option,
        is_computation_time_required=True,
    )
    estimated = result.estimated_qoperation

    # Assert
    for est_hs, true_hs in zip(estimated.hss, true_object.hss):
        npt.assert_almost_equal(est_hs, true_hs, decimal=7)
@pytest.mark.qmpt_twoqubit
@pytest.mark.parametrize(
    ("true_object_name", "on_para_eq_constraint"),
    [
        ("x-type1_x-type1", True),
        ("x-type1_x-type1", False),
        ("bell-type1", True),
        ("bell-type1", False),
    ],
)
def test_calc_estimate_LSE_2qubit(true_object_name: str, on_para_eq_constraint: bool):
    """Least-squares estimation on two qubits should approximate the true mprocess."""
    # Arrange: product testers over both qubits
    c_sys = generate_composite_system(mode="qubit", num=2)
    state_names = ["x0", "y0", "z0", "z1"]
    povm_names = ["x", "y", "z"]
    tester_states = [
        generate_qoperation_object(
            mode="state", object_name="state", name=f"{a}_{b}", c_sys=c_sys
        )
        for a, b in itertools.product(state_names, repeat=2)
    ]
    tester_povms = [
        generate_qoperation_object(
            mode="povm", object_name="povm", name=f"{a}_{b}", c_sys=c_sys
        )
        for a, b in itertools.product(povm_names, repeat=2)
    ]
    true_object = generate_qoperation(
        mode="mprocess", name=true_object_name, c_sys=c_sys
    )
    if not on_para_eq_constraint:
        true_object = MProcess(
            hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys
        )
    qmpt = StandardQmpt(
        states=tester_states,
        povms=tester_povms,
        num_outcomes=true_object.num_outcomes,
        on_para_eq_constraint=on_para_eq_constraint,
        eps_proj_physical=1e-5,
        schedules="all",
    )
    # Noiseless empirical distributions
    empi_dists = [(10, d) for d in qmpt.calc_prob_dists(true_object)]

    # Act: weighted probability-based squared error + projected gradient descent
    estimator = LossMinimizationEstimator()
    loss = StandardQTomographyBasedWeightedProbabilityBasedSquaredError()
    loss_option = StandardQTomographyBasedWeightedProbabilityBasedSquaredErrorOption(
        "identity"
    )
    algo = ProjectedGradientDescentBacktracking()
    algo_option = ProjectedGradientDescentBacktrackingOption(
        mode_stopping_criterion_gradient_descent="sum_absolute_difference_variable",
        num_history_stopping_criterion_gradient_descent=1,
        eps=1e-9,
    )
    result = estimator.calc_estimate(
        qtomography=qmpt,
        empi_dists=empi_dists,
        loss=loss,
        loss_option=loss_option,
        algo=algo,
        algo_option=algo_option,
        is_computation_time_required=True,
    )
    estimated = result.estimated_qoperation

    # Assert (loose tolerance: optimization on 2 qubits converges coarsely)
    for est_hs, true_hs in zip(estimated.hss, true_object.hss):
        npt.assert_almost_equal(est_hs, true_hs, decimal=2)
@pytest.mark.qmpt_onequtrit
@pytest.mark.parametrize(
    ("true_object_name", "on_para_eq_constraint"),
    [("z3-type1", True), ("z3-type1", False), ("z2-type1", True), ("z2-type1", False)],
)
def test_calc_estimate_LSE_1qutrit(true_object_name: str, on_para_eq_constraint: bool):
    """Least-squares estimation on one qutrit should approximate the true mprocess."""
    # Arrange: qutrit tester objects
    c_sys = generate_composite_system(mode="qutrit", num=1)
    state_names = ["01z0", "12z0", "02z1", "01x0", "01y0", "12x0", "12y0", "02x0", "02y0"]
    povm_names = ["01x3", "01y3", "z3", "12x3", "12y3", "02x3", "02y3"]
    tester_states = [
        generate_qoperation_object(
            mode="state", object_name="state", name=n, c_sys=c_sys
        )
        for n in state_names
    ]
    tester_povms = [
        generate_qoperation_object(
            mode="povm", object_name="povm", name=n, c_sys=c_sys
        )
        for n in povm_names
    ]
    true_object = generate_qoperation(
        mode="mprocess", name=true_object_name, c_sys=c_sys
    )
    if not on_para_eq_constraint:
        true_object = MProcess(
            hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys
        )
    qmpt = StandardQmpt(
        states=tester_states,
        povms=tester_povms,
        num_outcomes=true_object.num_outcomes,
        on_para_eq_constraint=on_para_eq_constraint,
        eps_proj_physical=1e-5,
        schedules="all",
    )
    # Noiseless empirical distributions
    empi_dists = [(10, d) for d in qmpt.calc_prob_dists(true_object)]

    # Act: weighted probability-based squared error + projected gradient descent
    estimator = LossMinimizationEstimator()
    loss = StandardQTomographyBasedWeightedProbabilityBasedSquaredError()
    loss_option = StandardQTomographyBasedWeightedProbabilityBasedSquaredErrorOption(
        "identity"
    )
    algo = ProjectedGradientDescentBacktracking()
    algo_option = ProjectedGradientDescentBacktrackingOption(
        mode_stopping_criterion_gradient_descent="sum_absolute_difference_variable",
        num_history_stopping_criterion_gradient_descent=1,
        eps=1e-9,
    )
    result = estimator.calc_estimate(
        qtomography=qmpt,
        empi_dists=empi_dists,
        loss=loss,
        loss_option=loss_option,
        algo=algo,
        algo_option=algo_option,
        is_computation_time_required=True,
    )
    estimated = result.estimated_qoperation

    # Assert (loose tolerance: qutrit optimization converges coarsely)
    for est_hs, true_hs in zip(estimated.hss, true_object.hss):
        npt.assert_almost_equal(est_hs, true_hs, decimal=1)
| [
"quara.minimization_algorithm.projected_gradient_descent_backtracking.ProjectedGradientDescentBacktracking",
"quara.protocol.qtomography.standard.standard_qmpt.StandardQmpt",
"numpy.array",
"quara.objects.composite_system.CompositeSystem",
"quara.objects.composite_system_typical.generate_composite_system",
... | [((12343, 12406), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""on_para_eq_constraint"""', '[True, False]'], {}), "('on_para_eq_constraint', [True, False])\n", (12366, 12406), False, 'import pytest\n'), ((13872, 13935), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""on_para_eq_constraint"""', '[True, False]'], {}), "('on_para_eq_constraint', [True, False])\n", (13895, 13935), False, 'import pytest\n'), ((15566, 15629), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""on_para_eq_constraint"""', '[True, False]'], {}), "('on_para_eq_constraint', [True, False])\n", (15589, 15629), False, 'import pytest\n'), ((17211, 17327), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('true_object_name', 'on_para_eq_constraint')", "[('z-type1', True), ('z-type1', False)]"], {}), "(('true_object_name', 'on_para_eq_constraint'), [(\n 'z-type1', True), ('z-type1', False)])\n", (17234, 17327), False, 'import pytest\n'), ((18998, 19180), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('true_object_name', 'on_para_eq_constraint')", "[('x-type1_x-type1', True), ('x-type1_x-type1', False), ('bell-type1', True\n ), ('bell-type1', False)]"], {}), "(('true_object_name', 'on_para_eq_constraint'), [(\n 'x-type1_x-type1', True), ('x-type1_x-type1', False), ('bell-type1', \n True), ('bell-type1', False)])\n", (19021, 19180), False, 'import pytest\n'), ((20974, 21137), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('true_object_name', 'on_para_eq_constraint')", "[('z3-type1', True), ('z3-type1', False), ('z2-type1', True), ('z2-type1', \n False)]"], {}), "(('true_object_name', 'on_para_eq_constraint'), [(\n 'z3-type1', True), ('z3-type1', False), ('z2-type1', True), ('z2-type1',\n False)])\n", (20997, 21137), False, 'import pytest\n'), ((22945, 23061), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('true_object_name', 'on_para_eq_constraint')", "[('z-type1', True), ('z-type1', False)]"], {}), 
"(('true_object_name', 'on_para_eq_constraint'), [(\n 'z-type1', True), ('z-type1', False)])\n", (22968, 23061), False, 'import pytest\n'), ((25303, 25485), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('true_object_name', 'on_para_eq_constraint')", "[('x-type1_x-type1', True), ('x-type1_x-type1', False), ('bell-type1', True\n ), ('bell-type1', False)]"], {}), "(('true_object_name', 'on_para_eq_constraint'), [(\n 'x-type1_x-type1', True), ('x-type1_x-type1', False), ('bell-type1', \n True), ('bell-type1', False)])\n", (25326, 25485), False, 'import pytest\n'), ((27891, 28054), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('true_object_name', 'on_para_eq_constraint')", "[('z3-type1', True), ('z3-type1', False), ('z2-type1', True), ('z2-type1', \n False)]"], {}), "(('true_object_name', 'on_para_eq_constraint'), [(\n 'z3-type1', True), ('z3-type1', False), ('z2-type1', True), ('z2-type1',\n False)])\n", (27914, 28054), False, 'import pytest\n'), ((30431, 30547), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('true_object_name', 'on_para_eq_constraint')", "[('z-type1', True), ('z-type1', False)]"], {}), "(('true_object_name', 'on_para_eq_constraint'), [(\n 'z-type1', True), ('z-type1', False)])\n", (30454, 30547), False, 'import pytest\n'), ((32829, 33011), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('true_object_name', 'on_para_eq_constraint')", "[('x-type1_x-type1', True), ('x-type1_x-type1', False), ('bell-type1', True\n ), ('bell-type1', False)]"], {}), "(('true_object_name', 'on_para_eq_constraint'), [(\n 'x-type1_x-type1', True), ('x-type1_x-type1', False), ('bell-type1', \n True), ('bell-type1', False)])\n", (32852, 33011), False, 'import pytest\n'), ((35414, 35577), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('true_object_name', 'on_para_eq_constraint')", "[('z3-type1', True), ('z3-type1', False), ('z2-type1', True), ('z2-type1', \n False)]"], {}), "(('true_object_name', 'on_para_eq_constraint'), 
[(\n 'z3-type1', True), ('z3-type1', False), ('z2-type1', True), ('z2-type1',\n False)])\n", (35437, 35577), False, 'import pytest\n'), ((4445, 4523), 'quara.protocol.qtomography.standard.standard_qmpt.cqpt_to_cqmpt', 'cqpt_to_cqmpt', ([], {'c_qpt': 'c_qpt', 'dim': 'dim', 'm_mprocess': 'm', 'on_para_eq_constraint': '(False)'}), '(c_qpt=c_qpt, dim=dim, m_mprocess=m, on_para_eq_constraint=False)\n', (4458, 4523), False, 'from quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, StandardQmpt\n'), ((4574, 5070), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, \n 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,\n 11, 12, 13, 14, 15, 16]]'], {}), '([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, \n 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7,\n 8, 9, 10, 11, 12, 13, 14, 15, 16]])\n', (4582, 5070), True, 'import numpy as np\n'), ((7458, 7525), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['actual_a_qmpt', 'expected_a_qmpt'], {'decimal': '(15)'}), '(actual_a_qmpt, expected_a_qmpt, decimal=15)\n', (7481, 7525), True, 'import numpy.testing as npt\n'), ((7549, 7570), 'numpy.array', 'np.array', (['[0, 0, 0.0]'], {}), '([0, 0, 0.0])\n', (7557, 7570), True, 'import numpy as np\n'), ((7575, 7642), 'numpy.testing.assert_almost_equal', 
'npt.assert_almost_equal', (['actual_b_qmpt', 'expected_b_qmpt'], {'decimal': '(15)'}), '(actual_b_qmpt, expected_b_qmpt, decimal=15)\n', (7598, 7642), True, 'import numpy.testing as npt\n'), ((7730, 7807), 'quara.protocol.qtomography.standard.standard_qmpt.cqpt_to_cqmpt', 'cqpt_to_cqmpt', ([], {'c_qpt': 'c_qpt', 'dim': 'dim', 'm_mprocess': 'm', 'on_para_eq_constraint': '(True)'}), '(c_qpt=c_qpt, dim=dim, m_mprocess=m, on_para_eq_constraint=True)\n', (7743, 7807), False, 'from quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, StandardQmpt\n'), ((7857, 8332), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0], [0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0], [-1,\n -2, -3, -4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -2, -3, -4, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16.0]]'], {}), '([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0.0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6,\n 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0.0], [-1, -2, -3, -4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -2, -3, \n -4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 10, 11, 12, 13, \n 14, 15, 16.0]])\n', (7865, 8332), True, 'import numpy as np\n'), ((10527, 10594), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['actual_a_qmpt', 'expected_a_qmpt'], {'decimal': '(15)'}), '(actual_a_qmpt, expected_a_qmpt, decimal=15)\n', (10550, 10594), True, 'import numpy.testing as npt\n'), ((10617, 10638), 'numpy.array', 'np.array', (['[0, 0, 1.0]'], {}), '([0, 0, 1.0])\n', (10625, 10638), True, 'import numpy as np\n'), ((10643, 10710), 
'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['actual_b_qmpt', 'expected_b_qmpt'], {'decimal': '(15)'}), '(actual_b_qmpt, expected_b_qmpt, decimal=15)\n', (10666, 10710), True, 'import numpy.testing as npt\n'), ((10748, 10808), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', ([], {'mode': '"""qubit"""', 'num': '(1)', 'ids_esys': '[1]'}), "(mode='qubit', num=1, ids_esys=[1])\n", (10773, 10808), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((11308, 11438), 'quara.protocol.qtomography.standard.standard_qmpt.StandardQmpt', 'StandardQmpt', (['tester_states', 'tester_povms'], {'num_outcomes': 'num_outcomes', 'on_para_eq_constraint': 'on_para_eq_constraint', 'seed_data': '(7)'}), '(tester_states, tester_povms, num_outcomes=num_outcomes,\n on_para_eq_constraint=on_para_eq_constraint, seed_data=7)\n', (11320, 11438), False, 'from quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, StandardQmpt\n'), ((11702, 11832), 'quara.protocol.qtomography.standard.standard_qmpt.StandardQmpt', 'StandardQmpt', (['tester_states', 'tester_povms'], {'num_outcomes': 'num_outcomes', 'on_para_eq_constraint': 'on_para_eq_constraint', 'seed_data': '(7)'}), '(tester_states, tester_povms, num_outcomes=num_outcomes,\n on_para_eq_constraint=on_para_eq_constraint, seed_data=7)\n', (11714, 11832), False, 'from quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, StandardQmpt\n'), ((12503, 12563), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', ([], {'mode': '"""qubit"""', 'num': '(1)', 'ids_esys': '[1]'}), "(mode='qubit', num=1, ids_esys=[1])\n", (12528, 12563), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((13055, 13185), 'quara.protocol.qtomography.standard.standard_qmpt.StandardQmpt', 'StandardQmpt', (['tester_states', 'tester_povms'], {'num_outcomes': 
'num_outcomes', 'on_para_eq_constraint': 'on_para_eq_constraint', 'seed_data': '(7)'}), '(tester_states, tester_povms, num_outcomes=num_outcomes,\n on_para_eq_constraint=on_para_eq_constraint, seed_data=7)\n', (13067, 13185), False, 'from quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, StandardQmpt\n'), ((13298, 13406), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""mprocess"""', 'object_name': '"""mprocess"""', 'name': 'true_object_name', 'c_sys': 'c_sys'}), "(mode='mprocess', object_name='mprocess', name=\n true_object_name, c_sys=c_sys)\n", (13324, 13406), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((14018, 14081), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', ([], {'mode': '"""qubit"""', 'num': '(2)', 'ids_esys': '[1, 2]'}), "(mode='qubit', num=2, ids_esys=[1, 2])\n", (14043, 14081), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((14682, 14790), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""mprocess"""', 'object_name': '"""mprocess"""', 'name': 'true_object_name', 'c_sys': 'c_sys'}), "(mode='mprocess', object_name='mprocess', name=\n true_object_name, c_sys=c_sys)\n", (14708, 14790), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((15035, 15165), 'quara.protocol.qtomography.standard.standard_qmpt.StandardQmpt', 'StandardQmpt', (['tester_states', 'tester_povms'], {'num_outcomes': 'num_outcomes', 'on_para_eq_constraint': 'on_para_eq_constraint', 'seed_data': '(7)'}), '(tester_states, tester_povms, num_outcomes=num_outcomes,\n on_para_eq_constraint=on_para_eq_constraint, seed_data=7)\n', (15047, 15165), False, 'from quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, 
StandardQmpt\n'), ((15713, 15774), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', ([], {'mode': '"""qutrit"""', 'num': '(1)', 'ids_esys': '[1]'}), "(mode='qutrit', num=1, ids_esys=[1])\n", (15738, 15774), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((16421, 16551), 'quara.protocol.qtomography.standard.standard_qmpt.StandardQmpt', 'StandardQmpt', (['tester_states', 'tester_povms'], {'num_outcomes': 'num_outcomes', 'on_para_eq_constraint': 'on_para_eq_constraint', 'seed_data': '(7)'}), '(tester_states, tester_povms, num_outcomes=num_outcomes,\n on_para_eq_constraint=on_para_eq_constraint, seed_data=7)\n', (16433, 16551), False, 'from quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, StandardQmpt\n'), ((16666, 16774), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""mprocess"""', 'object_name': '"""mprocess"""', 'name': 'true_object_name', 'c_sys': 'c_sys'}), "(mode='mprocess', object_name='mprocess', name=\n true_object_name, c_sys=c_sys)\n", (16692, 16774), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((17484, 17539), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', ([], {'mode': '"""qubit"""', 'num': 'num_qubits'}), "(mode='qubit', num=num_qubits)\n", (17509, 17539), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((18024, 18096), 'quara.objects.qoperation_typical.generate_qoperation', 'generate_qoperation', ([], {'mode': '"""mprocess"""', 'name': 'true_object_name', 'c_sys': 'c_sys'}), "(mode='mprocess', name=true_object_name, c_sys=c_sys)\n", (18043, 18096), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((18289, 18453), 
'quara.protocol.qtomography.standard.standard_qmpt.StandardQmpt', 'StandardQmpt', ([], {'states': 'tester_states', 'povms': 'tester_povms', 'num_outcomes': 'true_object.num_outcomes', 'on_para_eq_constraint': 'on_para_eq_constraint', 'schedules': '"""all"""'}), "(states=tester_states, povms=tester_povms, num_outcomes=\n true_object.num_outcomes, on_para_eq_constraint=on_para_eq_constraint,\n schedules='all')\n", (18301, 18453), False, 'from quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, StandardQmpt\n'), ((18657, 18674), 'quara.protocol.qtomography.standard.linear_estimator.LinearEstimator', 'LinearEstimator', ([], {}), '()\n', (18672, 18674), False, 'from quara.protocol.qtomography.standard.linear_estimator import LinearEstimator\n'), ((19371, 19426), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', ([], {'mode': '"""qubit"""', 'num': 'num_qubits'}), "(mode='qubit', num=num_qubits)\n", (19396, 19426), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((19999, 20071), 'quara.objects.qoperation_typical.generate_qoperation', 'generate_qoperation', ([], {'mode': '"""mprocess"""', 'name': 'true_object_name', 'c_sys': 'c_sys'}), "(mode='mprocess', name=true_object_name, c_sys=c_sys)\n", (20018, 20071), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((20264, 20428), 'quara.protocol.qtomography.standard.standard_qmpt.StandardQmpt', 'StandardQmpt', ([], {'states': 'tester_states', 'povms': 'tester_povms', 'num_outcomes': 'true_object.num_outcomes', 'on_para_eq_constraint': 'on_para_eq_constraint', 'schedules': '"""all"""'}), "(states=tester_states, povms=tester_povms, num_outcomes=\n true_object.num_outcomes, on_para_eq_constraint=on_para_eq_constraint,\n schedules='all')\n", (20276, 20428), False, 'from quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, StandardQmpt\n'), ((20632, 
20649), 'quara.protocol.qtomography.standard.linear_estimator.LinearEstimator', 'LinearEstimator', ([], {}), '()\n', (20647, 20649), False, 'from quara.protocol.qtomography.standard.linear_estimator import LinearEstimator\n'), ((21291, 21347), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', ([], {'mode': '"""qutrit"""', 'num': 'num_qubits'}), "(mode='qutrit', num=num_qubits)\n", (21316, 21347), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((21998, 22070), 'quara.objects.qoperation_typical.generate_qoperation', 'generate_qoperation', ([], {'mode': '"""mprocess"""', 'name': 'true_object_name', 'c_sys': 'c_sys'}), "(mode='mprocess', name=true_object_name, c_sys=c_sys)\n", (22017, 22070), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((22263, 22427), 'quara.protocol.qtomography.standard.standard_qmpt.StandardQmpt', 'StandardQmpt', ([], {'states': 'tester_states', 'povms': 'tester_povms', 'num_outcomes': 'true_object.num_outcomes', 'on_para_eq_constraint': 'on_para_eq_constraint', 'schedules': '"""all"""'}), "(states=tester_states, povms=tester_povms, num_outcomes=\n true_object.num_outcomes, on_para_eq_constraint=on_para_eq_constraint,\n schedules='all')\n", (22275, 22427), False, 'from quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, StandardQmpt\n'), ((22631, 22648), 'quara.protocol.qtomography.standard.linear_estimator.LinearEstimator', 'LinearEstimator', ([], {}), '()\n', (22646, 22648), False, 'from quara.protocol.qtomography.standard.linear_estimator import LinearEstimator\n'), ((23200, 23255), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', ([], {'mode': '"""qubit"""', 'num': 'num_qubits'}), "(mode='qubit', num=num_qubits)\n", (23225, 23255), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((23740, 
23812), 'quara.objects.qoperation_typical.generate_qoperation', 'generate_qoperation', ([], {'mode': '"""mprocess"""', 'name': 'true_object_name', 'c_sys': 'c_sys'}), "(mode='mprocess', name=true_object_name, c_sys=c_sys)\n", (23759, 23812), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((24005, 24169), 'quara.protocol.qtomography.standard.standard_qmpt.StandardQmpt', 'StandardQmpt', ([], {'states': 'tester_states', 'povms': 'tester_povms', 'num_outcomes': 'true_object.num_outcomes', 'on_para_eq_constraint': 'on_para_eq_constraint', 'schedules': '"""all"""'}), "(states=tester_states, povms=tester_povms, num_outcomes=\n true_object.num_outcomes, on_para_eq_constraint=on_para_eq_constraint,\n schedules='all')\n", (24017, 24169), False, 'from quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, StandardQmpt\n'), ((24407, 24434), 'quara.protocol.qtomography.standard.loss_minimization_estimator.LossMinimizationEstimator', 'LossMinimizationEstimator', ([], {}), '()\n', (24432, 24434), False, 'from quara.protocol.qtomography.standard.loss_minimization_estimator import LossMinimizationEstimator\n'), ((24446, 24495), 'quara.loss_function.standard_qtomography_based_weighted_relative_entropy.StandardQTomographyBasedWeightedRelativeEntropy', 'StandardQTomographyBasedWeightedRelativeEntropy', ([], {}), '()\n', (24493, 24495), False, 'from quara.loss_function.standard_qtomography_based_weighted_relative_entropy import StandardQTomographyBasedWeightedRelativeEntropy, StandardQTomographyBasedWeightedRelativeEntropyOption\n'), ((24514, 24579), 'quara.loss_function.standard_qtomography_based_weighted_relative_entropy.StandardQTomographyBasedWeightedRelativeEntropyOption', 'StandardQTomographyBasedWeightedRelativeEntropyOption', (['"""identity"""'], {}), "('identity')\n", (24567, 24579), False, 'from quara.loss_function.standard_qtomography_based_weighted_relative_entropy import 
StandardQTomographyBasedWeightedRelativeEntropy, StandardQTomographyBasedWeightedRelativeEntropyOption\n'), ((24591, 24629), 'quara.minimization_algorithm.projected_gradient_descent_backtracking.ProjectedGradientDescentBacktracking', 'ProjectedGradientDescentBacktracking', ([], {}), '()\n', (24627, 24629), False, 'from quara.minimization_algorithm.projected_gradient_descent_backtracking import ProjectedGradientDescentBacktracking, ProjectedGradientDescentBacktrackingOption\n'), ((24648, 24843), 'quara.minimization_algorithm.projected_gradient_descent_backtracking.ProjectedGradientDescentBacktrackingOption', 'ProjectedGradientDescentBacktrackingOption', ([], {'mode_stopping_criterion_gradient_descent': '"""sum_absolute_difference_variable"""', 'num_history_stopping_criterion_gradient_descent': '(1)', 'eps': '(1e-09)'}), "(\n mode_stopping_criterion_gradient_descent=\n 'sum_absolute_difference_variable',\n num_history_stopping_criterion_gradient_descent=1, eps=1e-09)\n", (24690, 24843), False, 'from quara.minimization_algorithm.projected_gradient_descent_backtracking import ProjectedGradientDescentBacktracking, ProjectedGradientDescentBacktrackingOption\n'), ((25658, 25713), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', ([], {'mode': '"""qubit"""', 'num': 'num_qubits'}), "(mode='qubit', num=num_qubits)\n", (25683, 25713), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((26286, 26358), 'quara.objects.qoperation_typical.generate_qoperation', 'generate_qoperation', ([], {'mode': '"""mprocess"""', 'name': 'true_object_name', 'c_sys': 'c_sys'}), "(mode='mprocess', name=true_object_name, c_sys=c_sys)\n", (26305, 26358), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((26551, 26780), 'quara.protocol.qtomography.standard.standard_qmpt.StandardQmpt', 'StandardQmpt', ([], {'states': 'tester_states', 'povms': 'tester_povms', 
'num_outcomes': 'true_object.num_outcomes', 'on_para_eq_constraint': 'on_para_eq_constraint', 'eps_proj_physical': '(1e-05)', 'eps_truncate_imaginary_part': '(1e-12)', 'schedules': '"""all"""'}), "(states=tester_states, povms=tester_povms, num_outcomes=\n true_object.num_outcomes, on_para_eq_constraint=on_para_eq_constraint,\n eps_proj_physical=1e-05, eps_truncate_imaginary_part=1e-12, schedules='all'\n )\n", (26563, 26780), False, 'from quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, StandardQmpt\n'), ((26994, 27021), 'quara.protocol.qtomography.standard.loss_minimization_estimator.LossMinimizationEstimator', 'LossMinimizationEstimator', ([], {}), '()\n', (27019, 27021), False, 'from quara.protocol.qtomography.standard.loss_minimization_estimator import LossMinimizationEstimator\n'), ((27033, 27082), 'quara.loss_function.standard_qtomography_based_weighted_relative_entropy.StandardQTomographyBasedWeightedRelativeEntropy', 'StandardQTomographyBasedWeightedRelativeEntropy', ([], {}), '()\n', (27080, 27082), False, 'from quara.loss_function.standard_qtomography_based_weighted_relative_entropy import StandardQTomographyBasedWeightedRelativeEntropy, StandardQTomographyBasedWeightedRelativeEntropyOption\n'), ((27101, 27166), 'quara.loss_function.standard_qtomography_based_weighted_relative_entropy.StandardQTomographyBasedWeightedRelativeEntropyOption', 'StandardQTomographyBasedWeightedRelativeEntropyOption', (['"""identity"""'], {}), "('identity')\n", (27154, 27166), False, 'from quara.loss_function.standard_qtomography_based_weighted_relative_entropy import StandardQTomographyBasedWeightedRelativeEntropy, StandardQTomographyBasedWeightedRelativeEntropyOption\n'), ((27178, 27216), 'quara.minimization_algorithm.projected_gradient_descent_backtracking.ProjectedGradientDescentBacktracking', 'ProjectedGradientDescentBacktracking', ([], {}), '()\n', (27214, 27216), False, 'from quara.minimization_algorithm.projected_gradient_descent_backtracking 
import ProjectedGradientDescentBacktracking, ProjectedGradientDescentBacktrackingOption\n'), ((27235, 27430), 'quara.minimization_algorithm.projected_gradient_descent_backtracking.ProjectedGradientDescentBacktrackingOption', 'ProjectedGradientDescentBacktrackingOption', ([], {'mode_stopping_criterion_gradient_descent': '"""sum_absolute_difference_variable"""', 'num_history_stopping_criterion_gradient_descent': '(1)', 'eps': '(1e-09)'}), "(\n mode_stopping_criterion_gradient_descent=\n 'sum_absolute_difference_variable',\n num_history_stopping_criterion_gradient_descent=1, eps=1e-09)\n", (27277, 27430), False, 'from quara.minimization_algorithm.projected_gradient_descent_backtracking import ProjectedGradientDescentBacktracking, ProjectedGradientDescentBacktrackingOption\n'), ((28190, 28246), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', ([], {'mode': '"""qutrit"""', 'num': 'num_qubits'}), "(mode='qutrit', num=num_qubits)\n", (28215, 28246), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((28897, 28969), 'quara.objects.qoperation_typical.generate_qoperation', 'generate_qoperation', ([], {'mode': '"""mprocess"""', 'name': 'true_object_name', 'c_sys': 'c_sys'}), "(mode='mprocess', name=true_object_name, c_sys=c_sys)\n", (28916, 28969), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((29162, 29351), 'quara.protocol.qtomography.standard.standard_qmpt.StandardQmpt', 'StandardQmpt', ([], {'states': 'tester_states', 'povms': 'tester_povms', 'num_outcomes': 'true_object.num_outcomes', 'on_para_eq_constraint': 'on_para_eq_constraint', 'eps_proj_physical': '(1e-05)', 'schedules': '"""all"""'}), "(states=tester_states, povms=tester_povms, num_outcomes=\n true_object.num_outcomes, on_para_eq_constraint=on_para_eq_constraint,\n eps_proj_physical=1e-05, schedules='all')\n", (29174, 29351), False, 'from 
quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, StandardQmpt\n'), ((29562, 29589), 'quara.protocol.qtomography.standard.loss_minimization_estimator.LossMinimizationEstimator', 'LossMinimizationEstimator', ([], {}), '()\n', (29587, 29589), False, 'from quara.protocol.qtomography.standard.loss_minimization_estimator import LossMinimizationEstimator\n'), ((29601, 29650), 'quara.loss_function.standard_qtomography_based_weighted_relative_entropy.StandardQTomographyBasedWeightedRelativeEntropy', 'StandardQTomographyBasedWeightedRelativeEntropy', ([], {}), '()\n', (29648, 29650), False, 'from quara.loss_function.standard_qtomography_based_weighted_relative_entropy import StandardQTomographyBasedWeightedRelativeEntropy, StandardQTomographyBasedWeightedRelativeEntropyOption\n'), ((29669, 29734), 'quara.loss_function.standard_qtomography_based_weighted_relative_entropy.StandardQTomographyBasedWeightedRelativeEntropyOption', 'StandardQTomographyBasedWeightedRelativeEntropyOption', (['"""identity"""'], {}), "('identity')\n", (29722, 29734), False, 'from quara.loss_function.standard_qtomography_based_weighted_relative_entropy import StandardQTomographyBasedWeightedRelativeEntropy, StandardQTomographyBasedWeightedRelativeEntropyOption\n'), ((29746, 29784), 'quara.minimization_algorithm.projected_gradient_descent_backtracking.ProjectedGradientDescentBacktracking', 'ProjectedGradientDescentBacktracking', ([], {}), '()\n', (29782, 29784), False, 'from quara.minimization_algorithm.projected_gradient_descent_backtracking import ProjectedGradientDescentBacktracking, ProjectedGradientDescentBacktrackingOption\n'), ((29803, 29998), 'quara.minimization_algorithm.projected_gradient_descent_backtracking.ProjectedGradientDescentBacktrackingOption', 'ProjectedGradientDescentBacktrackingOption', ([], {'mode_stopping_criterion_gradient_descent': '"""sum_absolute_difference_variable"""', 'num_history_stopping_criterion_gradient_descent': '(1)', 'eps': '(1e-09)'}), "(\n 
mode_stopping_criterion_gradient_descent=\n 'sum_absolute_difference_variable',\n num_history_stopping_criterion_gradient_descent=1, eps=1e-09)\n", (29845, 29998), False, 'from quara.minimization_algorithm.projected_gradient_descent_backtracking import ProjectedGradientDescentBacktracking, ProjectedGradientDescentBacktrackingOption\n'), ((30686, 30741), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', ([], {'mode': '"""qubit"""', 'num': 'num_qubits'}), "(mode='qubit', num=num_qubits)\n", (30711, 30741), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((31226, 31298), 'quara.objects.qoperation_typical.generate_qoperation', 'generate_qoperation', ([], {'mode': '"""mprocess"""', 'name': 'true_object_name', 'c_sys': 'c_sys'}), "(mode='mprocess', name=true_object_name, c_sys=c_sys)\n", (31245, 31298), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((31491, 31655), 'quara.protocol.qtomography.standard.standard_qmpt.StandardQmpt', 'StandardQmpt', ([], {'states': 'tester_states', 'povms': 'tester_povms', 'num_outcomes': 'true_object.num_outcomes', 'on_para_eq_constraint': 'on_para_eq_constraint', 'schedules': '"""all"""'}), "(states=tester_states, povms=tester_povms, num_outcomes=\n true_object.num_outcomes, on_para_eq_constraint=on_para_eq_constraint,\n schedules='all')\n", (31503, 31655), False, 'from quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, StandardQmpt\n'), ((31893, 31920), 'quara.protocol.qtomography.standard.loss_minimization_estimator.LossMinimizationEstimator', 'LossMinimizationEstimator', ([], {}), '()\n', (31918, 31920), False, 'from quara.protocol.qtomography.standard.loss_minimization_estimator import LossMinimizationEstimator\n'), ((31932, 31994), 
'quara.loss_function.standard_qtomography_based_weighted_probability_based_squared_error.StandardQTomographyBasedWeightedProbabilityBasedSquaredError', 'StandardQTomographyBasedWeightedProbabilityBasedSquaredError', ([], {}), '()\n', (31992, 31994), False, 'from quara.loss_function.standard_qtomography_based_weighted_probability_based_squared_error import StandardQTomographyBasedWeightedProbabilityBasedSquaredError, StandardQTomographyBasedWeightedProbabilityBasedSquaredErrorOption\n'), ((32013, 32091), 'quara.loss_function.standard_qtomography_based_weighted_probability_based_squared_error.StandardQTomographyBasedWeightedProbabilityBasedSquaredErrorOption', 'StandardQTomographyBasedWeightedProbabilityBasedSquaredErrorOption', (['"""identity"""'], {}), "('identity')\n", (32079, 32091), False, 'from quara.loss_function.standard_qtomography_based_weighted_probability_based_squared_error import StandardQTomographyBasedWeightedProbabilityBasedSquaredError, StandardQTomographyBasedWeightedProbabilityBasedSquaredErrorOption\n'), ((32117, 32155), 'quara.minimization_algorithm.projected_gradient_descent_backtracking.ProjectedGradientDescentBacktracking', 'ProjectedGradientDescentBacktracking', ([], {}), '()\n', (32153, 32155), False, 'from quara.minimization_algorithm.projected_gradient_descent_backtracking import ProjectedGradientDescentBacktracking, ProjectedGradientDescentBacktrackingOption\n'), ((32174, 32369), 'quara.minimization_algorithm.projected_gradient_descent_backtracking.ProjectedGradientDescentBacktrackingOption', 'ProjectedGradientDescentBacktrackingOption', ([], {'mode_stopping_criterion_gradient_descent': '"""sum_absolute_difference_variable"""', 'num_history_stopping_criterion_gradient_descent': '(1)', 'eps': '(1e-09)'}), "(\n mode_stopping_criterion_gradient_descent=\n 'sum_absolute_difference_variable',\n num_history_stopping_criterion_gradient_descent=1, eps=1e-09)\n", (32216, 32369), False, 'from 
quara.minimization_algorithm.projected_gradient_descent_backtracking import ProjectedGradientDescentBacktracking, ProjectedGradientDescentBacktrackingOption\n'), ((33184, 33239), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', ([], {'mode': '"""qubit"""', 'num': 'num_qubits'}), "(mode='qubit', num=num_qubits)\n", (33209, 33239), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((33812, 33884), 'quara.objects.qoperation_typical.generate_qoperation', 'generate_qoperation', ([], {'mode': '"""mprocess"""', 'name': 'true_object_name', 'c_sys': 'c_sys'}), "(mode='mprocess', name=true_object_name, c_sys=c_sys)\n", (33831, 33884), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((34077, 34266), 'quara.protocol.qtomography.standard.standard_qmpt.StandardQmpt', 'StandardQmpt', ([], {'states': 'tester_states', 'povms': 'tester_povms', 'num_outcomes': 'true_object.num_outcomes', 'on_para_eq_constraint': 'on_para_eq_constraint', 'eps_proj_physical': '(1e-05)', 'schedules': '"""all"""'}), "(states=tester_states, povms=tester_povms, num_outcomes=\n true_object.num_outcomes, on_para_eq_constraint=on_para_eq_constraint,\n eps_proj_physical=1e-05, schedules='all')\n", (34089, 34266), False, 'from quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, StandardQmpt\n'), ((34477, 34504), 'quara.protocol.qtomography.standard.loss_minimization_estimator.LossMinimizationEstimator', 'LossMinimizationEstimator', ([], {}), '()\n', (34502, 34504), False, 'from quara.protocol.qtomography.standard.loss_minimization_estimator import LossMinimizationEstimator\n'), ((34516, 34578), 'quara.loss_function.standard_qtomography_based_weighted_probability_based_squared_error.StandardQTomographyBasedWeightedProbabilityBasedSquaredError', 'StandardQTomographyBasedWeightedProbabilityBasedSquaredError', ([], {}), '()\n', (34576, 34578), False, 
'from quara.loss_function.standard_qtomography_based_weighted_probability_based_squared_error import StandardQTomographyBasedWeightedProbabilityBasedSquaredError, StandardQTomographyBasedWeightedProbabilityBasedSquaredErrorOption\n'), ((34597, 34675), 'quara.loss_function.standard_qtomography_based_weighted_probability_based_squared_error.StandardQTomographyBasedWeightedProbabilityBasedSquaredErrorOption', 'StandardQTomographyBasedWeightedProbabilityBasedSquaredErrorOption', (['"""identity"""'], {}), "('identity')\n", (34663, 34675), False, 'from quara.loss_function.standard_qtomography_based_weighted_probability_based_squared_error import StandardQTomographyBasedWeightedProbabilityBasedSquaredError, StandardQTomographyBasedWeightedProbabilityBasedSquaredErrorOption\n'), ((34701, 34739), 'quara.minimization_algorithm.projected_gradient_descent_backtracking.ProjectedGradientDescentBacktracking', 'ProjectedGradientDescentBacktracking', ([], {}), '()\n', (34737, 34739), False, 'from quara.minimization_algorithm.projected_gradient_descent_backtracking import ProjectedGradientDescentBacktracking, ProjectedGradientDescentBacktrackingOption\n'), ((34758, 34953), 'quara.minimization_algorithm.projected_gradient_descent_backtracking.ProjectedGradientDescentBacktrackingOption', 'ProjectedGradientDescentBacktrackingOption', ([], {'mode_stopping_criterion_gradient_descent': '"""sum_absolute_difference_variable"""', 'num_history_stopping_criterion_gradient_descent': '(1)', 'eps': '(1e-09)'}), "(\n mode_stopping_criterion_gradient_descent=\n 'sum_absolute_difference_variable',\n num_history_stopping_criterion_gradient_descent=1, eps=1e-09)\n", (34800, 34953), False, 'from quara.minimization_algorithm.projected_gradient_descent_backtracking import ProjectedGradientDescentBacktracking, ProjectedGradientDescentBacktrackingOption\n'), ((35713, 35769), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', ([], {'mode': '"""qutrit"""', 'num': 
'num_qubits'}), "(mode='qutrit', num=num_qubits)\n", (35738, 35769), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((36420, 36492), 'quara.objects.qoperation_typical.generate_qoperation', 'generate_qoperation', ([], {'mode': '"""mprocess"""', 'name': 'true_object_name', 'c_sys': 'c_sys'}), "(mode='mprocess', name=true_object_name, c_sys=c_sys)\n", (36439, 36492), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((36685, 36874), 'quara.protocol.qtomography.standard.standard_qmpt.StandardQmpt', 'StandardQmpt', ([], {'states': 'tester_states', 'povms': 'tester_povms', 'num_outcomes': 'true_object.num_outcomes', 'on_para_eq_constraint': 'on_para_eq_constraint', 'eps_proj_physical': '(1e-05)', 'schedules': '"""all"""'}), "(states=tester_states, povms=tester_povms, num_outcomes=\n true_object.num_outcomes, on_para_eq_constraint=on_para_eq_constraint,\n eps_proj_physical=1e-05, schedules='all')\n", (36697, 36874), False, 'from quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, StandardQmpt\n'), ((37085, 37112), 'quara.protocol.qtomography.standard.loss_minimization_estimator.LossMinimizationEstimator', 'LossMinimizationEstimator', ([], {}), '()\n', (37110, 37112), False, 'from quara.protocol.qtomography.standard.loss_minimization_estimator import LossMinimizationEstimator\n'), ((37124, 37186), 'quara.loss_function.standard_qtomography_based_weighted_probability_based_squared_error.StandardQTomographyBasedWeightedProbabilityBasedSquaredError', 'StandardQTomographyBasedWeightedProbabilityBasedSquaredError', ([], {}), '()\n', (37184, 37186), False, 'from quara.loss_function.standard_qtomography_based_weighted_probability_based_squared_error import StandardQTomographyBasedWeightedProbabilityBasedSquaredError, StandardQTomographyBasedWeightedProbabilityBasedSquaredErrorOption\n'), ((37205, 37283), 
'quara.loss_function.standard_qtomography_based_weighted_probability_based_squared_error.StandardQTomographyBasedWeightedProbabilityBasedSquaredErrorOption', 'StandardQTomographyBasedWeightedProbabilityBasedSquaredErrorOption', (['"""identity"""'], {}), "('identity')\n", (37271, 37283), False, 'from quara.loss_function.standard_qtomography_based_weighted_probability_based_squared_error import StandardQTomographyBasedWeightedProbabilityBasedSquaredError, StandardQTomographyBasedWeightedProbabilityBasedSquaredErrorOption\n'), ((37309, 37347), 'quara.minimization_algorithm.projected_gradient_descent_backtracking.ProjectedGradientDescentBacktracking', 'ProjectedGradientDescentBacktracking', ([], {}), '()\n', (37345, 37347), False, 'from quara.minimization_algorithm.projected_gradient_descent_backtracking import ProjectedGradientDescentBacktracking, ProjectedGradientDescentBacktrackingOption\n'), ((37366, 37561), 'quara.minimization_algorithm.projected_gradient_descent_backtracking.ProjectedGradientDescentBacktrackingOption', 'ProjectedGradientDescentBacktrackingOption', ([], {'mode_stopping_criterion_gradient_descent': '"""sum_absolute_difference_variable"""', 'num_history_stopping_criterion_gradient_descent': '(1)', 'eps': '(1e-09)'}), "(\n mode_stopping_criterion_gradient_descent=\n 'sum_absolute_difference_variable',\n num_history_stopping_criterion_gradient_descent=1, eps=1e-09)\n", (37408, 37561), False, 'from quara.minimization_algorithm.projected_gradient_descent_backtracking import ProjectedGradientDescentBacktracking, ProjectedGradientDescentBacktrackingOption\n'), ((1698, 1753), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', ([], {'mode': '"""qubit"""', 'num': 'num_qubits'}), "(mode='qubit', num=num_qubits)\n", (1723, 1753), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((2306, 2371), 'quara.objects.qoperation_typical.generate_qoperation', 'generate_qoperation', ([], 
{'mode': '"""mprocess"""', 'name': '"""x-type1"""', 'c_sys': 'c_sys'}), "(mode='mprocess', name='x-type1', c_sys=c_sys)\n", (2325, 2371), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((2403, 2546), 'quara.protocol.qtomography.standard.standard_qmpt.StandardQmpt', 'StandardQmpt', ([], {'states': 'tester_states', 'povms': 'tester_povms', 'num_outcomes': 'true_object.num_outcomes', 'on_para_eq_constraint': '(True)', 'schedules': '"""all"""'}), "(states=tester_states, povms=tester_povms, num_outcomes=\n true_object.num_outcomes, on_para_eq_constraint=True, schedules='all')\n", (2415, 2546), False, 'from quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, StandardQmpt\n'), ((2750, 2805), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', ([], {'mode': '"""qubit"""', 'num': 'num_qubits'}), "(mode='qubit', num=num_qubits)\n", (2775, 2805), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((3358, 3423), 'quara.objects.qoperation_typical.generate_qoperation', 'generate_qoperation', ([], {'mode': '"""mprocess"""', 'name': '"""x-type1"""', 'c_sys': 'c_sys'}), "(mode='mprocess', name='x-type1', c_sys=c_sys)\n", (3377, 3423), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((3455, 3598), 'quara.protocol.qtomography.standard.standard_qmpt.StandardQmpt', 'StandardQmpt', ([], {'states': 'tester_states', 'povms': 'tester_povms', 'num_outcomes': 'true_object.num_outcomes', 'on_para_eq_constraint': '(True)', 'schedules': '"""all"""'}), "(states=tester_states, povms=tester_povms, num_outcomes=\n true_object.num_outcomes, on_para_eq_constraint=True, schedules='all')\n", (3467, 3598), False, 'from quara.protocol.qtomography.standard.standard_qmpt import cqpt_to_cqmpt, StandardQmpt\n'), ((3877, 3902), 'quara.objects.composite_system.CompositeSystem', 
'CompositeSystem', (['[e_sys0]'], {}), '([e_sys0])\n', (3892, 3902), False, 'from quara.objects.composite_system import CompositeSystem\n'), ((3986, 4011), 'quara.objects.composite_system.CompositeSystem', 'CompositeSystem', (['[e_sys1]'], {}), '([e_sys1])\n', (4001, 4011), False, 'from quara.objects.composite_system import CompositeSystem\n'), ((4030, 4048), 'quara.objects.povm.get_x_povm', 'get_x_povm', (['c_sys1'], {}), '(c_sys1)\n', (4040, 4048), False, 'from quara.objects.povm import get_x_povm, get_y_povm, get_z_povm\n'), ((4066, 4084), 'quara.objects.povm.get_y_povm', 'get_y_povm', (['c_sys0'], {}), '(c_sys0)\n', (4076, 4084), False, 'from quara.objects.povm import get_x_povm, get_y_povm, get_z_povm\n'), ((4102, 4120), 'quara.objects.povm.get_z_povm', 'get_z_povm', (['c_sys0'], {}), '(c_sys0)\n', (4112, 4120), False, 'from quara.objects.povm import get_x_povm, get_y_povm, get_z_povm\n'), ((10861, 10950), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""state"""', 'object_name': '"""state"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='state', object_name='state', name=name,\n c_sys=c_sys)\n", (10887, 10950), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((11049, 11136), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""povm"""', 'object_name': '"""povm"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='povm', object_name='povm', name=name,\n c_sys=c_sys)\n", (11075, 11136), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((12693, 12782), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""state"""', 'object_name': '"""state"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='state', object_name='state', name=name,\n c_sys=c_sys)\n", (12719, 12782), False, 'from 
quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((12868, 12955), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""povm"""', 'object_name': '"""povm"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='povm', object_name='povm', name=name,\n c_sys=c_sys)\n", (12894, 12955), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((13477, 13548), 'quara.objects.mprocess.MProcess', 'MProcess', ([], {'hss': 'true_object.hss', 'on_para_eq_constraint': '(False)', 'c_sys': 'c_sys'}), '(hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys)\n', (13485, 13548), False, 'from quara.objects.mprocess import MProcess\n'), ((13788, 13841), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['actual', 'expected'], {'decimal': '(15)'}), '(actual, expected, decimal=15)\n', (13811, 13841), True, 'import numpy.testing as npt\n'), ((14211, 14307), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""state"""', 'object_name': '"""state"""', 'name': 'f"""{a}_{b}"""', 'c_sys': 'c_sys'}), "(mode='state', object_name='state', name=\n f'{a}_{b}', c_sys=c_sys)\n", (14237, 14307), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((14424, 14517), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""povm"""', 'object_name': '"""povm"""', 'name': 'f"""{a}_{b}"""', 'c_sys': 'c_sys'}), "(mode='povm', object_name='povm', name=f'{a}_{b}',\n c_sys=c_sys)\n", (14450, 14517), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((14861, 14932), 'quara.objects.mprocess.MProcess', 'MProcess', ([], {'hss': 'true_object.hss', 'on_para_eq_constraint': '(False)', 'c_sys': 'c_sys'}), '(hss=true_object.hss, 
on_para_eq_constraint=False, c_sys=c_sys)\n', (14869, 14932), False, 'from quara.objects.mprocess import MProcess\n'), ((15481, 15534), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['actual', 'expected'], {'decimal': '(14)'}), '(actual, expected, decimal=14)\n', (15504, 15534), True, 'import numpy.testing as npt\n'), ((16070, 16159), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""state"""', 'object_name': '"""state"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='state', object_name='state', name=name,\n c_sys=c_sys)\n", (16096, 16159), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((16245, 16332), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""povm"""', 'object_name': '"""povm"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='povm', object_name='povm', name=name,\n c_sys=c_sys)\n", (16271, 16332), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((16845, 16916), 'quara.objects.mprocess.MProcess', 'MProcess', ([], {'hss': 'true_object.hss', 'on_para_eq_constraint': '(False)', 'c_sys': 'c_sys'}), '(hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys)\n', (16853, 16916), False, 'from quara.objects.mprocess import MProcess\n'), ((17154, 17207), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['actual', 'expected'], {'decimal': '(15)'}), '(actual, expected, decimal=15)\n', (17177, 17207), True, 'import numpy.testing as npt\n'), ((17669, 17758), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""state"""', 'object_name': '"""state"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='state', object_name='state', name=name,\n c_sys=c_sys)\n", (17695, 17758), False, 'from quara.objects.qoperation_typical import generate_qoperation, 
generate_qoperation_object\n'), ((17844, 17931), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""povm"""', 'object_name': '"""povm"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='povm', object_name='povm', name=name,\n c_sys=c_sys)\n", (17870, 17931), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((18172, 18243), 'quara.objects.mprocess.MProcess', 'MProcess', ([], {'hss': 'true_object.hss', 'on_para_eq_constraint': '(False)', 'c_sys': 'c_sys'}), '(hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys)\n', (18180, 18243), False, 'from quara.objects.mprocess import MProcess\n'), ((18926, 18967), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['a', 'e'], {'decimal': '(15)'}), '(a, e, decimal=15)\n', (18949, 18967), True, 'import numpy.testing as npt\n'), ((19556, 19652), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""state"""', 'object_name': '"""state"""', 'name': 'f"""{a}_{b}"""', 'c_sys': 'c_sys'}), "(mode='state', object_name='state', name=\n f'{a}_{b}', c_sys=c_sys)\n", (19582, 19652), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((19775, 19868), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""povm"""', 'object_name': '"""povm"""', 'name': 'f"""{a}_{b}"""', 'c_sys': 'c_sys'}), "(mode='povm', object_name='povm', name=f'{a}_{b}',\n c_sys=c_sys)\n", (19801, 19868), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((20147, 20218), 'quara.objects.mprocess.MProcess', 'MProcess', ([], {'hss': 'true_object.hss', 'on_para_eq_constraint': '(False)', 'c_sys': 'c_sys'}), '(hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys)\n', (20155, 20218), False, 'from 
quara.objects.mprocess import MProcess\n'), ((20901, 20942), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['a', 'e'], {'decimal': '(14)'}), '(a, e, decimal=14)\n', (20924, 20942), True, 'import numpy.testing as npt\n'), ((21643, 21732), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""state"""', 'object_name': '"""state"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='state', object_name='state', name=name,\n c_sys=c_sys)\n", (21669, 21732), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((21818, 21905), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""povm"""', 'object_name': '"""povm"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='povm', object_name='povm', name=name,\n c_sys=c_sys)\n", (21844, 21905), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((22146, 22217), 'quara.objects.mprocess.MProcess', 'MProcess', ([], {'hss': 'true_object.hss', 'on_para_eq_constraint': '(False)', 'c_sys': 'c_sys'}), '(hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys)\n', (22154, 22217), False, 'from quara.objects.mprocess import MProcess\n'), ((22900, 22941), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['a', 'e'], {'decimal': '(14)'}), '(a, e, decimal=14)\n', (22923, 22941), True, 'import numpy.testing as npt\n'), ((23385, 23474), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""state"""', 'object_name': '"""state"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='state', object_name='state', name=name,\n c_sys=c_sys)\n", (23411, 23474), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((23560, 23647), 'quara.objects.qoperation_typical.generate_qoperation_object', 
'generate_qoperation_object', ([], {'mode': '"""povm"""', 'object_name': '"""povm"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='povm', object_name='povm', name=name,\n c_sys=c_sys)\n", (23586, 23647), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((23888, 23959), 'quara.objects.mprocess.MProcess', 'MProcess', ([], {'hss': 'true_object.hss', 'on_para_eq_constraint': '(False)', 'c_sys': 'c_sys'}), '(hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys)\n', (23896, 23959), False, 'from quara.objects.mprocess import MProcess\n'), ((25232, 25272), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['a', 'e'], {'decimal': '(6)'}), '(a, e, decimal=6)\n', (25255, 25272), True, 'import numpy.testing as npt\n'), ((25843, 25939), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""state"""', 'object_name': '"""state"""', 'name': 'f"""{a}_{b}"""', 'c_sys': 'c_sys'}), "(mode='state', object_name='state', name=\n f'{a}_{b}', c_sys=c_sys)\n", (25869, 25939), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((26062, 26155), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""povm"""', 'object_name': '"""povm"""', 'name': 'f"""{a}_{b}"""', 'c_sys': 'c_sys'}), "(mode='povm', object_name='povm', name=f'{a}_{b}',\n c_sys=c_sys)\n", (26088, 26155), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((26434, 26505), 'quara.objects.mprocess.MProcess', 'MProcess', ([], {'hss': 'true_object.hss', 'on_para_eq_constraint': '(False)', 'c_sys': 'c_sys'}), '(hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys)\n', (26442, 26505), False, 'from quara.objects.mprocess import MProcess\n'), ((27819, 27859), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['a', 
'e'], {'decimal': '(1)'}), '(a, e, decimal=1)\n', (27842, 27859), True, 'import numpy.testing as npt\n'), ((28542, 28631), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""state"""', 'object_name': '"""state"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='state', object_name='state', name=name,\n c_sys=c_sys)\n", (28568, 28631), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((28717, 28804), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""povm"""', 'object_name': '"""povm"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='povm', object_name='povm', name=name,\n c_sys=c_sys)\n", (28743, 28804), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((29045, 29116), 'quara.objects.mprocess.MProcess', 'MProcess', ([], {'hss': 'true_object.hss', 'on_para_eq_constraint': '(False)', 'c_sys': 'c_sys'}), '(hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys)\n', (29053, 29116), False, 'from quara.objects.mprocess import MProcess\n'), ((30387, 30427), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['a', 'e'], {'decimal': '(1)'}), '(a, e, decimal=1)\n', (30410, 30427), True, 'import numpy.testing as npt\n'), ((30871, 30960), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""state"""', 'object_name': '"""state"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='state', object_name='state', name=name,\n c_sys=c_sys)\n", (30897, 30960), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((31046, 31133), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""povm"""', 'object_name': '"""povm"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='povm', 
object_name='povm', name=name,\n c_sys=c_sys)\n", (31072, 31133), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((31374, 31445), 'quara.objects.mprocess.MProcess', 'MProcess', ([], {'hss': 'true_object.hss', 'on_para_eq_constraint': '(False)', 'c_sys': 'c_sys'}), '(hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys)\n', (31382, 31445), False, 'from quara.objects.mprocess import MProcess\n'), ((32758, 32798), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['a', 'e'], {'decimal': '(7)'}), '(a, e, decimal=7)\n', (32781, 32798), True, 'import numpy.testing as npt\n'), ((33369, 33465), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""state"""', 'object_name': '"""state"""', 'name': 'f"""{a}_{b}"""', 'c_sys': 'c_sys'}), "(mode='state', object_name='state', name=\n f'{a}_{b}', c_sys=c_sys)\n", (33395, 33465), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((33588, 33681), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""povm"""', 'object_name': '"""povm"""', 'name': 'f"""{a}_{b}"""', 'c_sys': 'c_sys'}), "(mode='povm', object_name='povm', name=f'{a}_{b}',\n c_sys=c_sys)\n", (33614, 33681), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((33960, 34031), 'quara.objects.mprocess.MProcess', 'MProcess', ([], {'hss': 'true_object.hss', 'on_para_eq_constraint': '(False)', 'c_sys': 'c_sys'}), '(hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys)\n', (33968, 34031), False, 'from quara.objects.mprocess import MProcess\n'), ((35342, 35382), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['a', 'e'], {'decimal': '(2)'}), '(a, e, decimal=2)\n', (35365, 35382), True, 'import numpy.testing as npt\n'), ((36065, 36154), 
'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""state"""', 'object_name': '"""state"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='state', object_name='state', name=name,\n c_sys=c_sys)\n", (36091, 36154), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((36240, 36327), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""povm"""', 'object_name': '"""povm"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='povm', object_name='povm', name=name,\n c_sys=c_sys)\n", (36266, 36327), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((36568, 36639), 'quara.objects.mprocess.MProcess', 'MProcess', ([], {'hss': 'true_object.hss', 'on_para_eq_constraint': '(False)', 'c_sys': 'c_sys'}), '(hss=true_object.hss, on_para_eq_constraint=False, c_sys=c_sys)\n', (36576, 36639), False, 'from quara.objects.mprocess import MProcess\n'), ((37950, 37990), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['a', 'e'], {'decimal': '(1)'}), '(a, e, decimal=1)\n', (37973, 37990), True, 'import numpy.testing as npt\n'), ((1903, 1992), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""state"""', 'object_name': '"""state"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='state', object_name='state', name=name,\n c_sys=c_sys)\n", (1929, 1992), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((2102, 2189), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""povm"""', 'object_name': '"""povm"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='povm', object_name='povm', name=name,\n c_sys=c_sys)\n", (2128, 2189), False, 'from quara.objects.qoperation_typical import generate_qoperation, 
generate_qoperation_object\n'), ((2955, 3044), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""state"""', 'object_name': '"""state"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='state', object_name='state', name=name,\n c_sys=c_sys)\n", (2981, 3044), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((3154, 3241), 'quara.objects.qoperation_typical.generate_qoperation_object', 'generate_qoperation_object', ([], {'mode': '"""povm"""', 'object_name': '"""povm"""', 'name': 'name', 'c_sys': 'c_sys'}), "(mode='povm', object_name='povm', name=name,\n c_sys=c_sys)\n", (3180, 3241), False, 'from quara.objects.qoperation_typical import generate_qoperation, generate_qoperation_object\n'), ((3830, 3858), 'quara.objects.matrix_basis.get_normalized_pauli_basis', 'get_normalized_pauli_basis', ([], {}), '()\n', (3856, 3858), False, 'from quara.objects.matrix_basis import get_normalized_pauli_basis\n'), ((3939, 3967), 'quara.objects.matrix_basis.get_normalized_pauli_basis', 'get_normalized_pauli_basis', ([], {}), '()\n', (3965, 3967), False, 'from quara.objects.matrix_basis import get_normalized_pauli_basis\n'), ((14345, 14388), 'itertools.product', 'itertools.product', (['state_names', 'state_names'], {}), '(state_names, state_names)\n', (14362, 14388), False, 'import itertools\n'), ((14556, 14597), 'itertools.product', 'itertools.product', (['povm_names', 'povm_names'], {}), '(povm_names, povm_names)\n', (14573, 14597), False, 'import itertools\n'), ((19690, 19739), 'itertools.product', 'itertools.product', (['state_names'], {'repeat': 'num_qubits'}), '(state_names, repeat=num_qubits)\n', (19707, 19739), False, 'import itertools\n'), ((19907, 19955), 'itertools.product', 'itertools.product', (['povm_names'], {'repeat': 'num_qubits'}), '(povm_names, repeat=num_qubits)\n', (19924, 19955), False, 'import itertools\n'), ((25977, 26026), 'itertools.product', 
'itertools.product', (['state_names'], {'repeat': 'num_qubits'}), '(state_names, repeat=num_qubits)\n', (25994, 26026), False, 'import itertools\n'), ((26194, 26242), 'itertools.product', 'itertools.product', (['povm_names'], {'repeat': 'num_qubits'}), '(povm_names, repeat=num_qubits)\n', (26211, 26242), False, 'import itertools\n'), ((33503, 33552), 'itertools.product', 'itertools.product', (['state_names'], {'repeat': 'num_qubits'}), '(state_names, repeat=num_qubits)\n', (33520, 33552), False, 'import itertools\n'), ((33720, 33768), 'itertools.product', 'itertools.product', (['povm_names'], {'repeat': 'num_qubits'}), '(povm_names, repeat=num_qubits)\n', (33737, 33768), False, 'import itertools\n')] |
import sys
import numpy as np
import dimod
from dwave.system.samplers import DWaveSampler
from dwave.system.composites import EmbeddingComposite
def loadFile(filename):
    """Parse a tab-separated key/value file into a dict.

    Each non-comment line has the form ``<key>\\t<value>``; lines whose
    first non-blank character is ``#`` are comments.

    Blank lines are skipped — the previous version indexed ``line[0]``
    and then split unconditionally, so an empty or whitespace-only line
    raised ValueError.

    :param filename: path of the text file to read.
    :return: dict mapping each key to its stripped value string.
    """
    commands = {}
    with open(filename) as fh:
        for line in fh:
            stripped = line.strip()
            # Skip blank lines and comment lines.
            if not stripped or stripped.startswith("#"):
                continue
            command, description = stripped.split('\t', 1)
            commands[command] = description.strip()
    return commands
def coef2Prob(commands):
    """Translate a parsed command dict into Ising problem coefficients.

    Expects the string-valued keys ``"N"``, ``"const"``, ``"h<i>"`` for
    every spin index i, and ``"J<i>_<j>"`` for every pair with j < i.

    :param commands: dict of string keys to string values (see loadFile).
    :return: ``(quadratic, linear, const)`` where *quadratic* maps
        ``(i, j)`` with j < i to the coupling J_ij, *linear* maps i to
        the local field h_i, and *const* is the constant energy offset.
    """
    N = int(commands["N"])
    linear = {i: float(commands["h{}".format(i)]) for i in range(0, N)}
    quadratic = {
        (i, j): float(commands["J{}_{}".format(i, j)])
        for i in range(1, N)
        for j in range(0, i)
    }
    return quadratic, linear, float(commands["const"])
if __name__ == '__main__':
    # Build the Ising problem from the coefficient file named on the CLI.
    quadratic, linear, const = coef2Prob(loadFile(sys.argv[1]))
    bqm = dimod.BinaryQuadraticModel(linear, quadratic, const, dimod.Vartype.SPIN)
    # Sample on D-Wave hardware through an auto-embedding composite.
    solver = EmbeddingComposite(DWaveSampler())
    computation = solver.sample(bqm, num_reads=1000, annealing_time=20)
    print("QPU time used:", computation.info['timing']['qpu_access_time'], "microseconds.")
    print("#histogram")
    # Aggregate occurrence counts per distinct energy level.
    hist = dict()
    for record in computation.record:
        ene = record['energy']
        hist[ene] = hist.get(ene, 0) + record['num_occurrences']
    for x in hist:
        print(x, "\t", hist[x])
    # Report and persist the lowest-energy sample found.
    i_best = np.argmin(computation.data_vectors['energy'])
    print("energy=", computation.record[i_best]['energy'])
    vec = computation.record[i_best]['sample']
    print("vec=", vec)
    np.save("solution_vec", vec)
| [
"numpy.argmin",
"dwave.system.samplers.DWaveSampler",
"numpy.save",
"dimod.BinaryQuadraticModel"
] | [((873, 945), 'dimod.BinaryQuadraticModel', 'dimod.BinaryQuadraticModel', (['linear', 'quadratic', 'const', 'dimod.Vartype.SPIN'], {}), '(linear, quadratic, const, dimod.Vartype.SPIN)\n', (899, 945), False, 'import dimod\n'), ((1538, 1559), 'numpy.argmin', 'np.argmin', (['energy_vec'], {}), '(energy_vec)\n', (1547, 1559), True, 'import numpy as np\n'), ((1707, 1735), 'numpy.save', 'np.save', (['"""solution_vec"""', 'vec'], {}), "('solution_vec', vec)\n", (1714, 1735), True, 'import numpy as np\n'), ((973, 987), 'dwave.system.samplers.DWaveSampler', 'DWaveSampler', ([], {}), '()\n', (985, 987), False, 'from dwave.system.samplers import DWaveSampler\n')] |
"""
Inter Rising Edge Timer:
This example outlines the use of the single channel inter rising edge time measurement function of the time controller.
Tis example showcases its use by generating a histogram of signal at CH1.
First, the start_iretimer() function must be called which will start the hardware module for it.
Next you can request data that is currently stored in the hardware FIFO using the function acquire_iretimer_data() which will return a list of times. If the signal is high speed, the module will stop listening to additional pulses until the FIFO is cleared through reads.
In order to shut down the module, call the stop_iretimer() function which will stop the module and hold it in reset.
"""
from TimeController import *
import _thread
from time import sleep
from time import perf_counter
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
import logging
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s [%(levelname)7s] %(module)s -- %(message)s')
from pyqtgraph.ptime import time
curIndex = 0
app = QtGui.QApplication([])
fpsv = 0
fpss = 0
lperf = perf_counter()
size = 10000000
data1 = np.zeros(size)
data2 = np.zeros(size)
data3 = np.zeros(size)
data4 = np.zeros(size)
histplot = pg.plot()
histplot.setTitle("Help")
histplot.setRange(QtCore.QRectF(0, 0, 1000e-9, 1000))
histplot.setLabel('bottom', 'Interval', units='s')
histplot.setLabel('left', 'Counts', units='')
SPT = TimeController("169.254.0.1",6050,0)
SPT.start_iretimer()
y1,x1 = np.histogram(data1[:curIndex],bins=np.linspace(0,1000e-9,1000))
temp1 = histplot.plot(x1, y1, stepMode=True, fillLevel=0, fillOutline=False, brush=(0,0,255,150))
def updatePlot():
    """Refresh the interval histogram and the window title.

    Runs on a fast Qt timer: recomputes the histogram over every sample
    captured so far, pushes it into the existing plot curve, and updates
    the per-frame FPS estimate kept in the module globals.
    """
    global data, histplot, app, curIndex, temp, size, SPT, lperf, fpsv, fpss
    now = perf_counter()
    # Instantaneous refresh rate of this callback.
    fpsv = 1 / (now - lperf)
    lperf = now
    counts, edges = np.histogram(data1[:curIndex], bins=np.linspace(0, 1000e-9, 1000))
    temp1.setData(edges, counts)
    histplot.setTitle("Captures: " + str(curIndex) + " FPS:" + str(fpss))
    app.processEvents()
def updateFPS():
    # Publish the latest per-frame FPS sample (fpsv, written by
    # updatePlot) into fpss, the value shown in the plot title.  Runs on
    # a 1 s Qt timer so the displayed number is steadier than the
    # per-frame estimate.
    global fpsv,fpss
    fpss=fpsv
def acquireData():
    """Background thread: drain the hardware FIFO into data1.

    Loops until `size` samples have been stored, then stops the
    inter-rising-edge timer module and exits.
    """
    global data,curIndex,size,SPT
    while True:
        # Buffer full -> shut the hardware module down and stop.
        if(curIndex >= size):
            SPT.stop_iretimer()
            break
        times = SPT.acquire_iretimer_data()
        times1 = times
        if(curIndex+len(times) > size):
            # The batch would overflow the buffer: keep only what fits.
            # NOTE(review): this branch does not advance curIndex, so the
            # loop relies on the next iteration's `curIndex >= size` test
            # never firing here — only full batches move the index.
            data1[curIndex:size] = times1[0:(size-curIndex)]
        else:
            data1[curIndex:curIndex+len(times1)]=times1
            log.debug("TIME@"+str(curIndex)+"="+str(data1[curIndex]))
            curIndex+=len(times1)
        # Yield the GIL so the Qt callbacks can run between batches.
        sleep(0)
# Fast timer (2 ms): redraw the histogram and recompute the FPS sample.
timer = QtCore.QTimer()
timer.timeout.connect(updatePlot)
timer.start(2)
# Slow timer (1 s): publish the smoothed FPS number used in the title.
fpsc = QtCore.QTimer()
fpsc.timeout.connect(updateFPS)
fpsc.start(1000)
if __name__ == '__main__':
    import sys
    # Only enter the Qt event loop when not already inside an interactive
    # interpreter that runs one for us (standard pyqtgraph idiom).
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        # Acquisition runs in its own thread; the main thread serves Qt.
        _thread.start_new_thread(acquireData,( ))
        QtGui.QApplication.instance().exec_()
| [
"logging.getLogger",
"logging.basicConfig",
"pyqtgraph.Qt.QtGui.QApplication.instance",
"pyqtgraph.Qt.QtCore.QRectF",
"pyqtgraph.plot",
"time.perf_counter",
"time.sleep",
"numpy.zeros",
"pyqtgraph.Qt.QtGui.QApplication",
"numpy.linspace",
"_thread.start_new_thread",
"pyqtgraph.Qt.QtCore.QTimer... | [((915, 942), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (932, 942), False, 'import logging\n'), ((943, 1053), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s [%(levelname)7s] %(module)s -- %(message)s"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s [%(levelname)7s] %(module)s -- %(message)s')\n", (962, 1053), False, 'import logging\n'), ((1101, 1123), 'pyqtgraph.Qt.QtGui.QApplication', 'QtGui.QApplication', (['[]'], {}), '([])\n', (1119, 1123), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((1150, 1164), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (1162, 1164), False, 'from time import perf_counter\n'), ((1189, 1203), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (1197, 1203), True, 'import numpy as np\n'), ((1212, 1226), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (1220, 1226), True, 'import numpy as np\n'), ((1235, 1249), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (1243, 1249), True, 'import numpy as np\n'), ((1258, 1272), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (1266, 1272), True, 'import numpy as np\n'), ((1284, 1293), 'pyqtgraph.plot', 'pg.plot', ([], {}), '()\n', (1291, 1293), True, 'import pyqtgraph as pg\n'), ((2617, 2632), 'pyqtgraph.Qt.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (2630, 2632), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((2689, 2704), 'pyqtgraph.Qt.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (2702, 2704), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((1338, 1370), 'pyqtgraph.Qt.QtCore.QRectF', 'QtCore.QRectF', (['(0)', '(0)', '(1e-06)', '(1000)'], {}), '(0, 0, 1e-06, 1000)\n', (1351, 1370), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((1803, 1817), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (1815, 1817), False, 'from time import perf_counter\n'), ((1578, 1605), 'numpy.linspace', 'np.linspace', 
(['(0)', '(1e-06)', '(1000)'], {}), '(0, 1e-06, 1000)\n', (1589, 1605), True, 'import numpy as np\n'), ((2600, 2608), 'time.sleep', 'sleep', (['(0)'], {}), '(0)\n', (2605, 2608), False, 'from time import sleep\n'), ((2882, 2923), '_thread.start_new_thread', '_thread.start_new_thread', (['acquireData', '()'], {}), '(acquireData, ())\n', (2906, 2923), False, 'import _thread\n'), ((1908, 1935), 'numpy.linspace', 'np.linspace', (['(0)', '(1e-06)', '(1000)'], {}), '(0, 1e-06, 1000)\n', (1919, 1935), True, 'import numpy as np\n'), ((2932, 2961), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (2959, 2961), False, 'from pyqtgraph.Qt import QtGui, QtCore\n')] |
import numpy as np
from VariableUnittest import VariableUnitTest
from gwlfe.Input.WaterBudget import GroundWatLE
class TestGroundWatLE(VariableUnitTest):
    """Unit tests for the GroundWatLE water-budget variable.

    Checks the reference implementation against a stored ground-truth
    array, and the optimized ``_f`` variant against the reference.
    """

    @staticmethod
    def _args(z):
        # Single source of truth for the long positional argument list
        # shared by GroundWatLE and GroundWatLE_f (previously duplicated
        # verbatim four times across the two tests).
        return (z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur,
                z.NUrb, z.Area, z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0,
                z.Imper, z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV,
                z.PcntET, z.DayHrs, z.MaxWaterCap, z.SatStor_0,
                z.RecessionCoef, z.SeepCoef)

    def test_GroundWatLE_ground_truth(self):
        """Reference implementation must reproduce the stored result."""
        z = self.z
        np.testing.assert_array_almost_equal(
            np.load(self.basepath + "/GroundWatLE.npy"),
            GroundWatLE.GroundWatLE(*self._args(z)), decimal=7)

    def test_GroundWatLE(self):
        """Optimized variant must agree with the reference implementation."""
        z = self.z
        np.testing.assert_array_almost_equal(
            GroundWatLE.GroundWatLE_f(*self._args(z)),
            GroundWatLE.GroundWatLE(*self._args(z)), decimal=7)
| [
"numpy.load",
"gwlfe.Input.WaterBudget.GroundWatLE.GroundWatLE_f",
"gwlfe.Input.WaterBudget.GroundWatLE.GroundWatLE"
] | [((280, 323), 'numpy.load', 'np.load', (["(self.basepath + '/GroundWatLE.npy')"], {}), "(self.basepath + '/GroundWatLE.npy')\n", (287, 323), True, 'import numpy as np\n'), ((337, 620), 'gwlfe.Input.WaterBudget.GroundWatLE.GroundWatLE', 'GroundWatLE.GroundWatLE', (['z.NYrs', 'z.DaysMonth', 'z.Temp', 'z.InitSnow_0', 'z.Prec', 'z.NRur', 'z.NUrb', 'z.Area', 'z.CNI_0', 'z.AntMoist_0', 'z.Grow_0', 'z.CNP_0', 'z.Imper', 'z.ISRR', 'z.ISRA', 'z.CN', 'z.UnsatStor_0', 'z.KV', 'z.PcntET', 'z.DayHrs', 'z.MaxWaterCap', 'z.SatStor_0', 'z.RecessionCoef', 'z.SeepCoef'], {}), '(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec,\n z.NRur, z.NUrb, z.Area, z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.\n Imper, z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z\n .MaxWaterCap, z.SatStor_0, z.RecessionCoef, z.SeepCoef)\n', (360, 620), False, 'from gwlfe.Input.WaterBudget import GroundWatLE\n'), ((837, 1122), 'gwlfe.Input.WaterBudget.GroundWatLE.GroundWatLE_f', 'GroundWatLE.GroundWatLE_f', (['z.NYrs', 'z.DaysMonth', 'z.Temp', 'z.InitSnow_0', 'z.Prec', 'z.NRur', 'z.NUrb', 'z.Area', 'z.CNI_0', 'z.AntMoist_0', 'z.Grow_0', 'z.CNP_0', 'z.Imper', 'z.ISRR', 'z.ISRA', 'z.CN', 'z.UnsatStor_0', 'z.KV', 'z.PcntET', 'z.DayHrs', 'z.MaxWaterCap', 'z.SatStor_0', 'z.RecessionCoef', 'z.SeepCoef'], {}), '(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec,\n z.NRur, z.NUrb, z.Area, z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.\n Imper, z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z\n .MaxWaterCap, z.SatStor_0, z.RecessionCoef, z.SeepCoef)\n', (862, 1122), False, 'from gwlfe.Input.WaterBudget import GroundWatLE\n'), ((1236, 1519), 'gwlfe.Input.WaterBudget.GroundWatLE.GroundWatLE', 'GroundWatLE.GroundWatLE', (['z.NYrs', 'z.DaysMonth', 'z.Temp', 'z.InitSnow_0', 'z.Prec', 'z.NRur', 'z.NUrb', 'z.Area', 'z.CNI_0', 'z.AntMoist_0', 'z.Grow_0', 'z.CNP_0', 'z.Imper', 'z.ISRR', 'z.ISRA', 'z.CN', 'z.UnsatStor_0', 'z.KV', 'z.PcntET', 'z.DayHrs', 'z.MaxWaterCap', 'z.SatStor_0', 
'z.RecessionCoef', 'z.SeepCoef'], {}), '(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec,\n z.NRur, z.NUrb, z.Area, z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.\n Imper, z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z\n .MaxWaterCap, z.SatStor_0, z.RecessionCoef, z.SeepCoef)\n', (1259, 1519), False, 'from gwlfe.Input.WaterBudget import GroundWatLE\n')] |
from sklearn.neighbors import KNeighborsClassifier
from PIL import Image
import numpy as np
import json
import time
import sys
import argparse
import os
from datetime import datetime
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def get_output_file_name(path):
    """Derive a timestamped output PNG name from an input image path.

    Takes the basename of *path* (a trailing separator is tolerated),
    drops its extension, and appends ``_YYYY_MM_DD_HH_MM_SS.png``.

    Uses ``os.path.splitext`` instead of the previous split/join trick,
    which collapsed extensionless names (``"photo"``) to an empty stem.
    """
    head, tail = os.path.split(path)
    file_ = tail or os.path.basename(head)
    stem, _ = os.path.splitext(file_)
    timestamp = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
    return f"{stem}_{timestamp}.png"
def load_emojis_json(file="emojis_data/colors_emojis_mean_unicode.json"):
    """
    Load the emoji lookup table from a JSON file.

    The file contains an array of triples, each of the form
    ``[emoji image path, [R, G, B], unicode string]``.

    :param file: path of the JSON file to read.
    :return: list of three parallel numpy arrays — mean RGB colors,
        emoji image file paths, and unicode representations.
    """
    with open(file) as emoji_json_data:
        entries = np.array(json.load(emoji_json_data), dtype=object)
    colors = [np.array(entry[1]) for entry in entries]
    emojis_files = [np.array(entry[0]) for entry in entries]
    unicode_emojis = [np.array(entry[2]) for entry in entries]
    return [np.array(column) for column in (colors, emojis_files, unicode_emojis)]
def mirror_images(im1, im2):
    """Place *im1* and *im2* side by side on a black RGBA canvas.

    The canvas is as wide as both images together and as tall as the
    taller of the two; *im1* goes on the left, *im2* on the right.

    :return: the combined PIL.Image.
    """
    width = im1.width + im2.width
    height = max(im1.height, im2.height)
    dst = Image.new('RGBA', (width, height))
    # Fill the whole canvas with black before pasting either image.
    dst.paste((0, 0, 0), [0, 0, dst.size[0], dst.size[1]])
    dst.paste(im1, (0, 0))
    dst.paste(im2, (im1.width, 0))
    return dst
def stack_emojis_horizontally(files):
    """
    Open emoji images and stack them side by side into one image.

    :param files: iterable of emoji image paths.
    :return: PIL.Image containing the emojis in a single row.
    """
    images = [Image.open(file).convert('RGBA') for file in files]
    # np.hstack needs a real sequence; the previous generator argument is
    # unsupported in modern NumPy, so materialize the arrays as a list.
    images_combined = np.hstack([np.asarray(image) for image in images])
    # TODO: add black background
    return Image.fromarray(images_combined)
def stack_emojis_vertically(images):
    """
    Stack already-opened image rows on top of each other.

    :param images: iterable of PIL.Image rows of equal width.
    :return: PIL.Image with the rows concatenated vertically.
    """
    # Pass a list, not a generator: np.vstack does not accept generators
    # in modern NumPy.
    images_combined = np.vstack([np.asarray(image) for image in images])
    return Image.fromarray(images_combined)
def scale_image(image, scale):
    """Downscale *image* by *scale* using bilinear resampling.

    :param image: source PIL.Image.
    :param scale: divisor applied to both dimensions.
    :return: the resized image as a numpy pixel array.
    """
    target = np.round(np.array(image.size) / scale).astype(int)
    resized = image.resize(target, Image.BILINEAR)
    return np.array(resized)
def convert_to_imoji(knn, emojis, image, unicode_emojis=None, bg_color=None):
    """Replace every pixel of *image* with its nearest-color emoji.

    :param knn: fitted classifier mapping an RGB triple to an emoji index.
    :param emojis: array of emoji image paths, parallel to the classifier
        labels.
    :param image: HxW x 3 RGB pixel array — ``reshape(-1, 3)`` assumes
        exactly three channels (alpha must already be stripped).
    :param unicode_emojis: optional parallel array of unicode emoji
        strings; when given, a text rendering is written to
        ``output_emoji.txt``.  (The ``None`` default previously crashed
        because the array was indexed unconditionally.)
    :param bg_color: optional background color composited behind the
        transparent emojis.
    :return: the assembled PIL.Image.
    """
    shape = image.shape[:2]
    pixels = image.reshape(-1, 3)
    indices = knn.predict(pixels).astype(int)
    emojis_rows = emojis[indices].reshape(shape)
    # The text rendering is optional: only produce it when unicode data
    # was actually supplied.
    if unicode_emojis is not None:
        unicode_rows = unicode_emojis[indices].reshape(shape)
        output = '\n'.join(['\u2009'.join(line) for line in unicode_rows])
        with open("output_emoji.txt", "w", encoding="utf-8") as f:
            f.write(output)
    output_image = None
    for emoji_row in emojis_rows:
        row = stack_emojis_horizontally(emoji_row)
        # Grow the mosaic one emoji row at a time.
        if output_image is not None:
            row = stack_emojis_vertically([output_image, row])
        output_image = row
    if bg_color is not None:
        bg_img = Image.new("RGBA", output_image.size, bg_color)
        bg_img.paste(output_image, (0, 0), output_image)
        output_image = bg_img
    return output_image
def start_imoji(input_file, output_file, scale, mirror=False, bg_color=None):
    """Convert *input_file* into an emoji mosaic saved under ``output/``.

    :param input_file: path of the source image.
    :param output_file: file name (not path) of the result image.
    :param scale: divisor applied to the source image size first.
    :param mirror: when True, place the original next to the mosaic.
    :param bg_color: optional background color for the mosaic.
    :return: the PIL.Image that was saved.
    """
    colors, emojis, unicode_emojis = load_emojis_json()
    # A single nearest neighbour in RGB space picks the closest emoji.
    knn = KNeighborsClassifier(n_neighbors=1, algorithm="kd_tree")
    knn.fit(X=colors, y=np.arange(len(colors)))
    original_image = Image.open(input_file)
    scaled_image = scale_image(original_image, scale)
    emoji_image = convert_to_imoji(
        knn, emojis, scaled_image, unicode_emojis, bg_color)
    result = mirror_images(original_image, emoji_image) if mirror else emoji_image
    result.save(f"output/{output_file}")
    return result
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Turns images into emojis! IMOJIS!')
parser.add_argument('-s', '--scale', type=int, help="Divides the original image size by that scale, \
1 for the original size, affects performance a lot.", default=15)
parser.add_argument(
'-i', '--input', help="input image file name", required=True)
parser.add_argument(
'-m', '--mirror', help="Add the original image next to the imoji", action="store_true")
parser.add_argument(
'-o', '--open', help="Open the image as well as saving it.", action="store_true")
parser.add_argument(
'-bg', '--background', help="Color for background.", default=None, action="store")
args = parser.parse_args()
t = time.time()
if not os.path.exists("output"):
os.makedirs("output")
output = start_imoji(args.input, get_output_file_name(
args.input), args.scale, args.mirror, args.background)
print(f"Run time: {time.time()-t}")
if args.open:
output.show()
| [
"os.path.exists",
"PIL.Image.fromarray",
"PIL.Image.open",
"argparse.ArgumentParser",
"os.makedirs",
"PIL.Image.new",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.asarray",
"os.path.split",
"json.load",
"numpy.array",
"datetime.datetime.now",
"os.path.basename",
"warnings.simplefilter"... | [((200, 262), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (221, 262), False, 'import warnings\n'), ((314, 333), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (327, 333), False, 'import os\n'), ((1757, 1789), 'PIL.Image.fromarray', 'Image.fromarray', (['images_combined'], {}), '(images_combined)\n', (1772, 1789), False, 'from PIL import Image\n'), ((1965, 1997), 'PIL.Image.fromarray', 'Image.fromarray', (['images_combined'], {}), '(images_combined)\n', (1980, 1997), False, 'from PIL import Image\n'), ((3268, 3324), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(1)', 'algorithm': '"""kd_tree"""'}), "(n_neighbors=1, algorithm='kd_tree')\n", (3288, 3324), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((3394, 3416), 'PIL.Image.open', 'Image.open', (['input_file'], {}), '(input_file)\n', (3404, 3416), False, 'from PIL import Image\n'), ((3845, 3917), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Turns images into emojis! IMOJIS!"""'}), "(description='Turns images into emojis! 
IMOJIS!')\n", (3868, 3917), False, 'import argparse\n'), ((4594, 4605), 'time.time', 'time.time', ([], {}), '()\n', (4603, 4605), False, 'import time\n'), ((354, 376), 'os.path.basename', 'os.path.basename', (['head'], {}), '(head)\n', (370, 376), False, 'import os\n'), ((1039, 1050), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1047, 1050), True, 'import numpy as np\n'), ((2964, 3010), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'output_image.size', 'bg_color'], {}), "('RGBA', output_image.size, bg_color)\n", (2973, 3010), False, 'from PIL import Image\n'), ((4618, 4642), 'os.path.exists', 'os.path.exists', (['"""output"""'], {}), "('output')\n", (4632, 4642), False, 'import os\n'), ((4652, 4673), 'os.makedirs', 'os.makedirs', (['"""output"""'], {}), "('output')\n", (4663, 4673), False, 'import os\n'), ((761, 787), 'json.load', 'json.load', (['emoji_json_data'], {}), '(emoji_json_data)\n', (770, 787), False, 'import json\n'), ((917, 934), 'numpy.array', 'np.array', (['pair[0]'], {}), '(pair[0])\n', (925, 934), True, 'import numpy as np\n'), ((958, 975), 'numpy.array', 'np.array', (['pair[1]'], {}), '(pair[1])\n', (966, 975), True, 'import numpy as np\n'), ((1007, 1024), 'numpy.array', 'np.array', (['pair[2]'], {}), '(pair[2])\n', (1015, 1024), True, 'import numpy as np\n'), ((1673, 1690), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1683, 1690), True, 'import numpy as np\n'), ((1914, 1931), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1924, 1931), True, 'import numpy as np\n'), ((1588, 1604), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (1598, 1604), False, 'from PIL import Image\n'), ((2058, 2078), 'numpy.array', 'np.array', (['image.size'], {}), '(image.size)\n', (2066, 2078), True, 'import numpy as np\n'), ((4820, 4831), 'time.time', 'time.time', ([], {}), '()\n', (4829, 4831), False, 'import time\n'), ((430, 444), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (442, 444), False, 'from datetime 
import datetime\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import struct
import numpy as np
import subprocess
import matplotlib.pyplot as plt
from collections import namedtuple
# Immutable container for one simulation preset:
#   N     - number of oscillators
#   freq  - oscillator frequencies (N elements)
#   phase - oscillator phases (N elements)
#   k     - coupling coefficients (flattened NxN matrix)
DataPreset = namedtuple('DataPreset', ['N', 'k', 'freq', 'phase'])
class KuramotoSimulation(object):
    """Configuration holder and front-end for the external
    ``kuramoto_simulation`` binary.

    Attribute values set on an instance are translated into command-line
    flags by :meth:`run`.  (A large block of dead, commented-out
    serialization code — containing a ``delf`` typo and stale line
    indices — has been removed.)
    """

    def __init__(self):
        # Number of integration steps and time step.
        self.N_steps = 0
        self.dt = 0.01
        # Additive noise strength.
        self.noise = 0.0
        # How often (in steps) the simulator dumps state.
        self.dump_interval = 0
        # Coupling model name passed verbatim to the binary.
        self.coupling_type = 'kuramoto'
        # External periodic forcing parameters.
        self.forcing_strength = 0.0
        self.forcing_freq = 0.0
        # Optional time-modulation of frequencies / coupling constants.
        self.freq_modulation_enabled = False
        self.k_modulation_enabled = False

    def run(self, preset_name, r_file=None, mean_file=None, mean_vel_file=None):
        """Invoke the simulator binary with the current configuration.

        :param preset_name: preset name (see ``write_preset_to_file``).
        :param r_file: not None enables the order-parameter output; a
            non-empty string additionally names the output file.
        :param mean_file: same convention for the mean-phase output.
        :param mean_vel_file: same convention for the mean-velocity output.
        """
        cmd = ['kuramoto_simulation',
               '--quiet',
               '--preset', preset_name,
               '--steps', str(self.N_steps),
               '--dump-interval', str(self.dump_interval),
               '--dt', str(self.dt),
               '--noise', str(self.noise),
               '--coupling', self.coupling_type,
               '--forcing-strength', str(self.forcing_strength),
               '--forcing-freq', str(self.forcing_freq)]

        # All three output options follow the same pattern: the flag alone
        # enables the output, an optional non-empty string names the file.
        for flag, value in (('--r-file', r_file),
                            ('--mean-phase-file', mean_file),
                            ('--mean-vel-file', mean_vel_file)):
            if value is not None:
                cmd.append(flag)
                if isinstance(value, str) and value:
                    cmd.append(value)

        if self.k_modulation_enabled:
            cmd.append('--enable-k-modulation')
        if self.freq_modulation_enabled:
            cmd.append('--enable-freq-modulation')

        subprocess.call(cmd)
def load_preset_from_file(name):
    """Read a binary preset written by :func:`write_preset_to_file`.

    File layout of ``<name>.preset``: a 4-byte unsigned int N, followed
    by three float64 sections — N frequencies, N phases, and N*N
    coupling coefficients.

    :param name: preset name without the ``.preset`` suffix.
    :return: a ``DataPreset`` namedtuple ``(N, k, freq, phase)``.
    """
    with open(name + '.preset', 'rb') as f:
        (N,) = struct.unpack('I', f.read(4))
        freq = np.fromfile(f, dtype=np.float64, count=N)
        phase = np.fromfile(f, dtype=np.float64, count=N)
        k = np.fromfile(f, dtype=np.float64, count=N * N)
    return DataPreset(N, k, freq, phase)
def write_preset_to_file(preset_name, preset):
    """Serialize *preset* to ``<preset_name>.preset`` in binary form.

    Layout matches ``load_preset_from_file``: a uint32 N, then float64
    arrays freq (N), phase (N) and k (N*N).

    :param preset_name: preset name without the ``.preset`` suffix.
    :param preset: any object with ``N``, ``freq``, ``phase`` and ``k``
        attributes (normally a DataPreset).
    """
    preset_file_name = preset_name + '.preset'
    # Drop any stale file first.  Only OS-level errors (e.g. file does
    # not exist) are ignored — the old broad ``except Exception: pass``
    # could hide real bugs.
    try:
        os.remove(preset_file_name)
    except OSError:
        pass
    with open(preset_file_name, 'wb') as f:
        f.write(struct.pack('I', preset.N))
        preset.freq.astype(np.float64).tofile(f)
        preset.phase.astype(np.float64).tofile(f)
        preset.k.astype(np.float64).tofile(f)
def write_freq_modul_data_to_file(file_name, freq_ampl, freq_freq, freq_offset):
    """Write frequency-modulation parameters to ``<file_name>.fm.preset``.

    The three arrays are stored back to back as raw float64 data, in the
    order: amplitude, frequency, offset.
    """
    with open(file_name + '.fm.preset', 'wb') as f:
        for block in (freq_ampl, freq_freq, freq_offset):
            block.astype(np.float64).tofile(f)
def write_k_modul_data_to_file(file_name, k_ampl, k_freq, k_offset):
    """Write coupling-modulation parameters to ``<file_name>.km.preset``.

    The three arrays are stored back to back as raw float64 data, in the
    order: amplitude, frequency, offset.
    """
    with open(file_name + '.km.preset', 'wb') as f:
        for block in (k_ampl, k_freq, k_offset):
            block.astype(np.float64).tofile(f)
def save_plot(path, ext='png', close=True):
    """Save the current matplotlib figure to *path* with extension *ext*.

    Creates the target directory if it does not exist; a bare file name
    saves into the current directory.  The figure is closed afterwards
    unless *close* is False.
    """
    directory, base = os.path.split(path)
    if not directory:
        directory = '.'
    if not os.path.exists(directory):
        os.makedirs(directory)
    savepath = os.path.join(directory, "{}.{}".format(base, ext))
    plt.savefig(savepath)
    if close:
        plt.close()
| [
"os.path.exists",
"numpy.fromfile",
"collections.namedtuple",
"matplotlib.pyplot.savefig",
"os.makedirs",
"os.path.join",
"struct.pack",
"os.path.split",
"matplotlib.pyplot.close",
"subprocess.call",
"os.remove"
] | [((351, 404), 'collections.namedtuple', 'namedtuple', (['"""DataPreset"""', "['N', 'k', 'freq', 'phase']"], {}), "('DataPreset', ['N', 'k', 'freq', 'phase'])\n", (361, 404), False, 'from collections import namedtuple\n'), ((5386, 5419), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (5398, 5419), False, 'import os\n'), ((5472, 5493), 'matplotlib.pyplot.savefig', 'plt.savefig', (['savepath'], {}), '(savepath)\n', (5483, 5493), True, 'import matplotlib.pyplot as plt\n'), ((2019, 2039), 'subprocess.call', 'subprocess.call', (['cmd'], {}), '(cmd)\n', (2034, 2039), False, 'import subprocess\n'), ((3959, 4000), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.float64', 'count': 'N'}), '(f, dtype=np.float64, count=N)\n', (3970, 4000), True, 'import numpy as np\n'), ((4017, 4058), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.float64', 'count': 'N'}), '(f, dtype=np.float64, count=N)\n', (4028, 4058), True, 'import numpy as np\n'), ((4071, 4116), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.float64', 'count': '(N * N)'}), '(f, dtype=np.float64, count=N * N)\n', (4082, 4116), True, 'import numpy as np\n'), ((4274, 4301), 'os.remove', 'os.remove', (['preset_file_name'], {}), '(preset_file_name)\n', (4283, 4301), False, 'import os\n'), ((5176, 5195), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (5189, 5195), False, 'import os\n'), ((5313, 5338), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (5327, 5338), False, 'import os\n'), ((5348, 5370), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (5359, 5370), False, 'import os\n'), ((5516, 5527), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5525, 5527), True, 'import matplotlib.pyplot as plt\n'), ((4406, 4432), 'struct.pack', 'struct.pack', (['"""I"""', 'preset.N'], {}), "('I', preset.N)\n", (4417, 4432), False, 'import struct\n'), ((5225, 5244), 'os.path.split', 'os.path.split', 
(['path'], {}), '(path)\n', (5238, 5244), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 10 17:19:24 2021
@author: tungdang
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 10 14:16:33 2021
@author: tungbioinfo
"""
import warnings
from abc import ABCMeta, abstractmethod
from time import time
import math
from scipy.misc import factorial
import numpy as np
import numpy.ma as ma
import pandas as pd
from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma
from scipy import linalg
from sklearn.utils import check_array, check_random_state
from sklearn.utils.validation import _deprecate_positional_args, check_is_fitted
from sklearn import cluster
from sklearn.utils.extmath import row_norms
#------------------------------------------------------------------------------
# Check gamma + delta update
#------------------------------------------------------------------------------
# NOTE(review): scratch/verification script, not importable as-is -- it relies
# on names (X, resp, select, weight_concentration_prior, select_prior) that are
# only defined in an interactive session. Presumably X is a non-negative count
# matrix of shape (n_samples, n_features), resp the (n_samples, n_components)
# responsibilities and select a 0/1 feature-selection mask -- TODO confirm.
gamma_vi = np.ones((n_components, n_features))  # variational shape parameters, one row per component
delta_vi = np.ones((n_components, n_features))
n_components = 5
n_samples, n_features = X.shape
# Effective per-component/feature counts; 10*eps guards empty components.
nk = np.dot(resp.T, select) + 10 * np.finfo(resp.dtype).eps
gamma = np.ones((n_components, n_features))  # prior for the gamma update below
delta = np.ones((n_components, n_features))
# Normalised component means (each row sums to 1).
means_ = gamma_vi / gamma_vi.sum(axis=1)[:,np.newaxis]
part_1 = np.dot(resp.T, select) * means_ * digamma(means_.sum(axis=1))[:, np.newaxis]
# Row totals of X shifted by each component mean, fed to digamma below.
dig_sum_1 = np.empty((n_samples, n_components))
for k in range(n_components):
    y = X + means_[k]
    dig_sum_1[:,k] = np.sum(y, axis=1)
part_2 = np.dot((resp * digamma(dig_sum_1)).T, select) * means_
#dig_sum_1_ = digamma(dig_sum_1)
#diff_1 = digamma(means_.sum(axis=1)) - np.sum(digamma(dig_sum_1), axis=0)
"""
part_ = np.empty((n_components, n_samples, n_features))
for k in range(n_components):
y = X + means_[k]
y = select * digamma(y) * resp[:,k][:, np.newaxis]
part_[k] = y
part_3_ = np.sum(part_, axis=1)
"""
part_3 = np.empty((n_components, n_features))
for k in range(n_components):
    y = X + means_[k]
    y = select * digamma(y) * resp[:,k][:, np.newaxis]
    part_3[k] = np.sum(y, axis=0)
part_3 = part_3 * means_
part_4 = np.dot(resp.T, select) * means_ * digamma(means_)
# Variational update for gamma; the commented variant drops the part_3 term.
gamma_vi = gamma + part_1 - part_2 + part_3 - part_4
#gamma_vi = gamma + part_1 - part_2 - part_4
gamma_dig = digamma(gamma_vi)
delta_vi = gamma_vi
#------------------------------------------------------------------------------
# Check iota + kappa update
#------------------------------------------------------------------------------
# Single shared "rejection" distribution over features; (1-select) weights
# the currently rejected features.
iota = np.ones((n_features))
kappa = np.ones((n_features))
iota_vi = np.ones((n_features))
kappa_vi = np.ones((n_features))
means_rj = iota_vi / iota_vi.sum()
part_1 = ((1-select) * means_rj * digamma(means_rj.sum())).sum(axis=0)
y = X + means_rj
part_2 = ((1-select) * means_rj * digamma(y.sum(axis=1))[:,np.newaxis]).sum(axis=0)
part_3 = ((1-select) * means_rj * digamma(y)).sum(axis=0)
part_4 = ((1-select) * means_rj * digamma(means_rj)).sum(axis=0)
iota_vi = iota + part_1 - part_2 + part_3 - part_4
#iota_vi = iota + part_1 - part_2 - part_4
iota_dig = digamma(iota_vi)
kappa_vi = iota_vi
#------------------------------------------------------------------------------
# Check log DM update
#------------------------------------------------------------------------------
# Expected Dirichlet-multinomial log-likelihood per (sample, component),
# built from the four part_* terms plus the log(1/x!) correction.
means_ = gamma_vi / gamma_vi.sum(axis=1)[:,np.newaxis]
log_means_ = np.log(means_)
#log_means_ = np.where(np.isnan(log_means_), ma.array(log_means_, mask=np.isnan(log_means_)).mean(axis=0), log_means_)
sum_1_1 = np.empty((n_samples, n_components))
for k in range(n_components):
    y = X + means_[k]
    sum_1_1[:, k] = np.sum(y, axis=1)
part_1 = gammaln(means_.sum(axis=1)) - gammaln(sum_1_1)
part_1 = select.sum(axis=1)[:, np.newaxis] * part_1
sum_2_1 = means_ * (digamma(gamma_vi) - digamma(gamma_vi.sum(axis=1))[:, np.newaxis] - log_means_) * digamma(means_.sum(axis=1))[:, np.newaxis]
sum_2_1 = np.dot(select, sum_2_1.T)
sum_2_2 = np.empty((n_samples, n_components))
for k in range(n_components):
    y = X + means_[k]
    sum_2_2[:, k] = np.sum(y, axis=1)
sum_2_2 = digamma(sum_2_2) * np.dot(select, (means_ * (digamma(gamma_vi) - digamma(gamma_vi.sum(axis=1))[:, np.newaxis] - log_means_)).T)
part_2 = sum_2_1 - sum_2_2
sum_3_1 = np.empty((n_samples, n_components))
for k in range(n_components):
    y = X + means_[k]
    y = select * gammaln(y)
    sum_3_1[:, k] = np.sum(y, axis=1)
sum_3_2 = np.dot(select, gammaln(means_).T)
part_3 = sum_3_1 - sum_3_2
sum_4_1 = np.empty((n_samples, n_components))
for k in range(n_components):
    y = X + means_[k]
    y = select * digamma(y) * means_[k] * (digamma(gamma_vi)[k] - digamma(gamma_vi.sum(axis=1))[k] - log_means_[k])
    sum_4_1[:, k] = np.sum(y, axis=1)
sum_4_2 = np.dot(select, (means_ * digamma(means_) * (digamma(gamma_vi) - digamma(gamma_vi.sum(axis=1))[:, np.newaxis] - log_means_)).T)
part_4 = sum_4_1 - sum_4_2
#part_4 = sum_4_2
# log(1/x!) term of the likelihood; the 1e-6 guards log(0) for large counts.
X_fact = np.empty((n_samples, n_features))
for i in range(n_samples):
    for j in range(n_features):
        X_fact[i][j] = select[i][j] * np.log(1/(math.factorial(X[i][j])) + 1e-6)
estimate_log_dm = part_1 + part_2 + part_3 + part_4 + X_fact.sum(axis=1)[:, np.newaxis]
#estimate_log_dm = part_2 + part_3 + part_4 + X_fact.sum(axis=1)[:, np.newaxis]
#------------------------------------------------------------------------------
# Check estimate log weight
#------------------------------------------------------------------------------
# Stick-breaking (truncated Dirichlet process) weight update, mirroring
# sklearn's BayesianGaussianMixture machinery.
nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
weight_concentration_ = (
    1. + nk,
    (weight_concentration_prior +
     np.hstack((np.cumsum(nk[::-1])[-2::-1], 0))))
digamma_sum = digamma(weight_concentration_[0] +
                      weight_concentration_[1])
digamma_a = digamma(weight_concentration_[0])
digamma_b = digamma(weight_concentration_[1])
estimate_log_weight = (digamma_a - digamma_sum +
                       np.hstack((0, np.cumsum(digamma_b - digamma_sum)[:-1])))
#------------------------------------------------------------------------------
# Check estimate allocator variables
#------------------------------------------------------------------------------
# Responsibilities: softmax of the weighted log-probabilities per sample.
weighted_log_prob = estimate_log_dm + estimate_log_weight
log_prob_norm = logsumexp(weighted_log_prob, axis = 1)
with np.errstate(under = 'ignore'):
    log_resp = weighted_log_prob - log_prob_norm[:, np.newaxis]
resp_update = np.exp(log_resp)
log_resp_max = log_resp.argmax(axis=1)
#------------------------------------------------------------------------------
# Check estimate dirichlet selected
#------------------------------------------------------------------------------
# Beta posterior parameters of the per-feature selection probability.
xi1 = select_prior + select.sum(axis=0)
xi2 = select_prior + (1 - select).sum(axis=0)
#------------------------------------------------------------------------------
# Check log selection update
#------------------------------------------------------------------------------
# NOTE(review): continues the interactive checking script; relies on X, resp,
# select, gamma_vi, iota_vi, xi1 and xi2 from earlier in the session.
means_ = gamma_vi / gamma_vi.sum(axis=1)[:,np.newaxis]
log_means_ = np.log(means_)
# Per-sample totals of X shifted by each component mean.
sum_1_1 = np.empty((n_samples, n_components))
for k in range(n_components):
    y = X + means_[k]
    sum_1_1[:, k] = np.sum(y, axis=1)
part_1 = (resp * gammaln(means_.sum(axis=1)) - resp * gammaln(sum_1_1)).sum(axis=1)
part_1 = part_1 / part_1.sum()
sum_2_1 = means_ * (digamma(gamma_vi) - digamma(gamma_vi.sum(axis=1))[:, np.newaxis] - log_means_) * digamma(means_.sum(axis=1))[:, np.newaxis]
sum_2_1 = np.dot(resp, sum_2_1)
sum_2_2 = np.empty((n_samples, n_components))
for k in range(n_components):
    y = X + means_[k]
    sum_2_2[:, k] = np.sum(y, axis=1)
sum_2_2 = np.dot((resp * digamma(sum_2_2)), (means_ * (digamma(gamma_vi) - digamma(gamma_vi.sum(axis=1))[:, np.newaxis] - log_means_)))
part_2 = sum_2_1 - sum_2_2
sum_3_1 = np.empty((n_components, n_samples, n_features))
for k in range(n_components):
    y = X + means_[k]
    y = resp[:,k][:, np.newaxis] * gammaln(y)
    sum_3_1[k] = y
sum_3_1 = np.sum(sum_3_1, axis=0)
sum_3_2 = np.dot(resp, gammaln(means_))
part_3 = sum_3_1 - sum_3_2
sum_4_1 = np.empty((n_components, n_samples, n_features))
for k in range(n_components):
    y = X + means_[k]
    y = resp[:,k][:, np.newaxis] * digamma(y) * means_[k] * (digamma(gamma_vi)[k] - digamma(gamma_vi.sum(axis=1))[k] - log_means_[k])
    sum_4_1[k] = y
sum_4_1 = np.sum(sum_4_1, axis=0)
sum_4_2 = np.dot(resp, (means_ * digamma(means_) * (digamma(gamma_vi) - digamma(gamma_vi.sum(axis=1))[:, np.newaxis] - log_means_)))
part_4 = sum_4_1 - sum_4_2
#part_4 = sum_4_2
# log(1/x!) term weighted by total responsibility; the 1e-6 guards log(0).
X_fact = np.empty((n_samples, n_features))
resp_ = resp.sum(axis=1)
for i in range(n_samples):
    for j in range(n_features):
        X_fact[i][j] = resp_[i] * np.log(1/(math.factorial(X[i][j])) + 1e-6)
        #X_fact[i][j] = 1/(math.factorial(X[i][j]))
"""
X_fact_3d = np.empty((n_components, n_samples, n_features))
for k in range(n_components):
for i in range(n_samples):
for j in range(n_features):
X_fact_3d[k][i][j] = resp[i][k] * np.log(1/(math.factorial(X[i][j])) + 1e-6)
#X_fact[i][j] = 1/(math.factorial(X[i][j]))
X_fact_ = np.sum(X_fact_3d, axis=0)
"""
#estimate_log_select = part_1[:, np.newaxis] + part_2 + part_3 + part_4 + X_fact
estimate_log_select = part_2 + part_3 + part_4 + X_fact
estimate_log_select = estimate_log_select + (digamma(xi1) - digamma(xi1 + xi2))
# NOTE(review): column index 2323 is a hard-coded spot-check of one feature.
check = pd.DataFrame(data={"X0":part_1 ,"X1":part_2[:, 2323], "X2":part_3[:, 2323], "X3":part_4[:, 2323]})
#------------------------------------------------------------------------------
# Check log rejection update
#------------------------------------------------------------------------------
# Same construction for the shared rejection distribution (iota_vi).
means_ = iota_vi / iota_vi.sum()
log_means_ = np.log(means_)
part_1_ = gammaln(means_.sum()) - gammaln((X + means_).sum(axis=1))
part_1_ = part_1_ / part_1_.sum()
sum_2_1_ = means_ * (digamma(iota_vi) - digamma(iota_vi.sum()) - log_means_) * digamma(means_.sum())
sum_2_2_ = means_ * (digamma(iota_vi) - digamma(iota_vi.sum()) - log_means_) * digamma((X + means_).sum(axis=1))[:, np.newaxis]
part_2_ = sum_2_1_ - sum_2_2_
part_3_ = gammaln((X + means_)) - gammaln(means_)
sum_4_1_ = means_ * (digamma(iota_vi) - digamma(iota_vi.sum()) - log_means_) * digamma((X + means_))
sum_4_2_ = means_ * (digamma(iota_vi) - digamma(iota_vi.sum()) - log_means_) * digamma((means_))
part_4_ = sum_4_1_ - sum_4_2_
#part_4 = sum_4_2
X_fact = np.empty((n_samples, n_features))
for i in range(n_samples):
    for j in range(n_features):
        X_fact[i][j] = np.log(1/math.factorial(X[i][j]) + 1e-6)
"""
part3_row_max = np.empty((n_samples))
for i in range(n_samples):
part3_row_max[i] = np.max(part_3[i])
"""
#estimate_log_reject = part_1_[:, np.newaxis] + part_2_ + part_3_ + part_4_ + X_fact
estimate_log_reject = part_2_ + part_3_ + part_4_ + X_fact
estimate_log_reject = estimate_log_reject + (digamma(xi2) - digamma(xi1 + xi2))
#------------------------------------------------------------------------------
# Check estimate selection variables
#------------------------------------------------------------------------------
def sigmoid(x):
    """Numerically stable elementwise logistic function.

    Computes 1 / (1 + exp(-x)) without overflow by always exponentiating a
    non-positive argument: for x >= 0 it evaluates 1 / (1 + exp(-x)), and
    for x < 0 the equivalent exp(x) / (1 + exp(x)).

    Parameters
    ----------
    x : array-like
        Input values. Any shape is accepted (the previous implementation
        required a 2-D array and looped over it element by element in
        Python; this vectorized form is equivalent and generalizes).

    Returns
    -------
    ndarray
        Array of the same shape with values in (0, 1).
    """
    x = np.asarray(x)
    # exp of -|x| is always in (0, 1], so neither branch can overflow.
    z = np.exp(-np.abs(x))
    return np.where(x >= 0, 1.0 / (1.0 + z), z / (1.0 + z))
# Compare two ways of turning the log evidences into selection probabilities:
# plain exp (with posinf clipped to 1) versus the stable sigmoid above.
# NOTE(review): the divisor 336 is presumably the hard-coded n_samples of the
# session dataset -- TODO confirm and replace with n_samples.
select_exp = np.exp(estimate_log_select)
select_exp = np.nan_to_num(select_exp, posinf=1)
select_exp_ = sigmoid(estimate_log_select)
select_ = select_exp.sum(axis=0)/336
reject_exp = np.exp(estimate_log_reject)
reject_exp = np.nan_to_num(reject_exp, posinf=1)
reject_exp_ = sigmoid(estimate_log_reject)
reject_ = reject_exp_.sum(axis=0)/336
# Normalised selection probability per feature; 1e-6 avoids division by zero.
select_update = (select_exp + 1e-6) / (select_exp + reject_exp + 1e-6)
select_update_ = select_update.sum(axis=0)/336
select_update_sig = (select_exp_ + 1e-6) / (select_exp_ + reject_exp_ + 1e-6)
select_update_sig_ = select_update_sig.sum(axis=0)/336
| [
"scipy.special.digamma",
"scipy.special.gammaln",
"numpy.ones",
"math.factorial",
"numpy.log",
"numpy.exp",
"numpy.sum",
"numpy.dot",
"numpy.errstate",
"numpy.empty",
"numpy.finfo",
"pandas.DataFrame",
"numpy.cumsum",
"scipy.special.logsumexp",
"numpy.nan_to_num"
] | [((924, 959), 'numpy.ones', 'np.ones', (['(n_components, n_features)'], {}), '((n_components, n_features))\n', (931, 959), True, 'import numpy as np\n'), ((972, 1007), 'numpy.ones', 'np.ones', (['(n_components, n_features)'], {}), '((n_components, n_features))\n', (979, 1007), True, 'import numpy as np\n'), ((1128, 1163), 'numpy.ones', 'np.ones', (['(n_components, n_features)'], {}), '((n_components, n_features))\n', (1135, 1163), True, 'import numpy as np\n'), ((1173, 1208), 'numpy.ones', 'np.ones', (['(n_components, n_features)'], {}), '((n_components, n_features))\n', (1180, 1208), True, 'import numpy as np\n'), ((1366, 1401), 'numpy.empty', 'np.empty', (['(n_samples, n_components)'], {}), '((n_samples, n_components))\n', (1374, 1401), True, 'import numpy as np\n'), ((1902, 1938), 'numpy.empty', 'np.empty', (['(n_components, n_features)'], {}), '((n_components, n_features))\n', (1910, 1938), True, 'import numpy as np\n'), ((2287, 2304), 'scipy.special.digamma', 'digamma', (['gamma_vi'], {}), '(gamma_vi)\n', (2294, 2304), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((2525, 2544), 'numpy.ones', 'np.ones', (['n_features'], {}), '(n_features)\n', (2532, 2544), True, 'import numpy as np\n'), ((2556, 2575), 'numpy.ones', 'np.ones', (['n_features'], {}), '(n_features)\n', (2563, 2575), True, 'import numpy as np\n'), ((2590, 2609), 'numpy.ones', 'np.ones', (['n_features'], {}), '(n_features)\n', (2597, 2609), True, 'import numpy as np\n'), ((2624, 2643), 'numpy.ones', 'np.ones', (['n_features'], {}), '(n_features)\n', (2631, 2643), True, 'import numpy as np\n'), ((3089, 3105), 'scipy.special.digamma', 'digamma', (['iota_vi'], {}), '(iota_vi)\n', (3096, 3105), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((3378, 3392), 'numpy.log', 'np.log', (['means_'], {}), '(means_)\n', (3384, 3392), True, 'import numpy as np\n'), ((3523, 3558), 'numpy.empty', 'np.empty', (['(n_samples, 
n_components)'], {}), '((n_samples, n_components))\n', (3531, 3558), True, 'import numpy as np\n'), ((3913, 3938), 'numpy.dot', 'np.dot', (['select', 'sum_2_1.T'], {}), '(select, sum_2_1.T)\n', (3919, 3938), True, 'import numpy as np\n'), ((3949, 3984), 'numpy.empty', 'np.empty', (['(n_samples, n_components)'], {}), '((n_samples, n_components))\n', (3957, 3984), True, 'import numpy as np\n'), ((4251, 4286), 'numpy.empty', 'np.empty', (['(n_samples, n_components)'], {}), '((n_samples, n_components))\n', (4259, 4286), True, 'import numpy as np\n'), ((4487, 4522), 'numpy.empty', 'np.empty', (['(n_samples, n_components)'], {}), '((n_samples, n_components))\n', (4495, 4522), True, 'import numpy as np\n'), ((4921, 4954), 'numpy.empty', 'np.empty', (['(n_samples, n_features)'], {}), '((n_samples, n_features))\n', (4929, 4954), True, 'import numpy as np\n'), ((5681, 5741), 'scipy.special.digamma', 'digamma', (['(weight_concentration_[0] + weight_concentration_[1])'], {}), '(weight_concentration_[0] + weight_concentration_[1])\n', (5688, 5741), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((5785, 5818), 'scipy.special.digamma', 'digamma', (['weight_concentration_[0]'], {}), '(weight_concentration_[0])\n', (5792, 5818), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((5831, 5864), 'scipy.special.digamma', 'digamma', (['weight_concentration_[1]'], {}), '(weight_concentration_[1])\n', (5838, 5864), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((6270, 6306), 'scipy.special.logsumexp', 'logsumexp', (['weighted_log_prob'], {'axis': '(1)'}), '(weighted_log_prob, axis=1)\n', (6279, 6306), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((6424, 6440), 'numpy.exp', 'np.exp', (['log_resp'], {}), '(log_resp)\n', (6430, 6440), True, 'import numpy as np\n'), ((7025, 7039), 'numpy.log', 'np.log', (['means_'], {}), '(means_)\n', 
(7031, 7039), True, 'import numpy as np\n'), ((7051, 7086), 'numpy.empty', 'np.empty', (['(n_samples, n_components)'], {}), '((n_samples, n_components))\n', (7059, 7086), True, 'import numpy as np\n'), ((7449, 7470), 'numpy.dot', 'np.dot', (['resp', 'sum_2_1'], {}), '(resp, sum_2_1)\n', (7455, 7470), True, 'import numpy as np\n'), ((7482, 7517), 'numpy.empty', 'np.empty', (['(n_samples, n_components)'], {}), '((n_samples, n_components))\n', (7490, 7517), True, 'import numpy as np\n'), ((7783, 7830), 'numpy.empty', 'np.empty', (['(n_components, n_samples, n_features)'], {}), '((n_components, n_samples, n_features))\n', (7791, 7830), True, 'import numpy as np\n'), ((7958, 7981), 'numpy.sum', 'np.sum', (['sum_3_1'], {'axis': '(0)'}), '(sum_3_1, axis=0)\n', (7964, 7981), True, 'import numpy as np\n'), ((8065, 8112), 'numpy.empty', 'np.empty', (['(n_components, n_samples, n_features)'], {}), '((n_components, n_samples, n_features))\n', (8073, 8112), True, 'import numpy as np\n'), ((8328, 8351), 'numpy.sum', 'np.sum', (['sum_4_1'], {'axis': '(0)'}), '(sum_4_1, axis=0)\n', (8334, 8351), True, 'import numpy as np\n'), ((8540, 8573), 'numpy.empty', 'np.empty', (['(n_samples, n_features)'], {}), '((n_samples, n_features))\n', (8548, 8573), True, 'import numpy as np\n'), ((9369, 9476), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'X0': part_1, 'X1': part_2[:, 2323], 'X2': part_3[:, 2323], 'X3': part_4[:,\n 2323]}"}), "(data={'X0': part_1, 'X1': part_2[:, 2323], 'X2': part_3[:, \n 2323], 'X3': part_4[:, 2323]})\n", (9381, 9476), True, 'import pandas as pd\n'), ((9706, 9720), 'numpy.log', 'np.log', (['means_'], {}), '(means_)\n', (9712, 9720), True, 'import numpy as np\n'), ((10398, 10431), 'numpy.empty', 'np.empty', (['(n_samples, n_features)'], {}), '((n_samples, n_features))\n', (10406, 10431), True, 'import numpy as np\n'), ((11561, 11588), 'numpy.exp', 'np.exp', (['estimate_log_select'], {}), '(estimate_log_select)\n', (11567, 11588), True, 'import numpy as np\n'), 
((11602, 11637), 'numpy.nan_to_num', 'np.nan_to_num', (['select_exp'], {'posinf': '(1)'}), '(select_exp, posinf=1)\n', (11615, 11637), True, 'import numpy as np\n'), ((11733, 11760), 'numpy.exp', 'np.exp', (['estimate_log_reject'], {}), '(estimate_log_reject)\n', (11739, 11760), True, 'import numpy as np\n'), ((11774, 11809), 'numpy.nan_to_num', 'np.nan_to_num', (['reject_exp'], {'posinf': '(1)'}), '(reject_exp, posinf=1)\n', (11787, 11809), True, 'import numpy as np\n'), ((1065, 1087), 'numpy.dot', 'np.dot', (['resp.T', 'select'], {}), '(resp.T, select)\n', (1071, 1087), True, 'import numpy as np\n'), ((1475, 1492), 'numpy.sum', 'np.sum', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (1481, 1492), True, 'import numpy as np\n'), ((2062, 2079), 'numpy.sum', 'np.sum', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (2068, 2079), True, 'import numpy as np\n'), ((2153, 2168), 'scipy.special.digamma', 'digamma', (['means_'], {}), '(means_)\n', (2160, 2168), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((3631, 3648), 'numpy.sum', 'np.sum', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (3637, 3648), True, 'import numpy as np\n'), ((3689, 3705), 'scipy.special.gammaln', 'gammaln', (['sum_1_1'], {}), '(sum_1_1)\n', (3696, 3705), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((4057, 4074), 'numpy.sum', 'np.sum', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (4063, 4074), True, 'import numpy as np\n'), ((4085, 4101), 'scipy.special.digamma', 'digamma', (['sum_2_2'], {}), '(sum_2_2)\n', (4092, 4101), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((4387, 4404), 'numpy.sum', 'np.sum', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (4393, 4404), True, 'import numpy as np\n'), ((4711, 4728), 'numpy.sum', 'np.sum', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (4717, 4728), True, 'import numpy as np\n'), ((6314, 6341), 'numpy.errstate', 'np.errstate', ([], {'under': '"""ignore"""'}), 
"(under='ignore')\n", (6325, 6341), True, 'import numpy as np\n'), ((7159, 7176), 'numpy.sum', 'np.sum', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (7165, 7176), True, 'import numpy as np\n'), ((7590, 7607), 'numpy.sum', 'np.sum', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (7596, 7607), True, 'import numpy as np\n'), ((8009, 8024), 'scipy.special.gammaln', 'gammaln', (['means_'], {}), '(means_)\n', (8016, 8024), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((10099, 10118), 'scipy.special.gammaln', 'gammaln', (['(X + means_)'], {}), '(X + means_)\n', (10106, 10118), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((10123, 10138), 'scipy.special.gammaln', 'gammaln', (['means_'], {}), '(means_)\n', (10130, 10138), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((10220, 10239), 'scipy.special.digamma', 'digamma', (['(X + means_)'], {}), '(X + means_)\n', (10227, 10239), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((10321, 10336), 'scipy.special.digamma', 'digamma', (['means_'], {}), '(means_)\n', (10328, 10336), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((11202, 11235), 'numpy.empty', 'np.empty', (['(n_samples, n_features)'], {}), '((n_samples, n_features))\n', (11210, 11235), True, 'import numpy as np\n'), ((1276, 1298), 'numpy.dot', 'np.dot', (['resp.T', 'select'], {}), '(resp.T, select)\n', (1282, 1298), True, 'import numpy as np\n'), ((2119, 2141), 'numpy.dot', 'np.dot', (['resp.T', 'select'], {}), '(resp.T, select)\n', (2125, 2141), True, 'import numpy as np\n'), ((4356, 4366), 'scipy.special.gammaln', 'gammaln', (['y'], {}), '(y)\n', (4363, 4366), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((4430, 4445), 'scipy.special.gammaln', 'gammaln', (['means_'], {}), '(means_)\n', (4437, 4445), False, 'from scipy.special import 
betaln, digamma, gammaln, logsumexp, polygamma\n'), ((7633, 7649), 'scipy.special.digamma', 'digamma', (['sum_2_2'], {}), '(sum_2_2)\n', (7640, 7649), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((7918, 7928), 'scipy.special.gammaln', 'gammaln', (['y'], {}), '(y)\n', (7925, 7928), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((9325, 9337), 'scipy.special.digamma', 'digamma', (['xi1'], {}), '(xi1)\n', (9332, 9337), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((9340, 9358), 'scipy.special.digamma', 'digamma', (['(xi1 + xi2)'], {}), '(xi1 + xi2)\n', (9347, 9358), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((10865, 10877), 'scipy.special.digamma', 'digamma', (['xi2'], {}), '(xi2)\n', (10872, 10877), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((10880, 10898), 'scipy.special.digamma', 'digamma', (['(xi1 + xi2)'], {}), '(xi1 + xi2)\n', (10887, 10898), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((1095, 1115), 'numpy.finfo', 'np.finfo', (['resp.dtype'], {}), '(resp.dtype)\n', (1103, 1115), True, 'import numpy as np\n'), ((2008, 2018), 'scipy.special.digamma', 'digamma', (['y'], {}), '(y)\n', (2015, 2018), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((2892, 2902), 'scipy.special.digamma', 'digamma', (['y'], {}), '(y)\n', (2899, 2902), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((2951, 2968), 'scipy.special.digamma', 'digamma', (['means_rj'], {}), '(means_rj)\n', (2958, 2968), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((5483, 5503), 'numpy.finfo', 'np.finfo', (['resp.dtype'], {}), '(resp.dtype)\n', (5491, 5503), True, 'import numpy as np\n'), ((8385, 8400), 'scipy.special.digamma', 'digamma', 
(['means_'], {}), '(means_)\n', (8392, 8400), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((1517, 1535), 'scipy.special.digamma', 'digamma', (['dig_sum_1'], {}), '(dig_sum_1)\n', (1524, 1535), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((3779, 3796), 'scipy.special.digamma', 'digamma', (['gamma_vi'], {}), '(gamma_vi)\n', (3786, 3796), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((4592, 4602), 'scipy.special.digamma', 'digamma', (['y'], {}), '(y)\n', (4599, 4602), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((4764, 4779), 'scipy.special.digamma', 'digamma', (['means_'], {}), '(means_)\n', (4771, 4779), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((5954, 5988), 'numpy.cumsum', 'np.cumsum', (['(digamma_b - digamma_sum)'], {}), '(digamma_b - digamma_sum)\n', (5963, 5988), True, 'import numpy as np\n'), ((7232, 7248), 'scipy.special.gammaln', 'gammaln', (['sum_1_1'], {}), '(sum_1_1)\n', (7239, 7248), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((7315, 7332), 'scipy.special.digamma', 'digamma', (['gamma_vi'], {}), '(gamma_vi)\n', (7322, 7332), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((7663, 7680), 'scipy.special.digamma', 'digamma', (['gamma_vi'], {}), '(gamma_vi)\n', (7670, 7680), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((8200, 8210), 'scipy.special.digamma', 'digamma', (['y'], {}), '(y)\n', (8207, 8210), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((8404, 8421), 'scipy.special.digamma', 'digamma', (['gamma_vi'], {}), '(gamma_vi)\n', (8411, 8421), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((9849, 9865), 'scipy.special.digamma', 'digamma', 
(['iota_vi'], {}), '(iota_vi)\n', (9856, 9865), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((9950, 9966), 'scipy.special.digamma', 'digamma', (['iota_vi'], {}), '(iota_vi)\n', (9957, 9966), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((10162, 10178), 'scipy.special.digamma', 'digamma', (['iota_vi'], {}), '(iota_vi)\n', (10169, 10178), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((10263, 10279), 'scipy.special.digamma', 'digamma', (['iota_vi'], {}), '(iota_vi)\n', (10270, 10279), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((11358, 11374), 'numpy.exp', 'np.exp', (['(-x[i][j])'], {}), '(-x[i][j])\n', (11364, 11374), True, 'import numpy as np\n'), ((11463, 11478), 'numpy.exp', 'np.exp', (['x[i][j]'], {}), '(x[i][j])\n', (11469, 11478), True, 'import numpy as np\n'), ((4618, 4635), 'scipy.special.digamma', 'digamma', (['gamma_vi'], {}), '(gamma_vi)\n', (4625, 4635), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((4783, 4800), 'scipy.special.digamma', 'digamma', (['gamma_vi'], {}), '(gamma_vi)\n', (4790, 4800), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((5631, 5650), 'numpy.cumsum', 'np.cumsum', (['nk[::-1]'], {}), '(nk[::-1])\n', (5640, 5650), True, 'import numpy as np\n'), ((8226, 8243), 'scipy.special.digamma', 'digamma', (['gamma_vi'], {}), '(gamma_vi)\n', (8233, 8243), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((10523, 10546), 'math.factorial', 'math.factorial', (['X[i][j]'], {}), '(X[i][j])\n', (10537, 10546), False, 'import math\n'), ((4130, 4147), 'scipy.special.digamma', 'digamma', (['gamma_vi'], {}), '(gamma_vi)\n', (4137, 4147), False, 'from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma\n'), ((5062, 5085), 'math.factorial', 'math.factorial', 
(['X[i][j]'], {}), '(X[i][j])\n', (5076, 5085), False, 'import math\n'), ((8702, 8725), 'math.factorial', 'math.factorial', (['X[i][j]'], {}), '(X[i][j])\n', (8716, 8725), False, 'import math\n')] |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
class RandomPixelShuffle(object):
  """Applies one fixed, seed-determined permutation to all pixels/channels.

  The permutation over the flattened (C, H, W) image is drawn once at
  construction time, so every tensor passed through the transform is
  scrambled in exactly the same way.
  """

  def __init__(self, image_chw, seed):
    n_elements = np.prod(image_chw)
    shuffled = np.random.RandomState(seed=seed).permutation(n_elements)
    self.image_chw = image_chw
    self.perm = torch.from_numpy(shuffled)

  def __call__(self, tensor):
    flat = torch.flatten(tensor)
    return torch.reshape(flat[self.perm], self.image_chw)
class RandomBlockShuffle(object):
  """Permutes the non-overlapping (block_size x block_size) tiles of an image.

  A single permutation of tile positions is drawn from the given seed at
  construction time and reused for every call. The image side length must
  be an exact multiple of the tile side length.
  """

  def __init__(self, image_size, block_size, seed):
    if image_size % block_size != 0:
      raise KeyError(f'RandomBlockShuffle: image size {image_size} cannot be divided by block size {block_size}')
    self.image_size = image_size
    self.block_size = block_size
    blocks_per_side = image_size // block_size
    self.n_blocks = blocks_per_side * blocks_per_side
    shuffled = np.random.RandomState(seed=seed).permutation(self.n_blocks)
    self.perm = torch.from_numpy(shuffled)
    # Unfold extracts the tiles as columns; Fold reassembles them into an image.
    self.unfold_op = torch.nn.Unfold(kernel_size=block_size, stride=block_size)
    self.fold_op = torch.nn.Fold(output_size=image_size, kernel_size=block_size, stride=block_size)

  def __call__(self, tensor):
    tiles = self.unfold_op(tensor.unsqueeze(0))  # (1, C*block_size**2, n_blocks)
    assert tiles.size(2) == self.n_blocks
    shuffled_tiles = tiles[..., self.perm]       # reorder tile columns
    return self.fold_op(shuffled_tiles).squeeze(0)
| [
"numpy.prod",
"torch.nn.Unfold",
"numpy.random.RandomState",
"torch.nn.Fold",
"torch.flatten"
] | [((754, 786), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (775, 786), True, 'import numpy as np\n'), ((803, 821), 'numpy.prod', 'np.prod', (['image_chw'], {}), '(image_chw)\n', (810, 821), True, 'import numpy as np\n'), ((1441, 1473), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (1462, 1473), True, 'import numpy as np\n'), ((1561, 1619), 'torch.nn.Unfold', 'torch.nn.Unfold', ([], {'kernel_size': 'block_size', 'stride': 'block_size'}), '(kernel_size=block_size, stride=block_size)\n', (1576, 1619), False, 'import torch\n'), ((1639, 1724), 'torch.nn.Fold', 'torch.nn.Fold', ([], {'output_size': 'image_size', 'kernel_size': 'block_size', 'stride': 'block_size'}), '(output_size=image_size, kernel_size=block_size, stride=block_size\n )\n', (1652, 1724), False, 'import torch\n'), ((971, 992), 'torch.flatten', 'torch.flatten', (['tensor'], {}), '(tensor)\n', (984, 992), False, 'import torch\n')] |
import numpy as np
from itertools import product
from itertools import permutations
def choose_nums(big, small):
    """
    Draw the six starting numbers for a round of the game.

    Parameters
    ----------
    big : int
        How many "large" numbers (drawn from 100, 75, 50, 25) to include;
        must be between 1 and 5.
    small : int
        How many "small" numbers (drawn from 1-10) to include; big + small
        must equal 6.

    Returns
    -------
    list
        The `big` large numbers followed by the `small` small numbers.

    Raises
    ------
    TypeError
        If either argument is not an int.
    ValueError
        If either count is out of range or the counts do not sum to 6.
    """
    # Validate with real exceptions: `assert` is stripped under `python -O`.
    if not isinstance(big, int):
        raise TypeError("Function input (big) must be type integer")
    if not isinstance(small, int):
        raise TypeError("Function input (small) must be type integer")
    if not 0 < big < 6:
        raise ValueError("Function input (big) must be between 0 and 6")
    if not 0 < small < 6:
        raise ValueError("Function input (small) must be between 0 and 6")
    if big + small != 6:
        raise ValueError("Function inputs must sum to 6")

    big_nums = [100, 75, 50, 25]
    # BUG FIX: np.random.randint's upper bound is exclusive, so the original
    # randint(0, 3) could never pick index 3 and 25 was unreachable; likewise
    # randint(1, 10) could never draw a 10.
    numbers = [big_nums[np.random.randint(0, len(big_nums))] for _ in range(big)]
    numbers += [int(np.random.randint(1, 11)) for _ in range(small)]
    return numbers
def solve(numbers, target):
    """
    Brute-force the numbers round: try every permutation of the six numbers
    combined with every choice of the four arithmetic operators, and return
    the first formula string whose value equals `target`.

    Formulas are evaluated with normal Python operator precedence (not the
    game's left-to-right convention), matching the original behaviour.

    Parameters
    ----------
    numbers : list
        Exactly six numbers.
    target : int
        The value to reach, strictly between 0 and 1000.

    Returns
    -------
    str or None
        A formula such as "1+2*75-4/2+100" that evaluates to `target`, or
        None when the whole search space is exhausted without a hit.

    Raises
    ------
    TypeError
        If `numbers` is not a list.
    ValueError
        If `numbers` does not have length 6 or `target` is out of range.
    """
    # Validate with real exceptions: `assert` is stripped under `python -O`.
    if not isinstance(numbers, list):
        raise TypeError("Input (numbers) must be a list")
    if len(numbers) != 6:
        raise ValueError("Input (numbers) must have length 6")
    if not 0 < target < 1000:
        raise ValueError("target value must between 0 and 1000")

    operators = ['+', '*', '-', '/']
    for perm in permutations(numbers):
        for ops in product(operators, repeat=5):
            # Interleave numbers and operators: n0 op0 n1 op1 ... op4 n5.
            pieces = [str(perm[0])]
            for op, operand in zip(ops, perm[1:]):
                pieces.append(op)
                pieces.append(str(operand))
            formula = "".join(pieces)
            try:
                # eval is safe here: the string is built only from our own
                # digits and operator characters, never from user input.
                # BUG FIX: the original evaluated each formula twice and
                # printed every miss, spamming the console.
                value = eval(formula)
            except ZeroDivisionError:
                # BUG FIX: a zero divisor used to crash the whole search.
                continue
            if value == target:
                return formula
    # BUG FIX: the original "no solution" check compared the permutation
    # index against len(all_combinations) and was unreachable.
    print("No solutions found")
    return None
#function generates random target number
def target_number():
    """Return a random target between 100 and 999 inclusive.

    BUG FIX: np.random.randint's upper bound is exclusive, so the original
    randint(100, 999) could never produce 999 even though the prompt in
    main() advertises a 100-999 range.
    """
    return int(np.random.randint(100, 1000))
def main():
    """
    Main entry point: the user chooses between manually entered and randomly
    generated numbers. The numbers and target are shown, and a possible
    solution is revealed when requested.
    """
    def _reveal(numbers, target):
        # shared flow for both modes: show the round, then a solution on demand
        print(f"Your numbers are: {numbers}. \n The target number is {target}")
        print("When you are ready to get solutions, enter y")
        cont = str(input())
        if cont == "y":
            print(f"A possible solution is : {solve(numbers, target)}")

    # user chooses game type
    chs_or_rand = str(input("Would you like to choose numbers or take random ones? \n (type choose or random)"))
    if chs_or_rand == "choose":
        # manual mode: read six numbers and a target from the user
        numbers = [int(input("Input number:")) for _ in range(6)]
        target = int(input("Input target number (100-999):"))
        _reveal(numbers, target)
    elif chs_or_rand == "random":
        # random mode: the user only picks how many large numbers to draw
        big = int(input("How many large numbers would you like?"))
        small = 6 - big
        numbers = choose_nums(big, small)
        target = target_number()
        _reveal(numbers, target)
    else:
        # error statement if input does not match options; restart
        print("Incorrect input. Please try again.")
        main()
#failed previous ideas/attempts
"""
def attempt_1(numbers, target):
all_combinations = list(permutations(numbers))
# possible_iterations = np.math.factorial(len(numbers)) * 4**(len(numbers) - 1)
operators = ['+', '*', '-', '/'] # change '//' to '/' for floating point division
for i in range(len(all_combinations)):
for opers in product(operators, repeat=len(all_combinations[i]) - 1):
formula = [str(all_combinations[i][0])]
for op, operand in zip(opers, all_combinations[i][1:]):
formula.extend([op, str(operand)])
formula = ' '.join(formula)
## print('{} = {}'.format(formula, eval(formula)))
if eval(formula) == target:
return formula
else:
return "No solution found"
def attempt_2(numbers, target):
all_combinations = list(permutations(numbers))
operators = ['+', '*', '-', '/']
all_order_permutations = list(product(operators, repeat=5))
for opers in product(operators, repeat=5):
formula = [str(all_combinations[0])]
for op, operand in zip(opers, all_combinations[1:]):
formula.extend([op, str(operand)])
formula = ' '.join(formula)
if eval(formula) == target:
return formula
def operation_orders(iterations):
# (This was an idea for keeping a running list of orders before i landed on eval function)
# This function returns the order of the 5 operations taken between the 6 numbers to get the target number.
# Each integer corresponds to a basic algabraic operation as such:
# 0-add
# 1-subtract
# 2-multiply
# 3-divide
ops = [0, 1, 2, 3]
opord = list(product(ops, ops, ops, ops, ops))
opord_inst = opord[iterations]
return opord_inst
"""
| [
"itertools.permutations",
"numpy.random.randint",
"itertools.product"
] | [((3151, 3178), 'numpy.random.randint', 'np.random.randint', (['(100)', '(999)'], {}), '(100, 999)\n', (3168, 3178), True, 'import numpy as np\n'), ((784, 807), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (801, 807), True, 'import numpy as np\n'), ((897, 921), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (914, 921), True, 'import numpy as np\n'), ((1557, 1578), 'itertools.permutations', 'permutations', (['numbers'], {}), '(numbers)\n', (1569, 1578), False, 'from itertools import permutations\n'), ((1651, 1679), 'itertools.product', 'product', (['operators'], {'repeat': '(5)'}), '(operators, repeat=5)\n', (1658, 1679), False, 'from itertools import product\n')] |
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2021 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA2D
#
# https://github.com/CNES/Pandora2D
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains functions associated to the interpolation method used in the refinement step.
"""
import multiprocessing
from typing import Dict, Tuple
from json_checker import And, Checker
from scipy.interpolate import interp2d
from scipy.optimize import minimize
import numpy as np
import xarray as xr
from . import refinement
@refinement.AbstractRefinement.register_subclass("interpolation")
class Interpolation(refinement.AbstractRefinement):
    """
    Interpolation class allows to perform the subpixel cost refinement step
    """

    def __init__(self, **cfg: str) -> None:
        """
        :param cfg: optional configuration, {}
        :type cfg: dict
        :return: None
        """
        self.cfg = self.check_conf(**cfg)

    @staticmethod
    def check_conf(**cfg: str) -> Dict[str, str]:
        """
        Check the refinement configuration

        :param cfg: user_config for refinement
        :type cfg: dict
        :return: cfg: global configuration
        :rtype: cfg: dict
        """
        # the only accepted value for "refinement_method" is "interpolation"
        schema = {
            "refinement_method": And(str, lambda x: x in ["interpolation"]),
        }

        checker = Checker(schema)
        checker.validate(cfg)
        return cfg

    @staticmethod
    def compute_cost_matrix(p_args) -> Tuple[float, float]:
        """
        Process the interpolation and minimize of a cost_matrix

        :param cost_volumes: Dataset with 4D datas
        :type cost_volumes: xr.Dataset
        :param coords_pix_row: array from disp_min_row to disp_max_row
        :type coords_pix_row: np.array
        :param coords_pix_col: array from disp_min_col to disp_max_col
        :type coords_pix_col: np.array
        :param args_matrix_cost: 2D matrix with cost for one pixel (dim: dispy, dispx)
        :type args_matrix_cost: np.array
        :return: res: min of args_matrix_cost in 2D
        :rtype: Tuple(float, float)
        """
        # NOTE(review): the caller packs (cost_volumes, col, row, cost) but the
        # names below unpack row before col - verify the intended order.
        cost_volumes, coords_pix_row, coords_pix_col, args_matrix_cost = p_args

        # bounds ((disp_min_row, disp_max_row), (disp_min_col, disp_max_col))
        bounds = [
            (cost_volumes["disp_col"].data[0], cost_volumes["disp_col"].data[-1]),
            (cost_volumes["disp_row"].data[0], cost_volumes["disp_row"].data[-1]),
        ]
        # start point for minimize
        x_0 = (coords_pix_row, coords_pix_col)

        # prepare cost_matrix for min or max research: similarity measures are
        # maximized, so negate them to turn the search into a minimization
        if cost_volumes.attrs["type_measure"] == "max":
            matrix_cost = -args_matrix_cost
        else:
            matrix_cost = args_matrix_cost

        # looking for inf values
        matrix_cost[matrix_cost == np.inf] = np.nan
        # looking for nans values
        nans = np.isnan(matrix_cost)

        # if matrix_cost full of nans: no refinement is possible
        if np.all(nans):
            res = (np.nan, np.nan)
        # if cost matrix with nans and cost
        # NOTE(review): np.all() returns a numpy bool, never the `True`
        # singleton, so the `is not True` clause is always truthy here.
        elif True in nans and np.all(nans) is not True:
            # interp nans values (1D linear interpolation over the flat index)
            matrix_cost[nans] = np.interp(np.nonzero(nans)[0], np.nonzero(~nans)[0], matrix_cost[~nans])
            # interp matrix_cost
            fonction_interpolation = interp2d(
                cost_volumes["disp_col"].data, cost_volumes["disp_row"].data, matrix_cost, "cubic"
            )
            wrap = lambda f: fonction_interpolation(*f)
            # looking for min
            res = minimize(wrap, x_0, bounds=bounds).x
        # if cost matrix full of values
        else:
            # interp matrix_cost
            fonction_interpolation = interp2d(
                cost_volumes["disp_col"].data, cost_volumes["disp_row"].data, matrix_cost, kind="cubic"
            )
            # looking for min
            wrap = lambda f: fonction_interpolation(*f)
            res = minimize(wrap, x_0, bounds=bounds).x

        return res

    def refinement_method(self, cost_volumes: xr.Dataset, pixel_maps: xr.Dataset) -> Tuple[np.array, np.array]:
        """
        Compute refine disparity maps

        :param cost_volumes: Cost_volumes has (row, col, disp_col, disp_row) dimensions
        :type cost_volumes: xr.Dataset
        :param pixel_maps: dataset of pixel disparity maps
        :type pixel_maps: xr.Dataset
        :return: delta_col, delta_row: subpixel disparity maps
        :rtype: Tuple[np.array, np.array]
        """
        # cost_volumes data
        data = cost_volumes["cost_volumes"].data

        # transform 4D row, col, dcol, drow into drow, dcol, row * col
        nrow, ncol, ndispcol, ndisprow = data.shape
        cost_matrix = np.rollaxis(np.rollaxis(data, 3, 0), 3, 1).reshape((ndisprow, ndispcol, nrow * ncol))

        # flatten pixel maps for multiprocessing
        liste_row = list(pixel_maps["row_map"].data.flatten().tolist())
        liste_col = list(pixel_maps["col_map"].data.flatten().tolist())

        # args for multiprocessing: one tuple per pixel of the maps
        args = [
            (cost_volumes, liste_col[i], liste_row[i], cost_matrix[:, :, i]) for i in range(0, cost_matrix.shape[2])
        ]

        with multiprocessing.Pool(multiprocessing.cpu_count()) as p:
            # liste([drow, dcol])
            map_carte = p.map(self.compute_cost_matrix, args)

        # compute disparity maps
        delta_col = np.array(map_carte)[:, 0]
        delta_row = np.array(map_carte)[:, 1]

        # reshape disparity maps back to the (row, col) image grid
        delta_col = np.reshape(delta_col, (pixel_maps["col_map"].data.shape[0], pixel_maps["col_map"].data.shape[1]))
        delta_row = np.reshape(delta_row, (pixel_maps["col_map"].data.shape[0], pixel_maps["col_map"].data.shape[1]))

        return delta_col, delta_row
| [
"numpy.reshape",
"json_checker.Checker",
"scipy.optimize.minimize",
"numpy.rollaxis",
"multiprocessing.cpu_count",
"numpy.array",
"numpy.isnan",
"numpy.nonzero",
"json_checker.And",
"numpy.all",
"scipy.interpolate.interp2d"
] | [((1888, 1903), 'json_checker.Checker', 'Checker', (['schema'], {}), '(schema)\n', (1895, 1903), False, 'from json_checker import And, Checker\n'), ((3432, 3453), 'numpy.isnan', 'np.isnan', (['matrix_cost'], {}), '(matrix_cost)\n', (3440, 3453), True, 'import numpy as np\n'), ((3504, 3516), 'numpy.all', 'np.all', (['nans'], {}), '(nans)\n', (3510, 3516), True, 'import numpy as np\n'), ((6068, 6170), 'numpy.reshape', 'np.reshape', (['delta_col', "(pixel_maps['col_map'].data.shape[0], pixel_maps['col_map'].data.shape[1])"], {}), "(delta_col, (pixel_maps['col_map'].data.shape[0], pixel_maps[\n 'col_map'].data.shape[1]))\n", (6078, 6170), True, 'import numpy as np\n'), ((6186, 6288), 'numpy.reshape', 'np.reshape', (['delta_row', "(pixel_maps['col_map'].data.shape[0], pixel_maps['col_map'].data.shape[1])"], {}), "(delta_row, (pixel_maps['col_map'].data.shape[0], pixel_maps[\n 'col_map'].data.shape[1]))\n", (6196, 6288), True, 'import numpy as np\n'), ((1815, 1857), 'json_checker.And', 'And', (['str', "(lambda x: x in ['interpolation'])"], {}), "(str, lambda x: x in ['interpolation'])\n", (1818, 1857), False, 'from json_checker import And, Checker\n'), ((5942, 5961), 'numpy.array', 'np.array', (['map_carte'], {}), '(map_carte)\n', (5950, 5961), True, 'import numpy as np\n'), ((5988, 6007), 'numpy.array', 'np.array', (['map_carte'], {}), '(map_carte)\n', (5996, 6007), True, 'import numpy as np\n'), ((3861, 3957), 'scipy.interpolate.interp2d', 'interp2d', (["cost_volumes['disp_col'].data", "cost_volumes['disp_row'].data", 'matrix_cost', '"""cubic"""'], {}), "(cost_volumes['disp_col'].data, cost_volumes['disp_row'].data,\n matrix_cost, 'cubic')\n", (3869, 3957), False, 'from scipy.interpolate import interp2d\n'), ((4249, 4350), 'scipy.interpolate.interp2d', 'interp2d', (["cost_volumes['disp_col'].data", "cost_volumes['disp_row'].data", 'matrix_cost'], {'kind': '"""cubic"""'}), "(cost_volumes['disp_col'].data, cost_volumes['disp_row'].data,\n matrix_cost, 
kind='cubic')\n", (4257, 4350), False, 'from scipy.interpolate import interp2d\n'), ((5757, 5784), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (5782, 5784), False, 'import multiprocessing\n'), ((3627, 3639), 'numpy.all', 'np.all', (['nans'], {}), '(nans)\n', (3633, 3639), True, 'import numpy as np\n'), ((4088, 4122), 'scipy.optimize.minimize', 'minimize', (['wrap', 'x_0'], {'bounds': 'bounds'}), '(wrap, x_0, bounds=bounds)\n', (4096, 4122), False, 'from scipy.optimize import minimize\n'), ((4481, 4515), 'scipy.optimize.minimize', 'minimize', (['wrap', 'x_0'], {'bounds': 'bounds'}), '(wrap, x_0, bounds=bounds)\n', (4489, 4515), False, 'from scipy.optimize import minimize\n'), ((5275, 5298), 'numpy.rollaxis', 'np.rollaxis', (['data', '(3)', '(0)'], {}), '(data, 3, 0)\n', (5286, 5298), True, 'import numpy as np\n'), ((3728, 3744), 'numpy.nonzero', 'np.nonzero', (['nans'], {}), '(nans)\n', (3738, 3744), True, 'import numpy as np\n'), ((3749, 3766), 'numpy.nonzero', 'np.nonzero', (['(~nans)'], {}), '(~nans)\n', (3759, 3766), True, 'import numpy as np\n')] |
import numpy as np
import math
import random
import matplotlib.pyplot as plt
# ref : http://outlace.com/rlpart3.html
# TODO : add ploting method
class QAgent:
    """Tabular Q-learning agent with an epsilon-greedy policy and a
    dynamically growing Q-value table."""
    # TODO : inherit from some RL classes which implement alpha , gamma , etc and delete them from constructor
    # TODO : make growing strategy parameter
    # TODO : add a param for constant learning rate
    decr = 0.9999  # per-step multiplicative decay applied to alpha and eps (tune param)

    def __init__(self, env, interp, alpha = 1, gamma = 0.5, eps = 1, maxIter = 10, maxAction = 50000, nbStateBatch = 10000 ):
        # env: environment exposing actions / observation / applyAction / getInstance
        # interp: observation-to-state-id mapper; given a back-reference to env
        self.env = env
        self.interp = interp
        self.interp.env = self.env
        self.nbAction = self.env.actions.shape[0]
        self.nbStateBatch = nbStateBatch # to grow qvalue table
        self.qValue = np.zeros((nbStateBatch, self.nbAction))
        self.alpha = alpha  # learning rate (decayed each step by decr)
        self.gamma = gamma  # discount factor
        self.eps = eps  # exploration probability (decayed each step by decr)
        self.maxIter = maxIter  # number of training episodes
        self.maxAction = maxAction  # step budget per episode

    def updateQ(self, prevState, prevAction, currentState, reward, alphaI):
        """Standard Q-learning update: blend the old estimate with the
        bootstrapped target reward + gamma * max_a Q(s', a)."""
        oldInfo = (1-alphaI) * self.qValue[prevState, prevAction]
        newInfo = alphaI * ( reward + self.gamma * np.max(self.qValue[currentState,:]) )
        self.qValue[prevState, prevAction] = oldInfo + newInfo

    def chooseAction(self,fromState,epsI): # TODO : uniform with QNN one
        """Epsilon-greedy selection: exploit with probability 1 - epsI."""
        epsThreshold = random.uniform(0,1)
        if epsThreshold > epsI:
            # Exploitation
            choosedAction = np.argmax(self.qValue[fromState,:])
        else:
            # Exploration
            choosedAction = random.randint(0, self.nbAction - 1)
        return(choosedAction)

    def process(self):
        """Run maxIter episodes of Q-learning, each on a fresh environment
        instance, growing the Q-table when the last state id is reached."""
        print("QL processing ...")
        j = 0
        while j < self.maxIter:
            i = 0
            newEnv = self.env.getInstance()
            self.env = newEnv
            currentState = self.interp.getState(self.env.observation)
            alphaI = self.alpha
            epsI = self.eps
            while i < self.maxAction:
                prevState = currentState
                selectedAction = self.chooseAction(currentState,epsI)
                reward, currentObs= self.env.applyAction(selectedAction)
                prevState = currentState  # NOTE(review): redundant re-assignment, kept as-is
                currentState = self.interp.getState(currentObs)
                self.updateQ(prevState, selectedAction, currentState, reward, alphaI)
                alphaI = alphaI * self.decr
                epsI = epsI * self.decr
                # grow the Q-table when the newest state hit the last row
                if currentState == self.qValue.shape[0]-1 :
                    print("Growing Q ..") # TODO : put in method
                    growTable = np.zeros((self.qValue.shape[0] + self.nbStateBatch, self.qValue.shape[1]))
                    growTable[:self.qValue.shape[0], :self.qValue.shape[1] ] = self.qValue
                    self.qValue = growTable
                if self.env.isFinalState == 1:
                    print("Final state ! at epoch : ",i)
                    print(self.env.observation)
                    break
                i = i + 1
            j = j + 1
class EnvSudoku:
    """Sudoku environment for RL: the board is the observation, actions are
    swaps between two originally-empty cells, and the reward is derived from
    the count of duplicate digits in rows, columns and sub-cells."""

    def __init__(self):
        gameFile="sudoku.dat" # TODO : manage mask
        # assumes the file holds size*size whitespace-separated integers,
        # 0 marking empty cells - TODO confirm format
        n=np.fromfile(gameFile,dtype=int,sep=" ")
        size=int(math.sqrt(len(n)))
        gameRange=np.arange(size)
        cellSize=int(math.sqrt(size))
        cellRange=np.arange(cellSize)
        n=n.reshape(size,size)
        mask=(n==0)*1  # 1 marks originally-empty (movable) cells
        ## Initialise Observation ( board)
        nums=np.zeros(size)
        num1=np.zeros(size)
        for ib in cellRange: # fill in the gaps with determinist local cells resolution
            for jb in cellRange:
                # start with the full digit set for this sub-cell
                for k in gameRange:
                    nums[k]=k+1
                # cross out digits already present in the sub-cell
                for i in cellRange:
                    for j in cellRange:
                        i1 = ib*cellSize + i
                        j1 = jb*cellSize + j
                        if n[i1][j1] !=0:
                            ix = n[i1][j1]
                            nums[ix-1]=0
                # compact the remaining (missing) digits into num1
                iy = -1
                for k in gameRange:
                    if nums[k]!=0:
                        iy+=1
                        num1[iy] = nums[k]
                # write the missing digits into the empty cells, in order
                kk=0
                for i in cellRange:
                    for j in cellRange:
                        i1 = ib*cellSize + i
                        j1 = jb*cellSize + j
                        if n[i1][j1] ==0:
                            n[i1][j1]=num1[kk]
                            kk+=1
        self.observation = n # The board is observed
        # Generate actions , these ones are specific for the input file and cannot be generalize
        forbidenActions = np.argwhere(mask.reshape(-1) == 0 ) # non movable cells , from file
        rawActions = np.ones(n.size) # all permutations between 2 cells
        # NOTE(review): np.triu broadcasts the 1-D vector to an (N, N) matrix
        rawActions = np.triu(rawActions) # Delete symetric permutation
        rawActions = rawActions - np.eye(n.size) # Delete unitary permutation
        rawActions[:,forbidenActions] = 0
        rawActions[forbidenActions,] = 0
        rawActions = np.argwhere(rawActions == 1)
        realActions = np.zeros((len(rawActions),2,2))
        realActions[:,:,0] = rawActions // len(n) # convert index to coordinates
        realActions[:,:,1] = (rawActions) % len(n)
        self.actions = realActions.astype(int) # List of useful permutations between two cells
        self.isFinalState = 0 # TODO : to be moved on interpretor

    def applyAction(self, action):
        """Swap the two board cells addressed by `action` and return the
        resulting (reward, observation) pair."""
        cellOne,cellTwo = self.actions[action,:]
        temp = self.observation[cellOne[0], cellOne[1]]
        self.observation[cellOne[0], cellOne[1]] = self.observation[cellTwo[0], cellTwo[1]]
        self.observation[cellTwo[0], cellTwo[1]] = temp
        reward = self.getReward()
        obs = self.observation
        return (reward , obs)

    def getReward(self):
        """Reward is inversely proportional to the board's 'energy': the gap
        between the maximum possible number of unique digits and the actual
        count over all rows, columns and sub-cells. Energy 0 means solved."""
        # TODO : try different reward like : alpha*delta(energy)
        def check(i, k, ncheck):
            # determines number of unique elements in each row (k=1) or column (k!=1)
            nu=0
            if k!=1:
                ncheck=np.transpose(ncheck)
            nu=len(np.unique(ncheck[i,]))
            return(nu)
        def checksq(Is, Js, ncheck):
            # number of unique elements in the (Is, Js) sub-cell
            nu=0
            sCell=int(pow(ncheck.size,1/4)) # compute these kind of variable outsite
            subcell=ncheck[sCell*Is:sCell*Is+sCell,sCell*Js:sCell*Js+sCell]
            nu=len(np.unique(subcell))
            return(nu)
        nsum=0
        ncheck = self.observation
        nCell=int(pow(ncheck.size,1/4))
        nmax=3*pow(nCell,4)  # perfect score: every row/col/sub-cell fully unique
        nRange=np.arange(ncheck.shape[1])
        cRange=np.arange(int(pow(ncheck.size,1/4)))
        for i in nRange:
            nsum += check(i,1,ncheck) + check(i,2,ncheck)
        for i in cRange:
            for j in cRange:
                nsum += checksq(i,j,ncheck)
        energy = nmax-nsum
        if energy != 0:
            reward = 100 * (1 / energy) # TODO : function from to tune
            #reward = 10 * (1 / energy) # good value for tabular Q
        else:
            reward = 10000 # To tune
            #reward = 1000 # good value for tabular Q
            self.isFinalState = 1
        return(reward)

    def getInstance(self):
        """Return a fresh environment re-read from the game file."""
        return (type(self)())
class Interpretor:
    """Maps board observations to compact integer state ids for tabular
    Q-learning, discovering and indexing new states on the fly."""

    def __init__(self, env):
        """
        :param env: environment exposing an `observation` numpy array
        """
        initObservation = env.observation
        # registry of encoded states: stateList[i] is the code of state id i
        self.stateList = np.array([])
        self.getState(initObservation)  # only to init stateList

    def getState(self, obs):
        """Encode the flattened observation as a base-(k+1) integer and
        return its stable index in the registry, appending it when first
        seen."""
        rawState = obs.reshape(-1)
        n = rawState.shape[0]  # size of board
        k = int(math.sqrt(n))  # box size
        # convert state to number for compression (positional encoding)
        currentState = 0
        for i in np.arange(rawState.shape[0]):
            currentState = currentState + rawState[i] * ((k + 1) ** i)
        matches = np.argwhere(self.stateList == currentState)
        # BUG FIX: the original used `matches.any()`, which is False when the
        # only match is at index 0 (argwhere returns [[0]]), so state 0 was
        # re-registered on every visit. Test for a non-empty result instead.
        if matches.size > 0:  # known state
            state = int(matches[0, 0])
        else:  # discovered state
            self.stateList = np.append(self.stateList, currentState)
            state = self.stateList.size - 1  # id of the state
        return state

    def getStateVector(self, obs):
        """Return the observation flattened to shape (1, n) as a new array."""
        # TODO : shall encode to mean someting
        # FIX , create new instance to avoid reference copy
        currentObs = np.array(obs.reshape(1, -1))
        return currentObs
## Main
# Build the environment (reads "sudoku.dat" from the working directory),
# its state interpreter, and a tabular Q agent, then train.
myEnv = EnvSudoku()
myInter = Interpretor(myEnv)
myQ = QAgent(myEnv,myInter)
myQ.process()

# Exploitation: re-run with a mostly greedy policy
myQ.eps = 0.05 # 5% randomness
myQ.process()
| [
"random.uniform",
"numpy.fromfile",
"numpy.eye",
"numpy.ones",
"numpy.unique",
"math.sqrt",
"numpy.argmax",
"numpy.max",
"numpy.append",
"numpy.zeros",
"numpy.argwhere",
"numpy.triu",
"numpy.transpose",
"random.randint",
"numpy.arange"
] | [((816, 855), 'numpy.zeros', 'np.zeros', (['(nbStateBatch, self.nbAction)'], {}), '((nbStateBatch, self.nbAction))\n', (824, 855), True, 'import numpy as np\n'), ((1453, 1473), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1467, 1473), False, 'import random\n'), ((3326, 3367), 'numpy.fromfile', 'np.fromfile', (['gameFile'], {'dtype': 'int', 'sep': '""" """'}), "(gameFile, dtype=int, sep=' ')\n", (3337, 3367), True, 'import numpy as np\n'), ((3423, 3438), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (3432, 3438), True, 'import numpy as np\n'), ((3497, 3516), 'numpy.arange', 'np.arange', (['cellSize'], {}), '(cellSize)\n', (3506, 3516), True, 'import numpy as np\n'), ((3630, 3644), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (3638, 3644), True, 'import numpy as np\n'), ((3659, 3673), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (3667, 3673), True, 'import numpy as np\n'), ((4954, 4969), 'numpy.ones', 'np.ones', (['n.size'], {}), '(n.size)\n', (4961, 4969), True, 'import numpy as np\n'), ((5028, 5047), 'numpy.triu', 'np.triu', (['rawActions'], {}), '(rawActions)\n', (5035, 5047), True, 'import numpy as np\n'), ((5267, 5295), 'numpy.argwhere', 'np.argwhere', (['(rawActions == 1)'], {}), '(rawActions == 1)\n', (5278, 5295), True, 'import numpy as np\n'), ((6886, 6912), 'numpy.arange', 'np.arange', (['ncheck.shape[1]'], {}), '(ncheck.shape[1])\n', (6895, 6912), True, 'import numpy as np\n'), ((8046, 8074), 'numpy.arange', 'np.arange', (['rawState.shape[0]'], {}), '(rawState.shape[0])\n', (8055, 8074), True, 'import numpy as np\n'), ((8189, 8232), 'numpy.argwhere', 'np.argwhere', (['(self.stateList == currentState)'], {}), '(self.stateList == currentState)\n', (8200, 8232), True, 'import numpy as np\n'), ((1566, 1602), 'numpy.argmax', 'np.argmax', (['self.qValue[fromState, :]'], {}), '(self.qValue[fromState, :])\n', (1575, 1602), True, 'import numpy as np\n'), ((1676, 1712), 'random.randint', 'random.randint', 
(['(0)', '(self.nbAction - 1)'], {}), '(0, self.nbAction - 1)\n', (1690, 1712), False, 'import random\n'), ((3461, 3476), 'math.sqrt', 'math.sqrt', (['size'], {}), '(size)\n', (3470, 3476), False, 'import math\n'), ((5115, 5129), 'numpy.eye', 'np.eye', (['n.size'], {}), '(n.size)\n', (5121, 5129), True, 'import numpy as np\n'), ((7923, 7935), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (7932, 7935), False, 'import math\n'), ((8439, 8478), 'numpy.append', 'np.append', (['self.stateList', 'currentState'], {}), '(self.stateList, currentState)\n', (8448, 8478), True, 'import numpy as np\n'), ((6366, 6386), 'numpy.transpose', 'np.transpose', (['ncheck'], {}), '(ncheck)\n', (6378, 6386), True, 'import numpy as np\n'), ((6410, 6431), 'numpy.unique', 'np.unique', (['ncheck[i,]'], {}), '(ncheck[i,])\n', (6419, 6431), True, 'import numpy as np\n'), ((6696, 6714), 'numpy.unique', 'np.unique', (['subcell'], {}), '(subcell)\n', (6705, 6714), True, 'import numpy as np\n'), ((1244, 1280), 'numpy.max', 'np.max', (['self.qValue[currentState, :]'], {}), '(self.qValue[currentState, :])\n', (1250, 1280), True, 'import numpy as np\n'), ((2759, 2833), 'numpy.zeros', 'np.zeros', (['(self.qValue.shape[0] + self.nbStateBatch, self.qValue.shape[1])'], {}), '((self.qValue.shape[0] + self.nbStateBatch, self.qValue.shape[1]))\n', (2767, 2833), True, 'import numpy as np\n')] |
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
import numpy as np
import torch
import librosa
from scipy import signal
from torch import Tensor
class Spectrogram(object):
    """Log-magnitude STFT spectrogram feature extractor."""

    def __init__(
        self,
        n_fft: int,
        hop_length: int,
    ) -> None:
        self.n_fft = n_fft
        self.hop_length = hop_length

    def __call__(self, sound: np.ndarray, normalize: bool) -> torch.FloatTensor:
        """Compute log(1 + |STFT(sound)|); optionally mean/std normalized."""
        complex_spec = librosa.stft(
            sound,
            n_fft=self.n_fft,
            hop_length=self.hop_length,
            window=signal.windows.hamming,
        )
        magnitude, _ = librosa.magphase(complex_spec)
        features = np.log1p(magnitude)
        if normalize:
            features = features - features.mean()
            features = features / np.std(features)
        return torch.FloatTensor(features)
class MelSpectrogram(object):
    """Log-scaled (dB) mel spectrogram feature extractor."""

    def __init__(
        self,
        n_fft: int,
        hop_length: int,
        sampling_rate: int = 16000,
        n_mel: int = 80,
    ) -> None:
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.sampling_rate = sampling_rate
        self.n_mel = n_mel

    def __call__(self, sound: np.ndarray, normalize: bool) -> torch.FloatTensor:
        """Compute a mel spectrogram in dB; optionally mean/std normalized."""
        mel_power = librosa.feature.melspectrogram(
            sound,
            sr=self.sampling_rate,
            n_mels=self.n_mel,
            n_fft=self.n_fft,
            hop_length=self.hop_length,
        )
        features = librosa.amplitude_to_db(mel_power)
        if normalize:
            features = features - features.mean()
            features = features / np.std(features)
        return torch.FloatTensor(features)
class MFCC(object):
    """Mel-frequency cepstral coefficient feature extractor."""

    def __init__(
        self,
        n_fft: int,
        hop_length: int,
        sampling_rate: int = 16000,
        n_mfcc: int = 40,
    ) -> None:
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.sampling_rate = sampling_rate
        self.n_mfcc = n_mfcc

    def __call__(self, sound: np.ndarray, normalize: bool) -> torch.FloatTensor:
        """Compute MFCCs of the waveform; optionally mean/std normalized."""
        coefficients = librosa.feature.mfcc(
            sound,
            sr=self.sampling_rate,
            n_mfcc=self.n_mfcc,
            n_fft=self.n_fft,
            hop_length=self.hop_length,
        )
        if normalize:
            coefficients = coefficients - coefficients.mean()
            coefficients = coefficients / np.std(coefficients)
        return torch.FloatTensor(coefficients)
class FilterBank(object):
    """Kaldi-compatible mel filter-bank feature extractor via torchaudio."""

    def __init__(
        self,
        frame_length: float = 0.020,
        frame_stride: float = 0.010,
        sampling_rate: int = 16000,
        n_mel: int = 80,
    ) -> None:
        # torchaudio.compliance.kaldi expects frame sizes in milliseconds
        self.frame_length = frame_length * 1000
        self.frame_stride = frame_stride * 1000
        self.sampling_rate = sampling_rate
        self.n_mel = n_mel

    def __call__(self, sound: np.ndarray, normalize: bool):
        # BUG FIX: the original had `import torchaudio` in the class body,
        # which binds the module as a class attribute; class-scope names are
        # NOT visible inside method bodies, so __call__ raised NameError.
        # Import locally so the name resolves at call time.
        import torchaudio

        filter_bank = torchaudio.compliance.kaldi.fbank(
            Tensor(sound).unsqueeze(0),
            num_mel_bins=self.n_mel,
            frame_length=self.frame_length,
            frame_shift=self.frame_stride,
            sample_frequency=float(self.sampling_rate),
        )
        # (frames, n_mel) -> (n_mel, frames), matching the other extractors
        filter_bank = filter_bank.transpose(0, 1)
        if normalize:
            filter_bank -= filter_bank.mean()
            filter_bank /= np.std(filter_bank)
        return filter_bank
| [
"librosa.feature.melspectrogram",
"librosa.magphase",
"torch.Tensor",
"librosa.feature.mfcc",
"numpy.std",
"librosa.stft",
"librosa.amplitude_to_db",
"numpy.log1p",
"torch.FloatTensor"
] | [((883, 984), 'librosa.stft', 'librosa.stft', (['sound'], {'n_fft': 'self.n_fft', 'hop_length': 'self.hop_length', 'window': 'signal.windows.hamming'}), '(sound, n_fft=self.n_fft, hop_length=self.hop_length, window=\n signal.windows.hamming)\n', (895, 984), False, 'import librosa\n'), ((1005, 1027), 'librosa.magphase', 'librosa.magphase', (['stft'], {}), '(stft)\n', (1021, 1027), False, 'import librosa\n'), ((1050, 1071), 'numpy.log1p', 'np.log1p', (['spectrogram'], {}), '(spectrogram)\n', (1058, 1071), True, 'import numpy as np\n'), ((1204, 1234), 'torch.FloatTensor', 'torch.FloatTensor', (['spectrogram'], {}), '(spectrogram)\n', (1221, 1234), False, 'import torch\n'), ((1681, 1811), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', (['sound'], {'sr': 'self.sampling_rate', 'n_mels': 'self.n_mel', 'n_fft': 'self.n_fft', 'hop_length': 'self.hop_length'}), '(sound, sr=self.sampling_rate, n_mels=self.\n n_mel, n_fft=self.n_fft, hop_length=self.hop_length)\n', (1711, 1811), False, 'import librosa\n'), ((1906, 1945), 'librosa.amplitude_to_db', 'librosa.amplitude_to_db', (['melspectrogram'], {}), '(melspectrogram)\n', (1929, 1945), False, 'import librosa\n'), ((2106, 2143), 'torch.FloatTensor', 'torch.FloatTensor', (['log_melspectrogram'], {}), '(log_melspectrogram)\n', (2123, 2143), False, 'import torch\n'), ((2573, 2693), 'librosa.feature.mfcc', 'librosa.feature.mfcc', (['sound'], {'sr': 'self.sampling_rate', 'n_mfcc': 'self.n_mfcc', 'n_fft': 'self.n_fft', 'hop_length': 'self.hop_length'}), '(sound, sr=self.sampling_rate, n_mfcc=self.n_mfcc,\n n_fft=self.n_fft, hop_length=self.hop_length)\n', (2593, 2693), False, 'import librosa\n'), ((2865, 2888), 'torch.FloatTensor', 'torch.FloatTensor', (['mfcc'], {}), '(mfcc)\n', (2882, 2888), False, 'import torch\n'), ((1168, 1187), 'numpy.std', 'np.std', (['spectrogram'], {}), '(spectrogram)\n', (1174, 1187), True, 'import numpy as np\n'), ((2063, 2089), 'numpy.std', 'np.std', (['log_melspectrogram'], {}), 
'(log_melspectrogram)\n', (2069, 2089), True, 'import numpy as np\n'), ((2836, 2848), 'numpy.std', 'np.std', (['mfcc'], {}), '(mfcc)\n', (2842, 2848), True, 'import numpy as np\n'), ((3806, 3825), 'numpy.std', 'np.std', (['filter_bank'], {}), '(filter_bank)\n', (3812, 3825), True, 'import numpy as np\n'), ((3441, 3454), 'torch.Tensor', 'Tensor', (['sound'], {}), '(sound)\n', (3447, 3454), False, 'from torch import Tensor\n')] |
import numpy as np
import nibabel as nib
import os
def squeezeNii(root, file):
    """Load root/file as a NIfTI image, drop its singleton dimensions, and
    save the result next to the original with a `_temp` suffix
    (e.g. scan.nii -> scan_temp.nii).

    :param root: directory containing the file
    :param file: NIfTI file name (expected to end in .nii)
    """
    # use os.path.join instead of manual '/' concatenation for portability
    src_path = os.path.join(root, file)
    img = nib.load(src_path)
    newFile = file.replace('.nii', '_temp.nii')
    squeezed = nib.Nifti1Image(np.squeeze(img.dataobj), img.affine)
    nib.save(squeezed, os.path.join(root, newFile))
# NOTE(review): the comment below is misleading - dir_path is a hard-coded
# study directory, not the directory the program is running in.
# This is to get the directory that the program
# is currently running in.
dir_path = '/data/data_mrcv/45_DATA_HUMANS/CHEST/STUDIES/2020_CARDIAC_DL_SEGMENTATION_CORRADO/test'

# Walk the study tree and squeeze every NIfTI file in place:
# write x_temp.nii, delete the original x.nii, then rename the temp back.
for root, dirs, files in os.walk(dir_path):
    print(root)
    for file in files:
        if file.endswith('.nii'):
            squeezeNii(root, str(file))
            os.remove(root + '/' + str(file))
            os.rename(root + '/' + str(file).replace('.nii','_temp.nii'), root + '/' + str(file))
| [
"numpy.squeeze",
"os.walk",
"nibabel.load"
] | [((466, 483), 'os.walk', 'os.walk', (['dir_path'], {}), '(dir_path)\n', (473, 483), False, 'import os\n'), ((95, 122), 'nibabel.load', 'nib.load', (["(root + '/' + file)"], {}), "(root + '/' + file)\n", (103, 122), True, 'import nibabel as nib\n'), ((201, 224), 'numpy.squeeze', 'np.squeeze', (['img.dataobj'], {}), '(img.dataobj)\n', (211, 224), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import numpy as np
def exponential(x, a, b):
    """Exponential growth model a * b**x, used as the curve_fit target."""
    return a * (b ** x)
def get_curve_pars(d20, d21):
    """Fit an exponential model to a synthetic 120-year temperature series.

    The series warms linearly at d20 deg/yr for years 1-100, then at d21
    deg/yr for years 101-120. An a*b**x model is fitted with curve_fit.

    :param d20: warming rate for the first 100 years
    :param d21: warming rate for the last 20 years
    :return: [param, perr] where param = [a, b] and perr is the one-sigma
             uncertainty from the covariance diagonal
    """
    year = np.linspace(1, 120, num=120)
    # piecewise-linear temperature record: slope d20 then d21
    temp_20 = np.linspace(0, d20 * 100, num=100)
    temp_21 = np.linspace(d20 * 101, d20 * 100 + d21 * 20, num=20)
    temp = np.concatenate((temp_20, temp_21), axis=None)
    (param, cov) = curve_fit(exponential, year, temp, p0=[0.1, 1.05])
    # one-sigma parameter uncertainties
    perr = np.sqrt(np.diag(cov))
    # (removed: an unused `fit` evaluation and dead commented-out plotting)
    return [param, perr]
# param, perr = get_curve_pars(0.019, 0.036)
# print(param)
| [
"scipy.optimize.curve_fit",
"numpy.linspace",
"numpy.concatenate",
"numpy.diag"
] | [((179, 207), 'numpy.linspace', 'np.linspace', (['(1)', '(120)'], {'num': '(120)'}), '(1, 120, num=120)\n', (190, 207), True, 'import numpy as np\n'), ((222, 256), 'numpy.linspace', 'np.linspace', (['(0)', '(d20 * 100)'], {'num': '(100)'}), '(0, d20 * 100, num=100)\n', (233, 256), True, 'import numpy as np\n'), ((269, 321), 'numpy.linspace', 'np.linspace', (['(d20 * 101)', '(d20 * 100 + d21 * 20)'], {'num': '(20)'}), '(d20 * 101, d20 * 100 + d21 * 20, num=20)\n', (280, 321), True, 'import numpy as np\n'), ((327, 372), 'numpy.concatenate', 'np.concatenate', (['(temp_20, temp_21)'], {'axis': 'None'}), '((temp_20, temp_21), axis=None)\n', (341, 372), True, 'import numpy as np\n'), ((439, 489), 'scipy.optimize.curve_fit', 'curve_fit', (['exponential', 'year', 'temp'], {'p0': '[0.1, 1.05]'}), '(exponential, year, temp, p0=[0.1, 1.05])\n', (448, 489), False, 'from scipy.optimize import curve_fit\n'), ((528, 540), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (535, 540), True, 'import numpy as np\n')] |
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
WHDR Hinge Loss layer.
Provides the layer that computes a Hinge loss approximation to the WHDR as
explained in the intrinsic images in the wild paper.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from whdr_layer import WhdrLayer
MAX_EVALUATED_COMPARISONS = 1500 # 2500 # non-augmented has max 1181
# derive from WhdrLayer to get its methods
# (get comparisons, visualize, lightness)
class WhdrHingeLossLayer(WhdrLayer):
    """
    WHDR Hinge Loss Layer.
    Compute a Hinge loss approximation to the WHDR Loss as explained in the
    intrinsic images in the wild paper.
    """
    def setup(self, bottom, top):
        """Check that layer is correctly set up by checking input pair.

        Parses ``self.param_str``: either empty (defaults) or four
        underscore-separated values ``delta_margin_ratio_eval_dense``.
        """
        if len(bottom) != 2 and len(bottom) != 3:
            raise Exception("Need two inputs, the reflectance image on which "
                            "the WHDR is supposed to be evaluated and the "
                            "ground truth comparisons. In the case of Sintel"
                            "a third input with the ground truth reflectances"
                            "is needed and the given comparisons should be 0.")
        params = self.param_str.split('_')
        if self.param_str == "":
            self.delta = 0.1  # threshold for "more or less equal"
            self.margin = 0.0  # margin in the Hinge
            self.ratio = 1.0  # ratio of evaluated comparisons
            self.eval_dense = 1  # evaluate dense labels?
        elif len(params) == 4:
            self.delta = float(params[0])
            assert(self.delta >= 0)
            self.margin = float(params[1])
            assert(self.margin >= 0)
            self.ratio = float(params[2])
            assert(0 < self.ratio <= 1)
            self.eval_dense = int(params[3])
        else:
            msg = ("parameters to WhdrHingeLossLayer were not as expected: " +
                   self.param_str +
                   " was provided, but need four arguments, " +
                   "delta, margin" +
                   "ratio of comparisons, if dense " +
                   "labels are supposed to be evaluated."
                   )
            raise Exception(msg)
        print("WhdrHingeLossLayer uses",
              "delta =", self.delta,
              "margin =", self.margin,
              "ratio of evaluated comparisons =", self.ratio,
              "evaluate dense labels:", self.eval_dense,
              )
    def reshape(self, bottom, top):
        """Define dimensions of data."""
        # loss output is scalar
        top[0].reshape(1)
    def forward(self, bottom, top):
        """Forward pass of the layer.

        Computes the mean WHDR hinge loss over the batch and caches the
        per-pixel gradient in ``self.diff`` for the backward pass.
        """
        # start = timeit.default_timer()
        batch_size, channels, height, width = bottom[0].data.shape
        # prepare blob for backprop step to share computation
        # (it is changed in self._whdr_hinge_single_img !!!)
        self.diff = np.zeros_like(bottom[0].data)
        whdrs = [self._whdr_hinge_single_img(bottom, b)
                 for b in range(batch_size)]
        # compute the final WHDR value as mean of the WHDRs in the batch
        whdr = np.mean(whdrs)
        # also apply the mean (division by batch_size) to gradient
        self.diff /= batch_size
        top[0].data[...] = whdr
    def backward(self, top, propagate_down, bottom):
        """Backward pass of the layer."""
        # compute gradient to (reflectance) image
        if propagate_down[0]:
            loss_weight = top[0].diff[0]
            # derivative is mostly computed in forward
            bottom[0].diff[...] = self.diff * loss_weight
        # check that there is no gradient to the ground truth computed
        if propagate_down[1]:
            print('Layer cannot backpropagate to ground truth comparisons.')
            # raise Exception('Layer cannot backpropagate to ground truth.')
    def _whdr_hinge_single_img(self, bottom, b):
        """Compute the WHDR hinge loss for image ``b`` of the batch.

        Accumulates the gradient into ``self.diff[b]`` and divides both
        the loss and the gradient by the total comparison weight.
        ``_get_comparisons`` is inherited from WhdrLayer.
        """
        # inner_start = timeit.default_timer()
        # get the reflectance image
        refl_img = bottom[0].data[b, :, :, :]
        height, width = refl_img.shape[1:]
        comparisons, file_name = self._get_comparisons(bottom, b,
                                                       height, width)
        num_comparisons = comparisons.shape[0]
        if not self.eval_dense and num_comparisons > 300:
            # do not evaluate densely annotated images
            num_comparisons = 1
        if self.ratio < 1.0:
            num_comparisons = int(np.ceil(self.ratio * num_comparisons))
        if num_comparisons <= MAX_EVALUATED_COMPARISONS:
            # for non-augmented data this should always be the case
            comp_list = range(num_comparisons)
        else:
            # cap the per-image cost by sampling a subset of comparisons
            comp_list = np.random.choice(num_comparisons,
                                         MAX_EVALUATED_COMPARISONS,
                                         replace=False)
        weight_sum = np.sum(comparisons[comp_list, 5])
        error_sum = sum([self._eval_single_comparison(refl_img,
                                                      comparisons[c, :],
                                                      b)
                         for c in comp_list])
        # catch a possible weight_sum = 0 if there are no comparisons
        if weight_sum:
            whdr = error_sum / weight_sum
            self.diff[b, :, :, :] /= weight_sum
        else:
            whdr = 0.0
        return whdr
    def _eval_single_comparison(self, refl_img, comparison, b):
        """Evaluate one human judgement and accumulate its gradient.

        ``comparison`` rows are (x1, y1, x2, y2, darker, weight) as
        unpacked below.  Returns the weighted hinge loss for this pair and
        adds the chain-rule gradient into ``self.diff[b]``.
        ``_lightness`` is inherited from WhdrLayer.
        """
        x1, y1, x2, y2, darker = comparison[0:5].astype(int)
        weight = comparison[5]
        # pay attention to the blob ordering:
        # channel times y times x
        R1 = refl_img[:, y1, x1]
        R2 = refl_img[:, y2, x2]
        # get the lightness instead of the (r,g,b) and gradient
        L1, dL1dR = self._lightness(R1)
        L2, dL2dR = self._lightness(R2)
        # compute ratio once
        L2inv = 1. / L2
        y = L1 * L2inv  # => y = L1 / L2
        # derivative of the ratio
        dydL1 = L2inv  # => dydL1 = 1. / L2
        dydL2 = -y * L2inv  # => dydL2 = -L1 / L2**2
        # branch into the cases 0(=E), 1, 2
        if darker == 1:  # L1 is darker than L2
            border = 1 / (1 + self.delta + self.margin)
            if y > border:
                loss_y = y - border  # loss_y = max(0, y - border)
                dldy = 1  # derivative of the hinge loss
            else:
                loss_y = 0
                dldy = 0
        elif darker == 2:  # L2 is darker than L1
            border = 1 + self.delta + self.margin
            if y < border:
                loss_y = border - y  # loss_y = max(0, border - y)
                dldy = -1
            else:
                loss_y = 0
                dldy = 0
        elif darker == 0:  # L1 and L2 are more or less the same
            if self.margin <= self.delta:
                # this should normally be the case that makes sense
                border_right = 1 + self.delta - self.margin
                # loss_y = max(0, border_left - y, y - border_right)
                if y > border_right:
                    loss_y = y - border_right
                    dldy = 1
                else:
                    border_left = 1 / border_right
                    if y < border_left:
                        loss_y = border_left - y
                        dldy = -1
                    else:
                        loss_y = 0
                        dldy = 0
            else:
                # margin > delta: the two hinges overlap, loss is never 0
                border = 1 + self.delta - self.margin
                loss_y = max(1/border - y, y - border)
                if y > 1:
                    dldy = 1
                else:
                    dldy = -1
        else:
            raise Exception('darker is neither 0(=E), 1, 2')
        error = weight * loss_y
        # the final derivatives by chain rule
        self.diff[b, :, y1, x1] += weight * dldy * dydL1 * dL1dR
        self.diff[b, :, y2, x2] += weight * dldy * dydL2 * dL2dR
        return error
| [
"numpy.mean",
"numpy.ceil",
"numpy.random.choice",
"numpy.sum",
"numpy.zeros_like"
] | [((4039, 4068), 'numpy.zeros_like', 'np.zeros_like', (['bottom[0].data'], {}), '(bottom[0].data)\n', (4052, 4068), True, 'import numpy as np\n'), ((4260, 4274), 'numpy.mean', 'np.mean', (['whdrs'], {}), '(whdrs)\n', (4267, 4274), True, 'import numpy as np\n'), ((6038, 6071), 'numpy.sum', 'np.sum', (['comparisons[comp_list, 5]'], {}), '(comparisons[comp_list, 5])\n', (6044, 6071), True, 'import numpy as np\n'), ((5859, 5934), 'numpy.random.choice', 'np.random.choice', (['num_comparisons', 'MAX_EVALUATED_COMPARISONS'], {'replace': '(False)'}), '(num_comparisons, MAX_EVALUATED_COMPARISONS, replace=False)\n', (5875, 5934), True, 'import numpy as np\n'), ((5609, 5646), 'numpy.ceil', 'np.ceil', (['(self.ratio * num_comparisons)'], {}), '(self.ratio * num_comparisons)\n', (5616, 5646), True, 'import numpy as np\n')] |
"""
Traffic.py
"""
__author__ = "<EMAIL>"
import numpy as np
from os import listdir
from re import split
from OU import OU
from helper import softmax
def natural_key(string_):
    """Sort key for natural ordering: digit runs compare numerically.

    See http://www.codinghorror.com/blog/archives/001018.html
    """
    key = []
    for chunk in split(r'(\d+)', string_):
        key.append(int(chunk) if chunk.isdigit() else chunk)
    return key
#
class Traffic():
    """Traffic-matrix generator.

    The ``type`` string selects the generator; some types carry a
    parameter after a colon, e.g. ``'STAT:<v1,v2,...>'``, ``'STATEQ:<v>'``,
    ``'FILE:<name>'`` or ``'DIR:<path>'``.
    """
    def __init__(self, nodes_num, type, capacity):
        self.nodes_num = nodes_num
        self.prev_traffic = None
        self.type = type
        # rescale so the average over the off-diagonal pairs (no
        # self-traffic) matches the requested per-pair capacity
        self.capacity = capacity * nodes_num / (nodes_num - 1)
        self.dictionary = {}
        self.dictionary['NORM'] = self.normal_traffic
        self.dictionary['UNI'] = self.uniform_traffic
        self.dictionary['CONTROLLED'] = self.controlled_uniform_traffic
        self.dictionary['EXP'] = self.exp_traffic
        self.dictionary['OU'] = self.ou_traffic
        self.dictionary['STAT'] = self.stat_traffic
        self.dictionary['STATEQ'] = self.stat_eq_traffic
        self.dictionary['FILE'] = self.file_traffic
        self.dictionary['DIR'] = self.dir_traffic
        if self.type.startswith('DIR:'):
            # matrices are read one file at a time, in natural-sort order
            self.dir = sorted(listdir(self.type.split('DIR:')[-1]), key=lambda x: natural_key((x)))
        self.static = None
        self.total_ou = OU(1, self.capacity/2, 0.1, self.capacity/2)
        self.nodes_ou = OU(self.nodes_num**2, 1, 0.1, 1)
    def normal_traffic(self):
        """Normally distributed total volume, softmax-spread over pairs."""
        # BUG FIX: the original referenced the undefined global name
        # `capacity` (NameError at call time); use the instance attribute.
        t = np.random.normal(self.capacity/2, self.capacity/2)
        return np.asarray(t * softmax(np.random.randn(self.nodes_num, self.nodes_num))).clip(min=0.001)
    def uniform_traffic(self):
        """Uniformly distributed total volume, softmax-spread over pairs."""
        t = np.random.uniform(0, self.capacity*1.25)
        return np.asarray(t * softmax(np.random.uniform(0, 1, size=[self.nodes_num]*2))).clip(min=0.001)
    def controlled_uniform_traffic(self):
        """Like uniform_traffic, but resample only one randomly chosen
        entry of the previous matrix on each call."""
        t = np.random.uniform(0, self.capacity*1.25)
        if self.prev_traffic is None:
            self.prev_traffic = np.asarray(t * softmax(np.random.uniform(0, 1, size=[self.nodes_num]*2))).clip(min=0.001)
        # one-hot mask selecting the single entry to be replaced
        dist = [1]
        dist += [0]*(self.nodes_num**2 - 1)
        ch = np.random.choice(dist, [self.nodes_num]*2)
        tt = np.multiply(self.prev_traffic, 1 - ch)
        nt = np.asarray(t * softmax(np.random.uniform(0, 1, size=[self.nodes_num]*2))).clip(min=0.001)
        nt = np.multiply(nt, ch)
        self.prev_traffic = tt + nt
        return self.prev_traffic
    def exp_traffic(self):
        """Exponentially distributed traffic built from the outer product
        of two exponential vectors; diagonal entries are set to -1."""
        a = np.random.exponential(size=self.nodes_num)
        b = np.random.exponential(size=self.nodes_num)
        # outer product of a and b gives the pairwise intensity matrix
        # (see https://blog.csdn.net/u011599639/article/details/77926402)
        T = np.outer(a, b)
        # mark self-traffic (the diagonal) with -1
        np.fill_diagonal(T, -1)
        T[T!=-1] = np.asarray(np.random.exponential()*T[T!=-1]/np.average(T[T!=-1])).clip(min=0.001)
        return T
    def stat_traffic(self):
        """Static matrix parsed once from 'STAT:v1,v2,...' (row-major)."""
        if self.static is None:
            string = self.type.split('STAT:')[-1]
            v = np.asarray(tuple(float(x) for x in string.split(',')[:self.nodes_num**2]))
            M = np.split(v, self.nodes_num)
            self.static = np.vstack(M)
        return self.static
    def stat_eq_traffic(self):
        """Static matrix with every entry equal to the 'STATEQ:<value>'."""
        if self.static is None:
            value = float(self.type.split('STATEQ:')[-1])
            self.static = np.full([self.nodes_num]*2, value, dtype=float)
        return self.static
    def ou_traffic(self):
        """Ornstein-Uhlenbeck total volume, OU-softmax spread over pairs."""
        t = self.total_ou.evolve()[0]
        nt = t * softmax(self.nodes_ou.evolve())
        i = np.split(nt, self.nodes_num)
        return np.vstack(i).clip(min=0.001)
    def file_traffic(self):
        """Static matrix loaded once from 'traffic/<FILE:name>' (CSV)."""
        if self.static is None:
            fname = 'traffic/' + self.type.split('FILE:')[-1]
            v = np.loadtxt(fname, delimiter=',')
            self.static = np.split(v, self.nodes_num)
        return self.static
    def dir_traffic(self):
        """Pop and load the next '.txt' matrix from the configured
        directory; returns False once the directory is exhausted."""
        while len(self.dir) > 0:
            tm = self.dir.pop(0)
            if not tm.endswith('.txt'):
                continue
            fname = self.type.split('DIR:')[-1] + '/' + tm
            v = np.loadtxt(fname, delimiter=',')
            return np.split(v, self.nodes_num)
        return False
    def generate(self):
        """Produce the next traffic matrix for the configured type."""
        return self.dictionary[self.type.split(":")[0]]()
# NOTE(review): what does this 14x14 matrix output represent?  The dump
# below looks like a sample generator output; the -1.0 diagonal matches
# the self-traffic marker set by exp_traffic — confirm which type produced it.
#[[-1.00000000e+00 4.82597027e-01 1.64885219e-01 2.39937374e-01
# 4.24195039e-01 3.90513477e-01 1.73313504e-01 2.39531467e-01
# 8.81383591e-01 2.35750495e-01 9.86736084e-01 1.10174305e+00
# 2.91715890e-02 1.24369249e-01]
# [1.24568554e-01 - 1.00000000e+00 4.58794226e-02 6.67627350e-02
# 1.18032554e-01 1.08660636e-01 4.82245987e-02 6.66497910e-02
# 2.45245574e-01 6.55977330e-02 2.74559976e-01 3.06560741e-01
# 8.11701418e-03 3.46058268e-02]
# [4.41894837e-01 4.76355699e-01 - 1.00000000e+00 2.36834313e-01
# 4.18709012e-01 3.85463046e-01 1.71072076e-01 2.36433655e-01
# 8.69984838e-01 2.32701582e-01 9.73974829e-01 1.08749443e+00
# 2.87943189e-02 1.22760807e-01]
# [3.99306289e-01 4.30445913e-01 1.47067148e-01 - 1.00000000e+00
# 3.78355046e-01 3.48313231e-01 1.54584644e-01 2.13646863e-01
# 7.86138212e-01 2.10274476e-01 8.80105948e-01 9.82684859e-01
# 2.60192056e-02 1.10929475e-01]
# [8.86430104e-01 9.55557740e-01 3.26478072e-01 4.75083769e-01
# - 1.00000000e+00 7.73229329e-01 3.43166350e-01 4.74280060e-01
# 1.74516805e+00 4.66793613e-01 1.95376940e+00 2.18148691e+00
# 5.77606908e-02 2.46255140e-01]
# [6.17537874e-01 6.65696136e-01 2.27443285e-01 3.30970507e-01
# 5.85136215e-01 - 1.00000000e+00 2.39069293e-01 3.30410597e-01
# 1.21578381e+00 3.25195110e-01 1.36110743e+00 1.51974846e+00
# 4.02393985e-02 1.71555405e-01]
# [2.31567998e-01 2.49626667e-01 8.52880256e-02 1.24109275e-01
# 2.19417832e-01 2.01995810e-01 - 1.00000000e+00 1.23899316e-01
# 4.55901789e-01 1.21943582e-01 5.10396098e-01 5.69884250e-01
# 1.50892072e-02 6.43308585e-02]
# [1.68174721e+00 1.81289710e+00 6.19398623e-01 9.01335366e-01
# 1.59350744e+00 1.46698116e+00 6.51059851e-01 - 1.00000000e+00
# 3.31095647e+00 8.85607170e-01 3.70671778e+00 4.13874653e+00
# 1.09584366e-01 4.67198589e-01]
# [1.81548791e+00 1.95706747e+00 6.68656207e-01 9.73013928e-01
# 1.72023088e+00 1.58364262e+00 7.02835289e-01 9.71367859e-01
# - 1.00000000e+00 9.56034948e-01 4.00149396e+00 4.46787974e+00
# 1.18299046e-01 5.04352487e-01]
# [3.39180759e+00 3.65631535e+00 1.24922518e+00 1.81784523e+00
# 3.21384249e+00 2.95865979e+00 1.31308067e+00 1.81476994e+00
# 6.67765480e+00 - 1.00000000e+00 7.47584027e+00 8.34717123e+00
# 2.21013647e-01 9.42262733e-01]
# [3.80108803e+00 4.09751324e+00 1.39996587e+00 2.03719980e+00
# 3.60164835e+00 3.31567343e+00 1.47152663e+00 2.03375342e+00
# 7.48342971e+00 2.00165090e+00 - 1.00000000e+00 9.35440226e+00
# 2.47682778e-01 1.05596308e+00]
# [1.92350407e-01 2.07350720e-01 7.08439274e-02 1.03090538e-01
# 1.82257953e-01 1.67786468e-01 7.44651911e-02 1.02916137e-01
# 3.78691769e-01 1.01291620e-01 4.23957102e-01 - 1.00000000e+00
# 1.25337489e-02 5.34359969e-02]
# [3.30800641e-01 3.56597899e-01 1.21836065e-01 1.77293184e-01
# 3.13443828e-01 2.88556037e-01 1.28063846e-01 1.76993253e-01
# 6.51267039e-01 1.74199439e-01 7.29113514e-01 8.14093818e-01
# - 1.00000000e+00 9.18982306e-02]
# [6.67661381e-01 7.19728489e-01 2.45904104e-01 3.57834288e-01
# 6.32629787e-01 5.82398272e-01 2.58473757e-01 3.57228932e-01
# 1.31446496e+00 3.51590122e-01 1.47158401e+00 1.64310142e+00
# 4.35054974e-02 - 1.00000000e+00]]
| [
"numpy.random.normal",
"re.split",
"numpy.multiply",
"numpy.average",
"numpy.random.choice",
"numpy.random.exponential",
"numpy.fill_diagonal",
"numpy.split",
"numpy.outer",
"numpy.vstack",
"numpy.random.uniform",
"numpy.full",
"numpy.loadtxt",
"numpy.random.randn",
"OU.OU"
] | [((1266, 1314), 'OU.OU', 'OU', (['(1)', '(self.capacity / 2)', '(0.1)', '(self.capacity / 2)'], {}), '(1, self.capacity / 2, 0.1, self.capacity / 2)\n', (1268, 1314), False, 'from OU import OU\n'), ((1335, 1369), 'OU.OU', 'OU', (['(self.nodes_num ** 2)', '(1)', '(0.1)', '(1)'], {}), '(self.nodes_num ** 2, 1, 0.1, 1)\n', (1337, 1369), False, 'from OU import OU\n'), ((1411, 1455), 'numpy.random.normal', 'np.random.normal', (['(capacity / 2)', '(capacity / 2)'], {}), '(capacity / 2, capacity / 2)\n', (1427, 1455), True, 'import numpy as np\n'), ((1600, 1642), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(self.capacity * 1.25)'], {}), '(0, self.capacity * 1.25)\n', (1617, 1642), True, 'import numpy as np\n'), ((1801, 1843), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(self.capacity * 1.25)'], {}), '(0, self.capacity * 1.25)\n', (1818, 1843), True, 'import numpy as np\n'), ((2078, 2122), 'numpy.random.choice', 'np.random.choice', (['dist', '([self.nodes_num] * 2)'], {}), '(dist, [self.nodes_num] * 2)\n', (2094, 2122), True, 'import numpy as np\n'), ((2135, 2173), 'numpy.multiply', 'np.multiply', (['self.prev_traffic', '(1 - ch)'], {}), '(self.prev_traffic, 1 - ch)\n', (2146, 2173), True, 'import numpy as np\n'), ((2291, 2310), 'numpy.multiply', 'np.multiply', (['nt', 'ch'], {}), '(nt, ch)\n', (2302, 2310), True, 'import numpy as np\n'), ((2457, 2499), 'numpy.random.exponential', 'np.random.exponential', ([], {'size': 'self.nodes_num'}), '(size=self.nodes_num)\n', (2478, 2499), True, 'import numpy as np\n'), ((2512, 2554), 'numpy.random.exponential', 'np.random.exponential', ([], {'size': 'self.nodes_num'}), '(size=self.nodes_num)\n', (2533, 2554), True, 'import numpy as np\n'), ((2664, 2678), 'numpy.outer', 'np.outer', (['a', 'b'], {}), '(a, b)\n', (2672, 2678), True, 'import numpy as np\n'), ((2706, 2729), 'numpy.fill_diagonal', 'np.fill_diagonal', (['T', '(-1)'], {}), '(T, -1)\n', (2722, 2729), True, 'import numpy as np\n'), ((3511, 3539), 
'numpy.split', 'np.split', (['nt', 'self.nodes_num'], {}), '(nt, self.nodes_num)\n', (3519, 3539), True, 'import numpy as np\n'), ((298, 322), 're.split', 'split', (['"""(\\\\d+)"""', 'string_'], {}), "('(\\\\d+)', string_)\n", (303, 322), False, 'from re import split\n'), ((3068, 3095), 'numpy.split', 'np.split', (['v', 'self.nodes_num'], {}), '(v, self.nodes_num)\n', (3076, 3095), True, 'import numpy as np\n'), ((3122, 3134), 'numpy.vstack', 'np.vstack', (['M'], {}), '(M)\n', (3131, 3134), True, 'import numpy as np\n'), ((3310, 3359), 'numpy.full', 'np.full', (['([self.nodes_num] * 2)', 'value'], {'dtype': 'float'}), '([self.nodes_num] * 2, value, dtype=float)\n', (3317, 3359), True, 'import numpy as np\n'), ((3723, 3755), 'numpy.loadtxt', 'np.loadtxt', (['fname'], {'delimiter': '""","""'}), "(fname, delimiter=',')\n", (3733, 3755), True, 'import numpy as np\n'), ((3782, 3809), 'numpy.split', 'np.split', (['v', 'self.nodes_num'], {}), '(v, self.nodes_num)\n', (3790, 3809), True, 'import numpy as np\n'), ((4071, 4103), 'numpy.loadtxt', 'np.loadtxt', (['fname'], {'delimiter': '""","""'}), "(fname, delimiter=',')\n", (4081, 4103), True, 'import numpy as np\n'), ((4123, 4150), 'numpy.split', 'np.split', (['v', 'self.nodes_num'], {}), '(v, self.nodes_num)\n', (4131, 4150), True, 'import numpy as np\n'), ((3555, 3567), 'numpy.vstack', 'np.vstack', (['i'], {}), '(i)\n', (3564, 3567), True, 'import numpy as np\n'), ((2794, 2816), 'numpy.average', 'np.average', (['T[T != -1]'], {}), '(T[T != -1])\n', (2804, 2816), True, 'import numpy as np\n'), ((1490, 1537), 'numpy.random.randn', 'np.random.randn', (['self.nodes_num', 'self.nodes_num'], {}), '(self.nodes_num, self.nodes_num)\n', (1505, 1537), True, 'import numpy as np\n'), ((1679, 1729), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': '([self.nodes_num] * 2)'}), '(0, 1, size=[self.nodes_num] * 2)\n', (1696, 1729), True, 'import numpy as np\n'), ((2211, 2261), 'numpy.random.uniform', 
'np.random.uniform', (['(0)', '(1)'], {'size': '([self.nodes_num] * 2)'}), '(0, 1, size=[self.nodes_num] * 2)\n', (2228, 2261), True, 'import numpy as np\n'), ((2761, 2784), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (2782, 2784), True, 'import numpy as np\n'), ((1935, 1985), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': '([self.nodes_num] * 2)'}), '(0, 1, size=[self.nodes_num] * 2)\n', (1952, 1985), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.recovery.mallett2019` module.
"""
from __future__ import division, unicode_literals
import unittest
import numpy as np
from colour.characterisation import SDS_COLOURCHECKERS
from colour.colorimetry import (SpectralShape, MSDS_CMFS_STANDARD_OBSERVER,
SDS_ILLUMINANTS, CCS_ILLUMINANTS, sd_to_XYZ)
from colour.difference import JND_CIE1976, delta_E_CIE1976
from colour.models import RGB_COLOURSPACE_sRGB, XYZ_to_RGB, XYZ_to_Lab
from colour.recovery import (spectral_primary_decomposition_Mallett2019,
RGB_to_sd_Mallett2019, sRGB_to_sd_Mallett2019)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestSpectralPrimaryDecompositionMallett2019', 'TestsRGB_to_sd_Mallett2019'
]
SD_D65 = SDS_ILLUMINANTS['D65']
CCS_D65 = CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['D65']
class TestMixinMallett2019(object):
    """
    A mixin for testing the :mod:`colour.recovery.mallett2019` module.
    """
    def check_callable(self, RGB_to_sd_callable, *args):
        """
        Tests :func:`colour.recovery.RGB_to_sd_Mallett2019` definition or the
        more specialised :func:`colour.recovery.sRGB_to_sd_Mallett2019`
        definition.
        """
        # Make sure the white point is reconstructed as a perfectly flat
        # spectrum.
        RGB = np.full(3, 1.0)
        sd = RGB_to_sd_callable(RGB, *args)
        self.assertLess(np.var(sd.values), 1e-5)
        # Check if the primaries or their combination exceeds the [0, 1] range.
        lower = np.zeros_like(sd.values) - 1e-12
        # BUG FIX: the upper tolerance was 1e+12, which made the upper-bound
        # assertion vacuous; use the same 1e-12 tolerance as the lower bound.
        upper = np.ones_like(sd.values) + 1e-12
        for RGB in [[1, 1, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1]]:
            sd = RGB_to_sd_callable(RGB, *args)
            np.testing.assert_array_less(sd.values, upper)
            np.testing.assert_array_less(lower, sd.values)
        # Check Delta E's using a colour checker.
        for name, sd in SDS_COLOURCHECKERS['ColorChecker N Ohta'].items():
            XYZ = sd_to_XYZ(sd, illuminant=SD_D65) / 100
            Lab = XYZ_to_Lab(XYZ, CCS_D65)
            RGB = XYZ_to_RGB(XYZ, RGB_COLOURSPACE_sRGB.whitepoint, CCS_D65,
                             RGB_COLOURSPACE_sRGB.XYZ_to_RGB_matrix)
            recovered_sd = RGB_to_sd_callable(RGB, *args)
            recovered_XYZ = sd_to_XYZ(recovered_sd, illuminant=SD_D65) / 100
            recovered_Lab = XYZ_to_Lab(recovered_XYZ, CCS_D65)
            error = delta_E_CIE1976(Lab, recovered_Lab)
            # This method has relatively high Delta E's using datasets
            # generated quickly, so the threshold is increased for unit tests.
            if error > 5 * JND_CIE1976:
                self.fail('Delta E for \'{0}\' is {1}!'.format(name, error))
class TestSpectralPrimaryDecompositionMallett2019(unittest.TestCase,
                                                  TestMixinMallett2019):
    """
    Defines :func:`colour.recovery.spectral_primary_decomposition_Mallett2019`
    definition unit tests methods.
    """
    def test_spectral_primary_decomposition_Mallett2019(self):
        """
        Tests :func:`colour.recovery.\
spectral_primary_decomposition_Mallett2019` definition.
        """
        interval = SpectralShape(380, 730, 10)
        observer = MSDS_CMFS_STANDARD_OBSERVER[
            'CIE 1931 2 Degree Standard Observer']
        aligned_cmfs = observer.copy().align(interval)
        aligned_illuminant = SD_D65.copy().align(interval)
        basis_functions = spectral_primary_decomposition_Mallett2019(
            RGB_COLOURSPACE_sRGB, aligned_cmfs, aligned_illuminant)
        self.check_callable(RGB_to_sd_Mallett2019, basis_functions)
class TestsRGB_to_sd_Mallett2019(unittest.TestCase, TestMixinMallett2019):
    """
    Defines :func:`colour.recovery.sRGB_to_sd_Mallett2019` definition unit
    tests methods.
    """
    def test_sRGB_to_sd_Mallett2019(self):
        """
        Tests :func:`colour.recovery.sRGB_to_sd_Mallett2019` definition.
        """
        callable_under_test = sRGB_to_sd_Mallett2019
        self.check_callable(callable_under_test)
if __name__ == '__main__':
    # Discover and run this module's test cases when executed directly.
    unittest.main()
| [
"colour.recovery.spectral_primary_decomposition_Mallett2019",
"numpy.ones_like",
"numpy.testing.assert_array_less",
"colour.difference.delta_E_CIE1976",
"colour.models.XYZ_to_Lab",
"colour.colorimetry.SpectralShape",
"colour.colorimetry.sd_to_XYZ",
"colour.models.XYZ_to_RGB",
"unittest.main",
"num... | [((4307, 4322), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4320, 4322), False, 'import unittest\n'), ((1618, 1633), 'numpy.full', 'np.full', (['(3)', '(1.0)'], {}), '(3, 1.0)\n', (1625, 1633), True, 'import numpy as np\n'), ((3510, 3537), 'colour.colorimetry.SpectralShape', 'SpectralShape', (['(380)', '(730)', '(10)'], {}), '(380, 730, 10)\n', (3523, 3537), False, 'from colour.colorimetry import SpectralShape, MSDS_CMFS_STANDARD_OBSERVER, SDS_ILLUMINANTS, CCS_ILLUMINANTS, sd_to_XYZ\n'), ((3738, 3824), 'colour.recovery.spectral_primary_decomposition_Mallett2019', 'spectral_primary_decomposition_Mallett2019', (['RGB_COLOURSPACE_sRGB', 'cmfs', 'illuminant'], {}), '(RGB_COLOURSPACE_sRGB, cmfs,\n illuminant)\n', (3780, 3824), False, 'from colour.recovery import spectral_primary_decomposition_Mallett2019, RGB_to_sd_Mallett2019, sRGB_to_sd_Mallett2019\n'), ((1702, 1719), 'numpy.var', 'np.var', (['sd.values'], {}), '(sd.values)\n', (1708, 1719), True, 'import numpy as np\n'), ((1824, 1848), 'numpy.zeros_like', 'np.zeros_like', (['sd.values'], {}), '(sd.values)\n', (1837, 1848), True, 'import numpy as np\n'), ((1873, 1896), 'numpy.ones_like', 'np.ones_like', (['sd.values'], {}), '(sd.values)\n', (1885, 1896), True, 'import numpy as np\n'), ((2030, 2076), 'numpy.testing.assert_array_less', 'np.testing.assert_array_less', (['sd.values', 'upper'], {}), '(sd.values, upper)\n', (2058, 2076), True, 'import numpy as np\n'), ((2089, 2135), 'numpy.testing.assert_array_less', 'np.testing.assert_array_less', (['lower', 'sd.values'], {}), '(lower, sd.values)\n', (2117, 2135), True, 'import numpy as np\n'), ((2337, 2361), 'colour.models.XYZ_to_Lab', 'XYZ_to_Lab', (['XYZ', 'CCS_D65'], {}), '(XYZ, CCS_D65)\n', (2347, 2361), False, 'from colour.models import RGB_COLOURSPACE_sRGB, XYZ_to_RGB, XYZ_to_Lab\n'), ((2380, 2481), 'colour.models.XYZ_to_RGB', 'XYZ_to_RGB', (['XYZ', 'RGB_COLOURSPACE_sRGB.whitepoint', 'CCS_D65', 'RGB_COLOURSPACE_sRGB.XYZ_to_RGB_matrix'], {}), 
'(XYZ, RGB_COLOURSPACE_sRGB.whitepoint, CCS_D65,\n RGB_COLOURSPACE_sRGB.XYZ_to_RGB_matrix)\n', (2390, 2481), False, 'from colour.models import RGB_COLOURSPACE_sRGB, XYZ_to_RGB, XYZ_to_Lab\n'), ((2671, 2705), 'colour.models.XYZ_to_Lab', 'XYZ_to_Lab', (['recovered_XYZ', 'CCS_D65'], {}), '(recovered_XYZ, CCS_D65)\n', (2681, 2705), False, 'from colour.models import RGB_COLOURSPACE_sRGB, XYZ_to_RGB, XYZ_to_Lab\n'), ((2727, 2762), 'colour.difference.delta_E_CIE1976', 'delta_E_CIE1976', (['Lab', 'recovered_Lab'], {}), '(Lab, recovered_Lab)\n', (2742, 2762), False, 'from colour.difference import JND_CIE1976, delta_E_CIE1976\n'), ((2280, 2312), 'colour.colorimetry.sd_to_XYZ', 'sd_to_XYZ', (['sd'], {'illuminant': 'SD_D65'}), '(sd, illuminant=SD_D65)\n', (2289, 2312), False, 'from colour.colorimetry import SpectralShape, MSDS_CMFS_STANDARD_OBSERVER, SDS_ILLUMINANTS, CCS_ILLUMINANTS, sd_to_XYZ\n'), ((2594, 2636), 'colour.colorimetry.sd_to_XYZ', 'sd_to_XYZ', (['recovered_sd'], {'illuminant': 'SD_D65'}), '(recovered_sd, illuminant=SD_D65)\n', (2603, 2636), False, 'from colour.colorimetry import SpectralShape, MSDS_CMFS_STANDARD_OBSERVER, SDS_ILLUMINANTS, CCS_ILLUMINANTS, sd_to_XYZ\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import json
import numpy as np
from pprint import pprint
sys.path.append('../')
from main_human36 import main_human as main
# ordering of table 1 in the camera ready paper
# 1) 3d supervised - opt_row_sup
from opts.table_1.row_sup import opt as opt_row_sup
# 2) Ours - opt_row_ours
from opts.table_1.row_ours import opt as opt_row_ours
# 3) Known focal length (previously called weak projective)
from opts.table_1.row_3 import opt as opt_row_3
# 4) Skeleton from [41]
from opts.table_1.row_4 import opt as opt_row_4
# 5) No skeleton loss
from opts.table_1.row_5 import opt as opt_row_5
# 6) All pairs
from opts.table_1.row_6 import opt as opt_row_6
# 7) Distance tolerance
from opts.table_1.row_7 import opt as opt_row_7
# Row ordering matches Table 1 of the camera-ready paper: the supervised
# baseline, ours, then the ablation rows 3-7 (see imports above).
opt_list = [opt_row_sup, opt_row_ours, opt_row_3, opt_row_4, opt_row_5, opt_row_6, opt_row_7]
results = []
num_epochs = 25
save_ims = True
save_log = True
only_run_test = False  # set True to only re-evaluate saved checkpoints
# all the experiments are saved here
checkpoint_dir = '../checkpoint/table_1'
for i, exp_opt in enumerate(opt_list):
    exp_opt.epochs = num_epochs
    exp_opt.save_ims = save_ims
    # per-experiment checkpoint directory: <checkpoint_dir>/<exp_name>
    exp_opt.ckpt = os.path.join(checkpoint_dir, exp_opt.exp)
    exp_opt.ckpt_ims = exp_opt.ckpt + '/ims'
    if only_run_test:
        # resume from the last saved checkpoint and skip training
        exp_opt.load = exp_opt.ckpt + '/test_ckpt_last.pth.tar'
        exp_opt.resume = True
        exp_opt.is_train = False
        exp_opt.epochs = num_epochs+1
        save_log = False
    print("\n==================Options=================")
    pprint(vars(exp_opt), indent=4)
    # create the checkpoint directories (idempotent, no isdir race)
    os.makedirs(exp_opt.ckpt, exist_ok=True)
    os.makedirs(exp_opt.ckpt_ims, exist_ok=True)
    print("==========================================\n")
    err_test_best, err_test_last, err_test_actions_last = main(exp_opt, save_log)
    print("Testing Errors:")
    print(" - Best: [{}]".format(round(err_test_best, 2)))
    print(" - Last: [{}]".format(round(err_test_last, 2)))
    print(" - Last Avg Action: [{}]".format(round(np.mean(err_test_actions_last), 2)))
    result = {}
    result['exp'] = exp_opt.exp
    result['err_test_best'] = err_test_best
    result['err_test_last'] = err_test_last
    # BUG FIX: numpy scalars/arrays are not JSON-serializable and would make
    # json.dump below raise TypeError; coerce to plain Python floats.
    result['err_test_actions_last'] = [float(e) for e in err_test_actions_last]
    result['err_test_actions_last_mean'] = float(np.mean(err_test_actions_last))
    results.append(result)
with open('%s/table_1_results.json'%(checkpoint_dir),'w') as fp:
    json.dump(results, fp)
| [
"numpy.mean",
"os.makedirs",
"os.path.join",
"os.path.isdir",
"main_human36.main_human",
"sys.path.append",
"json.dump"
] | [((126, 148), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (141, 148), False, 'import sys\n'), ((1214, 1253), 'os.path.join', 'os.path.join', (['exp_opt.ckpt', 'exp_opt.exp'], {}), '(exp_opt.ckpt, exp_opt.exp)\n', (1226, 1253), False, 'import os\n'), ((1864, 1887), 'main_human36.main_human', 'main', (['exp_opt', 'save_log'], {}), '(exp_opt, save_log)\n', (1868, 1887), True, 'from main_human36 import main_human as main\n'), ((2384, 2414), 'numpy.mean', 'np.mean', (['err_test_actions_last'], {}), '(err_test_actions_last)\n', (2391, 2414), True, 'import numpy as np\n'), ((2513, 2535), 'json.dump', 'json.dump', (['results', 'fp'], {}), '(results, fp)\n', (2522, 2535), False, 'import json\n'), ((1618, 1645), 'os.path.isdir', 'os.path.isdir', (['exp_opt.ckpt'], {}), '(exp_opt.ckpt)\n', (1631, 1645), False, 'import os\n'), ((1647, 1672), 'os.makedirs', 'os.makedirs', (['exp_opt.ckpt'], {}), '(exp_opt.ckpt)\n', (1658, 1672), False, 'import os\n'), ((1684, 1715), 'os.path.isdir', 'os.path.isdir', (['exp_opt.ckpt_ims'], {}), '(exp_opt.ckpt_ims)\n', (1697, 1715), False, 'import os\n'), ((1717, 1746), 'os.makedirs', 'os.makedirs', (['exp_opt.ckpt_ims'], {}), '(exp_opt.ckpt_ims)\n', (1728, 1746), False, 'import os\n'), ((2107, 2137), 'numpy.mean', 'np.mean', (['err_test_actions_last'], {}), '(err_test_actions_last)\n', (2114, 2137), True, 'import numpy as np\n')] |
# https://scikit-learn.org/stable/auto_examples/inspection/plot_permutation_importance_multicollinear.html#sphx-glr-auto-examples-inspection-plot-permutation-importance-multicollinear-py
# https://orbi.uliege.be/bitstream/2268/155642/1/louppe13.pdf
# https://proceedings.neurips.cc/paper/2019/file/702cafa3bb4c9c86e4a3b6834b45aedd-Paper.pdf
# https://indico.cern.ch/event/443478/contributions/1098668/attachments/1157598/1664920/slides.pdf
import time
import warnings
from collections import defaultdict
from typing import Callable, Tuple
import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.cluster import hierarchy
from scipy.spatial.distance import squareform
from scipy.stats import spearmanr
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
from sklearn.inspection import permutation_importance
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
def evaluate_cv(model, dataset_x, dataset_y, cv=None):
    """Cross-validate *model* on the dataset and return per-fold scores."""
    began = time.time()
    fold_scores = cross_val_score(model, dataset_x, dataset_y, cv=cv)
    elapsed = time.time() - began
    # print(f"mean CV score: {np.mean(fold_scores):.3f} (in {elapsed:.3f} seconds)")
    return fold_scores
def check_class_imbalance(dataset_y):
    """Print class-frequency statistics for *dataset_y* and return the
    fraction of samples in the most frequent class (1/#classes would be
    perfectly balanced)."""
    _, occurrences = np.unique(dataset_y, return_counts=True)
    print(f"{occurrences = }")
    # fraction of samples belonging to the most frequent class
    majority_fraction = (occurrences / dataset_y.size).max()
    print(f"{np.max(occurrences / dataset_y.size) = :.4f}")
    print(f"{np.min(occurrences / dataset_y.size) = :.4f}")
    print(f". . . . . . . . . . . . 1 / #classes = {1/(np.max(dataset_y)+1):.4f}")
    return majority_fraction
def compare_score_imbalance(score: float, imbalance: float):
    """Warn when *score* is not comfortably (1.5x) above the majority-class
    fraction *imbalance*; otherwise print both values."""
    threshold = 1.5 * imbalance
    if score >= threshold:
        print(f"{score = :.3f} ({imbalance = :.3f})")
        return
    warnings.warn(
        f"{score = :.3f} is below {threshold:.3f}, results may not be "
        f"indicative (class_{imbalance = :.3f})"
    )
def check_correlation(dataset_x):
    """Print every pair of feature columns whose Pearson correlation
    coefficient exceeds 0.8 in absolute value."""
    n_features = dataset_x.shape[1]
    for i in range(n_features):
        for j in range(i + 1, n_features):
            coeff = np.corrcoef(dataset_x[:, i], dataset_x[:, j])[0, 1]
            if np.abs(coeff) <= 0.8:
                continue
            # dataset_x[:, j] = np.random.rand(dataset_x.shape[0])
            print(f"{i=} {j=} {coeff=}")
def new_model(
    random_state,
    n_estimators: int = 1000,
    max_features: int = None,
    max_depth: int = None,
) -> ExtraTreesClassifier:
    """Build an ExtraTreesClassifier with the given hyper-parameters."""
    classifier = ExtraTreesClassifier(
        random_state=random_state,
        n_estimators=n_estimators,
        max_features=max_features,
        max_depth=max_depth,
    )
    return classifier
def get_feature_idx(dataset_x, dataset_y, start=(), random_state=48):
    """Greedy forward feature selection.

    Starting from the indices in ``start``, repeatedly adds the single
    feature whose inclusion maximises the mean cross-validated score of an
    ExtraTreesClassifier, stopping once the best candidate improves the
    score by less than 0.01.  Returns the tuple of selected indices.
    """
    cv = 5
    def get_score_partial_features(indices: tuple):
        # Score the model restricted to the given feature columns;
        # returns (candidate_index, mean_cv_score).
        partial_x = dataset_x[:, indices]
        # model = new_model(random_state)
        # model = new_model(random_state=random_state)
        model = ExtraTreesClassifier(random_state=random_state)
        return indices[-1], np.mean(evaluate_cv(model, partial_x, dataset_y, cv))
    delayed_score = joblib.delayed(get_score_partial_features)
    last_score = 0.0
    selected = tuple(start)
    candidates = list(set(range(dataset_x.shape[1])) - set(selected))
    while True:
        # score all remaining candidates in parallel (all CPU cores)
        results = joblib.Parallel(n_jobs=-1)(
            delayed_score(selected + (c,)) for c in candidates
        )
        best_idx, best_score = results[0]
        for idx_, score_ in results[1:]:
            if score_ > best_score:
                best_score = score_
                best_idx = idx_
        # stop when the marginal gain falls below the 0.01 threshold
        if best_score - last_score < 0.01:
            break
        selected += (best_idx,)
        candidates.remove(best_idx)
        print(f"{best_score=:.3f} {selected=}")
        last_score = best_score
    return selected
def add_input_noise(dataset_x: np.ndarray, rel_scale: float):
    """Return *dataset_x* plus additive Gaussian noise.

    The noise standard deviation of each row is ``rel_scale`` times that
    row's mean absolute value.
    """
    per_row_scale = rel_scale * np.abs(dataset_x).mean(axis=1)
    # draw the noise transposed: numpy broadcasts the per-row scale over the
    # last axis, so the sample axis must come last during generation
    perturbation = np.random.normal(scale=per_row_scale, size=dataset_x.shape[::-1]).T
    return dataset_x + perturbation
def do_plot(dataset_x, dataset_y, stratify_classes=True, random_state=48):
    """Fit the reference model, then cluster correlated features and re-score.

    Workflow: train/test split, fit, print the held-out score, plot a
    dendrogram of Spearman-correlation-based feature clusters, and for each
    distance threshold keep one feature per cluster and report the score of
    a model trained on that reduced feature set.
    """
    model = new_model(random_state)
    # cv is only used by the commented-out cross-validation block below
    cv = 10
    # check_correlation(dataset_x)
    # imbalance = check_class_imbalance(dataset_y)
    # split dataset
    stratify = dataset_y if stratify_classes else None
    X_train, X_test, Y_train, Y_test = train_test_split(
        dataset_x,
        dataset_y,
        test_size=0.33,
        random_state=random_state,
        stratify=stratify,
    )
    # cv_scores = evaluate_cv(model, dataset_x, dataset_y, cv)
    # print(
    #     f"{np.min(cv_scores)=}",
    #     f"{np.mean(cv_scores)=}",
    #     f"{np.median(cv_scores)=}",
    #     f"{np.max(cv_scores)=}",
    # )
    # compare_score_imbalance(np.mean(cv_scores), imbalance)
    model.fit(X_train, Y_train)
    ts_score = model.score(X_test, Y_test)
    print(f"{ts_score=}")
    feature_names = list(map(str, range(dataset_x.shape[1])))
    # find the most important features (see sklearn doc)
    # result = permutation_importance(
    #     model, X_train, Y_train, n_repeats=10, random_state=42
    # )
    # perm_sorted_idx = result.importances_mean.argsort()
    # tree_importance_sorted_idx = np.argsort(model.feature_importances_)
    # tree_indices = np.arange(0, len(model.feature_importances_)) + 0.5
    # fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
    # ax1.barh(
    #     tree_indices, model.feature_importances_[tree_importance_sorted_idx], height=0.7
    # )
    # ax1.set_yticks(tree_indices)
    # ax1.set_yticklabels([feature_names[i] for i in tree_importance_sorted_idx])
    # ax1.set_ylim((0, len(model.feature_importances_)))
    # ax2.boxplot(
    #     result.importances[perm_sorted_idx].T,
    #     vert=False,
    #     labels=[feature_names[i] for i in perm_sorted_idx],
    # )
    # fig.tight_layout()
    # plt.show()
    # find the correlated features
    # fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
    fig, ax1 = plt.subplots(1, 1, figsize=(12, 8))
    corr = spearmanr(dataset_x).correlation
    # Ensure the correlation matrix is symmetric
    corr = (corr + corr.T) / 2
    np.fill_diagonal(corr, 1)
    # We convert the correlation matrix to a distance matrix before performing
    # hierarchical clustering using Ward's linkage.
    distance_matrix = 1 - np.abs(corr)
    dist_linkage = hierarchy.ward(squareform(distance_matrix))
    dendro = hierarchy.dendrogram(
        dist_linkage, labels=feature_names, ax=ax1, leaf_rotation=90
    )
    # dendro_idx = np.arange(0, len(dendro["ivl"]))
    # ax2.imshow(corr[dendro["leaves"], :][:, dendro["leaves"]])
    # ax2.set_xticks(dendro_idx)
    # ax2.set_yticks(dendro_idx)
    # ax2.set_xticklabels(dendro["ivl"], rotation="vertical")
    # ax2.set_yticklabels(dendro["ivl"])
    fig.tight_layout()
    plt.show()
    # for threshold in [3.5, 2.5, 1.5, 1.0, 0.8, 0.6, 0.4, 0.2, 0.1, 0.05]:
    for threshold in [0.4]:
        # cut the dendrogram at this distance, keep one feature per cluster
        cluster_ids = hierarchy.fcluster(dist_linkage, threshold, criterion="distance")
        cluster_id_to_feature_ids = defaultdict(list)
        for idx, cluster_id in enumerate(cluster_ids):
            cluster_id_to_feature_ids[cluster_id].append(idx)
        selected_features = [v[0] for v in cluster_id_to_feature_ids.values()]
        X_train_sel = X_train[:, selected_features]
        X_test_sel = X_test[:, selected_features]
        clf_sel = new_model(random_state=random_state)
        clf_sel.fit(X_train_sel, Y_train)
        score = clf_sel.score(X_test_sel, Y_test)
        print(f"{threshold=:.3f} {score=:.3f} {len(selected_features)=}")
        print(f"{selected_features=}")
def get_mdi_importance(ds_x, ds_y, model):
    """Fit *model* and return its impurity-based (MDI) feature importances.

    Returns ``(importances, std)`` where ``std`` is the spread of the
    importances across the ensemble members when the model exposes
    ``estimators_`` (NaNs otherwise), or ``None`` when the fitted model has
    no ``feature_importances_`` attribute at all.
    """
    model.fit(ds_x, ds_y)
    try:
        importances = model.feature_importances_
        if hasattr(model, "estimators_"):
            spread = np.std(
                [member.feature_importances_ for member in model.estimators_],
                axis=0,
            )
        else:
            spread = np.full_like(importances, np.nan)
        return importances, spread
    except AttributeError:
        return None
def get_permutation_importance(ds_x, ds_y, model, random_state):
    """Fit on a 2/3 split and return permutation importances on the rest.

    Returns ``(mean, std)`` of each feature's importance over 10 shuffles,
    measured on the held-out third of the data.
    """
    x_tr, x_te, y_tr, y_te = train_test_split(
        ds_x, ds_y, test_size=0.33, random_state=random_state
    )
    model.fit(x_tr, y_tr)
    report = permutation_importance(
        model,
        x_te,
        y_te,
        random_state=random_state,
        n_repeats=10,
        n_jobs=-1,
    )
    return report.importances_mean, report.importances_std
def get_feature_importances(ds_x, ds_y, model_fn: Callable, random_state):
    """Compute permutation and MDI importances, each with a fresh model."""
    perm = get_permutation_importance(ds_x, ds_y, model_fn(random_state), random_state)
    mdi = get_mdi_importance(ds_x, ds_y, model_fn(random_state))
    return perm, mdi
def study_model(ds_x, ds_y, random_state):
    """Compare several model families on the dataset.

    NOTE(review): unfinished -- the candidate model builders and an empty
    result frame are created but nothing is evaluated or returned yet.
    """
    model_builders = [
        lambda: ExtraTreesClassifier(
            n_estimators=1000, max_features=None, n_jobs=-1, random_state=random_state
        ),
        lambda: RandomForestClassifier(
            n_estimators=1000, max_features=None, n_jobs=-1, random_state=random_state
        ),
        lambda: MLPClassifier(hidden_layer_sizes=(128, 128), random_state=random_state),
    ]
    df = pd.DataFrame()
    # TODO
def add_features(
    dataset_x: np.ndarray,
    *,
    n_comb_lin_droppout: int = 0,
    n_noise: int = 0,
    n_lin_comb: int = 0,
    n_redundant: int = 0,
) -> np.ndarray:
    """Append synthetic feature columns to *dataset_x*.

    Columns are appended on the right, in this order:

    * ``n_comb_lin_droppout`` columns: linear combination of the features
      after a 30% dropout, plus Gaussian noise (scale = 0.1 * std);
    * ``n_noise`` columns: pure standard Gaussian noise;
    * ``n_lin_comb`` columns: linear combination of the features, plus
      Gaussian noise (scale = 0.1 * std);
    * ``n_redundant`` columns: a randomly picked existing column, plus
      Gaussian noise (scale = 0.1 * std).

    Returns a new array; the input is left untouched.
    """
    def _noise() -> np.ndarray:
        "one column of pure standard Gaussian noise"
        return np.random.normal(size=(dataset_x.shape[0], 1))
    def _dropout() -> np.ndarray:
        "linear combination of a 30%-dropout copy of the dataset, plus noise"
        weights = np.random.normal(loc=0, scale=1, size=(dataset_x.shape[1], 1))
        blurred = np.copy(dataset_x)
        blurred[np.random.rand(*blurred.shape) < 0.3] = 0
        column = np.dot(blurred, weights)
        return column + 0.1 * np.std(column) * _noise()
    def _lin_comb() -> np.ndarray:
        "linear combination of the dataset columns, plus noise"
        weights = np.random.normal(loc=0, scale=1, size=(dataset_x.shape[1], 1))
        column = np.dot(dataset_x, weights)
        return column + 0.1 * np.std(column) * _noise()
    def _redundant() -> np.ndarray:
        "a randomly chosen existing column, plus noise"
        idx = np.random.randint(dataset_x.shape[1])
        column = dataset_x[:, idx : idx + 1]
        return column + 0.1 * np.std(column) * _noise()
    columns = [dataset_x]
    columns += [_dropout() for _ in range(n_comb_lin_droppout)]
    columns += [_noise() for _ in range(n_noise)]
    columns += [_lin_comb() for _ in range(n_lin_comb)]
    columns += [_redundant() for _ in range(n_redundant)]
    merged = np.concatenate(columns, axis=1)
    n_extra = n_comb_lin_droppout + n_noise + n_lin_comb + n_redundant
    assert (
        dataset_x.shape[0] == merged.shape[0]
    ), "invalid number of objects after transformation"
    assert (
        dataset_x.shape[1] + n_extra == merged.shape[1]
    ), "invalid number of features after transformation"
    return merged
def _compare_mdi_perm(
    dataset_x, dataset_y, feature_names, model_fn, random_state
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Collect MDI and permutation importances into two aligned frames.

    Returns ``(mean, std)`` DataFrames indexed by *feature_names*, each with
    an ``MDI`` and a ``Perm`` column.
    """
    (mda_mean, mda_std), (mdi_mean, mdi_std) = get_feature_importances(
        dataset_x, dataset_y, model_fn, random_state
    )
    mean = pd.DataFrame()
    std = pd.DataFrame()
    pairs = (
        (mean, mdi_mean, mda_mean),
        (std, mdi_std, mda_std),
    )
    for frame, mdi_vals, perm_vals in pairs:
        frame["MDI"] = pd.Series(mdi_vals, index=feature_names)
        frame["Perm"] = pd.Series(perm_vals, index=feature_names)
    return mean, std
def _compare_extra_features(
    dataset_x,
    dataset_y,
    noisy_offset: int,
    feature_names,
    model_fn,
    random_state,
    title: str = "",
    save_to: str = "",
):
    """Plot feature importances with and without the synthetic extra columns.

    The left panel shows MDI/permutation importances computed on the first
    ``noisy_offset`` columns only; the right panel uses the full dataset.
    Columns at index ``noisy_offset`` and beyond are assumed to be the
    synthetic extras.  The figure is shown and, when ``save_to`` is set,
    also written to that path.
    """
    def extra_nan(df: pd.DataFrame):
        # frame of NaNs for the extra features, so both panels share the
        # same y-axis categories
        extras = pd.DataFrame()
        for col in df.columns:
            extras[col] = pd.Series(
                [np.nan] * (len(feature_names) - noisy_offset),
                index=feature_names[noisy_offset:],
            )
        return extras
    base = _compare_mdi_perm(
        dataset_x=dataset_x[:, :noisy_offset],
        dataset_y=dataset_y,
        feature_names=feature_names[:noisy_offset],
        model_fn=model_fn,
        random_state=random_state,
    )
    # add NaNs so the two dataset are aligned on the plot
    base = [pd.concat((df, extra_nan(df))) for df in base]
    full = _compare_mdi_perm(
        dataset_x=dataset_x,
        dataset_y=dataset_y,
        feature_names=feature_names,
        model_fn=model_fn,
        random_state=random_state,
    )
    # put the two in comparable plots
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 4), sharex=True)
    if title:
        fig.suptitle(title)
    base[0].plot.barh(xerr=base[1], ax=axes[0])
    full[0].plot.barh(xerr=full[1], ax=axes[1])
    axes[0].grid(axis="x", which="both")
    axes[1].grid(axis="x", which="both")
    fig.tight_layout()
    plt.show()
    if save_to:
        fig.savefig(save_to)
def study_pure_noise(ds_x, ds_y, model_fn, feature_labels, random_state):
    """Show how purely random extra features affect feature selection."""
    n_noise = 3
    augmented = add_features(ds_x, n_noise=n_noise)
    labels = feature_labels + [f"noise_{i}" for i in range(1, n_noise + 1)]
    _compare_extra_features(
        augmented,
        ds_y,
        ds_x.shape[1],
        labels,
        model_fn,
        random_state,
        title="Effect of purely random features",
        save_to="tmp_noise.png",
    )
def study_duplicates(ds_x, ds_y, model_fn, feature_labels, random_state):
    """Show how redundant (duplicated + noise) features affect selection."""
    n_redundant = 3
    augmented = add_features(ds_x, n_redundant=n_redundant)
    labels = feature_labels + [f"noise_{i}" for i in range(1, n_redundant + 1)]
    _compare_extra_features(
        augmented,
        ds_y,
        ds_x.shape[1],
        labels,
        model_fn,
        random_state,
        title="Effect of redundant random features",
        save_to="tmp_redundant.png",
    )
def study_duplicates_gn(ds_x, ds_y, model_fn, feature_labels, random_state):
    """Show how linear-combination (plus noise) features affect selection."""
    n_lin_comb = 3
    augmented = add_features(ds_x, n_lin_comb=n_lin_comb)
    labels = feature_labels + [f"noise_{i}" for i in range(1, n_lin_comb + 1)]
    _compare_extra_features(
        augmented,
        ds_y,
        ds_x.shape[1],
        labels,
        model_fn,
        random_state,
        title="Effect of correlated random features (linear combination)",
        save_to="tmp_lin_comb.png",
    )
def study_duplicates_correlated(ds_x, ds_y, model_fn, feature_labels, random_state):
    """Show how dropout-based linear-combination features affect selection."""
    n_lin_comb_dropout = 3
    augmented = add_features(ds_x, n_comb_lin_droppout=n_lin_comb_dropout)
    labels = feature_labels + [
        f"noise_{i}" for i in range(1, n_lin_comb_dropout + 1)
    ]
    _compare_extra_features(
        augmented,
        ds_y,
        ds_x.shape[1],
        labels,
        model_fn,
        random_state,
        title="Effect of correlated random features (linear combination with dropout)",
        save_to="tmp_lin_comb_dropout.png",
    )
def show_datasize_learning_curve(
    dataset_x: np.ndarray,
    dataset_y: np.ndarray,
    model,
    cv=None,
    save_to: str = "",
    show: bool = False,
):
    """Plot the mean CV score as a function of the dataset size.

    Random subsets of 20%, 40%, ..., 100% of the samples are scored with
    ``evaluate_cv`` (in parallel via joblib) and plotted against the subset
    fraction.

    Parameters
    ----------
    dataset_x, dataset_y : np.ndarray
        Features and labels.
    model :
        Unfitted estimator, forwarded to ``evaluate_cv``.
    cv :
        Cross-validation spec, forwarded to ``evaluate_cv``.
    save_to : str
        When non-empty, path where the figure is saved.
    show : bool
        When True, display the figure.

    Raises
    ------
    ValueError
        If neither ``save_to`` nor ``show`` is set.
    """
    if not save_to and not show:
        raise ValueError(f"at least one of {save_to=}, {show=} should be set")
    # 0.2, 0.4, 0.6, 0.8, 1.0 -- linspace instead of np.arange(0.2, 1.2, 0.2):
    # numpy documents that arange with a float step may yield an inconsistent
    # number of points (here, a stray fraction above 1.0)
    percentage = np.linspace(0.2, 1.0, 5)
    def _get_score_for_percentage(p: float):
        "get the score using p percent of the data available"
        # Bernoulli(p) row mask: keeps ~p of the samples at random
        r_mask = np.random.rand(dataset_x.shape[0]) < p
        ds_x = dataset_x[r_mask, :]
        ds_y = dataset_y[r_mask]
        cv_scores = evaluate_cv(model, ds_x, ds_y, cv)
        return np.mean(cv_scores)
    fn = joblib.delayed(_get_score_for_percentage)
    scores = joblib.Parallel(n_jobs=-1)(fn(p) for p in percentage)
    fig, ax = plt.subplots(1, 1, figsize=(5, 4))
    fig.suptitle("Estimated Accuracy as a Function of the Dataset Size")
    ax.set_ylabel("Estimated Accuracy (%)")
    ax.set_xlabel("Relative size of the dataset (%)")
    ax.plot(percentage, scores)
    if save_to:
        fig.savefig(save_to)
    if show:
        plt.show()
def main():
    """Run the feature-importance experiments on the saved dataset."""
    np.random.seed(5876)
    ds_x = add_input_noise(np.load("ds_x.npy"), rel_scale=2)
    ds_y = np.load("ds_y.npy")
    # selected = get_feature_idx(ds_x, ds_y, random_state=48)
    # print(f"{selected=}")
    selected = (73, 61, 67)  # save time
    ds_x = ds_x[:, selected]
    common = dict(
        ds_x=ds_x,
        ds_y=ds_y,
        model_fn=new_model,
        feature_labels=[f"f_{i}" for i in selected],
        random_state=33,
    )
    # evaluate the impact of (model, noisy features, correlated features)
    for experiment in (
        study_duplicates,
        study_duplicates_gn,
        study_duplicates_correlated,
        study_pure_noise,
    ):
        experiment(**common)
    # do_plot(ds_x, ds_y, random_state=48)
# entry point: run the full feature-importance study when executed as a script
if __name__ == "__main__":
    main()
| [
"numpy.random.rand",
"sklearn.ensemble.ExtraTreesClassifier",
"scipy.cluster.hierarchy.fcluster",
"numpy.arange",
"numpy.mean",
"numpy.full_like",
"numpy.max",
"sklearn.inspection.permutation_importance",
"numpy.dot",
"numpy.random.seed",
"numpy.concatenate",
"numpy.min",
"pandas.DataFrame",... | [((1112, 1123), 'time.time', 'time.time', ([], {}), '()\n', (1121, 1123), False, 'import time\n'), ((1137, 1188), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'dataset_x', 'dataset_y'], {'cv': 'cv'}), '(model, dataset_x, dataset_y, cv=cv)\n', (1152, 1188), False, 'from sklearn.model_selection import cross_val_score, train_test_split\n'), ((1399, 1439), 'numpy.unique', 'np.unique', (['dataset_y'], {'return_counts': '(True)'}), '(dataset_y, return_counts=True)\n', (1408, 1439), True, 'import numpy as np\n'), ((1534, 1570), 'numpy.max', 'np.max', (['(occurrences / dataset_y.size)'], {}), '(occurrences / dataset_y.size)\n', (1540, 1570), True, 'import numpy as np\n'), ((2633, 2759), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'n_estimators': 'n_estimators', 'max_features': 'max_features', 'max_depth': 'max_depth', 'random_state': 'random_state'}), '(n_estimators=n_estimators, max_features=max_features,\n max_depth=max_depth, random_state=random_state)\n', (2653, 2759), False, 'from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier\n'), ((3238, 3280), 'joblib.delayed', 'joblib.delayed', (['get_score_partial_features'], {}), '(get_score_partial_features)\n', (3252, 3280), False, 'import joblib\n'), ((4592, 4697), 'sklearn.model_selection.train_test_split', 'train_test_split', (['dataset_x', 'dataset_y'], {'test_size': '(0.33)', 'random_state': 'random_state', 'stratify': 'stratify'}), '(dataset_x, dataset_y, test_size=0.33, random_state=\n random_state, stratify=stratify)\n', (4608, 4697), False, 'from sklearn.model_selection import cross_val_score, train_test_split\n'), ((6240, 6275), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(12, 8)'}), '(1, 1, figsize=(12, 8))\n', (6252, 6275), True, 'import matplotlib.pyplot as plt\n'), ((6405, 6430), 'numpy.fill_diagonal', 'np.fill_diagonal', (['corr', '(1)'], {}), '(corr, 1)\n', (6421, 6430), True, 
'import numpy as np\n'), ((6678, 6764), 'scipy.cluster.hierarchy.dendrogram', 'hierarchy.dendrogram', (['dist_linkage'], {'labels': 'feature_names', 'ax': 'ax1', 'leaf_rotation': '(90)'}), '(dist_linkage, labels=feature_names, ax=ax1,\n leaf_rotation=90)\n', (6698, 6764), False, 'from scipy.cluster import hierarchy\n'), ((7089, 7099), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7097, 7099), True, 'import matplotlib.pyplot as plt\n'), ((8492, 8563), 'sklearn.model_selection.train_test_split', 'train_test_split', (['ds_x', 'ds_y'], {'test_size': '(0.33)', 'random_state': 'random_state'}), '(ds_x, ds_y, test_size=0.33, random_state=random_state)\n', (8508, 8563), False, 'from sklearn.model_selection import cross_val_score, train_test_split\n'), ((8625, 8726), 'sklearn.inspection.permutation_importance', 'permutation_importance', (['model', 'X_test', 'Y_test'], {'random_state': 'random_state', 'n_repeats': '(10)', 'n_jobs': '(-1)'}), '(model, X_test, Y_test, random_state=random_state,\n n_repeats=10, n_jobs=-1)\n', (8647, 8726), False, 'from sklearn.inspection import permutation_importance\n'), ((9495, 9509), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (9507, 9509), True, 'import pandas as pd\n'), ((11642, 11681), 'numpy.concatenate', 'np.concatenate', (['feature_columns'], {'axis': '(1)'}), '(feature_columns, axis=1)\n', (11656, 11681), True, 'import numpy as np\n'), ((12316, 12330), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (12328, 12330), True, 'import pandas as pd\n'), ((12349, 12389), 'pandas.Series', 'pd.Series', (['mdi_mean'], {'index': 'feature_names'}), '(mdi_mean, index=feature_names)\n', (12358, 12389), True, 'import pandas as pd\n'), ((12409, 12449), 'pandas.Series', 'pd.Series', (['mda_mean'], {'index': 'feature_names'}), '(mda_mean, index=feature_names)\n', (12418, 12449), True, 'import pandas as pd\n'), ((12461, 12475), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (12473, 12475), True, 'import pandas as pd\n'), 
((12493, 12532), 'pandas.Series', 'pd.Series', (['mdi_std'], {'index': 'feature_names'}), '(mdi_std, index=feature_names)\n', (12502, 12532), True, 'import pandas as pd\n'), ((12551, 12590), 'pandas.Series', 'pd.Series', (['mda_std'], {'index': 'feature_names'}), '(mda_std, index=feature_names)\n', (12560, 12590), True, 'import pandas as pd\n'), ((13681, 13740), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(8, 4)', 'sharex': '(True)'}), '(nrows=1, ncols=2, figsize=(8, 4), sharex=True)\n', (13693, 13740), True, 'import matplotlib.pyplot as plt\n'), ((13992, 14002), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14000, 14002), True, 'import matplotlib.pyplot as plt\n'), ((16838, 16862), 'numpy.arange', 'np.arange', (['(0.2)', '(1.2)', '(0.2)'], {}), '(0.2, 1.2, 0.2)\n', (16847, 16862), True, 'import numpy as np\n'), ((17198, 17239), 'joblib.delayed', 'joblib.delayed', (['_get_score_for_percentage'], {}), '(_get_score_for_percentage)\n', (17212, 17239), False, 'import joblib\n'), ((17322, 17356), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(5, 4)'}), '(1, 1, figsize=(5, 4))\n', (17334, 17356), True, 'import matplotlib.pyplot as plt\n'), ((17661, 17681), 'numpy.random.seed', 'np.random.seed', (['(5876)'], {}), '(5876)\n', (17675, 17681), True, 'import numpy as np\n'), ((17754, 17773), 'numpy.load', 'np.load', (['"""ds_y.npy"""'], {}), "('ds_y.npy')\n", (17761, 17773), True, 'import numpy as np\n'), ((1208, 1219), 'time.time', 'time.time', ([], {}), '()\n', (1217, 1219), False, 'import time\n'), ((1899, 2044), 'warnings.warn', 'warnings.warn', (['f"""score = {score:.3f} is below {1.5 * imbalance:.3f}, results may not be indicative (class_imbalance = {imbalance:.3f})"""'], {}), "(\n f'score = {score:.3f} is below {1.5 * imbalance:.3f}, results may not be indicative (class_imbalance = {imbalance:.3f})'\n )\n", (1912, 2044), False, 'import warnings\n'), ((3086, 3133), 
'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (3106, 3133), False, 'from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier\n'), ((4190, 4230), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'scale', 'size': 'size'}), '(scale=scale, size=size)\n', (4206, 4230), True, 'import numpy as np\n'), ((6287, 6307), 'scipy.stats.spearmanr', 'spearmanr', (['dataset_x'], {}), '(dataset_x)\n', (6296, 6307), False, 'from scipy.stats import spearmanr\n'), ((6589, 6601), 'numpy.abs', 'np.abs', (['corr'], {}), '(corr)\n', (6595, 6601), True, 'import numpy as np\n'), ((6636, 6663), 'scipy.spatial.distance.squareform', 'squareform', (['distance_matrix'], {}), '(distance_matrix)\n', (6646, 6663), False, 'from scipy.spatial.distance import squareform\n'), ((7229, 7294), 'scipy.cluster.hierarchy.fcluster', 'hierarchy.fcluster', (['dist_linkage', 'threshold'], {'criterion': '"""distance"""'}), "(dist_linkage, threshold, criterion='distance')\n", (7247, 7294), False, 'from scipy.cluster import hierarchy\n'), ((7332, 7349), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7343, 7349), False, 'from collections import defaultdict\n'), ((10497, 10559), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(1)', 'size': '(dataset_x.shape[1], 1)'}), '(loc=0, scale=1, size=(dataset_x.shape[1], 1))\n', (10513, 10559), True, 'import numpy as np\n'), ((10579, 10597), 'numpy.copy', 'np.copy', (['dataset_x'], {}), '(dataset_x)\n', (10586, 10597), True, 'import numpy as np\n'), ((10675, 10699), 'numpy.dot', 'np.dot', (['dropout', 'weights'], {}), '(dropout, weights)\n', (10681, 10699), True, 'import numpy as np\n'), ((10859, 10905), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(dataset_x.shape[0], 1)'}), '(size=(dataset_x.shape[0], 1))\n', (10875, 10905), True, 'import numpy as np\n'), ((10961, 11023), 'numpy.random.normal', 
'np.random.normal', ([], {'loc': '(0)', 'scale': '(1)', 'size': '(dataset_x.shape[1], 1)'}), '(loc=0, scale=1, size=(dataset_x.shape[1], 1))\n', (10977, 11023), True, 'import numpy as np\n'), ((11042, 11068), 'numpy.dot', 'np.dot', (['dataset_x', 'weights'], {}), '(dataset_x, weights)\n', (11048, 11068), True, 'import numpy as np\n'), ((11179, 11216), 'numpy.random.randint', 'np.random.randint', (['dataset_x.shape[1]'], {}), '(dataset_x.shape[1])\n', (11196, 11216), True, 'import numpy as np\n'), ((12849, 12863), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (12861, 12863), True, 'import pandas as pd\n'), ((17169, 17187), 'numpy.mean', 'np.mean', (['cv_scores'], {}), '(cv_scores)\n', (17176, 17187), True, 'import numpy as np\n'), ((17253, 17279), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (17268, 17279), False, 'import joblib\n'), ((17631, 17641), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17639, 17641), True, 'import matplotlib.pyplot as plt\n'), ((17709, 17728), 'numpy.load', 'np.load', (['"""ds_x.npy"""'], {}), "('ds_x.npy')\n", (17716, 17728), True, 'import numpy as np\n'), ((3437, 3463), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (3452, 3463), False, 'import joblib\n'), ((4056, 4073), 'numpy.abs', 'np.abs', (['dataset_x'], {}), '(dataset_x)\n', (4062, 4073), True, 'import numpy as np\n'), ((8101, 8174), 'numpy.std', 'np.std', (['[tree.feature_importances_ for tree in model.estimators_]'], {'axis': '(0)'}), '([tree.feature_importances_ for tree in model.estimators_], axis=0)\n', (8107, 8174), True, 'import numpy as np\n'), ((8237, 8270), 'numpy.full_like', 'np.full_like', (['importances', 'np.nan'], {}), '(importances, np.nan)\n', (8249, 8270), True, 'import numpy as np\n'), ((9132, 9232), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'n_estimators': '(1000)', 'max_features': 'None', 'n_jobs': '(-1)', 'random_state': 'random_state'}), 
'(n_estimators=1000, max_features=None, n_jobs=-1,\n random_state=random_state)\n', (9152, 9232), False, 'from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier\n'), ((9268, 9370), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(1000)', 'max_features': 'None', 'n_jobs': '(-1)', 'random_state': 'random_state'}), '(n_estimators=1000, max_features=None, n_jobs=-1,\n random_state=random_state)\n', (9290, 9370), False, 'from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier\n'), ((9406, 9477), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': '(128, 128)', 'random_state': 'random_state'}), '(hidden_layer_sizes=(128, 128), random_state=random_state)\n', (9419, 9477), False, 'from sklearn.neural_network import MLPClassifier\n'), ((16988, 17022), 'numpy.random.rand', 'np.random.rand', (['dataset_x.shape[0]'], {}), '(dataset_x.shape[0])\n', (17002, 17022), True, 'import numpy as np\n'), ((1584, 1620), 'numpy.max', 'np.max', (['(occurrences / dataset_y.size)'], {}), '(occurrences / dataset_y.size)\n', (1590, 1620), True, 'import numpy as np\n'), ((1644, 1680), 'numpy.min', 'np.min', (['(occurrences / dataset_y.size)'], {}), '(occurrences / dataset_y.size)\n', (1650, 1680), True, 'import numpy as np\n'), ((2269, 2314), 'numpy.corrcoef', 'np.corrcoef', (['dataset_x[:, i]', 'dataset_x[:, j]'], {}), '(dataset_x[:, i], dataset_x[:, j])\n', (2280, 2314), True, 'import numpy as np\n'), ((2336, 2349), 'numpy.abs', 'np.abs', (['coeff'], {}), '(coeff)\n', (2342, 2349), True, 'import numpy as np\n'), ((10614, 10644), 'numpy.random.rand', 'np.random.rand', (['*dropout.shape'], {}), '(*dropout.shape)\n', (10628, 10644), True, 'import numpy as np\n'), ((10732, 10747), 'numpy.std', 'np.std', (['feature'], {}), '(feature)\n', (10738, 10747), True, 'import numpy as np\n'), ((11101, 11116), 'numpy.std', 'np.std', (['feature'], {}), '(feature)\n', (11107, 11116), True, 'import 
numpy as np\n'), ((11294, 11309), 'numpy.std', 'np.std', (['feature'], {}), '(feature)\n', (11300, 11309), True, 'import numpy as np\n'), ((1746, 1763), 'numpy.max', 'np.max', (['dataset_y'], {}), '(dataset_y)\n', (1752, 1763), True, 'import numpy as np\n')] |
import numpy as np
def mark_label_on_pathways(name, pid, pw_map, gene_id_list, label=1):
    """Assign *label* to patient *pid* on every pathway node matching a gene.

    Each node in each pathway graph gets a ``label-{name}`` attribute dict
    (created on first use); nodes whose ``uniprotids`` contain any of the
    given genes record ``{pid: label}`` in it.

    Parameters
    ----------
    name: str
        label name; nodes are annotated under the ``label-{name}`` field
    pid: int
        patient id
    pw_map: map of networkx graphs of pathways
        patient label mapping
    gene_id_list: list of list of string
        uniprot gene id list of genes
    label: int
        the label assigned to matching nodes - default value is 1
    """
    label_field = f'label-{name}'
    flat_ids = [uid for sub in gene_id_list for uid in sub]
    for graph in pw_map.values():  # for each pathway
        for node_id in graph.nodes():
            attrs = graph.nodes[node_id]
            if label_field not in attrs:
                graph.add_node(node_id, **{label_field: {}})
            if any(g in attrs['uniprotids'] for g in flat_ids):
                attrs[label_field][pid] = label
def mark_cont_label_on_pathways(name, pid, pw_map, uni_ids, gene_vals):
"""Marks given genes and their normalized expressions to the pathways
Parameters
----------
name: str
pid: int
patient id
pw_map: map of networkx graphs of pathways
patient label mapping
uni_ids: list of list of string
uniprot gene id list of genes
gene_vals: :obj:`numpy.ndarray`
the values of genes which will be assigned to found genes in pathways
"""
label_field = f'label-{name}'
# gene_ids = uni_ids #[uid for a in gene_id_list for uid in a]
for pw in pw_map.values(): # for each pathway
for n in pw.nodes():
nd = pw.nodes[n]
if label_field not in nd:
pw.add_node(n, **{label_field: {}})
intersect_values = gene_vals[[len(set(nd['uniprotids']).intersection(g)) > 0 for g in uni_ids]]
if len(intersect_values) > 0:
if 'oe' in name:
nd[label_field][pid] = max(0, max(intersect_values))
elif 'ue' in name:
nd[label_field][pid] = min(0, min(intersect_values))
elif 'abs' in name:
nd[label_field][pid] = max(intersect_values.max(), intersect_values.min(), key=abs)
def mark_extra_label_on_pathways(name, pid, pw_map, old_label_name, threshold=1.96):
    """Derive a new per-node label from an existing one.

    For ``name == 'onekernel'`` the new label copies the over-expression
    (``label-oe``) value when present, otherwise the under-expression
    (``label-ue``) value.  For any other *name* the old label is copied,
    replaced by 0 whenever its absolute value is below *threshold*.

    Parameters
    ----------
    name: string
        new label name
    pid: int
        patient id
    pw_map: map of networkx graphs of pathways
        patient label mapping
    old_label_name: string
        old label name that will be used for new label
    threshold: float
        values with abs(old_label) < threshold become 0 - default is 1.96
    """
    label_field = f'label-{name}'
    old_label_field = f'label-{old_label_name}'
    oe_label_field = 'label-oe'
    ue_label_field = 'label-ue'
    for graph in pw_map.values():  # for each pathway
        for node_id in graph.nodes():
            attrs = graph.nodes[node_id]
            if label_field not in attrs:
                graph.add_node(node_id, **{label_field: {}})
            if name == 'onekernel':
                if pid in attrs[oe_label_field]:
                    attrs[label_field][pid] = attrs[oe_label_field][pid]
                elif pid in attrs[ue_label_field]:
                    attrs[label_field][pid] = attrs[ue_label_field][pid]
            elif pid in attrs[old_label_field]:
                old_value = attrs[old_label_field][pid]
                attrs[label_field][pid] = 0 if abs(old_value) < threshold else old_value
| [
"numpy.any"
] | [((781, 832), 'numpy.any', 'np.any', (["[(g in nd['uniprotids']) for g in gene_ids]"], {}), "([(g in nd['uniprotids']) for g in gene_ids])\n", (787, 832), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.