code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
Blockwise coregistration
========================
Often, biases are spatially variable, and a "global" shift may not be enough to coregister a DEM properly.
In the :ref:`sphx_glr_auto_examples_plot_nuth_kaab.py` example, we saw that the method improved the alignment significantly, but there were still possibly nonlinear artefacts in the result.
Clearly, nonlinear coregistration approaches are needed.
One solution is :class:`xdem.coreg.BlockwiseCoreg`, a helper to run any ``Coreg`` class over an arbitrarily small grid, and then "puppet warp" the DEM to fit the reference best.
The ``BlockwiseCoreg`` class runs in five steps:
1. Generate a subdivision grid to divide the DEM in N blocks.
2. Run the requested coregistration approach in each block.
3. Extract each result as a source and destination X/Y/Z point.
4. Interpolate the X/Y/Z point-shifts into three shift-rasters.
5. Warp the DEM to apply the X/Y/Z shifts.
"""
# sphinx_gallery_thumbnail_number = 2
import matplotlib.pyplot as plt
import geoutils as gu
import numpy as np
import xdem

# %%
# **Example files**
reference_dem = xdem.DEM(xdem.examples.get_path("longyearbyen_ref_dem"))
dem_to_be_aligned = xdem.DEM(xdem.examples.get_path("longyearbyen_tba_dem"))
glacier_outlines = gu.Vector(xdem.examples.get_path("longyearbyen_glacier_outlines"))

# Create a stable ground mask (not glacierized) to mark "inlier data":
# glacier surfaces change between acquisitions, so only off-glacier pixels
# are reliable for estimating the coregistration shifts.
inlier_mask = ~glacier_outlines.create_mask(reference_dem)

# Geographic extent (left, right, bottom, top) used to georeference the plots.
plt_extent = [
    reference_dem.bounds.left,
    reference_dem.bounds.right,
    reference_dem.bounds.bottom,
    reference_dem.bounds.top,
]

# %%
# The DEM to be aligned (a 1990 photogrammetry-derived DEM) has some vertical and horizontal biases that we want to avoid, as well as possible nonlinear distortions.
# The product is a mosaic of multiple DEMs, so "seams" may exist in the data.
# These can be visualized by plotting a change map:
diff_before = (reference_dem - dem_to_be_aligned).data

plt.figure(figsize=(8, 5))
plt.imshow(diff_before.squeeze(), cmap="coolwarm_r", vmin=-10, vmax=10, extent=plt_extent)
plt.colorbar()
plt.show()

# %%
# Horizontal and vertical shifts can be estimated using :class:`xdem.coreg.NuthKaab`.
# Let's prepare a coregistration class that calculates 64 offsets, evenly spread over the DEM.
blockwise = xdem.coreg.BlockwiseCoreg(xdem.coreg.NuthKaab(), subdivision=64)

# %%
# The grid that will be used can be visualized with a helper function.
# Coregistration will be performed in each block separately.
plt.title("Subdivision grid")
plt.imshow(blockwise.subdivide_array(dem_to_be_aligned.shape), cmap="gist_ncar")
plt.show()

# %%
# Coregistration is performed with the ``.fit()`` method.
# This runs in multiple threads by default, so more CPU cores are preferable here.
blockwise.fit(reference_dem.data, dem_to_be_aligned.data, transform=reference_dem.transform, inlier_mask=inlier_mask)
aligned_dem_data = blockwise.apply(dem_to_be_aligned.data, transform=dem_to_be_aligned.transform)

# %%
# The estimated shifts can be visualized by applying the coregistration to a completely flat surface.
# This shows the estimated shifts that would be applied in elevation; additional horizontal shifts will also be applied if the method supports it.
# The :func:`xdem.coreg.BlockwiseCoreg.stats` method can be used to annotate each block with its associated Z shift.
z_correction = blockwise.apply(np.zeros_like(dem_to_be_aligned.data), transform=dem_to_be_aligned.transform)

plt.title("Vertical correction")
plt.imshow(z_correction, cmap="coolwarm_r", vmin=-10, vmax=10, extent=plt_extent)
for _, row in blockwise.stats().iterrows():
    # Label each block with its estimated vertical (Z) offset.
    # FIX: the loop body was not indented / was split mid-keyword in the
    # original, which is a SyntaxError after the ``for`` statement.
    plt.annotate(round(row["z_off"], 1), (row["center_x"], row["center_y"]), ha="center")

# %%
# Then, the new difference can be plotted to validate that it improved.
diff_after = reference_dem.data - aligned_dem_data

plt.figure(figsize=(8, 5))
plt.imshow(diff_after.squeeze(), cmap="coolwarm_r", vmin=-10, vmax=10, extent=plt_extent)
plt.colorbar()
plt.show()

# %%
# We can compare the :ref:`spatial_stats_nmad` to validate numerically that there was an improvement:
print(f"Error before: {xdem.spatialstats.nmad(diff_before):.2f} m")
print(f"Error after: {xdem.spatialstats.nmad(diff_after):.2f} m")
| [
"matplotlib.pyplot.title",
"xdem.spatialstats.nmad",
"numpy.zeros_like",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"xdem.examples.get_path",
"xdem.coreg.NuthKaab"
] | [((1955, 1981), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (1965, 1981), True, 'import matplotlib.pyplot as plt\n'), ((2073, 2087), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2085, 2087), True, 'import matplotlib.pyplot as plt\n'), ((2088, 2098), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2096, 2098), True, 'import matplotlib.pyplot as plt\n'), ((2504, 2533), 'matplotlib.pyplot.title', 'plt.title', (['"""Subdivision grid"""'], {}), "('Subdivision grid')\n", (2513, 2533), True, 'import matplotlib.pyplot as plt\n'), ((2615, 2625), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2623, 2625), True, 'import matplotlib.pyplot as plt\n'), ((3473, 3505), 'matplotlib.pyplot.title', 'plt.title', (['"""Vertical correction"""'], {}), "('Vertical correction')\n", (3482, 3505), True, 'import matplotlib.pyplot as plt\n'), ((3506, 3592), 'matplotlib.pyplot.imshow', 'plt.imshow', (['z_correction'], {'cmap': '"""coolwarm_r"""', 'vmin': '(-10)', 'vmax': '(10)', 'extent': 'plt_extent'}), "(z_correction, cmap='coolwarm_r', vmin=-10, vmax=10, extent=\n plt_extent)\n", (3516, 3592), True, 'import matplotlib.pyplot as plt\n'), ((3854, 3880), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (3864, 3880), True, 'import matplotlib.pyplot as plt\n'), ((3971, 3985), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3983, 3985), True, 'import matplotlib.pyplot as plt\n'), ((3986, 3996), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3994, 3996), True, 'import matplotlib.pyplot as plt\n'), ((1111, 1157), 'xdem.examples.get_path', 'xdem.examples.get_path', (['"""longyearbyen_ref_dem"""'], {}), "('longyearbyen_ref_dem')\n", (1133, 1157), False, 'import xdem\n'), ((1188, 1234), 'xdem.examples.get_path', 'xdem.examples.get_path', (['"""longyearbyen_tba_dem"""'], {}), "('longyearbyen_tba_dem')\n", (1210, 1234), False, 'import xdem\n'), 
((1265, 1320), 'xdem.examples.get_path', 'xdem.examples.get_path', (['"""longyearbyen_glacier_outlines"""'], {}), "('longyearbyen_glacier_outlines')\n", (1287, 1320), False, 'import xdem\n'), ((2325, 2346), 'xdem.coreg.NuthKaab', 'xdem.coreg.NuthKaab', ([], {}), '()\n', (2344, 2346), False, 'import xdem\n'), ((3395, 3432), 'numpy.zeros_like', 'np.zeros_like', (['dem_to_be_aligned.data'], {}), '(dem_to_be_aligned.data)\n', (3408, 3432), True, 'import numpy as np\n'), ((4128, 4163), 'xdem.spatialstats.nmad', 'xdem.spatialstats.nmad', (['diff_before'], {}), '(diff_before)\n', (4150, 4163), False, 'import xdem\n'), ((4195, 4229), 'xdem.spatialstats.nmad', 'xdem.spatialstats.nmad', (['diff_after'], {}), '(diff_after)\n', (4217, 4229), False, 'import xdem\n')] |
"""
--- Day 9: Smoke Basin ---
These caves seem to be lava tubes. Parts are even still volcanically active; small hydrothermal
vents release smoke into the caves that slowly settles like rain.
If you can model how the smoke flows through the caves, you might be able to avoid it and be that
much safer. The submarine generates a heightmap of the floor of the nearby caves for you (your
puzzle input).
Smoke flows to the lowest point of the area it's in. For example, consider the following heightmap:
2199943210
3987894921
9856789892
8767896789
9899965678
Each number corresponds to the height of a particular location, where 9 is the highest and 0 is the
lowest a location can be.
Your first goal is to find the low points - the locations that are lower than any of its adjacent
locations. Most locations have four adjacent locations (up, down, left, and right); locations on
the edge or corner of the map have three or two adjacent locations, respectively. (Diagonal
locations do not count as adjacent.)
In the above example, there are four low points, all highlighted: two are in the first row (a 1 and
a 0), one is in the third row (a 5), and one is in the bottom row (also a 5). All other locations
on the heightmap have some lower adjacent location, and so are not low points.
The risk level of a low point is 1 plus its height. In the above example, the risk levels of the
low points are 2, 1, 6, and 6. The sum of the risk levels of all low points in the heightmap is
therefore 15.
Find all of the low points on your heightmap. What is the sum of the risk levels of all low points
on your heightmap?
--- Part Two ---
Next, you need to find the largest basins so you know what areas are most important to avoid.
A basin is all locations that eventually flow downward to a single low point. Therefore, every low
point has a basin, although some basins are very small. Locations of height 9 do not count as being
in any basin, and all other locations will always be part of exactly one basin.
The size of a basin is the number of locations within the basin, including the low point. The
example above has four basins.
The top-left basin, size 3:
2199943210
3987894921
9856789892
8767896789
9899965678
The top-right basin, size 9:
2199943210
3987894921
9856789892
8767896789
9899965678
The middle basin, size 14:
2199943210
3987894921
9856789892
8767896789
9899965678
The bottom-right basin, size 9:
2199943210
3987894921
9856789892
8767896789
9899965678
Find the three largest basins and multiply their sizes together. In the above example, this is
9 * 14 * 9 = 1134.
What do you get if you multiply together the sizes of the three largest basins?
"""
from scipy import ndimage
import numpy as np


def solve(dem):
    """Solve both parts of AoC 2021 day 9 for a 2-D integer heightmap.

    Parameters
    ----------
    dem : np.ndarray
        2-D array of digit heights (0-9).

    Returns
    -------
    tuple[int, int]
        (sum of risk levels of all low points,
         product of the sizes of the three largest basins)
    """
    # 4-neighbourhood footprint: up/down/left/right plus the centre cell.
    adjacent = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    # Rank-1 value over the 5-cell footprint is the second-smallest value;
    # a cell strictly below it is a strict local minimum (low point).
    # Borders are padded with 9 so edge cells only compete with real
    # neighbours.
    lowest_neighbor = ndimage.rank_filter(dem, 1, footprint=adjacent, mode="constant", cval=9.0)
    low_points_mask = dem < lowest_neighbor
    # Grow every low point through all non-9 cells; iterations=-1 repeats
    # the dilation until the result stops changing, so each basin becomes
    # the full connected region draining to its low point.
    basins_mask = ndimage.binary_dilation(
        low_points_mask, structure=adjacent, iterations=-1, mask=dem != 9)
    basins, _ = ndimage.label(basins_mask, structure=adjacent)
    _, basin_sizes = np.unique(basins[basins > 0], return_counts=True)
    answer1 = int(np.sum(dem[low_points_mask] + 1))
    # np.product was removed in NumPy 2.0; np.prod is the supported spelling.
    answer2 = int(np.prod(np.sort(basin_sizes)[-3:]))
    return answer1, answer2


if __name__ == "__main__":
    with open("solutions/2021/day9/input.txt", "r") as f:
        dem = np.array([[*map(int, line)] for line in f.read().splitlines()])
    part1, part2 = solve(dem)
    print(f"Answer 1: {part1}")
    print(f"Answer 2: {part2}")
| [
"scipy.ndimage.rank_filter",
"scipy.ndimage.binary_dilation",
"numpy.sum",
"numpy.argsort",
"scipy.ndimage.label",
"numpy.array",
"numpy.unique"
] | [((2867, 2910), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 1, 1], [0, 1, 0]]'], {}), '([[0, 1, 0], [1, 1, 1], [0, 1, 0]])\n', (2875, 2910), True, 'import numpy as np\n'), ((2929, 3003), 'scipy.ndimage.rank_filter', 'ndimage.rank_filter', (['dem', '(1)'], {'footprint': 'adjacent', 'mode': '"""constant"""', 'cval': '(9.0)'}), "(dem, 1, footprint=adjacent, mode='constant', cval=9.0)\n", (2948, 3003), False, 'from scipy import ndimage\n'), ((3059, 3153), 'scipy.ndimage.binary_dilation', 'ndimage.binary_dilation', (['low_points_mask'], {'structure': 'adjacent', 'iterations': '(-1)', 'mask': '(dem != 9)'}), '(low_points_mask, structure=adjacent, iterations=-1,\n mask=dem != 9)\n', (3082, 3153), False, 'from scipy import ndimage\n'), ((3169, 3215), 'scipy.ndimage.label', 'ndimage.label', (['basins_mask'], {'structure': 'adjacent'}), '(basins_mask, structure=adjacent)\n', (3182, 3215), False, 'from scipy import ndimage\n'), ((3241, 3290), 'numpy.unique', 'np.unique', (['basins[basins > 0]'], {'return_counts': '(True)'}), '(basins[basins > 0], return_counts=True)\n', (3250, 3290), True, 'import numpy as np\n'), ((3309, 3341), 'numpy.sum', 'np.sum', (['(dem[low_points_mask] + 1)'], {}), '(dem[low_points_mask] + 1)\n', (3315, 3341), True, 'import numpy as np\n'), ((3387, 3410), 'numpy.argsort', 'np.argsort', (['basin_sizes'], {}), '(basin_sizes)\n', (3397, 3410), True, 'import numpy as np\n')] |
################################################################################
#
# test_dtram.py - testing the dTRAM estimator
#
# author: <NAME> <<EMAIL>>
#
################################################################################
from nose.tools import assert_raises, assert_true
from pytram import DTRAM, ExpressionError, NotConvergedWarning
import numpy as np
def _assert_gamma_rejected(gamma_K_j):
    """Assert DTRAM raises ExpressionError for an invalid gamma_K_j.

    All nine validation tests below share the same valid count tensor of
    shape (2, 3, 3) and differ only in the deliberately malformed
    bias-factor argument, so the shared setup lives here.
    """
    C_K_ij = np.ones(shape=(2, 3, 3), dtype=np.intc)
    assert_raises(ExpressionError, DTRAM, C_K_ij, gamma_K_j)

def test_expression_error_None():
    # gamma_K_j missing entirely
    _assert_gamma_rejected(None)

def test_expression_error_int():
    # plain scalar instead of an array
    _assert_gamma_rejected(5)

def test_expression_error_list():
    # plain list instead of an ndarray
    _assert_gamma_rejected([1, 2])

def test_expression_error_dim():
    # wrong rank: 3-D instead of 2-D
    _assert_gamma_rejected(np.ones(shape=(2, 2, 2), dtype=np.float64))

def test_expression_error_markov():
    # second axis (Markov states) is 2, but C_K_ij has 3
    _assert_gamma_rejected(np.ones(shape=(2, 2), dtype=np.float64))

def test_expression_error_therm():
    # first axis (thermodynamic states) is 1, but C_K_ij has 2
    _assert_gamma_rejected(np.ones(shape=(1, 3), dtype=np.float64))

def test_expression_error_int16():
    # wrong dtype: int16 instead of float64
    _assert_gamma_rejected(np.ones(shape=(2, 3), dtype=np.int16))

def test_expression_error_float32():
    # wrong dtype: float32 instead of float64
    _assert_gamma_rejected(np.ones(shape=(2, 3), dtype=np.float32))

def test_expression_error_zeros():
    # all-zero bias factors must be rejected
    _assert_gamma_rejected(np.zeros(shape=(2, 3), dtype=np.float64))
def test_three_state_model():
    """Run dTRAM on a two-ensemble, three-state model and compare against
    pre-computed reference stationary probabilities and transition matrix."""
    counts = np.array(
        [[[2358, 29, 0], [29, 0, 32], [0, 32, 197518]],
         [[16818, 16763, 0], [16763, 0, 16510], [0, 16510, 16635]]],
        dtype=np.intc)
    bias_factors = np.ones(shape=(2, 3), dtype=np.float64)
    bias_factors[1, 0] = np.exp(-4.0)
    bias_factors[1, 2] = np.exp(-8.0)
    estimator = DTRAM(counts, bias_factors)
    # A single iteration with an extremely tight tolerance cannot converge.
    assert_raises(NotConvergedWarning, estimator.scf_iteration,
                  maxiter=1, ftol=1.0E-80, verbose=False)
    estimator.scf_iteration(maxiter=200000, ftol=1.0E-15, verbose=False)
    pi_ref = np.array(
        [1.82026887e-02, 3.30458960e-04, 9.81466852e-01], dtype=np.float64)
    T_ref = np.array(
        [[9.90504397e-01, 9.49560284e-03, 0.0],
         [5.23046803e-01, 0.0, 4.76953197e-01],
         [0.0, 1.60589690e-04, 9.99839410e-01]],
        dtype=np.float64)
    assert_true(np.max(np.abs(estimator.pi_i - pi_ref)) < 1.0E-8)
    assert_true(
        np.max(np.abs(estimator.estimate_transition_matrix()[0, :, :] - T_ref))
        < 1.0E-8)
| [
"pytram.DTRAM",
"numpy.abs",
"numpy.zeros",
"numpy.ones",
"numpy.array",
"numpy.exp",
"nose.tools.assert_raises"
] | [((1777, 1912), 'numpy.array', 'np.array', (['[[[2358, 29, 0], [29, 0, 32], [0, 32, 197518]], [[16818, 16763, 0], [16763,\n 0, 16510], [0, 16510, 16635]]]'], {'dtype': 'np.intc'}), '([[[2358, 29, 0], [29, 0, 32], [0, 32, 197518]], [[16818, 16763, 0],\n [16763, 0, 16510], [0, 16510, 16635]]], dtype=np.intc)\n', (1785, 1912), True, 'import numpy as np\n'), ((1910, 1949), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3)', 'dtype': 'np.float64'}), '(shape=(2, 3), dtype=np.float64)\n', (1917, 1949), True, 'import numpy as np\n'), ((1972, 1984), 'numpy.exp', 'np.exp', (['(-4.0)'], {}), '(-4.0)\n', (1978, 1984), True, 'import numpy as np\n'), ((2008, 2020), 'numpy.exp', 'np.exp', (['(-8.0)'], {}), '(-8.0)\n', (2014, 2020), True, 'import numpy as np\n'), ((2035, 2059), 'pytram.DTRAM', 'DTRAM', (['C_K_ij', 'gamma_K_j'], {}), '(C_K_ij, gamma_K_j)\n', (2040, 2059), False, 'from pytram import DTRAM, ExpressionError, NotConvergedWarning\n'), ((2066, 2164), 'nose.tools.assert_raises', 'assert_raises', (['NotConvergedWarning', 'dtram.scf_iteration'], {'maxiter': '(1)', 'ftol': '(1e-80)', 'verbose': '(False)'}), '(NotConvergedWarning, dtram.scf_iteration, maxiter=1, ftol=\n 1e-80, verbose=False)\n', (2079, 2164), False, 'from nose.tools import assert_raises, assert_true\n'), ((2244, 2314), 'numpy.array', 'np.array', (['[0.0182026887, 0.00033045896, 0.981466852]'], {'dtype': 'np.float64'}), '([0.0182026887, 0.00033045896, 0.981466852], dtype=np.float64)\n', (2252, 2314), True, 'import numpy as np\n'), ((2329, 2464), 'numpy.array', 'np.array', (['[[0.990504397, 0.00949560284, 0.0], [0.523046803, 0.0, 0.476953197], [0.0, \n 0.00016058969, 0.99983941]]'], {'dtype': 'np.float64'}), '([[0.990504397, 0.00949560284, 0.0], [0.523046803, 0.0, 0.476953197\n ], [0.0, 0.00016058969, 0.99983941]], dtype=np.float64)\n', (2337, 2464), True, 'import numpy as np\n'), ((456, 495), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', 
(463, 495), True, 'import numpy as np\n'), ((581, 620), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', (588, 620), True, 'import numpy as np\n'), ((704, 743), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', (711, 743), True, 'import numpy as np\n'), ((830, 869), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', (837, 869), True, 'import numpy as np\n'), ((871, 913), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 2, 2)', 'dtype': 'np.float64'}), '(shape=(2, 2, 2), dtype=np.float64)\n', (878, 913), True, 'import numpy as np\n'), ((996, 1035), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', (1003, 1035), True, 'import numpy as np\n'), ((1037, 1076), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 2)', 'dtype': 'np.float64'}), '(shape=(2, 2), dtype=np.float64)\n', (1044, 1076), True, 'import numpy as np\n'), ((1159, 1198), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', (1166, 1198), True, 'import numpy as np\n'), ((1200, 1239), 'numpy.ones', 'np.ones', ([], {'shape': '(1, 3)', 'dtype': 'np.float64'}), '(shape=(1, 3), dtype=np.float64)\n', (1207, 1239), True, 'import numpy as np\n'), ((1322, 1361), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', (1329, 1361), True, 'import numpy as np\n'), ((1363, 1400), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3)', 'dtype': 'np.int16'}), '(shape=(2, 3), dtype=np.int16)\n', (1370, 1400), True, 'import numpy as np\n'), ((1485, 1524), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', (1492, 1524), True, 'import numpy as np\n'), ((1526, 1565), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3)', 
'dtype': 'np.float32'}), '(shape=(2, 3), dtype=np.float32)\n', (1533, 1565), True, 'import numpy as np\n'), ((1648, 1687), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', (1655, 1687), True, 'import numpy as np\n'), ((1689, 1729), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 3)', 'dtype': 'np.float64'}), '(shape=(2, 3), dtype=np.float64)\n', (1697, 1729), True, 'import numpy as np\n'), ((2494, 2517), 'numpy.abs', 'np.abs', (['(dtram.pi_i - pi)'], {}), '(dtram.pi_i - pi)\n', (2500, 2517), True, 'import numpy as np\n')] |
import argparse
import json
import os
import os.path as osp
import sys
from PIL import Image
import numpy as np
# -----------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------
def parse_wider_gt(dets_file_name, isEllipse=False):
# -----------------------------------------------------------------------------------------
    '''
    Parse the FDDB-format detection output file:
    - first line is image file name
    - second line is an integer, for `n` detections in that image
    - next `n` lines are detection coordinates
    - again, next line is image file name
    - detections are [x y width height score]

    Parameters
    ----------
    dets_file_name: str
        Path to the FDDB-format text file.
    isEllipse: bool, default=False
        Unused; kept for backward compatibility with existing callers.

    Returns a dict: {'img_filename': detections as a list of lists of floats}
    '''
    # State flags for the line-by-line parser.
    img_flag = True          # next line is an image filename
    numdet_flag = False      # next line is the detection count
    start_det_count = False  # currently reading detection lines
    det_count = 0
    numdet = -1
    det_dict = {}
    img_file = ''
    # FIX: use a context manager so the file handle is closed on every
    # exit path (the original opened the file and never closed it).
    with open(dets_file_name, 'r') as fid:
        for line in fid:
            line = line.strip()
            # Placeholder row used for images without a valid box: drop the
            # image from the results and resume at the next filename.
            if line == '0 0 0 0 0 0 0 0 0 0':
                if det_count == numdet - 1:
                    start_det_count = False
                    det_count = 0
                    img_flag = True  # next line is image file
                    numdet_flag = False
                    numdet = -1
                    det_dict.pop(img_file)
                continue
            if img_flag:
                # Image filename
                img_flag = False
                numdet_flag = True
                img_file = line
                det_dict[img_file] = []  # init detections list for image
                continue
            if numdet_flag:
                # next line after image filename: number of detections
                numdet = int(line)
                numdet_flag = False
                if numdet > 0:
                    start_det_count = True  # start counting detections
                    det_count = 0
                else:
                    # no detections in this image
                    img_flag = True  # next line is another image file
                    numdet = -1
                continue
            if start_det_count:
                # after numdet, lines are detections
                detection = [float(x) for x in line.split()]  # split on whitespace
                det_dict[img_file].append(detection)
                det_count += 1
                if det_count == numdet:
                    start_det_count = False
                    det_count = 0
                    img_flag = True  # next line is image file
                    numdet_flag = False
                    numdet = -1
    return det_dict
def parse_args():
    """Build and parse the command-line arguments for the converter."""
    parser = argparse.ArgumentParser(description='Convert dataset')
    parser.add_argument('-d', '--datadir', type=str,
                        default='data/WIDERFace',
                        help="dir to widerface")
    parser.add_argument('-s', '--subset', type=str, default='all',
                        choices=['all', 'train', 'val'],
                        help="which subset to convert")
    parser.add_argument('-o', '--outdir', default='data/WIDERFace',
                        help="where to store annotations")
    return parser.parse_args()
def convert_wider_annots(args):
    """Convert from WIDER FDDB-style format to MMDetection style

    Annotation format:
        [
            {
                'filename': 'a.jpg',
                'width': 1280,
                'height': 720,
                'ann': {
                    'bboxes': <np.ndarray> (n, 4),
                    'labels': <np.ndarray> (n, ),
                    'bboxes_ignore': <np.ndarray> (k, 4),
                    'labels_ignore': <np.ndarray> (k, 4) (optional field)
                }
            },
            ...
        ]
    The `ann` field is optional for testing.
    """
    subset = ['train', 'val'] if args.subset == 'all' else [args.subset]
    os.makedirs(args.datadir, exist_ok=True)
    os.makedirs(args.outdir, exist_ok=True)
    for sset in subset:
        print(f'Processing subset {sset}')
        out_json_name = osp.join(args.outdir, f'wider_face_{sset}_annot_mmdet_style.json')
        data_dir = osp.join(args.datadir, f'WIDER_{sset}', 'images')
        images = []
        ann_file = os.path.join(args.datadir, 'wider_face_split', f'wider_face_{sset}_bbx_gt.txt')
        wider_annot_dict = parse_wider_gt(ann_file)  # [im-file] = [[x,y,w,h,...], ...]
        for filename in wider_annot_dict.keys():
            if len(images) % 50 == 0:
                print('{} images processed'.format(len(images)))
            boxes = wider_annot_dict[filename]
            # Each row: x1, y1, w, h, blur, expression, illumination,
            # invalid, occlusion, pose. The `invalid` flag (index 7) is
            # intentionally not exported, matching the original behavior.
            bboxes = [b[:4] for b in boxes]
            if not bboxes:
                # Skip images with no annotations (before opening the file).
                continue
            image = {}
            # Context manager so PIL file handles are not leaked.
            with Image.open(os.path.join(data_dir, filename)) as im:
                # BUG FIX: width and height were swapped in the original
                # (width was assigned im.height and vice versa).
                image['width'] = im.width
                image['height'] = im.height
            image['filename'] = filename
            ann = {
                'bboxes': bboxes,
                'bboxes_ignore': [],
                'blur': [int(b[4]) for b in boxes],
                'expression': [int(b[5]) for b in boxes],
                'illumination': [int(b[6]) for b in boxes],
                'occlusion': [int(b[8]) for b in boxes],
                'pose': [int(b[9]) for b in boxes],
                'labels': [1 for b in boxes],
            }
            image['ann'] = ann
            images.append(image)
        with open(out_json_name, 'w', encoding='utf8') as outfile:
            json.dump(images, outfile, indent=4, sort_keys=True)
if __name__ == '__main__':
convert_wider_annots(parse_args())
| [
"json.dump",
"os.makedirs",
"argparse.ArgumentParser",
"numpy.array",
"os.path.join"
] | [((2680, 2734), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert dataset"""'}), "(description='Convert dataset')\n", (2703, 2734), False, 'import argparse\n'), ((3781, 3821), 'os.makedirs', 'os.makedirs', (['args.datadir'], {'exist_ok': '(True)'}), '(args.datadir, exist_ok=True)\n', (3792, 3821), False, 'import os\n'), ((3826, 3865), 'os.makedirs', 'os.makedirs', (['args.outdir'], {'exist_ok': '(True)'}), '(args.outdir, exist_ok=True)\n', (3837, 3865), False, 'import os\n'), ((4003, 4069), 'os.path.join', 'osp.join', (['args.outdir', 'f"""wider_face_{sset}_annot_mmdet_style.json"""'], {}), "(args.outdir, f'wider_face_{sset}_annot_mmdet_style.json')\n", (4011, 4069), True, 'import os.path as osp\n'), ((4089, 4138), 'os.path.join', 'osp.join', (['args.datadir', 'f"""WIDER_{sset}"""', '"""images"""'], {}), "(args.datadir, f'WIDER_{sset}', 'images')\n", (4097, 4138), True, 'import os.path as osp\n'), ((4236, 4315), 'os.path.join', 'os.path.join', (['args.datadir', '"""wider_face_split"""', 'f"""wider_face_{sset}_bbx_gt.txt"""'], {}), "(args.datadir, 'wider_face_split', f'wider_face_{sset}_bbx_gt.txt')\n", (4248, 4315), False, 'import os\n'), ((4886, 4950), 'numpy.array', 'np.array', (['[b[:4] for b in wider_annot_dict[filename]]'], {'dtype': 'int'}), '([b[:4] for b in wider_annot_dict[filename]], dtype=int)\n', (4894, 4950), True, 'import numpy as np\n'), ((5908, 5960), 'json.dump', 'json.dump', (['images', 'outfile'], {'indent': '(4)', 'sort_keys': '(True)'}), '(images, outfile, indent=4, sort_keys=True)\n', (5917, 5960), False, 'import json\n'), ((4605, 4637), 'os.path.join', 'os.path.join', (['data_dir', 'filename'], {}), '(data_dir, filename)\n', (4617, 4637), False, 'import os\n')] |
import numpy as np

# Input sequences and the two predicted angle series (aligned element-wise).
a = np.load("/Users/ibm_siyuhuo/Github Repo/seq2seq/foodData/doinfer.npy")
ang1Pred = np.load("results/npyRes/ang1Pred.npy")
ang2Pred = np.load("results/npyRes/ang2Pred.npy")

# Write three lines per sequence: the input sequence and the two angle
# predictions, each as a space-separated list.
# FIX: use a context manager so the output file is flushed and closed
# (the original handle was never closed); build each line with join
# instead of repeated string concatenation.
with open('./res_angle.txt', 'w') as thefile:
    for i in range(len(a)):
        n = len(a[i])
        # Truncate predictions to the sequence length, as the original
        # indexing over range(len(a[i])) did.
        thefile.write("seq: %s\n" % " ".join(str(v) for v in a[i]))
        thefile.write("preds angle1: %s\n" % " ".join(str(v) for v in ang1Pred[i][:n]))
        thefile.write("preds angle2: %s\n" % " ".join(str(v) for v in ang2Pred[i][:n]))
| [
"numpy.load"
] | [((21, 91), 'numpy.load', 'np.load', (['"""/Users/ibm_siyuhuo/Github Repo/seq2seq/foodData/doinfer.npy"""'], {}), "('/Users/ibm_siyuhuo/Github Repo/seq2seq/foodData/doinfer.npy')\n", (28, 91), True, 'import numpy as np\n'), ((101, 139), 'numpy.load', 'np.load', (['"""results/npyRes/ang1Pred.npy"""'], {}), "('results/npyRes/ang1Pred.npy')\n", (108, 139), True, 'import numpy as np\n'), ((149, 187), 'numpy.load', 'np.load', (['"""results/npyRes/ang2Pred.npy"""'], {}), "('results/npyRes/ang2Pred.npy')\n", (156, 187), True, 'import numpy as np\n')] |
import json
import warnings
from collections import Counter, defaultdict
from glob import glob
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.optimize import minimize
from scipy.stats import norm
from tqdm import tqdm
plt.style.use('fivethirtyeight')
# %matplotlib qt
# %%
class TransferEntropy:
"""
Class to compute asset graphs using transfer entropy.
Parameters
----------
assets: list[str], default=None
List of assets to use from loaded data.
data: pd.DataFrame, default=None
DataFrame of asset log returns with datetime index and no missing data.
If None, price datafiles are loaded from data directory.
Attributes
----------
data: pd.DataFrame
DataFrame of asset log returns with datetime index.
assets: list(str)
List of asset names.
corr: pd.DataFrame
Spearman correlation between assets.
Methods
-------
set_timeperiod(start, end): Set starting and ending dates for analysis.
subset_assets(assets): Subset assets used.
build_asset_graph(solver): Find graph coordinates.
compute_transfer_entorpy(bins): Return transfer entropy.
compute_effective_transfer_entropy: Return effective transfer entropy.
plot_asset_graph(threshold): Plot asset graph edges that meet threshold.
plot_corr(method): Plot correlations between assets.
plot_te(te): Plot transfer entropy heatmap.
"""
def __init__(self, assets=None, data=None):
self.assets = assets
self._prices = data if data is not None else self._load_data()
self.set_timeperiod('1/3/2011', '12/31/2018')
def _read_file(self, fid):
"""Read data file as DataFrame."""
df = pd.read_csv(
fid,
index_col=0,
parse_dates=True,
infer_datetime_format=True,
)
return df
def _load_data(self):
"""Load data from data directory into single into DataFrame."""
fids = glob('../data/*.csv')
df = pd.DataFrame().join(
[self._read_file(fid) for fid in fids], how='outer')
return df
def set_timeperiod(self, start=None, end=None):
"""
Updata self.data with start and end dates for analysis.
Parameters
----------
start: str or datetime object, default=None
Starting date for analysis.
end: str or datetime object, default=None
Ending date for analysis.
"""
data = self._prices.copy()
# Ignore warnings for missing data.
warnings.filterwarnings('ignore')
# Subset data by time period.
if start is not None:
data = data[data.index >= pd.to_datetime(start)].copy()
if end is not None:
data = data[data.index <= pd.to_datetime(end)].copy()
# Drop Weekends and forward fill Holidays.
keep_ix = [ix.weekday() < 5 for ix in list(data.index)]
data = data[keep_ix].copy()
data.fillna(method='ffill', inplace=True)
self.prices = data.copy()
# Calculate Log Returns.
self.data = np.log(data[1:] / data[:-1].values) # log returns
self.data.dropna(axis=1, inplace=True) # Drop assets with missing data.
# Map asset names to DataFrame.
# with open('../data/asset_mapping.json', 'r') as fid:
# asset_map = json.load(fid)
# self.assets = [asset_map.get(a, a) for a in list(self.data)]
# self._n = len(self.assets)
# Subset data to specified assets.
if self.assets is not None:
self.subset_assets(self.assets)
else:
self.assets = list(self.data)
self._n = len(self.assets)
# Rename DataFrame with asset names and init data matrix.
# self.data.columns = self.assets
self._data_mat = self.data.values
def subset_assets(self, assets):
"""
Subset data to specified assets.
Parameters
----------
assets: list[str]
List of assets to use.
"""
self.prices = self.prices[assets].copy()
self.data = self.data[assets].copy()
self.assets = assets
self._n = len(self.assets)
def _euclidean_distance(self, x, i, j):
"""Euclidean distance between points in x-coordinates."""
m = x.shape[1]
return sum((x[i, a] - x[j, a])**2 for a in range(m))**0.5
def _stress_function(self, x):
"""Stress function for Classical Multidimensional Scaling."""
# Map 1-D input coordinates to 2-D
x = np.reshape(x, (-1, 2))
n = x.shape[0]
num, denom = 0, 0
for i in range(n):
for j in range(i, n):
delta = int(i == j)
euc_d = self._euclidean_distance(x, i, j)
# Build numerator and denominator sums.
num += (delta - euc_d)**2
denom += self._distance[i, j]**2
return (num / denom)**0.5
    def build_asset_graph(self, solver, distance=None, verbose=False):
        """
        Find 2-D graph coordinates by multidimensional scaling of the
        asset distance matrix; stores them in ``self._coordinates``.

        Parameters
        ----------
        solver: str, default='SLSQP'
            Scipy minimization solving technique.
        distance: np.ndarray, default=None
            Pre-computed pairwise distance matrix; if None, the standard
            correlation distance sqrt(2*(1-rho)) is built from Spearman
            correlations of the returns.
        verbose: bool, default=False
            If True, print intermediate coordinates each iteration.
        """
        # Find correlations and distance metric.
        self.corr = self.data.corr('spearman')
        self._distance = distance if distance is not None \
            else np.sqrt(2 * (1-self.corr.values))

        # Solve 2-D coordinate positions.
        def exit_opt(Xi):
            # Abort as soon as the optimizer produces NaN coordinates.
            # NOTE(review): `> 1` tolerates a single NaN — confirm whether
            # `> 0` was intended.
            if np.sum(np.isnan(Xi)) > 1:
                raise RuntimeError('Minimize convergence failed.')

        def printx(Xi):
            # Verbose callback: print current coordinates, then run the
            # same NaN check as the silent callback.
            print(Xi)
            exit_opt(Xi)

        opt = minimize(
            self._stress_function,
            x0=np.random.rand(2*self._n),  # random initial 2-D layout
            method=solver,
            tol=1e-3,
            options={'disp': False, 'maxiter': 10000},
            callback=printx if verbose else exit_opt,
        )
        # Non-zero status means the solver did not converge successfully.
        if opt.status != 0:
            raise RuntimeError(opt.message)

        # Reshape the flat solution vector back into (n, 2) coordinates.
        self._coordinates = np.reshape(opt.x, (-1, 2))
def plot_asset_graph(self, threshold, all_thresholds=None,
ax=None, figsize=(6, 6), fontsize=6):
"""
Plot asset graph network.
Parameters
----------
threshold: float
Maximum threshold distance for edges in network.
all_thresholds: list[float], default=None
If provided, the colorbar maximum value will be set as the
maximum threshold, otherwise the given threshold is used.
This is convenient for keeping a standard scale when plotting
multiple thresholds.
ax: matplotlib axis, default=None
Matplotlib axis to plot figure, if None one is created.
figsize: list or tuple, default=(6, 6)
Figure size.
fontsize: int, default=6
Fontsize for asset labels.
"""
# Find edges and nodes.
edges = {}
nodes = []
for i in range(self._n - 1):
d_i = self._distance[i, :]
edges_i = np.argwhere(d_i < threshold).reshape(-1)
edges_i = list(edges_i[edges_i > i])
edges[i] = edges_i
if len(edges_i) > 0:
nodes.append(i)
nodes.extend(edges_i)
nodes = list(set(nodes))
edges = {key: val for key, val in edges.items() if len(val) > 0}
# Store values of edges.
edge_vals = {}
for node0, node0_edges in edges.items():
for node1 in node0_edges:
edge_vals[(node0, node1)] = self._distance[node0, node1]
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
x = self._coordinates[:, 0]
y = self._coordinates[:, 1]
# Get sequential colors for edges based on distance.
cmap = sns.color_palette('magma', 101).as_hex()
emin = min(self._distance[self._distance > 0])
emax = threshold if all_thresholds is None else max(all_thresholds)
emax += 1e-3
edge_color = {key: cmap[int(100 * (val-emin) / (emax-emin))]
for key, val in edge_vals.items()}
# Plot edges.
for node0, node0_edges in edges.items():
for node1 in node0_edges:
ix = [node0, node1]
ax.plot(x[ix], y[ix], c=edge_color[tuple(ix)], lw=2, alpha=0.7)
# Plot asset names over edges.
box = {'fill': 'white', 'facecolor': 'white', 'edgecolor': 'k'}
for node in nodes:
ax.text(x[node], y[node], self.assets[node], fontsize=fontsize,
horizontalalignment='center', verticalalignment='center',
bbox=box)
def plot_corr(self, method='pearson', ax=None, figsize=(6, 6),
fontsize=8, cbar=True, labels=True):
"""
Plot correlation of assets.
Parameters
----------
method: {'spearman', 'pearson', 'kendall'}, default='pearson'
Correlation method.
- 'spearman': Spearman rank correlation
- 'pearson': standard correlation coefficient
- 'kendall': Kendall Tau correlation coefficient
ax: matplotlib axis, default=None
Matplotlib axis to plot figure, if None one is created.
figsize: list or tuple, default=(6, 6)
Figure size.
fontsize: int, default=8
Fontsize for asset labels.
cbar: bool, default=True
If True include color bar.
labels: bool, default=False
If True include tick labels for x & y axis.
If False do not inlclude labels.
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
self._corr = self.data.corr(method)
sns.heatmap(self._corr, ax=ax, vmin=-1, vmax=1, cbar=cbar,
xticklabels=labels, yticklabels=labels, cmap='coolwarm')
if labels:
plt.setp(ax.get_xticklabels(), fontsize=fontsize)
plt.setp(ax.get_yticklabels(), fontsize=fontsize)
if cbar:
cbar_ax = ax.collections[0].colorbar
ticks = np.linspace(-1, 1, 5)
cbar_ax.set_ticks(ticks)
cbar_ax.set_ticklabels(ticks)
def _transfer_entropy_function(self, data, X, Y, bins, shuffle):
"""
Compute transfer entropy for asset x on lagged x and lagged y
log returns.
Parameters
----------
X: int
Index location of asset x, asset to be predicted.
Y: int
Index location of asset y, asset which influences asset x.
bins: int
Number of bins to place log returns in.
shuffle: bool
If True, shuffle all time series for randomized transfer entropy.
Returns
-------
te: float
Transfer entropy of y --> x.
"""
x = data[1:, X]
x_lag = data[:-1, X]
y_lag = data[:-1, Y]
n = len(x)
# Find respective historgram bin for each time series value.
data_matrix = np.concatenate([x, x_lag, y_lag])
bin_edges = np.concatenate([
np.linspace(np.min(data_matrix), 0, int(bins/2)+1),
np.linspace(0, np.max(data_matrix), int(bins/2)+1)[1:],
])
bin_vals = np.reshape(pd.cut(
data_matrix, bins=bin_edges, labels=False), (3, -1)).T
if shuffle:
# Shuffle y_lag for randomized transfer entropy.
np.random.shuffle(bin_vals[:, 2])
# Find frequency of occurce for each set of joint vectors.
p_ilag = Counter(bin_vals[:, 1])
p_i_ilag = Counter(list(zip(bin_vals[:, 0], bin_vals[:, 1])))
p_ilag_jlag = Counter(list(zip(bin_vals[:, 1], bin_vals[:, 2])))
p_i_ilag_jlag = Counter(
list(zip(bin_vals[:, 0], bin_vals[:, 1], bin_vals[:, 2])))
# Catch warnings as errors for np.log2(0).
warnings.filterwarnings('error')
# Compute transfer entropy.
te = 0
for i in range(bins):
for ilag in range(bins):
for jlag in range(bins):
try:
te_i = (p_i_ilag_jlag.get((i, ilag, jlag), 0) / n
* np.log2(p_i_ilag_jlag.get((i, ilag, jlag), 0)
* p_ilag.get(ilag, 0)
/ p_i_ilag.get((i, ilag), 0)
/ p_ilag_jlag.get((ilag, jlag), 0)
))
except (ZeroDivisionError, RuntimeWarning):
te_i = 0
te += te_i
# Reset warnings to default.
warnings.filterwarnings('ignore')
return te
def compute_transfer_entropy(self, bins=6, shuffle=False, save=True):
"""
Compute transfer entropy matrix. Returned matrix is directional
such that asset on X-axis inluences next day transfer entropy of
asset on the Y-axis.
Parameters
----------
bins: int, default=6
Number of bins to place log returns in.
shuffle: bool, default=False
If True, shuffle all time series for randomized transfer entropy.
save: bool, default=True
If True save result
Returns
-------
te: [n x n] nd.array
Transfer entropy matrix.
"""
n = self._n
te = np.zeros([n, n])
data = self._data_mat.copy()
for i in range(n):
for j in range(n):
te[i, j] = self._transfer_entropy_function(
data, i, j, bins, shuffle=shuffle)
if save:
self._te = te.copy() # store te matrix.
self._te_min = np.min(te)
self._te_max = np.max(te)
self.te = pd.DataFrame(te, columns=self.assets, index=self.assets)
else:
return te
def compute_effective_transfer_entropy(self, bins=6, sims=25,
std_threshold=1, pbar=False):
"""
Compute effective transfer entropy matrix. Returned matrix is
directional such that asset on X-axis inluences next day transfer
entropy of asset on the Y-axis.
Parameters
----------
bins: int, default=6
Number of bins to place log returns in.
sims: int, default=25
Number of simulations to use when estimating randomized
transfer entropy.
pbar: bool, default=False
If True, show progress bar for simulations.
Returns
-------
ete: [n x n] nd.array
Effective transfer entropy matrix.
"""
# Compute and store transfer entropy.
self.compute_transfer_entropy(bins)
# Compute and store randomized transfer entropy.
self.rte_tensor = np.zeros([self._n, self._n, sims])
if pbar:
for i in tqdm(range(sims)):
self.rte_tensor[:, :, i] = self.compute_transfer_entropy(
bins, shuffle=True, save=False)
else:
for i in range(sims):
self.rte_tensor[:, :, i] = self.compute_transfer_entropy(
bins, shuffle=True, save=False)
# Peform significance test on ETE values.
ete = np.zeros([self._n, self._n])
for i in range(self._n):
for j in range(self._n):
if i == j:
continue
te = self.te.iloc[i, j]
rte_array = self.rte_tensor[i, j, :]
if te - np.mean(rte_array) - np.std(rte_array)/sims**0.5 > 0:
ete[i, j] = te - np.mean(rte_array)
rte = np.mean(self.rte_tensor, axis=2)
self.rte = pd.DataFrame(rte, columns=self.assets, index=self.assets)
self.ete = pd.DataFrame(ete, columns=self.assets, index=self.assets)
# Store max and min values.
self._te_max = np.max(self.te.values)
self._te_min = np.min(self.ete.values)
def plot_te(self, te='ete', labels=True, cbar=True, ax=None,
figsize=(6, 6), fontsize=6, vmin=None, vmax=None):
"""
Plot correlation of assets.
Parameters
----------
te: {'te', 'rte', 'ete'}, default='ete'
Transfer entropy to be plotted.
- te: transfer entropy
- rte: randomized transfer entropy
- ete: effective transfer entropy
labels: bool, default=True
If True include labels in plot, else ignore labels.
cbar: bool, default=True
If True plot colorbar with scale.
ax: matplotlib axis, default=None
Matplotlib axis to plot figure, if None one is created.
figsize: list or tuple, default=(6, 6)
Figure size.
fontsize: int, default=6
Fontsize for asset labels.
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
vmin = self._te_min if vmin is None else vmin
vmax = self._te_max if vmax is None else vmax
heatmap = eval(f'self.{te}')
sns.heatmap(heatmap, ax=ax, vmin=vmin, vmax=vmax,
cmap='viridis', xticklabels=labels, yticklabels=labels,
cbar=cbar)
if labels:
plt.setp(ax.get_xticklabels(), fontsize=fontsize)
plt.setp(ax.get_yticklabels(), fontsize=fontsize)
    def plot_corr_network(self, nx_type='circle', threshold=0.8, pos=None,
            method='pearson', ax=None, figsize=(12, 12), cmap='Reds', fontsize=8,
            vmin=None, vmax=None):
        """
        Plot network graph of absolute asset correlations.

        Parameters
        ----------
        nx_type: {'circle', 'cluster'}, default='circle'
            Type of network graph.
            - 'circle': Circular graph in decreasing node strength order.
            - 'cluster': Spring graph of clusters.
        threshold: float, default=0.8
            Lower threshold of link strength for connections in network graph.
        pos: dict, default=None
            Dictionary with nodes as keys and positions as values.
        method: {'spearman', 'pearson', 'kendall'}, default='pearson'
            Correlation method.
            - 'spearman': Spearman rank correlation
            - 'pearson': standard correlation coefficient
            - 'kendall': Kendall Tau correlation coefficient
        ax: matplotlib axis, default=None
            Matplotlib axis to plot figure, if None one is created.
        figsize: list or tuple, default=(12, 12)
            Figure size.
        cmap: str, default='Reds'
            Matplotlib cmap for nodes and edges.
        fontsize: int, default=8
            Fontsize for asset labels.
        vmin, vmax: float, default=None
            Node color scale limits; derived from node strengths if None.
        """
        if ax is None:
            fig, ax = plt.subplots(1, 1, figsize=figsize)
        # Find correlation matrix and transform it in a links data frame:
        # one row per ordered asset pair with |corr| above the threshold.
        corr = self.data.corr(method).abs()
        links = corr.stack().reset_index()
        links.columns = ['in', 'out', 'weight']
        links = links.loc[(links['in'] != links['out'])
                          & (links['weight'] > threshold)]
        links = links[['out', 'in', 'weight']].reset_index(drop=True)
        # Subset to only use upper triangle portion of correlation matrix:
        # keep the first of each (a, b)/(b, a) duplicate pair.
        edges = defaultdict(int)
        ix = []
        for i, row in links.iterrows():
            if (edges[(row['out'], row['in'])]
                + edges[(row['in'], row['out'])] > 0):
                continue
            else:
                edges[(row['out'], row['in'])] += 1
                ix.append(i)
        links = links.loc[ix]
        # Sort by node centrality (row sums of |corr|, self term removed),
        # min-max scaled to [0, 1]; drop nodes with no surviving edge.
        nc = np.sum(corr, axis=1) - 1
        nc = (nc-np.min(nc)) / (np.max(nc)-np.min(nc))
        nc.sort_values(inplace=True, ascending=False)
        nx_nodes = list(set(list(links['out']) + list(links['in'])))
        node_ix = [node in nx_nodes for node in nc.index]
        nc = nc.loc[node_ix]
        node_strengths = nc.values
        nodes = list(nc.index)
        # Build OrderedGraph of nodes by centrality measure.
        # NOTE(review): nx.OrderedGraph was removed in networkx 3.x --
        # confirm the pinned networkx version supports it.
        G = nx.OrderedGraph()
        G.add_nodes_from(nodes)
        # Line widths min-max scaled into [0.3, 3.3].
        w = links['weight'].values
        lwidths = np.round(0.3 + 3*((w - np.min(w)) / (np.max(w)-np.min(w))), 2)
        edge_tuples = list(links[['in', 'out']].itertuples(index=False))
        for edge, w, lw in zip(edge_tuples, links['weight'].values, lwidths):
            G.add_edge(*edge, weight=w, lw=lw)
        # Get graph position (layout chosen by nx_type unless supplied).
        if pos is None:
            pos = {
                'circle': nx.circular_layout,
                'cluster': nx.spring_layout,
            }[nx_type](G)
        self.pos = pos
        # Draw network edges, weakest first so strong edges plot on top.
        kwargs = {
            'ax': ax,
            'pos': pos,
            'edge_cmap': plt.get_cmap(cmap),
            'alpha': 0.7,
            'edge_vmin': 0.8 * np.min(links['weight']),
            'edge_vmax': np.max(links['weight']),
        }
        nx_edges = sorted(G.edges(data=True), key=lambda x: x[-1]['weight'])
        for u, v, d in nx_edges:
            kwargs['edge_color'] = np.array([d['weight']])
            kwargs['width'] = np.array([d['lw']])
            nx.draw_networkx_edges(G, edgelist=[(u,v)], **kwargs)
        # Draw network nodes and labels; node color floor keeps weak
        # nodes visible above vmin.
        vmax = 1.1 * np.max(nc) if vmax is None else vmax
        vmin = 0.8 * nc.iloc[int(0.8*len(nc))] if vmin is None else vmin
        node_colors = [max(ns, vmin*1.05) for ns in node_strengths]
        nx.draw_networkx_nodes(
            G, ax=ax, pos=pos, cmap=cmap, node_size=1000*node_strengths,
            node_color=node_colors, alpha=0.9, vmax=vmax, vmin=vmin)
        nx.draw_networkx_labels(
            G, ax=ax, pos=pos, font_weight='bold', font_size=fontsize)
        # Hide axis ticks and grid.
        ax.grid(False)
        ax.tick_params(
            bottom=False, left=False, labelbottom=False, labelleft=False)
    def plot_ete_network(self, nx_type='circle', node_value='out', pos=None,
            threshold=0.01, ax=None, figsize=(12, 12), cmap='Blues', fontsize=8,
            vmin=None, vmax=None):
        """
        Plot directed network graph of effective transfer entropy.

        Parameters
        ----------
        nx_type: {'circle', 'cluster'}, default='circle'
            Type of network graph.
            - 'circle': Circular graph in decreasing node strength order.
            - 'cluster': Spring graph of clusters.
        node_value: {'out', 'in'}, default='out'
            Transfer entropy node centrality measure.
            'out': Total transfer entropy out of each node.
            'in': Total transfer entropy in to each node.
        pos: dict, default=None
            Dictionary with nodes as keys and positions as values.
        threshold: float, default=0.01
            Lower threshold of link strength for connections in network graph.
        ax: matplotlib axis, default=None
            Matplotlib axis to plot figure, if None one is created.
        figsize: list or tuple, default=(12, 12)
            Figure size.
        cmap: str, default='Blues'
            Matplotlib cmap for nodes and edges.
        fontsize: int, default=8
            Fontsize for asset labels.
        vmin, vmax: float, default=None
            Node color scale limits; derived from node strengths if None.

        Raises
        ------
        ValueError
            If no ETE value exceeds the threshold.
        """
        if ax is None:
            fig, ax = plt.subplots(1, 1, figsize=figsize)
        # Compute effective transfer entropy DataFrame and
        # transform it in a links data frame: one row per directed pair
        # with ETE above the threshold.
        links = self.ete.stack().reset_index()
        links.columns = ['in', 'out', 'weight']
        links = links.loc[links['weight'] > threshold]
        links = links[['out', 'in', 'weight']]
        if len(links) < 1:
            msg = 'Threshold is too high, lower threshold below '
            msg += f'{np.max(self.ete.values):.4f}'
            raise ValueError(msg)
        # Sort by node centrality (ETE row/column sums per node_value),
        # min-max scaled to [0, 1]; drop nodes with no surviving edge.
        axis = {'in': 1, 'out': 0}[node_value]
        nc = np.sum(self.ete, axis=axis)
        nc = (nc-np.min(nc)) / (np.max(nc)-np.min(nc))
        nc.sort_values(inplace=True, ascending=False)
        nx_nodes = list(set(list(links['out']) + list(links['in'])))
        node_ix = [node in nx_nodes for node in nc.index]
        nc = nc.loc[node_ix]
        # Strengths rescaled into [0.1, 1.0] so no node vanishes.
        node_strengths = 0.1 + 0.9*nc.values
        nodes = list(nc.index)
        # Build directed OrderedGraph of nodes by centrality measure.
        # NOTE(review): nx.OrderedDiGraph was removed in networkx 3.x --
        # confirm the pinned networkx version supports it.
        G = nx.OrderedDiGraph()
        G.add_nodes_from(nodes)
        # Line widths min-max scaled into [0.1, 3.1].
        w = links['weight'].values
        lwidths = np.round(0.1 + 3*((w - np.min(w)) / (np.max(w)-np.min(w))), 2)
        edge_tuples = list(links[['in', 'out']].itertuples(index=False))
        for edge, w, lw in zip(edge_tuples, links['weight'].values, lwidths):
            G.add_edge(*edge, weight=w, lw=lw)
        # Get graph position (layout chosen by nx_type unless supplied).
        if pos is None:
            pos = {
                'circle': nx.circular_layout,
                'cluster': nx.spring_layout,
            }[nx_type](G)
        self.pos = pos
        # Draw network edges with arrowheads, weakest first so strong
        # edges plot on top.
        kwargs = {
            'ax': ax,
            'pos': pos,
            'arrowstyle': '-|>',
            'arrowsize': 15,
            'edge_cmap': plt.get_cmap(cmap),
            'alpha': 0.6,
            'edge_vmin': 0.8 * np.min(links['weight']),
            'edge_vmax': np.max(links['weight']),
        }
        nx_edges = sorted(G.edges(data=True), key=lambda x: x[-1]['weight'])
        for u, v, d in nx_edges:
            kwargs['edge_color'] = np.array([d['weight']])
            kwargs['width'] = np.array([d['lw']])
            nx.draw_networkx_edges(G, edgelist=[(u,v)], **kwargs)
        # Draw network nodes and labels.
        vmax = 1.2 * np.max(nc) if vmax is None else vmax
        # vmin = 0.7 * nc.iloc[int(0.5*len(nc))]
        vmin = -0.1 if vmin is None else vmin
        node_colors = [max(ns, vmin*1.5) for ns in node_strengths]
        nx.draw_networkx_nodes(
            G, ax=ax, pos=pos, cmap=cmap, node_size=1000*node_strengths,
            node_color=node_colors, alpha=0.9, vmax=vmax, vmin=vmin)
        nx.draw_networkx_labels(
            G, ax=ax, pos=pos, font_weight='bold', font_size=fontsize)
        # Hide axis ticks and grid.
        ax.grid(False)
        ax.tick_params(
            bottom=False, left=False, labelbottom=False, labelleft=False)
# eqs = 'SPY DIA XLK XLV XLF IYZ XLY XLP XLI XLE XLU XME IYR XLB XPH IWM PHO ' \
# 'SOXX WOOD FDN GNR IBB ILF ITA IYT KIE PBW ' \
# 'AFK EZA ECH EWW EWC EWZ EEM EIDO EPOL EPP EWA EWD EWG EWH EWJ EWI EWK ' \
# 'EWL EWM EWP EWQ EWS EWT EWU EWY GXC HAO EZU RSX TUR'.split()
# fi = 'AGG SHY IEI IEF TLT TIP LQD HYG MBB'.split()
# cmdtys = 'GLD SLV DBA DBC USO UNG'.split()
# assets = eqs + fi + cmdtys
#
#
#
# self = TransferEntropy(assets=assets)
#
# # Set Period.
# start = '1/2/2011'
# end = '12/31/2018'
# self.set_timeperiod(start, end)
# #
# # %%
# # Plot correlation matrix.
# self.plot_corr(cbar=True)
# plt.show()
#
# %%
# corr_thresh = 0.75
# fig, axes = plt.subplots(1, 2, figsize=(12, 12), sharex=True, sharey=True)
# fig.suptitle('Correlation Magnitude')
# self.plot_corr_network(
# nx_type='circle',
# threshold=corr_thresh,
# ax=axes[0],
# vmin=0.3,
# vmax=1.2,
# )
# self.plot_corr_network(
# nx_type='cluster',
# threshold=corr_thresh,
# ax=axes[1],
# )
# # %%
# # Find network graph coordinates.
# corr_thresh = 0.5
# self.plot_corr_network(nx_type='circle', threshold=corr_thresh)
# plt.show()
#
# self.plot_corr_network(nx_type='cluster', threshold=corr_thresh)
# plt.show()
#
# # %%
# Compute effective transfer entropy.
# self.compute_effective_transfer_entropy(sims=10, pbar=True)
# ete = self.ete
# %%
# # %%
# # Plot effective transfer entropy.
# self.plot_te(te='ete', vmax=0.5*np.max(self.te.values))
# plt.show()
# # %%
# self.ete = ete.copy()
#
# Plot effective transfer entropy out.
# np.max(self.ete.values)
# ete_thresh_high = 0.018
# ete_thresh_low = 0.012
#
# fig, axes = plt.subplots(1, 3, figsize=(20, 10))
# fig.suptitle('Transfer Entropy Out')
# self.plot_ete_network(
# nx_type='circle',
# node_value='out',
# threshold=ete_thresh_low,
# ax=axes[0],
# )
# self.plot_ete_network(
# nx_type='cluster',
# node_value='out',
# threshold=ete_thresh_low,
# ax=axes[1],
# )
# axes[1].set_title('Low Threshold')
# self.plot_ete_network(
# nx_type='cluster',
# node_value='out',
# threshold=ete_thresh_high,
# # pos=self.pos,
# ax=axes[2],
# )
# axes[2].set_title('High Threshold')
# plt.show()
#
# # Plot effective transfer entropy in.
# fig, axes = plt.subplots(1, 3, figsize=(20, 10))
# fig.suptitle('Transfer Entropy In')
# self.plot_ete_network(
# nx_type='circle',
# node_value='in',
# threshold=ete_thresh_low,
# cmap='Purples',
# ax=axes[0],
# )
# self.plot_ete_network(
# nx_type='cluster',
# node_value='in',
# threshold=ete_thresh_low,
# cmap='Purples',
# ax=axes[1])
# axes[1].set_title('Low Threshold')
# self.plot_ete_network(
# nx_type='cluster',
# node_value='in',
# threshold=ete_thresh_high,
# cmap='Purples',
# # pos=self.pos,
# ax=axes[2])
# axes[2].set_title('High Threshold')
# plt.show()
# %%
# # Find network graph coordinates.
# thresh = 0.03
# self.plot_ete_network(nx_type='circle', threshold=thresh)
# plt.show()
#
# self.plot_ete_network(nx_type='cluster', threshold=thresh)
# plt.show()
#
# # %%
# self.plot_ete_network(nx_type='circle', node_value='in', threshold=thresh,
# cmap='Purples')
# plt.show()
#
# self.plot_ete_network(nx_type='cluster', node_value='in', threshold=thresh,
# cmap='Purples')
# plt.show()
# %%
# --------------------- Still in-progress work below ----------------------- #
# # %%
# # Plot asset graphs for multiple thresholds.
# thresholds = [0.5, 0.6, 0.7]
#
# x, y = self._coordinates[:, 0], self._coordinates[:, 1]
# adjx = (max(x) - min(x)) / 10 # Give border around graph
# adjy = (max(y) - min(y)) / 10 # Give border around graph
# n = len(thresholds)
# fig, axes = plt.subplots(1, n, figsize=[n*4, 6])
#
# for t, ax in zip(thresholds, axes.flat):
# self.plot_asset_graph(t, thresholds, ax=ax, fontsize=6)
# ax.set_title(f'T = {t}')
# ax.set_xlim(min(x)-adjx, max(x)+adjx)
# ax.set_ylim(min(y)-adjy, max(y)+adjy)
# ax.set_yticklabels([])
# ax.set_xticklabels([])
# ax.grid(False)
# plt.tight_layout()
# plt.show()
#
#
# # %%
# # Plot
# fig, axes = plt.subplots(1, 3, figsize=[14, 4])
# for te, ax in zip(['te', 'rte', 'ete'], axes.flat):
# self.plot_te(te, labels=False, ax=ax, cbar=False)
# ax.set_title(f'${te.upper()}_{{X \\rightarrow Y}}$')
# plt.tight_layout()
# plt.show()
#
#
| [
"seaborn.heatmap",
"numpy.sum",
"pandas.read_csv",
"numpy.isnan",
"collections.defaultdict",
"matplotlib.pyplot.style.use",
"numpy.mean",
"networkx.draw_networkx_nodes",
"networkx.draw_networkx_labels",
"glob.glob",
"pandas.DataFrame",
"numpy.std",
"numpy.max",
"numpy.reshape",
"numpy.li... | [((300, 332), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (313, 332), True, 'import matplotlib.pyplot as plt\n'), ((1805, 1880), 'pandas.read_csv', 'pd.read_csv', (['fid'], {'index_col': '(0)', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), '(fid, index_col=0, parse_dates=True, infer_datetime_format=True)\n', (1816, 1880), True, 'import pandas as pd\n'), ((2076, 2097), 'glob.glob', 'glob', (['"""../data/*.csv"""'], {}), "('../data/*.csv')\n", (2080, 2097), False, 'from glob import glob\n'), ((2664, 2697), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (2687, 2697), False, 'import warnings\n'), ((3218, 3253), 'numpy.log', 'np.log', (['(data[1:] / data[:-1].values)'], {}), '(data[1:] / data[:-1].values)\n', (3224, 3253), True, 'import numpy as np\n'), ((4690, 4712), 'numpy.reshape', 'np.reshape', (['x', '(-1, 2)'], {}), '(x, (-1, 2))\n', (4700, 4712), True, 'import numpy as np\n'), ((6241, 6267), 'numpy.reshape', 'np.reshape', (['opt.x', '(-1, 2)'], {}), '(opt.x, (-1, 2))\n', (6251, 6267), True, 'import numpy as np\n'), ((10051, 10171), 'seaborn.heatmap', 'sns.heatmap', (['self._corr'], {'ax': 'ax', 'vmin': '(-1)', 'vmax': '(1)', 'cbar': 'cbar', 'xticklabels': 'labels', 'yticklabels': 'labels', 'cmap': '"""coolwarm"""'}), "(self._corr, ax=ax, vmin=-1, vmax=1, cbar=cbar, xticklabels=\n labels, yticklabels=labels, cmap='coolwarm')\n", (10062, 10171), True, 'import seaborn as sns\n'), ((11360, 11393), 'numpy.concatenate', 'np.concatenate', (['[x, x_lag, y_lag]'], {}), '([x, x_lag, y_lag])\n', (11374, 11393), True, 'import numpy as np\n'), ((11896, 11919), 'collections.Counter', 'Counter', (['bin_vals[:, 1]'], {}), '(bin_vals[:, 1])\n', (11903, 11919), False, 'from collections import Counter, defaultdict\n'), ((12227, 12259), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (12250, 12259), 
False, 'import warnings\n'), ((13030, 13063), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (13053, 13063), False, 'import warnings\n'), ((13788, 13804), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (13796, 13804), True, 'import numpy as np\n'), ((15249, 15283), 'numpy.zeros', 'np.zeros', (['[self._n, self._n, sims]'], {}), '([self._n, self._n, sims])\n', (15257, 15283), True, 'import numpy as np\n'), ((15706, 15734), 'numpy.zeros', 'np.zeros', (['[self._n, self._n]'], {}), '([self._n, self._n])\n', (15714, 15734), True, 'import numpy as np\n'), ((16103, 16135), 'numpy.mean', 'np.mean', (['self.rte_tensor'], {'axis': '(2)'}), '(self.rte_tensor, axis=2)\n', (16110, 16135), True, 'import numpy as np\n'), ((16155, 16212), 'pandas.DataFrame', 'pd.DataFrame', (['rte'], {'columns': 'self.assets', 'index': 'self.assets'}), '(rte, columns=self.assets, index=self.assets)\n', (16167, 16212), True, 'import pandas as pd\n'), ((16232, 16289), 'pandas.DataFrame', 'pd.DataFrame', (['ete'], {'columns': 'self.assets', 'index': 'self.assets'}), '(ete, columns=self.assets, index=self.assets)\n', (16244, 16289), True, 'import pandas as pd\n'), ((16350, 16372), 'numpy.max', 'np.max', (['self.te.values'], {}), '(self.te.values)\n', (16356, 16372), True, 'import numpy as np\n'), ((16396, 16419), 'numpy.min', 'np.min', (['self.ete.values'], {}), '(self.ete.values)\n', (16402, 16419), True, 'import numpy as np\n'), ((17552, 17672), 'seaborn.heatmap', 'sns.heatmap', (['heatmap'], {'ax': 'ax', 'vmin': 'vmin', 'vmax': 'vmax', 'cmap': '"""viridis"""', 'xticklabels': 'labels', 'yticklabels': 'labels', 'cbar': 'cbar'}), "(heatmap, ax=ax, vmin=vmin, vmax=vmax, cmap='viridis',\n xticklabels=labels, yticklabels=labels, cbar=cbar)\n", (17563, 17672), True, 'import seaborn as sns\n'), ((19756, 19772), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (19767, 19772), False, 'from collections import Counter, 
defaultdict\n'), ((20590, 20607), 'networkx.OrderedGraph', 'nx.OrderedGraph', ([], {}), '()\n', (20605, 20607), True, 'import networkx as nx\n'), ((22018, 22164), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['G'], {'ax': 'ax', 'pos': 'pos', 'cmap': 'cmap', 'node_size': '(1000 * node_strengths)', 'node_color': 'node_colors', 'alpha': '(0.9)', 'vmax': 'vmax', 'vmin': 'vmin'}), '(G, ax=ax, pos=pos, cmap=cmap, node_size=1000 *\n node_strengths, node_color=node_colors, alpha=0.9, vmax=vmax, vmin=vmin)\n', (22040, 22164), True, 'import networkx as nx\n'), ((22192, 22279), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['G'], {'ax': 'ax', 'pos': 'pos', 'font_weight': '"""bold"""', 'font_size': 'fontsize'}), "(G, ax=ax, pos=pos, font_weight='bold', font_size=\n fontsize)\n", (22215, 22279), True, 'import networkx as nx\n'), ((24415, 24442), 'numpy.sum', 'np.sum', (['self.ete'], {'axis': 'axis'}), '(self.ete, axis=axis)\n', (24421, 24442), True, 'import numpy as np\n'), ((24866, 24885), 'networkx.OrderedDiGraph', 'nx.OrderedDiGraph', ([], {}), '()\n', (24883, 24885), True, 'import networkx as nx\n'), ((26377, 26523), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['G'], {'ax': 'ax', 'pos': 'pos', 'cmap': 'cmap', 'node_size': '(1000 * node_strengths)', 'node_color': 'node_colors', 'alpha': '(0.9)', 'vmax': 'vmax', 'vmin': 'vmin'}), '(G, ax=ax, pos=pos, cmap=cmap, node_size=1000 *\n node_strengths, node_color=node_colors, alpha=0.9, vmax=vmax, vmin=vmin)\n', (26399, 26523), True, 'import networkx as nx\n'), ((26551, 26638), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['G'], {'ax': 'ax', 'pos': 'pos', 'font_weight': '"""bold"""', 'font_size': 'fontsize'}), "(G, ax=ax, pos=pos, font_weight='bold', font_size=\n fontsize)\n", (26574, 26638), True, 'import networkx as nx\n'), ((5581, 5616), 'numpy.sqrt', 'np.sqrt', (['(2 * (1 - self.corr.values))'], {}), '(2 * (1 - self.corr.values))\n', (5588, 5616), True, 'import numpy as 
np\n'), ((7893, 7928), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'figsize'}), '(1, 1, figsize=figsize)\n', (7905, 7928), True, 'import matplotlib.pyplot as plt\n'), ((9962, 9997), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'figsize'}), '(1, 1, figsize=figsize)\n', (9974, 9997), True, 'import matplotlib.pyplot as plt\n'), ((10417, 10438), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(5)'], {}), '(-1, 1, 5)\n', (10428, 10438), True, 'import numpy as np\n'), ((11777, 11810), 'numpy.random.shuffle', 'np.random.shuffle', (['bin_vals[:, 2]'], {}), '(bin_vals[:, 2])\n', (11794, 11810), True, 'import numpy as np\n'), ((14114, 14124), 'numpy.min', 'np.min', (['te'], {}), '(te)\n', (14120, 14124), True, 'import numpy as np\n'), ((14152, 14162), 'numpy.max', 'np.max', (['te'], {}), '(te)\n', (14158, 14162), True, 'import numpy as np\n'), ((14185, 14241), 'pandas.DataFrame', 'pd.DataFrame', (['te'], {'columns': 'self.assets', 'index': 'self.assets'}), '(te, columns=self.assets, index=self.assets)\n', (14197, 14241), True, 'import pandas as pd\n'), ((17361, 17396), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'figsize'}), '(1, 1, figsize=figsize)\n', (17373, 17396), True, 'import matplotlib.pyplot as plt\n'), ((19233, 19268), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'figsize'}), '(1, 1, figsize=figsize)\n', (19245, 19268), True, 'import matplotlib.pyplot as plt\n'), ((20152, 20172), 'numpy.sum', 'np.sum', (['corr'], {'axis': '(1)'}), '(corr, axis=1)\n', (20158, 20172), True, 'import numpy as np\n'), ((21306, 21324), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (21318, 21324), True, 'import matplotlib.pyplot as plt\n'), ((21433, 21456), 'numpy.max', 'np.max', (["links['weight']"], {}), "(links['weight'])\n", (21439, 21456), True, 'import numpy as np\n'), ((21617, 21640), 'numpy.array', 'np.array', (["[d['weight']]"], 
{}), "([d['weight']])\n", (21625, 21640), True, 'import numpy as np\n'), ((21671, 21690), 'numpy.array', 'np.array', (["[d['lw']]"], {}), "([d['lw']])\n", (21679, 21690), True, 'import numpy as np\n'), ((21703, 21757), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['G'], {'edgelist': '[(u, v)]'}), '(G, edgelist=[(u, v)], **kwargs)\n', (21725, 21757), True, 'import networkx as nx\n'), ((23771, 23806), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'figsize'}), '(1, 1, figsize=figsize)\n', (23783, 23806), True, 'import matplotlib.pyplot as plt\n'), ((25634, 25652), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (25646, 25652), True, 'import matplotlib.pyplot as plt\n'), ((25761, 25784), 'numpy.max', 'np.max', (["links['weight']"], {}), "(links['weight'])\n", (25767, 25784), True, 'import numpy as np\n'), ((25946, 25969), 'numpy.array', 'np.array', (["[d['weight']]"], {}), "([d['weight']])\n", (25954, 25969), True, 'import numpy as np\n'), ((26000, 26019), 'numpy.array', 'np.array', (["[d['lw']]"], {}), "([d['lw']])\n", (26008, 26019), True, 'import numpy as np\n'), ((26032, 26086), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['G'], {'edgelist': '[(u, v)]'}), '(G, edgelist=[(u, v)], **kwargs)\n', (26054, 26086), True, 'import networkx as nx\n'), ((2111, 2125), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2123, 2125), True, 'import pandas as pd\n'), ((5941, 5968), 'numpy.random.rand', 'np.random.rand', (['(2 * self._n)'], {}), '(2 * self._n)\n', (5955, 5968), True, 'import numpy as np\n'), ((8079, 8110), 'seaborn.color_palette', 'sns.color_palette', (['"""magma"""', '(101)'], {}), "('magma', 101)\n", (8096, 8110), True, 'import seaborn as sns\n'), ((11608, 11657), 'pandas.cut', 'pd.cut', (['data_matrix'], {'bins': 'bin_edges', 'labels': '(False)'}), '(data_matrix, bins=bin_edges, labels=False)\n', (11614, 11657), True, 'import pandas as pd\n'), ((20194, 20204), 'numpy.min', 
'np.min', (['nc'], {}), '(nc)\n', (20200, 20204), True, 'import numpy as np\n'), ((20209, 20219), 'numpy.max', 'np.max', (['nc'], {}), '(nc)\n', (20215, 20219), True, 'import numpy as np\n'), ((20220, 20230), 'numpy.min', 'np.min', (['nc'], {}), '(nc)\n', (20226, 20230), True, 'import numpy as np\n'), ((21383, 21406), 'numpy.min', 'np.min', (["links['weight']"], {}), "(links['weight'])\n", (21389, 21406), True, 'import numpy as np\n'), ((21832, 21842), 'numpy.max', 'np.max', (['nc'], {}), '(nc)\n', (21838, 21842), True, 'import numpy as np\n'), ((24460, 24470), 'numpy.min', 'np.min', (['nc'], {}), '(nc)\n', (24466, 24470), True, 'import numpy as np\n'), ((24475, 24485), 'numpy.max', 'np.max', (['nc'], {}), '(nc)\n', (24481, 24485), True, 'import numpy as np\n'), ((24486, 24496), 'numpy.min', 'np.min', (['nc'], {}), '(nc)\n', (24492, 24496), True, 'import numpy as np\n'), ((25711, 25734), 'numpy.min', 'np.min', (["links['weight']"], {}), "(links['weight'])\n", (25717, 25734), True, 'import numpy as np\n'), ((26161, 26171), 'numpy.max', 'np.max', (['nc'], {}), '(nc)\n', (26167, 26171), True, 'import numpy as np\n'), ((5708, 5720), 'numpy.isnan', 'np.isnan', (['Xi'], {}), '(Xi)\n', (5716, 5720), True, 'import numpy as np\n'), ((7300, 7328), 'numpy.argwhere', 'np.argwhere', (['(d_i < threshold)'], {}), '(d_i < threshold)\n', (7311, 7328), True, 'import numpy as np\n'), ((11455, 11474), 'numpy.min', 'np.min', (['data_matrix'], {}), '(data_matrix)\n', (11461, 11474), True, 'import numpy as np\n'), ((24225, 24248), 'numpy.max', 'np.max', (['self.ete.values'], {}), '(self.ete.values)\n', (24231, 24248), True, 'import numpy as np\n'), ((11522, 11541), 'numpy.max', 'np.max', (['data_matrix'], {}), '(data_matrix)\n', (11528, 11541), True, 'import numpy as np\n'), ((16069, 16087), 'numpy.mean', 'np.mean', (['rte_array'], {}), '(rte_array)\n', (16076, 16087), True, 'import numpy as np\n'), ((2805, 2826), 'pandas.to_datetime', 'pd.to_datetime', (['start'], {}), '(start)\n', 
(2819, 2826), True, 'import pandas as pd\n'), ((2901, 2920), 'pandas.to_datetime', 'pd.to_datetime', (['end'], {}), '(end)\n', (2915, 2920), True, 'import pandas as pd\n'), ((15978, 15996), 'numpy.mean', 'np.mean', (['rte_array'], {}), '(rte_array)\n', (15985, 15996), True, 'import numpy as np\n'), ((15999, 16016), 'numpy.std', 'np.std', (['rte_array'], {}), '(rte_array)\n', (16005, 16016), True, 'import numpy as np\n'), ((20716, 20725), 'numpy.min', 'np.min', (['w'], {}), '(w)\n', (20722, 20725), True, 'import numpy as np\n'), ((20730, 20739), 'numpy.max', 'np.max', (['w'], {}), '(w)\n', (20736, 20739), True, 'import numpy as np\n'), ((20740, 20749), 'numpy.min', 'np.min', (['w'], {}), '(w)\n', (20746, 20749), True, 'import numpy as np\n'), ((24994, 25003), 'numpy.min', 'np.min', (['w'], {}), '(w)\n', (25000, 25003), True, 'import numpy as np\n'), ((25008, 25017), 'numpy.max', 'np.max', (['w'], {}), '(w)\n', (25014, 25017), True, 'import numpy as np\n'), ((25018, 25027), 'numpy.min', 'np.min', (['w'], {}), '(w)\n', (25024, 25027), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from model.Sample_MIL import InstanceModels, RaggedModels
from model.KerasLayers import Losses, Metrics
from model import DatasetsUtils
from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold
import pickle
# Restrict TensorFlow to the last visible GPU and grow its memory on demand
# instead of reserving the whole device up front.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[-1], True)
tf.config.experimental.set_visible_devices(physical_devices[-1], 'GPU')
import pathlib
# Resolve the repository root ('ATGC2') regardless of the launch directory,
# then make it importable for the project-local modules above.
path = pathlib.Path.cwd()
if path.stem == 'ATGC2':
    cwd = path
else:
    cwd = list(path.parents)[::-1][path.parts.index('ATGC2')]
import sys
sys.path.append(str(cwd))
# Load preprocessed variant features (D), the MAF table, and the sample table.
D, maf = pickle.load(open(cwd / 'figures' / 'controls' / 'data' / 'data.pkl', 'rb'))
sample_df = pickle.load(open(cwd / 'files' / 'tcga_sample_table.pkl', 'rb'))
# Identity-style embedding matrices with an all-zero first row
# (row 0 presumably serves as a padding/null category — confirm against
# the integer encodings stored in D).
strand_emb_mat = np.concatenate([np.zeros(2)[np.newaxis, :], np.diag(np.ones(2))], axis=0)
D['strand_emb'] = strand_emb_mat[D['strand']]
chr_emb_mat = np.concatenate([np.zeros(24)[np.newaxis, :], np.diag(np.ones(24))], axis=0)
D['chr_emb'] = chr_emb_mat[D['chr']]
frame_emb_mat = np.concatenate([np.zeros(3)[np.newaxis, :], np.diag(np.ones(3))], axis=0)
D['cds_emb'] = frame_emb_mat[D['cds']]
##bin position
def pos_one_hot(pos):
    """Split a fractional position into a coarse bin and a within-bin offset.

    Both parts are on a x100 scale: the first return value is the integer
    bin index int(pos * 100), the second is the fractional remainder.
    """
    scaled = pos * 100
    bin_index = int(scaled)
    return bin_index, scaled - bin_index
# Discretize every variant position into an integer bin (+1 so that bin 0
# stays unused, presumably reserved for padding — confirm) and keep the
# within-bin offset as a continuous feature.
result = np.apply_along_axis(pos_one_hot, -1, D['pos_float'][:, np.newaxis])
D['pos_bin'] = np.stack(result[:, 0]) + 1
D['pos_loc'] = np.stack(result[:, 1])
# Group the flat per-variant arrays by sample into ragged object arrays.
indexes = [np.where(D['sample_idx'] == idx) for idx in range(sample_df.shape[0])]
pos_loc = np.array([D['pos_loc'][i] for i in indexes], dtype='object')
pos_bin = np.array([D['pos_bin'][i] for i in indexes], dtype='object')
chr = np.array([D['chr'][i] for i in indexes], dtype='object')
# set y label and weights
# A sample is positive ([0, 1]) when any of its variants hits PTEN.
genes = maf['Hugo_Symbol'].values
boolean = ['PTEN' in genes[j] for j in [np.where(D['sample_idx'] == i)[0] for i in range(sample_df.shape[0])]]
y_label = np.stack([[0, 1] if i else [1, 0] for i in boolean])
y_strat = np.argmax(y_label, axis=-1)
# Inverse class-frequency sample weights, normalized to sum to 1.
class_counts = dict(zip(*np.unique(y_strat, return_counts=True)))
y_weights = np.array([1 / class_counts[_] for _ in y_strat])
y_weights /= np.sum(y_weights)
# Loaders mapping batches of sample indices to their ragged per-variant features.
pos_loader = DatasetsUtils.Map.FromNumpy(pos_loc, tf.float32)
bin_loader = DatasetsUtils.Map.FromNumpy(pos_bin, tf.float32)
chr_loader = DatasetsUtils.Map.FromNumpy(chr, tf.int32)
# Collect the final model weights of each outer fold.
weights = []
# Stop training when the weighted validation cross-entropy stops improving,
# rolling back to the best weights seen.
callbacks = [tf.keras.callbacks.EarlyStopping(monitor='val_weighted_CE', min_delta=0.0001, patience=50, mode='min', restore_best_weights=True)]
losses = [Losses.CrossEntropy()]
##stratified K fold for test
for idx_train, idx_test in StratifiedKFold(n_splits=8, random_state=0, shuffle=True).split(y_strat, y_strat):
    # Carve a stratified 1000-sample validation set out of each training fold.
    idx_train, idx_valid = [idx_train[idx] for idx in list(StratifiedShuffleSplit(n_splits=1, test_size=1000, random_state=0).split(np.zeros_like(y_strat)[idx_train], y_strat[idx_train]))[0]]
    # Training pipeline: stratified half-fold minibatches of sample indices
    # mapped to (ragged features, labels, per-sample weights).
    ds_train = tf.data.Dataset.from_tensor_slices((idx_train, y_label[idx_train], y_strat[idx_train]))
    ds_train = ds_train.apply(DatasetsUtils.Apply.StratifiedMinibatch(batch_size=len(idx_train) // 2, ds_size=len(idx_train)))
    ds_train = ds_train.map(lambda x, y: ((pos_loader(x, ragged_output=True),
                                           bin_loader(x, ragged_output=True),
                                           chr_loader(x, ragged_output=True),
                                           ),
                                           y,
                                           tf.gather(tf.constant(y_weights, dtype=tf.float32), x)
                                           ))
    # Validation pipeline: one full-size batch with the same feature mapping.
    ds_valid = tf.data.Dataset.from_tensor_slices((idx_valid, y_label[idx_valid]))
    ds_valid = ds_valid.batch(len(idx_valid), drop_remainder=False)
    ds_valid = ds_valid.map(lambda x, y: ((pos_loader(x, ragged_output=True),
                                           bin_loader(x, ragged_output=True),
                                           chr_loader(x, ragged_output=True),
                                           ),
                                           y,
                                           tf.gather(tf.constant(y_weights, dtype=tf.float32), x)
                                           ))
    # Re-initialize and retrain from scratch until the run converges well
    # enough; eval[2] is presumably the unweighted accuracy (second compiled
    # metric after the loss) — confirm against the compile() metric order.
    while True:
        position_encoder = InstanceModels.VariantPositionBin(24, 100)
        mil = RaggedModels.MIL(instance_encoders=[position_encoder.model], output_dim=2, pooling='sum', mil_hidden=(64, 32, 16, 8), output_type='anlulogits')
        mil.model.compile(loss=losses,
                          metrics=[Metrics.CrossEntropy(), Metrics.Accuracy()],
                          weighted_metrics=[Metrics.CrossEntropy(), Metrics.Accuracy()],
                          optimizer=tf.keras.optimizers.Adam(learning_rate=0.005,
                                                             clipvalue=10000))
        mil.model.fit(ds_train,
                      steps_per_epoch=20,
                      validation_data=ds_valid,
                      epochs=10000,
                      callbacks=callbacks)
        eval = mil.model.evaluate(ds_valid)
        if eval[2] >= .985:
            break
    weights.append(mil.model.get_weights())
# Persist one set of trained weights per outer fold.
with open(cwd / 'figures' / 'controls' / 'samples' / 'suppressor' / 'results' / 'weights.pkl', 'wb') as f:
    pickle.dump(weights, f)
| [
"pickle.dump",
"numpy.sum",
"numpy.argmax",
"numpy.ones",
"tensorflow.keras.callbacks.EarlyStopping",
"numpy.unique",
"model.KerasLayers.Losses.CrossEntropy",
"numpy.zeros_like",
"model.Sample_MIL.RaggedModels.MIL",
"model.KerasLayers.Metrics.Accuracy",
"numpy.apply_along_axis",
"tensorflow.ke... | [((288, 339), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (332, 339), True, 'import tensorflow as tf\n'), ((340, 408), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[-1]', '(True)'], {}), '(physical_devices[-1], True)\n', (380, 408), True, 'import tensorflow as tf\n'), ((409, 480), 'tensorflow.config.experimental.set_visible_devices', 'tf.config.experimental.set_visible_devices', (['physical_devices[-1]', '"""GPU"""'], {}), "(physical_devices[-1], 'GPU')\n", (451, 480), True, 'import tensorflow as tf\n'), ((504, 522), 'pathlib.Path.cwd', 'pathlib.Path.cwd', ([], {}), '()\n', (520, 522), False, 'import pathlib\n'), ((1355, 1422), 'numpy.apply_along_axis', 'np.apply_along_axis', (['pos_one_hot', '(-1)', "D['pos_float'][:, np.newaxis]"], {}), "(pos_one_hot, -1, D['pos_float'][:, np.newaxis])\n", (1374, 1422), True, 'import numpy as np\n'), ((1481, 1503), 'numpy.stack', 'np.stack', (['result[:, 1]'], {}), '(result[:, 1])\n', (1489, 1503), True, 'import numpy as np\n'), ((1598, 1658), 'numpy.array', 'np.array', (["[D['pos_loc'][i] for i in indexes]"], {'dtype': '"""object"""'}), "([D['pos_loc'][i] for i in indexes], dtype='object')\n", (1606, 1658), True, 'import numpy as np\n'), ((1669, 1729), 'numpy.array', 'np.array', (["[D['pos_bin'][i] for i in indexes]"], {'dtype': '"""object"""'}), "([D['pos_bin'][i] for i in indexes], dtype='object')\n", (1677, 1729), True, 'import numpy as np\n'), ((1736, 1792), 'numpy.array', 'np.array', (["[D['chr'][i] for i in indexes]"], {'dtype': '"""object"""'}), "([D['chr'][i] for i in indexes], dtype='object')\n", (1744, 1792), True, 'import numpy as np\n'), ((1976, 2030), 'numpy.stack', 'np.stack', (['[([0, 1] if i else [1, 0]) for i in boolean]'], {}), '([([0, 1] if i else [1, 0]) for i in boolean])\n', (1984, 2030), True, 'import numpy as np\n'), ((2039, 
2066), 'numpy.argmax', 'np.argmax', (['y_label'], {'axis': '(-1)'}), '(y_label, axis=-1)\n', (2048, 2066), True, 'import numpy as np\n'), ((2146, 2196), 'numpy.array', 'np.array', (['[(1 / class_counts[_]) for _ in y_strat]'], {}), '([(1 / class_counts[_]) for _ in y_strat])\n', (2154, 2196), True, 'import numpy as np\n'), ((2208, 2225), 'numpy.sum', 'np.sum', (['y_weights'], {}), '(y_weights)\n', (2214, 2225), True, 'import numpy as np\n'), ((2240, 2288), 'model.DatasetsUtils.Map.FromNumpy', 'DatasetsUtils.Map.FromNumpy', (['pos_loc', 'tf.float32'], {}), '(pos_loc, tf.float32)\n', (2267, 2288), False, 'from model import DatasetsUtils\n'), ((2302, 2350), 'model.DatasetsUtils.Map.FromNumpy', 'DatasetsUtils.Map.FromNumpy', (['pos_bin', 'tf.float32'], {}), '(pos_bin, tf.float32)\n', (2329, 2350), False, 'from model import DatasetsUtils\n'), ((2364, 2406), 'model.DatasetsUtils.Map.FromNumpy', 'DatasetsUtils.Map.FromNumpy', (['chr', 'tf.int32'], {}), '(chr, tf.int32)\n', (2391, 2406), False, 'from model import DatasetsUtils\n'), ((1439, 1461), 'numpy.stack', 'np.stack', (['result[:, 0]'], {}), '(result[:, 0])\n', (1447, 1461), True, 'import numpy as np\n'), ((1516, 1548), 'numpy.where', 'np.where', (["(D['sample_idx'] == idx)"], {}), "(D['sample_idx'] == idx)\n", (1524, 1548), True, 'import numpy as np\n'), ((2435, 2569), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_weighted_CE"""', 'min_delta': '(0.0001)', 'patience': '(50)', 'mode': '"""min"""', 'restore_best_weights': '(True)'}), "(monitor='val_weighted_CE', min_delta=\n 0.0001, patience=50, mode='min', restore_best_weights=True)\n", (2467, 2569), True, 'import tensorflow as tf\n'), ((2576, 2597), 'model.KerasLayers.Losses.CrossEntropy', 'Losses.CrossEntropy', ([], {}), '()\n', (2595, 2597), False, 'from model.KerasLayers import Losses, Metrics\n'), ((2946, 3038), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', 
(['(idx_train, y_label[idx_train], y_strat[idx_train])'], {}), '((idx_train, y_label[idx_train], y_strat[\n idx_train]))\n', (2980, 3038), True, 'import tensorflow as tf\n'), ((3646, 3713), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(idx_valid, y_label[idx_valid])'], {}), '((idx_valid, y_label[idx_valid]))\n', (3680, 3713), True, 'import tensorflow as tf\n'), ((5316, 5339), 'pickle.dump', 'pickle.dump', (['weights', 'f'], {}), '(weights, f)\n', (5327, 5339), False, 'import pickle\n'), ((2655, 2712), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(8)', 'random_state': '(0)', 'shuffle': '(True)'}), '(n_splits=8, random_state=0, shuffle=True)\n', (2670, 2712), False, 'from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold\n'), ((4295, 4337), 'model.Sample_MIL.InstanceModels.VariantPositionBin', 'InstanceModels.VariantPositionBin', (['(24)', '(100)'], {}), '(24, 100)\n', (4328, 4337), False, 'from model.Sample_MIL import InstanceModels, RaggedModels\n'), ((4352, 4499), 'model.Sample_MIL.RaggedModels.MIL', 'RaggedModels.MIL', ([], {'instance_encoders': '[position_encoder.model]', 'output_dim': '(2)', 'pooling': '"""sum"""', 'mil_hidden': '(64, 32, 16, 8)', 'output_type': '"""anlulogits"""'}), "(instance_encoders=[position_encoder.model], output_dim=2,\n pooling='sum', mil_hidden=(64, 32, 16, 8), output_type='anlulogits')\n", (4368, 4499), False, 'from model.Sample_MIL import InstanceModels, RaggedModels\n'), ((874, 885), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (882, 885), True, 'import numpy as np\n'), ((910, 920), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (917, 920), True, 'import numpy as np\n'), ((1009, 1021), 'numpy.zeros', 'np.zeros', (['(24)'], {}), '(24)\n', (1017, 1021), True, 'import numpy as np\n'), ((1046, 1057), 'numpy.ones', 'np.ones', (['(24)'], {}), '(24)\n', (1053, 1057), True, 'import numpy as np\n'), ((1139, 1150), 'numpy.zeros', 
'np.zeros', (['(3)'], {}), '(3)\n', (1147, 1150), True, 'import numpy as np\n'), ((1175, 1185), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1182, 1185), True, 'import numpy as np\n'), ((2093, 2131), 'numpy.unique', 'np.unique', (['y_strat'], {'return_counts': '(True)'}), '(y_strat, return_counts=True)\n', (2102, 2131), True, 'import numpy as np\n'), ((1895, 1925), 'numpy.where', 'np.where', (["(D['sample_idx'] == i)"], {}), "(D['sample_idx'] == i)\n", (1903, 1925), True, 'import numpy as np\n'), ((4741, 4803), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.005)', 'clipvalue': '(10000)'}), '(learning_rate=0.005, clipvalue=10000)\n', (4765, 4803), True, 'import tensorflow as tf\n'), ((3539, 3579), 'tensorflow.constant', 'tf.constant', (['y_weights'], {'dtype': 'tf.float32'}), '(y_weights, dtype=tf.float32)\n', (3550, 3579), True, 'import tensorflow as tf\n'), ((4160, 4200), 'tensorflow.constant', 'tf.constant', (['y_weights'], {'dtype': 'tf.float32'}), '(y_weights, dtype=tf.float32)\n', (4171, 4200), True, 'import tensorflow as tf\n'), ((4571, 4593), 'model.KerasLayers.Metrics.CrossEntropy', 'Metrics.CrossEntropy', ([], {}), '()\n', (4591, 4593), False, 'from model.KerasLayers import Losses, Metrics\n'), ((4595, 4613), 'model.KerasLayers.Metrics.Accuracy', 'Metrics.Accuracy', ([], {}), '()\n', (4611, 4613), False, 'from model.KerasLayers import Losses, Metrics\n'), ((4660, 4682), 'model.KerasLayers.Metrics.CrossEntropy', 'Metrics.CrossEntropy', ([], {}), '()\n', (4680, 4682), False, 'from model.KerasLayers import Losses, Metrics\n'), ((4684, 4702), 'model.KerasLayers.Metrics.Accuracy', 'Metrics.Accuracy', ([], {}), '()\n', (4700, 4702), False, 'from model.KerasLayers import Losses, Metrics\n'), ((2797, 2863), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(1)', 'test_size': '(1000)', 'random_state': '(0)'}), '(n_splits=1, test_size=1000, random_state=0)\n', (2819, 2863), 
False, 'from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold\n'), ((2870, 2892), 'numpy.zeros_like', 'np.zeros_like', (['y_strat'], {}), '(y_strat)\n', (2883, 2892), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
script to generate figures for the paper
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import logsumexp
from matplotlib_scalebar.scalebar import ScaleBar
from . import prob, benchmark
# Figure
def fig_distribution(contigs, df, K, K0=10**3.5, mode='default'):
    """Estimate contact probabilities from the three longest contigs and plot them.

    The smoothed estimator comes from the longest contig; the raw estimators
    of the three longest contigs are drawn alongside it by _fig_distribution.
    """
    # contig ids ordered by length, longest first
    longest = np.argsort(contigs.lengths)[::-1]
    estimator = None
    raw_estimators = []
    for contig_id in longest[:3]:
        est, raw = prob.infer_from_contig2(df, contigs, contig_id, K=K, K0=K0)
        if estimator is None:
            # keep the smoothed estimator of the longest contig only
            estimator = est
        raw_estimators.append(raw)
    _fig_distribution(raw_estimators[0], raw_estimators[1], raw_estimators[2], estimator, K, mode)
def _fig_distribution(raw_estimator_1st, raw_estimator_2nd, raw_estimator_3rd, estimator, K, mode):
    """Draw the raw and smoothed contact-probability curves p(d).

    Parameters
    ----------
    raw_estimator_1st, raw_estimator_2nd, raw_estimator_3rd : callable
        Raw log-probability estimators from the three longest contigs; each
        maps an array of separation distances to log p(d).
    estimator : callable
        Smoothed log-probability estimator.
    K : int
        Distance cutoff; the region beyond K is shaded gray.
    mode : str
        'default' for a linear x axis, 'log' for a logarithmic one.

    >>> fig_distribution()
    >>> plt.show()
    """
    width = K + 10000
    if mode == 'default':
        x = np.linspace(1, width, 500)
    elif mode == 'log':
        x = np.logspace(0, np.log10(width), 500)
    else:
        # Previously an unrecognized mode fell through and crashed later
        # with a NameError on `x`; fail fast with a clear message instead.
        raise ValueError("mode must be 'default' or 'log', got {!r}".format(mode))
    plt.xlabel('Separation Distance $d$ (bp)', fontsize=14)
    plt.ylabel('Contact Probability $p(d)$', fontsize=14)
    plt.tick_params(axis='x', labelsize=14)
    plt.tick_params(axis='y', labelsize=14)
    plt.yscale('log')
    if mode == 'log':
        plt.xscale('log')
    # estimators return log-probabilities, so exponentiate for plotting
    plt.plot(x, np.exp(raw_estimator_1st(x)), label='1st longest contig', linewidth=1, alpha=0.9)
    plt.plot(x, np.exp(raw_estimator_2nd(x)), label='2nd longest contig', linewidth=1, alpha=0.9)
    plt.plot(x, np.exp(raw_estimator_3rd(x)), label='3rd longest contig', linewidth=1, alpha=0.9)
    plt.plot(x, np.exp(estimator(x)), label='smoothed $p(d)$')
    # gray out the region beyond the cutoff K
    plt.axvspan(K, width, facecolor='gray', alpha=0.2)
    plt.legend(fontsize=14)
    plt.tight_layout()
# Figure
def fig_errorchart(results):
    """Bar chart comparing local orientation error rates across methods.

    ``results[k]`` holds the per-scaffold outcome lists of method k;
    scaffolds with 20 or fewer contigs are ignored.
    """
    ks = [0, 1, 2, 3, 4, 5]
    labels = ['3D-DNA', 'HiC-Hiker adaptive', 'HiC-Hiker k=2', 'HiC-Hiker k=3', 'HiC-Hiker k=4', 'HiC-Hiker k=5']
    correct = []
    wrong = []
    for k in ks:
        n_ok = 0
        n_flip = 0
        for scaffold_result in results[k]:
            # skip tiny scaffolds
            if len(scaffold_result) > 20:
                ok, _ng_ord, ng_ori = parse_result(scaffold_result)
                n_ok += ok
                n_flip += ng_ori
        correct.append(n_ok)
        wrong.append(n_flip)
    # error rate in percent per method
    T, F = np.array(correct), np.array(wrong)
    X = F / (T + F) * 100
    print(labels, X)
    plt.bar(labels, X)
    for l, x in zip(labels, X):
        plt.text(l, x, '{:.3f}'.format(x), ha='center', va='center', fontsize=13)
    plt.ylabel('Local Orientation Error (%)', fontsize=18)
    plt.tick_params(axis='x', labelsize=18, rotation=90)
    plt.tick_params(axis='y', labelsize=18)
    plt.tight_layout()
def parse_result(result):
    """Tally classification outcomes for one scaffold.

    Each element of ``result`` is one of 'ok', 'order_error' or
    'orientation_error'. Returns the counts as a tuple
    (n_ok, n_order_error, n_orientation_error).
    """
    n_ok = sum(1 for r in result if r == 'ok')
    n_order = sum(1 for r in result if r == 'order_error')
    n_orientation = sum(1 for r in result if r == 'orientation_error')
    return n_ok, n_order, n_orientation
# Figure
def fig_length_error(contigs, layout, result_3d, result_hic):
    """Plot orientation-error rate versus contig length for both methods.

    Gathers (3D-DNA verdict, HiC-Hiker verdict, contig length) triples over
    every placed contig, bins them by length, and plots the percentage of
    orientation errors per length bin.
    """
    results_with_length = []
    for scaf_id, scaffold in enumerate(layout.scaffolds):
        for scaf_pos, cid in enumerate(scaffold.order):
            verdict_3d = result_3d[scaf_id][scaf_pos]
            verdict_hic = result_hic[scaf_id][scaf_pos]
            results_with_length.append((verdict_3d, verdict_hic, contigs.lengths[cid]))
    # log-spaced length bins from 15 kbp up to 1 Mbp
    bins = np.logspace(np.log10(15000), 6, num=20, base=10)
    err_3d = np.histogram([l for a, b, l in results_with_length if a == 'orientation_error'], bins=bins)[0]
    err_hic = np.histogram([l for a, b, l in results_with_length if b == 'orientation_error'], bins=bins)[0]
    total = np.histogram([l for a, b, l in results_with_length if b in ('orientation_error', 'ok')], bins=bins)[0]
    print('ok')
    print(err_3d)
    print(err_hic)
    print(total)
    plt.xscale('log')
    plt.plot(bins[:-1], err_3d / total * 100, label='3D-DNA', marker='o')
    plt.plot(bins[:-1], err_hic / total * 100, label='HiC-Hiker', marker='o')
    plt.xlabel('Contig Length (bp)', fontsize=18)
    plt.ylabel('Local Orientation Error (%)', fontsize=18)
    plt.tick_params(labelsize=18)
    plt.legend(fontsize=18)
    plt.xlim(10**4, 10**6)
# Figure
def fig_matrix(probs, contigs, polished_layout, ori_layout, result):
    """Draw four example contact-probability matrices in a 2x2 grid with a
    shared colorbar and panel labels a-d."""
    fig = plt.figure(figsize=(8, 8), dpi=300)
    # hand-picked positions on scaffold 0
    im = None
    for panel, pos in enumerate((520, 806, 1248, 1338), start=1):
        plt.subplot(2, 2, panel)
        im = _inspect_with_size(probs, polished_layout, ori_layout, contigs, result, scaf_id=0, pos=pos)
    # shared colorbar (placement tuned manually)
    fig.subplots_adjust(right=0.9)
    cbar_ax = fig.add_axes([0.93, 0.15, 0.02, 0.7])
    fig.colorbar(im, cax=cbar_ax)
    # axis direction indicators
    plt.gcf().text(0.06, 0.98, '→ $j$')
    plt.gcf().text(0.0, 0.94, '→ $i$', rotation=270)
    # panel labels
    offset = 0.02
    plt.gcf().text(0+offset, 1-offset, 'a', fontsize=15, fontweight='bold')
    plt.gcf().text(0.45+offset, 1-offset, 'b', fontsize=15, fontweight='bold')
    plt.gcf().text(0+offset, 0.51-offset, 'c', fontsize=15, fontweight='bold')
    plt.gcf().text(0.45+offset, 0.51-offset, 'd', fontsize=15, fontweight='bold')
def normalization_matrix(prob, orientations):
    """Collapse an orientation-resolved log-probability matrix.

    ``prob`` has one 2x2 block of log probabilities per contig pair
    (rows/columns 2i and 2i+1 are the two orientations of contig i).
    Entry (i, j) of the result is the log ratio of the probability mass in
    row ``orientations[i]`` of the block to the total mass of all four
    orientation combinations.
    """
    n_rows, n_cols = prob.shape
    out = np.zeros((n_rows // 2, n_cols // 2))
    for i in range(n_rows // 2):
        oi = orientations[i]
        for j in range(n_cols // 2):
            block = prob[2 * i:2 * i + 2, 2 * j:2 * j + 2]
            # numerator: marginalize over contig j's orientation only
            numer = logsumexp(block[oi])
            # denominator: marginalize over both orientations
            denom = logsumexp(block)
            out[i, j] = numer - denom
    return out
def matrix_by_range(mat, lengths, unit_length):
    """Scale the matrix such that each cell of the matrix (say M_{ij}) have the height and width proportional to length[i] and length[j] respectively.

    Parameters
    ----------
    mat : np.ndarray
        Square matrix of shape (N, N), one value per contig pair.
    lengths : sequence of int
        Contig lengths in bp; len(lengths) == N.
    unit_length : int
        Number of bp represented by one row/column of the output.

    Returns
    -------
    np.ndarray
        Expanded matrix of shape (S, S) where S = sum(l // unit_length + 1).
    """
    assert mat.shape[0] == len(lengths)
    N = len(lengths)
    sizes = [l // unit_length + 1 for l in lengths]
    # Prefix offsets: offsets[i] is the first row/column of tile i.
    # Hoisted out of the loop — the original recomputed sum(sizes[:i])
    # for every cell, which was quadratic in N for no benefit.
    offsets = [0]
    for s in sizes:
        offsets.append(offsets[-1] + s)
    M = np.zeros((offsets[-1], offsets[-1]))
    for i in range(N):
        for j in range(N):
            M[offsets[i]:offsets[i + 1], offsets[j]:offsets[j + 1]] = mat[i, j]
    return M
def _inspect_with_size(probs, polished_layout, ori_layout, contigs, result, scaf_id, pos):
    """Render one normalized contact-probability sub-matrix around contig `pos`.

    Shows the 2k+1 contigs centered at `pos` on scaffold `scaf_id`, with cell
    sizes proportional to contig lengths, a scale bar, and tick labels that
    mark erroneous contigs ('x' = orientation error, '#' = order error).
    Returns the matplotlib image handle so the caller can attach a colorbar.
    """
    i = pos # short hand
    k = 5 # how many neighbor contigs on the matrix?
    unit_length = 10000 # 1 px corresponds to unit_length bp in the plot
    # 2x2 orientation blocks per contig pair, hence the factor of 2 in indexing
    target = probs[scaf_id][2*i - k*2 : 2*i + (k+1)*2, 2*i - k*2 : 2*i + (k+1)*2]
    print(target.shape, len(polished_layout.scaffolds[scaf_id].order), len(result[scaf_id]))
    # 0 where the polished layout agrees with the original orientation,
    # 1 where the contig was flipped
    orientations = [
        0 if polished_layout.scaffolds[scaf_id].orientation[x] == ori_layout.scaffolds[scaf_id].orientation[x] else 1
        for x in range(i-k, i+k+1)
    ]
    # orientations = polished_layout.scaffolds[scaf_id].orientation[i-k:i+k+1]
    print(polished_layout.scaffolds[scaf_id].orientation[i-k:i+k+1])
    print(ori_layout.scaffolds[scaf_id].orientation[i-k:i+k+1])
    print(orientations)
    M = normalization_matrix(target, orientations)
    lengths = [contigs.lengths[polished_layout.scaffolds[scaf_id].order[i+ind]] for ind in range(-k, k+1)]
    M2 = matrix_by_range(M, lengths, unit_length=unit_length)
    # show the matrix (exponentiate: M2 holds log probabilities)
    im = plt.imshow(np.exp(M2), cmap='bwr', interpolation=None, aspect=1.0)
    plt.clim(0, 1)
    # show the scalebar
    scalebar = ScaleBar(unit_length, label_formatter=lambda value, unit: '{} Kbp'.format(value)) # 1 pixel = unit_length bp
    plt.gca().add_artist(scalebar)
    # show contig ids
    names = ['{} {}'.format('x' if result[scaf_id][x] == 'orientation_error' else '#' if result[scaf_id][x] == 'order_error' else '', x) for x in range(i-k, i+k+1)]
    sizes = [l//unit_length + 1 for l in lengths]
    # NOTE: `i` inside these comprehensions shadows the outer `i` only within
    # the comprehension scope (Python 3), so the outer value is unaffected
    ticks_locations = [sum(sizes[:i])-0.5 for i in range(len(sizes))]
    names_locations = [sum(sizes[:i])-0.5+(sizes[i]/2) for i in range(len(sizes))]
    plt.yticks(names_locations, names)
    plt.xticks(names_locations, names, rotation=90)
    # show the border line of each contig
    for loc in ticks_locations:
        plt.axvline(x=loc, color='black', linewidth=0.5, alpha=0.5)
        plt.axhline(y=loc, color='black', linewidth=0.5, alpha=0.5)
    # disable default ticks
    plt.tick_params(bottom=False, left=False)
    #plt.vlines(x=ticks_locations, ymin=0, ymax=M2.shape[0], color='black', linewidth=0.5)
    #plt.hlines(y=ticks_locations, xmin=0, xmax=M2.shape[0], color='black', linewidth=0.5)
    #plt.axhline(y=2.5, color='black', linewidth=1)
    #plt.title(f'scaffold {scaf_id}')
    #colorbar = plt.colorbar(orientation="vertical")
    #colorbar.set_label(r'$\sum_{\theta_i, \theta_j} P(R_{ij} \mid \theta_i, \theta_j)$', fontsize=10)
    # Minor ticks
    #ax.set_xticks(np.arange(-.5, 10, 1), minor=True);
    #ax.set_yticks(np.arange(-.5, 10, 1), minor=True);
    # Gridlines based on minor ticks
    #plt.grid(which='minor', color='w', linestyle='-', linewidth=2)
    #plt.grid(color='black', linestyle='-', linewidth=0.1)
    #plt.axes().yaxis.set_minor_locator(plt.MultipleLocator(1.0))
    #ax = plt.gca();
    #ax.set_xticks(np.arange(-.5, M2.shape[0], 10), minor=True)
    #ax.set_yticks(np.arange(-.5, M2.shape[0], 10), minor=True)
    #ax.grid(which='minor', color='gray', linestyle='dashed', linewidth=0.5)
    #plt.tick_params(which='minor', bottom=False, left=False)
    #plt.grid(color='black', linewidth=0.1)
    plt.tight_layout()
    #plt.axis('off')
    return im
| [
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.bar",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.histogram",
"numpy.exp",
"scipy.special.logsumexp",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.axvline",
"matplotlib.pypl... | [((1120, 1175), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Separation Distance $d$ (bp)"""'], {'fontsize': '(14)'}), "('Separation Distance $d$ (bp)', fontsize=14)\n", (1130, 1175), True, 'import matplotlib.pyplot as plt\n'), ((1180, 1233), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Contact Probability $p(d)$"""'], {'fontsize': '(14)'}), "('Contact Probability $p(d)$', fontsize=14)\n", (1190, 1233), True, 'import matplotlib.pyplot as plt\n'), ((1239, 1278), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'labelsize': '(14)'}), "(axis='x', labelsize=14)\n", (1254, 1278), True, 'import matplotlib.pyplot as plt\n'), ((1283, 1322), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'labelsize': '(14)'}), "(axis='y', labelsize=14)\n", (1298, 1322), True, 'import matplotlib.pyplot as plt\n'), ((1327, 1344), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (1337, 1344), True, 'import matplotlib.pyplot as plt\n'), ((1774, 1824), 'matplotlib.pyplot.axvspan', 'plt.axvspan', (['K', 'width'], {'facecolor': '"""gray"""', 'alpha': '(0.2)'}), "(K, width, facecolor='gray', alpha=0.2)\n", (1785, 1824), True, 'import matplotlib.pyplot as plt\n'), ((1830, 1853), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (1840, 1853), True, 'import matplotlib.pyplot as plt\n'), ((1858, 1876), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1874, 1876), True, 'import matplotlib.pyplot as plt\n'), ((2564, 2582), 'matplotlib.pyplot.bar', 'plt.bar', (['labels', 'X'], {}), '(labels, X)\n', (2571, 2582), True, 'import matplotlib.pyplot as plt\n'), ((2773, 2827), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Local Orientation Error (%)"""'], {'fontsize': '(18)'}), "('Local Orientation Error (%)', fontsize=18)\n", (2783, 2827), True, 'import matplotlib.pyplot as plt\n'), ((2832, 2884), 'matplotlib.pyplot.tick_params', 
'plt.tick_params', ([], {'axis': '"""x"""', 'labelsize': '(18)', 'rotation': '(90)'}), "(axis='x', labelsize=18, rotation=90)\n", (2847, 2884), True, 'import matplotlib.pyplot as plt\n'), ((2889, 2928), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'labelsize': '(18)'}), "(axis='y', labelsize=18)\n", (2904, 2928), True, 'import matplotlib.pyplot as plt\n'), ((2934, 2952), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2950, 2952), True, 'import matplotlib.pyplot as plt\n'), ((4194, 4211), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (4204, 4211), True, 'import matplotlib.pyplot as plt\n'), ((4216, 4290), 'matplotlib.pyplot.plot', 'plt.plot', (['bins[:-1]', '(lens_erA / lens_all * 100)'], {'label': '"""3D-DNA"""', 'marker': '"""o"""'}), "(bins[:-1], lens_erA / lens_all * 100, label='3D-DNA', marker='o')\n", (4224, 4290), True, 'import matplotlib.pyplot as plt\n'), ((4295, 4372), 'matplotlib.pyplot.plot', 'plt.plot', (['bins[:-1]', '(lens_erB / lens_all * 100)'], {'label': '"""HiC-Hiker"""', 'marker': '"""o"""'}), "(bins[:-1], lens_erB / lens_all * 100, label='HiC-Hiker', marker='o')\n", (4303, 4372), True, 'import matplotlib.pyplot as plt\n'), ((4377, 4422), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Contig Length (bp)"""'], {'fontsize': '(18)'}), "('Contig Length (bp)', fontsize=18)\n", (4387, 4422), True, 'import matplotlib.pyplot as plt\n'), ((4427, 4481), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Local Orientation Error (%)"""'], {'fontsize': '(18)'}), "('Local Orientation Error (%)', fontsize=18)\n", (4437, 4481), True, 'import matplotlib.pyplot as plt\n'), ((4486, 4515), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(18)'}), '(labelsize=18)\n', (4501, 4515), True, 'import matplotlib.pyplot as plt\n'), ((4520, 4543), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(18)'}), '(fontsize=18)\n', (4530, 4543), True, 'import 
matplotlib.pyplot as plt\n'), ((4548, 4574), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(10 ** 4)', '(10 ** 6)'], {}), '(10 ** 4, 10 ** 6)\n', (4556, 4574), True, 'import matplotlib.pyplot as plt\n'), ((4661, 4696), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)', 'dpi': '(300)'}), '(figsize=(8, 8), dpi=300)\n', (4671, 4696), True, 'import matplotlib.pyplot as plt\n'), ((4731, 4751), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (4742, 4751), True, 'import matplotlib.pyplot as plt\n'), ((4862, 4882), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (4873, 4882), True, 'import matplotlib.pyplot as plt\n'), ((4993, 5013), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (5004, 5013), True, 'import matplotlib.pyplot as plt\n'), ((5125, 5145), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (5136, 5145), True, 'import matplotlib.pyplot as plt\n'), ((5986, 6012), 'numpy.zeros', 'np.zeros', (['(X // 2, Y // 2)'], {}), '((X // 2, Y // 2))\n', (5994, 6012), True, 'import numpy as np\n'), ((6682, 6698), 'numpy.zeros', 'np.zeros', (['(s, s)'], {}), '((s, s))\n', (6690, 6698), True, 'import numpy as np\n'), ((8094, 8108), 'matplotlib.pyplot.clim', 'plt.clim', (['(0)', '(1)'], {}), '(0, 1)\n', (8102, 8108), True, 'import matplotlib.pyplot as plt\n'), ((8677, 8711), 'matplotlib.pyplot.yticks', 'plt.yticks', (['names_locations', 'names'], {}), '(names_locations, names)\n', (8687, 8711), True, 'import matplotlib.pyplot as plt\n'), ((8716, 8763), 'matplotlib.pyplot.xticks', 'plt.xticks', (['names_locations', 'names'], {'rotation': '(90)'}), '(names_locations, names, rotation=90)\n', (8726, 8763), True, 'import matplotlib.pyplot as plt\n'), ((9002, 9043), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'bottom': '(False)', 'left': '(False)'}), '(bottom=False, left=False)\n', (9017, 9043), True, 
'import matplotlib.pyplot as plt\n'), ((10169, 10187), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10185, 10187), True, 'import matplotlib.pyplot as plt\n'), ((348, 375), 'numpy.argsort', 'np.argsort', (['contigs.lengths'], {}), '(contigs.lengths)\n', (358, 375), True, 'import numpy as np\n'), ((1015, 1041), 'numpy.linspace', 'np.linspace', (['(1)', 'width', '(500)'], {}), '(1, width, 500)\n', (1026, 1041), True, 'import numpy as np\n'), ((1376, 1393), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (1386, 1393), True, 'import matplotlib.pyplot as plt\n'), ((2473, 2484), 'numpy.array', 'np.array', (['T'], {}), '(T)\n', (2481, 2484), True, 'import numpy as np\n'), ((2486, 2497), 'numpy.array', 'np.array', (['F'], {}), '(F)\n', (2494, 2497), True, 'import numpy as np\n'), ((3738, 3753), 'numpy.log10', 'np.log10', (['(15000)'], {}), '(15000)\n', (3746, 3753), True, 'import numpy as np\n'), ((3790, 3885), 'numpy.histogram', 'np.histogram', (["[x[2] for x in results_with_length if x[0] == 'orientation_error']"], {'bins': 'bins'}), "([x[2] for x in results_with_length if x[0] ==\n 'orientation_error'], bins=bins)\n", (3802, 3885), True, 'import numpy as np\n'), ((3898, 3993), 'numpy.histogram', 'np.histogram', (["[x[2] for x in results_with_length if x[1] == 'orientation_error']"], {'bins': 'bins'}), "([x[2] for x in results_with_length if x[1] ==\n 'orientation_error'], bins=bins)\n", (3910, 3993), True, 'import numpy as np\n'), ((4006, 4117), 'numpy.histogram', 'np.histogram', (["[x[2] for x in results_with_length if x[1] == 'orientation_error' or x[1] ==\n 'ok']"], {'bins': 'bins'}), "([x[2] for x in results_with_length if x[1] ==\n 'orientation_error' or x[1] == 'ok'], bins=bins)\n", (4018, 4117), True, 'import numpy as np\n'), ((8034, 8044), 'numpy.exp', 'np.exp', (['M2'], {}), '(M2)\n', (8040, 8044), True, 'import numpy as np\n'), ((8844, 8903), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'loc', 
'color': '"""black"""', 'linewidth': '(0.5)', 'alpha': '(0.5)'}), "(x=loc, color='black', linewidth=0.5, alpha=0.5)\n", (8855, 8903), True, 'import matplotlib.pyplot as plt\n'), ((8912, 8971), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': 'loc', 'color': '"""black"""', 'linewidth': '(0.5)', 'alpha': '(0.5)'}), "(y=loc, color='black', linewidth=0.5, alpha=0.5)\n", (8923, 8971), True, 'import matplotlib.pyplot as plt\n'), ((5465, 5474), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5472, 5474), True, 'import matplotlib.pyplot as plt\n'), ((5505, 5514), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5512, 5514), True, 'import matplotlib.pyplot as plt\n'), ((5597, 5606), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5604, 5606), True, 'import matplotlib.pyplot as plt\n'), ((5673, 5682), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5680, 5682), True, 'import matplotlib.pyplot as plt\n'), ((5752, 5761), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5759, 5761), True, 'import matplotlib.pyplot as plt\n'), ((5831, 5840), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5838, 5840), True, 'import matplotlib.pyplot as plt\n'), ((6141, 6210), 'scipy.special.logsumexp', 'logsumexp', (['[prob[2 * i + oi, 2 * j + 0], prob[2 * i + oi, 2 * j + 1]]'], {}), '([prob[2 * i + oi, 2 * j + 0], prob[2 * i + oi, 2 * j + 1]])\n', (6150, 6210), False, 'from scipy.special import logsumexp\n'), ((6233, 6308), 'scipy.special.logsumexp', 'logsumexp', (['[prob[2 * i + Oi, 2 * j + Oj] for Oi in [0, 1] for Oj in [0, 1]]'], {}), '([prob[2 * i + Oi, 2 * j + Oj] for Oi in [0, 1] for Oj in [0, 1]])\n', (6242, 6308), False, 'from scipy.special import logsumexp\n'), ((8254, 8263), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8261, 8263), True, 'import matplotlib.pyplot as plt\n'), ((1093, 1108), 'numpy.log10', 'np.log10', (['width'], {}), '(width)\n', (1101, 1108), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Driver script: train or test a Wolpertinger-DDPG agent on restless-bandit envs.

Builds one of three multi-arm environments (recovering bandits, deadline
scheduling, size-aware scheduling) from command-line args, wraps continuous
action spaces with NormalizedEnv, constructs a WolpertingerAgent matching the
(continuous or discrete) action space, sets up file logging, and dispatches
to train() or test() depending on --mode.
"""
import os
import numpy as np
import logging
from train_test import train, test
import warnings
from arg_parser import init_parser
from setproctitle import setproctitle as ptitle
from normalized_env import NormalizedEnv
import gym
import sys
sys.path.insert(0,'../envs/')
from recoveringBanditsEnv import recoveringBanditsEnv
from recoveringBanditsMultipleArmsEnv import recoveringBanditsMultipleArmsEnv
from deadlineSchedulingEnv import deadlineSchedulingEnv
from deadlineSchedulingMultipleArmsEnv import deadlineSchedulingMultipleArmsEnv
from sizeAwareIndexEnv import sizeAwareIndexEnv
from sizeAwareIndexMultipleArmsEnv import sizeAwareIndexMultipleArmsEnv
if __name__ == "__main__":
    ptitle('test_wolp')
    warnings.filterwarnings('ignore')
    parser = init_parser('WOLP_DDPG')
    args = parser.parse_args()
    print(args)
    # Restrict visible GPUs BEFORE importing CUDA-touching modules below.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_ids)[1:-1]
    from util import get_output_folder, setup_logger
    from wolp_agent import WolpertingerAgent
    args.save_model_dir = get_output_folder('output', args.env)
    # --- environment selection -------------------------------------------
    if args.env == 'recovering':
        print('selected recovering')
        env = recoveringBanditsMultipleArmsEnv(seed=args.env_seed, numEpisodes=args.max_episode, batchSize=5,
        train = args.mode, numArms=args.arms, scheduleArms=args.scheduleArms, noiseVar=0.0, maxWait=20, episodeLimit=args.max_episode_length)
    elif args.env == 'deadline':
        print('selected deadline')
        env = deadlineSchedulingMultipleArmsEnv(seed=args.env_seed, numEpisodes=args.max_episode, batchSize=5,
        train=args.mode, numArms=args.arms, processingCost=0.5, maxDeadline=12, maxLoad=9, newJobProb=0.7,
        episodeLimit=args.max_episode_length, scheduleArms=args.scheduleArms, noiseVar=0.0)
    elif args.env == 'size_aware':
        class1Arms = class2Arms = int(args.arms / 2)
        env = sizeAwareIndexMultipleArmsEnv(seed=args.env_seed, numEpisodes=args.max_episode, train=args.mode, noiseVar=0,
        batchSize = 5, class1Arms=class1Arms, class2Arms=class2Arms, numArms=args.arms, scheduleArms=args.scheduleArms,
        case=1, episodeLimit=args.max_episode_length)
    else:
        # BUG FIX: previously an unknown --env value fell through silently and
        # the script crashed later with a NameError on `env`; fail fast instead.
        raise RuntimeError('undefined env {}'.format(args.env))
    continuous = None
    try:
        # Continuous action space: Box-like spaces expose .shape/.high/.low.
        nb_states = env.state_space.shape[0]
        nb_actions = env.action_space.shape[0]
        action_high = env.action_space.high
        action_low = env.action_space.low
        continuous = True
        env = NormalizedEnv(env)
    except IndexError:
        # Discrete 1-D action space: .shape[0] raises IndexError, use .n instead.
        nb_states = env.state_space.shape[0]
        nb_actions = 1
        max_actions = env.action_space.n
        continuous = False
    if args.seed > 0:
        np.random.seed(args.seed)
    # Agent construction kwargs differ only in the action-space description.
    if continuous:
        agent_args = {
            'continuous':continuous,
            'max_actions':None,
            'action_low': action_low,
            'action_high': action_high,
            'nb_states': nb_states,
            'nb_actions': nb_actions,
            'k_ratio': args.k_ratio,
            'args': args,
        }
    else:
        agent_args = {
            'continuous':continuous,
            'max_actions':max_actions,
            'action_low': None,
            'action_high': None,
            'nb_states': nb_states,
            'nb_actions': nb_actions,
            'k_ratio': args.k_ratio,
            'args': args,
        }
    agent = WolpertingerAgent(**agent_args)
    if args.gpu_ids[0] >= 0 and args.gpu_nums > 0:
        agent.cuda_convert()
    # set logger, log args here
    log = {}
    if args.mode == 'train':
        setup_logger('RS_log', r'{}/RS_train_log'.format(args.save_model_dir))
    elif args.mode == 'test':
        setup_logger('RS_log', r'{}/RS_test_log'.format(args.save_model_dir))
    else:
        raise RuntimeError('undefined mode {}'.format(args.mode))
    log['RS_log'] = logging.getLogger('RS_log')
    d_args = vars(args)
    # Record the full configuration so runs are reproducible from the log alone.
    for k in d_args.keys():
        log['RS_log'].info('{0}: {1}'.format(k, d_args[k]))
    if args.mode == 'train':
        train_args = {
            'continuous':continuous,
            'env': env,
            'agent': agent,
            'max_episode': args.max_episode,
            'warmup': args.warmup,
            'save_model_dir': args.save_model_dir,
            'max_episode_length': args.max_episode_length,
            'logger': log['RS_log'],
            'saveInterval' : args.save_episode_interval,
        }
        train(**train_args)
    elif args.mode == 'test':
        test_args = {
            'agent': agent,
            'RUNS' : args.test_runs,
            'test_episode':args.test_episode,
            'max_episode_length': args.max_episode_length,
            'testing_interval' : args.testing_episode_interval,
            'SEED': args.seed,
            'logger': log['RS_log'],
            'args' : args,
        }
        test(**test_args)
    else:
        raise RuntimeError('undefined mode {}'.format(args.mode))
| [
"arg_parser.init_parser",
"numpy.random.seed",
"util.get_output_folder",
"warnings.filterwarnings",
"normalized_env.NormalizedEnv",
"sys.path.insert",
"deadlineSchedulingMultipleArmsEnv.deadlineSchedulingMultipleArmsEnv",
"setproctitle.setproctitle",
"wolp_agent.WolpertingerAgent",
"recoveringBand... | [((289, 319), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../envs/"""'], {}), "(0, '../envs/')\n", (304, 319), False, 'import sys\n'), ((739, 758), 'setproctitle.setproctitle', 'ptitle', (['"""test_wolp"""'], {}), "('test_wolp')\n", (745, 758), True, 'from setproctitle import setproctitle as ptitle\n'), ((763, 796), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (786, 796), False, 'import warnings\n'), ((810, 834), 'arg_parser.init_parser', 'init_parser', (['"""WOLP_DDPG"""'], {}), "('WOLP_DDPG')\n", (821, 834), False, 'from arg_parser import init_parser\n'), ((1074, 1111), 'util.get_output_folder', 'get_output_folder', (['"""output"""', 'args.env'], {}), "('output', args.env)\n", (1091, 1111), False, 'from util import get_output_folder, setup_logger\n'), ((3447, 3478), 'wolp_agent.WolpertingerAgent', 'WolpertingerAgent', ([], {}), '(**agent_args)\n', (3464, 3478), False, 'from wolp_agent import WolpertingerAgent\n'), ((3919, 3946), 'logging.getLogger', 'logging.getLogger', (['"""RS_log"""'], {}), "('RS_log')\n", (3936, 3946), False, 'import logging\n'), ((1201, 1442), 'recoveringBanditsMultipleArmsEnv.recoveringBanditsMultipleArmsEnv', 'recoveringBanditsMultipleArmsEnv', ([], {'seed': 'args.env_seed', 'numEpisodes': 'args.max_episode', 'batchSize': '(5)', 'train': 'args.mode', 'numArms': 'args.arms', 'scheduleArms': 'args.scheduleArms', 'noiseVar': '(0.0)', 'maxWait': '(20)', 'episodeLimit': 'args.max_episode_length'}), '(seed=args.env_seed, numEpisodes=args.\n max_episode, batchSize=5, train=args.mode, numArms=args.arms,\n scheduleArms=args.scheduleArms, noiseVar=0.0, maxWait=20, episodeLimit=\n args.max_episode_length)\n', (1233, 1442), False, 'from recoveringBanditsMultipleArmsEnv import recoveringBanditsMultipleArmsEnv\n'), ((2480, 2498), 'normalized_env.NormalizedEnv', 'NormalizedEnv', (['env'], {}), '(env)\n', (2493, 2498), False, 'from normalized_env import NormalizedEnv\n'), ((2741, 
2766), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2755, 2766), True, 'import numpy as np\n'), ((4505, 4524), 'train_test.train', 'train', ([], {}), '(**train_args)\n', (4510, 4524), False, 'from train_test import train, test\n'), ((1515, 1811), 'deadlineSchedulingMultipleArmsEnv.deadlineSchedulingMultipleArmsEnv', 'deadlineSchedulingMultipleArmsEnv', ([], {'seed': 'args.env_seed', 'numEpisodes': 'args.max_episode', 'batchSize': '(5)', 'train': 'args.mode', 'numArms': 'args.arms', 'processingCost': '(0.5)', 'maxDeadline': '(12)', 'maxLoad': '(9)', 'newJobProb': '(0.7)', 'episodeLimit': 'args.max_episode_length', 'scheduleArms': 'args.scheduleArms', 'noiseVar': '(0.0)'}), '(seed=args.env_seed, numEpisodes=args.\n max_episode, batchSize=5, train=args.mode, numArms=args.arms,\n processingCost=0.5, maxDeadline=12, maxLoad=9, newJobProb=0.7,\n episodeLimit=args.max_episode_length, scheduleArms=args.scheduleArms,\n noiseVar=0.0)\n', (1548, 1811), False, 'from deadlineSchedulingMultipleArmsEnv import deadlineSchedulingMultipleArmsEnv\n'), ((4927, 4944), 'train_test.test', 'test', ([], {}), '(**test_args)\n', (4931, 4944), False, 'from train_test import train, test\n'), ((1909, 2188), 'sizeAwareIndexMultipleArmsEnv.sizeAwareIndexMultipleArmsEnv', 'sizeAwareIndexMultipleArmsEnv', ([], {'seed': 'args.env_seed', 'numEpisodes': 'args.max_episode', 'train': 'args.mode', 'noiseVar': '(0)', 'batchSize': '(5)', 'class1Arms': 'class1Arms', 'class2Arms': 'class2Arms', 'numArms': 'args.arms', 'scheduleArms': 'args.scheduleArms', 'case': '(1)', 'episodeLimit': 'args.max_episode_length'}), '(seed=args.env_seed, numEpisodes=args.\n max_episode, train=args.mode, noiseVar=0, batchSize=5, class1Arms=\n class1Arms, class2Arms=class2Arms, numArms=args.arms, scheduleArms=args\n .scheduleArms, case=1, episodeLimit=args.max_episode_length)\n', (1938, 2188), False, 'from sizeAwareIndexMultipleArmsEnv import sizeAwareIndexMultipleArmsEnv\n')] |
from unittest import TestCase
import qelos_core as q
from torch.autograd import Variable
import torch
from torch import nn
import numpy as np
class TestEmptyWordEmb(TestCase):
    """Tests for q.ZeroWordEmb: an embedder that always produces zero vectors."""

    def test_it(self):
        # letters a..y mapped to ids 1..25, mask token at id 0
        worddic = {chr(code): code - 96 for code in range(97, 122)}
        worddic[q.WordEmb.masktoken] = 0
        zero_emb = q.ZeroWordEmb(10, worddic=worddic)
        embedding, mask = zero_emb(Variable(torch.LongTensor([0, 1, 2])))
        self.assertEqual(embedding.size(), (3, 10))
        # the mask token is masked out, real words are not
        self.assertTrue(np.allclose(mask.detach().numpy(), [0, 1, 1]))
        # every returned vector must be all-zero
        self.assertTrue(np.allclose(np.zeros((3, 10)), embedding.detach().numpy()))

    def test_overridden(self):
        worddic = {chr(code): code - 96 for code in range(97, 122)}
        worddic[q.WordEmb.masktoken] = 0
        zero_emb = q.ZeroWordEmb(10, worddic=worddic)
        # overriding embedder uses ids 0..24 and has no mask token
        overdic = {chr(code): code - 97 for code in range(97, 122)}
        over_emb = q.WordEmb(10, worddic=overdic)
        combined = zero_emb.override(over_emb)
        emb, mask = combined(Variable(torch.LongTensor([0, 1, 2])))
        self.assertEqual(emb.size(), (3, 10))
        self.assertTrue(np.allclose(mask.detach().numpy(), [0, 1, 1]))
        # id 0 is the base embedder's mask token -> zero vector survives the override
        self.assertTrue(np.allclose(emb[0].detach().numpy(), np.zeros((10,))))
        oemb, mask = over_emb(Variable(torch.LongTensor([0, 0, 1])))
        self.assertEqual(oemb.size(), (3, 10))
        # no mask token in overdic, so the override embedder yields no mask
        self.assertTrue(mask is None)
        # non-masked positions must come from the overriding embedder
        self.assertTrue(np.allclose(oemb.detach().numpy()[1:], emb.detach().numpy()[1:]))
class TestWordEmb(TestCase):
    """Basic construction and masking behaviour of q.WordEmb."""

    def test_creation_simple(self):
        # letters a..y with ids 0..24, no mask token
        worddic = {chr(code): code - 97 for code in range(97, 122)}
        emb_layer = q.WordEmb(10, worddic=worddic)
        embedding, _ = emb_layer(Variable(torch.LongTensor([0, 1, 2])))
        self.assertEqual(embedding.size(), (3, 10))
        # row 0 of the weight matrix must be returned for id 0
        expected = emb_layer.embedding.weight.cpu().detach().numpy()[0]
        self.assertTrue(np.allclose(expected, embedding[0].detach().numpy()))

    def test_creation_masked(self):
        # same letters but shifted to 1..25 so the mask token can take id 0
        worddic = {chr(code): code - 96 for code in range(97, 122)}
        worddic[q.WordEmb.masktoken] = 0
        emb_layer = q.WordEmb(10, worddic=worddic)
        embedding, mask = emb_layer(Variable(torch.LongTensor([0, 1, 2])))
        self.assertEqual(embedding.size(), (3, 10))
        expected = emb_layer.embedding.weight.cpu().detach().numpy()[1]
        self.assertTrue(np.allclose(expected, embedding[1].detach().numpy()))
        # masked position must embed to the zero vector
        self.assertTrue(np.allclose(embedding[0].detach().numpy(), np.zeros((10,))))
        print(mask)
        self.assertTrue(np.allclose(mask.detach().numpy(), [0, 1, 1]))
class TestAdaptedWordEmb(TestCase):
    """Tests q.WordEmb.adapt(): remapping an embedder onto a new dictionary
    while sharing the underlying weights with the original ("vanilla") one."""
    def setUp(self):
        # wdic: original ids (sparse, non-contiguous); wdic2: the adapted ids
        wdic = {"<MASK>": 0, "<RARE>": 1, "the": 10, "a": 5, "his": 50, "abracadabrqmsd--qsdfmqgf-": 6}
        wdic2 = {"<MASK>": 0, "<RARE>": 1, "the": 2, "a": 3, "his": 4, "abracadabrqmsd--qsdfmqgf-": 5, "qsdfqsdf": 7}
        self.adapted = q.WordEmb(50, worddic=wdic)
        # vanilla shares the exact same weight values as the pre-adaptation embedder
        self.vanilla = q.WordEmb(50, worddic=wdic, value=self.adapted.embedding.weight.detach().numpy())
        self.adapted = self.adapted.adapt(wdic2)
    def test_map(self):
        """`emb * word` maps word -> id; `emb % word` maps word -> vector."""
        self.assertEqual(self.adapted * "a", 3)
        self.assertEqual(self.adapted * "the", 2)
        self.assertEqual(self.adapted * "his", 4)
        self.assertEqual(self.adapted * "her", 1)
        self.assertEqual(self.vanilla * "a", 5)
        self.assertEqual(self.vanilla * "the", 10)
        self.assertEqual(self.vanilla * "her", 1)
        # unknown word falls back to the <RARE> id
        self.assertEqual(self.adapted * "qsdfqlmkdsjfmqlsdkjgmqlsjdf", 1)
        print(self.vanilla * "the", self.adapted * "the")
        # ids differ between the two dictionaries, but vectors must agree
        self.assertTrue(np.allclose(self.vanilla % "the", self.adapted % "the"))
        self.assertTrue(np.allclose(self.vanilla % "his", self.adapted % "his"))
    def test_adapted_block(self):
        """Gradients must flow through the adapter into the inner embedding."""
        pred, mask = self.adapted(Variable(torch.LongTensor([self.adapted * x for x in "the a his".split()])))
        l = pred.sum()
        l.backward()
        grad = self.adapted.inner.embedding.weight.grad
        self.assertTrue(grad.norm().item() > 0)
        # embedded vectors must equal the vanilla embedder's vectors
        vpred = np.asarray([self.vanilla % x for x in "the a his".split()])
        self.assertTrue(np.allclose(pred.detach().numpy(), vpred))
        oovpred, mask = self.adapted(Variable(torch.LongTensor([6, 7]))) # two different kinds of OOV
        print(self.adapted % 6)
        print(self.vanilla % self.vanilla.raretoken)
        # TODO self.assertTrue(np.allclose(oovpred.datasets.numpy(), np.zeros_like(oovpred.datasets.numpy())))
    def test_adapted_prediction_shape(self):
        """Batched 2-D index input yields (batch, seq, dim) output and a mask."""
        xval = np.random.randint(0, 3, (5, 4))
        x = Variable(torch.from_numpy(xval))
        pred, mask = self.adapted(x)
        self.assertEqual(pred.size(), (5, 4, 50))
        self.assertEqual(mask.size(), (5, 4))
        # mask is 0 exactly where the input id is the mask id 0
        self.assertTrue(np.allclose(mask.detach().numpy(), xval != 0))
class TestWordEmbOverriding(TestCase):
    """Tests q.WordEmb.override(): words present in the overriding embedder's
    dictionary take its vectors; all other words fall back to the base."""
    def setUp(self):
        # base and override dictionaries deliberately overlap on some words
        # (his, monkey, the, key) and differ on the rest
        words = "<MASK> <RARE> the a his monkey inception key earlgrey"
        wdic = dict(zip(words.split(), range(0, len(words.split()))))
        overwords = "he his her mine cat monkey the interstellar grey key"
        overwdic = dict(zip(overwords.split(), range(0, len(overwords.split()))))
        self.baseemb = q.WordEmb(dim=50, worddic=wdic)
        self.overemb = q.WordEmb(dim=50, worddic=overwdic)
        self.emb = self.baseemb.override(self.overemb)
        pass
    def test_embed_masker(self):
        # mask must be 0 exactly where the input id is 0 (the mask id)
        v = Variable(torch.from_numpy(np.random.randint(0, 5, (4, 3))))
        m, mask = self.emb(v)
        self.assertTrue(np.all((v.detach().numpy() != 0) == mask.detach().numpy()))
    def test_sameasover(self):
        # words present in the override dictionary must use the override vectors
        words = "the his monkey key"
        pred, msk = self.emb(torch.LongTensor([self.emb * x for x in words.split()]))
        pred = pred.detach().numpy()
        gpred, _ = self.overemb(torch.LongTensor([self.overemb * x for x in words.split()]))
        gpred = gpred.detach().numpy()
        self.assertTrue(np.allclose(pred, gpred))
    def test_sameasbase(self):
        # words only in the base dictionary must use the base vectors
        words = "inception earlgrey <MASK>"
        pred, mask = self.emb(torch.LongTensor([self.emb * x for x in words.split()]))
        pred = pred.detach().numpy()
        gpred, msk = self.baseemb(torch.LongTensor([self.baseemb * x for x in words.split()]))
        gpred = gpred.detach().numpy()
        self.assertTrue(np.allclose(pred, gpred))
    def test_notasover(self):
        # NOTE(review): "<NAME>" looks like a dataset-anonymization artifact;
        # the original probably listed real out-of-override words — verify.
        words = "<NAME>"
        pred, mask = self.emb(torch.LongTensor([self.emb * x for x in words.split()]))
        pred = pred.detach().numpy()
        gpred, _ = self.overemb(torch.LongTensor([self.baseemb * x for x in words.split()]))
        gpred = gpred.detach().numpy()
        self.assertFalse(np.allclose(pred, gpred))
    def test_notasbase(self):
        # overlapping words must NOT equal the base vectors (override won)
        words = "the his monkey key"
        pred, mask = self.emb(torch.LongTensor([self.emb * x for x in words.split()]))
        pred = pred.detach().numpy()
        gpred, msk = self.baseemb(torch.LongTensor([self.baseemb * x for x in words.split()]))
        gpred = gpred.detach().numpy()
        self.assertFalse(np.allclose(pred, gpred))
class TestGlove(TestCase):
    """Tests q.PretrainedWordEmb loading from a small bundled GloVe file."""
    def setUp(self):
        # point the loader at the test fixture instead of the full GloVe file
        q.PretrainedWordEmb.defaultpath = "../data/glove/miniglove.%dd"
        self.glove = q.PretrainedWordEmb(50)
        print(self.glove.defaultpath)
    def test_loaded(self):
        """The vector for "the" must match the known miniglove values."""
        thevector = self.glove % "the"
        truevector = np.asarray([ 4.18000013e-01, 2.49679998e-01, -4.12420005e-01,
            1.21699996e-01, 3.45270008e-01, -4.44569997e-02,
            -4.96879995e-01, -1.78619996e-01, -6.60229998e-04,
            -6.56599998e-01, 2.78430015e-01, -1.47670001e-01,
            -5.56770027e-01, 1.46579996e-01, -9.50950012e-03,
            1.16579998e-02, 1.02040000e-01, -1.27920002e-01,
            -8.44299972e-01, -1.21809997e-01, -1.68009996e-02,
            -3.32789987e-01, -1.55200005e-01, -2.31309995e-01,
            -1.91809997e-01, -1.88230002e+00, -7.67459989e-01,
            9.90509987e-02, -4.21249986e-01, -1.95260003e-01,
            4.00710011e+00, -1.85939997e-01, -5.22870004e-01,
            -3.16810012e-01, 5.92130003e-04, 7.44489999e-03,
            1.77780002e-01, -1.58969998e-01, 1.20409997e-02,
            -5.42230010e-02, -2.98709989e-01, -1.57490000e-01,
            -3.47579986e-01, -4.56370004e-02, -4.42510009e-01,
            1.87849998e-01, 2.78489990e-03, -1.84110001e-01,
            -1.15139998e-01, -7.85809994e-01])
        self.assertEqual(self.glove * "the", 2)
        self.assertTrue(np.allclose(thevector, truevector))
        # 4000 words + <MASK>/<RARE> specials = 4002 rows
        self.assertEqual(self.glove.embedding.weight.size(), (4002, 50))
    def test_loaded_with_dic(self):
        # loading restricted to a custom dictionary must not raise
        D = "<MASK> <RARE> cat dog person earlgreytea".split()
        D = dict(zip(D, range(len(D))))
        m = q.PretrainedWordEmb(50, worddic=D)
    def test_subclone(self):
        """A subclone over a smaller dictionary must reuse the glove vectors."""
        D = "<MASK> <RARE> cat dog person earlgreytea".split()
        D = dict(zip(D, range(len(D))))
        subclone = self.glove.subclone(D, fixed=True)
        for k, v in D.items():
            if k not in subclone.D:
                # words missing from the subclone must be missing from glove too
                self.assertTrue(k not in self.glove.D)
            else:
                subclonemb = subclone(torch.tensor(np.asarray([subclone.D[k]])))[0].numpy()
                gloveemb = self.glove(torch.tensor(np.asarray([self.glove.D[k]])))[0].numpy()
                self.assertTrue(np.allclose(subclonemb, gloveemb))
        pass
    def test_partially_loaded(self):
        """Partially-pretrained embedder: per-row grad fractions are applied."""
        D = "<MASK> <RARE> cat dog person arizonaiceteaa".split()
        D = dict(zip(D, range(len(D))))
        baseemb = q.WordEmb(dim=50, worddic=D)
        baseemb = baseemb.override(self.glove)
        q.PartiallyPretrainedWordEmb.defaultpath = "../data/glove/miniglove.%dd"
        plemb = q.PartiallyPretrainedWordEmb(dim=50, worddic=D,
                value=baseemb.base.embedding.weight.detach().numpy(),
                gradfracs=(1., 0.5))
        x = torch.tensor(np.asarray([0, 1, 2, 3, 4, 5]), dtype=torch.int64)
        base_out, base_mask = baseemb(x)
        pl_out, mask = plemb(x)
        # non-special rows must match the override-composed baseline
        self.assertTrue(np.allclose(base_out[2:].detach().numpy(), pl_out[2:].detach().numpy()))
        # test gradients
        l = pl_out.sum()
        l.backward()
        gradnorm = plemb.embedding.weight.grad.norm()
        thegrad = plemb.embedding.weight.grad
        print(gradnorm)
        # mask row gets no gradient; untrained rows full gradient;
        # pretrained rows a scaled (0.5) gradient per gradfracs
        self.assertTrue(np.all(thegrad.detach().numpy()[0, :] == 0))
        self.assertTrue(np.all(thegrad.detach().numpy()[[1,2,5], :] == 1.))
        self.assertTrue(np.all(thegrad.detach().numpy()[[3, 4], :] == 0.5))
        print(base_out - pl_out)
class TestComputedWordEmb(TestCase):
    """q.ComputedWordEmb: embeddings produced by running data through a computer module."""

    def setUp(self):
        raw = np.random.random((7, 10)).astype("float32")
        projector = nn.Linear(10, 15)
        vocab = "<MASK> <RARE> first second third fourth fifth".split()
        worddic = {word: idx for idx, word in enumerate(vocab)}
        self.emb = q.ComputedWordEmb(data=raw, computer=projector, worddic=worddic)

    def test_shape(self):
        ids = Variable(torch.LongTensor([0, 1, 2]))
        vectors, mask = self.emb(ids)
        print(mask)
        # the computer maps 10-dim rows to 15-dim embeddings
        self.assertEqual(vectors.size(), (3, 15))
        # id 0 is the mask token
        self.assertTrue(np.allclose(mask.detach().numpy(), [[0, 1, 1]]))
class TestMergedWordEmb(TestCase):
    """q.WordEmb.merge(): combining two embedders by sum or concatenation."""

    def setUp(self):
        vocab = "<MASK> <RARE> first second third fourth fifth".split()
        worddic = {word: idx for idx, word in enumerate(vocab)}
        self.emb1 = q.WordEmb(100, worddic=worddic)
        self.emb2 = q.WordEmb(100, worddic=worddic)

    def test_sum_merge(self):
        merged = self.emb1.merge(self.emb2, mode="sum")
        ids = Variable(torch.LongTensor([0, 1, 2]))
        first_out, first_mask = self.emb1(ids)
        print(first_mask)
        second_out, _ = self.emb2(ids)
        merged_out, _ = merged(ids)
        # sum merge: elementwise sum of both embedders' outputs
        expected = first_out.detach().numpy() + second_out.detach().numpy()
        self.assertTrue(np.allclose(merged_out.detach().numpy(), expected))

    def test_cat_merge(self):
        merged = self.emb1.merge(self.emb2, mode="cat")
        ids = Variable(torch.LongTensor([0, 1, 2]))
        first_out, first_mask = self.emb1(ids)
        print(first_mask)
        second_out, _ = self.emb2(ids)
        merged_out, _ = merged(ids)
        # cat merge: concatenation along the feature axis
        expected = np.concatenate(
            [first_out.detach().numpy(), second_out.detach().numpy()], axis=1)
        self.assertTrue(np.allclose(merged_out.detach().numpy(), expected))
class TestZeroWordLinout(TestCase):
    """q.ZeroWordLinout: an output layer that scores every word as zero."""

    def setUp(self):
        vocab = "<MASK> <RARE> first second third fourth fifth".split()
        worddic = {word: idx for idx, word in enumerate(vocab)}
        self.linout = q.ZeroWordLinout(10, worddic=worddic)

    def test_it(self):
        feats = Variable(torch.randn(7, 10))
        mask_rows = [[1, 0, 1, 1, 0, 1, 0]] * 5 + [[0, 1, 0, 0, 1, 0, 1]] * 2
        mask = Variable(torch.FloatTensor(mask_rows))
        scores = self.linout(feats, mask=mask)
        print(scores)
        self.assertEqual(scores.size(), (7, 7))
        # all scores must be exactly zero regardless of input or mask
        self.assertTrue(np.allclose(scores.detach().numpy(), np.zeros_like(scores.detach().numpy())))

    def test_overridden(self):
        subvocab = "second third fourth fifth".split()
        subdic = {word: idx for idx, word in enumerate(subvocab)}
        override = q.WordLinout(10, worddic=subdic)
        combined = self.linout.override(override)
        feats = Variable(torch.randn(7, 10))
        mask_rows = [[1, 0, 1, 1, 0, 1, 0]] * 5 + [[0, 1, 0, 0, 1, 0, 1]] * 2
        mask = Variable(torch.FloatTensor(mask_rows))
        scores = combined(feats, mask=mask)
        print(scores)
class TestWordLinout(TestCase):
    """Shape test for the plain q.WordLinout output layer."""

    def setUp(self):
        vocab = "<MASK> <RARE> first second third fourth fifth".split()
        worddic = {word: idx for idx, word in enumerate(vocab)}
        self.linout = q.WordLinout(10, worddic=worddic)

    def test_shape(self):
        feats = Variable(torch.randn(7, 10))
        mask_rows = [[1, 0, 1, 1, 0, 1, 0]] * 5 + [[0, 1, 0, 0, 1, 0, 1]] * 2
        mask = Variable(torch.FloatTensor(mask_rows))
        scores = self.linout(feats, mask=mask)
        print(scores)
        # one score per vocabulary entry for each of the 7 inputs
        self.assertEqual(scores.size(), (7, 7))
class TestCosineWordLinout(TestCase):
    """q.WordLinout with cosnorm=True: scores are cosine similarities."""
    def setUp(self):
        worddic = "<MASK> <RARE> first second third fourth fifth sixth"
        worddic = dict(zip(worddic.split(), range(len(worddic.split()))))
        self.linout = q.WordLinout(10, worddic=worddic, cosnorm=True)
    def test_it(self):
        x = torch.randn(7, 10)
        y = self.linout(x)
        print(y)
        self.assertEqual(y.size(), (7, 8))
        # cosine similarities lie strictly inside (-1, 1) for random vectors
        self.assertTrue(np.all(y.detach().numpy() < 1))
        self.assertTrue(np.all(y.detach().numpy() > -1))
        #x = x.detach().numpy()
        w = self. linout.lin.weight.detach().numpy()
        y = y.detach().numpy()
        # verify each score against the cosine formula dot(x, w) / (|x| * |w|)
        for i in range(7):
            for j in range(8):
                self.assertTrue(np.allclose(y[i, j], np.dot(x[i], w[j]) / (np.linalg.norm(x[i], 2) * np.linalg.norm(w[j], 2))))
        # _retcosnorm returns the raw dot products plus squared weight norms;
        # normalizing them manually must reproduce the cosine scores
        ny, cosnorm = self.linout(x, _retcosnorm=True)
        ny = ny / x.norm(2, 1).unsqueeze(1)
        ny = ny / cosnorm.pow(1./2)
        self.assertTrue(np.allclose(ny.detach().numpy(), y))
class TestPretrainedWordLinout(TestCase):
    """q.PretrainedWordLinout loading from the bundled miniglove fixture."""
    def setUp(self):
        # point the loader at the test fixture instead of the full GloVe file
        q.PretrainedWordLinout.defaultpath = "../data/glove/miniglove.%dd"
        self.glove = q.PretrainedWordLinout(50)
        print(self.glove.defaultpath)
    def test_loaded(self):
        """The weight row for "the" must match the known miniglove values."""
        thevector = self.glove % "the"
        truevector = np.asarray([
            4.18000013e-01, 2.49679998e-01, -4.12420005e-01,
            1.21699996e-01, 3.45270008e-01, -4.44569997e-02,
            -4.96879995e-01, -1.78619996e-01, -6.60229998e-04,
            -6.56599998e-01, 2.78430015e-01, -1.47670001e-01,
            -5.56770027e-01, 1.46579996e-01, -9.50950012e-03,
            1.16579998e-02, 1.02040000e-01, -1.27920002e-01,
            -8.44299972e-01, -1.21809997e-01, -1.68009996e-02,
            -3.32789987e-01, -1.55200005e-01, -2.31309995e-01,
            -1.91809997e-01, -1.88230002e+00, -7.67459989e-01,
            9.90509987e-02, -4.21249986e-01, -1.95260003e-01,
            4.00710011e+00, -1.85939997e-01, -5.22870004e-01,
            -3.16810012e-01, 5.92130003e-04, 7.44489999e-03,
            1.77780002e-01, -1.58969998e-01, 1.20409997e-02,
            -5.42230010e-02, -2.98709989e-01, -1.57490000e-01,
            -3.47579986e-01, -4.56370004e-02, -4.42510009e-01,
            1.87849998e-01, 2.78489990e-03, -1.84110001e-01,
            -1.15139998e-01, -7.85809994e-01])
        self.assertEqual(self.glove * "the", 2)
        self.assertTrue(np.allclose(thevector, truevector))
        # 4000 words + <MASK>/<RARE> specials = 4002 rows
        self.assertEqual(self.glove.lin.weight.size(), (4002, 50))
class TestAdaptedWordLinout(TestCase):
    """Tests q.WordLinout.adapt(): remapping an output layer onto a new
    dictionary while sharing weights with the original ("vanilla") layer."""
    def setUp(self):
        # wdic: original ids (sparse); wdic2: the adapted ids
        wdic = {"<MASK>": 0, "<RARE>": 1, "the": 10, "a": 5, "his": 50, "abracadabrqmsd--qsdfmqgf-": 6}
        wdic2 = {"<MASK>": 0, "<RARE>": 1, "the": 2, "a": 3, "his": 4, "abracadabrqmsd--qsdfmqgf-": 5, "qsdfqsdf": 7}
        self.adapted = q.WordLinout(10, worddic=wdic, bias=False)
        # vanilla shares the exact same weight values as the pre-adaptation layer
        self.vanilla = q.WordLinout(10, worddic=wdic, weight=self.adapted.lin.weight.detach().numpy(), bias=False)
        self.adapted = self.adapted.adapt(wdic2)
    def test_map(self):
        """`linout * word` maps word -> id; `linout % word` maps word -> weight row."""
        self.assertEqual(self.adapted * "a", 3)
        self.assertEqual(self.adapted * "the", 2)
        self.assertEqual(self.adapted * "his", 4)
        self.assertEqual(self.adapted * "her", 1)
        self.assertEqual(self.vanilla * "a", 5)
        self.assertEqual(self.vanilla * "the", 10)
        self.assertEqual(self.vanilla * "her", 1)
        # unknown word falls back to the <RARE> id
        self.assertEqual(self.adapted * "qsdfqlmkdsjfmqlsdkjgmqlsjdf", 1)
        print(self.vanilla * "the", self.adapted * "the")
        print(self.vanilla % "the", self.adapted % "the")
        # ids differ between the two dictionaries, but weight rows must agree
        self.assertTrue(np.allclose(self.vanilla % "the", self.adapted % "the"))
        self.assertTrue(np.allclose(self.vanilla % "his", self.adapted % "his"))
    def test_adapted_block(self):
        """Gradients must flow through the adapter into the inner linear layer."""
        pred = self.adapted(Variable(torch.FloatTensor(np.stack([self.adapted % x for x in "the a his".split()], axis=0))))
        l = pred.sum()
        l.backward()
        grad = self.adapted.inner.lin.weight.grad
        self.assertTrue(grad.norm().item() > 0)
    def test_adapted_prediction_shape(self):
        # scoring two feature rows over the 8-word adapted dictionary
        xval = np.stack([self.adapted % "the", self.adapted % "a"], axis=0)
        x = Variable(torch.from_numpy(xval))
        pred = self.adapted(x)
        self.assertEqual(pred.size(), (2, 8))
    def test_cosined(self):
        """cosnorm on the inner layer: adapted scores are cosine similarities."""
        EPS = 1e-6
        self.adapted.inner.cosnorm = True
        xval = np.stack([self.adapted % "the", self.adapted % "a"], axis=0)
        x = torch.tensor(xval)
        pred = self.adapted(x)
        self.assertEqual(pred.size(), (2, 8))
        prednp = pred.detach().numpy()
        print(prednp)
        print(pred)
        # cosine scores bounded by [-1, 1] (EPS for float error)
        self.assertTrue(np.all(pred.detach().numpy() <= 1.+EPS))
        self.assertTrue(np.all(pred.detach().numpy() >= -1-EPS))
        # manually normalizing the raw dot products must reproduce the scores
        ny, cosnorm = self.adapted(x, _retcosnorm=True)
        ny = ny / x.norm(2, 1).unsqueeze(1)
        ny = ny / cosnorm.pow(1./2)
        self.assertTrue(np.allclose(ny.detach().numpy(), prednp))
class TestOverriddenWordLinout(TestCase):
    """Tests q.WordLinout.override(): scores for words in the override
    dictionary come from the override layer, the rest from the base."""
    def setUp(self):
        # dictionaries overlap on the/a/his; base also has monkey
        wdic = {"<MASK>": 0, "<RARE>": 1, "the": 10, "a": 5, "his": 50, "monkey": 6}
        wdic2 = {"<MASK>": 0, "<RARE>": 1, "the": 2, "a": 3, "his": 4, "abracadabrqmsd--qsdfmqgf-": 5, "qsdfqsdf": 7}
        self.base = q.WordLinout(10, worddic=wdic, bias=False)
        self.over = q.WordLinout(10, worddic=wdic2, bias=False)
        self.overridden = self.base.override(self.over)
    def test_shapes(self):
        """Overridden output keeps the base dictionary size (max id 50 -> 51)."""
        x = Variable(torch.FloatTensor(np.stack([self.base % x for x in "the a his".split()], axis=0)))
        pred = self.overridden(x)
        self.assertEqual(pred.size(), (3, 51))
        basepred = self.base(x)
        overpred = self.over(x)
        l = pred.sum()
        l.backward()
        # NOTE(review): .norm() returns a 0-dim tensor; indexing it with [0]
        # fails on modern PyTorch — confirm the torch version this targets.
        self.assertTrue(self.base.lin.weight.grad.norm()[0] > 0)
        self.assertTrue(self.over.lin.weight.grad.norm()[0] > 1)
        basepred = basepred.detach().numpy()
        overpred = overpred.detach().numpy()
        pred = pred.detach().numpy()
        # base ids 10/5 ("the"/"a") must carry the override layer's scores
        self.assertTrue(np.allclose(pred[:, 10], overpred[:, 2]))
        self.assertTrue(np.allclose(pred[:, 5], overpred[:, 3]))
        # "monkey" is only in the base dictionary -> base score survives
        self.assertTrue(np.allclose(pred[:, 6], basepred[:, 6]))
    def test_cosined(self):
        """With cosnorm on both layers, overridden scores are cosines in [-1, 1]."""
        EPS = 1e-12
        self.base.cosnorm = True
        self.over.cosnorm = True
        x = torch.tensor(np.stack([self.base % x for x in "the a his".split()], axis=0))
        pred = self.overridden(x)
        self.assertEqual(pred.size(), (3, 51))
        prednp = pred.detach().numpy()
        print(prednp)
        print(pred)
        self.assertTrue(np.all(pred.detach().numpy() <= 1. + EPS))
        self.assertTrue(np.all(pred.detach().numpy() >= -1 - EPS))
        # manually normalizing the raw dot products must reproduce the scores
        ny, cosnorm = self.overridden(x, _retcosnorm=True)
        ny = ny / x.norm(2, 1).unsqueeze(1)
        ny = ny / cosnorm.pow(1. / 2)
        self.assertTrue(np.allclose(ny.detach().numpy(), prednp))
class TestComputedWordLinout(TestCase):
    """q.ComputedWordLinout: output weights computed on the fly by running
    per-word data through a computer module; scores are x @ computer(data).T."""
    def setUp(self):
        data = np.random.random((7, 10)).astype("float32")
        computer = nn.Linear(10, 15)
        worddic = "<MASK> <RARE> first second third fourth fifth"
        worddic = dict(zip(worddic.split(), range(len(worddic.split()))))
        self.linout = q.ComputedWordLinout(data=data, computer=computer, worddic=worddic, bias=False)
    def test_basic(self):
        """Unmasked output must equal the manual matmul against computed weights."""
        x = Variable(torch.randn(3, 15)).float()
        out = self.linout(x)
        self.assertEqual(out.size(), (3, 7))
        data = self.linout.data
        computer = self.linout.computer
        cout = torch.matmul(x, computer(data).t())
        self.assertTrue(np.allclose(cout.detach().numpy(), out.detach().numpy()))
    def test_cosiner(self):
        """cosnorm=True must produce cosine scores; _retcosnorm must be consistent."""
        EPS = 1e-12
        self.linout.cosnorm = True
        x = Variable(torch.randn(3, 15)).float()
        out = self.linout(x)
        self.assertEqual(out.size(), (3, 7))
        self.assertTrue(np.all(out.detach().numpy() <= 1. + EPS))
        self.assertTrue(np.all(out.detach().numpy() >= -1 - EPS))
        data = self.linout.data
        computer = self.linout.computer
        # reference: matmul normalized by both weight-row and input norms
        cout = torch.matmul(x, computer(data).t())
        cout = cout / torch.norm(computer(data), 2, 1).unsqueeze(0)
        cout = cout / torch.norm(x, 2, 1).unsqueeze(1)
        self.assertTrue(np.allclose(cout.detach().numpy(), out.detach().numpy()))
        self.linout.cosnorm = False
        # manually normalizing the raw dot products must also reproduce it
        ny, cosnorm = self.linout(x, _retcosnorm=True)
        ny = ny / x.norm(2, 1).unsqueeze(1)
        ny = ny / cosnorm.pow(1. / 2)
        self.assertTrue(np.allclose(ny.detach().numpy(), out.detach().numpy()))
    def test_masked(self):
        """Masked-out words must receive log(0) = -inf additive penalty."""
        x = Variable(torch.randn(3, 15)).float()
        msk_nonzero_batches = [0,0,0,1,1,2]
        msk_nonzero_values = [0,2,3,2,6,5]
        msk = np.zeros((3, 7)).astype("int32")
        msk[msk_nonzero_batches, msk_nonzero_values] = 1
        print(msk)
        msk = Variable(torch.from_numpy(msk))
        out = self.linout(x, mask=msk)
        self.assertEqual(out.size(), (3, 7))
        data = self.linout.data
        computer = self.linout.computer
        cout = torch.matmul(x, computer(data).t())
        # cout = cout * msk.float()
        cout = cout + torch.log(msk.float())
        self.assertTrue(np.allclose(cout.detach().numpy(), out.detach().numpy()))
    # def test_masked_with_rnn_computer(self):
    #     data = np.random.random((7, 5, 10)).astype("float32")
    #     computer = q.RecurrentStack(
    #         q.persist_kwargs(),
    #         q.GRULayer(10, 15)
    #     ).return_final()
    #     worddic = "<MASK> <RARE> first second third fourth fifth"
    #     worddic = dict(zip(worddic.split(), range(len(worddic.split()))))
    #     linout = q.ComputedWordLinout(data=data, computer=computer, worddic=worddic)
    #
    #     x = Variable(torch.randn(3, 15)).float()
    #     msk_nonzero_batches = [0, 0, 0, 1, 1, 2]
    #     msk_nonzero_values = [0, 2, 3, 2, 6, 5]
    #     msk = np.zeros((3, 7)).astype("int32")
    #     msk[msk_nonzero_batches, msk_nonzero_values] = 1
    #     print(msk)
    #     msk = Variable(torch.from_numpy(msk))
    #     out = linout(x, mask=msk)
    #     self.assertEqual(out.size(), (3, 7))
    #     data = linout.data
    #     computer = linout.computer
    #     cout = torch.matmul(x, computer(data).t())
    #     # cout = cout * msk.float()
    #     cout = cout + torch.log(msk.float())
    #     self.assertTrue(np.allclose(cout.detach().numpy(), out.detach().numpy()))
    def test_all_masked(self):
        """An all-zero mask must drive every score to -inf without crashing."""
        x = torch.randn(3, 15)
        msk = np.zeros((3, 7)).astype("int32")
        print(msk)
        msk = torch.tensor(msk)
        out = self.linout(x, mask=msk)
        self.assertEqual(out.size(), (3, 7))
        data = self.linout.data
        computer = self.linout.computer
        cout = torch.matmul(x, computer(data).t())
        cout = cout + torch.log(msk.float())
        self.assertTrue(np.allclose(cout.detach().numpy(), out.detach().numpy()))
    # def test_masked_3D_data(self):
    #     self.linout.data = q.val(np.random.random((7, 10, 3)).astype(dtype="float32")).v
    #     self.linout.computer = q.GRULayer(3, 15).return_final("only")
    #
    #     x = Variable(torch.randn(3, 15)).float()
    #     msk_nonzero_batches = [0, 0, 0, 1, 1, 2]
    #     msk_nonzero_values = [0, 2, 3, 2, 6, 5]
    #     msk = np.zeros((3, 7)).astype("int32")
    #     msk[msk_nonzero_batches, msk_nonzero_values] = 1
    #     print(msk)
    #     msk = Variable(torch.from_numpy(msk))
    #     out = self.linout(x, mask=msk)
    #     self.assertEqual(out.size(), (3, 7))
    #     data = self.linout.data
    #     computer = self.linout.computer
    #     cout = torch.matmul(x, computer(data).t())
    #     # cout = cout * msk.float()
    #     cout = cout + torch.log(msk.float())
    #     self.assertTrue(np.allclose(cout.detach().numpy(), out.detach().numpy()))
    def test_basic_grad(self):
        """Backward through two forwards must populate gradients without error."""
        x = Variable(torch.randn(3, 15)).float()
        y = Variable(torch.randn(3, 15)).float()
        out = self.linout(x)
        loss = out.sum()
        loss.backward()
        agrads = []
        for p in self.linout.parameters():
            if p.requires_grad:
                # +0 copies the array so later backward() calls don't alias it
                agrads.append(p.grad.detach().numpy() + 0)
        out = self.linout(y)
        loss = out.sum()
        loss.backward()
        bgrads = []
        for p in self.linout.parameters():
            if p.requires_grad:
                bgrads.append(p.grad.detach().numpy() + 0)
        pass
class TestMergedWordLinout(TestCase):
    """q.WordLinout.merge(): merged layer with cosine-normalized scores."""

    def setUp(self):
        # 100-entry dictionary: chr(0..99) -> 0..99
        worddic = {chr(idx): idx for idx in range(100)}
        self.base = q.WordLinout(50, worddic=worddic, bias=False)
        self.merg = q.WordLinout(50, worddic=worddic, bias=False)
        self.linout = self.base.merge(self.merg)

    def test_cosiner(self):
        self.linout.cosnorm = True
        feats = torch.tensor(np.random.random((5, 50)), dtype=torch.float32)
        scores = self.linout(feats)
        # cosine scores are bounded by [-1, 1]
        self.assertTrue(np.all(scores.detach().numpy() <= 1.))
        self.assertTrue(np.all(scores.detach().numpy() >= -1.))
        # manually normalizing the raw dot products must reproduce the scores
        raw, cosnorm = self.linout(feats, _retcosnorm=True)
        normalized = raw / feats.norm(2, 1).unsqueeze(1)
        normalized = normalized / cosnorm.pow(1. / 2)
        self.assertTrue(np.allclose(normalized.detach().numpy(), scores.detach().numpy()))
| [
"numpy.allclose",
"torch.randn",
"qelos_core.ZeroWordLinout",
"numpy.random.randint",
"numpy.linalg.norm",
"qelos_core.PretrainedWordLinout",
"qelos_core.ComputedWordLinout",
"torch.FloatTensor",
"qelos_core.WordEmb",
"torch.nn.Linear",
"qelos_core.ZeroWordEmb",
"numpy.stack",
"qelos_core.Wo... | [((320, 350), 'qelos_core.ZeroWordEmb', 'q.ZeroWordEmb', (['(10)'], {'worddic': 'dic'}), '(10, worddic=dic)\n', (333, 350), True, 'import qelos_core as q\n'), ((771, 801), 'qelos_core.ZeroWordEmb', 'q.ZeroWordEmb', (['(10)'], {'worddic': 'dic'}), '(10, worddic=dic)\n', (784, 801), True, 'import qelos_core as q\n'), ((885, 911), 'qelos_core.WordEmb', 'q.WordEmb', (['(10)'], {'worddic': 'dic'}), '(10, worddic=dic)\n', (894, 911), True, 'import qelos_core as q\n'), ((1576, 1602), 'qelos_core.WordEmb', 'q.WordEmb', (['(10)'], {'worddic': 'dic'}), '(10, worddic=dic)\n', (1585, 1602), True, 'import qelos_core as q\n'), ((2013, 2039), 'qelos_core.WordEmb', 'q.WordEmb', (['(10)'], {'worddic': 'dic'}), '(10, worddic=dic)\n', (2022, 2039), True, 'import qelos_core as q\n'), ((2777, 2804), 'qelos_core.WordEmb', 'q.WordEmb', (['(50)'], {'worddic': 'wdic'}), '(50, worddic=wdic)\n', (2786, 2804), True, 'import qelos_core as q\n'), ((4424, 4455), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)', '(5, 4)'], {}), '(0, 3, (5, 4))\n', (4441, 4455), True, 'import numpy as np\n'), ((5089, 5120), 'qelos_core.WordEmb', 'q.WordEmb', ([], {'dim': '(50)', 'worddic': 'wdic'}), '(dim=50, worddic=wdic)\n', (5098, 5120), True, 'import qelos_core as q\n'), ((5144, 5179), 'qelos_core.WordEmb', 'q.WordEmb', ([], {'dim': '(50)', 'worddic': 'overwdic'}), '(dim=50, worddic=overwdic)\n', (5153, 5179), True, 'import qelos_core as q\n'), ((7109, 7132), 'qelos_core.PretrainedWordEmb', 'q.PretrainedWordEmb', (['(50)'], {}), '(50)\n', (7128, 7132), True, 'import qelos_core as q\n'), ((7259, 8010), 'numpy.asarray', 'np.asarray', (['[0.418000013, 0.249679998, -0.412420005, 0.121699996, 0.345270008, -\n 0.0444569997, -0.496879995, -0.178619996, -0.000660229998, -0.656599998,\n 0.278430015, -0.147670001, -0.556770027, 0.146579996, -0.00950950012, \n 0.0116579998, 0.10204, -0.127920002, -0.844299972, -0.121809997, -\n 0.0168009996, -0.332789987, -0.155200005, -0.231309995, 
-0.191809997, -\n 1.88230002, -0.767459989, 0.0990509987, -0.421249986, -0.195260003, \n 4.00710011, -0.185939997, -0.522870004, -0.316810012, 0.000592130003, \n 0.00744489999, 0.177780002, -0.158969998, 0.0120409997, -0.054223001, -\n 0.298709989, -0.15749, -0.347579986, -0.0456370004, -0.442510009, \n 0.187849998, 0.0027848999, -0.184110001, -0.115139998, -0.785809994]'], {}), '([0.418000013, 0.249679998, -0.412420005, 0.121699996, \n 0.345270008, -0.0444569997, -0.496879995, -0.178619996, -0.000660229998,\n -0.656599998, 0.278430015, -0.147670001, -0.556770027, 0.146579996, -\n 0.00950950012, 0.0116579998, 0.10204, -0.127920002, -0.844299972, -\n 0.121809997, -0.0168009996, -0.332789987, -0.155200005, -0.231309995, -\n 0.191809997, -1.88230002, -0.767459989, 0.0990509987, -0.421249986, -\n 0.195260003, 4.00710011, -0.185939997, -0.522870004, -0.316810012, \n 0.000592130003, 0.00744489999, 0.177780002, -0.158969998, 0.0120409997,\n -0.054223001, -0.298709989, -0.15749, -0.347579986, -0.0456370004, -\n 0.442510009, 0.187849998, 0.0027848999, -0.184110001, -0.115139998, -\n 0.785809994])\n', (7269, 8010), True, 'import numpy as np\n'), ((8617, 8651), 'qelos_core.PretrainedWordEmb', 'q.PretrainedWordEmb', (['(50)'], {'worddic': 'D'}), '(50, worddic=D)\n', (8636, 8651), True, 'import qelos_core as q\n'), ((9408, 9436), 'qelos_core.WordEmb', 'q.WordEmb', ([], {'dim': '(50)', 'worddic': 'D'}), '(dim=50, worddic=D)\n', (9417, 9436), True, 'import qelos_core as q\n'), ((10595, 10612), 'torch.nn.Linear', 'nn.Linear', (['(10)', '(15)'], {}), '(10, 15)\n', (10604, 10612), False, 'from torch import nn\n'), ((10772, 10836), 'qelos_core.ComputedWordEmb', 'q.ComputedWordEmb', ([], {'data': 'data', 'computer': 'computer', 'worddic': 'worddic'}), '(data=data, computer=computer, worddic=worddic)\n', (10789, 10836), True, 'import qelos_core as q\n'), ((11298, 11329), 'qelos_core.WordEmb', 'q.WordEmb', (['(100)'], {'worddic': 'worddic'}), '(100, worddic=worddic)\n', (11307, 11329), 
True, 'import qelos_core as q\n'), ((11350, 11381), 'qelos_core.WordEmb', 'q.WordEmb', (['(100)'], {'worddic': 'worddic'}), '(100, worddic=worddic)\n', (11359, 11381), True, 'import qelos_core as q\n'), ((12372, 12409), 'qelos_core.ZeroWordLinout', 'q.ZeroWordLinout', (['(10)'], {'worddic': 'worddic'}), '(10, worddic=worddic)\n', (12388, 12409), True, 'import qelos_core as q\n'), ((12918, 12951), 'qelos_core.WordLinout', 'q.WordLinout', (['(10)'], {'worddic': 'worddic'}), '(10, worddic=worddic)\n', (12930, 12951), True, 'import qelos_core as q\n'), ((13382, 13415), 'qelos_core.WordLinout', 'q.WordLinout', (['(10)'], {'worddic': 'worddic'}), '(10, worddic=worddic)\n', (13394, 13415), True, 'import qelos_core as q\n'), ((13928, 13975), 'qelos_core.WordLinout', 'q.WordLinout', (['(10)'], {'worddic': 'worddic', 'cosnorm': '(True)'}), '(10, worddic=worddic, cosnorm=True)\n', (13940, 13975), True, 'import qelos_core as q\n'), ((14012, 14030), 'torch.randn', 'torch.randn', (['(7)', '(10)'], {}), '(7, 10)\n', (14023, 14030), False, 'import torch\n'), ((14891, 14917), 'qelos_core.PretrainedWordLinout', 'q.PretrainedWordLinout', (['(50)'], {}), '(50)\n', (14913, 14917), True, 'import qelos_core as q\n'), ((15044, 15795), 'numpy.asarray', 'np.asarray', (['[0.418000013, 0.249679998, -0.412420005, 0.121699996, 0.345270008, -\n 0.0444569997, -0.496879995, -0.178619996, -0.000660229998, -0.656599998,\n 0.278430015, -0.147670001, -0.556770027, 0.146579996, -0.00950950012, \n 0.0116579998, 0.10204, -0.127920002, -0.844299972, -0.121809997, -\n 0.0168009996, -0.332789987, -0.155200005, -0.231309995, -0.191809997, -\n 1.88230002, -0.767459989, 0.0990509987, -0.421249986, -0.195260003, \n 4.00710011, -0.185939997, -0.522870004, -0.316810012, 0.000592130003, \n 0.00744489999, 0.177780002, -0.158969998, 0.0120409997, -0.054223001, -\n 0.298709989, -0.15749, -0.347579986, -0.0456370004, -0.442510009, \n 0.187849998, 0.0027848999, -0.184110001, -0.115139998, -0.785809994]'], {}), 
'([0.418000013, 0.249679998, -0.412420005, 0.121699996, \n 0.345270008, -0.0444569997, -0.496879995, -0.178619996, -0.000660229998,\n -0.656599998, 0.278430015, -0.147670001, -0.556770027, 0.146579996, -\n 0.00950950012, 0.0116579998, 0.10204, -0.127920002, -0.844299972, -\n 0.121809997, -0.0168009996, -0.332789987, -0.155200005, -0.231309995, -\n 0.191809997, -1.88230002, -0.767459989, 0.0990509987, -0.421249986, -\n 0.195260003, 4.00710011, -0.185939997, -0.522870004, -0.316810012, \n 0.000592130003, 0.00744489999, 0.177780002, -0.158969998, 0.0120409997,\n -0.054223001, -0.298709989, -0.15749, -0.347579986, -0.0456370004, -\n 0.442510009, 0.187849998, 0.0027848999, -0.184110001, -0.115139998, -\n 0.785809994])\n', (15054, 15795), True, 'import numpy as np\n'), ((16559, 16601), 'qelos_core.WordLinout', 'q.WordLinout', (['(10)'], {'worddic': 'wdic', 'bias': '(False)'}), '(10, worddic=wdic, bias=False)\n', (16571, 16601), True, 'import qelos_core as q\n'), ((17852, 17912), 'numpy.stack', 'np.stack', (["[self.adapted % 'the', self.adapted % 'a']"], {'axis': '(0)'}), "([self.adapted % 'the', self.adapted % 'a'], axis=0)\n", (17860, 17912), True, 'import numpy as np\n'), ((18140, 18200), 'numpy.stack', 'np.stack', (["[self.adapted % 'the', self.adapted % 'a']"], {'axis': '(0)'}), "([self.adapted % 'the', self.adapted % 'a'], axis=0)\n", (18148, 18200), True, 'import numpy as np\n'), ((18213, 18231), 'torch.tensor', 'torch.tensor', (['xval'], {}), '(xval)\n', (18225, 18231), False, 'import torch\n'), ((19012, 19054), 'qelos_core.WordLinout', 'q.WordLinout', (['(10)'], {'worddic': 'wdic', 'bias': '(False)'}), '(10, worddic=wdic, bias=False)\n', (19024, 19054), True, 'import qelos_core as q\n'), ((19075, 19118), 'qelos_core.WordLinout', 'q.WordLinout', (['(10)'], {'worddic': 'wdic2', 'bias': '(False)'}), '(10, worddic=wdic2, bias=False)\n', (19087, 19118), True, 'import qelos_core as q\n'), ((20801, 20818), 'torch.nn.Linear', 'nn.Linear', (['(10)', '(15)'], {}), '(10, 
15)\n', (20810, 20818), False, 'from torch import nn\n'), ((20981, 21060), 'qelos_core.ComputedWordLinout', 'q.ComputedWordLinout', ([], {'data': 'data', 'computer': 'computer', 'worddic': 'worddic', 'bias': '(False)'}), '(data=data, computer=computer, worddic=worddic, bias=False)\n', (21001, 21060), True, 'import qelos_core as q\n'), ((24266, 24284), 'torch.randn', 'torch.randn', (['(3)', '(15)'], {}), '(3, 15)\n', (24277, 24284), False, 'import torch\n'), ((24365, 24382), 'torch.tensor', 'torch.tensor', (['msk'], {}), '(msk)\n', (24377, 24382), False, 'import torch\n'), ((26401, 26441), 'qelos_core.WordLinout', 'q.WordLinout', (['(50)'], {'worddic': 'wd', 'bias': '(False)'}), '(50, worddic=wd, bias=False)\n', (26413, 26441), True, 'import qelos_core as q\n'), ((26462, 26502), 'qelos_core.WordLinout', 'q.WordLinout', (['(50)'], {'worddic': 'wd', 'bias': '(False)'}), '(50, worddic=wd, bias=False)\n', (26474, 26502), True, 'import qelos_core as q\n'), ((3487, 3542), 'numpy.allclose', 'np.allclose', (["(self.vanilla % 'the')", "(self.adapted % 'the')"], {}), "(self.vanilla % 'the', self.adapted % 'the')\n", (3498, 3542), True, 'import numpy as np\n'), ((3568, 3623), 'numpy.allclose', 'np.allclose', (["(self.vanilla % 'his')", "(self.adapted % 'his')"], {}), "(self.vanilla % 'his', self.adapted % 'his')\n", (3579, 3623), True, 'import numpy as np\n'), ((4477, 4499), 'torch.from_numpy', 'torch.from_numpy', (['xval'], {}), '(xval)\n', (4493, 4499), False, 'import torch\n'), ((5816, 5840), 'numpy.allclose', 'np.allclose', (['pred', 'gpred'], {}), '(pred, gpred)\n', (5827, 5840), True, 'import numpy as np\n'), ((6200, 6224), 'numpy.allclose', 'np.allclose', (['pred', 'gpred'], {}), '(pred, gpred)\n', (6211, 6224), True, 'import numpy as np\n'), ((6563, 6587), 'numpy.allclose', 'np.allclose', (['pred', 'gpred'], {}), '(pred, gpred)\n', (6574, 6587), True, 'import numpy as np\n'), ((6940, 6964), 'numpy.allclose', 'np.allclose', (['pred', 'gpred'], {}), '(pred, gpred)\n', 
(6951, 6964), True, 'import numpy as np\n'), ((8356, 8390), 'numpy.allclose', 'np.allclose', (['thevector', 'truevector'], {}), '(thevector, truevector)\n', (8367, 8390), True, 'import numpy as np\n'), ((9782, 9812), 'numpy.asarray', 'np.asarray', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (9792, 9812), True, 'import numpy as np\n'), ((10885, 10912), 'torch.LongTensor', 'torch.LongTensor', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (10901, 10912), False, 'import torch\n'), ((11487, 11514), 'torch.LongTensor', 'torch.LongTensor', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (11503, 11514), False, 'import torch\n'), ((11859, 11886), 'torch.LongTensor', 'torch.LongTensor', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (11875, 11886), False, 'import torch\n'), ((12455, 12473), 'torch.randn', 'torch.randn', (['(7)', '(10)'], {}), '(7, 10)\n', (12466, 12473), False, 'import torch\n'), ((12498, 12574), 'torch.FloatTensor', 'torch.FloatTensor', (['([[1, 0, 1, 1, 0, 1, 0]] * 5 + [[0, 1, 0, 0, 1, 0, 1]] * 2)'], {}), '([[1, 0, 1, 1, 0, 1, 0]] * 5 + [[0, 1, 0, 0, 1, 0, 1]] * 2)\n', (12515, 12574), False, 'import torch\n'), ((13014, 13032), 'torch.randn', 'torch.randn', (['(7)', '(10)'], {}), '(7, 10)\n', (13025, 13032), False, 'import torch\n'), ((13057, 13133), 'torch.FloatTensor', 'torch.FloatTensor', (['([[1, 0, 1, 1, 0, 1, 0]] * 5 + [[0, 1, 0, 0, 1, 0, 1]] * 2)'], {}), '([[1, 0, 1, 1, 0, 1, 0]] * 5 + [[0, 1, 0, 0, 1, 0, 1]] * 2)\n', (13074, 13133), False, 'import torch\n'), ((13464, 13482), 'torch.randn', 'torch.randn', (['(7)', '(10)'], {}), '(7, 10)\n', (13475, 13482), False, 'import torch\n'), ((13507, 13583), 'torch.FloatTensor', 'torch.FloatTensor', (['([[1, 0, 1, 1, 0, 1, 0]] * 5 + [[0, 1, 0, 0, 1, 0, 1]] * 2)'], {}), '([[1, 0, 1, 1, 0, 1, 0]] * 5 + [[0, 1, 0, 0, 1, 0, 1]] * 2)\n', (13524, 13583), False, 'import torch\n'), ((16149, 16183), 'numpy.allclose', 'np.allclose', (['thevector', 'truevector'], {}), '(thevector, truevector)\n', (16160, 16183), True, 'import numpy as 
np\n'), ((17352, 17407), 'numpy.allclose', 'np.allclose', (["(self.vanilla % 'the')", "(self.adapted % 'the')"], {}), "(self.vanilla % 'the', self.adapted % 'the')\n", (17363, 17407), True, 'import numpy as np\n'), ((17433, 17488), 'numpy.allclose', 'np.allclose', (["(self.vanilla % 'his')", "(self.adapted % 'his')"], {}), "(self.vanilla % 'his', self.adapted % 'his')\n", (17444, 17488), True, 'import numpy as np\n'), ((17934, 17956), 'torch.from_numpy', 'torch.from_numpy', (['xval'], {}), '(xval)\n', (17950, 17956), False, 'import torch\n'), ((19779, 19819), 'numpy.allclose', 'np.allclose', (['pred[:, 10]', 'overpred[:, 2]'], {}), '(pred[:, 10], overpred[:, 2])\n', (19790, 19819), True, 'import numpy as np\n'), ((19845, 19884), 'numpy.allclose', 'np.allclose', (['pred[:, 5]', 'overpred[:, 3]'], {}), '(pred[:, 5], overpred[:, 3])\n', (19856, 19884), True, 'import numpy as np\n'), ((19910, 19949), 'numpy.allclose', 'np.allclose', (['pred[:, 6]', 'basepred[:, 6]'], {}), '(pred[:, 6], basepred[:, 6])\n', (19921, 19949), True, 'import numpy as np\n'), ((22647, 22668), 'torch.from_numpy', 'torch.from_numpy', (['msk'], {}), '(msk)\n', (22663, 22668), False, 'import torch\n'), ((26641, 26666), 'numpy.random.random', 'np.random.random', (['(5, 50)'], {}), '((5, 50))\n', (26657, 26666), True, 'import numpy as np\n'), ((388, 415), 'torch.LongTensor', 'torch.LongTensor', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (404, 415), False, 'import torch\n'), ((573, 590), 'numpy.zeros', 'np.zeros', (['(3, 10)'], {}), '((3, 10))\n', (581, 590), True, 'import numpy as np\n'), ((974, 1001), 'torch.LongTensor', 'torch.LongTensor', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (990, 1001), False, 'import torch\n'), ((1178, 1193), 'numpy.zeros', 'np.zeros', (['(10,)'], {}), '((10,))\n', (1186, 1193), True, 'import numpy as np\n'), ((1229, 1256), 'torch.LongTensor', 'torch.LongTensor', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (1245, 1256), False, 'import torch\n'), ((1637, 1664), 'torch.LongTensor', 
'torch.LongTensor', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (1653, 1664), False, 'import torch\n'), ((2077, 2104), 'torch.LongTensor', 'torch.LongTensor', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (2093, 2104), False, 'import torch\n'), ((2366, 2381), 'numpy.zeros', 'np.zeros', (['(10,)'], {}), '((10,))\n', (2374, 2381), True, 'import numpy as np\n'), ((4110, 4134), 'torch.LongTensor', 'torch.LongTensor', (['[6, 7]'], {}), '([6, 7])\n', (4126, 4134), False, 'import torch\n'), ((5320, 5351), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)', '(4, 3)'], {}), '(0, 5, (4, 3))\n', (5337, 5351), True, 'import numpy as np\n'), ((10532, 10557), 'numpy.random.random', 'np.random.random', (['(7, 10)'], {}), '((7, 10))\n', (10548, 10557), True, 'import numpy as np\n'), ((20738, 20763), 'numpy.random.random', 'np.random.random', (['(7, 10)'], {}), '((7, 10))\n', (20754, 20763), True, 'import numpy as np\n'), ((22515, 22531), 'numpy.zeros', 'np.zeros', (['(3, 7)'], {}), '((3, 7))\n', (22523, 22531), True, 'import numpy as np\n'), ((24299, 24315), 'numpy.zeros', 'np.zeros', (['(3, 7)'], {}), '((3, 7))\n', (24307, 24315), True, 'import numpy as np\n'), ((9198, 9231), 'numpy.allclose', 'np.allclose', (['subclonemb', 'gloveemb'], {}), '(subclonemb, gloveemb)\n', (9209, 9231), True, 'import numpy as np\n'), ((21109, 21127), 'torch.randn', 'torch.randn', (['(3)', '(15)'], {}), '(3, 15)\n', (21120, 21127), False, 'import torch\n'), ((21521, 21539), 'torch.randn', 'torch.randn', (['(3)', '(15)'], {}), '(3, 15)\n', (21532, 21539), False, 'import torch\n'), ((21968, 21987), 'torch.norm', 'torch.norm', (['x', '(2)', '(1)'], {}), '(x, 2, 1)\n', (21978, 21987), False, 'import torch\n'), ((22386, 22404), 'torch.randn', 'torch.randn', (['(3)', '(15)'], {}), '(3, 15)\n', (22397, 22404), False, 'import torch\n'), ((25692, 25710), 'torch.randn', 'torch.randn', (['(3)', '(15)'], {}), '(3, 15)\n', (25703, 25710), False, 'import torch\n'), ((25741, 25759), 'torch.randn', 'torch.randn', (['(3)', 
'(15)'], {}), '(3, 15)\n', (25752, 25759), False, 'import torch\n'), ((14458, 14476), 'numpy.dot', 'np.dot', (['x[i]', 'w[j]'], {}), '(x[i], w[j])\n', (14464, 14476), True, 'import numpy as np\n'), ((14480, 14503), 'numpy.linalg.norm', 'np.linalg.norm', (['x[i]', '(2)'], {}), '(x[i], 2)\n', (14494, 14503), True, 'import numpy as np\n'), ((14506, 14529), 'numpy.linalg.norm', 'np.linalg.norm', (['w[j]', '(2)'], {}), '(w[j], 2)\n', (14520, 14529), True, 'import numpy as np\n'), ((9031, 9058), 'numpy.asarray', 'np.asarray', (['[subclone.D[k]]'], {}), '([subclone.D[k]])\n', (9041, 9058), True, 'import numpy as np\n'), ((9123, 9152), 'numpy.asarray', 'np.asarray', (['[self.glove.D[k]]'], {}), '([self.glove.D[k]])\n', (9133, 9152), True, 'import numpy as np\n')] |
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import confusion_matrix, roc_auc_score
from category_encoders import MEstimateEncoder
import numpy as np
from collections import defaultdict
def fit_predict(modelo, enc, data, target, test):
    """Fit an encoder + model pipeline and return predictions for ``test``.

    Parameters
    ----------
    modelo : estimator implementing ``fit``/``predict``
    enc : category encoder (transformer) applied before the model
    data : training features
    target : training labels
    test : features to predict on

    Returns
    -------
    Array of predictions produced by the fitted pipeline.
    """
    workflow = Pipeline(steps=[("encoder", enc), ("model", modelo)])
    workflow.fit(data, target)
    predictions = workflow.predict(test)
    return predictions
def auc_group(model, data, y_true, dicc, group: str = "", min_samples: int = 50):
    """Append per-category ROC-AUC scores of ``model`` to ``dicc``.

    Categories are the values of column ``group`` that occur more than
    ``min_samples`` times, plus a synthetic ``"all"`` entry scored on the
    full dataset. If ``dicc`` is empty it is (re)initialized as a
    ``defaultdict(list)`` with one empty list per category; otherwise the
    caller's accumulator is extended in place. Returns the accumulator.
    """
    frame = data.copy()
    frame["target"] = y_true
    counts = frame[group].value_counts()
    categories = counts[counts > min_samples].index.tolist() + ["all"]
    if len(dicc) == 0:
        dicc = defaultdict(list, {cat: [] for cat in categories})
    for cat in categories:
        if cat == "all":
            # Global AUC over the whole dataset (no target column attached).
            dicc[cat].append(roc_auc_score(y_true, model.predict_proba(data)[:, 1]))
        else:
            subset = frame[frame[group] == cat]
            scores = model.predict_proba(subset.drop(columns="target"))[:, 1]
            dicc[cat].append(roc_auc_score(subset["target"], scores))
    return dicc
def explain(xgb: bool = True):
    """
    Provide a SHAP explanation by fitting MEstimate and GBDT.

    If ``xgb`` is True: fit a MEstimateEncoder + GradientBoostingClassifier
    pipeline, draw a SHAP beeswarm plot, and return the per-feature sum of
    absolute SHAP values. Otherwise: fit a logistic-regression pipeline and
    return a DataFrame of (feature, coefficient) sorted by coefficient
    descending.

    NOTE(review): this function relies on module-level globals ``X_tr`` and
    ``y_tr`` and on ``shap`` and ``LogisticRegression`` being imported
    elsewhere — none of them is defined in this file section; confirm they
    exist at call time.
    """
    if xgb:
        pipe = Pipeline(
            [("encoder", MEstimateEncoder()), ("model", GradientBoostingClassifier())]
        )
        pipe.fit(X_tr, y_tr)
        # Explain only the model step; features are encoded by pipe[:-1].
        explainer = shap.Explainer(pipe[1])
        shap_values = explainer(pipe[:-1].transform(X_tr))
        shap.plots.beeswarm(shap_values)
        return pd.DataFrame(np.abs(shap_values.values), columns=X_tr.columns).sum()
    else:
        pipe = Pipeline(
            [("encoder", MEstimateEncoder()), ("model", LogisticRegression())]
        )
        pipe.fit(X_tr, y_tr)
        # Pair each feature name with its fitted coefficient.
        coefficients = pd.concat(
            [pd.DataFrame(X_tr.columns), pd.DataFrame(np.transpose(pipe[1].coef_))],
            axis=1,
        )
        coefficients.columns = ["feat", "val"]
        return coefficients.sort_values(by="val", ascending=False)
def calculate_cm(true, preds):
    """Return the true-positive rate (recall) of the first class label.

    Derivation of TP/FN from a multi-class confusion matrix follows
    https://stackoverflow.com/questions/31324218.

    Note on argument order: callers pass ``(predictions, ground_truth)``
    and the body swaps the arguments again in ``confusion_matrix(preds,
    true)``, so the two swaps cancel out and ``preds`` effectively acts as
    y_true. Parameter names are kept for interface compatibility.

    The original implementation also computed TN/FP-derived rates (TNR,
    PPV, NPV, FPR, FNR, FDR, ACC) that were never used; only the TPR of
    class 0 was ever returned, so the dead computation is removed here.
    """
    cm = confusion_matrix(preds, true)
    # Per-class true positives are the diagonal; false negatives are the
    # remainder of each row.
    TP = np.diag(cm)
    FN = cm.sum(axis=1) - TP
    # Sensitivity / recall / true positive rate per class; only the value
    # for the first class is returned, matching the original contract.
    TPR = TP / (TP + FN)
    return TPR[0]
def metric_calculator(
    modelo, data: pd.DataFrame, truth: pd.DataFrame, col: str, group1: str, group2: str
):
    """Difference in first-class TPR between two demographic groups.

    Splits ``data`` by the value of column ``col`` into ``group1`` and
    ``group2``, predicts with ``modelo`` on each slice, and returns
    ``TPR(group1) - TPR(group2)`` as computed by :func:`calculate_cm`.
    A value of 0 means parity between the groups.
    """
    labelled = data.copy()
    labelled["target"] = truth
    rates = []
    for group in (group1, group2):
        members = data[data[col] == group]
        member_truth = labelled[labelled[col] == group].target
        predictions = modelo.predict(members)
        # calculate_cm expects (predictions, ground_truth) in this order.
        rates.append(calculate_cm(predictions, member_truth))
    return rates[0] - rates[1]
def plot_rolling(data, roll_mean: int = 5, roll_std: int = 20):
    """Plot the rolling mean of every column of ``data`` on one figure.

    Parameters
    ----------
    data : DataFrame whose columns are plotted as separate lines
    roll_mean : window size for the rolling mean that is plotted
    roll_std : window size for the rolling 5%-quantile band (currently
        computed but unused — the fill_between call below is commented out)

    NOTE(review): ``plt`` (matplotlib.pyplot) is not imported in this file
    section; it must be available in the caller's module namespace.
    """
    aux = data.rolling(roll_mean).mean().dropna()
    # Lower 5% quantile band; kept for the (disabled) shaded-band plot below.
    stand = data.rolling(roll_std).quantile(0.05, interpolation="lower").dropna()
    plt.figure()
    for col in data.columns:
        plt.plot(aux[col], label=col)
        # plt.fill_between(aux.index,(aux[col] - stand[col]),(aux[col] + stand[col]),# color="b",alpha=0.1,)
    plt.legend()
    plt.show()
def scale_output(data):
    """Standardize every column of ``data`` (zero mean, unit variance).

    Returns a new DataFrame preserving the original column names and index.

    NOTE(review): ``StandardScaler`` (sklearn.preprocessing) is not imported
    in this file section; confirm it is in scope before calling.
    """
    return pd.DataFrame(
        StandardScaler().fit_transform(data), columns=data.columns, index=data.index
    )
| [
"pandas.DataFrame",
"numpy.abs",
"numpy.transpose",
"sklearn.metrics.roc_auc_score",
"collections.defaultdict",
"sklearn.ensemble.GradientBoostingClassifier",
"category_encoders.MEstimateEncoder",
"sklearn.pipeline.Pipeline",
"sklearn.metrics.confusion_matrix",
"numpy.diag"
] | [((339, 386), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('encoder', enc), ('model', modelo)]"], {}), "([('encoder', enc), ('model', modelo)])\n", (347, 386), False, 'from sklearn.pipeline import Pipeline\n'), ((2183, 2212), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['preds', 'true'], {}), '(preds, true)\n', (2199, 2212), False, 'from sklearn.metrics import confusion_matrix, roc_auc_score\n'), ((2429, 2440), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', (2436, 2440), True, 'import numpy as np\n'), ((731, 771), 'collections.defaultdict', 'defaultdict', (['list', '{k: [] for k in cats}'], {}), '(list, {k: [] for k in cats})\n', (742, 771), False, 'from collections import defaultdict\n'), ((2370, 2381), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', (2377, 2381), True, 'import numpy as np\n'), ((2408, 2419), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', (2415, 2419), True, 'import numpy as np\n'), ((1000, 1027), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['truth', 'preds'], {}), '(truth, preds)\n', (1013, 1027), False, 'from sklearn.metrics import confusion_matrix, roc_auc_score\n'), ((1890, 1916), 'pandas.DataFrame', 'pd.DataFrame', (['X_tr.columns'], {}), '(X_tr.columns)\n', (1902, 1916), True, 'import pandas as pd\n'), ((1361, 1379), 'category_encoders.MEstimateEncoder', 'MEstimateEncoder', ([], {}), '()\n', (1377, 1379), False, 'from category_encoders import MEstimateEncoder\n'), ((1392, 1420), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {}), '()\n', (1418, 1420), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((1634, 1660), 'numpy.abs', 'np.abs', (['shap_values.values'], {}), '(shap_values.values)\n', (1640, 1660), True, 'import numpy as np\n'), ((1750, 1768), 'category_encoders.MEstimateEncoder', 'MEstimateEncoder', ([], {}), '()\n', (1766, 1768), False, 'from category_encoders import MEstimateEncoder\n'), ((1931, 1958), 'numpy.transpose', 'np.transpose', 
(['pipe[1].coef_'], {}), '(pipe[1].coef_)\n', (1943, 1958), True, 'import numpy as np\n')] |
# Training entry point: builds the CIFAR data pipeline, callbacks, model,
# loss and optimizer from `config`, then launches model.fit.
# add the current directory to PYTHONPATH
from pathlib import Path
import sys
import os
path = Path(os.getcwd())
sys.path.append(str(path.parent))
# import the necessary packages
# NOTE(review): train_test_split, cifar10 and tf appear unused in this
# section — possibly needed by code outside this view; verify before removing.
from sklearn.model_selection import train_test_split
from image_classification.callbacks import TrainingMonitor, CosineScheduler
from image_classification.data import DataDispatcher
from image_classification.utils.dispatcher import MODELS
from image_classification.utils import resnet_lr_scheduler
from image_classification.utils import config
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.optimizers import SGD
import tensorflow as tf
import numpy as np
# initialize the training data
dd = DataDispatcher()
train_ds, val_ds = dd.get_train_data()
# compute some additional training constants
# ceil so a final partial batch still counts as a step
steps_per_epoch = np.ceil(dd.num_train_imgs / config.BS)
validation_steps = np.ceil(dd.num_val_imgs / config.BS)
# initialize the callbacks
# learning-rate schedule: cosine decay with warmup, or the step schedule
if config.USE_COSINE:
    lrs = CosineScheduler(config.INIT_LR, steps_per_epoch, config.EPOCHS, warmup=5)
else:
    lrs = LearningRateScheduler(resnet_lr_scheduler)
tm = TrainingMonitor(config.TM_FIG_PATH, json_path=config.TM_JSON_PATH, start_at=config.START_EPOCH)
# checkpoint one weight file per epoch under weights/
mc = ModelCheckpoint(f"weights/{config.MODEL_NAME}" + "_{epoch:03d}.h5")
callbacks = [tm, mc, lrs]
# initialize the model
model = MODELS[config.MODEL_NAME]
# initialize the loss fn (optional label smoothing for regularization)
if config.USE_LBL_SMOOTH:
    loss = CategoricalCrossentropy(label_smoothing=0.1)
else:
    loss = CategoricalCrossentropy()
# initialize the optimizer and compile the model
# NOTE(review): `lr=` is a deprecated alias for `learning_rate=` in recent
# tf.keras versions — confirm the pinned TensorFlow version accepts it.
opt = SGD(lr=config.INIT_LR, momentum=0.9)
model.compile(loss=loss, optimizer=opt, metrics=["accuracy"])
# train the model (initial_epoch supports resuming from a checkpoint)
model.fit(x=train_ds, epochs=config.EPOCHS, steps_per_epoch=steps_per_epoch,
    validation_data=val_ds, validation_steps=validation_steps,
    callbacks=callbacks, initial_epoch=config.START_EPOCH)
| [
"numpy.ceil",
"image_classification.data.DataDispatcher",
"image_classification.callbacks.CosineScheduler",
"os.getcwd",
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.losses.CategoricalCrossentropy",
"tensorflow.keras.callbacks.LearningRateScheduler... | [((833, 849), 'image_classification.data.DataDispatcher', 'DataDispatcher', ([], {}), '()\n', (847, 849), False, 'from image_classification.data import DataDispatcher\n'), ((953, 991), 'numpy.ceil', 'np.ceil', (['(dd.num_train_imgs / config.BS)'], {}), '(dd.num_train_imgs / config.BS)\n', (960, 991), True, 'import numpy as np\n'), ((1011, 1047), 'numpy.ceil', 'np.ceil', (['(dd.num_val_imgs / config.BS)'], {}), '(dd.num_val_imgs / config.BS)\n', (1018, 1047), True, 'import numpy as np\n'), ((1247, 1347), 'image_classification.callbacks.TrainingMonitor', 'TrainingMonitor', (['config.TM_FIG_PATH'], {'json_path': 'config.TM_JSON_PATH', 'start_at': 'config.START_EPOCH'}), '(config.TM_FIG_PATH, json_path=config.TM_JSON_PATH, start_at\n =config.START_EPOCH)\n', (1262, 1347), False, 'from image_classification.callbacks import TrainingMonitor, CosineScheduler\n'), ((1348, 1415), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(f'weights/{config.MODEL_NAME}' + '_{epoch:03d}.h5')"], {}), "(f'weights/{config.MODEL_NAME}' + '_{epoch:03d}.h5')\n", (1363, 1415), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler\n'), ((1707, 1743), 'tensorflow.keras.optimizers.SGD', 'SGD', ([], {'lr': 'config.INIT_LR', 'momentum': '(0.9)'}), '(lr=config.INIT_LR, momentum=0.9)\n', (1710, 1743), False, 'from tensorflow.keras.optimizers import SGD\n'), ((100, 111), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (109, 111), False, 'import os\n'), ((1108, 1181), 'image_classification.callbacks.CosineScheduler', 'CosineScheduler', (['config.INIT_LR', 'steps_per_epoch', 'config.EPOCHS'], {'warmup': '(5)'}), '(config.INIT_LR, steps_per_epoch, config.EPOCHS, warmup=5)\n', (1123, 1181), False, 'from image_classification.callbacks import TrainingMonitor, CosineScheduler\n'), ((1198, 1240), 'tensorflow.keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['resnet_lr_scheduler'], {}), 
'(resnet_lr_scheduler)\n', (1219, 1240), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler\n'), ((1563, 1607), 'tensorflow.keras.losses.CategoricalCrossentropy', 'CategoricalCrossentropy', ([], {'label_smoothing': '(0.1)'}), '(label_smoothing=0.1)\n', (1586, 1607), False, 'from tensorflow.keras.losses import CategoricalCrossentropy\n'), ((1625, 1650), 'tensorflow.keras.losses.CategoricalCrossentropy', 'CategoricalCrossentropy', ([], {}), '()\n', (1648, 1650), False, 'from tensorflow.keras.losses import CategoricalCrossentropy\n')] |
# -----------------------------------------------------------------------------
# Copyright (c) 2015, <NAME>. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
# Demo: polar bar chart with N random bars, jet-colored by radius.
ax = plt.axes([0.025,0.025,0.95,0.95], polar=True)
N = 20
# N evenly spaced angles around the circle
theta = np.arange(0.0, 2*np.pi, 2*np.pi/N)
# random radii in [0, 10) and random angular widths up to pi/4
radii = 10*np.random.rand(N)
width = np.pi/4*np.random.rand(N)
bars = plt.bar(theta, radii, width=width, bottom=0.0)
# color each bar by its (normalized) radius and make it translucent
for r,bar in zip(radii, bars):
    bar.set_facecolor( plt.cm.jet(r/10.))
    bar.set_alpha(0.5)
# hide angular and radial tick labels
ax.set_xticklabels([])
ax.set_yticklabels([])
# savefig('../figures/polar_ex.png',dpi=48)
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.cm.jet",
"matplotlib.pyplot.bar",
"numpy.arange",
"numpy.random.rand"
] | [((342, 390), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.025, 0.025, 0.95, 0.95]'], {'polar': '(True)'}), '([0.025, 0.025, 0.95, 0.95], polar=True)\n', (350, 390), True, 'import matplotlib.pyplot as plt\n'), ((404, 444), 'numpy.arange', 'np.arange', (['(0.0)', '(2 * np.pi)', '(2 * np.pi / N)'], {}), '(0.0, 2 * np.pi, 2 * np.pi / N)\n', (413, 444), True, 'import numpy as np\n'), ((509, 555), 'matplotlib.pyplot.bar', 'plt.bar', (['theta', 'radii'], {'width': 'width', 'bottom': '(0.0)'}), '(theta, radii, width=width, bottom=0.0)\n', (516, 555), True, 'import matplotlib.pyplot as plt\n'), ((744, 754), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (752, 754), True, 'import matplotlib.pyplot as plt\n'), ((450, 467), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (464, 467), True, 'import numpy as np\n'), ((484, 501), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (498, 501), True, 'import numpy as np\n'), ((611, 631), 'matplotlib.pyplot.cm.jet', 'plt.cm.jet', (['(r / 10.0)'], {}), '(r / 10.0)\n', (621, 631), True, 'import matplotlib.pyplot as plt\n')] |
####################
# Import Libraries
####################
import shap
import numpy as np
import pandas as pd
import altair as alt
import streamlit as st
import matplotlib.pyplot as plt
from math import sqrt
from dataclasses import dataclass
from sklearn.metrics import mean_squared_error
####################
# Default Values
####################
X_MIN = 0  # lower bound of the sampled x interval
X_MAX = 1  # upper bound of the sampled x interval
####################
# Weight Data Structure
####################
@dataclass
class Weight:
    """Container for the three user-tunable regression weights.

    The fitted curve is f(x) = w0*x + w1*x**2 + sin(w2*x).
    """

    w0: float  # coefficient of the linear term
    w1: float  # coefficient of the quadratic term
    w2: float  # frequency of the sine term
####################
# Create Dataset
####################
@st.cache
def build_dataset(x_resolution):
    """Sample a noisy ground-truth curve on [X_MIN, X_MAX].

    The underlying signal is 2x + 5x**2 + sin(8x), with additive Gaussian
    noise of std 0.5. Cached by Streamlit so the same points persist while
    the user moves the sliders.

    Returns a DataFrame with columns ``x_source`` and ``y_source``.
    """
    xs = np.linspace(X_MIN, X_MAX, x_resolution)
    signal = np.polynomial.polynomial.polyval(xs, [0, 2, 5]) + np.sin(8 * xs)
    noise = 0.5 * np.random.normal(size=x_resolution)
    return pd.DataFrame({"x_source": xs, "y_source": signal + noise})
####################
# Create Regression
####################
def build_regression(source_df, w: Weight):
    """Evaluate the candidate curve w0*x + w1*x**2 + sin(w2*x).

    Uses the ``x_source`` column of ``source_df`` as the evaluation grid
    and returns a DataFrame with columns ``x_reg`` and ``y_reg``.
    """
    xs = source_df["x_source"].copy()
    polynomial_part = np.polynomial.polynomial.polyval(xs, [0, w.w0, w.w1])
    ys = polynomial_part + np.sin(w.w2 * xs)
    return pd.DataFrame({"x_reg": xs, "y_reg": ys})
####################
# Create Metrics
####################
def build_error(source_df, res_df):
    """Pointwise absolute error between the data and the regression.

    Returns a DataFrame with column ``x`` (the source x grid) and
    ``y_err`` = |y_source - y_reg|.
    """
    residual = source_df["y_source"] - res_df["y_reg"]
    return pd.DataFrame({"x": source_df["x_source"], "y_err": residual.abs()})
def compute_rmse(source_df, res_df):
    """Root-mean-squared error between the data points and the regression.

    Compares ``source_df["y_source"]`` against ``res_df["y_reg"]``.
    Computed directly with numpy instead of round-tripping through
    sklearn's ``mean_squared_error``: the value is identical
    (sqrt(mean((a-b)**2))) and this drops an unnecessary heavy dependency
    for a one-line calculation.
    """
    residual = source_df["y_source"] - res_df["y_reg"]
    return sqrt(np.mean(np.square(residual)))
####################
# Show Header
####################
st.title("Regression")
st.markdown("Play with weights in sidebar and see if you can fit the points.")
st.markdown("$$f(x)=w_0 \\times x+w_1 \\times x^2 + sin(w_2 \\times x)$$")
####################
# Create Sidebar
####################
st.sidebar.subheader("Parameters")
# sliders: (label, min, max, default, step)
xres = st.sidebar.slider("Number of points", 100, 1000, 100, 100)
w0 = st.sidebar.slider("w0", 0.0, 10.0, 1.0, 0.5)
w1 = st.sidebar.slider("w1", 0.0, 10.0, 1.0, 0.5)
w2 = st.sidebar.slider("w2", 0.0, 10.0, 1.0, 0.5)
w = Weight(w0, w1, w2)
####################
# Build Data and Metrics
####################
source_data = build_dataset(xres)
regression_data = build_regression(source_data, w)
error_data = build_error(source_data, regression_data)
rmse = compute_rmse(source_data, regression_data)
####################
# Show Graphs
####################
# Top figure: fitted curve (blue line) over the noisy data (black dots).
# NOTE(review): passing the global `plt` module to st.pyplot is deprecated
# in newer Streamlit versions — confirm the pinned version supports it.
plt.figure(figsize=(5,4))
plt.plot(regression_data.x_reg, regression_data.y_reg, color='blue', linewidth=2.0)
plt.plot(source_data.x_source, source_data.y_source, 'o', color='black', markersize=2)
st.pyplot(plt, bbox_inches='tight')
# Bottom figure: shaded absolute-error profile.
plt.figure(figsize=(5,2))
plt.ylabel("abs error |data - reg|")
plt.fill_between(error_data.x, 0, error_data.y_err)
st.pyplot(plt, bbox_inches='tight')
rmse_text = st.text(f"Current RMSE : {rmse}")
| [
"pandas.DataFrame",
"streamlit.sidebar.slider",
"streamlit.markdown",
"streamlit.sidebar.subheader",
"numpy.abs",
"matplotlib.pyplot.plot",
"streamlit.title",
"matplotlib.pyplot.figure",
"streamlit.text",
"numpy.polynomial.polynomial.polyval",
"numpy.sin",
"streamlit.pyplot",
"numpy.linspace... | [((1641, 1663), 'streamlit.title', 'st.title', (['"""Regression"""'], {}), "('Regression')\n", (1649, 1663), True, 'import streamlit as st\n'), ((1665, 1743), 'streamlit.markdown', 'st.markdown', (['"""Play with weights in sidebar and see if you can fit the points."""'], {}), "('Play with weights in sidebar and see if you can fit the points.')\n", (1676, 1743), True, 'import streamlit as st\n'), ((1744, 1818), 'streamlit.markdown', 'st.markdown', (['"""$$f(x)=w_0 \\\\times x+w_1 \\\\times x^2 + sin(w_2 \\\\times x)$$"""'], {}), "('$$f(x)=w_0 \\\\times x+w_1 \\\\times x^2 + sin(w_2 \\\\times x)$$')\n", (1755, 1818), True, 'import streamlit as st\n'), ((1880, 1914), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""Parameters"""'], {}), "('Parameters')\n", (1900, 1914), True, 'import streamlit as st\n'), ((1922, 1980), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""Number of points"""', '(100)', '(1000)', '(100)', '(100)'], {}), "('Number of points', 100, 1000, 100, 100)\n", (1939, 1980), True, 'import streamlit as st\n'), ((1987, 2031), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""w0"""', '(0.0)', '(10.0)', '(1.0)', '(0.5)'], {}), "('w0', 0.0, 10.0, 1.0, 0.5)\n", (2004, 2031), True, 'import streamlit as st\n'), ((2037, 2081), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""w1"""', '(0.0)', '(10.0)', '(1.0)', '(0.5)'], {}), "('w1', 0.0, 10.0, 1.0, 0.5)\n", (2054, 2081), True, 'import streamlit as st\n'), ((2087, 2131), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""w2"""', '(0.0)', '(10.0)', '(1.0)', '(0.5)'], {}), "('w2', 0.0, 10.0, 1.0, 0.5)\n", (2104, 2131), True, 'import streamlit as st\n'), ((2464, 2490), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4)'}), '(figsize=(5, 4))\n', (2474, 2490), True, 'import matplotlib.pyplot as plt\n'), ((2490, 2577), 'matplotlib.pyplot.plot', 'plt.plot', (['regression_data.x_reg', 'regression_data.y_reg'], {'color': '"""blue"""', 'linewidth': 
'(2.0)'}), "(regression_data.x_reg, regression_data.y_reg, color='blue',\n linewidth=2.0)\n", (2498, 2577), True, 'import matplotlib.pyplot as plt\n'), ((2574, 2664), 'matplotlib.pyplot.plot', 'plt.plot', (['source_data.x_source', 'source_data.y_source', '"""o"""'], {'color': '"""black"""', 'markersize': '(2)'}), "(source_data.x_source, source_data.y_source, 'o', color='black',\n markersize=2)\n", (2582, 2664), True, 'import matplotlib.pyplot as plt\n'), ((2661, 2696), 'streamlit.pyplot', 'st.pyplot', (['plt'], {'bbox_inches': '"""tight"""'}), "(plt, bbox_inches='tight')\n", (2670, 2696), True, 'import streamlit as st\n'), ((2698, 2724), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 2)'}), '(figsize=(5, 2))\n', (2708, 2724), True, 'import matplotlib.pyplot as plt\n'), ((2724, 2760), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""abs error |data - reg|"""'], {}), "('abs error |data - reg|')\n", (2734, 2760), True, 'import matplotlib.pyplot as plt\n'), ((2761, 2812), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['error_data.x', '(0)', 'error_data.y_err'], {}), '(error_data.x, 0, error_data.y_err)\n', (2777, 2812), True, 'import matplotlib.pyplot as plt\n'), ((2813, 2848), 'streamlit.pyplot', 'st.pyplot', (['plt'], {'bbox_inches': '"""tight"""'}), "(plt, bbox_inches='tight')\n", (2822, 2848), True, 'import streamlit as st\n'), ((2862, 2895), 'streamlit.text', 'st.text', (['f"""Current RMSE : {rmse}"""'], {}), "(f'Current RMSE : {rmse}')\n", (2869, 2895), True, 'import streamlit as st\n'), ((629, 668), 'numpy.linspace', 'np.linspace', (['X_MIN', 'X_MAX', 'x_resolution'], {}), '(X_MIN, X_MAX, x_resolution)\n', (640, 668), True, 'import numpy as np\n'), ((849, 907), 'pandas.DataFrame', 'pd.DataFrame', (["{'x_source': X_source, 'y_source': y_source}"], {}), "({'x_source': X_source, 'y_source': y_source})\n", (861, 907), True, 'import pandas as pd\n'), ((1174, 1220), 'pandas.DataFrame', 'pd.DataFrame', (["{'x_reg': X_reg, 'y_reg': y_reg}"], 
{}), "({'x_reg': X_reg, 'y_reg': y_reg})\n", (1186, 1220), True, 'import pandas as pd\n'), ((1332, 1379), 'numpy.abs', 'np.abs', (["(source_df['y_source'] - res_df['y_reg'])"], {}), "(source_df['y_source'] - res_df['y_reg'])\n", (1338, 1379), True, 'import numpy as np\n'), ((1391, 1451), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': source_df['x_source'], 'y_err': y_error}"], {}), "({'x': source_df['x_source'], 'y_err': y_error})\n", (1403, 1451), True, 'import pandas as pd\n'), ((1069, 1125), 'numpy.polynomial.polynomial.polyval', 'np.polynomial.polynomial.polyval', (['X_reg', '[0, w.w0, w.w1]'], {}), '(X_reg, [0, w.w0, w.w1])\n', (1101, 1125), True, 'import numpy as np\n'), ((1128, 1148), 'numpy.sin', 'np.sin', (['(w.w2 * X_reg)'], {}), '(w.w2 * X_reg)\n', (1134, 1148), True, 'import numpy as np\n'), ((1507, 1565), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (["source_df['y_source']", "res_df['y_reg']"], {}), "(source_df['y_source'], res_df['y_reg'])\n", (1525, 1565), False, 'from sklearn.metrics import mean_squared_error\n'), ((694, 747), 'numpy.polynomial.polynomial.polyval', 'np.polynomial.polynomial.polyval', (['X_source', '[0, 2, 5]'], {}), '(X_source, [0, 2, 5])\n', (726, 747), True, 'import numpy as np\n'), ((758, 778), 'numpy.sin', 'np.sin', (['(8 * X_source)'], {}), '(8 * X_source)\n', (764, 778), True, 'import numpy as np\n'), ((795, 830), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'x_resolution'}), '(size=x_resolution)\n', (811, 830), True, 'import numpy as np\n')] |
from __future__ import absolute_import
import numpy as np
from tensorflow.keras import Input
from numpy.testing import assert_almost_equal
import tensorflow.python.keras.backend as K
def get_standart_values_1d_box(n, dc_decomp=True, grad_bounds=False, nb=100):
    """Return sampled 1D test functions with their box domain and bounds.

    Builds ``nb`` samples of a test function ``y = f(x)`` together with:

    - the box domain ``z = [x_min, x_max]``,
    - constant upper/lower bounds ``u_c`` / ``l_c``,
    - affine upper/lower bounds ``(w_u, b_u)`` / ``(w_l, b_l)``,
    - optionally a decomposition ``y = h + g`` with ``h`` non-decreasing and
      ``g`` non-increasing (``dc_decomp=True``).

    :param n: test-case index in [0, 9] selecting the function
    :param dc_decomp: whether to also return the (h, g) decomposition
    :param grad_bounds: unused, kept for backward compatibility
    :param nb: number of samples (the cosine case ``n == 6`` used to require
        ``nb == 100``; it now works for any ``nb``)
    :return: [x, y, z, u_c, w_u, b_u, l_c, w_l, b_l] (+ [h, g] if dc_decomp),
        each with trailing axes expanded for batching
    """
    # default affine bounds: y itself (w=1, b=0) — valid for the identity cases
    w_u_ = np.ones(nb)
    b_u_ = np.zeros(nb)
    w_l_ = np.ones(nb)
    b_l_ = np.zeros(nb)
    if n == 0:
        # identity on [-2, -1]
        y_ = np.linspace(-2, -1, nb)
        x_ = np.linspace(-2, -1, nb)
        h_ = np.linspace(-2, -1, nb)
        g_ = np.zeros_like(x_)
    if n == 1:
        # identity on [1, 2]
        y_ = np.linspace(1, 2, nb)
        x_ = np.linspace(1, 2, nb)
        h_ = np.linspace(1, 2, nb)
        g_ = np.zeros_like(x_)
    if n == 2:
        # identity on [-1, 1]
        y_ = np.linspace(-1, 1, nb)
        x_ = np.linspace(-1, 1, nb)
        h_ = np.linspace(-1, 1, nb)
        g_ = np.zeros_like(x_)
    if n == 3:
        # identity on [-2, -1] with non-trivial decomposition h = 2x, g = -x
        y_ = np.linspace(-2, -1, nb)
        x_ = np.linspace(-2, -1, nb)
        h_ = 2 * np.linspace(-2, -1, nb)
        g_ = -np.linspace(-2, -1, nb)
    if n == 4:
        # identity on [1, 2] with decomposition h = 2x, g = -x
        y_ = np.linspace(1, 2, nb)
        x_ = np.linspace(1, 2, nb)
        h_ = 2 * np.linspace(1, 2, nb)
        g_ = -np.linspace(1, 2, nb)
    if n == 5:
        # identity on [-1, 1] with decomposition h = 2x, g = -x
        y_ = np.linspace(-1, 1, nb)
        x_ = np.linspace(-1, 1, nb)
        h_ = 2 * np.linspace(-1, 1, nb)
        g_ = -np.linspace(-1, 1, nb)
    if n == 6:
        # cosine on [-pi, pi]: h follows cos on the increasing first half,
        # g on the decreasing second half.  Previously hard-coded to
        # nb == 100 samples; the split point is now derived from nb.
        half = nb // 2
        x_ = np.linspace(-np.pi, np.pi, nb)
        y_ = np.cos(x_)
        h_ = np.concatenate([y_[:half], np.ones((nb - half,))]) - 0.5
        g_ = np.concatenate([np.ones((half,)), y_[half:]]) - 0.5
        # cosine is not monotonic: affine bounds degenerate to constants [-1, 1]
        w_u_ = np.zeros_like(x_)
        w_l_ = np.zeros_like(x_)
        b_u_ = np.ones_like(x_)
        b_l_ = -np.ones_like(x_)
    if n == 7:
        # h and g > 0
        h_ = np.linspace(0.5, 2, nb)
        g_ = np.linspace(1, 2, nb)[::-1]
        x_ = h_ + g_
        y_ = h_ + g_
    if n == 8:
        # h < 0 and g < 0, with h_max + g_max <= 0
        h_ = np.linspace(-2, -1, nb)
        g_ = np.linspace(-2, -1, nb)[::-1]
        y_ = h_ + g_
        x_ = h_ + g_
    if n == 9:
        # h > 0 and g < 0, with h_min + g_min >= 0
        h_ = np.linspace(4, 5, nb)
        g_ = np.linspace(-2, -1, nb)[::-1]
        y_ = h_ + g_
        x_ = h_ + g_
    # box domain: constant per-sample lower/upper corners of the interval
    x_min_ = x_.min() + np.zeros_like(x_)
    x_max_ = x_.max() + np.zeros_like(x_)
    x_0_ = np.concatenate([x_min_[:, None], x_max_[:, None]], 1)
    # constant bounds: global min/max of the sampled function
    u_c_ = np.max(y_) * np.ones((nb,))
    l_c_ = np.min(y_) * np.ones((nb,))
    if dc_decomp:
        return [
            x_[:, None],
            y_[:, None],
            x_0_[:, :, None],
            u_c_[:, None],
            w_u_[:, None, None],
            b_u_[:, None],
            l_c_[:, None],
            w_l_[:, None, None],
            b_l_[:, None],
            h_[:, None],
            g_[:, None],
        ]
    return [
        x_[:, None],
        y_[:, None],
        x_0_[:, :, None],
        u_c_[:, None],
        w_u_[:, None, None],
        b_u_[:, None],
        l_c_[:, None],
        w_l_[:, None, None],
        b_l_[:, None],
    ]
def get_tensor_decomposition_1d_box(dc_decomp=True):
    """Return symbolic Keras inputs (x, y, z, u_c, w_u, b_u, l_c, w_l, b_l[, h, g])
    for the 1D box setting."""
    # shapes of every tensor after the leading x placeholder
    tail_shapes = [(1,), (2, 1), (1,), (1, 1), (1,), (1,), (1, 1), (1,)]
    if dc_decomp:
        # the leading input carries the backend float type, and the (h, g)
        # placeholders are appended at the end
        first = Input((1,), dtype=K.floatx())
        tail_shapes = tail_shapes + [(1,), (1,)]
    else:
        first = Input((1,))
    return [first] + [Input(shape) for shape in tail_shapes]
def get_tensor_decomposition_multid_box(odd=1, dc_decomp=True):
    """Return symbolic Keras inputs for the multi-d box setting.

    The dimension is n = 3 when ``odd`` is truthy, else n = 2.
    Order: x, y, z, u, w_u, b_u, l, w_l, b_l (+ h, g when dc_decomp).
    """
    n = 3 if odd else 2
    shapes = [(n,), (n,), (2, n), (n,), (n, n), (n,), (n,), (n, n), (n,)]
    if dc_decomp:
        shapes += [(n,), (n,)]
    return [Input(shape) for shape in shapes]
def get_standard_values_multid_box(odd=1, dc_decomp=True):
    """Build a multi-d test case by linearly mixing the 1D test cases.

    For ``odd == 0`` the output is 2D: ``y = (y_0 + y_1, y_0 + 2 * y_1)``
    built from 1D cases 0 and 1.  For ``odd != 0`` a third, independent
    coordinate from 1D case 2 is appended.  The weight matrices ``w_u``/``w_l``
    carry the Jacobian of that mixing.

    :param odd: 0 -> 2D output, otherwise 3D output
    :param dc_decomp: also build the monotonic (h, g) decomposition
    :return: [x, y, z, u_c, w_u, b_u, l_c, w_l, b_l] (+ [h, g] if dc_decomp)
    """
    if dc_decomp:
        (
            x_0,
            y_0,
            z_0,
            u_c_0,
            w_u_0,
            b_u_0,
            l_c_0,
            w_l_0,
            b_l_0,
            h_0,
            g_0,
        ) = get_standart_values_1d_box(0, dc_decomp)
        (
            x_1,
            y_1,
            z_1,
            u_c_1,
            w_u_1,
            b_u_1,
            l_c_1,
            w_l_1,
            b_l_1,
            h_1,
            g_1,
        ) = get_standart_values_1d_box(1, dc_decomp)
    else:
        (
            x_0,
            y_0,
            z_0,
            u_c_0,
            w_u_0,
            b_u_0,
            l_c_0,
            w_l_0,
            b_l_0,
        ) = get_standart_values_1d_box(0, dc_decomp)
        (
            x_1,
            y_1,
            z_1,
            u_c_1,
            w_u_1,
            b_u_1,
            l_c_1,
            w_l_1,
            b_l_1,
        ) = get_standart_values_1d_box(1, dc_decomp)
    if not odd:
        # output (y_0 + y_1, y_0 + 2 * y_1); bounds and decomposition mix the
        # same way because the combination is linear with positive weights
        x_ = np.concatenate([x_0, x_1], -1)
        z_min_ = np.concatenate([z_0[:, 0], z_1[:, 0]], -1)
        z_max_ = np.concatenate([z_0[:, 1], z_1[:, 1]], -1)
        z_ = np.concatenate([z_min_[:, None], z_max_[:, None]], 1)
        y_ = np.concatenate([y_0 + y_1, y_0 + 2 * y_1], -1)
        b_u_ = np.concatenate([b_u_0 + b_u_1, b_u_0 + 2 * b_u_1], -1)
        u_c_ = np.concatenate([u_c_0 + u_c_1, u_c_0 + 2 * u_c_1], -1)
        b_l_ = np.concatenate([b_l_0 + b_l_1, b_l_0 + 2 * b_l_1], -1)
        l_c_ = np.concatenate([l_c_0 + l_c_1, l_c_0 + 2 * l_c_1], -1)
        if dc_decomp:
            h_ = np.concatenate([h_0 + h_1, h_0 + 2 * h_1], -1)
            g_ = np.concatenate([g_0 + g_1, g_0 + 2 * g_1], -1)
        # Jacobian of (y_0 + y_1, y_0 + 2 * y_1) w.r.t. (x_0, x_1)
        w_u_ = np.zeros((len(x_), 2, 2))
        w_u_[:, 0, 0] = w_u_0[:, 0, 0]
        w_u_[:, 1, 0] = w_u_1[:, 0, 0]
        w_u_[:, 0, 1] = w_u_0[:, 0, 0]
        w_u_[:, 1, 1] = 2 * w_u_1[:, 0, 0]
        w_l_ = np.zeros((len(x_), 2, 2))
        w_l_[:, 0, 0] = w_l_0[:, 0, 0]
        w_l_[:, 1, 0] = w_l_1[:, 0, 0]
        w_l_[:, 0, 1] = w_l_0[:, 0, 0]
        w_l_[:, 1, 1] = 2 * w_l_1[:, 0, 0]
    else:
        # NOTE(review): dc_decomp is not forwarded here; the 1D helper is
        # called with its default (True) and always yields 11 values, so the
        # unpacking below is safe even when dc_decomp is False (h_2/g_2 are
        # then simply ignored).
        (
            x_2,
            y_2,
            z_2,
            u_c_2,
            w_u_2,
            b_u_2,
            l_c_2,
            w_l_2,
            b_l_2,
            h_2,
            g_2,
        ) = get_standart_values_1d_box(2)
        # output (y_0 + y_1, y_0 + 2 * y_1, y_2): third coordinate independent
        x_ = np.concatenate([x_0, x_1, x_2], -1)
        z_min_ = np.concatenate([z_0[:, 0], z_1[:, 0], z_2[:, 0]], -1)
        z_max_ = np.concatenate([z_0[:, 1], z_1[:, 1], z_2[:, 1]], -1)
        z_ = np.concatenate([z_min_[:, None], z_max_[:, None]], 1)
        y_ = np.concatenate([y_0 + y_1, y_0 + 2 * y_1, y_2], -1)
        b_u_ = np.concatenate([b_u_0 + b_u_1, b_u_0 + 2 * b_u_1, b_u_2], -1)
        b_l_ = np.concatenate([b_l_0 + b_l_1, b_l_0 + 2 * b_l_1, b_l_2], -1)
        u_c_ = np.concatenate([u_c_0 + u_c_1, u_c_0 + 2 * u_c_1, u_c_2], -1)
        l_c_ = np.concatenate([l_c_0 + l_c_1, l_c_0 + 2 * l_c_1, l_c_2], -1)
        if dc_decomp:
            h_ = np.concatenate([h_0 + h_1, h_0 + 2 * h_1, h_2], -1)
            g_ = np.concatenate([g_0 + g_1, g_0 + 2 * g_1, g_2], -1)
        # Jacobian, with the third coordinate depending only on x_2
        w_u_ = np.zeros((len(x_), 3, 3))
        w_u_[:, 0, 0] = w_u_0[:, 0, 0]
        w_u_[:, 1, 0] = w_u_1[:, 0, 0]
        w_u_[:, 0, 1] = w_u_0[:, 0, 0]
        w_u_[:, 1, 1] = 2 * w_u_1[:, 0, 0]
        w_u_[:, 2, 2] = w_u_2[:, 0, 0]
        w_l_ = np.zeros((len(x_), 3, 3))
        w_l_[:, 0, 0] = w_l_0[:, 0, 0]
        w_l_[:, 1, 0] = w_l_1[:, 0, 0]
        w_l_[:, 0, 1] = w_l_0[:, 0, 0]
        w_l_[:, 1, 1] = 2 * w_l_1[:, 0, 0]
        w_l_[:, 2, 2] = w_l_2[:, 0, 0]
    if dc_decomp:
        return [x_, y_, z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, h_, g_]
    return [x_, y_, z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
def build_image_from_1D_box(odd=0, m=0, dc_decomp=True):
    """Tile the 1D test case ``m`` into an (n, n) image, pixel i scaled by (i + 1).

    The image side is n = 7 when ``odd`` is truthy, else n = 6.
    """
    n = 7 if odd else 6
    values = get_standart_values_1d_box(m, dc_decomp=dc_decomp)
    if dc_decomp:
        x_, y_0, z_, u_c_0, w_u_0, b_u_0, l_c_0, w_l_0, b_l_0, h_0, g_0 = values
    else:
        x_, y_0, z_, u_c_0, w_u_0, b_u_0, l_c_0, w_l_0, b_l_0 = values

    # per-pixel multiplier (i + 1) for i in [0, n*n)
    scale = np.arange(1, n * n + 1)

    def tile(column):
        # column has shape (batch, 1): broadcast against `scale` to get
        # (batch, n * n), then fold the pixel axis into an (n, n) image
        return (column * scale).reshape((-1, n, n))

    y_ = tile(y_0)
    b_u_ = tile(b_u_0)
    b_l_ = tile(b_l_0)
    u_c_ = tile(u_c_0)
    l_c_ = tile(l_c_0)
    # weights: same (i + 1) scaling of the single 1D weight, one entry per pixel
    w_u_ = (w_u_0[:, 0, 0][:, None, None] * scale).reshape((-1, 1, n, n))
    w_l_ = (w_l_0[:, 0, 0][:, None, None] * scale).reshape((-1, 1, n, n))

    if dc_decomp:
        h_ = tile(h_0)
        g_ = tile(g_0)
        return [x_, y_, z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, h_, g_]
    return [x_, y_, z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
def build_image_from_2D_box(odd=0, m0=0, m1=1, dc_decomp=True):
    """Combine two tiled 1D test cases (``m0``, ``m1``) into a two-input image case."""
    box_0 = build_image_from_1D_box(odd, m0, dc_decomp)
    box_1 = build_image_from_1D_box(odd, m1, dc_decomp)
    x_0, y_0, z_0, u_c_0, w_u_0, b_u_0, l_c_0, w_l_0, b_l_0 = box_0[:9]
    x_1, y_1, z_1, u_c_1, w_u_1, b_u_1, l_c_1, w_l_1, b_l_1 = box_1[:9]

    # the input space is the concatenation of both 1D inputs
    x_ = np.concatenate([x_0, x_1], -1)
    z_min_ = np.concatenate([z_0[:, 0], z_1[:, 0]], -1)
    z_max_ = np.concatenate([z_0[:, 1], z_1[:, 1]], -1)
    z_ = np.concatenate([z_min_[:, None], z_max_[:, None]], 1)

    # outputs and bound offsets add up; weights stack along the input axis
    y_ = y_0 + y_1
    b_u_ = b_u_0 + b_u_1
    b_l_ = b_l_0 + b_l_1
    u_c_ = u_c_0 + u_c_1
    l_c_ = l_c_0 + l_c_1
    w_u_ = np.concatenate([w_u_0, w_u_1], 1)
    w_l_ = np.concatenate([w_l_0, w_l_1], 1)

    if not dc_decomp:
        return [x_, y_, z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]

    h_ = box_0[9] + box_1[9]
    g_ = box_0[10] + box_1[10]
    # sanity-check the combined case before returning it
    assert_output_properties_box(
        x_,
        h_ + g_,
        h_,
        g_,
        z_[:, 0],
        z_[:, 1],
        u_c_,
        w_u_,
        b_u_,
        l_c_,
        w_l_,
        b_l_,
        "image from 2D",
    )
    return [x_, y_, z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, h_, g_]
def get_standard_values_images_box(data_format="channels_last", odd=0, m0=0, m1=1, dc_decomp=True):
    """Numerical test data for the image setting, duplicated on two channels.

    Builds the two-input image case from build_image_from_2D_box and
    duplicates it along a channel axis.  For ``channels_first`` the
    ``channels_last`` result is built recursively and then transposed.

    :param data_format: "channels_last" or "channels_first"
    :param odd: image side is 7 when truthy, else 6
    :param m0: index of the 1D test case feeding the first input
    :param m1: index of the 1D test case feeding the second input
    :param dc_decomp: include the monotonic (h, g) decomposition
    :return: [x, y, z, u_c, w_u, b_u, l_c, w_l, b_l] (+ [h, g] if dc_decomp)
    """
    output = build_image_from_2D_box(odd, m0, m1, dc_decomp)
    if dc_decomp:
        x_0, y_0, z_0, u_c_0, w_u_0, b_u_0, l_c_0, w_l_0, b_l_0, h_0, g_0 = output
    else:
        x_0, y_0, z_0, u_c_0, w_u_0, b_u_0, l_c_0, w_l_0, b_l_0 = output
    x_ = x_0
    z_ = z_0
    z_min_ = z_0[:, 0]
    z_max_ = z_0[:, 1]
    if data_format == "channels_last":
        # append a trailing channel axis ...
        y_0 = y_0[:, :, :, None]
        b_u_0 = b_u_0[:, :, :, None]
        b_l_0 = b_l_0[:, :, :, None]
        u_c_0 = u_c_0[:, :, :, None]
        l_c_0 = l_c_0[:, :, :, None]
        w_u_0 = w_u_0[:, :, :, :, None]
        w_l_0 = w_l_0[:, :, :, :, None]
        # ... then duplicate the single channel into two identical channels
        y_ = np.concatenate([y_0, y_0], -1)
        b_u_ = np.concatenate([b_u_0, b_u_0], -1)
        b_l_ = np.concatenate([b_l_0, b_l_0], -1)
        u_c_ = np.concatenate([u_c_0, u_c_0], -1)
        l_c_ = np.concatenate([l_c_0, l_c_0], -1)
        w_u_ = np.concatenate([w_u_0, w_u_0], -1)
        w_l_ = np.concatenate([w_l_0, w_l_0], -1)
        if dc_decomp:
            h_0 = h_0[:, :, :, None]
            g_0 = g_0[:, :, :, None]
            h_ = np.concatenate([h_0, h_0], -1)
            g_ = np.concatenate([g_0, g_0], -1)
            # sanity-check the generated data
            assert_output_properties_box(
                x_,
                y_,
                h_,
                g_,
                z_min_,
                z_max_,
                u_c_,
                w_u_,
                b_u_,
                l_c_,
                w_l_,
                b_l_,
                "images {},{},{},{}".format(data_format, odd, m0, m1),
            )
    else:
        # channels_first: build the channels_last variant, then move the
        # channel axis right after the batch axis
        output = get_standard_values_images_box(data_format="channels_last", odd=odd, m0=m0, m1=m1, dc_decomp=dc_decomp)
        x_, y_, z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_ = output[:9]
        if dc_decomp:
            h_, g_ = output[-2:]
            h_ = np.transpose(h_, (0, 3, 1, 2))
            g_ = np.transpose(g_, (0, 3, 1, 2))
        y_ = np.transpose(y_, (0, 3, 1, 2))
        u_c_ = np.transpose(u_c_, (0, 3, 1, 2))
        l_c_ = np.transpose(l_c_, (0, 3, 1, 2))
        b_u_ = np.transpose(b_u_, (0, 3, 1, 2))
        b_l_ = np.transpose(b_l_, (0, 3, 1, 2))
        # the weights keep their leading (batch, input) axes; only the image
        # axes are reordered
        w_u_ = np.transpose(w_u_, (0, 1, 4, 2, 3))
        w_l_ = np.transpose(w_l_, (0, 1, 4, 2, 3))
    if dc_decomp:
        return [x_, y_, z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, h_, g_]
    else:
        return [x_, y_, z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
def get_tensor_decomposition_images_box(data_format, odd, dc_decomp=True):
    """Return symbolic Keras inputs for the image test cases.

    Order: x, y, z, u_c, w_u, b_u, l_c, w_l, b_l (+ h, g when dc_decomp).

    :param data_format: "channels_last" or "channels_first"
    :param odd: image side is 7 when truthy, else 6
    :param dc_decomp: append placeholders for the (h, g) decomposition
    """
    n = 7 if odd else 6
    if data_format == "channels_last":
        value_shape = (n, n, 2)
        weight_shape = (2, n, n, 2)
    else:
        value_shape = (2, n, n)
        weight_shape = (2, 2, n, n)
    output = [
        Input((2,)),
        Input(value_shape),
        Input((2, 2)),
        Input(value_shape),
        Input(weight_shape),
        Input(value_shape),
        Input(value_shape),
        Input(weight_shape),
        Input(value_shape),
    ]
    if dc_decomp:
        # BUGFIX: the h/g placeholders previously used the channels_last
        # shape (n, n, 2) even in channels_first mode; they now follow
        # data_format, matching the transposed h_/g_ produced by
        # get_standard_values_images_box.
        output += [Input(value_shape), Input(value_shape)]
    return output
def assert_output_properties_box(x_, y_, h_, g_, x_min_, x_max_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, name, decimal=5):
    """Assert that a generated test case is internally consistent.

    Checks (each up to ``decimal`` digits via ``assert_almost_equal``):
    - ``y = h + g`` when the decomposition is provided,
    - the samples ``x`` lie inside the box ``[x_min, x_max]``,
    - ``h`` is non-decreasing and ``g`` non-increasing along the samples,
    - the constant bounds ``l_c``/``u_c`` and the affine bounds
      ``(w_l, b_l)``/``(w_u, b_u)`` bracket ``y`` pointwise.

    Passing ``None`` for ``y_``, ``h_``, ``w_u_`` or ``l_c_`` skips the
    corresponding checks.  ``name`` is only used in error messages.
    """
    if y_ is None:
        y_ = h_ + g_
    if h_ is not None:
        assert_almost_equal(
            h_ + g_,
            y_,
            decimal=decimal,
            err_msg="decomposition error for function {}".format(name),
        )
    # the samples must lie inside the box domain
    assert np.min(x_min_ <= x_max_), "x_min >x_max for function {}".format(name)
    assert_almost_equal(
        np.clip(x_min_ - x_, 0, np.inf),
        0.0,
        decimal=decimal,
        err_msg="x_min >x_ for function {}".format(name),
    )
    assert_almost_equal(
        np.clip(x_ - x_max_, 0, np.inf),
        0.0,
        decimal=decimal,
        err_msg="x_max < x_ for function {}".format(name),
    )
    if w_u_ is not None:
        # broadcast x against the weight tensors, then evaluate the affine
        # bounds at the samples themselves
        x_expand = x_ + np.zeros_like(x_)
        n_expand = len(w_u_.shape) - len(x_expand.shape)
        for i in range(n_expand):
            x_expand = np.expand_dims(x_expand, -1)
        lower_ = np.sum(w_l_ * x_expand, 1) + b_l_
        upper_ = np.sum(w_u_ * x_expand, 1) + b_u_
    # check that the functions h_ and g_ remain monotonic (consecutive samples)
    if h_ is not None:
        assert_almost_equal(
            np.clip(h_[:-1] - h_[1:], 0, np.inf),
            np.zeros_like(h_[1:]),
            decimal=decimal,
            err_msg="h is not increasing for function {}".format(name),
        )
        assert_almost_equal(
            np.clip(g_[1:] - g_[:-1], 0, np.inf),
            np.zeros_like(g_[1:]),
            decimal=decimal,
            err_msg="g is not increasing for function {}".format(name),
        )
    # affine bounds must bracket y pointwise
    if w_u_ is not None:
        assert_almost_equal(
            np.clip(lower_ - y_, 0.0, np.inf),
            np.zeros_like(y_),
            decimal=decimal,
            err_msg="lower_ >y",
        )
        assert_almost_equal(
            np.clip(y_ - upper_, 0.0, 1e6),
            np.zeros_like(y_),
            decimal=decimal,
            err_msg="upper <y",
        )
    # constant bounds must bracket y pointwise
    if l_c_ is not None:
        assert_almost_equal(
            np.clip(l_c_ - y_, 0.0, np.inf),
            np.zeros_like(y_),
            decimal=decimal,
            err_msg="l_c >y",
        )
        assert_almost_equal(
            np.clip(y_ - u_c_, 0.0, 1e6),
            np.zeros_like(y_),
            decimal=decimal,
            err_msg="u_c <y",
        )
    # evaluate the affine bounds at the worst corners of the box domain
    if w_u_ is not None and l_c_ is not None:
        x_expand_min = x_min_ + np.zeros_like(x_)
        x_expand_max = x_max_ + np.zeros_like(x_)
        n_expand = len(w_u_.shape) - len(x_expand_min.shape)
        for i in range(n_expand):
            x_expand_min = np.expand_dims(x_expand_min, -1)
            x_expand_max = np.expand_dims(x_expand_max, -1)
        lower_ = np.sum(np.maximum(0, w_l_) * x_expand_min, 1) + np.sum(np.minimum(0, w_l_) * x_expand_max, 1) + b_l_
        upper_ = np.sum(np.maximum(0, w_u_) * x_expand_max, 1) + np.sum(np.minimum(0, w_u_) * x_expand_min, 1) + b_u_
    # NOTE: the domain-wide comparisons below are currently disabled; the
    # lower_/upper_ values above are computed only for them
    """
    assert_almost_equal(
        np.clip(lower_.min(0) - l_c_.max(0), 0.0, np.inf),
        np.zeros_like(y_.min(0)),
        decimal=decimal,
        err_msg="lower_ >l_c",
    )
    assert_almost_equal(
        np.clip(u_c_.min(0) - upper_.max(0), 0.0, 1e6),
        np.zeros_like(y_.min(0)),
        decimal=decimal,
        err_msg="upper <u_c",
    )
    """
def assert_output_properties_box_linear(x_, y_, x_min_, x_max_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, name, decimal=5):
    """Assert domain and bound consistency, without the (h, g) decomposition.

    All tensors are flattened to (batch, -1) first so the checks are
    shape-agnostic.  Passing ``None`` for ``y_``, ``l_c_`` or ``w_u_`` skips
    the corresponding checks.  ``name`` is only used in error messages.
    """
    # flatten everything to (batch, -1)
    n = len(x_)
    if y_ is not None:
        n = len(y_)
        y_ = y_.reshape((n, -1))
    if l_c_ is not None:
        u_c_ = u_c_.reshape((n, -1))
        l_c_ = l_c_.reshape((n, -1))
    if w_u_ is not None:
        w_u_ = w_u_.reshape((n, w_u_.shape[1], -1))
        w_l_ = w_l_.reshape((n, w_l_.shape[1], -1))
        b_u_ = b_u_.reshape((n, -1))
        b_l_ = b_l_.reshape((n, -1))
    # the samples must lie inside the box domain
    assert np.min(x_min_ <= x_max_), "x_min >x_max for function {}".format(name)
    assert_almost_equal(
        np.clip(x_min_ - x_, 0, np.inf), 0.0, decimal=decimal, err_msg="x_min >x_ for function {}".format(name)
    )
    assert_almost_equal(
        np.clip(x_ - x_max_, 0, np.inf), 0.0, decimal=decimal, err_msg="x_max < x_ for function {}".format(name)
    )
    if w_u_ is not None:
        # broadcast x against the weight tensors and evaluate the affine
        # bounds at the samples themselves
        x_expand = x_ + np.zeros_like(x_)
        n_expand = len(w_u_.shape) - len(x_expand.shape)
        for i in range(n_expand):
            x_expand = np.expand_dims(x_expand, -1)
        lower_ = np.sum(w_l_ * x_expand, 1) + b_l_
        upper_ = np.sum(w_u_ * x_expand, 1) + b_u_
    # constant and affine bounds must bracket y pointwise
    if y_ is not None:
        if l_c_ is not None:
            assert_almost_equal(np.clip(l_c_ - y_, 0.0, np.inf), np.zeros_like(y_), decimal=decimal, err_msg="l_c >y")
            assert_almost_equal(np.clip(y_ - u_c_, 0.0, 1e6), np.zeros_like(y_), decimal=decimal, err_msg="u_c <y")
        if w_u_ is not None:
            assert_almost_equal(
                np.clip(lower_ - y_, 0.0, np.inf), np.zeros_like(y_), decimal=decimal, err_msg="lower_ >y"
            )
            assert_almost_equal(np.clip(y_ - upper_, 0.0, 1e6), np.zeros_like(y_), decimal=decimal, err_msg="upper <y")
    # evaluate the affine bounds at the worst corners of the box domain:
    # positive weights take one corner, negative weights the opposite one
    if w_u_ is not None:
        x_expand_min = x_min_ + np.zeros_like(x_)
        x_expand_max = x_max_ + np.zeros_like(x_)
        n_expand = len(w_u_.shape) - len(x_expand_min.shape)
        for i in range(n_expand):
            x_expand_min = np.expand_dims(x_expand_min, -1)
            x_expand_max = np.expand_dims(x_expand_max, -1)
        lower_ = np.sum(np.maximum(0, w_l_) * x_expand_min, 1) + np.sum(np.minimum(0, w_l_) * x_expand_max, 1) + b_l_
        upper_ = np.sum(np.maximum(0, w_u_) * x_expand_max, 1) + np.sum(np.minimum(0, w_u_) * x_expand_min, 1) + b_u_
        if y_ is not None:
            assert_almost_equal(np.clip(lower_ - y_, 0.0, np.inf), np.zeros_like(y_), decimal=decimal, err_msg="l_c >y")
            assert_almost_equal(np.clip(y_ - upper_, 0.0, 1e6), np.zeros_like(y_), decimal=decimal, err_msg="u_c <y")
# multi decomposition for convert
def get_standard_values_multid_box_convert(odd=1, dc_decomp=True):
    """Multi-d test data with *independent* coordinates (for convert tests).

    Unlike get_standard_values_multid_box, the coordinates are not mixed:
    the output stacks the 1D cases side by side, so the weight matrices are
    diagonal.

    NOTE(review): with ``dc_decomp=True`` the second coordinate comes from
    1D case 3, but with ``dc_decomp=False`` it comes from case 1 — the two
    modes therefore sample different functions; confirm this asymmetry is
    intentional.

    :param odd: 0 -> 2D output, otherwise 3D output
    :param dc_decomp: also build the monotonic (h, g) decomposition
    :return: [x, y, z, u_c, w_u, b_u, l_c, w_l, b_l] (+ [h, g] if dc_decomp)
    """
    if dc_decomp:
        (
            x_0,
            y_0,
            z_0,
            u_c_0,
            w_u_0,
            b_u_0,
            l_c_0,
            w_l_0,
            b_l_0,
            h_0,
            g_0,
        ) = get_standart_values_1d_box(0, dc_decomp)
        (
            x_1,
            y_1,
            z_1,
            u_c_1,
            w_u_1,
            b_u_1,
            l_c_1,
            w_l_1,
            b_l_1,
            h_1,
            g_1,
        ) = get_standart_values_1d_box(3, dc_decomp)
    else:
        (
            x_0,
            y_0,
            z_0,
            u_c_0,
            w_u_0,
            b_u_0,
            l_c_0,
            w_l_0,
            b_l_0,
        ) = get_standart_values_1d_box(0, dc_decomp)
        (
            x_1,
            y_1,
            z_1,
            u_c_1,
            w_u_1,
            b_u_1,
            l_c_1,
            w_l_1,
            b_l_1,
        ) = get_standart_values_1d_box(1, dc_decomp)
    if not odd:
        # 2D output (y_0, y_1): plain concatenation, no mixing
        x_ = np.concatenate([x_0, x_1], -1)
        z_min_ = np.concatenate([z_0[:, 0], z_1[:, 0]], -1)
        z_max_ = np.concatenate([z_0[:, 1], z_1[:, 1]], -1)
        z_ = np.concatenate([z_min_[:, None], z_max_[:, None]], 1)
        y_ = np.concatenate([y_0, y_1], -1)
        b_u_ = np.concatenate([b_u_0, b_u_1], -1)
        u_c_ = np.concatenate([u_c_0, u_c_1], -1)
        b_l_ = np.concatenate([b_l_0, b_l_1], -1)
        l_c_ = np.concatenate([l_c_0, l_c_1], -1)
        if dc_decomp:
            h_ = np.concatenate([h_0, h_1], -1)
            g_ = np.concatenate([g_0, g_1], -1)
        # diagonal weights: each output depends only on its own input
        w_u_ = np.zeros((len(x_), 2, 2))
        w_u_[:, 0, 0] = w_u_0[:, 0, 0]
        w_u_[:, 1, 1] = w_u_1[:, 0, 0]
        w_l_ = np.zeros((len(x_), 2, 2))
        w_l_[:, 0, 0] = w_l_0[:, 0, 0]
        w_l_[:, 1, 1] = w_l_1[:, 0, 0]
    else:
        if dc_decomp:
            (
                x_2,
                y_2,
                z_2,
                u_c_2,
                w_u_2,
                b_u_2,
                l_c_2,
                w_l_2,
                b_l_2,
                h_2,
                g_2,
            ) = get_standart_values_1d_box(2, dc_decomp)
        else:
            (x_2, y_2, z_2, u_c_2, w_u_2, b_u_2, l_c_2, w_l_2, b_l_2) = get_standart_values_1d_box(2, dc_decomp)
        # 3D output (y_0, y_1, y_2): plain concatenation, no mixing
        x_ = np.concatenate([x_0, x_1, x_2], -1)
        z_min_ = np.concatenate([z_0[:, 0], z_1[:, 0], z_2[:, 0]], -1)
        z_max_ = np.concatenate([z_0[:, 1], z_1[:, 1], z_2[:, 1]], -1)
        z_ = np.concatenate([z_min_[:, None], z_max_[:, None]], 1)
        y_ = np.concatenate([y_0, y_1, y_2], -1)
        b_u_ = np.concatenate([b_u_0, b_u_1, b_u_2], -1)
        b_l_ = np.concatenate([b_l_0, b_l_1, b_l_2], -1)
        u_c_ = np.concatenate([u_c_0, u_c_1, u_c_2], -1)
        l_c_ = np.concatenate([l_c_0, l_c_1, l_c_2], -1)
        if dc_decomp:
            h_ = np.concatenate([h_0, h_1, h_2], -1)
            g_ = np.concatenate([g_0, g_1, g_2], -1)
        # diagonal weights: each output depends only on its own input
        w_u_ = np.zeros((len(x_), 3, 3))
        w_u_[:, 0, 0] = w_u_0[:, 0, 0]
        w_u_[:, 1, 1] = w_u_1[:, 0, 0]
        w_u_[:, 2, 2] = w_u_2[:, 0, 0]
        w_l_ = np.zeros((len(x_), 3, 3))
        w_l_[:, 0, 0] = w_l_0[:, 0, 0]
        w_l_[:, 1, 1] = w_l_1[:, 0, 0]
        w_l_[:, 2, 2] = w_l_2[:, 0, 0]
    if dc_decomp:
        return [x_, y_, z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, h_, g_]
    return [x_, y_, z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
def assert_output_properties_box_nodc(x_, y_, x_min_, x_max_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, name, decimal=5):
    """Check the box domain and the constant/affine bounds around ``y_``.

    Same checks as assert_output_properties_box but without the monotonic
    (h, g) decomposition and with no optional arguments.  Raises
    ``AssertionError`` on the first violated property; returns ``None``
    otherwise.  ``name`` is only used in error messages.
    """

    def _align(values):
        # broadcast `values` so it can multiply the weight tensors elementwise
        aligned = values + np.zeros_like(x_)
        for _ in range(len(w_u_.shape) - len(aligned.shape)):
            aligned = np.expand_dims(aligned, -1)
        return aligned

    # the samples must lie inside the box [x_min, x_max]
    assert np.min(x_min_ <= x_max_), "x_min >x_max for function {}".format(name)
    assert_almost_equal(
        np.clip(x_min_ - x_, 0, np.inf),
        0.0,
        decimal=decimal,
        err_msg="x_min >x_ for function {}".format(name),
    )
    assert_almost_equal(
        np.clip(x_ - x_max_, 0, np.inf),
        0.0,
        decimal=decimal,
        err_msg="x_max < x_ for function {}".format(name),
    )

    # affine bounds evaluated at the samples themselves
    x_aligned = _align(x_)
    affine_lower = np.sum(w_l_ * x_aligned, 1) + b_l_
    affine_upper = np.sum(w_u_ * x_aligned, 1) + b_u_

    # constant bounds bracket y pointwise
    assert_almost_equal(
        np.clip(l_c_ - y_, 0.0, np.inf),
        np.zeros_like(y_),
        decimal=decimal,
        err_msg="l_c >y",
    )
    assert_almost_equal(
        np.clip(y_ - u_c_, 0.0, 1e6),
        np.zeros_like(y_),
        decimal=decimal,
        err_msg="u_c <y",
    )
    # affine bounds bracket y pointwise
    assert_almost_equal(
        np.clip(affine_lower - y_, 0.0, np.inf),
        np.zeros_like(y_),
        decimal=decimal,
        err_msg="lower_ >y",
    )
    assert_almost_equal(
        np.clip(y_ - affine_upper, 0.0, 1e6),
        np.zeros_like(y_),
        decimal=decimal,
        err_msg="upper <y",
    )

    # evaluate the affine bounds at the worst corners of the box: positive
    # weights take one corner, negative weights the opposite one
    min_aligned = _align(x_min_)
    max_aligned = _align(x_max_)
    domain_lower = np.sum(np.maximum(0, w_l_) * min_aligned, 1) + np.sum(np.minimum(0, w_l_) * max_aligned, 1) + b_l_
    domain_upper = np.sum(np.maximum(0, w_u_) * max_aligned, 1) + np.sum(np.minimum(0, w_u_) * min_aligned, 1) + b_u_

    # the domain-wide affine bounds must be consistent with the constant ones
    assert_almost_equal(
        np.clip(domain_lower.min(0) - l_c_.max(0), 0.0, np.inf),
        np.zeros_like(y_.min(0)),
        decimal=decimal,
        err_msg="lower_ >l_c",
    )
    assert_almost_equal(
        np.clip(u_c_.min(0) - domain_upper.max(0), 0.0, 1e6),
        np.zeros_like(y_.min(0)),
        decimal=decimal,
        err_msg="upper <u_c",
    )
| [
"numpy.zeros_like",
"numpy.ones_like",
"numpy.sum",
"numpy.maximum",
"numpy.minimum",
"tensorflow.keras.Input",
"numpy.zeros",
"numpy.ones",
"numpy.transpose",
"numpy.clip",
"numpy.expand_dims",
"tensorflow.python.keras.backend.floatx",
"numpy.min",
"numpy.max",
"numpy.linspace",
"nump... | [((511, 522), 'numpy.ones', 'np.ones', (['nb'], {}), '(nb)\n', (518, 522), True, 'import numpy as np\n'), ((534, 546), 'numpy.zeros', 'np.zeros', (['nb'], {}), '(nb)\n', (542, 546), True, 'import numpy as np\n'), ((558, 569), 'numpy.ones', 'np.ones', (['nb'], {}), '(nb)\n', (565, 569), True, 'import numpy as np\n'), ((581, 593), 'numpy.zeros', 'np.zeros', (['nb'], {}), '(nb)\n', (589, 593), True, 'import numpy as np\n'), ((2637, 2690), 'numpy.concatenate', 'np.concatenate', (['[x_min_[:, None], x_max_[:, None]]', '(1)'], {}), '([x_min_[:, None], x_max_[:, None]], 1)\n', (2651, 2690), True, 'import numpy as np\n'), ((11500, 11530), 'numpy.concatenate', 'np.concatenate', (['[x_0, x_1]', '(-1)'], {}), '([x_0, x_1], -1)\n', (11514, 11530), True, 'import numpy as np\n'), ((11544, 11586), 'numpy.concatenate', 'np.concatenate', (['[z_0[:, 0], z_1[:, 0]]', '(-1)'], {}), '([z_0[:, 0], z_1[:, 0]], -1)\n', (11558, 11586), True, 'import numpy as np\n'), ((11600, 11642), 'numpy.concatenate', 'np.concatenate', (['[z_0[:, 1], z_1[:, 1]]', '(-1)'], {}), '([z_0[:, 1], z_1[:, 1]], -1)\n', (11614, 11642), True, 'import numpy as np\n'), ((11652, 11705), 'numpy.concatenate', 'np.concatenate', (['[z_min_[:, None], z_max_[:, None]]', '(1)'], {}), '([z_min_[:, None], z_max_[:, None]], 1)\n', (11666, 11705), True, 'import numpy as np\n'), ((11838, 11871), 'numpy.concatenate', 'np.concatenate', (['[w_u_0, w_u_1]', '(1)'], {}), '([w_u_0, w_u_1], 1)\n', (11852, 11871), True, 'import numpy as np\n'), ((11883, 11916), 'numpy.concatenate', 'np.concatenate', (['[w_l_0, w_l_1]', '(1)'], {}), '([w_l_0, w_l_1], 1)\n', (11897, 11916), True, 'import numpy as np\n'), ((16293, 16317), 'numpy.min', 'np.min', (['(x_min_ <= x_max_)'], {}), '(x_min_ <= x_max_)\n', (16299, 16317), True, 'import numpy as np\n'), ((20073, 20097), 'numpy.min', 'np.min', (['(x_min_ <= x_max_)'], {}), '(x_min_ <= x_max_)\n', (20079, 20097), True, 'import numpy as np\n'), ((26451, 26475), 'numpy.min', 'np.min', 
(['(x_min_ <= x_max_)'], {}), '(x_min_ <= x_max_)\n', (26457, 26475), True, 'import numpy as np\n'), ((642, 665), 'numpy.linspace', 'np.linspace', (['(-2)', '(-1)', 'nb'], {}), '(-2, -1, nb)\n', (653, 665), True, 'import numpy as np\n'), ((679, 702), 'numpy.linspace', 'np.linspace', (['(-2)', '(-1)', 'nb'], {}), '(-2, -1, nb)\n', (690, 702), True, 'import numpy as np\n'), ((716, 739), 'numpy.linspace', 'np.linspace', (['(-2)', '(-1)', 'nb'], {}), '(-2, -1, nb)\n', (727, 739), True, 'import numpy as np\n'), ((753, 770), 'numpy.zeros_like', 'np.zeros_like', (['x_'], {}), '(x_)\n', (766, 770), True, 'import numpy as np\n'), ((800, 821), 'numpy.linspace', 'np.linspace', (['(1)', '(2)', 'nb'], {}), '(1, 2, nb)\n', (811, 821), True, 'import numpy as np\n'), ((835, 856), 'numpy.linspace', 'np.linspace', (['(1)', '(2)', 'nb'], {}), '(1, 2, nb)\n', (846, 856), True, 'import numpy as np\n'), ((870, 891), 'numpy.linspace', 'np.linspace', (['(1)', '(2)', 'nb'], {}), '(1, 2, nb)\n', (881, 891), True, 'import numpy as np\n'), ((905, 922), 'numpy.zeros_like', 'np.zeros_like', (['x_'], {}), '(x_)\n', (918, 922), True, 'import numpy as np\n'), ((952, 974), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'nb'], {}), '(-1, 1, nb)\n', (963, 974), True, 'import numpy as np\n'), ((988, 1010), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'nb'], {}), '(-1, 1, nb)\n', (999, 1010), True, 'import numpy as np\n'), ((1024, 1046), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'nb'], {}), '(-1, 1, nb)\n', (1035, 1046), True, 'import numpy as np\n'), ((1060, 1077), 'numpy.zeros_like', 'np.zeros_like', (['x_'], {}), '(x_)\n', (1073, 1077), True, 'import numpy as np\n'), ((1126, 1149), 'numpy.linspace', 'np.linspace', (['(-2)', '(-1)', 'nb'], {}), '(-2, -1, nb)\n', (1137, 1149), True, 'import numpy as np\n'), ((1163, 1186), 'numpy.linspace', 'np.linspace', (['(-2)', '(-1)', 'nb'], {}), '(-2, -1, nb)\n', (1174, 1186), True, 'import numpy as np\n'), ((1295, 1316), 'numpy.linspace', 
'np.linspace', (['(1)', '(2)', 'nb'], {}), '(1, 2, nb)\n', (1306, 1316), True, 'import numpy as np\n'), ((1330, 1351), 'numpy.linspace', 'np.linspace', (['(1)', '(2)', 'nb'], {}), '(1, 2, nb)\n', (1341, 1351), True, 'import numpy as np\n'), ((1456, 1478), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'nb'], {}), '(-1, 1, nb)\n', (1467, 1478), True, 'import numpy as np\n'), ((1492, 1514), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'nb'], {}), '(-1, 1, nb)\n', (1503, 1514), True, 'import numpy as np\n'), ((1700, 1731), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(100)'], {}), '(-np.pi, np.pi, 100)\n', (1711, 1731), True, 'import numpy as np\n'), ((1745, 1755), 'numpy.cos', 'np.cos', (['x_'], {}), '(x_)\n', (1751, 1755), True, 'import numpy as np\n'), ((1893, 1910), 'numpy.zeros_like', 'np.zeros_like', (['x_'], {}), '(x_)\n', (1906, 1910), True, 'import numpy as np\n'), ((1926, 1943), 'numpy.zeros_like', 'np.zeros_like', (['x_'], {}), '(x_)\n', (1939, 1943), True, 'import numpy as np\n'), ((1959, 1975), 'numpy.ones_like', 'np.ones_like', (['x_'], {}), '(x_)\n', (1971, 1975), True, 'import numpy as np\n'), ((2059, 2082), 'numpy.linspace', 'np.linspace', (['(0.5)', '(2)', 'nb'], {}), '(0.5, 2, nb)\n', (2070, 2082), True, 'import numpy as np\n'), ((2245, 2268), 'numpy.linspace', 'np.linspace', (['(-2)', '(-1)', 'nb'], {}), '(-2, -1, nb)\n', (2256, 2268), True, 'import numpy as np\n'), ((2433, 2454), 'numpy.linspace', 'np.linspace', (['(4)', '(5)', 'nb'], {}), '(4, 5, nb)\n', (2444, 2454), True, 'import numpy as np\n'), ((2565, 2582), 'numpy.zeros_like', 'np.zeros_like', (['x_'], {}), '(x_)\n', (2578, 2582), True, 'import numpy as np\n'), ((2607, 2624), 'numpy.zeros_like', 'np.zeros_like', (['x_'], {}), '(x_)\n', (2620, 2624), True, 'import numpy as np\n'), ((2703, 2713), 'numpy.max', 'np.max', (['y_'], {}), '(y_)\n', (2709, 2713), True, 'import numpy as np\n'), ((2716, 2730), 'numpy.ones', 'np.ones', (['(nb,)'], {}), '((nb,))\n', (2723, 2730), 
True, 'import numpy as np\n'), ((2742, 2752), 'numpy.min', 'np.min', (['y_'], {}), '(y_)\n', (2748, 2752), True, 'import numpy as np\n'), ((2755, 2769), 'numpy.ones', 'np.ones', (['(nb,)'], {}), '((nb,))\n', (2762, 2769), True, 'import numpy as np\n'), ((3780, 3791), 'tensorflow.keras.Input', 'Input', (['(1,)'], {}), '((1,))\n', (3785, 3791), False, 'from tensorflow.keras import Input\n'), ((3801, 3812), 'tensorflow.keras.Input', 'Input', (['(1,)'], {}), '((1,))\n', (3806, 3812), False, 'from tensorflow.keras import Input\n'), ((3822, 3835), 'tensorflow.keras.Input', 'Input', (['(2, 1)'], {}), '((2, 1))\n', (3827, 3835), False, 'from tensorflow.keras import Input\n'), ((3845, 3856), 'tensorflow.keras.Input', 'Input', (['(1,)'], {}), '((1,))\n', (3850, 3856), False, 'from tensorflow.keras import Input\n'), ((3866, 3879), 'tensorflow.keras.Input', 'Input', (['(1, 1)'], {}), '((1, 1))\n', (3871, 3879), False, 'from tensorflow.keras import Input\n'), ((3889, 3900), 'tensorflow.keras.Input', 'Input', (['(1,)'], {}), '((1,))\n', (3894, 3900), False, 'from tensorflow.keras import Input\n'), ((3910, 3921), 'tensorflow.keras.Input', 'Input', (['(1,)'], {}), '((1,))\n', (3915, 3921), False, 'from tensorflow.keras import Input\n'), ((3931, 3944), 'tensorflow.keras.Input', 'Input', (['(1, 1)'], {}), '((1, 1))\n', (3936, 3944), False, 'from tensorflow.keras import Input\n'), ((3954, 3965), 'tensorflow.keras.Input', 'Input', (['(1,)'], {}), '((1,))\n', (3959, 3965), False, 'from tensorflow.keras import Input\n'), ((4488, 4499), 'tensorflow.keras.Input', 'Input', (['(n,)'], {}), '((n,))\n', (4493, 4499), False, 'from tensorflow.keras import Input\n'), ((4509, 4520), 'tensorflow.keras.Input', 'Input', (['(n,)'], {}), '((n,))\n', (4514, 4520), False, 'from tensorflow.keras import Input\n'), ((4530, 4543), 'tensorflow.keras.Input', 'Input', (['(2, n)'], {}), '((2, n))\n', (4535, 4543), False, 'from tensorflow.keras import Input\n'), ((4553, 4564), 'tensorflow.keras.Input', 'Input', 
(['(n,)'], {}), '((n,))\n', (4558, 4564), False, 'from tensorflow.keras import Input\n'), ((4574, 4587), 'tensorflow.keras.Input', 'Input', (['(n, n)'], {}), '((n, n))\n', (4579, 4587), False, 'from tensorflow.keras import Input\n'), ((4597, 4608), 'tensorflow.keras.Input', 'Input', (['(n,)'], {}), '((n,))\n', (4602, 4608), False, 'from tensorflow.keras import Input\n'), ((4618, 4629), 'tensorflow.keras.Input', 'Input', (['(n,)'], {}), '((n,))\n', (4623, 4629), False, 'from tensorflow.keras import Input\n'), ((4639, 4652), 'tensorflow.keras.Input', 'Input', (['(n, n)'], {}), '((n, n))\n', (4644, 4652), False, 'from tensorflow.keras import Input\n'), ((4662, 4673), 'tensorflow.keras.Input', 'Input', (['(n,)'], {}), '((n,))\n', (4667, 4673), False, 'from tensorflow.keras import Input\n'), ((5820, 5850), 'numpy.concatenate', 'np.concatenate', (['[x_0, x_1]', '(-1)'], {}), '([x_0, x_1], -1)\n', (5834, 5850), True, 'import numpy as np\n'), ((5868, 5910), 'numpy.concatenate', 'np.concatenate', (['[z_0[:, 0], z_1[:, 0]]', '(-1)'], {}), '([z_0[:, 0], z_1[:, 0]], -1)\n', (5882, 5910), True, 'import numpy as np\n'), ((5928, 5970), 'numpy.concatenate', 'np.concatenate', (['[z_0[:, 1], z_1[:, 1]]', '(-1)'], {}), '([z_0[:, 1], z_1[:, 1]], -1)\n', (5942, 5970), True, 'import numpy as np\n'), ((5984, 6037), 'numpy.concatenate', 'np.concatenate', (['[z_min_[:, None], z_max_[:, None]]', '(1)'], {}), '([z_min_[:, None], z_max_[:, None]], 1)\n', (5998, 6037), True, 'import numpy as np\n'), ((6051, 6097), 'numpy.concatenate', 'np.concatenate', (['[y_0 + y_1, y_0 + 2 * y_1]', '(-1)'], {}), '([y_0 + y_1, y_0 + 2 * y_1], -1)\n', (6065, 6097), True, 'import numpy as np\n'), ((6113, 6167), 'numpy.concatenate', 'np.concatenate', (['[b_u_0 + b_u_1, b_u_0 + 2 * b_u_1]', '(-1)'], {}), '([b_u_0 + b_u_1, b_u_0 + 2 * b_u_1], -1)\n', (6127, 6167), True, 'import numpy as np\n'), ((6183, 6237), 'numpy.concatenate', 'np.concatenate', (['[u_c_0 + u_c_1, u_c_0 + 2 * u_c_1]', '(-1)'], {}), '([u_c_0 + 
u_c_1, u_c_0 + 2 * u_c_1], -1)\n', (6197, 6237), True, 'import numpy as np\n'), ((6253, 6307), 'numpy.concatenate', 'np.concatenate', (['[b_l_0 + b_l_1, b_l_0 + 2 * b_l_1]', '(-1)'], {}), '([b_l_0 + b_l_1, b_l_0 + 2 * b_l_1], -1)\n', (6267, 6307), True, 'import numpy as np\n'), ((6323, 6377), 'numpy.concatenate', 'np.concatenate', (['[l_c_0 + l_c_1, l_c_0 + 2 * l_c_1]', '(-1)'], {}), '([l_c_0 + l_c_1, l_c_0 + 2 * l_c_1], -1)\n', (6337, 6377), True, 'import numpy as np\n'), ((7253, 7288), 'numpy.concatenate', 'np.concatenate', (['[x_0, x_1, x_2]', '(-1)'], {}), '([x_0, x_1, x_2], -1)\n', (7267, 7288), True, 'import numpy as np\n'), ((7306, 7359), 'numpy.concatenate', 'np.concatenate', (['[z_0[:, 0], z_1[:, 0], z_2[:, 0]]', '(-1)'], {}), '([z_0[:, 0], z_1[:, 0], z_2[:, 0]], -1)\n', (7320, 7359), True, 'import numpy as np\n'), ((7377, 7430), 'numpy.concatenate', 'np.concatenate', (['[z_0[:, 1], z_1[:, 1], z_2[:, 1]]', '(-1)'], {}), '([z_0[:, 1], z_1[:, 1], z_2[:, 1]], -1)\n', (7391, 7430), True, 'import numpy as np\n'), ((7444, 7497), 'numpy.concatenate', 'np.concatenate', (['[z_min_[:, None], z_max_[:, None]]', '(1)'], {}), '([z_min_[:, None], z_max_[:, None]], 1)\n', (7458, 7497), True, 'import numpy as np\n'), ((7511, 7562), 'numpy.concatenate', 'np.concatenate', (['[y_0 + y_1, y_0 + 2 * y_1, y_2]', '(-1)'], {}), '([y_0 + y_1, y_0 + 2 * y_1, y_2], -1)\n', (7525, 7562), True, 'import numpy as np\n'), ((7578, 7639), 'numpy.concatenate', 'np.concatenate', (['[b_u_0 + b_u_1, b_u_0 + 2 * b_u_1, b_u_2]', '(-1)'], {}), '([b_u_0 + b_u_1, b_u_0 + 2 * b_u_1, b_u_2], -1)\n', (7592, 7639), True, 'import numpy as np\n'), ((7655, 7716), 'numpy.concatenate', 'np.concatenate', (['[b_l_0 + b_l_1, b_l_0 + 2 * b_l_1, b_l_2]', '(-1)'], {}), '([b_l_0 + b_l_1, b_l_0 + 2 * b_l_1, b_l_2], -1)\n', (7669, 7716), True, 'import numpy as np\n'), ((7732, 7793), 'numpy.concatenate', 'np.concatenate', (['[u_c_0 + u_c_1, u_c_0 + 2 * u_c_1, u_c_2]', '(-1)'], {}), '([u_c_0 + u_c_1, u_c_0 + 2 * 
u_c_1, u_c_2], -1)\n', (7746, 7793), True, 'import numpy as np\n'), ((7809, 7870), 'numpy.concatenate', 'np.concatenate', (['[l_c_0 + l_c_1, l_c_0 + 2 * l_c_1, l_c_2]', '(-1)'], {}), '([l_c_0 + l_c_1, l_c_0 + 2 * l_c_1, l_c_2], -1)\n', (7823, 7870), True, 'import numpy as np\n'), ((13150, 13180), 'numpy.concatenate', 'np.concatenate', (['[y_0, y_0]', '(-1)'], {}), '([y_0, y_0], -1)\n', (13164, 13180), True, 'import numpy as np\n'), ((13196, 13230), 'numpy.concatenate', 'np.concatenate', (['[b_u_0, b_u_0]', '(-1)'], {}), '([b_u_0, b_u_0], -1)\n', (13210, 13230), True, 'import numpy as np\n'), ((13246, 13280), 'numpy.concatenate', 'np.concatenate', (['[b_l_0, b_l_0]', '(-1)'], {}), '([b_l_0, b_l_0], -1)\n', (13260, 13280), True, 'import numpy as np\n'), ((13296, 13330), 'numpy.concatenate', 'np.concatenate', (['[u_c_0, u_c_0]', '(-1)'], {}), '([u_c_0, u_c_0], -1)\n', (13310, 13330), True, 'import numpy as np\n'), ((13346, 13380), 'numpy.concatenate', 'np.concatenate', (['[l_c_0, l_c_0]', '(-1)'], {}), '([l_c_0, l_c_0], -1)\n', (13360, 13380), True, 'import numpy as np\n'), ((13396, 13430), 'numpy.concatenate', 'np.concatenate', (['[w_u_0, w_u_0]', '(-1)'], {}), '([w_u_0, w_u_0], -1)\n', (13410, 13430), True, 'import numpy as np\n'), ((13446, 13480), 'numpy.concatenate', 'np.concatenate', (['[w_l_0, w_l_0]', '(-1)'], {}), '([w_l_0, w_l_0], -1)\n', (13460, 13480), True, 'import numpy as np\n'), ((14427, 14457), 'numpy.transpose', 'np.transpose', (['y_', '(0, 3, 1, 2)'], {}), '(y_, (0, 3, 1, 2))\n', (14439, 14457), True, 'import numpy as np\n'), ((14473, 14505), 'numpy.transpose', 'np.transpose', (['u_c_', '(0, 3, 1, 2)'], {}), '(u_c_, (0, 3, 1, 2))\n', (14485, 14505), True, 'import numpy as np\n'), ((14521, 14553), 'numpy.transpose', 'np.transpose', (['l_c_', '(0, 3, 1, 2)'], {}), '(l_c_, (0, 3, 1, 2))\n', (14533, 14553), True, 'import numpy as np\n'), ((14569, 14601), 'numpy.transpose', 'np.transpose', (['b_u_', '(0, 3, 1, 2)'], {}), '(b_u_, (0, 3, 1, 2))\n', (14581, 
14601), True, 'import numpy as np\n'), ((14617, 14649), 'numpy.transpose', 'np.transpose', (['b_l_', '(0, 3, 1, 2)'], {}), '(b_l_, (0, 3, 1, 2))\n', (14629, 14649), True, 'import numpy as np\n'), ((14665, 14700), 'numpy.transpose', 'np.transpose', (['w_u_', '(0, 1, 4, 2, 3)'], {}), '(w_u_, (0, 1, 4, 2, 3))\n', (14677, 14700), True, 'import numpy as np\n'), ((14716, 14751), 'numpy.transpose', 'np.transpose', (['w_l_', '(0, 1, 4, 2, 3)'], {}), '(w_l_, (0, 1, 4, 2, 3))\n', (14728, 14751), True, 'import numpy as np\n'), ((16397, 16428), 'numpy.clip', 'np.clip', (['(x_min_ - x_)', '(0)', 'np.inf'], {}), '(x_min_ - x_, 0, np.inf)\n', (16404, 16428), True, 'import numpy as np\n'), ((16566, 16597), 'numpy.clip', 'np.clip', (['(x_ - x_max_)', '(0)', 'np.inf'], {}), '(x_ - x_max_, 0, np.inf)\n', (16573, 16597), True, 'import numpy as np\n'), ((20177, 20208), 'numpy.clip', 'np.clip', (['(x_min_ - x_)', '(0)', 'np.inf'], {}), '(x_min_ - x_, 0, np.inf)\n', (20184, 20208), True, 'import numpy as np\n'), ((20321, 20352), 'numpy.clip', 'np.clip', (['(x_ - x_max_)', '(0)', 'np.inf'], {}), '(x_ - x_max_, 0, np.inf)\n', (20328, 20352), True, 'import numpy as np\n'), ((23893, 23923), 'numpy.concatenate', 'np.concatenate', (['[x_0, x_1]', '(-1)'], {}), '([x_0, x_1], -1)\n', (23907, 23923), True, 'import numpy as np\n'), ((23941, 23983), 'numpy.concatenate', 'np.concatenate', (['[z_0[:, 0], z_1[:, 0]]', '(-1)'], {}), '([z_0[:, 0], z_1[:, 0]], -1)\n', (23955, 23983), True, 'import numpy as np\n'), ((24001, 24043), 'numpy.concatenate', 'np.concatenate', (['[z_0[:, 1], z_1[:, 1]]', '(-1)'], {}), '([z_0[:, 1], z_1[:, 1]], -1)\n', (24015, 24043), True, 'import numpy as np\n'), ((24057, 24110), 'numpy.concatenate', 'np.concatenate', (['[z_min_[:, None], z_max_[:, None]]', '(1)'], {}), '([z_min_[:, None], z_max_[:, None]], 1)\n', (24071, 24110), True, 'import numpy as np\n'), ((24124, 24154), 'numpy.concatenate', 'np.concatenate', (['[y_0, y_1]', '(-1)'], {}), '([y_0, y_1], -1)\n', (24138, 
24154), True, 'import numpy as np\n'), ((24170, 24204), 'numpy.concatenate', 'np.concatenate', (['[b_u_0, b_u_1]', '(-1)'], {}), '([b_u_0, b_u_1], -1)\n', (24184, 24204), True, 'import numpy as np\n'), ((24220, 24254), 'numpy.concatenate', 'np.concatenate', (['[u_c_0, u_c_1]', '(-1)'], {}), '([u_c_0, u_c_1], -1)\n', (24234, 24254), True, 'import numpy as np\n'), ((24270, 24304), 'numpy.concatenate', 'np.concatenate', (['[b_l_0, b_l_1]', '(-1)'], {}), '([b_l_0, b_l_1], -1)\n', (24284, 24304), True, 'import numpy as np\n'), ((24320, 24354), 'numpy.concatenate', 'np.concatenate', (['[l_c_0, l_c_1]', '(-1)'], {}), '([l_c_0, l_c_1], -1)\n', (24334, 24354), True, 'import numpy as np\n'), ((25201, 25236), 'numpy.concatenate', 'np.concatenate', (['[x_0, x_1, x_2]', '(-1)'], {}), '([x_0, x_1, x_2], -1)\n', (25215, 25236), True, 'import numpy as np\n'), ((25254, 25307), 'numpy.concatenate', 'np.concatenate', (['[z_0[:, 0], z_1[:, 0], z_2[:, 0]]', '(-1)'], {}), '([z_0[:, 0], z_1[:, 0], z_2[:, 0]], -1)\n', (25268, 25307), True, 'import numpy as np\n'), ((25325, 25378), 'numpy.concatenate', 'np.concatenate', (['[z_0[:, 1], z_1[:, 1], z_2[:, 1]]', '(-1)'], {}), '([z_0[:, 1], z_1[:, 1], z_2[:, 1]], -1)\n', (25339, 25378), True, 'import numpy as np\n'), ((25392, 25445), 'numpy.concatenate', 'np.concatenate', (['[z_min_[:, None], z_max_[:, None]]', '(1)'], {}), '([z_min_[:, None], z_max_[:, None]], 1)\n', (25406, 25445), True, 'import numpy as np\n'), ((25459, 25494), 'numpy.concatenate', 'np.concatenate', (['[y_0, y_1, y_2]', '(-1)'], {}), '([y_0, y_1, y_2], -1)\n', (25473, 25494), True, 'import numpy as np\n'), ((25510, 25551), 'numpy.concatenate', 'np.concatenate', (['[b_u_0, b_u_1, b_u_2]', '(-1)'], {}), '([b_u_0, b_u_1, b_u_2], -1)\n', (25524, 25551), True, 'import numpy as np\n'), ((25567, 25608), 'numpy.concatenate', 'np.concatenate', (['[b_l_0, b_l_1, b_l_2]', '(-1)'], {}), '([b_l_0, b_l_1, b_l_2], -1)\n', (25581, 25608), True, 'import numpy as np\n'), ((25624, 25665), 
'numpy.concatenate', 'np.concatenate', (['[u_c_0, u_c_1, u_c_2]', '(-1)'], {}), '([u_c_0, u_c_1, u_c_2], -1)\n', (25638, 25665), True, 'import numpy as np\n'), ((25681, 25722), 'numpy.concatenate', 'np.concatenate', (['[l_c_0, l_c_1, l_c_2]', '(-1)'], {}), '([l_c_0, l_c_1, l_c_2], -1)\n', (25695, 25722), True, 'import numpy as np\n'), ((26555, 26586), 'numpy.clip', 'np.clip', (['(x_min_ - x_)', '(0)', 'np.inf'], {}), '(x_min_ - x_, 0, np.inf)\n', (26562, 26586), True, 'import numpy as np\n'), ((26724, 26755), 'numpy.clip', 'np.clip', (['(x_ - x_max_)', '(0)', 'np.inf'], {}), '(x_ - x_max_, 0, np.inf)\n', (26731, 26755), True, 'import numpy as np\n'), ((26882, 26899), 'numpy.zeros_like', 'np.zeros_like', (['x_'], {}), '(x_)\n', (26895, 26899), True, 'import numpy as np\n'), ((27002, 27030), 'numpy.expand_dims', 'np.expand_dims', (['x_expand', '(-1)'], {}), '(x_expand, -1)\n', (27016, 27030), True, 'import numpy as np\n'), ((27045, 27071), 'numpy.sum', 'np.sum', (['(w_l_ * x_expand)', '(1)'], {}), '(w_l_ * x_expand, 1)\n', (27051, 27071), True, 'import numpy as np\n'), ((27092, 27118), 'numpy.sum', 'np.sum', (['(w_u_ * x_expand)', '(1)'], {}), '(w_u_ * x_expand, 1)\n', (27098, 27118), True, 'import numpy as np\n'), ((27220, 27251), 'numpy.clip', 'np.clip', (['(l_c_ - y_)', '(0.0)', 'np.inf'], {}), '(l_c_ - y_, 0.0, np.inf)\n', (27227, 27251), True, 'import numpy as np\n'), ((27261, 27278), 'numpy.zeros_like', 'np.zeros_like', (['y_'], {}), '(y_)\n', (27274, 27278), True, 'import numpy as np\n'), ((27370, 27404), 'numpy.clip', 'np.clip', (['(y_ - u_c_)', '(0.0)', '(1000000.0)'], {}), '(y_ - u_c_, 0.0, 1000000.0)\n', (27377, 27404), True, 'import numpy as np\n'), ((27408, 27425), 'numpy.zeros_like', 'np.zeros_like', (['y_'], {}), '(y_)\n', (27421, 27425), True, 'import numpy as np\n'), ((27525, 27558), 'numpy.clip', 'np.clip', (['(lower_ - y_)', '(0.0)', 'np.inf'], {}), '(lower_ - y_, 0.0, np.inf)\n', (27532, 27558), True, 'import numpy as np\n'), ((27568, 27585), 
'numpy.zeros_like', 'np.zeros_like', (['y_'], {}), '(y_)\n', (27581, 27585), True, 'import numpy as np\n'), ((27680, 27716), 'numpy.clip', 'np.clip', (['(y_ - upper_)', '(0.0)', '(1000000.0)'], {}), '(y_ - upper_, 0.0, 1000000.0)\n', (27687, 27716), True, 'import numpy as np\n'), ((27720, 27737), 'numpy.zeros_like', 'np.zeros_like', (['y_'], {}), '(y_)\n', (27733, 27737), True, 'import numpy as np\n'), ((27870, 27887), 'numpy.zeros_like', 'np.zeros_like', (['x_'], {}), '(x_)\n', (27883, 27887), True, 'import numpy as np\n'), ((27916, 27933), 'numpy.zeros_like', 'np.zeros_like', (['x_'], {}), '(x_)\n', (27929, 27933), True, 'import numpy as np\n'), ((28044, 28076), 'numpy.expand_dims', 'np.expand_dims', (['x_expand_min', '(-1)'], {}), '(x_expand_min, -1)\n', (28058, 28076), True, 'import numpy as np\n'), ((28100, 28132), 'numpy.expand_dims', 'np.expand_dims', (['x_expand_max', '(-1)'], {}), '(x_expand_max, -1)\n', (28114, 28132), True, 'import numpy as np\n'), ((1204, 1227), 'numpy.linspace', 'np.linspace', (['(-2)', '(-1)', 'nb'], {}), '(-2, -1, nb)\n', (1215, 1227), True, 'import numpy as np\n'), ((1242, 1265), 'numpy.linspace', 'np.linspace', (['(-2)', '(-1)', 'nb'], {}), '(-2, -1, nb)\n', (1253, 1265), True, 'import numpy as np\n'), ((1369, 1390), 'numpy.linspace', 'np.linspace', (['(1)', '(2)', 'nb'], {}), '(1, 2, nb)\n', (1380, 1390), True, 'import numpy as np\n'), ((1405, 1426), 'numpy.linspace', 'np.linspace', (['(1)', '(2)', 'nb'], {}), '(1, 2, nb)\n', (1416, 1426), True, 'import numpy as np\n'), ((1532, 1554), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'nb'], {}), '(-1, 1, nb)\n', (1543, 1554), True, 'import numpy as np\n'), ((1569, 1591), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'nb'], {}), '(-1, 1, nb)\n', (1580, 1591), True, 'import numpy as np\n'), ((1992, 2008), 'numpy.ones_like', 'np.ones_like', (['x_'], {}), '(x_)\n', (2004, 2008), True, 'import numpy as np\n'), ((2096, 2117), 'numpy.linspace', 'np.linspace', (['(1)', '(2)', 'nb'], 
{}), '(1, 2, nb)\n', (2107, 2117), True, 'import numpy as np\n'), ((2282, 2305), 'numpy.linspace', 'np.linspace', (['(-2)', '(-1)', 'nb'], {}), '(-2, -1, nb)\n', (2293, 2305), True, 'import numpy as np\n'), ((2468, 2491), 'numpy.linspace', 'np.linspace', (['(-2)', '(-1)', 'nb'], {}), '(-2, -1, nb)\n', (2479, 2491), True, 'import numpy as np\n'), ((3505, 3516), 'tensorflow.keras.Input', 'Input', (['(1,)'], {}), '((1,))\n', (3510, 3516), False, 'from tensorflow.keras import Input\n'), ((3530, 3543), 'tensorflow.keras.Input', 'Input', (['(2, 1)'], {}), '((2, 1))\n', (3535, 3543), False, 'from tensorflow.keras import Input\n'), ((3557, 3568), 'tensorflow.keras.Input', 'Input', (['(1,)'], {}), '((1,))\n', (3562, 3568), False, 'from tensorflow.keras import Input\n'), ((3582, 3595), 'tensorflow.keras.Input', 'Input', (['(1, 1)'], {}), '((1, 1))\n', (3587, 3595), False, 'from tensorflow.keras import Input\n'), ((3609, 3620), 'tensorflow.keras.Input', 'Input', (['(1,)'], {}), '((1,))\n', (3614, 3620), False, 'from tensorflow.keras import Input\n'), ((3634, 3645), 'tensorflow.keras.Input', 'Input', (['(1,)'], {}), '((1,))\n', (3639, 3645), False, 'from tensorflow.keras import Input\n'), ((3659, 3672), 'tensorflow.keras.Input', 'Input', (['(1, 1)'], {}), '((1, 1))\n', (3664, 3672), False, 'from tensorflow.keras import Input\n'), ((3686, 3697), 'tensorflow.keras.Input', 'Input', (['(1,)'], {}), '((1,))\n', (3691, 3697), False, 'from tensorflow.keras import Input\n'), ((3711, 3722), 'tensorflow.keras.Input', 'Input', (['(1,)'], {}), '((1,))\n', (3716, 3722), False, 'from tensorflow.keras import Input\n'), ((3736, 3747), 'tensorflow.keras.Input', 'Input', (['(1,)'], {}), '((1,))\n', (3741, 3747), False, 'from tensorflow.keras import Input\n'), ((4188, 4199), 'tensorflow.keras.Input', 'Input', (['(n,)'], {}), '((n,))\n', (4193, 4199), False, 'from tensorflow.keras import Input\n'), ((4213, 4224), 'tensorflow.keras.Input', 'Input', (['(n,)'], {}), '((n,))\n', (4218, 4224), False, 
'from tensorflow.keras import Input\n'), ((4238, 4251), 'tensorflow.keras.Input', 'Input', (['(2, n)'], {}), '((2, n))\n', (4243, 4251), False, 'from tensorflow.keras import Input\n'), ((4265, 4276), 'tensorflow.keras.Input', 'Input', (['(n,)'], {}), '((n,))\n', (4270, 4276), False, 'from tensorflow.keras import Input\n'), ((4290, 4303), 'tensorflow.keras.Input', 'Input', (['(n, n)'], {}), '((n, n))\n', (4295, 4303), False, 'from tensorflow.keras import Input\n'), ((4317, 4328), 'tensorflow.keras.Input', 'Input', (['(n,)'], {}), '((n,))\n', (4322, 4328), False, 'from tensorflow.keras import Input\n'), ((4342, 4353), 'tensorflow.keras.Input', 'Input', (['(n,)'], {}), '((n,))\n', (4347, 4353), False, 'from tensorflow.keras import Input\n'), ((4367, 4380), 'tensorflow.keras.Input', 'Input', (['(n, n)'], {}), '((n, n))\n', (4372, 4380), False, 'from tensorflow.keras import Input\n'), ((4394, 4405), 'tensorflow.keras.Input', 'Input', (['(n,)'], {}), '((n,))\n', (4399, 4405), False, 'from tensorflow.keras import Input\n'), ((4419, 4430), 'tensorflow.keras.Input', 'Input', (['(n,)'], {}), '((n,))\n', (4424, 4430), False, 'from tensorflow.keras import Input\n'), ((4444, 4455), 'tensorflow.keras.Input', 'Input', (['(n,)'], {}), '((n,))\n', (4449, 4455), False, 'from tensorflow.keras import Input\n'), ((6418, 6464), 'numpy.concatenate', 'np.concatenate', (['[h_0 + h_1, h_0 + 2 * h_1]', '(-1)'], {}), '([h_0 + h_1, h_0 + 2 * h_1], -1)\n', (6432, 6464), True, 'import numpy as np\n'), ((6482, 6528), 'numpy.concatenate', 'np.concatenate', (['[g_0 + g_1, g_0 + 2 * g_1]', '(-1)'], {}), '([g_0 + g_1, g_0 + 2 * g_1], -1)\n', (6496, 6528), True, 'import numpy as np\n'), ((7911, 7962), 'numpy.concatenate', 'np.concatenate', (['[h_0 + h_1, h_0 + 2 * h_1, h_2]', '(-1)'], {}), '([h_0 + h_1, h_0 + 2 * h_1, h_2], -1)\n', (7925, 7962), True, 'import numpy as np\n'), ((7980, 8031), 'numpy.concatenate', 'np.concatenate', (['[g_0 + g_1, g_0 + 2 * g_1, g_2]', '(-1)'], {}), '([g_0 + g_1, g_0 + 2 
* g_1, g_2], -1)\n', (7994, 8031), True, 'import numpy as np\n'), ((13595, 13625), 'numpy.concatenate', 'np.concatenate', (['[h_0, h_0]', '(-1)'], {}), '([h_0, h_0], -1)\n', (13609, 13625), True, 'import numpy as np\n'), ((13643, 13673), 'numpy.concatenate', 'np.concatenate', (['[g_0, g_0]', '(-1)'], {}), '([g_0, g_0], -1)\n', (13657, 13673), True, 'import numpy as np\n'), ((14335, 14365), 'numpy.transpose', 'np.transpose', (['h_', '(0, 3, 1, 2)'], {}), '(h_, (0, 3, 1, 2))\n', (14347, 14365), True, 'import numpy as np\n'), ((14383, 14413), 'numpy.transpose', 'np.transpose', (['g_', '(0, 3, 1, 2)'], {}), '(g_, (0, 3, 1, 2))\n', (14395, 14413), True, 'import numpy as np\n'), ((15161, 15172), 'tensorflow.keras.Input', 'Input', (['(2,)'], {}), '((2,))\n', (15166, 15172), False, 'from tensorflow.keras import Input\n'), ((15186, 15202), 'tensorflow.keras.Input', 'Input', (['(n, n, 2)'], {}), '((n, n, 2))\n', (15191, 15202), False, 'from tensorflow.keras import Input\n'), ((15216, 15229), 'tensorflow.keras.Input', 'Input', (['(2, 2)'], {}), '((2, 2))\n', (15221, 15229), False, 'from tensorflow.keras import Input\n'), ((15243, 15259), 'tensorflow.keras.Input', 'Input', (['(n, n, 2)'], {}), '((n, n, 2))\n', (15248, 15259), False, 'from tensorflow.keras import Input\n'), ((15273, 15292), 'tensorflow.keras.Input', 'Input', (['(2, n, n, 2)'], {}), '((2, n, n, 2))\n', (15278, 15292), False, 'from tensorflow.keras import Input\n'), ((15306, 15322), 'tensorflow.keras.Input', 'Input', (['(n, n, 2)'], {}), '((n, n, 2))\n', (15311, 15322), False, 'from tensorflow.keras import Input\n'), ((15336, 15352), 'tensorflow.keras.Input', 'Input', (['(n, n, 2)'], {}), '((n, n, 2))\n', (15341, 15352), False, 'from tensorflow.keras import Input\n'), ((15366, 15385), 'tensorflow.keras.Input', 'Input', (['(2, n, n, 2)'], {}), '((2, n, n, 2))\n', (15371, 15385), False, 'from tensorflow.keras import Input\n'), ((15399, 15415), 'tensorflow.keras.Input', 'Input', (['(n, n, 2)'], {}), '((n, n, 2))\n', 
(15404, 15415), False, 'from tensorflow.keras import Input\n'), ((15551, 15562), 'tensorflow.keras.Input', 'Input', (['(2,)'], {}), '((2,))\n', (15556, 15562), False, 'from tensorflow.keras import Input\n'), ((15576, 15592), 'tensorflow.keras.Input', 'Input', (['(2, n, n)'], {}), '((2, n, n))\n', (15581, 15592), False, 'from tensorflow.keras import Input\n'), ((15606, 15619), 'tensorflow.keras.Input', 'Input', (['(2, 2)'], {}), '((2, 2))\n', (15611, 15619), False, 'from tensorflow.keras import Input\n'), ((15633, 15649), 'tensorflow.keras.Input', 'Input', (['(2, n, n)'], {}), '((2, n, n))\n', (15638, 15649), False, 'from tensorflow.keras import Input\n'), ((15663, 15682), 'tensorflow.keras.Input', 'Input', (['(2, 2, n, n)'], {}), '((2, 2, n, n))\n', (15668, 15682), False, 'from tensorflow.keras import Input\n'), ((15696, 15712), 'tensorflow.keras.Input', 'Input', (['(2, n, n)'], {}), '((2, n, n))\n', (15701, 15712), False, 'from tensorflow.keras import Input\n'), ((15726, 15742), 'tensorflow.keras.Input', 'Input', (['(2, n, n)'], {}), '((2, n, n))\n', (15731, 15742), False, 'from tensorflow.keras import Input\n'), ((15756, 15775), 'tensorflow.keras.Input', 'Input', (['(2, 2, n, n)'], {}), '((2, 2, n, n))\n', (15761, 15775), False, 'from tensorflow.keras import Input\n'), ((15789, 15805), 'tensorflow.keras.Input', 'Input', (['(2, n, n)'], {}), '((2, n, n))\n', (15794, 15805), False, 'from tensorflow.keras import Input\n'), ((16752, 16769), 'numpy.zeros_like', 'np.zeros_like', (['x_'], {}), '(x_)\n', (16765, 16769), True, 'import numpy as np\n'), ((16884, 16912), 'numpy.expand_dims', 'np.expand_dims', (['x_expand', '(-1)'], {}), '(x_expand, -1)\n', (16898, 16912), True, 'import numpy as np\n'), ((16931, 16957), 'numpy.sum', 'np.sum', (['(w_l_ * x_expand)', '(1)'], {}), '(w_l_ * x_expand, 1)\n', (16937, 16957), True, 'import numpy as np\n'), ((16982, 17008), 'numpy.sum', 'np.sum', (['(w_u_ * x_expand)', '(1)'], {}), '(w_u_ * x_expand, 1)\n', (16988, 17008), True, 
'import numpy as np\n'), ((17140, 17176), 'numpy.clip', 'np.clip', (['(h_[:-1] - h_[1:])', '(0)', 'np.inf'], {}), '(h_[:-1] - h_[1:], 0, np.inf)\n', (17147, 17176), True, 'import numpy as np\n'), ((17190, 17211), 'numpy.zeros_like', 'np.zeros_like', (['h_[1:]'], {}), '(h_[1:])\n', (17203, 17211), True, 'import numpy as np\n'), ((17365, 17401), 'numpy.clip', 'np.clip', (['(g_[1:] - g_[:-1])', '(0)', 'np.inf'], {}), '(g_[1:] - g_[:-1], 0, np.inf)\n', (17372, 17401), True, 'import numpy as np\n'), ((17415, 17436), 'numpy.zeros_like', 'np.zeros_like', (['g_[1:]'], {}), '(g_[1:])\n', (17428, 17436), True, 'import numpy as np\n'), ((17622, 17655), 'numpy.clip', 'np.clip', (['(lower_ - y_)', '(0.0)', 'np.inf'], {}), '(lower_ - y_, 0.0, np.inf)\n', (17629, 17655), True, 'import numpy as np\n'), ((17669, 17686), 'numpy.zeros_like', 'np.zeros_like', (['y_'], {}), '(y_)\n', (17682, 17686), True, 'import numpy as np\n'), ((17802, 17838), 'numpy.clip', 'np.clip', (['(y_ - upper_)', '(0.0)', '(1000000.0)'], {}), '(y_ - upper_, 0.0, 1000000.0)\n', (17809, 17838), True, 'import numpy as np\n'), ((17846, 17863), 'numpy.zeros_like', 'np.zeros_like', (['y_'], {}), '(y_)\n', (17859, 17863), True, 'import numpy as np\n'), ((18003, 18034), 'numpy.clip', 'np.clip', (['(l_c_ - y_)', '(0.0)', 'np.inf'], {}), '(l_c_ - y_, 0.0, np.inf)\n', (18010, 18034), True, 'import numpy as np\n'), ((18048, 18065), 'numpy.zeros_like', 'np.zeros_like', (['y_'], {}), '(y_)\n', (18061, 18065), True, 'import numpy as np\n'), ((18177, 18211), 'numpy.clip', 'np.clip', (['(y_ - u_c_)', '(0.0)', '(1000000.0)'], {}), '(y_ - u_c_, 0.0, 1000000.0)\n', (18184, 18211), True, 'import numpy as np\n'), ((18219, 18236), 'numpy.zeros_like', 'np.zeros_like', (['y_'], {}), '(y_)\n', (18232, 18236), True, 'import numpy as np\n'), ((18432, 18449), 'numpy.zeros_like', 'np.zeros_like', (['x_'], {}), '(x_)\n', (18445, 18449), True, 'import numpy as np\n'), ((18482, 18499), 'numpy.zeros_like', 'np.zeros_like', (['x_'], {}), 
'(x_)\n', (18495, 18499), True, 'import numpy as np\n'), ((18622, 18654), 'numpy.expand_dims', 'np.expand_dims', (['x_expand_min', '(-1)'], {}), '(x_expand_min, -1)\n', (18636, 18654), True, 'import numpy as np\n'), ((18682, 18714), 'numpy.expand_dims', 'np.expand_dims', (['x_expand_max', '(-1)'], {}), '(x_expand_max, -1)\n', (18696, 18714), True, 'import numpy as np\n'), ((20482, 20499), 'numpy.zeros_like', 'np.zeros_like', (['x_'], {}), '(x_)\n', (20495, 20499), True, 'import numpy as np\n'), ((20614, 20642), 'numpy.expand_dims', 'np.expand_dims', (['x_expand', '(-1)'], {}), '(x_expand, -1)\n', (20628, 20642), True, 'import numpy as np\n'), ((20661, 20687), 'numpy.sum', 'np.sum', (['(w_l_ * x_expand)', '(1)'], {}), '(w_l_ * x_expand, 1)\n', (20667, 20687), True, 'import numpy as np\n'), ((20712, 20738), 'numpy.sum', 'np.sum', (['(w_u_ * x_expand)', '(1)'], {}), '(w_u_ * x_expand, 1)\n', (20718, 20738), True, 'import numpy as np\n'), ((21876, 21893), 'numpy.zeros_like', 'np.zeros_like', (['x_'], {}), '(x_)\n', (21889, 21893), True, 'import numpy as np\n'), ((21926, 21943), 'numpy.zeros_like', 'np.zeros_like', (['x_'], {}), '(x_)\n', (21939, 21943), True, 'import numpy as np\n'), ((22066, 22098), 'numpy.expand_dims', 'np.expand_dims', (['x_expand_min', '(-1)'], {}), '(x_expand_min, -1)\n', (22080, 22098), True, 'import numpy as np\n'), ((22126, 22158), 'numpy.expand_dims', 'np.expand_dims', (['x_expand_max', '(-1)'], {}), '(x_expand_max, -1)\n', (22140, 22158), True, 'import numpy as np\n'), ((24395, 24425), 'numpy.concatenate', 'np.concatenate', (['[h_0, h_1]', '(-1)'], {}), '([h_0, h_1], -1)\n', (24409, 24425), True, 'import numpy as np\n'), ((24443, 24473), 'numpy.concatenate', 'np.concatenate', (['[g_0, g_1]', '(-1)'], {}), '([g_0, g_1], -1)\n', (24457, 24473), True, 'import numpy as np\n'), ((25763, 25798), 'numpy.concatenate', 'np.concatenate', (['[h_0, h_1, h_2]', '(-1)'], {}), '([h_0, h_1, h_2], -1)\n', (25777, 25798), True, 'import numpy as np\n'), 
((25816, 25851), 'numpy.concatenate', 'np.concatenate', (['[g_0, g_1, g_2]', '(-1)'], {}), '([g_0, g_1, g_2], -1)\n', (25830, 25851), True, 'import numpy as np\n'), ((15473, 15489), 'tensorflow.keras.Input', 'Input', (['(n, n, 2)'], {}), '((n, n, 2))\n', (15478, 15489), False, 'from tensorflow.keras import Input\n'), ((15491, 15507), 'tensorflow.keras.Input', 'Input', (['(n, n, 2)'], {}), '((n, n, 2))\n', (15496, 15507), False, 'from tensorflow.keras import Input\n'), ((15863, 15879), 'tensorflow.keras.Input', 'Input', (['(n, n, 2)'], {}), '((n, n, 2))\n', (15868, 15879), False, 'from tensorflow.keras import Input\n'), ((15881, 15897), 'tensorflow.keras.Input', 'Input', (['(n, n, 2)'], {}), '((n, n, 2))\n', (15886, 15897), False, 'from tensorflow.keras import Input\n'), ((21270, 21301), 'numpy.clip', 'np.clip', (['(l_c_ - y_)', '(0.0)', 'np.inf'], {}), '(l_c_ - y_, 0.0, np.inf)\n', (21277, 21301), True, 'import numpy as np\n'), ((21303, 21320), 'numpy.zeros_like', 'np.zeros_like', (['y_'], {}), '(y_)\n', (21316, 21320), True, 'import numpy as np\n'), ((21389, 21423), 'numpy.clip', 'np.clip', (['(y_ - u_c_)', '(0.0)', '(1000000.0)'], {}), '(y_ - u_c_, 0.0, 1000000.0)\n', (21396, 21423), True, 'import numpy as np\n'), ((21419, 21436), 'numpy.zeros_like', 'np.zeros_like', (['y_'], {}), '(y_)\n', (21432, 21436), True, 'import numpy as np\n'), ((21551, 21584), 'numpy.clip', 'np.clip', (['(lower_ - y_)', '(0.0)', 'np.inf'], {}), '(lower_ - y_, 0.0, np.inf)\n', (21558, 21584), True, 'import numpy as np\n'), ((21586, 21603), 'numpy.zeros_like', 'np.zeros_like', (['y_'], {}), '(y_)\n', (21599, 21603), True, 'import numpy as np\n'), ((21688, 21724), 'numpy.clip', 'np.clip', (['(y_ - upper_)', '(0.0)', '(1000000.0)'], {}), '(y_ - upper_, 0.0, 1000000.0)\n', (21695, 21724), True, 'import numpy as np\n'), ((21720, 21737), 'numpy.zeros_like', 'np.zeros_like', (['y_'], {}), '(y_)\n', (21733, 21737), True, 'import numpy as np\n'), ((22494, 22527), 'numpy.clip', 'np.clip', 
(['(lower_ - y_)', '(0.0)', 'np.inf'], {}), '(lower_ - y_, 0.0, np.inf)\n', (22501, 22527), True, 'import numpy as np\n'), ((22529, 22546), 'numpy.zeros_like', 'np.zeros_like', (['y_'], {}), '(y_)\n', (22542, 22546), True, 'import numpy as np\n'), ((22615, 22651), 'numpy.clip', 'np.clip', (['(y_ - upper_)', '(0.0)', '(1000000.0)'], {}), '(y_ - upper_, 0.0, 1000000.0)\n', (22622, 22651), True, 'import numpy as np\n'), ((22647, 22664), 'numpy.zeros_like', 'np.zeros_like', (['y_'], {}), '(y_)\n', (22660, 22664), True, 'import numpy as np\n'), ((1794, 1808), 'numpy.ones', 'np.ones', (['(50,)'], {}), '((50,))\n', (1801, 1808), True, 'import numpy as np\n'), ((1846, 1860), 'numpy.ones', 'np.ones', (['(50,)'], {}), '((50,))\n', (1853, 1860), True, 'import numpy as np\n'), ((3480, 3490), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3488, 3490), True, 'import tensorflow.python.keras.backend as K\n'), ((28154, 28173), 'numpy.maximum', 'np.maximum', (['(0)', 'w_l_'], {}), '(0, w_l_)\n', (28164, 28173), True, 'import numpy as np\n'), ((28202, 28221), 'numpy.minimum', 'np.minimum', (['(0)', 'w_l_'], {}), '(0, w_l_)\n', (28212, 28221), True, 'import numpy as np\n'), ((28268, 28287), 'numpy.maximum', 'np.maximum', (['(0)', 'w_u_'], {}), '(0, w_u_)\n', (28278, 28287), True, 'import numpy as np\n'), ((28316, 28335), 'numpy.minimum', 'np.minimum', (['(0)', 'w_u_'], {}), '(0, w_u_)\n', (28326, 28335), True, 'import numpy as np\n'), ((18740, 18759), 'numpy.maximum', 'np.maximum', (['(0)', 'w_l_'], {}), '(0, w_l_)\n', (18750, 18759), True, 'import numpy as np\n'), ((18788, 18807), 'numpy.minimum', 'np.minimum', (['(0)', 'w_l_'], {}), '(0, w_l_)\n', (18798, 18807), True, 'import numpy as np\n'), ((18858, 18877), 'numpy.maximum', 'np.maximum', (['(0)', 'w_u_'], {}), '(0, w_u_)\n', (18868, 18877), True, 'import numpy as np\n'), ((18906, 18925), 'numpy.minimum', 'np.minimum', (['(0)', 'w_u_'], {}), '(0, w_u_)\n', (18916, 18925), True, 'import numpy as np\n'), 
((22184, 22203), 'numpy.maximum', 'np.maximum', (['(0)', 'w_l_'], {}), '(0, w_l_)\n', (22194, 22203), True, 'import numpy as np\n'), ((22232, 22251), 'numpy.minimum', 'np.minimum', (['(0)', 'w_l_'], {}), '(0, w_l_)\n', (22242, 22251), True, 'import numpy as np\n'), ((22302, 22321), 'numpy.maximum', 'np.maximum', (['(0)', 'w_u_'], {}), '(0, w_u_)\n', (22312, 22321), True, 'import numpy as np\n'), ((22350, 22369), 'numpy.minimum', 'np.minimum', (['(0)', 'w_u_'], {}), '(0, w_u_)\n', (22360, 22369), True, 'import numpy as np\n')] |
# --------------
import pandas as pd
import numpy as np
# Load the IPL ball-by-ball dataset (``path`` is supplied by the grading environment).
#path = 'ipl_dataset.csv'
df_ipl = pd.read_csv(path)
df_ipl.shape
# Unique host cities across all recorded matches.
#df_ipl['city']
print('Unique Cities',df_ipl['city'].unique())
# Per-column null counts, to spot columns needing cleaning.
#df_ipl.isna().sum()
print('Null Values')
print(df_ipl.isnull().sum())
#how many non-null values are there?
#df_ipl.notnull().sum()
# Top 5 venues by number of distinct matches hosted.
#df_ipl['venue'].value_counts().head(5)
#1. get no. of mayches played in each stadium
print('Venues details')
venue_match_counts = df_ipl.groupby('venue')['match_code'].nunique()
print(venue_match_counts.sort_values(ascending=False).head(5))
# Frequency table of runs scored per delivery, ordered by run value.
print('Runs Frequency Table')
print(df_ipl['runs'].value_counts().sort_index())
# How many seasons were played and in which year they were played
#
#df_ipl.loc[0,'date'].split('-')[0]
#df_ipl.loc[0,'date'][0:4]
#df_ipl['date'].apply(lambda x : x[0:4])
def slice_it(x):
    """Return the first four characters of *x* (the year part of a date string)."""
    year_part = x[:4]
    return year_part
# Extract the season/year from the date column (YYYY-MM-DD -> YYYY)
year = df_ipl['date'].apply(slice_it)
print(df_ipl['date'].apply(slice_it).unique())
print(df_ipl['date'].apply(slice_it).nunique())
# No. of matches played per season
df_ipl['year'] = year
print('Matches per season')
Matches_per_season = df_ipl.groupby('year')['match_code'].nunique()
print(Matches_per_season)
# Total runs across the seasons
total_runs_per_seasons = df_ipl.groupby('year')['total'].sum()
print('Total runs across the seasons')
print(total_runs_per_seasons)
# Teams who have scored more than 200+ runs. Show the top 10 results
high_scoring_teams = df_ipl.groupby(['match_code','inning','team1','team2'])['total'].sum().reset_index()
# NOTE(review): the next two expressions are notebook residue -- their
# results are discarded (nothing is printed or assigned).
high_scoring_teams[high_scoring_teams['total']>200]
high_scoring_teams.nlargest(10,'total')
# What are the chances of chasing 200+ target
inn_1_teams = high_scoring_teams[high_scoring_teams['inning']==1]
inn_2_teams = high_scoring_teams[high_scoring_teams['inning']==2]
# merge by match: total_x = first-innings total, total_y = second-innings total
high_scoring = inn_1_teams.merge(inn_2_teams[['match_code','inning','total']],on='match_code')
print(high_scoring)
high_scoring['chased'] = np.where(high_scoring['total_y'] > high_scoring['total_x'],'yes','no')
chance = high_scoring['chased'].value_counts()
# percentage of matches where the second innings out-scored the first
print(chance['yes']/(chance['yes']+chance['no'])*100)
# Which team has the highest win count in their respective seasons ?
df_ipl.drop_duplicates(subset='match_code').groupby('year')['winner'].value_counts()
| [
"pandas.read_csv",
"numpy.where"
] | [((131, 148), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (142, 148), True, 'import pandas as pd\n'), ((2118, 2190), 'numpy.where', 'np.where', (["(high_scoring['total_y'] > high_scoring['total_x'])", '"""yes"""', '"""no"""'], {}), "(high_scoring['total_y'] > high_scoring['total_x'], 'yes', 'no')\n", (2126, 2190), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 7 15:50:28 2020
@author: <NAME>
"""
import numpy as np
from . import Protocols as prtcls
from memory_profiler import profile
from scipy.sparse import csr_matrix,lil_matrix
from scipy.sparse.csc import csc_matrix
from scipy.sparse import hstack
DEBUG = False  # module-level debug switch (appears unused in this module)
# Allowed values for the ``Type`` argument of calOlsCovMat
TypeEnum = tuple(['multi','single'])
# Allowed values for the ``error`` argument of error()
ErrorEnum = tuple(['mse','mae'])
# Shared protocol/validation object applied to inputs (see Protocols module)
oPrtclsData = prtcls.CProtocolData()
# Optional CUDA backend; when set, covariance work is delegated to it
oCuda = None
# When True, the matrix helpers take the sparse '@'-operator code paths
sparseFlag = False
def matMul(x,y):
    """Matrix product of two arrays.

    x: left operand
    y: right operand

    Uses the ``@`` operator when the module-level ``sparseFlag`` is set
    (scipy sparse inputs), and ``np.matmul`` otherwise.
    """
    if not sparseFlag:
        return np.matmul(x, y)
    return x @ y
def calCovariance(x,y):
    """Covariance-style product ``x.T @ y`` of two matrices.

    x: left matrix
    y: right matrix

    Dense inputs go through ``np.matmul``; when the module-level
    ``sparseFlag`` is set, the sparse ``@`` operator is used directly.
    """
    if not sparseFlag:
        return np.matmul(x.T, y)
    return x.T @ y
def calSelfCovariance(x):
    """Gram matrix ``x.T @ x`` of the input matrix.

    Dense inputs use ``np.matmul``; sparse inputs (module-level
    ``sparseFlag``) use the ``@`` operator.
    """
    if not sparseFlag:
        return np.matmul(x.T, x)
    return x.T @ x
def genLagMat(x,lags,Zeropad:bool = True,bias =True): #
    '''
    Build the time-lagged design matrix for a (samples x variables) input.

    x: input matrix, shape (nSamples, nVar); dense ndarray or scipy csc_matrix
    lags: list-like of integer sample lags (must support len()); each entry
        shifts a copy of x by that many samples
    Zeropad: when False, rows that would only contain zero padding are
        removed via truncate()
    bias: when True, a leading column of ones is prepended

    Returns a (nSamples, nVar*nLags [+1]) matrix; converted to csr_matrix
    when the module-level sparseFlag is set.

    see also 'lagGen' in mTRF-Toolbox https://github.com/mickcrosse/mTRF-Toolbox
    #To Do:
    make warning when absolute lag value is bigger than the number of samples
    implement the zeropad part
    '''
    # oPrtclsData(x)
    nLags = len(lags)
    nSamples = x.shape[0]
    nVar = x.shape[1]
    # lagMatrix = np.zeros((nSamples,nVar*nLags))
    # print(type(x),isinstance(x, csr_matrix))
    # sparse input gets a lil_matrix (cheap incremental slicing assignment)
    if isinstance(x, csc_matrix):
        lagMatrix = lil_matrix((nSamples,nVar*nLags))
        # x = x.toarray()
    else:
        lagMatrix = np.zeros((nSamples,nVar*nLags))
    # print(type(lagMatrix),lagMatrix)
    # each lag occupies a contiguous band of nVar columns
    for idx,lag in enumerate(lags):
        colSlice = slice(idx * nVar,(idx + 1) * nVar)
        if lag < 0:
            # negative lag: shift x upward, leaving zeros at the bottom
            lagMatrix[0:nSamples + lag,colSlice] = x[-lag:,:]
        elif lag > 0:
            # positive lag: shift x downward, leaving zeros at the top
            lagMatrix[lag:nSamples,colSlice] = x[0:nSamples-lag,:]
        else:
            lagMatrix[:,colSlice] = x
    if not Zeropad:
        # drop the zero-padded edge rows determined by the extreme lags
        lagMatrix = truncate(lagMatrix,lags[0],lags[-1])
    if bias:
        if isinstance(x, csc_matrix):
            ones = lil_matrix((lagMatrix.shape[0],1))
            ones[:] = 1
            lagMatrix = hstack([ones,lagMatrix])
        else:
            lagMatrix = np.concatenate([np.ones((lagMatrix.shape[0],1)),lagMatrix],1);
    # print(lagMatrix.shape)
    if sparseFlag:
        lagMatrix = csr_matrix(lagMatrix)
    return lagMatrix
def genRegMat(n:int, method = 'ridge'):
    """Build an (n, n) regularization matrix for the specified method.

    method: 'ridge' gives an identity matrix with an unpenalized bias term;
    'Tikhonov' gives a second-derivative smoothness penalty with boundary
    corrections; any other value yields an all-zero matrix.
    see also regmat.m in mTRF-Toolbox https://github.com/mickcrosse/mTRF-Toolbox
    """
    if method == 'ridge':
        reg = np.identity(n)
        reg[0, 0] = 0  # do not penalize the bias column
        return reg
    if method == 'Tikhonov':
        off = 0.5 * np.ones(n - 1)
        # tridiagonal: 1 on the diagonal, -0.5 on both neighbours
        reg = np.identity(n) - np.diag(off, 1) - np.diag(off, -1)
        # boundary corrections plus an unpenalized bias row/column
        reg[1, 1] = 0.5
        reg[n - 1, n - 1] = 0.5
        reg[0, 0] = 0
        reg[0, 1] = 0
        reg[1, 0] = 0
        return reg
    return np.zeros((n, n))
# @profile
def calOlsCovMat(x,y,lags,Type = 'multi',Zeropad = True):
    """Compute the OLS covariance matrices for a lagged linear model.

    x: stimulus/input matrix (samples x features)
    y: response matrix (samples x outputs)
    lags: integer sample lags forwarded to genLagMat
    Type: must be a member of TypeEnum; only 'multi' is implemented
    Zeropad: when False, y is truncated to the range unaffected by the
        zero padding introduced by the lag matrix

    Returns (Cxx, Cxy): the autocovariance of the lagged design matrix and
    its cross-covariance with y.  Work is delegated to the CUDA backend
    when the module-level ``oCuda`` is configured.

    Raises NotImplementedError for Type values other than 'multi'
    (previously that path crashed with an unrelated unbound-variable
    NameError on the return statement).
    """
    assert Type in TypeEnum
    if not Zeropad:
        # drop samples that would only see zero padding
        y = truncate(y,lags[0],lags[-1])
    if Type == 'multi':
        xLag = genLagMat(x,lags,Zeropad)
        if oCuda is None:
            Cxx = calSelfCovariance(xLag)
            Cxy = calCovariance(xLag,y)
        else:
            Cxx = oCuda.calSelfCovariance(xLag)
            Cxy = oCuda.calCovariance(xLag,y)
    else:
        # 'single' is declared in TypeEnum but was never implemented;
        # fail loudly instead of hitting NameError on Cxx below.
        raise NotImplementedError("Type=%r is not implemented" % (Type,))
    return Cxx, Cxy
def msec2Idxs(msecRange,fs):
    """Convert an inclusive [min, max] millisecond range to sample indexes.

    msecRange: two-element sequence (start_ms, stop_ms)
    fs: sampling rate in Hz
    Returns the list of integer sample lags covering the range; both
    endpoints are included.
    """
    assert len(msecRange) == 2
    startSec = msecRange[0] / 1e3
    stopSec = msecRange[1] / 1e3
    first = int(np.floor(startSec * fs))
    last = int(np.ceil(stopSec * fs))
    return list(range(first, last + 1))
def Idxs2msec(lags,fs):
    """Convert a list of sample lags to their millisecond equivalents.

    lags: sequence of integer sample lags
    fs: sampling rate in Hz
    Returns a list of float milliseconds; both endpoints are included.
    """
    return list(np.array(lags) / fs * 1e3)
def truncate(x,tminIdx,tmaxIdx):
    """Trim the rows of *x* affected by the extreme lag shifts.

    Keeps rows in [max(0, tmaxIdx), len(x) + min(0, tminIdx)): a positive
    maximum lag removes leading rows, a negative minimum lag removes
    trailing rows; both endpoints of the lag range are accounted for.
    """
    start = max(0, tmaxIdx)
    stop = len(x) + min(0, tminIdx)
    return x[start:stop]
def pearsonr(x,y):
    """Column-wise Pearson correlation coefficient between x and y.

    Inputs are first normalized through the module-level protocol object,
    then the correlation is computed with the textbook sum formulation.
    """
    x,y = oPrtclsData(x,y)
    n = len(x)
    sx = np.sum(x, 0)
    sy = np.sum(y, 0)
    varTermX = np.sum(x ** 2, 0) - sx ** 2 / n
    varTermY = np.sum(y ** 2, 0) - sy ** 2 / n
    covTerm = np.sum(x * y, 0) - sx * sy / n
    return covTerm / np.sqrt(varTermX * varTermY)
def error(x,y,error = 'mse'):
    """Column-wise prediction error between x and y.

    error: 'mse' (mean squared error) or 'mae' (mean absolute error);
    validated against the module-level ErrorEnum.  Note the parameter
    deliberately shadows the function name inside the body.
    """
    assert error in ErrorEnum
    # normalize/validate the input pair via the shared protocol object
    x,y = oPrtclsData(x,y)
    ans = None
    if error == 'mse':
        ans = np.sum(np.abs(x - y)**2, 0)/len(x)
    elif error == 'mae':
        ans = np.sum(np.abs(x - y),0)/len(x)
return ans | [
"numpy.sum",
"numpy.ceil",
"numpy.abs",
"numpy.floor",
"numpy.zeros",
"numpy.identity",
"numpy.ones",
"scipy.sparse.lil_matrix",
"scipy.sparse.csr_matrix",
"numpy.array",
"numpy.matmul",
"scipy.sparse.hstack"
] | [((4713, 4727), 'numpy.array', 'np.array', (['lags'], {}), '(lags)\n', (4721, 4727), True, 'import numpy as np\n'), ((5047, 5059), 'numpy.sum', 'np.sum', (['x', '(0)'], {}), '(x, 0)\n', (5053, 5059), True, 'import numpy as np\n'), ((5070, 5082), 'numpy.sum', 'np.sum', (['y', '(0)'], {}), '(y, 0)\n', (5076, 5082), True, 'import numpy as np\n'), ((638, 653), 'numpy.matmul', 'np.matmul', (['x', 'y'], {}), '(x, y)\n', (647, 653), True, 'import numpy as np\n'), ((994, 1011), 'numpy.matmul', 'np.matmul', (['x.T', 'y'], {}), '(x.T, y)\n', (1003, 1011), True, 'import numpy as np\n'), ((1340, 1357), 'numpy.matmul', 'np.matmul', (['x.T', 'x'], {}), '(x.T, x)\n', (1349, 1357), True, 'import numpy as np\n'), ((2094, 2130), 'scipy.sparse.lil_matrix', 'lil_matrix', (['(nSamples, nVar * nLags)'], {}), '((nSamples, nVar * nLags))\n', (2104, 2130), False, 'from scipy.sparse import csr_matrix, lil_matrix\n'), ((2184, 2218), 'numpy.zeros', 'np.zeros', (['(nSamples, nVar * nLags)'], {}), '((nSamples, nVar * nLags))\n', (2192, 2218), True, 'import numpy as np\n'), ((3010, 3031), 'scipy.sparse.csr_matrix', 'csr_matrix', (['lagMatrix'], {}), '(lagMatrix)\n', (3020, 3031), False, 'from scipy.sparse import csr_matrix, lil_matrix\n'), ((3348, 3362), 'numpy.identity', 'np.identity', (['n'], {}), '(n)\n', (3359, 3362), True, 'import numpy as np\n'), ((2729, 2764), 'scipy.sparse.lil_matrix', 'lil_matrix', (['(lagMatrix.shape[0], 1)'], {}), '((lagMatrix.shape[0], 1))\n', (2739, 2764), False, 'from scipy.sparse import csr_matrix, lil_matrix\n'), ((2812, 2837), 'scipy.sparse.hstack', 'hstack', (['[ones, lagMatrix]'], {}), '([ones, lagMatrix])\n', (2818, 2837), False, 'from scipy.sparse import hstack\n'), ((3441, 3455), 'numpy.identity', 'np.identity', (['n'], {}), '(n)\n', (3452, 3455), True, 'import numpy as np\n'), ((3709, 3725), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (3717, 3725), True, 'import numpy as np\n'), ((5191, 5207), 'numpy.sum', 'np.sum', (['(x * y)', '(0)'], 
{}), '(x * y, 0)\n', (5197, 5207), True, 'import numpy as np\n'), ((4497, 4516), 'numpy.floor', 'np.floor', (['(tmin * fs)'], {}), '(tmin * fs)\n', (4505, 4516), True, 'import numpy as np\n'), ((5102, 5119), 'numpy.sum', 'np.sum', (['(x ** 2)', '(0)'], {}), '(x ** 2, 0)\n', (5108, 5119), True, 'import numpy as np\n'), ((5138, 5155), 'numpy.sum', 'np.sum', (['(y ** 2)', '(0)'], {}), '(y ** 2, 0)\n', (5144, 5155), True, 'import numpy as np\n'), ((2891, 2923), 'numpy.ones', 'np.ones', (['(lagMatrix.shape[0], 1)'], {}), '((lagMatrix.shape[0], 1))\n', (2898, 2923), True, 'import numpy as np\n'), ((4520, 4538), 'numpy.ceil', 'np.ceil', (['(tmax * fs)'], {}), '(tmax * fs)\n', (4527, 4538), True, 'import numpy as np\n'), ((5398, 5411), 'numpy.abs', 'np.abs', (['(x - y)'], {}), '(x - y)\n', (5404, 5411), True, 'import numpy as np\n'), ((5472, 5485), 'numpy.abs', 'np.abs', (['(x - y)'], {}), '(x - y)\n', (5478, 5485), True, 'import numpy as np\n'), ((3492, 3506), 'numpy.ones', 'np.ones', (['(n - 1)'], {}), '(n - 1)\n', (3499, 3506), True, 'import numpy as np\n'), ((3518, 3532), 'numpy.ones', 'np.ones', (['(n - 1)'], {}), '(n - 1)\n', (3525, 3532), True, 'import numpy as np\n')] |
import numpy as np
import xarray as xr
import pickle
import serialization
from copy import deepcopy
import pytest
def test_xarray():
    # Round-trip an xarray DataArray (and nested containers of arrays)
    # through the project serializer, with and without an encryption key.
    arr = np.ones((128, 256, 256), dtype=np.float32)
    arr[8, 8, 8] = 2  # one distinct value so content comparisons are meaningful
    arr = xr.DataArray(arr)
    arrpkl = pickle.dumps(arr)
    data = [
        1, "2", arr,
        [3, "4", deepcopy(arr), {"arr": deepcopy(arr)}],
        {4: "5", "6": 7, "arr": deepcopy(arr), "lst": [deepcopy(arr)]}
    ]
    datapkl = pickle.dumps(data)
    for key in (None, "key".encode()):
        ser = serialization.serialize(arr, key)
        # the serialized form should be substantially smaller than pickle
        assert len(ser) < 0.5 * len(arrpkl)
        deser = serialization.deserialize(ser, key)
        assert np.all(arr.data == deser.data)
        ser = serialization.serialize(data, key)
        assert len(ser) < len(datapkl)
        assert len(ser) < len(arrpkl)
        deser = serialization.deserialize(ser, key)
        # every nested array must survive the round trip intact
        assert np.all(deser[2].data == arr.data)
        assert np.all(deser[3][2].data == arr.data)
        assert np.all(deser[3][3]["arr"].data == arr.data)
        assert np.all(deser[4]["arr"].data == arr.data)
        assert np.all(deser[4]["lst"][0].data == arr.data)
    with pytest.raises(RuntimeError):
        # deserializing with the wrong key must fail loudly
        serialization.deserialize(serialization.serialize(arr, key), "nokey".encode())
| [
"copy.deepcopy",
"serialization.serialize",
"numpy.ones",
"pytest.raises",
"xarray.DataArray",
"serialization.deserialize",
"numpy.all",
"pickle.dumps"
] | [((145, 187), 'numpy.ones', 'np.ones', (['(128, 256, 256)'], {'dtype': 'np.float32'}), '((128, 256, 256), dtype=np.float32)\n', (152, 187), True, 'import numpy as np\n'), ((219, 236), 'xarray.DataArray', 'xr.DataArray', (['arr'], {}), '(arr)\n', (231, 236), True, 'import xarray as xr\n'), ((250, 267), 'pickle.dumps', 'pickle.dumps', (['arr'], {}), '(arr)\n', (262, 267), False, 'import pickle\n'), ((451, 469), 'pickle.dumps', 'pickle.dumps', (['data'], {}), '(data)\n', (463, 469), False, 'import pickle\n'), ((524, 557), 'serialization.serialize', 'serialization.serialize', (['arr', 'key'], {}), '(arr, key)\n', (547, 557), False, 'import serialization\n'), ((618, 653), 'serialization.deserialize', 'serialization.deserialize', (['ser', 'key'], {}), '(ser, key)\n', (643, 653), False, 'import serialization\n'), ((669, 699), 'numpy.all', 'np.all', (['(arr.data == deser.data)'], {}), '(arr.data == deser.data)\n', (675, 699), True, 'import numpy as np\n'), ((715, 749), 'serialization.serialize', 'serialization.serialize', (['data', 'key'], {}), '(data, key)\n', (738, 749), False, 'import serialization\n'), ((843, 878), 'serialization.deserialize', 'serialization.deserialize', (['ser', 'key'], {}), '(ser, key)\n', (868, 878), False, 'import serialization\n'), ((894, 927), 'numpy.all', 'np.all', (['(deser[2].data == arr.data)'], {}), '(deser[2].data == arr.data)\n', (900, 927), True, 'import numpy as np\n'), ((943, 979), 'numpy.all', 'np.all', (['(deser[3][2].data == arr.data)'], {}), '(deser[3][2].data == arr.data)\n', (949, 979), True, 'import numpy as np\n'), ((995, 1038), 'numpy.all', 'np.all', (["(deser[3][3]['arr'].data == arr.data)"], {}), "(deser[3][3]['arr'].data == arr.data)\n", (1001, 1038), True, 'import numpy as np\n'), ((1054, 1094), 'numpy.all', 'np.all', (["(deser[4]['arr'].data == arr.data)"], {}), "(deser[4]['arr'].data == arr.data)\n", (1060, 1094), True, 'import numpy as np\n'), ((1110, 1153), 'numpy.all', 'np.all', (["(deser[4]['lst'][0].data == 
arr.data)"], {}), "(deser[4]['lst'][0].data == arr.data)\n", (1116, 1153), True, 'import numpy as np\n'), ((1168, 1195), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (1181, 1195), False, 'import pytest\n'), ((320, 333), 'copy.deepcopy', 'deepcopy', (['arr'], {}), '(arr)\n', (328, 333), False, 'from copy import deepcopy\n'), ((392, 405), 'copy.deepcopy', 'deepcopy', (['arr'], {}), '(arr)\n', (400, 405), False, 'from copy import deepcopy\n'), ((1231, 1264), 'serialization.serialize', 'serialization.serialize', (['arr', 'key'], {}), '(arr, key)\n', (1254, 1264), False, 'import serialization\n'), ((343, 356), 'copy.deepcopy', 'deepcopy', (['arr'], {}), '(arr)\n', (351, 356), False, 'from copy import deepcopy\n'), ((415, 428), 'copy.deepcopy', 'deepcopy', (['arr'], {}), '(arr)\n', (423, 428), False, 'from copy import deepcopy\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Test for utils
from inspect import signature
import numpy as np
import xarray as xr
from xclim.core.indicator import Daily
from xclim.core.utils import (
ensure_chunk_size,
nan_calc_percentiles,
walk_map,
wrapped_partial,
)
def test_walk_map():
    """walk_map must apply the function to every leaf of a nested mapping."""
    nested = {"a": -1, "b": {"c": -2}}
    zeroed = walk_map(nested, lambda x: 0)
    assert zeroed["a"] == 0
    assert zeroed["b"]["c"] == 0
def test_wrapped_partial():
    # wrapped_partial should hide pre-bound parameters from the visible
    # signature while preserving behavior and the wrapped docstring.
    def func(a, b=1, c=1):
        """Docstring"""
        return (a, b, c)
    newf = wrapped_partial(func, b=2)
    # 'b' is bound, so only 'a' and 'c' remain in the signature
    assert list(signature(newf).parameters.keys()) == ["a", "c"]
    assert newf(1) == (1, 2, 1)
    newf = wrapped_partial(func, suggested=dict(c=2), b=2)
    # 'suggested' changes the default of 'c' but keeps it in the signature
    assert list(signature(newf).parameters.keys()) == ["a", "c"]
    assert newf(1) == (1, 2, 2)
    assert newf.__doc__ == func.__doc__
    # redefine func with **kwargs to exercise the variadic path
    def func(a, b=1, c=1, **kws):
        """Docstring"""
        return (a, b, c)
    newf = wrapped_partial(func, suggested=dict(c=2), a=2, b=2)
    # bound positionals disappear; the **kws catch-all remains visible
    assert list(signature(newf).parameters.keys()) == ["c", "kws"]
    assert newf() == (2, 2, 2)
def test_wrapped_indicator(tas_series):
    # An indicator built from a wrapped_partial-ed indice must accept only
    # the non-bound parameters and still compute correctly.
    def indice(
        tas: xr.DataArray,
        tas2: xr.DataArray = None,
        # NOTE(review): the default here is the builtin `float` *type*,
        # presumably a placeholder -- confirm intent.
        thresh: int = float,
        freq: str = "YS",
    ):
        # count days where tas is below the threshold (or below tas2)
        if tas2 is None:
            out = tas < thresh
        else:
            out = tas < tas2
        out = out.resample(time="YS").sum()
        out.attrs["units"] = "days"
        return out
    # ind1 binds tas2=None (threshold mode); ind2 binds thresh=None (pair mode)
    ind1 = Daily(
        realm="atmos",
        identifier="test_ind1",
        units="days",
        compute=wrapped_partial(indice, tas2=None),
    )
    ind2 = Daily(
        realm="atmos",
        identifier="test_ind2",
        units="days",
        compute=wrapped_partial(indice, thresh=None),
    )
    tas = tas_series(np.arange(366), start="2000-01-01")
    tas2 = tas_series(1 + np.arange(366), start="2000-01-01")
    # tas < tas2 everywhere, and tas < 1111 everywhere: all 366 days count
    assert ind2(tas, tas2) == 366
    assert ind1(tas, thresh=1111) == 366
def test_ensure_chunk_size():
    """ensure_chunk_size: identity on unchunked data, merges undersized chunks."""
    arr = xr.DataArray(np.zeros((20, 21, 20)), dims=("x", "y", "z"))
    # an unchunked array is returned untouched (same object)
    result = ensure_chunk_size(arr, x=10, y=-1)
    assert arr is result
    chunked = arr.chunk({"x": (1,) * 20, "y": (10, 10, 1), "z": (10, 10)})
    result = ensure_chunk_size(chunked, x=3, y=5, z=-1)
    # chunks smaller than the requested minimum are merged with neighbours
    assert result.chunks[0] == (3, 3, 3, 3, 3, 5)
    assert result.chunks[1] == (10, 11)
    assert result.chunks[2] == (20,)
class Test_nan_calc_percentiles:
    """Tests for nan_calc_percentiles: quantile types, 2-D input, NaN handling.

    Expected values are pinned to R's ``quantile()`` reference implementation
    (the ``type=`` variants map to the alpha/beta interpolation parameters).
    """
    def test_calc_perc_type7(self):
        # Exemple array from: https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method
        arr = np.asarray([15.0, 20.0, 35.0, 40.0, 50.0])
        res = nan_calc_percentiles(arr, percentiles=[40.0], alpha=1, beta=1)
        # The expected is from R `quantile(arr, probs=c(0.4), type=7)`
        assert res[()] == 29
    def test_calc_perc_type8(self):
        # Exemple array from: https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method
        arr = np.asarray(
            [[15.0, 20.0, 35.0, 40.0, 50.0], [15.0, 20.0, 35.0, 40.0, 50.0]]
        )
        res = nan_calc_percentiles(
            arr,
            percentiles=[40.0],
            alpha=1.0 / 3.0,
            beta=1.0 / 3.0,
        )
        # The expected is from R `quantile(arr, probs=c(0.4), type=8)`
        assert np.all(res[0][0] == 27)
        assert np.all(res[0][1] == 27)
    def test_calc_perc_2d(self):
        # Exemple array from: https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method
        arr = np.asarray(
            [[15.0, 20.0, 35.0, 40.0, 50.0], [15.0, 20.0, 35.0, 40.0, 50.0]]
        )
        # each row is reduced independently with the default interpolation
        res = nan_calc_percentiles(arr, percentiles=[40.0])
        # The expected is from R ` quantile(c(15.0, 20.0, 35.0, 40.0, 50.0), probs=0.4)`
        assert np.all(res[0][0] == 29)
        assert np.all(res[0][1] == 29)
    def test_calc_perc_nan(self):
        # an all-NaN input must yield NaN, not raise
        arr = np.asarray([np.NAN])
        res = nan_calc_percentiles(arr, percentiles=[50.0])
        assert np.isnan(res)
    def test_calc_perc_empty(self):
        # an empty input must also yield NaN
        arr = np.asarray([])
        res = nan_calc_percentiles(arr)
        assert np.isnan(res)
    def test_calc_perc_partial_nan(self):
        # NaNs are ignored; remaining values use type-8 interpolation
        arr = np.asarray([np.NaN, 41.0, 41.0, 43.0, 43.0])
        res = nan_calc_percentiles(arr, percentiles=[50.0], alpha=1 / 3.0, beta=1 / 3.0)
        # The expected is from R `quantile(arr, 0.5, type=8, na.rm = TRUE)`
        # Note that scipy mquantiles would give a different result here
        assert res[()] == 42.0
| [
"xclim.core.utils.nan_calc_percentiles",
"numpy.asarray",
"numpy.zeros",
"numpy.isnan",
"xclim.core.utils.walk_map",
"numpy.arange",
"inspect.signature",
"xclim.core.utils.ensure_chunk_size",
"xclim.core.utils.wrapped_partial",
"numpy.all"
] | [((355, 379), 'xclim.core.utils.walk_map', 'walk_map', (['d', '(lambda x: 0)'], {}), '(d, lambda x: 0)\n', (363, 379), False, 'from xclim.core.utils import ensure_chunk_size, nan_calc_percentiles, walk_map, wrapped_partial\n'), ((549, 575), 'xclim.core.utils.wrapped_partial', 'wrapped_partial', (['func'], {'b': '(2)'}), '(func, b=2)\n', (564, 575), False, 'from xclim.core.utils import ensure_chunk_size, nan_calc_percentiles, walk_map, wrapped_partial\n'), ((2114, 2147), 'xclim.core.utils.ensure_chunk_size', 'ensure_chunk_size', (['da'], {'x': '(10)', 'y': '(-1)'}), '(da, x=10, y=-1)\n', (2131, 2147), False, 'from xclim.core.utils import ensure_chunk_size, nan_calc_percentiles, walk_map, wrapped_partial\n'), ((2252, 2290), 'xclim.core.utils.ensure_chunk_size', 'ensure_chunk_size', (['dac'], {'x': '(3)', 'y': '(5)', 'z': '(-1)'}), '(dac, x=3, y=5, z=-1)\n', (2269, 2290), False, 'from xclim.core.utils import ensure_chunk_size, nan_calc_percentiles, walk_map, wrapped_partial\n'), ((1829, 1843), 'numpy.arange', 'np.arange', (['(366)'], {}), '(366)\n', (1838, 1843), True, 'import numpy as np\n'), ((2057, 2079), 'numpy.zeros', 'np.zeros', (['(20, 21, 20)'], {}), '((20, 21, 20))\n', (2065, 2079), True, 'import numpy as np\n'), ((2590, 2632), 'numpy.asarray', 'np.asarray', (['[15.0, 20.0, 35.0, 40.0, 50.0]'], {}), '([15.0, 20.0, 35.0, 40.0, 50.0])\n', (2600, 2632), True, 'import numpy as np\n'), ((2647, 2709), 'xclim.core.utils.nan_calc_percentiles', 'nan_calc_percentiles', (['arr'], {'percentiles': '[40.0]', 'alpha': '(1)', 'beta': '(1)'}), '(arr, percentiles=[40.0], alpha=1, beta=1)\n', (2667, 2709), False, 'from xclim.core.utils import ensure_chunk_size, nan_calc_percentiles, walk_map, wrapped_partial\n'), ((2956, 3032), 'numpy.asarray', 'np.asarray', (['[[15.0, 20.0, 35.0, 40.0, 50.0], [15.0, 20.0, 35.0, 40.0, 50.0]]'], {}), '([[15.0, 20.0, 35.0, 40.0, 50.0], [15.0, 20.0, 35.0, 40.0, 50.0]])\n', (2966, 3032), True, 'import numpy as np\n'), ((3069, 3147), 
'xclim.core.utils.nan_calc_percentiles', 'nan_calc_percentiles', (['arr'], {'percentiles': '[40.0]', 'alpha': '(1.0 / 3.0)', 'beta': '(1.0 / 3.0)'}), '(arr, percentiles=[40.0], alpha=1.0 / 3.0, beta=1.0 / 3.0)\n', (3089, 3147), False, 'from xclim.core.utils import ensure_chunk_size, nan_calc_percentiles, walk_map, wrapped_partial\n'), ((3293, 3316), 'numpy.all', 'np.all', (['(res[0][0] == 27)'], {}), '(res[0][0] == 27)\n', (3299, 3316), True, 'import numpy as np\n'), ((3332, 3355), 'numpy.all', 'np.all', (['(res[0][1] == 27)'], {}), '(res[0][1] == 27)\n', (3338, 3355), True, 'import numpy as np\n'), ((3499, 3575), 'numpy.asarray', 'np.asarray', (['[[15.0, 20.0, 35.0, 40.0, 50.0], [15.0, 20.0, 35.0, 40.0, 50.0]]'], {}), '([[15.0, 20.0, 35.0, 40.0, 50.0], [15.0, 20.0, 35.0, 40.0, 50.0]])\n', (3509, 3575), True, 'import numpy as np\n'), ((3612, 3657), 'xclim.core.utils.nan_calc_percentiles', 'nan_calc_percentiles', (['arr'], {'percentiles': '[40.0]'}), '(arr, percentiles=[40.0])\n', (3632, 3657), False, 'from xclim.core.utils import ensure_chunk_size, nan_calc_percentiles, walk_map, wrapped_partial\n'), ((3762, 3785), 'numpy.all', 'np.all', (['(res[0][0] == 29)'], {}), '(res[0][0] == 29)\n', (3768, 3785), True, 'import numpy as np\n'), ((3801, 3824), 'numpy.all', 'np.all', (['(res[0][1] == 29)'], {}), '(res[0][1] == 29)\n', (3807, 3824), True, 'import numpy as np\n'), ((3874, 3894), 'numpy.asarray', 'np.asarray', (['[np.NAN]'], {}), '([np.NAN])\n', (3884, 3894), True, 'import numpy as np\n'), ((3909, 3954), 'xclim.core.utils.nan_calc_percentiles', 'nan_calc_percentiles', (['arr'], {'percentiles': '[50.0]'}), '(arr, percentiles=[50.0])\n', (3929, 3954), False, 'from xclim.core.utils import ensure_chunk_size, nan_calc_percentiles, walk_map, wrapped_partial\n'), ((3970, 3983), 'numpy.isnan', 'np.isnan', (['res'], {}), '(res)\n', (3978, 3983), True, 'import numpy as np\n'), ((4035, 4049), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (4045, 4049), True, 'import 
numpy as np\n'), ((4064, 4089), 'xclim.core.utils.nan_calc_percentiles', 'nan_calc_percentiles', (['arr'], {}), '(arr)\n', (4084, 4089), False, 'from xclim.core.utils import ensure_chunk_size, nan_calc_percentiles, walk_map, wrapped_partial\n'), ((4105, 4118), 'numpy.isnan', 'np.isnan', (['res'], {}), '(res)\n', (4113, 4118), True, 'import numpy as np\n'), ((4176, 4220), 'numpy.asarray', 'np.asarray', (['[np.NaN, 41.0, 41.0, 43.0, 43.0]'], {}), '([np.NaN, 41.0, 41.0, 43.0, 43.0])\n', (4186, 4220), True, 'import numpy as np\n'), ((4235, 4309), 'xclim.core.utils.nan_calc_percentiles', 'nan_calc_percentiles', (['arr'], {'percentiles': '[50.0]', 'alpha': '(1 / 3.0)', 'beta': '(1 / 3.0)'}), '(arr, percentiles=[50.0], alpha=1 / 3.0, beta=1 / 3.0)\n', (4255, 4309), False, 'from xclim.core.utils import ensure_chunk_size, nan_calc_percentiles, walk_map, wrapped_partial\n'), ((1609, 1643), 'xclim.core.utils.wrapped_partial', 'wrapped_partial', (['indice'], {'tas2': 'None'}), '(indice, tas2=None)\n', (1624, 1643), False, 'from xclim.core.utils import ensure_chunk_size, nan_calc_percentiles, walk_map, wrapped_partial\n'), ((1763, 1799), 'xclim.core.utils.wrapped_partial', 'wrapped_partial', (['indice'], {'thresh': 'None'}), '(indice, thresh=None)\n', (1778, 1799), False, 'from xclim.core.utils import ensure_chunk_size, nan_calc_percentiles, walk_map, wrapped_partial\n'), ((1891, 1905), 'numpy.arange', 'np.arange', (['(366)'], {}), '(366)\n', (1900, 1905), True, 'import numpy as np\n'), ((592, 607), 'inspect.signature', 'signature', (['newf'], {}), '(newf)\n', (601, 607), False, 'from inspect import signature\n'), ((749, 764), 'inspect.signature', 'signature', (['newf'], {}), '(newf)\n', (758, 764), False, 'from inspect import signature\n'), ((1035, 1050), 'inspect.signature', 'signature', (['newf'], {}), '(newf)\n', (1044, 1050), False, 'from inspect import signature\n')] |
# -*- coding: utf-8 -*-
"""
It is better to use constant variables instead of hoping you spell the same
string correctly every time you use it. (Also it makes it much easier if a
string name changes)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# import utool
import six
import numpy as np
from collections import OrderedDict
import math
from os.path import join
import utool as ut
ut.noinject('[const]')  # utool guard against double injection of this module
# Angle constants (radians)
PI = math.pi
TAU = 2.0 * PI
# Mapping of semantic viewpoints to yaw angles
VIEWTEXT_TO_YAW_RADIANS = OrderedDict([
    ('right' , 0.000 * TAU,),
    ('frontright' , 0.125 * TAU,),
    ('front' , 0.250 * TAU,),
    ('frontleft' , 0.375 * TAU,),
    ('left' , 0.500 * TAU,),
    ('backleft' , 0.625 * TAU,),
    ('back' , 0.750 * TAU,),
    ('backright' , 0.875 * TAU,),
])
#VIEWTEXT_TO_QT_VIEWTEXT = {
#    'right' : 'right',
#    'frontright' : 'frontright',
#    'front' : 'front',
#    'frontleft' : 'frontleft',
#    'left' : 'left',
#    'backleft' : 'backleft',
#    'back' : 'back',
#    'backright' : 'backright',
#}
# Short one/two-letter aliases for each viewpoint label
YAWALIAS = {'frontleft': 'FL', 'frontright': 'FR', 'backleft': 'BL', 'backright': 'BR',
            'front': 'F', 'left': 'L', 'back': 'B', 'right': 'R', }
# Annotation quality labels
QUAL_EXCELLENT = 'excellent'
QUAL_GOOD = 'good'
QUAL_OK = 'ok'
QUAL_POOR = 'poor'
QUAL_JUNK = 'junk'
QUAL_UNKNOWN = 'UNKNOWN'
QUALITY_INT_TO_TEXT = OrderedDict([
    (5, QUAL_EXCELLENT,),
    (4, QUAL_GOOD,),
    (3, QUAL_OK,),
    (2, QUAL_POOR,),
    # 1 is deliberately absent here so the inversion below stays unambiguous.
    # NOTE(review): an older comment claimed 1 "will be mapped to poor", but
    # the patch below maps 1 -> QUAL_JUNK; confirm which is intended.
    (0, QUAL_JUNK,),
    (-1, QUAL_UNKNOWN,),
])
# invert before patching in 1, so text -> int is one-to-one
QUALITY_TEXT_TO_INT = ut.invert_dict(QUALITY_INT_TO_TEXT)
QUALITY_INT_TO_TEXT[1] = QUAL_JUNK
#QUALITY_TEXT_TO_INTS = ut.invert_dict(QUALITY_INT_TO_TEXT)
# text -> list of ints (after the patch, 'junk' maps to both 0 and 1)
QUALITY_TEXT_TO_INTS = ut.group_items(
    list(QUALITY_INT_TO_TEXT.keys()),
    list(QUALITY_INT_TO_TEXT.values()))
QUALITY_TEXT_TO_INTS[QUAL_UNKNOWN] = -1
QUALITY_INT_TO_TEXT[None] = QUALITY_INT_TO_TEXT[-1]
# Animal sex encodings
SEX_INT_TO_TEXT = {
    None: 'UNKNOWN NAME',
    -1 : 'UNKNOWN SEX',
    0 : 'Female',
    1 : 'Male',
}
SEX_TEXT_TO_INT = ut.invert_dict(SEX_INT_TO_TEXT)
class PATH_NAMES(object):
    """ Path names for internal IBEIS database.

    These are bare folder/file names; REL_PATHS joins them into paths
    relative to the database directory.
    """
    sqldb = '_ibeis_database.sqlite3'
    _ibsdb = '_ibsdb'
    cache = '_ibeis_cache'
    backups = '_ibeis_backups'
    # per-asset subfolder names
    chips = 'chips'
    figures = 'figures'
    flann = 'flann'
    images = 'images'
    trees = 'trees'
    nets = 'nets'
    uploads = 'uploads'
    detectimg = 'detectimg'
    thumbs = 'thumbs'
    trashdir = 'trashed_images'
    distinctdir = 'distinctiveness_model'
    scorenormdir = 'scorenorm'
    smartpatrol = 'smart_patrol'
    # Query Results (chipmatch dirs)
    qres = 'qres_new'
    bigcache = 'qres_bigcache_new'
class REL_PATHS(object):
    """ all paths are relative to ibs.dbdir """
    _ibsdb = PATH_NAMES._ibsdb
    trashdir = PATH_NAMES.trashdir
    # directories that live directly under <dbdir>/_ibsdb
    figures = join(_ibsdb, PATH_NAMES.figures)
    cache = join(_ibsdb, PATH_NAMES.cache)
    backups = join(_ibsdb, PATH_NAMES.backups)
    #chips = join(_ibsdb, PATH_NAMES.chips)
    images = join(_ibsdb, PATH_NAMES.images)
    trees = join(_ibsdb, PATH_NAMES.trees)
    nets = join(_ibsdb, PATH_NAMES.nets)
    uploads = join(_ibsdb, PATH_NAMES.uploads)
    # All computed dirs live in <dbdir>/_ibsdb/_ibeis_cache
    chips = join(cache, PATH_NAMES.chips)
    thumbs = join(cache, PATH_NAMES.thumbs)
    flann = join(cache, PATH_NAMES.flann)
    qres = join(cache, PATH_NAMES.qres)
    bigcache = join(cache, PATH_NAMES.bigcache)
    distinctdir = join(cache, PATH_NAMES.distinctdir)
# Directories that should be excluded from copy operations
EXCLUDE_COPY_REL_DIRS = [
    REL_PATHS.chips,
    REL_PATHS.cache,
    REL_PATHS.backups,
    REL_PATHS.figures,
    REL_PATHS.nets,
    join(PATH_NAMES._ibsdb, '_ibeis_cache*'),
    #'_ibsdb/_ibeis_cache',
    '_ibsdb/chips', # old path for caches
    './images', # the hotspotter images dir
]
# TODO: Remove anything under this block completely
UNKNOWN_LBLANNOT_ROWID = 0
UNKNOWN_NAME_ROWID = 0
UNKNOWN_SPECIES_ROWID = 0
# Names normalized to the standard UNKNOWN_NAME
ACCEPTED_UNKNOWN_NAMES = set(['Unassigned'])
# Label-type keys and the universal unknown sentinel
INDIVIDUAL_KEY = 'INDIVIDUAL_KEY'
SPECIES_KEY = 'SPECIES_KEY'
EMPTY_KEY = ''
UNKNOWN = '____'
KEY_DEFAULTS = {
    INDIVIDUAL_KEY : UNKNOWN,
    SPECIES_KEY : UNKNOWN,
}
# <UNFINISHED METADATA>
# We are letting wildbook do this metadata instead
# Define the special metadata for annotation
# Each entry: (key, UI label, type-or-allowed-values)
ROSEMARY_ANNOT_METADATA = [
    ('local_name' , 'Local name:', str),
    ('sun' , 'Sun:', ['FS', 'PS', 'NS']),
    ('wind' , 'Wind:', ['NW', 'LW', 'MW', 'SW']),
    ('rain' , 'Rain:', ['NR', 'LR', 'MR', 'HR']),
    ('cover' , 'Cover:', float),
    ('grass' , 'Grass:', ['less hf', 'less hk', 'less belly']),
    ('grass_color' , 'Grass Colour:', ['B', 'BG', 'GB', 'G']),
    ('grass_species' , 'Grass Species:', str),
    ('bush_type' , 'Bush type:', ['OG', 'LB', 'MB', 'TB']),
    ('bit' , 'Bit:', int),
    # NOTE(review): 'other_speceis' is misspelled but may already be
    # persisted as a key -- do not rename without a data migration.
    ('other_speceis' , 'Other Species:', str),
]
#ROSEMARY_KEYS = utool.get_list_column(ROSEMARY_ANNOT_METADATA, 0)
#KEY_DEFAULTS.update(**{key: UNKNOWN for key in ROSEMARY_KEYS})
# </UNFINISHED METADATA>
BASE_DATABASE_VERSION = '0.0.0'
#################################################################
# DO NOT DELETE FROM THE TABLE LIST, THE DATABASE UPDATER WILL BREAK!!!
# THIS GOES FOR OLD AND DEPRICATED TABLENAMES AS WELL!!!
# TODO:
#     What should happen is when they are depricated they should go into a
#     depricated tablename structure with the relevant versions suffixed
#################################################################
# SQL table-name constants: these values are schema identifiers and must
# never change once shipped.
AL_RELATION_TABLE = 'annotation_lblannot_relationship'
GA_RELATION_TABLE = 'annotgroup_annotation_relationship'
ANNOTGROUP_TABLE = 'annotgroups'
ANNOTATION_TABLE = 'annotations'
CHIP_TABLE = 'chips'
CONFIG_TABLE = 'configs'
CONTRIBUTOR_TABLE = 'contributors'
GSG_RELATION_TABLE = 'imageset_image_relationship'
IMAGESET_TABLE = 'imagesets'
FEATURE_TABLE = 'features'
FEATURE_WEIGHT_TABLE = 'feature_weights'
GL_RELATION_TABLE = 'image_lblimage_relationship'
IMAGE_TABLE = 'images'
LBLANNOT_TABLE = 'lblannot'
LBLIMAGE_TABLE = 'lblimage'
LBLTYPE_TABLE = 'keys'
METADATA_TABLE = 'metadata'
# Ugly move from name to names, need better way of versioning old table names
NAME_TABLE_v121 = 'name'
NAME_TABLE_v130 = 'names'
NAME_TABLE = NAME_TABLE_v130
ANNOTMATCH_TABLE = 'annotmatch'
SPECIES_TABLE = 'species'
RESIDUAL_TABLE = 'residuals'
VERSIONS_TABLE = 'versions'
#
PARTY_CONTRIB_RELATION_TABLE = 'party_contrib_relation'
PARTY_TABLE = 'party'
#################################################################
# DEPCACHE TABLENAMES
#CHIPTHUMB_TABLE = 'chipthumb'
# Display colors as 0-255 RGBA arrays
UNKNOWN_PURPLE_RGBA255 = np.array((102, 0, 153, 255))
NAME_BLUE_RGBA255 = np.array((20, 20, 235, 255))
NAME_RED_RGBA255 = np.array((235, 20, 20, 255))
NEW_YELLOW_RGBA255 = np.array((235, 235, 20, 255))
# Same colors normalized to the 0-1 range
UNKNOWN_PURPLE_RGBA01 = UNKNOWN_PURPLE_RGBA255 / 255.0
NAME_BLUE_RGBA01 = NAME_BLUE_RGBA255 / 255.0
NAME_RED_RGBA01 = NAME_RED_RGBA255 / 255.0
NEW_YELLOW_RGBA01 = NEW_YELLOW_RGBA255 / 255.0
# Reserved imageset display labels (the '*' prefix marks built-in sets)
EXEMPLAR_IMAGESETTEXT = '*Exemplars'
ALL_IMAGE_IMAGESETTEXT = '*All Images'
UNREVIEWED_IMAGE_IMAGESETTEXT = '*Undetected Images'
REVIEWED_IMAGE_IMAGESETTEXT = '*Reviewed Detections'
UNGROUPED_IMAGES_IMAGESETTEXT = '*Ungrouped Images'
SPECIAL_IMAGESET_LABELS = [
    EXEMPLAR_IMAGESETTEXT,
    ALL_IMAGE_IMAGESETTEXT,
    UNREVIEWED_IMAGE_IMAGESETTEXT,
    REVIEWED_IMAGE_IMAGESETTEXT,
    UNGROUPED_IMAGES_IMAGESETTEXT
]
NEW_IMAGESET_IMAGESETTEXT = 'NEW IMAGESET'
# Thumbnail filename suffixes (jpg replaced the older png variants)
#IMAGE_THUMB_SUFFIX = '_thumb.png'
#CHIP_THUMB_SUFFIX = '_chip_thumb.png'
IMAGE_THUMB_SUFFIX = '_thumb.jpg'
IMAGE_BARE_THUMB_SUFFIX = '_thumb_bare.jpg'
CHIP_THUMB_SUFFIX = '_chip_thumb.jpg'
VS_EXEMPLARS_KEY = 'vs_exemplars'
INTRA_OCCUR_KEY = 'intra_occurrence'
HARD_NOTE_TAG = '<HARDCASE>'
# HACK
# the production server at UIC defaults to the 'lewa2' wildbook deployment
if ut.get_computer_name() == 'ibeis.cs.uic.edu':
    #_DEFAULT_WILDBOOK_TARGET = 'prod'
    _DEFAULT_WILDBOOK_TARGET = 'lewa2'
else:
    _DEFAULT_WILDBOOK_TARGET = 'ibeis'
WILDBOOK_TARGET = ut.get_argval('--wildbook-target', type_=str, default=_DEFAULT_WILDBOOK_TARGET,
                              help_='specify the Wildbook target deployment')
class ZIPPED_URLS(object):
    """Download URLs for zipped test databases and pretrained models."""
    PZ_MTEST = 'https://lev.cs.rpi.edu/public/databases/PZ_MTEST.zip'
    NAUTS = 'https://lev.cs.rpi.edu/public/databases/NAUT_test.zip'
    WDS = 'https://lev.cs.rpi.edu/public/databases/wd_peter2.zip'
    PZ_DISTINCTIVE = 'https://lev.cs.rpi.edu/public/models/distinctivness_zebra_plains.zip'
    GZ_DISTINCTIVE = 'https://lev.cs.rpi.edu/public/models/distinctivness_zebra_grevys.zip'
# Text type alias compatible across Python 2 and 3.
if six.PY2:
    __STR__ = unicode  # Py2-only builtin; change to str if needed
else:
    __STR__ = str
# TODO: rename to same / different
# add add match, nomatch, notcomp
# Ternary match-truth encoding for annotation pairs.
TRUTH_UNKNOWN = 2
TRUTH_MATCH = 1
TRUTH_NOT_MATCH = 0
TRUTH_INT_TO_TEXT = {
    TRUTH_UNKNOWN : 'Unknown',
    TRUTH_NOT_MATCH : 'Not Matched',
    TRUTH_MATCH : 'Matched',
}
# Turn off features at Lewa :(
SIMPLIFY_INTERFACE = (ut.get_computer_name() == 'ibeis.cs.uic.edu') or ut.get_argflag('--simplify')
# For candidacy document
# Short database-name aliases (presumably for figure/table labels).
DBNAME_ALIAS = {
    #'NNP_MasterGIRM_core': 'NNP_GIRM'
    #'NNP_MasterGIRM_core': 'GIRM',
    'NNP_MasterGIRM_core': 'GIRM',
    'PZ_Master1': 'PZ',
    'GZ_Master1': 'GZ',
    'GIRM_Master1': 'GIRM',
    'GZ_ALL': 'GZ',
}
class TEST_SPECIES(object):
    """Canonical species code strings used by the test suite.

    Every attribute is a plain string species code.
    """
    BEAR_POLAR = 'bear_polar'
    BUILDING = 'building'
    GIR_RETICULATED = 'giraffe_reticulated'
    GIR_MASAI = 'giraffe_masai'
    # BUG FIX: these two previously ended with a trailing comma, which made
    # them 1-tuples ('whale_fluke',) rather than strings.  That silently
    # broke any string comparison against them and put tuples into
    # SPECIES_WITH_DETECTORS below.
    WHALE_FLUKE = 'whale_fluke'
    WHALE_HUMPBACK = 'whale_humpback'
    ZEB_GREVY = 'zebra_grevys'
    ZEB_HYBRID = 'zebra_hybrid'
    ZEB_PLAIN = 'zebra_plains'
    UNKNOWN = UNKNOWN  # module-level UNKNOWN constant (defined elsewhere in file)
# Species codes for which a trained detector exists.
SPECIES_WITH_DETECTORS = (
    TEST_SPECIES.ZEB_GREVY,
    TEST_SPECIES.ZEB_PLAIN,
    TEST_SPECIES.WHALE_FLUKE,
    TEST_SPECIES.WHALE_HUMPBACK,
)
| [
"utool.invert_dict",
"utool.noinject",
"numpy.array",
"utool.get_argflag",
"collections.OrderedDict",
"utool.get_computer_name",
"os.path.join",
"utool.get_argval"
] | [((424, 446), 'utool.noinject', 'ut.noinject', (['"""[const]"""'], {}), "('[const]')\n", (435, 446), True, 'import utool as ut\n'), ((552, 780), 'collections.OrderedDict', 'OrderedDict', (["[('right', 0.0 * TAU), ('frontright', 0.125 * TAU), ('front', 0.25 * TAU),\n ('frontleft', 0.375 * TAU), ('left', 0.5 * TAU), ('backleft', 0.625 *\n TAU), ('back', 0.75 * TAU), ('backright', 0.875 * TAU)]"], {}), "([('right', 0.0 * TAU), ('frontright', 0.125 * TAU), ('front', \n 0.25 * TAU), ('frontleft', 0.375 * TAU), ('left', 0.5 * TAU), (\n 'backleft', 0.625 * TAU), ('back', 0.75 * TAU), ('backright', 0.875 * TAU)]\n )\n", (563, 780), False, 'from collections import OrderedDict\n'), ((1459, 1579), 'collections.OrderedDict', 'OrderedDict', (['[(5, QUAL_EXCELLENT), (4, QUAL_GOOD), (3, QUAL_OK), (2, QUAL_POOR), (0,\n QUAL_JUNK), (-1, QUAL_UNKNOWN)]'], {}), '([(5, QUAL_EXCELLENT), (4, QUAL_GOOD), (3, QUAL_OK), (2,\n QUAL_POOR), (0, QUAL_JUNK), (-1, QUAL_UNKNOWN)])\n', (1470, 1579), False, 'from collections import OrderedDict\n'), ((1687, 1722), 'utool.invert_dict', 'ut.invert_dict', (['QUALITY_INT_TO_TEXT'], {}), '(QUALITY_INT_TO_TEXT)\n', (1701, 1722), True, 'import utool as ut\n'), ((2166, 2197), 'utool.invert_dict', 'ut.invert_dict', (['SEX_INT_TO_TEXT'], {}), '(SEX_INT_TO_TEXT)\n', (2180, 2197), True, 'import utool as ut\n'), ((7159, 7187), 'numpy.array', 'np.array', (['(102, 0, 153, 255)'], {}), '((102, 0, 153, 255))\n', (7167, 7187), True, 'import numpy as np\n'), ((7215, 7243), 'numpy.array', 'np.array', (['(20, 20, 235, 255)'], {}), '((20, 20, 235, 255))\n', (7223, 7243), True, 'import numpy as np\n'), ((7269, 7297), 'numpy.array', 'np.array', (['(235, 20, 20, 255)'], {}), '((235, 20, 20, 255))\n', (7277, 7297), True, 'import numpy as np\n'), ((7323, 7352), 'numpy.array', 'np.array', (['(235, 235, 20, 255)'], {}), '((235, 235, 20, 255))\n', (7331, 7352), True, 'import numpy as np\n'), ((8535, 8667), 'utool.get_argval', 'ut.get_argval', (['"""--wildbook-target"""'], 
{'type_': 'str', 'default': '_DEFAULT_WILDBOOK_TARGET', 'help_': '"""specify the Wildbook target deployment"""'}), "('--wildbook-target', type_=str, default=\n _DEFAULT_WILDBOOK_TARGET, help_='specify the Wildbook target deployment')\n", (8548, 8667), True, 'import utool as ut\n'), ((3046, 3078), 'os.path.join', 'join', (['_ibsdb', 'PATH_NAMES.figures'], {}), '(_ibsdb, PATH_NAMES.figures)\n', (3050, 3078), False, 'from os.path import join\n'), ((3094, 3124), 'os.path.join', 'join', (['_ibsdb', 'PATH_NAMES.cache'], {}), '(_ibsdb, PATH_NAMES.cache)\n', (3098, 3124), False, 'from os.path import join\n'), ((3140, 3172), 'os.path.join', 'join', (['_ibsdb', 'PATH_NAMES.backups'], {}), '(_ibsdb, PATH_NAMES.backups)\n', (3144, 3172), False, 'from os.path import join\n'), ((3235, 3266), 'os.path.join', 'join', (['_ibsdb', 'PATH_NAMES.images'], {}), '(_ibsdb, PATH_NAMES.images)\n', (3239, 3266), False, 'from os.path import join\n'), ((3282, 3312), 'os.path.join', 'join', (['_ibsdb', 'PATH_NAMES.trees'], {}), '(_ibsdb, PATH_NAMES.trees)\n', (3286, 3312), False, 'from os.path import join\n'), ((3328, 3357), 'os.path.join', 'join', (['_ibsdb', 'PATH_NAMES.nets'], {}), '(_ibsdb, PATH_NAMES.nets)\n', (3332, 3357), False, 'from os.path import join\n'), ((3373, 3405), 'os.path.join', 'join', (['_ibsdb', 'PATH_NAMES.uploads'], {}), '(_ibsdb, PATH_NAMES.uploads)\n', (3377, 3405), False, 'from os.path import join\n'), ((3481, 3510), 'os.path.join', 'join', (['cache', 'PATH_NAMES.chips'], {}), '(cache, PATH_NAMES.chips)\n', (3485, 3510), False, 'from os.path import join\n'), ((3526, 3556), 'os.path.join', 'join', (['cache', 'PATH_NAMES.thumbs'], {}), '(cache, PATH_NAMES.thumbs)\n', (3530, 3556), False, 'from os.path import join\n'), ((3572, 3601), 'os.path.join', 'join', (['cache', 'PATH_NAMES.flann'], {}), '(cache, PATH_NAMES.flann)\n', (3576, 3601), False, 'from os.path import join\n'), ((3617, 3645), 'os.path.join', 'join', (['cache', 'PATH_NAMES.qres'], {}), '(cache, 
PATH_NAMES.qres)\n', (3621, 3645), False, 'from os.path import join\n'), ((3661, 3693), 'os.path.join', 'join', (['cache', 'PATH_NAMES.bigcache'], {}), '(cache, PATH_NAMES.bigcache)\n', (3665, 3693), False, 'from os.path import join\n'), ((3712, 3747), 'os.path.join', 'join', (['cache', 'PATH_NAMES.distinctdir'], {}), '(cache, PATH_NAMES.distinctdir)\n', (3716, 3747), False, 'from os.path import join\n'), ((3947, 3987), 'os.path.join', 'join', (['PATH_NAMES._ibsdb', '"""_ibeis_cache*"""'], {}), "(PATH_NAMES._ibsdb, '_ibeis_cache*')\n", (3951, 3987), False, 'from os.path import join\n'), ((8348, 8370), 'utool.get_computer_name', 'ut.get_computer_name', ([], {}), '()\n', (8368, 8370), True, 'import utool as ut\n'), ((9582, 9610), 'utool.get_argflag', 'ut.get_argflag', (['"""--simplify"""'], {}), "('--simplify')\n", (9596, 9610), True, 'import utool as ut\n'), ((9533, 9555), 'utool.get_computer_name', 'ut.get_computer_name', ([], {}), '()\n', (9553, 9555), True, 'import utool as ut\n')] |
#numpyZeroArrays.py
# Demonstrate the two common ways of requesting zero-filled NumPy arrays.
import numpy as np
zeroArray = np.zeros(shape=25)   # flat vector of 25 zeros, shape (25,)
zeroArray2 = np.zeros([5, 5])    # 5x5 matrix of zeros
print(zeroArray)
print(zeroArray2) | [
"numpy.zeros"
] | [((52, 64), 'numpy.zeros', 'np.zeros', (['(25)'], {}), '(25)\n', (60, 64), True, 'import numpy as np\n'), ((78, 94), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {}), '((5, 5))\n', (86, 94), True, 'import numpy as np\n')] |
import matplotlib
# Non-interactive Agg backend: this script only writes image files.
matplotlib.use('agg')
from substorm_utils.signature_lists import get_model_signature_lists, get_obs_signature_lists
from substorm_utils.bin_listings import find_substorms_convolution, find_substorms, find_convolution_onsets
from datetime import datetime, timedelta
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import gaussian_kde
from substorm_utils.isi import get_isi
from substorm_utils.kde import get_kde_bootstrap
from matplotlib_utils import remove_overhanging_labels
from pytz import UTC
# Compact styling, sized for a small multi-panel publication figure.
matplotlib.rcParams['font.size']=8
matplotlib.rcParams['legend.handlelength']=0.7
matplotlib.rcParams['legend.borderpad']=0.2
matplotlib.rcParams['legend.borderaxespad']=0.2
matplotlib.rcParams['legend.handletextpad']=0.4
matplotlib.rcParams['legend.labelspacing']=0.25
matplotlib.rcParams['lines.linewidth']=0.75
# Simulation runs to compare against observations; alternates can be
# re-enabled by uncommenting.
run_properties=[
    {
        'name':'Hi-res w/ RCM',
        'path':'/data2/jhaiduce/substorms_Jan2005_young-comp'
    },
    #{
    #    'name':'Hi-res w/o RCM',
    #    'path':'/data2/jhaiduce/Jan2005_rerun'
    #},
    #{
    #    'name':'SWPC',
    #    'path':'/data1/jhaiduce/Jan2005_swpc'
    #},
    ]
# Thresholds handed to the convolution-based onset finder below.
model_threshold=2.5
obs_threshold=2.5
# Human-readable panel titles for each substorm-signature type.
signature_type_labels={
    'All':'All',
    'AL':'AL',
    'image':'IMAGE/FUV',
    'plasmoids':'Plasmoids',
    'dipolarizations':'Dipolarizations',
    'epdata':'LANL',
    'MPB':'MPB'
}
tstep=timedelta(0,1800)  # 30 minutes; usage not visible in this file section
from decouple import config
# Data directory comes from the environment/.env via python-decouple.
datadir=config('DATADIR')
def plot_isi(ax, bins, times, color, bw=None, show_ci=False):
    """Draw a kernel-density estimate of substorm waiting times on *ax*.

    *times* are onset times in seconds; waiting times are plotted in hours.
    Returns the plotted curve and a fill_between band (invisible unless
    show_ci is True).
    """
    from isi_functions import get_kde_cached, get_kde_ci
    # Inter-onset intervals, converted from seconds to hours.
    waiting_hours = get_isi(times) / 3600
    density = get_kde_cached(waiting_hours, bw, bins)
    if show_ci:
        # Confidence band around the KDE (the 2000 is presumably the
        # bootstrap resample count — see isi_functions.get_kde_ci).
        ci = get_kde_ci(waiting_hours, bins, 2000, bw)
        band = ax.fill_between(bins, ci[0], ci[2], facecolor=color, edgecolor='none', alpha=0.5)
    else:
        # Degenerate zero-height fill keeps the return signature uniform
        # without drawing a visible band.
        band = ax.fill_between(bins, density, density, facecolor='none', edgecolor='none', alpha=0.5)
    curve, = ax.plot(bins, density, color=color)
    return curve, band
def isi_subplot(ax,run_names,onset_type,bin_max=15):
    """Plot waiting-time (inter-substorm interval) KDEs for several runs.

    Parameters
    ----------
    ax : matplotlib axes to draw into
    run_names : list of run names; the special name 'obs' selects observations
    onset_type : signature-type key, or 'all' for combined convolution onsets
    bin_max : upper limit (hours) of the KDE evaluation grid

    Returns
    -------
    (handles, labels) suitable for ax.legend(); handles is a zip object
    pairing each curve with its confidence-band artist.
    """
    fills=[]
    lines=[]
    labels=[]
    for run_name in run_names:
        # 'obs' selects the observational signature lists; anything else is
        # looked up by name in the module-level run_properties.
        if run_name=='obs':
            signatures=get_obs_signature_lists(datadir=datadir)
            threshold=obs_threshold
        else:
            runprops,=[runprops for runprops in run_properties if runprops['name']==run_name]
            signatures=get_model_signature_lists(runprops,datadir=datadir)
            threshold=model_threshold
        if onset_type=='all':
            # Combine every signature type via the convolution onset finder
            # with a 10-minute smoothing bandwidth.
            onsets=find_convolution_onsets(signatures,threshold,bandwidth=timedelta(0,60*10))
            #onsets=[(onset-datetime(2005,1,1,tzinfo=UTC)).total_seconds() for onset in onsets]
            #print onsets
            #substorms,onsets=find_substorms(signatures,threshold=1,return_times=True,signature_filters=['AL'])
            #onsets=onsets.compressed()
            #print onsets
        else:
            onsets=signatures.get(onset_type,[])
        bins=np.linspace(0,bin_max,100)
        bw=0.2  # KDE bandwidth passed through to plot_isi (hours, presumably)
        # Require more than 3 onsets for a meaningful density estimate.
        if len(onsets)>3:
            if run_name=='obs':
                color='LightSteelBlue'
            else:
                # Color model runs consistently by their index in run_properties.
                all_runnames=[runprops['name'] for runprops in run_properties]
                irun=all_runnames.index(run_name)
                import matplotlib
                run_colors=matplotlib.rcParams['axes.prop_cycle'].by_key()['color']
                color=run_colors[irun]
            # Confidence bands only for observations and plasmoid signatures.
            if run_name=='obs' or onset_type=='plasmoids':
                show_ci=True
            else:
                show_ci=False
            line,fill=plot_isi(ax,bins,onsets,color,bw,show_ci=show_ci)
            lines.append(line)
            fills.append(fill)
            if run_name=='obs':
                labels.append('Observations')
            else:
                # With a single configured model run, label it generically.
                if len(run_properties)>1:
                    labels.append(run_name)
                else:
                    labels.append('MHD')
    return zip(lines,fills),labels
def make_isi_figure(run_names, onset_type, bin_max=15):
    """Create a standalone single-panel waiting-time distribution figure."""
    figure = plt.figure()
    axis = figure.add_subplot(1, 1, 1)
    curve_handles, curve_labels = isi_subplot(axis, run_names, onset_type, bin_max)
    # A legend is only useful when several curves need distinguishing.
    if len(curve_labels) > 1:
        axis.legend(curve_handles, curve_labels, loc='best')
    title = 'All signatures' if onset_type == 'all' else signature_type_labels[onset_type]
    axis.set_title(title)
    axis.set_ylabel('Probability density')
    axis.set_xlabel('Waiting time (h)')
    return figure
def make_tiled_isi_figure(onset_types):
    """Build a tiled figure of waiting-time distributions.

    *onset_types* is a nested list (rows of columns) of signature-type keys,
    one panel per entry.  NOTE(review): the subplot-label index (i*2+j), the
    legend placement (i==0, j==1) and the final range(2)xrange(2) cleanup
    loop all assume a 2x2 layout — confirm before using other shapes.
    """
    # Observations first, then every configured model run.
    run_names=[runprops['name'] for runprops in run_properties]
    run_names=['obs']+run_names
    fig=plt.figure(figsize=(5.5,1.5*len(onset_types)))
    from matplotlib.gridspec import GridSpec
    # Panels touch (hspace=wspace=0); outer margins leave room for labels.
    gs=GridSpec(len(onset_types),len(onset_types[0]),hspace=0,right=0.98,top=0.9,wspace=0,left=0.1,bottom=0.12)
    labelpos=(0.95,0.95)
    from string import ascii_lowercase
    subplot_labels=[ascii_lowercase[i] for i in range(6)]  # 'a', 'b', ...
    axes=[]
    for i in range(len(onset_types)):
        axes.append([])
        for j in range(len(onset_types[i])):
            # Share the y axis across each row so densities are comparable.
            if j>0:
                ax_kwargs={'sharey':axes[i][0]}
            else:
                ax_kwargs={}
            ax=fig.add_subplot(gs[i,j],**ax_kwargs)
            axes[i].append(ax)
            # Add a bold letter label in the panel's top-right corner
            label=subplot_labels[i*2+j]
            text=ax.text(labelpos[0],labelpos[1],label,transform=ax.transAxes,weight='bold',fontsize=11,verticalalignment='top',color='k',horizontalalignment='right')
            onset_type=onset_types[i][j]
            handles,labels=isi_subplot(ax,run_names,onset_type,bin_max=20)
            if onset_type=='all':
                title='All signatures'
            else:
                title=signature_type_labels[onset_type]
            ax.text(0.5,0.95,title,transform=ax.transAxes,fontsize=10,color='k',horizontalalignment='center',verticalalignment='top')
            # Only the leftmost column and the bottom row keep axis labels.
            if j==0:
                ax.set_ylabel('Probability density')
            else:
                plt.setp(ax.get_yticklabels(),visible=False)
                ax.set_ylabel('')
            if i==len(onset_types)-1:
                ax.set_xlabel('Waiting time (h)')
            else:
                plt.setp(ax.get_xticklabels(),visible=False)
                ax.set_xlabel('')
            ax.tick_params('x',which='both',direction='inout',top=True)
            ax.tick_params('y',which='both',direction='inout',top=True)
            # One shared legend, placed in the top-right panel.
            if i==0 and j==1:
                ax.legend(handles,labels,loc='center right')
    # Pin the density axis to start at zero (applied to the last panel;
    # rows share y axes with their own first column).
    ymin,ymax=ax.get_ylim()
    ax.set_ylim(0,ymax)
    # Draw once so tick-label geometry exists, then trim labels that would
    # spill past panel edges.
    fig.canvas.draw()
    for i in range(2):
        for j in range(2):
            ax=axes[i][j]
            remove_overhanging_labels(ax,fig,'x')
            remove_overhanging_labels(ax,fig,'y')
    return fig
if __name__=='__main__':
    # 2x2 panel layout: rows x columns of signature types to compare.
    fig=make_tiled_isi_figure([
        ['AL','dipolarizations'],
        ['MPB','all'],
        ])
    fig.savefig('isi.svg')
| [
"substorm_utils.signature_lists.get_model_signature_lists",
"substorm_utils.isi.get_isi",
"decouple.config",
"isi_functions.get_kde_ci",
"isi_functions.get_kde_cached",
"matplotlib.pyplot.figure",
"matplotlib.use",
"datetime.timedelta",
"matplotlib_utils.remove_overhanging_labels",
"numpy.linspace... | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (32, 39), False, 'import matplotlib\n'), ((1402, 1420), 'datetime.timedelta', 'timedelta', (['(0)', '(1800)'], {}), '(0, 1800)\n', (1411, 1420), False, 'from datetime import datetime, timedelta\n'), ((1457, 1474), 'decouple.config', 'config', (['"""DATADIR"""'], {}), "('DATADIR')\n", (1463, 1474), False, 'from decouple import config\n'), ((1630, 1659), 'isi_functions.get_kde_cached', 'get_kde_cached', (['isi', 'bw', 'bins'], {}), '(isi, bw, bins)\n', (1644, 1659), False, 'from isi_functions import get_kde_cached, get_kde_ci\n'), ((4041, 4053), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4051, 4053), True, 'from matplotlib import pyplot as plt\n'), ((1602, 1616), 'substorm_utils.isi.get_isi', 'get_isi', (['times'], {}), '(times)\n', (1609, 1616), False, 'from substorm_utils.isi import get_isi\n'), ((1690, 1721), 'isi_functions.get_kde_ci', 'get_kde_ci', (['isi', 'bins', '(2000)', 'bw'], {}), '(isi, bins, 2000, bw)\n', (1700, 1721), False, 'from isi_functions import get_kde_cached, get_kde_ci\n'), ((2982, 3010), 'numpy.linspace', 'np.linspace', (['(0)', 'bin_max', '(100)'], {}), '(0, bin_max, 100)\n', (2993, 3010), True, 'import numpy as np\n'), ((2169, 2209), 'substorm_utils.signature_lists.get_obs_signature_lists', 'get_obs_signature_lists', ([], {'datadir': 'datadir'}), '(datadir=datadir)\n', (2192, 2209), False, 'from substorm_utils.signature_lists import get_model_signature_lists, get_obs_signature_lists\n'), ((2390, 2442), 'substorm_utils.signature_lists.get_model_signature_lists', 'get_model_signature_lists', (['runprops'], {'datadir': 'datadir'}), '(runprops, datadir=datadir)\n', (2415, 2442), False, 'from substorm_utils.signature_lists import get_model_signature_lists, get_obs_signature_lists\n'), ((6727, 6766), 'matplotlib_utils.remove_overhanging_labels', 'remove_overhanging_labels', (['ax', 'fig', '"""x"""'], {}), "(ax, fig, 'x')\n", 
(6752, 6766), False, 'from matplotlib_utils import remove_overhanging_labels\n'), ((6777, 6816), 'matplotlib_utils.remove_overhanging_labels', 'remove_overhanging_labels', (['ax', 'fig', '"""y"""'], {}), "(ax, fig, 'y')\n", (6802, 6816), False, 'from matplotlib_utils import remove_overhanging_labels\n'), ((2585, 2606), 'datetime.timedelta', 'timedelta', (['(0)', '(60 * 10)'], {}), '(0, 60 * 10)\n', (2594, 2606), False, 'from datetime import datetime, timedelta\n')] |
import unittest
import vinnpy
import numpy
from contexts import contexts
from nose_parameterized import parameterized
class test_numpy_integration(unittest.TestCase):
    """Integration tests for moving data between numpy and vinnpy matrices.

    Each test is expanded once per context yielded by contexts()
    (presumably one per available compute backend — see the contexts module).
    """
    @parameterized.expand(contexts().enumerate, contexts().name)
    def test_create_with_numpy_matrix_succeeds(self, context):
        """A vinnpy matrix built from a numpy matrix preserves element values."""
        a = vinnpy.matrix(context, numpy.matrix([[1.0, 2.0], [3.0, 4.0]], dtype='float32'))
        self.assertEqual(a[0][0], 1.0)
        self.assertEqual(a[0][1], 2.0)
        self.assertEqual(a[1][0], 3.0)
        self.assertEqual(a[1][1], 4.0)
    @parameterized.expand(contexts().enumerate, contexts().name)
    def test_modifying_created_matrix_does_not_mutate_original(self, context):
        '''
        Ensure matrix constructor does not reference the original numpy array's
        memory and is mapped using the correct row-major memory layout
        '''
        array = numpy.matrix([[1.0, 2.0], [3.0, 4.0]], dtype='float32')
        a = vinnpy.matrix(context, array)
        a[0][0] = 42.0
        # The source numpy matrix must be untouched (copy semantics, not a view).
        self.assertEqual(array[0, 0], 1.0)
        self.assertEqual(array[0, 1], 2.0)
        self.assertEqual(array[1, 0], 3.0)
        self.assertEqual(array[1, 1], 4.0)
    @parameterized.expand(contexts().enumerate, contexts().name)
    def test_can_access_rows_as_numpy_arrays(self, context):
        '''
        Ensure rows are mapped to numpy arrays when accessed
        '''
        a = vinnpy.matrix(context, 2, 4, 7.0)
        self.assertEqual(4, len(a[0]))
        self.assertIsInstance(a[0], numpy.ndarray)
        numpy.testing.assert_array_equal(numpy.array([7.0, 7.0, 7.0, 7.0]), a[0])
        self.assertEqual(4, len(a[1]))
        self.assertIsInstance(a[1], numpy.ndarray)
        numpy.testing.assert_array_equal(numpy.array([7.0, 7.0, 7.0, 7.0]), a[1])
    @parameterized.expand(contexts().enumerate, contexts().name)
    def test_mutating_matrix_through_rows_accessed_as_numpy_arrays(self, context):
        '''
        Ensure the actual elements of the matrix can be mutated using
        a numpy array instead of just temporary copies
        '''
        a = vinnpy.matrix(context, 2, 4, 7.0)
        row = a[0]
        row[0] = 42.0
        self.assertEqual(42.0, a[0][0])
    @parameterized.expand(contexts().enumerate, contexts().name)
    def test_len_returns_row_count(self, context):
        """len() of a matrix reports its number of rows, not total elements."""
        a = vinnpy.matrix(context, 4, 2, 7.0)
        self.assertEqual(4, len(a))
    @parameterized.expand(contexts().enumerate, contexts().name)
    def test_row_iteration(self, context):
        """Iterating a matrix yields exactly one item per row."""
        row_count = 4
        a = vinnpy.matrix(context, row_count, 2, 7.0)
        rows_iterated = 0
        for row in a:
            rows_iterated += 1
        self.assertEqual(row_count, rows_iterated)
| [
"contexts.contexts",
"numpy.matrix",
"vinnpy.matrix",
"numpy.array"
] | [((882, 937), 'numpy.matrix', 'numpy.matrix', (['[[1.0, 2.0], [3.0, 4.0]]'], {'dtype': '"""float32"""'}), "([[1.0, 2.0], [3.0, 4.0]], dtype='float32')\n", (894, 937), False, 'import numpy\n'), ((950, 979), 'vinnpy.matrix', 'vinnpy.matrix', (['context', 'array'], {}), '(context, array)\n', (963, 979), False, 'import vinnpy\n'), ((1399, 1432), 'vinnpy.matrix', 'vinnpy.matrix', (['context', '(2)', '(4)', '(7.0)'], {}), '(context, 2, 4, 7.0)\n', (1412, 1432), False, 'import vinnpy\n'), ((2089, 2122), 'vinnpy.matrix', 'vinnpy.matrix', (['context', '(2)', '(4)', '(7.0)'], {}), '(context, 2, 4, 7.0)\n', (2102, 2122), False, 'import vinnpy\n'), ((2333, 2366), 'vinnpy.matrix', 'vinnpy.matrix', (['context', '(4)', '(2)', '(7.0)'], {}), '(context, 4, 2, 7.0)\n', (2346, 2366), False, 'import vinnpy\n'), ((2546, 2587), 'vinnpy.matrix', 'vinnpy.matrix', (['context', 'row_count', '(2)', '(7.0)'], {}), '(context, row_count, 2, 7.0)\n', (2559, 2587), False, 'import vinnpy\n'), ((333, 388), 'numpy.matrix', 'numpy.matrix', (['[[1.0, 2.0], [3.0, 4.0]]'], {'dtype': '"""float32"""'}), "([[1.0, 2.0], [3.0, 4.0]], dtype='float32')\n", (345, 388), False, 'import numpy\n'), ((196, 206), 'contexts.contexts', 'contexts', ([], {}), '()\n', (204, 206), False, 'from contexts import contexts\n'), ((218, 228), 'contexts.contexts', 'contexts', ([], {}), '()\n', (226, 228), False, 'from contexts import contexts\n'), ((573, 583), 'contexts.contexts', 'contexts', ([], {}), '()\n', (581, 583), False, 'from contexts import contexts\n'), ((595, 605), 'contexts.contexts', 'contexts', ([], {}), '()\n', (603, 605), False, 'from contexts import contexts\n'), ((1565, 1598), 'numpy.array', 'numpy.array', (['[7.0, 7.0, 7.0, 7.0]'], {}), '([7.0, 7.0, 7.0, 7.0])\n', (1576, 1598), False, 'import numpy\n'), ((1738, 1771), 'numpy.array', 'numpy.array', (['[7.0, 7.0, 7.0, 7.0]'], {}), '([7.0, 7.0, 7.0, 7.0])\n', (1749, 1771), False, 'import numpy\n'), ((1202, 1212), 'contexts.contexts', 'contexts', ([], {}), 
'()\n', (1210, 1212), False, 'from contexts import contexts\n'), ((1224, 1234), 'contexts.contexts', 'contexts', ([], {}), '()\n', (1232, 1234), False, 'from contexts import contexts\n'), ((1806, 1816), 'contexts.contexts', 'contexts', ([], {}), '()\n', (1814, 1816), False, 'from contexts import contexts\n'), ((1828, 1838), 'contexts.contexts', 'contexts', ([], {}), '()\n', (1836, 1838), False, 'from contexts import contexts\n'), ((2231, 2241), 'contexts.contexts', 'contexts', ([], {}), '()\n', (2239, 2241), False, 'from contexts import contexts\n'), ((2253, 2263), 'contexts.contexts', 'contexts', ([], {}), '()\n', (2261, 2263), False, 'from contexts import contexts\n'), ((2430, 2440), 'contexts.contexts', 'contexts', ([], {}), '()\n', (2438, 2440), False, 'from contexts import contexts\n'), ((2452, 2462), 'contexts.contexts', 'contexts', ([], {}), '()\n', (2460, 2462), False, 'from contexts import contexts\n')] |
import pandas as pd
import json
import requests
from bs4 import BeautifulSoup
import numpy as np
from cloud_pricing.data.interface import FixedInstance
class AzureProcessor(FixedInstance):
    """Scrape Azure's Linux VM pricing page into a pickled pandas table."""
    url = 'https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/'
    # GPU model -> per-card memory in GB (the np.nan key covers GPU-less rows).
    azure_gpus_ram = {
        'K80': 12, 'M60': 8, 'P100': 16, 'P40': 24,
        'T4': 16, 'V100': 16, 'A100': 40, np.nan: 0
    }
    # Columns of interest retained from the scraped pricing tables.
    include_cols = [
        'Instance', 'Region', 'vCPU(s)', 'RAM', 'Temporary storage',
        'GPU', 'Pay as you go', 'Spot(% Savings)'
    ]
    def __init__(self, table_name='azure_data.pkl'):
        # table_name: pickle file in which the processed table is cached.
        super().__init__(table_name)
    def extract_table(self, table, region='us-east'):
        """Convert one scraped HTML <table> into a DataFrame for *region*.

        Prices are embedded as per-region JSON in a 'data-amount' attribute;
        when present they replace the cell's display text.
        """
        rows = table.find_all('tr')
        titles = None
        all_data = []
        for row in rows:
            # First row processed must contain the <th> header cells.
            if titles is None:
                heads = row.find_all('th')
                assert len(heads) > 0, "Oops, Missing Header!"
                titles = [h.get_text().replace('*','').strip() for h in heads]
            row_data = []
            for d in row.find_all('td')[:len(titles)]:
                row_data.append(d.get_text().strip())
                if d.find_next().has_attr('data-amount'):
                    # Prefer the machine-readable regional price; missing
                    # regions become None.
                    row_data[-1] = json.loads(d.find_next().get('data-amount'))['regional'].get(region, None)
            if len(row_data) > 0:
                all_data.append(row_data)
        df = pd.DataFrame(all_data, columns=titles)
        df.insert(0, 'Region', region)
        return df
    def download_data(self):
        """Fetch the pricing page and stash every <table> element on it."""
        f = requests.get(self.url)
        soup = BeautifulSoup(f.content, 'lxml')
        self.tables = soup.find_all('table')
    def setup(self):
        """Download, parse, clean and pickle the Azure pricing table."""
        print('Downloading latest Azure data...')
        self.download_data()
        # Extract each table and pricing data from HTML
        dfs = [self.extract_table(t) for t in self.tables if len(t.find_all('th')) > 0]
        # Parse, clean and combine data
        dfs = [df for df in dfs if any(c in df.columns for c in {'vCPU(s)', 'GPU', 'Core', 'RAM'})]
        cat = pd.concat(dfs, sort=False)
        # Some tables label the CPU count 'Core' instead of 'vCPU(s)'; merge.
        cat['vCPU(s)'] = [(v if v is not np.nan else c) for v,c in zip(cat['vCPU(s)'], cat['Core'])]
        cat = cat.filter(self.include_cols).rename({
            'vCPU(s)': 'CPUs',
            'RAM': 'RAM (GB)',
            'Pay as you go': 'Price ($/hr)',
            'GPU': 'GPUs',
            'Instance': 'Name',
            'Temporary storage': 'Storage',
            'Spot(% Savings)': 'Spot ($/hr)'
        }, axis=1)
        cat = cat.replace({'– –\nBlank': np.nan, 'N/A': np.nan}, regex=True).reset_index(drop=True)
        # Parse GPU info: cells apparently look like '<N>x <model> ...';
        # int(n[:-1]) strips the trailing 'x' from the count.
        n_gpus, gpu_names = [],[]
        for g in cat['GPUs'].values:
            if isinstance(g, str):
                n,t = g.split()[:2]
                n_gpus.append(int(n[:-1]))
                gpu_names.append(t)
            else:
                n_gpus.append(np.nan)
                gpu_names.append(np.nan)
        n_gpus = np.array(n_gpus)
        # Total GPU memory = card count * per-card memory.
        gpu_ram = np.array([self.azure_gpus_ram[gpu_name] for gpu_name in gpu_names])
        gpu_ram = n_gpus*gpu_ram
        cat['GPUs'] = n_gpus
        cat.insert(len(cat.columns)-2, 'GPU Name', gpu_names)
        cat.insert(len(cat.columns)-2, 'GPU RAM (GB)', gpu_ram)
        # Convert numbers: RAM cells carry a ' GiB'-style suffix and
        # thousands separators; GPU-less rows become 0.
        cat['RAM (GB)'] = [(float(a[:-4].replace(',', '')) if isinstance(a, str) else 0.) for a in cat['RAM (GB)'].values]
        cat[['CPUs','GPUs','Price ($/hr)','RAM (GB)', 'Spot ($/hr)']] = cat[['CPUs','GPUs','Price ($/hr)','RAM (GB)', 'Spot ($/hr)']].apply(pd.to_numeric)
        cat.to_pickle(self.table_name, protocol=4)
| [
"pandas.DataFrame",
"numpy.array",
"requests.get",
"bs4.BeautifulSoup",
"pandas.concat"
] | [((1419, 1457), 'pandas.DataFrame', 'pd.DataFrame', (['all_data'], {'columns': 'titles'}), '(all_data, columns=titles)\n', (1431, 1457), True, 'import pandas as pd\n'), ((1557, 1579), 'requests.get', 'requests.get', (['self.url'], {}), '(self.url)\n', (1569, 1579), False, 'import requests\n'), ((1595, 1627), 'bs4.BeautifulSoup', 'BeautifulSoup', (['f.content', '"""lxml"""'], {}), "(f.content, 'lxml')\n", (1608, 1627), False, 'from bs4 import BeautifulSoup\n'), ((2074, 2100), 'pandas.concat', 'pd.concat', (['dfs'], {'sort': '(False)'}), '(dfs, sort=False)\n', (2083, 2100), True, 'import pandas as pd\n'), ((2991, 3007), 'numpy.array', 'np.array', (['n_gpus'], {}), '(n_gpus)\n', (2999, 3007), True, 'import numpy as np\n'), ((3026, 3093), 'numpy.array', 'np.array', (['[self.azure_gpus_ram[gpu_name] for gpu_name in gpu_names]'], {}), '([self.azure_gpus_ram[gpu_name] for gpu_name in gpu_names])\n', (3034, 3093), True, 'import numpy as np\n')] |
import argparse
import numpy as np
from flask import Flask, request
import json
from text_classification.main import use_backend
from text_classification.opt import add_train_opt, add_server_opt
from text_classification.utils import load_opt
from text_classification.data import load_vocab, UNK_IDX
def run(opt_model, opt_server, module):
    """Serve a trained text-classification model over a small Flask API.

    Restores the model from opt_model.save_path and exposes a single
    GET /inference/?sentence=... endpoint returning a JSON prediction.
    """
    def encode(sentence):
        # Map characters to vocab indices, falling back to UNK.
        idxs = [vocab.get(ch, UNK_IDX) for ch in sentence]
        return np.array([idxs]), np.array([len(idxs)])

    def predict(sentence):
        label_idxs, class_pros = model.inference(*encode(sentence))
        best = label_idxs[0]
        # Probability of the predicted class for this single sentence.
        return idx2label[best], class_pros[0][best]

    _, model = module.create_model(opt_model, inference=True)
    model.to_inference(opt_model.save_path)
    vocab = load_vocab(opt_model.vocab_path)
    label2idx = load_vocab(opt_model.label_path)
    idx2label = {i: lbl for lbl, i in label2idx.items()}

    app = Flask('TextClassification')

    @app.route('/inference/')
    def inference():
        sentence = request.args.get("sentence")
        print(sentence)
        label, pro = predict(sentence)
        return json.dumps({'sentence':sentence, 'label':label, 'pro':float(pro)}, ensure_ascii=False)

    app.run('0.0.0.0', opt_server.port)
if __name__ == '__main__':
    # Combined CLI: training options locate the saved model, server options
    # configure the HTTP endpoint.
    parser = argparse.ArgumentParser()
    add_train_opt(parser)
    add_server_opt(parser)
    opt_server = parser.parse_args()
    save_path = opt_server.save_path
    # Load the options the model was saved with, and pick its backend module.
    opt_model = load_opt(save_path)
    module = use_backend(opt_model.backend)
    print(opt_model)
    print(opt_server)
run(opt_model, opt_server, module) | [
"argparse.ArgumentParser",
"flask.request.args.get",
"flask.Flask",
"text_classification.data.load_vocab",
"text_classification.main.use_backend",
"text_classification.opt.add_train_opt",
"numpy.array",
"text_classification.utils.load_opt",
"text_classification.opt.add_server_opt"
] | [((914, 946), 'text_classification.data.load_vocab', 'load_vocab', (['opt_model.vocab_path'], {}), '(opt_model.vocab_path)\n', (924, 946), False, 'from text_classification.data import load_vocab, UNK_IDX\n'), ((963, 995), 'text_classification.data.load_vocab', 'load_vocab', (['opt_model.label_path'], {}), '(opt_model.label_path)\n', (973, 995), False, 'from text_classification.data import load_vocab, UNK_IDX\n'), ((1077, 1104), 'flask.Flask', 'Flask', (['"""TextClassification"""'], {}), "('TextClassification')\n", (1082, 1104), False, 'from flask import Flask, request\n'), ((1453, 1478), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1476, 1478), False, 'import argparse\n'), ((1483, 1504), 'text_classification.opt.add_train_opt', 'add_train_opt', (['parser'], {}), '(parser)\n', (1496, 1504), False, 'from text_classification.opt import add_train_opt, add_server_opt\n'), ((1509, 1531), 'text_classification.opt.add_server_opt', 'add_server_opt', (['parser'], {}), '(parser)\n', (1523, 1531), False, 'from text_classification.opt import add_train_opt, add_server_opt\n'), ((1622, 1641), 'text_classification.utils.load_opt', 'load_opt', (['save_path'], {}), '(save_path)\n', (1630, 1641), False, 'from text_classification.utils import load_opt\n'), ((1655, 1685), 'text_classification.main.use_backend', 'use_backend', (['opt_model.backend'], {}), '(opt_model.backend)\n', (1666, 1685), False, 'from text_classification.main import use_backend\n'), ((1176, 1204), 'flask.request.args.get', 'request.args.get', (['"""sentence"""'], {}), "('sentence')\n", (1192, 1204), False, 'from flask import Flask, request\n'), ((491, 511), 'numpy.array', 'np.array', (['[sequence]'], {}), '([sequence])\n', (499, 511), True, 'import numpy as np\n'), ((513, 531), 'numpy.array', 'np.array', (['[length]'], {}), '([length])\n', (521, 531), True, 'import numpy as np\n')] |
""" module to test mloutput assessments."""
import numpy as np
from duckie.mltesting_assessments import ConfusionMatrixAssessment
import os.path
import pandas as pd
def test_cma():
    """End-to-end check of ConfusionMatrixAssessment.

    Verifies summarize(), distribute() and plot() against known-good values
    for the fixture matrix stored in tests/test_data/.
    """
    confusion_matrix = np.load('tests/test_data/test_confusion_matrix.npy')
    CMA = ConfusionMatrixAssessment(confusion_matrix, 'TestML')
    # We know the correct values for this matrix
    summary_data_true = {'Model Name': 'TestML', 'recall': 0.95,
                         'precision': 0.926829268292683, 'f1_score': 0.9382716049382716}
    # Test the summarize function
    summary_data = CMA.summarize()
    for stat in summary_data.columns:
        assert summary_data[stat].values[0] == summary_data_true[
            stat], f"{stat} incorrect, getting {summary_data[stat].values[0]}, should be \
                {summary_data_true[stat]}"
    # Test the get_distribution function
    model_name = np.array(['TestML', 'TestML', 'TestML', 'TestML'])
    category = np.array(['TN', 'FP', 'FN', 'TP'])
    true_counts = np.array([77, 3, 2, 38])
    true_fractions = np.array([0.64166667, 0.025, 0.01666667, 0.31666667])
    true_percentages = np.array([64.16666667, 2.5, 1.66666667, 31.66666667])
    distribution_data_true = {
        'Model Name': model_name,
        'category': category,
        'counts': true_counts,
        'fractions': true_fractions,
        'percentages': true_percentages}
    distribution_data = CMA.distribute()
    # Exact comparison for label/count columns...
    for stat in ['Model Name', 'category', 'counts']:
        assert np.array_equal(distribution_data[stat].values, distribution_data_true[stat]
                              ), f"{stat} incorrect, getting {distribution_data[stat].values}, \
            should be {distribution_data_true[stat]}"
    # ...tolerance-based comparison for the float columns.
    for stat in ['fractions', 'percentages']:
        assert np.allclose(distribution_data[stat].values, distribution_data_true[stat]
                           ), f"{stat} incorrect, getting {distribution_data[stat].values}, \
            should be {distribution_data_true[stat]}"
    # Test the plot function
    CMA.plot('confusion_matrix.png', 'Confusion Matrix')
    assert os.path.isfile('confusion_matrix.png'), "No plot generated."
# Allow running this test module directly, outside of a test runner.
if __name__ == "__main__":
    test_cma()
| [
"numpy.load",
"numpy.allclose",
"numpy.array",
"duckie.mltesting_assessments.ConfusionMatrixAssessment",
"numpy.array_equal"
] | [((270, 322), 'numpy.load', 'np.load', (['"""tests/test_data/test_confusion_matrix.npy"""'], {}), "('tests/test_data/test_confusion_matrix.npy')\n", (277, 322), True, 'import numpy as np\n'), ((334, 387), 'duckie.mltesting_assessments.ConfusionMatrixAssessment', 'ConfusionMatrixAssessment', (['confusion_matrix', '"""TestML"""'], {}), "(confusion_matrix, 'TestML')\n", (359, 387), False, 'from duckie.mltesting_assessments import ConfusionMatrixAssessment\n'), ((962, 1012), 'numpy.array', 'np.array', (["['TestML', 'TestML', 'TestML', 'TestML']"], {}), "(['TestML', 'TestML', 'TestML', 'TestML'])\n", (970, 1012), True, 'import numpy as np\n'), ((1028, 1062), 'numpy.array', 'np.array', (["['TN', 'FP', 'FN', 'TP']"], {}), "(['TN', 'FP', 'FN', 'TP'])\n", (1036, 1062), True, 'import numpy as np\n'), ((1081, 1105), 'numpy.array', 'np.array', (['[77, 3, 2, 38]'], {}), '([77, 3, 2, 38])\n', (1089, 1105), True, 'import numpy as np\n'), ((1127, 1180), 'numpy.array', 'np.array', (['[0.64166667, 0.025, 0.01666667, 0.31666667]'], {}), '([0.64166667, 0.025, 0.01666667, 0.31666667])\n', (1135, 1180), True, 'import numpy as np\n'), ((1204, 1257), 'numpy.array', 'np.array', (['[64.16666667, 2.5, 1.66666667, 31.66666667]'], {}), '([64.16666667, 2.5, 1.66666667, 31.66666667])\n', (1212, 1257), True, 'import numpy as np\n'), ((1575, 1651), 'numpy.array_equal', 'np.array_equal', (['distribution_data[stat].values', 'distribution_data_true[stat]'], {}), '(distribution_data[stat].values, distribution_data_true[stat])\n', (1589, 1651), True, 'import numpy as np\n'), ((1884, 1957), 'numpy.allclose', 'np.allclose', (['distribution_data[stat].values', 'distribution_data_true[stat]'], {}), '(distribution_data[stat].values, distribution_data_true[stat])\n', (1895, 1957), True, 'import numpy as np\n')] |
import torch
import numpy as np
import numba
import warnings
from .._models import Model, enforce_observed
from ...fitter import FitterSGD
@numba.jit(nopython=True, fastmath=True)
def _quant_hawkes_model_init_cache(times, counts, M, excit_func_jit):
    """Pre-compute the excitation cache for the quantized Hawkes model.

    For every pair of dimensions (i, j) and every event index n of
    dimension i, sums the (jitted) excitation kernel evaluated at the time
    differences to all past events of dimension j, weighted by the counts.

    :param times: list of 1d float arrays, event times per dimension
    :param counts: list of 1d float arrays, event counts per dimension
        (same lengths as `times`)
    :param M: int, number of excitation basis functions
    :param excit_func_jit: numba-jitted excitation kernel; presumably
        returns an array of shape (M, n_events) -- the sum over the last
        axis below is stored in an (M,)-slot
    :return: list (one entry per dimension i) of arrays with shape
        (dim, M, n_jumps[i]); entry [i][j, :, n] holds the accumulated
        excitation of dimension i by past events of dimension j
    """
    dim = len(counts)
    n_jumps = [len(times[i]) for i in range(dim)]
    cache = [np.zeros((dim, M, n_jumps[i])) for i in range(dim)]
    for i in range(dim):
        for j in range(dim):
            for n in range(1, len(times[i])):
                # Time difference from current t_{i,n} to all events in j
                t_ij = times[i][n] - times[j]
                # Mask for valid events: those prior to t_{i,n-1}
                valid_ij = (times[i][n-1] - times[j]) >= 0
                # Compute cache value
                kappas = excit_func_jit(t_ij[valid_ij]) * counts[j][valid_ij]
                cache[i][j, :, n] = np.sum(kappas, axis=-1)  # sum over M bases
    return cache
class IrregQuantHawkesModel(Model):
    """
    Irregularly Quantized Multivariate Hawkes Process.

    Observations are given per dimension as (times, counts) pairs, i.e.
    several jumps may be aggregated at a single (irregular) time stamp.
    """
    def __init__(self, excitation, verbose=False, device='cpu', **kwargs):
        """
        Initialize the model.

        Arguments:
        ----------
        excitation : Excitation
            Excitation (kernel) object; must expose `M`, `call` and
            `call_jit`
        verbose : bool
            If True, print progress while the cache is initialized
        device : str
            'cpu' or 'cuda'; silently falls back to 'cpu' when CUDA is
            not available
        """
        self.excitation = excitation
        self.M = self.excitation.M or 1
        # the following attributes are filled in by `observe`
        self.n_jumps = None
        self.dim = None
        self.n_params = None
        self.n_var_params = None
        self._observed = False
        self.verbose = verbose
        # use the GPU only if requested *and* actually available
        if torch.cuda.is_available() and device == 'cuda':
            self.device = 'cuda'
        else:
            self.device = 'cpu'
        super().__init__(**kwargs)
    def observe(self, times, counts):
        """
        Set the data for the model.

        Arguments:
        ----------
        times : list of torch.Tensor
            Event time stamps, one tensor per dimension
        counts : list of torch.Tensor
            Event counts per time stamp (same lengths as `times`)
        """
        assert isinstance(counts[0], torch.Tensor)
        assert isinstance(times[0], torch.Tensor)
        # Set the data attributes
        self.counts = counts
        self.times = times
        # time increments between consecutive stamps (first delta measured
        # from time zero)
        self.deltas = [torch.cat(
            (ts[:1], ts[1:] - ts[:-1])) for ts in self.times]
        # Set various util attributes
        self.dim = len(counts)
        self.n_params = self.dim * (self.dim * self.excitation.M + 1)
        self.n_jumps = sum(map(sum, counts))
        # Init the pre-computed cache (only on the first call; subsequent
        # calls to `observe` reuse the existing cache)
        if not self._observed:
            self._init_cache()
        self._observed = True
    def _init_cache_python(self):
        """
        caching the required computations
        cache[i][j,0,k]: float
            sum_{t^j < t^i_k} phi(t^i_k - t^j)
            This is used in k^th timestamp of node i, i.e., lambda_i(t^i_k)
        cache_integral: float
            used in the integral of intensity
        """
        self._cache = [torch.zeros(
            (self.dim, self.excitation.M, len(counts_i)),
            dtype=torch.float64, device=self.device)
            for counts_i in self.counts]
        for i in range(self.dim):
            for j in range(self.dim):
                if self.verbose:
                    print((f"\rInitialize cache {i*self.dim+j+1}/{self.dim**2}"
                           " "), end='')
                for n in range(1, len(self.times[i])):
                    # Time difference from current t_{i,n} to all events in j
                    t_ij = self.times[i][n] - self.times[j]
                    # Mask for valid events: those prior to t_{i,n-1}
                    valid_ij = (self.times[i][n-1] - self.times[j]) >= 0
                    # Compute cache value
                    kappas = (self.excitation.call(t_ij[valid_ij]) *
                              self.counts[j][valid_ij])
                    kappas = kappas.sum(-1)  # sum over M bases
                    self._cache[i][j, :, n] = kappas
        if self.verbose:
            print()
    def _init_cache(self):
        """Initialize the excitation cache, preferring the fast numba
        implementation and falling back to the pure python one when the
        excitation kernel provides no jitted callable."""
        try:
            times_arr = [np.array(ev, dtype=float) for ev in self.times]
            counts_arr = [np.array(ev, dtype=float) for ev in self.counts]
            # Catch annoying NumbaPendingDeprecationWarning
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                cache = _quant_hawkes_model_init_cache(
                    times_arr, counts_arr, M=self.excitation.M,
                    excit_func_jit=self.excitation.call_jit())
            self._cache = [torch.tensor(
                ci, dtype=torch.float64, device=self.device) for ci in cache]
        except NotImplementedError:
            print(('Notice: Fast caching not implemented for this excitation '
                   'kernel. Falling back to pure python implementation.'))
            self._init_cache_python()
    @enforce_observed
    def log_likelihood(self, mu, W):
        """
        Log likelihood of an irregularly quantized Hawkes process for the given
        parameters mu and W.

        Arguments:
        ----------
        mu : torch.Tensor
            (dim x 1)
            Base intensities
        W : torch.Tensor
            (dim x dim x M) --> M is for the number of different excitation
            functions
            The weight matrix.

        Returns:
        --------
        torch.Tensor
            Scalar: sum over all dimensions/intervals of the Poisson
            log-likelihood of the counts under the cached intensity.
        """
        log_like = 0
        for i in range(self.dim):
            # _cache[i] shape: (dim, M, len(events[i]))
            # W[i] shape: (dim, M) --> need unsqueeze dimension 2
            # mu[i] shape: () --> ok
            lamb_i = mu[i] + (W[i].unsqueeze(2) * self._cache[i]).sum(0).sum(0)
            # expected number of jumps on each inter-event interval
            intens_i = lamb_i * self.deltas[i]
            log_like += torch.sum(self.counts[i] * intens_i.log() - intens_i)
        return log_like
    @enforce_observed
    def mean_squared_loss(self, mu, W):
        """
        Mean-square loss of an irregularly quantized Hawkes process for the
        given parameters mu and W.

        Arguments:
        ----------
        mu : torch.Tensor
            (dim x 1)
            Base intensities
        W : torch.Tensor
            (dim x dim x M) --> M is for the number of different excitation
            functions
            The weight matrix.

        Returns:
        --------
        torch.Tensor
            Scalar: average squared difference between the expected and
            the observed number of jumps per interval.
        """
        loss = 0.0
        num = 0.0
        for i in range(self.dim):
            lamb_i = mu[i] + (W[i].unsqueeze(2) * self._cache[i]).sum(0).sum(0)
            intens_i = lamb_i * self.deltas[i]
            loss += torch.sum((intens_i - self.counts[i]) ** 2, axis=0)
            num += len(intens_i)
        loss /= num
        return loss
    def lambda_in(self, i, n, mu, W):
        """Compute the intensity in dimension `i` at the `n`-th observation"""
        lamb_in = mu[i] + (W[i] * self._cache[i][:, :, n]).sum(0).sum(0)
        return lamb_in
class IrregQuantHawkesModelMLE(IrregQuantHawkesModel, FitterSGD):
    """Irregularly quantized multivariate Hawkes process with a maximum
    likelihood estimation (SGD) fitter."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @enforce_observed
    def mle_objective(self, coeffs):
        """Objective function for MLE: averaged negative log-likelihood.

        :param coeffs: torch.Tensor, flat parameter vector
            [mu (dim), W (dim * dim * M)]

        BUG FIX: the `coeffs` argument was previously ignored in favor of
        `self.coeffs`; the objective is now evaluated at the coefficients
        it is given (identical behavior when called with `self.coeffs`).
        """
        mu = coeffs[:self.dim]
        W = coeffs[self.dim:].reshape(self.dim, self.dim, self.excitation.M)
        return -1.0 * self.log_likelihood(mu, W) / self.n_jumps

    @enforce_observed
    def mle_objective_log_input(self, coeffs):
        """Objective function for MLE with log-parametrized inputs
        (ensures positivity of mu and W via the exp link).

        :param coeffs: torch.Tensor, flat vector [log_mu (dim),
            log_W (dim * dim * M)]

        BUG FIX: same as `mle_objective` -- the argument is now used.
        """
        log_mu = coeffs[:self.dim]
        log_W = coeffs[self.dim:].reshape(
            self.dim, self.dim, self.excitation.M)
        return -1.0 * self.log_likelihood(
            torch.exp(log_mu), torch.exp(log_W)) / self.n_jumps

    def fit(self, *args, **kwargs):
        """Fit by minimizing the averaged negative log-likelihood."""
        return super().fit(objective_func=self.mle_objective, *args, **kwargs)

    def fit_log_input(self, *args, **kwargs):
        """Fit log-likelihood with log-input variables"""
        return super().fit(
            objective_func=self.mle_objective_log_input, *args, **kwargs)

    def adjacency(self, exp_link=False):
        """Return the fitted excitation weights, shape (dim, dim, M).

        :param exp_link: bool, apply `exp` (for models fitted with
            `fit_log_input`)
        """
        W = self.coeffs[self.dim:].reshape(
            self.dim, self.dim, self.excitation.M).detach()
        if exp_link:
            W = torch.exp(W)
        return W

    def baseline(self, exp_link=False):
        """Return the fitted baseline intensities, shape (dim,).

        :param exp_link: bool, apply `exp` (for models fitted with
            `fit_log_input`)
        """
        mu = self.coeffs[:self.dim].detach()
        if exp_link:
            mu = torch.exp(mu)
        return mu
| [
"numpy.sum",
"warnings.simplefilter",
"numpy.zeros",
"torch.cat",
"torch.exp",
"torch.cuda.is_available",
"numba.jit",
"numpy.array",
"warnings.catch_warnings",
"torch.sum",
"torch.tensor"
] | [((143, 182), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'fastmath': '(True)'}), '(nopython=True, fastmath=True)\n', (152, 182), False, 'import numba\n'), ((338, 368), 'numpy.zeros', 'np.zeros', (['(dim, M, n_jumps[i])'], {}), '((dim, M, n_jumps[i]))\n', (346, 368), True, 'import numpy as np\n'), ((1592, 1617), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1615, 1617), False, 'import torch\n'), ((2067, 2104), 'torch.cat', 'torch.cat', (['(ts[:1], ts[1:] - ts[:-1])'], {}), '((ts[:1], ts[1:] - ts[:-1]))\n', (2076, 2104), False, 'import torch\n'), ((6332, 6383), 'torch.sum', 'torch.sum', (['((intens_i - self.counts[i]) ** 2)'], {'axis': '(0)'}), '((intens_i - self.counts[i]) ** 2, axis=0)\n', (6341, 6383), False, 'import torch\n'), ((8090, 8102), 'torch.exp', 'torch.exp', (['W'], {}), '(W)\n', (8099, 8102), False, 'import torch\n'), ((8244, 8257), 'torch.exp', 'torch.exp', (['mu'], {}), '(mu)\n', (8253, 8257), False, 'import torch\n'), ((884, 907), 'numpy.sum', 'np.sum', (['kappas'], {'axis': '(-1)'}), '(kappas, axis=-1)\n', (890, 907), True, 'import numpy as np\n'), ((3935, 3960), 'numpy.array', 'np.array', (['ev'], {'dtype': 'float'}), '(ev, dtype=float)\n', (3943, 3960), True, 'import numpy as np\n'), ((4009, 4034), 'numpy.array', 'np.array', (['ev'], {'dtype': 'float'}), '(ev, dtype=float)\n', (4017, 4034), True, 'import numpy as np\n'), ((4135, 4160), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (4158, 4160), False, 'import warnings\n'), ((4178, 4209), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (4199, 4209), False, 'import warnings\n'), ((4420, 4477), 'torch.tensor', 'torch.tensor', (['ci'], {'dtype': 'torch.float64', 'device': 'self.device'}), '(ci, dtype=torch.float64, device=self.device)\n', (4432, 4477), False, 'import torch\n'), ((7558, 7575), 'torch.exp', 'torch.exp', (['log_mu'], {}), '(log_mu)\n', (7567, 7575), False, 'import torch\n'), 
((7577, 7593), 'torch.exp', 'torch.exp', (['log_W'], {}), '(log_W)\n', (7586, 7593), False, 'import torch\n')] |
"""
author: <NAME> & <NAME>
code to generate synthetic data from stock-model SDEs
"""
# ==============================================================================
from math import sqrt, exp
import numpy as np
import matplotlib.pyplot as plt
import copy, os
# ==============================================================================
class StockModel:
    """
    Mother class for all stock models, defining the variables and methods
    shared amongst all of them; `generate_paths` and `next_cond_exp` need
    to be defined individually by each subclass.
    """
    def __init__(self, drift, volatility, S0, nb_paths, nb_steps,
                 maturity, sine_coeff, **kwargs):
        self.drift = drift
        self.volatility = volatility
        self.S0 = S0
        self.nb_paths = nb_paths
        self.nb_steps = nb_steps
        self.maturity = maturity
        self.dimensions = np.size(S0)
        # optional periodic (sine) modulation of the drift
        if sine_coeff is None:
            self.periodic_coeff = lambda t: 1
        else:
            self.periodic_coeff = lambda t: (1 + np.sin(sine_coeff * t))

    def generate_paths(self, **options):
        """
        generate random paths according to the model hyperparams
        :return: stock paths as np.array, dim: [nb_paths, data_dim, nb_steps]
        """
        raise ValueError("not implemented yet")

    def next_cond_exp(self, *args, **kwargs):
        """
        compute the next point of the conditional expectation starting from
        given point for given time_delta
        :return: cond. exp. at next time_point (= current_time + time_delta)
        """
        raise ValueError("not implemented yet")

    def compute_cond_exp(self, times, time_ptr, X, obs_idx, delta_t, T, start_X,
                         n_obs_ot, return_path=True, get_loss=False,
                         weight=0.5,
                         start_time=None,
                         **kwargs):
        """
        compute conditional expectation similar to computing the prediction in
        the model.NJODE.forward
        :param times: see model.NJODE.forward
        :param time_ptr: see model.NJODE.forward
        :param X: see model.NJODE.forward, as np.array
        :param obs_idx: see model.NJODE.forward, as np.array
        :param delta_t: see model.NJODE.forward, as np.array
        :param T: see model.NJODE.forward
        :param start_X: see model.NJODE.forward, as np.array
        :param n_obs_ot: see model.NJODE.forward, as np.array
        :param return_path: see model.NJODE.forward
        :param get_loss: see model.NJODE.forward
        :param weight: see model.NJODE.forward
        :param start_time: None or float, if float, this is first time point
        :param kwargs: unused, to allow for additional unused inputs
        :return: float (loss), if wanted paths of t and y (np.arrays)
        """
        y = start_X
        batch_size = start_X.shape[0]
        current_time = 0.0
        # NOTE: truthiness test kept on purpose -- an explicit start_time
        # of 0.0 behaves like the default (None)
        if start_time:
            current_time = start_time
        loss = 0
        if return_path:
            if start_time:
                path_t = []
                path_y = []
            else:
                path_t = [0.]
                path_y = [y]
        for i, obs_time in enumerate(times):
            if obs_time > T + 1e-10:
                break
            if obs_time <= current_time:
                continue
            # Propagation of the ODE until next observation
            # (1e-10*delta_t is used for numerical consistency)
            while current_time < (obs_time - 1e-10 * delta_t):
                if current_time < obs_time - delta_t:
                    delta_t_ = delta_t
                else:
                    delta_t_ = obs_time - current_time
                y = self.next_cond_exp(y, delta_t_, current_time)
                current_time = current_time + delta_t_
                # Storing the predictions.
                if return_path:
                    path_t.append(current_time)
                    path_y.append(y)
            # Reached an observation - only update those elements of the batch,
            # for which an observation is made
            start = time_ptr[i]
            end = time_ptr[i + 1]
            X_obs = X[start:end]
            i_obs = obs_idx[start:end]
            # paste the observed values into the prediction; Y_bj keeps the
            # prediction *before* the jump (needed for the loss)
            Y_bj = y
            temp = copy.copy(y)
            temp[i_obs] = X_obs
            y = temp
            Y = y
            if get_loss:
                loss = loss + compute_loss(X_obs=X_obs, Y_obs=Y[i_obs],
                                           Y_obs_bj=Y_bj[i_obs],
                                           n_obs_ot=n_obs_ot[i_obs],
                                           batch_size=batch_size,
                                           weight=weight)
            if return_path:
                path_t.append(obs_time)
                path_y.append(y)
        # after every observation has been processed, propagating until T
        while current_time < T - 1e-10 * delta_t:
            if current_time < T - delta_t:
                delta_t_ = delta_t
            else:
                delta_t_ = T - current_time
            # BUG FIX: `current_time` was previously not passed here although
            # every subclass implements next_cond_exp(y, delta_t, current_t),
            # so propagating beyond the last observation raised a TypeError.
            y = self.next_cond_exp(y, delta_t_, current_time)
            current_time = current_time + delta_t_
            # Storing the predictions.
            if return_path:
                path_t.append(current_time)
                path_y.append(y)
        if return_path:
            # path dimension: [time_steps, batch_size, output_size]
            return loss, np.array(path_t), np.array(path_y)
        else:
            return loss

    def get_optimal_loss(self, times, time_ptr, X, obs_idx, delta_t, T, start_X,
                         n_obs_ot, weight=0.5):
        """Loss of the true conditional expectation (lower bound for the
        model loss)."""
        loss = self.compute_cond_exp(
            times, time_ptr, X, obs_idx, delta_t, T, start_X, n_obs_ot,
            return_path=False, get_loss=True, weight=weight)
        return loss
class Heston(StockModel):
    """
    The Heston model, see: https://en.wikipedia.org/wiki/Heston_model
    a basic stochastic volatility stock price model
    """
    def __init__(self, drift, volatility, mean, speed, correlation, nb_paths,
                 nb_steps, S0, maturity, sine_coeff=None, **kwargs):
        super(Heston, self).__init__(
            drift=drift, volatility=volatility, nb_paths=nb_paths,
            nb_steps=nb_steps,
            S0=S0, maturity=maturity,
            sine_coeff=sine_coeff
        )
        self.mean = mean                # long-run mean of the variance
        self.speed = speed              # mean-reversion speed of the variance
        self.correlation = correlation  # corr. of the two Brownian motions

    def next_cond_exp(self, y, delta_t, current_t):
        # E[S_{t+dt} | S_t] depends only on the (periodic) drift of the spot
        return y * np.exp(self.drift*self.periodic_coeff(current_t)*delta_t)

    def generate_paths(self, start_X=None, return_vol=False):
        """Simulate Euler-discretized Heston paths.

        :param start_X: None or array used as S_0 for every path
        :param return_vol: bool, if True additionally return the variance
            paths (needed by `draw_path_heston`); the default False keeps
            the original return signature for existing callers
        :return: (spot_paths, dt), or ((spot_paths, var_paths), dt) if
            `return_vol` is True
        """
        # Diffusion of the spot: dS = mu*S*dt + sqrt(v)*S*dW
        spot_drift = lambda x, t: self.drift*self.periodic_coeff(t)*x
        spot_diffusion = lambda x, v, t: np.sqrt(v) * x
        # Diffusion of the variance: dv = -k(v-vinf)*dt + sqrt(v)*v*dW
        var_drift = lambda v, t: - self.speed * (v - self.mean)
        var_diffusion = lambda v, t: self.volatility * np.sqrt(v)
        spot_paths = np.empty(
            (self.nb_paths, self.dimensions, self.nb_steps + 1))
        var_paths = np.empty(
            (self.nb_paths, self.dimensions, self.nb_steps + 1))
        dt = self.maturity / self.nb_steps
        if start_X is not None:
            spot_paths[:, :, 0] = start_X
        for i in range(self.nb_paths):
            if start_X is None:
                spot_paths[i, :, 0] = self.S0
            var_paths[i, :, 0] = self.mean
            for k in range(1, self.nb_steps + 1):
                normal_numbers_1 = np.random.normal(0, 1, self.dimensions)
                normal_numbers_2 = np.random.normal(0, 1, self.dimensions)
                dW = normal_numbers_1 * np.sqrt(dt)
                # correlated Brownian increment driving the variance
                dZ = (self.correlation * normal_numbers_1 + np.sqrt(
                    1 - self.correlation ** 2) * normal_numbers_2) * np.sqrt(dt)
                var_paths[i, :, k] = (
                        var_paths[i, :, k - 1]
                        + var_drift(var_paths[i, :, k - 1], (k) * dt) * dt
                        + var_diffusion(var_paths[i, :, k - 1], (k) * dt) * dZ)
                spot_paths[i, :, k] = (
                        spot_paths[i, :, k - 1]
                        + spot_drift(spot_paths[i, :, k - 1], (k-1) * dt) * dt
                        + spot_diffusion(spot_paths[i, :, k - 1],
                                         var_paths[i, :, k],
                                         (k) * dt) * dW)
        # stock_path dimension: [nb_paths, dimension, time_steps]
        if return_vol:
            return (spot_paths, var_paths), dt
        return spot_paths, dt

    def draw_path_heston(self, filename):
        """Simulate one path and plot spot and variance into `filename`.

        BUG FIX: `generate_paths` previously returned only the spot paths,
        so the unpacking `spot_paths, var_paths = paths` below failed with
        a ValueError; the variance paths are now requested explicitly.
        """
        nb_paths = self.nb_paths
        self.nb_paths = 1
        paths, dt = self.generate_paths(return_vol=True)
        self.nb_paths = nb_paths
        spot_paths, var_paths = paths
        one_spot_path = spot_paths[0, 0, :]
        one_var_path = var_paths[0, 0, :]
        dates = np.arange(len(one_spot_path))
        dt = self.maturity / self.nb_steps
        fig, ax1 = plt.subplots()
        color = 'tab:blue'
        ax1.set_xlabel('time')
        ax1.set_ylabel('Stock', color=color)
        ax1.plot(dates, one_spot_path, color=color)
        ax1.tick_params(axis='y', labelcolor=color)
        color = 'tab:red'
        ax2 = ax1.twinx()
        ax2.set_ylabel('Variance', color=color)
        ax2.plot(dates, one_var_path, color=color)
        ax2.tick_params(axis='y', labelcolor=color)
        fig.tight_layout()
        plt.savefig(filename)
        plt.close()
class HestonWOFeller(StockModel):
    """
    the Heston model, see: https://en.wikipedia.org/wiki/Heston_model
    a basic stochastic volatility stock price model, that can be used
    even if Feller condition is not satisfied
    Feller condition: 2*speed*mean > volatility**2
    """
    def __init__(self, drift, volatility, mean, speed, correlation, nb_paths,
                 nb_steps, S0, maturity, scheme='euler', return_vol=False,
                 v0=None, sine_coeff=None, **kwargs):
        super(HestonWOFeller, self).__init__(
            drift=drift, volatility=volatility, nb_paths=nb_paths,
            nb_steps=nb_steps,
            S0=S0, maturity=maturity,
            sine_coeff=sine_coeff
        )
        self.mean = mean
        self.speed = speed
        self.correlation = correlation
        self.scheme = scheme
        # NOTE: attribute keeps its historical (misspelled) name 'retur_vol';
        # when True, variance paths are stacked onto the returned spot paths
        self.retur_vol = return_vol
        if v0 is None:
            # start the variance process at its long-run mean by default
            self.v0 = self.mean
        else:
            self.v0 = v0
    def next_cond_exp(self, y, delta_t, current_t):
        if self.retur_vol:
            # y stacks [spot, variance] along axis 1 -> split and propagate
            # each part separately
            s, v = np.split(y, indices_or_sections=2, axis=1)
            # spot cond. expectation grows with the (periodic) drift
            s = s * np.exp(self.drift*self.periodic_coeff(current_t)*delta_t)
            # variance mean-reverts towards self.mean at rate self.speed
            exp_delta = np.exp(-self.speed * delta_t)
            v = v * exp_delta + self.mean * (1 - exp_delta)
            y = np.concatenate([s, v], axis=1)
            return y
        else:
            return y*np.exp(self.drift*self.periodic_coeff(current_t)*delta_t)
    def generate_paths(self, start_X=None):
        if self.scheme == 'euler':
            # Diffusion of the spot: dS = mu*S*dt + sqrt(v)*S*dW, simulated
            # in log-space; np.maximum(v, 0) truncates negative variances
            # (full-truncation Euler), which keeps the scheme valid even
            # without the Feller condition
            log_spot_drift = lambda v, t: \
                (self.drift*self.periodic_coeff(t) - 0.5 * np.maximum(v, 0))
            log_spot_diffusion = lambda v: np.sqrt(np.maximum(v, 0))
            # Diffusion of the variance: dv = -k(v-vinf)*dt + sqrt(v)*v*dW
            var_drift = lambda v: - self.speed * (np.maximum(v, 0) - self.mean)
            var_diffusion = lambda v: self.volatility * np.sqrt(np.maximum(v, 0))
            spot_paths = np.empty(
                (self.nb_paths, self.dimensions, self.nb_steps + 1))
            var_paths = np.empty(
                (self.nb_paths, self.dimensions, self.nb_steps + 1))
            dt = self.maturity / self.nb_steps
            if start_X is not None:
                spot_paths[:, :, 0] = start_X
            for i in range(self.nb_paths):
                if start_X is None:
                    spot_paths[i, :, 0] = self.S0
                var_paths[i, :, 0] = self.v0
                for k in range(1, self.nb_steps + 1):
                    normal_numbers_1 = np.random.normal(0, 1, self.dimensions)
                    normal_numbers_2 = np.random.normal(0, 1, self.dimensions)
                    dW = normal_numbers_1 * np.sqrt(dt)
                    # correlated Brownian increment driving the variance
                    dZ = (self.correlation * normal_numbers_1 + np.sqrt(
                        1 - self.correlation ** 2) * normal_numbers_2) * np.sqrt(dt)
                    spot_paths[i, :, k] = np.exp(
                        np.log(spot_paths[i, :, k - 1])
                        + log_spot_drift(
                            var_paths[i, :, k - 1], (k-1)*dt) * dt
                        + log_spot_diffusion(var_paths[i, :, k - 1]) * dW
                    )
                    var_paths[i, :, k] = (
                            var_paths[i, :, k - 1]
                            + var_drift(var_paths[i, :, k - 1]) * dt
                            + var_diffusion(var_paths[i, :, k - 1]) * dZ
                    )
            if self.retur_vol:
                spot_paths = np.concatenate([spot_paths, var_paths], axis=1)
            # stock_path dimension: [nb_paths, dimension, time_steps]
            return spot_paths, dt
        else:
            raise ValueError('unknown sampling scheme')
class BlackScholes(StockModel):
    """Geometric Brownian motion, i.e. the standard Black-Scholes model.

    See https://en.wikipedia.org/wiki/Black–Scholes_model and
    https://en.wikipedia.org/wiki/Geometric_Brownian_motion
    """
    def __init__(self, drift, volatility, nb_paths, nb_steps, S0,
                 maturity, sine_coeff=None, **kwargs):
        super(BlackScholes, self).__init__(
            drift=drift, volatility=volatility, nb_paths=nb_paths,
            nb_steps=nb_steps, S0=S0, maturity=maturity,
            sine_coeff=sine_coeff)

    def next_cond_exp(self, y, delta_t, current_t):
        # E[S_{t+dt} | S_t] = S_t * exp(mu(t) * dt)
        growth = np.exp(self.drift * self.periodic_coeff(current_t) * delta_t)
        return y * growth

    def generate_paths(self, start_X=None):
        """Simulate Euler paths of GBM.

        :param start_X: None or array used as S_0 for every path
        :return: (paths, dt) with paths of shape
            [nb_paths, dimension, nb_steps + 1]
        """
        paths = np.empty(
            (self.nb_paths, self.dimensions, self.nb_steps + 1))
        dt = self.maturity / self.nb_steps
        if start_X is not None:
            paths[:, :, 0] = start_X
        for p in range(self.nb_paths):
            if start_X is None:
                paths[p, :, 0] = self.S0
            for step in range(1, self.nb_steps + 1):
                z = np.random.normal(0, 1, self.dimensions)
                dW = z * np.sqrt(dt)
                prev = paths[p, :, step - 1]
                # dS = mu(t)*S*dt + sigma*S*dW
                drift_incr = (self.drift * self.periodic_coeff((step - 1) * dt)
                              * prev * dt)
                diff_incr = self.volatility * prev * dW
                paths[p, :, step] = prev + drift_incr + diff_incr
        # path dimension: [nb_paths, dimension, time_steps]
        return paths, dt
class OrnsteinUhlenbeck(StockModel):
    """Mean-reverting Ornstein-Uhlenbeck model, see:
    https://en.wikipedia.org/wiki/Ornstein–Uhlenbeck_process
    """
    def __init__(self, volatility, nb_paths, nb_steps, S0,
                 mean, speed, maturity, sine_coeff=None, **kwargs):
        super(OrnsteinUhlenbeck, self).__init__(
            volatility=volatility, nb_paths=nb_paths, drift=None,
            nb_steps=nb_steps, S0=S0, maturity=maturity,
            sine_coeff=sine_coeff)
        self.mean = mean    # level the process reverts to
        self.speed = speed  # mean-reversion speed

    def next_cond_exp(self, y, delta_t, current_t):
        # E[X_{t+dt} | X_t] interpolates exponentially towards the mean
        decay = np.exp(-self.speed * self.periodic_coeff(current_t) * delta_t)
        return y * decay + self.mean * (1 - decay)

    def generate_paths(self, start_X=None):
        """Simulate Euler paths of the OU process.

        :param start_X: None or array used as X_0 for every path
        :return: (paths, dt) with paths of shape
            [nb_paths, dimension, nb_steps + 1]
        """
        paths = np.empty(
            (self.nb_paths, self.dimensions, self.nb_steps + 1))
        dt = self.maturity / self.nb_steps
        if start_X is not None:
            paths[:, :, 0] = start_X
        for p in range(self.nb_paths):
            if start_X is None:
                paths[p, :, 0] = self.S0
            for step in range(1, self.nb_steps + 1):
                z = np.random.normal(0, 1, self.dimensions)
                dW = z * np.sqrt(dt)
                prev = paths[p, :, step - 1]
                # dX = -k(X - mean)*dt + vol*dW
                pull = (-self.speed * self.periodic_coeff((step - 1) * dt)
                        * (prev - self.mean) * dt)
                noise = self.volatility * dW
                paths[p, :, step] = prev + pull + noise
        # path dimension: [nb_paths, dimension, time_steps]
        return paths, dt
class Combined(StockModel):
    """Time-concatenation of several stock models: the i-th model governs
    the i-th maturity interval, and the conditional expectation is computed
    piecewise and stitched together."""

    def __init__(self, stock_model_names, hyperparam_dicts, **kwargs):
        # NOTE(review): super().__init__ is intentionally not called here;
        # this class only delegates to the underlying models.
        self.stock_model_names = stock_model_names
        self.hyperparam_dicts = hyperparam_dicts

    def compute_cond_exp(self, times, time_ptr, X, obs_idx, delta_t, T, start_X,
                         n_obs_ot, return_path=True, get_loss=False,
                         weight=0.5, **kwargs):
        """Piecewise conditional expectation over the chained models; see
        StockModel.compute_cond_exp for the meaning of the arguments."""
        # first model: starts at time 0 with the given start_X
        model = STOCK_MODELS[self.stock_model_names[0]](
            **self.hyperparam_dicts[0])
        T = self.hyperparam_dicts[0]['maturity']
        loss, path_t, path_y = model.compute_cond_exp(
            times, time_ptr, X, obs_idx, delta_t,
            T, start_X,
            n_obs_ot, return_path=True, get_loss=get_loss,
            weight=weight,
        )
        # remaining models: each one continues where the previous ended
        for idx in range(1, len(self.stock_model_names)):
            start_X = path_y[-1, :, :]
            start_time = path_t[-1]
            params = self.hyperparam_dicts[idx]
            T += params['maturity']
            model = STOCK_MODELS[self.stock_model_names[idx]](**params)
            piece_loss, piece_t, piece_y = model.compute_cond_exp(
                times, time_ptr, X, obs_idx, delta_t,
                T, start_X,
                n_obs_ot, return_path=True, get_loss=get_loss,
                weight=weight, start_time=start_time,
            )
            loss += piece_loss
            path_t = np.concatenate([path_t, piece_t])
            path_y = np.concatenate([path_y, piece_y], axis=0)
        if not return_path:
            return loss
        # path dimension: [time_steps, batch_size, output_size]
        return loss, np.array(path_t), np.array(path_y)

    def get_optimal_loss(self, times, time_ptr, X, obs_idx, delta_t, T, start_X,
                         n_obs_ot, weight=0.5):
        """Loss of the true (piecewise) conditional expectation."""
        return self.compute_cond_exp(
            times, time_ptr, X, obs_idx, delta_t, T, start_X, n_obs_ot,
            return_path=False, get_loss=True, weight=weight)
# ==============================================================================
# this is needed for computing the loss with the true conditional expectation
def compute_loss(X_obs, Y_obs, Y_obs_bj, n_obs_ot, batch_size, eps=1e-10,
                 weight=0.5):
    """
    Loss of the true conditional expectation, mirroring model.compute_loss.

    Combines the distance of the post-jump prediction to the observation
    with the distance of the pre-jump prediction to the post-jump one,
    weighted by `weight`, normalized by the number of observations and the
    batch size. `eps` stabilizes the square roots at zero.
    """
    err_obs = np.sqrt(np.sum((X_obs - Y_obs) ** 2, axis=1) + eps)
    err_jump = np.sqrt(np.sum((Y_obs_bj - Y_obs) ** 2, axis=1) + eps)
    combined = (2 * weight * err_obs + 2 * (1 - weight) * err_jump) ** 2
    return np.sum(combined / n_obs_ot) / batch_size
# ==============================================================================
# dict for the supported stock models to get them from their name
STOCK_MODELS = {
    "BlackScholes": BlackScholes,
    "Heston": Heston,
    "OrnsteinUhlenbeck": OrnsteinUhlenbeck,
    "HestonWOFeller": HestonWOFeller,
    "combined": Combined,
    # the "sine_" names map to the same classes; the periodic drift is
    # activated by passing a sine_coeff hyperparameter
    "sine_BlackScholes": BlackScholes,
    "sine_Heston": Heston,
    "sine_OrnsteinUhlenbeck": OrnsteinUhlenbeck,
}
# ==============================================================================
# default hyperparameters used by the quick test plots below
hyperparam_test_stock_models = {
    'drift': 0.2, 'volatility': 0.3, 'mean': 0.5,
    'speed': 0.5, 'correlation': 0.5, 'nb_paths': 10, 'nb_steps': 100,
    'S0': 1, 'maturity': 1., 'dimension': 1}
def draw_stock_model(stock_model_name):
    """Simulate one path of the given model and save a plot as
    '<stock_model_name>.pdf'.

    The plot shows the simulated path together with a piecewise
    conditional expectation that is reset to the true path value at every
    third time step and grows with the drift in between.

    :param stock_model_name: str, key of STOCK_MODELS
    """
    # NOTE: mutates the module-level hyperparameter dict (kept for
    # backwards compatibility)
    hyperparam_test_stock_models['model_name'] = stock_model_name
    stockmodel = STOCK_MODELS[stock_model_name](**hyperparam_test_stock_models)
    stock_paths, dt = stockmodel.generate_paths()
    filename = '{}.pdf'.format(stock_model_name)

    # draw a path
    one_path = stock_paths[0, 0, :]
    dates = np.arange(len(one_path))
    cond_exp = np.zeros(len(one_path))
    cond_exp[0] = hyperparam_test_stock_models['S0']
    for i in range(1, len(one_path)):
        if i % 3 == 0:
            # observation time: reset cond. expectation to the path value
            cond_exp[i] = one_path[i]
        else:
            # between observations the cond. expectation grows with the drift
            cond_exp[i] = cond_exp[i - 1] * exp(
                hyperparam_test_stock_models['drift'] * dt)

    plt.plot(dates, one_path, label='stock path')
    plt.plot(dates, cond_exp, label='conditional expectation')
    plt.legend()
    plt.savefig(filename)
    plt.close()
if __name__ == '__main__':
    # quick visual sanity checks: save example plots for two models
    draw_stock_model("BlackScholes")
    heston = STOCK_MODELS["Heston"](**hyperparam_test_stock_models)
    heston.draw_path_heston("heston.pdf")
| [
"numpy.sum",
"numpy.maximum",
"numpy.empty",
"numpy.sin",
"numpy.exp",
"numpy.random.normal",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"numpy.size",
"matplotlib.pyplot.legend",
"numpy.concatenate",
"math.exp",
"numpy.log",
"matplotlib.pyplot.plot",
"copy.copy",
"numpy.s... | [((19536, 19560), 'numpy.sum', 'np.sum', (['(inner / n_obs_ot)'], {}), '(inner / n_obs_ot)\n', (19542, 19560), True, 'import numpy as np\n'), ((21093, 21138), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'one_path'], {'label': '"""stock path"""'}), "(dates, one_path, label='stock path')\n", (21101, 21138), True, 'import matplotlib.pyplot as plt\n'), ((21143, 21201), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'cond_exp'], {'label': '"""conditional expectation"""'}), "(dates, cond_exp, label='conditional expectation')\n", (21151, 21201), True, 'import matplotlib.pyplot as plt\n'), ((21206, 21218), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (21216, 21218), True, 'import matplotlib.pyplot as plt\n'), ((21223, 21244), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (21234, 21244), True, 'import matplotlib.pyplot as plt\n'), ((21249, 21260), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (21258, 21260), True, 'import matplotlib.pyplot as plt\n'), ((848, 859), 'numpy.size', 'np.size', (['S0'], {}), '(S0)\n', (855, 859), True, 'import numpy as np\n'), ((7077, 7138), 'numpy.empty', 'np.empty', (['(self.nb_paths, self.dimensions, self.nb_steps + 1)'], {}), '((self.nb_paths, self.dimensions, self.nb_steps + 1))\n', (7085, 7138), True, 'import numpy as np\n'), ((7172, 7233), 'numpy.empty', 'np.empty', (['(self.nb_paths, self.dimensions, self.nb_steps + 1)'], {}), '((self.nb_paths, self.dimensions, self.nb_steps + 1))\n', (7180, 7233), True, 'import numpy as np\n'), ((9059, 9073), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9071, 9073), True, 'import matplotlib.pyplot as plt\n'), ((9519, 9540), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (9530, 9540), True, 'import matplotlib.pyplot as plt\n'), ((9549, 9560), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9558, 9560), True, 'import matplotlib.pyplot as plt\n'), ((14261, 14322), 
'numpy.empty', 'np.empty', (['(self.nb_paths, self.dimensions, self.nb_steps + 1)'], {}), '((self.nb_paths, self.dimensions, self.nb_steps + 1))\n', (14269, 14322), True, 'import numpy as np\n'), ((16084, 16145), 'numpy.empty', 'np.empty', (['(self.nb_paths, self.dimensions, self.nb_steps + 1)'], {}), '((self.nb_paths, self.dimensions, self.nb_steps + 1))\n', (16092, 16145), True, 'import numpy as np\n'), ((4349, 4361), 'copy.copy', 'copy.copy', (['y'], {}), '(y)\n', (4358, 4361), False, 'import copy, os\n'), ((10634, 10676), 'numpy.split', 'np.split', (['y'], {'indices_or_sections': '(2)', 'axis': '(1)'}), '(y, indices_or_sections=2, axis=1)\n', (10642, 10676), True, 'import numpy as np\n'), ((10779, 10808), 'numpy.exp', 'np.exp', (['(-self.speed * delta_t)'], {}), '(-self.speed * delta_t)\n', (10785, 10808), True, 'import numpy as np\n'), ((10885, 10915), 'numpy.concatenate', 'np.concatenate', (['[s, v]'], {'axis': '(1)'}), '([s, v], axis=1)\n', (10899, 10915), True, 'import numpy as np\n'), ((11629, 11690), 'numpy.empty', 'np.empty', (['(self.nb_paths, self.dimensions, self.nb_steps + 1)'], {}), '((self.nb_paths, self.dimensions, self.nb_steps + 1))\n', (11637, 11690), True, 'import numpy as np\n'), ((11732, 11793), 'numpy.empty', 'np.empty', (['(self.nb_paths, self.dimensions, self.nb_steps + 1)'], {}), '((self.nb_paths, self.dimensions, self.nb_steps + 1))\n', (11740, 11793), True, 'import numpy as np\n'), ((18334, 18367), 'numpy.concatenate', 'np.concatenate', (['[path_t, _path_t]'], {}), '([path_t, _path_t])\n', (18348, 18367), True, 'import numpy as np\n'), ((18389, 18430), 'numpy.concatenate', 'np.concatenate', (['[path_y, _path_y]'], {'axis': '(0)'}), '([path_y, _path_y], axis=0)\n', (18403, 18430), True, 'import numpy as np\n'), ((5475, 5491), 'numpy.array', 'np.array', (['path_t'], {}), '(path_t)\n', (5483, 5491), True, 'import numpy as np\n'), ((5493, 5509), 'numpy.array', 'np.array', (['path_y'], {}), '(path_y)\n', (5501, 5509), True, 'import numpy as 
np\n'), ((6838, 6848), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (6845, 6848), True, 'import numpy as np\n'), ((7044, 7054), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (7051, 7054), True, 'import numpy as np\n'), ((7610, 7649), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'self.dimensions'], {}), '(0, 1, self.dimensions)\n', (7626, 7649), True, 'import numpy as np\n'), ((7685, 7724), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'self.dimensions'], {}), '(0, 1, self.dimensions)\n', (7701, 7724), True, 'import numpy as np\n'), ((13187, 13234), 'numpy.concatenate', 'np.concatenate', (['[spot_paths, var_paths]'], {'axis': '(1)'}), '([spot_paths, var_paths], axis=1)\n', (13201, 13234), True, 'import numpy as np\n'), ((14653, 14692), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'self.dimensions'], {}), '(0, 1, self.dimensions)\n', (14669, 14692), True, 'import numpy as np\n'), ((16476, 16515), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'self.dimensions'], {}), '(0, 1, self.dimensions)\n', (16492, 16515), True, 'import numpy as np\n'), ((18549, 18565), 'numpy.array', 'np.array', (['path_t'], {}), '(path_t)\n', (18557, 18565), True, 'import numpy as np\n'), ((18567, 18583), 'numpy.array', 'np.array', (['path_y'], {}), '(path_y)\n', (18575, 18583), True, 'import numpy as np\n'), ((21023, 21070), 'math.exp', 'exp', (["(hyperparam_test_stock_models['drift'] * dt)"], {}), "(hyperparam_test_stock_models['drift'] * dt)\n", (21026, 21070), False, 'from math import sqrt, exp\n'), ((1000, 1022), 'numpy.sin', 'np.sin', (['(sine_coeff * t)'], {}), '(sine_coeff * t)\n', (1006, 1022), True, 'import numpy as np\n'), ((7765, 7776), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (7772, 7776), True, 'import numpy as np\n'), ((7915, 7926), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (7922, 7926), True, 'import numpy as np\n'), ((11347, 11363), 'numpy.maximum', 'np.maximum', (['v', '(0)'], {}), '(v, 0)\n', (11357, 
11363), True, 'import numpy as np\n'), ((12208, 12247), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'self.dimensions'], {}), '(0, 1, self.dimensions)\n', (12224, 12247), True, 'import numpy as np\n'), ((12287, 12326), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'self.dimensions'], {}), '(0, 1, self.dimensions)\n', (12303, 12326), True, 'import numpy as np\n'), ((14731, 14742), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (14738, 14742), True, 'import numpy as np\n'), ((16554, 16565), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (16561, 16565), True, 'import numpy as np\n'), ((11278, 11294), 'numpy.maximum', 'np.maximum', (['v', '(0)'], {}), '(v, 0)\n', (11288, 11294), True, 'import numpy as np\n'), ((11491, 11507), 'numpy.maximum', 'np.maximum', (['v', '(0)'], {}), '(v, 0)\n', (11501, 11507), True, 'import numpy as np\n'), ((11585, 11601), 'numpy.maximum', 'np.maximum', (['v', '(0)'], {}), '(v, 0)\n', (11595, 11601), True, 'import numpy as np\n'), ((12371, 12382), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (12378, 12382), True, 'import numpy as np\n'), ((12529, 12540), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (12536, 12540), True, 'import numpy as np\n'), ((19345, 19381), 'numpy.sum', 'np.sum', (['((X_obs - Y_obs) ** 2)'], {'axis': '(1)'}), '((X_obs - Y_obs) ** 2, axis=1)\n', (19351, 19381), True, 'import numpy as np\n'), ((19431, 19470), 'numpy.sum', 'np.sum', (['((Y_obs_bj - Y_obs) ** 2)'], {'axis': '(1)'}), '((Y_obs_bj - Y_obs) ** 2, axis=1)\n', (19437, 19470), True, 'import numpy as np\n'), ((7837, 7871), 'numpy.sqrt', 'np.sqrt', (['(1 - self.correlation ** 2)'], {}), '(1 - self.correlation ** 2)\n', (7844, 7871), True, 'import numpy as np\n'), ((12447, 12481), 'numpy.sqrt', 'np.sqrt', (['(1 - self.correlation ** 2)'], {}), '(1 - self.correlation ** 2)\n', (12454, 12481), True, 'import numpy as np\n'), ((12620, 12651), 'numpy.log', 'np.log', (['spot_paths[i, :, k - 1]'], {}), '(spot_paths[i, :, k - 1])\n', 
(12626, 12651), True, 'import numpy as np\n')] |
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
from inspect import getargs
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import FunctionTransformer
from .bivariate import get_bivariate_funcs
from .univariate import get_univariate_funcs
class FeatureFunctionTransformer(FunctionTransformer):
    """ Construct a transformer from a given feature function.
    Similarly to FunctionTransformer, FeatureFunctionTranformer applies a
    feature function to a given array X.
    Parameters
    ----------
    func : callable or None (default: None)
        Feature function to be used in the transformer.
        If None, the identity function is used.
    validate : bool (default: True)
        If True, the array X will be checked before calling the function.
        If possible, a 2d Numpy array is returned. Otherwise, an exception
        will be raised. If False, the array X is not checked.
    params : dict or None (default: None)
        If not None, dictionary of additional keyword arguments to pass to the
        feature function.
    """
    def __init__(self, func=None, validate=True, params=None):
        self.params = params
        super(FeatureFunctionTransformer, self).__init__(func=func,
                                                         validate=validate,
                                                         kw_args=params)
    def transform(self, X, y='deprecated'):
        """ Transform the array X with the given feature function.
        Parameters
        ----------
        X : ndarray, shape (n_channels, n_times)
        y : (ignored)
        Returns
        -------
        X_out : ndarray, shape (n_output_func,)
            Usually, `n_output_func` will be equal to `n_channels` for most
            univariate feature functions and to
            `(n_channels * (n_channels + 1)) // 2` for most bivariate feature
            functions. See the doc of `func` for more details.
        """
        X_out = super(FeatureFunctionTransformer, self).transform(X, y)
        # Remember the output dimensionality so that `get_feature_names` can
        # enumerate the features later on.
        self.output_shape_ = X_out.shape[0]
        return X_out
    def get_feature_names(self):
        """ Mapping of the feature indices to feature names. """
        if not hasattr(self, 'output_shape_'):
            raise ValueError('Call `transform` or `fit_transform` first.')
        else:
            return np.arange(self.output_shape_).astype(str)
    def get_params(self, deep=True):
        """ Get the parameters (if any) of the given feature function.
        Parameters
        ----------
        deep : bool (default: True)
            If True, the method will get the parameters of the transformer and
            subobjects. (See `sklearn.preprocessing.FunctionTransformer`).
        """
        _params = super(FeatureFunctionTransformer, self).get_params(deep=deep)
        if hasattr(_params['func'], 'func'):
            # If `_params['func'] is of type `functools.partial`
            func_to_inspect = _params['func'].func
        elif hasattr(_params['func'], 'py_func'):
            # If `_params['func'] is a jitted Python function
            func_to_inspect = _params['func'].py_func
        else:
            # If `_params['func'] is an actual Python function
            func_to_inspect = _params['func']
        # Get code object from the function (Python 2 exposes `func_code`,
        # Python 3 exposes `__code__`).
        if hasattr(func_to_inspect, 'func_code'):
            func_code = func_to_inspect.func_code
        else:
            func_code = func_to_inspect.__code__
        args, _, _ = getargs(func_code)
        # Get defaults from the function (Python 2 exposes `func_defaults`,
        # Python 3 exposes `__defaults__`).
        # BUGFIX: the probed attribute used to be 'defaults', which never
        # exists on a function object, so the first branch (reading
        # `func_defaults`) was unreachable. Probe 'func_defaults' to mirror
        # the 'func_code' check above.
        if hasattr(func_to_inspect, 'func_defaults'):
            defaults = func_to_inspect.func_defaults
        else:
            defaults = func_to_inspect.__defaults__
        if defaults is None:
            return dict()
        else:
            n_defaults = len(defaults)
            func_params = {key: value for key, value in
                           zip(args[-n_defaults:], defaults)}
        if self.params is not None:
            func_params.update(self.params)
        return func_params
    def set_params(self, **new_params):
        """ Set the parameters of the given feature function. """
        valid_params = self.get_params()
        for key in new_params.keys():
            if key not in valid_params:
                raise ValueError('Invalid parameter %s for transformer %s. '
                                 'Check the list of available parameters '
                                 'using the `get_params` method of the '
                                 'transformer.' % (key, self))
        if self.params is not None:
            self.params.update(new_params)
        else:
            self.params = new_params
        self.kw_args = self.params
        return self
def _format_as_dataframe(X, feature_names):
""" Utility function to format extracted features (X) as a Pandas
DataFrame using names and indexes from `feature_names`. The index of the
columns is a MultiIndex with two levels. At level 0, the alias of the
feature function is given. At level 1, an enumeration of the features is
given.
Parameters
----------
X : ndarray, shape (n_epochs, n_features)
Extracted features. `X` should be the output of `extract_features`.
feature_names : list of str
Returns
-------
output : Pandas DataFrame
"""
n_features = X.shape[1]
if len(feature_names) != n_features:
raise ValueError('The length of `feature_names` should be equal to '
'`X.shape[1]` (`n_features`).')
else:
_names = [n.split('__')[0] for n in feature_names]
_idx = [n.split('__')[1] for n in feature_names]
columns = pd.MultiIndex.from_arrays([_names, _idx])
return pd.DataFrame(data=X, columns=columns)
def _apply_extractor(extractor, X):
""" Utility function to apply features extractor to ndarray X.
Parameters
----------
extractor : Instance of sklearn.pipeline.FeatureUnion or sklearn.pipeline
X : ndarray, shape (n_channels, n_times)
Returns
-------
ndarray, shape (n_features,)
"""
return extractor.fit_transform(X)
def _check_func_names(selected, feature_funcs_names):
""" Checks if the names of selected feature functions match the available
feature functions.
Parameters
----------
selected : list of str
Names of the selected feature functions.
feature_funcs_names : dict-keys or list
Names of available feature functions.
Returns
-------
valid_func_names : list of str
"""
valid_func_names = list()
for f in selected:
if f in feature_funcs_names:
valid_func_names.append(f)
else:
raise ValueError('The given alias (%s) is not valid. The valid '
'aliases for feature functions are: %s.' %
(f, feature_funcs_names))
if not valid_func_names:
raise ValueError('No valid feature function names given.')
else:
return valid_func_names
def extract_features(X, sfreq, selected_funcs, funcs_params=None, n_jobs=1,
                     return_as_df=False):
    """ Extraction of temporal or spectral features from epoched EEG signals.
    Parameters
    ----------
    X : ndarray, shape (n_epochs, n_channels, n_times)
        Array of epoched EEG data.
    sfreq : float
        Sampling rate of the data.
    selected_funcs : list of str
        Aliases of the feature functions used to extract features from the
        data. (See `mne_features` documentation for the list of available
        feature functions).
    funcs_params : dict or None (default: None)
        If not None, dict of optional parameters passed to the feature
        functions, keyed as [alias_feature_function]__[optional_param]
        (for example: 'higuchi_fd__kmax`).
    n_jobs : int (default: 1)
        Number of CPU cores used to parallelize the extraction (-1 uses all
        cores).
    return_as_df : bool (default: False)
        If True, return the features as a Pandas DataFrame whose column index
        is a MultiIndex (see `pd.MultiIndex`) holding the alias of each
        feature function used. If False, return a 2d Numpy array.
    Returns
    -------
    array-like, shape (n_epochs, n_features)
    """
    if sfreq <= 0:
        raise ValueError('Sampling rate `sfreq` must be positive.')
    # Collect every available feature function: univariate + bivariate.
    # `dict(...)` copies, so the registries returned by the helpers are
    # never mutated in place.
    feature_funcs = dict(get_univariate_funcs(sfreq))
    feature_funcs.update(get_bivariate_funcs(sfreq))
    sel_funcs = _check_func_names(selected_funcs, feature_funcs.keys())
    # One transformer per selected feature function, combined in a union.
    transformers = [(alias, FeatureFunctionTransformer(func=feature_funcs[alias]))
                    for alias in sel_funcs]
    extractor = FeatureUnion(transformer_list=transformers)
    if funcs_params is not None:
        extractor.set_params(**funcs_params)
    # Extract features epoch by epoch, optionally in parallel.
    n_epochs = X.shape[0]
    parallel = joblib.Parallel(n_jobs=n_jobs)
    res = parallel(joblib.delayed(_apply_extractor)(extractor, X[j, :, :])
                   for j in range(n_epochs))
    Xnew = np.vstack(res)
    if not return_as_df:
        return Xnew
    return _format_as_dataframe(Xnew, extractor.get_feature_names())
| [
"pandas.DataFrame",
"sklearn.pipeline.FeatureUnion",
"sklearn.externals.joblib.Parallel",
"sklearn.externals.joblib.delayed",
"pandas.MultiIndex.from_arrays",
"inspect.getargs",
"numpy.arange",
"numpy.vstack"
] | [((9222, 9256), 'sklearn.pipeline.FeatureUnion', 'FeatureUnion', ([], {'transformer_list': '_tr'}), '(transformer_list=_tr)\n', (9234, 9256), False, 'from sklearn.pipeline import FeatureUnion\n'), ((9478, 9492), 'numpy.vstack', 'np.vstack', (['res'], {}), '(res)\n', (9487, 9492), True, 'import numpy as np\n'), ((3620, 3638), 'inspect.getargs', 'getargs', (['func_code'], {}), '(func_code)\n', (3627, 3638), False, 'from inspect import getargs\n'), ((5831, 5872), 'pandas.MultiIndex.from_arrays', 'pd.MultiIndex.from_arrays', (['[_names, _idx]'], {}), '([_names, _idx])\n', (5856, 5872), True, 'import pandas as pd\n'), ((5888, 5925), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'X', 'columns': 'columns'}), '(data=X, columns=columns)\n', (5900, 5925), True, 'import pandas as pd\n'), ((9345, 9375), 'sklearn.externals.joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': 'n_jobs'}), '(n_jobs=n_jobs)\n', (9360, 9375), False, 'from sklearn.externals import joblib\n'), ((9376, 9408), 'sklearn.externals.joblib.delayed', 'joblib.delayed', (['_apply_extractor'], {}), '(_apply_extractor)\n', (9390, 9408), False, 'from sklearn.externals import joblib\n'), ((2470, 2499), 'numpy.arange', 'np.arange', (['self.output_shape_'], {}), '(self.output_shape_)\n', (2479, 2499), True, 'import numpy as np\n')] |
"""An abstract baseclass for apriori orbits
Description:
------------
Implementations of apriori orbits should inherit from `AprioriOrbit` and define their own read and calculate methods.
"""
# Standard library imports
import datetime
from typing import Any, Dict
# External library imports
import numpy as np
# Midgard imports
from midgard.dev import exceptions
from midgard.math.constant import constant
# Where imports
from where.lib import config
from where.data import dataset3 as dataset
from where.lib import log
from where.lib import util
class AprioriOrbit:
    """An abstract baseclass for initial orbits

    Implementations of apriori orbits should inherit from `AprioriOrbit` and
    define their own `_read`, `_edit` and `_calculate` methods.
    """

    # Overwritten by subclasses; used as the `stage` name of the datasets.
    name = "Overwritten by subclasses"

    def __init__(self, rundate: datetime.date, **kwargs: Dict[str, Any]) -> None:
        """Set up a new AprioriOrbit object, does not parse any data
        TODO: Remove dependency on rundate, use time to read correct files. (What to do with dataset?)
        Args:
            rundate: Date of model run.
        """
        # MURKS: Should it be done like that. The technique is normally not given for unittest routines (like
        # test_broadcast.py).
        try:
            pipeline = config.analysis.pipeline.str
        except exceptions.MissingEntryError:
            pipeline = None
        # TODO: Getting of 'id' and 'profile' -> Should it be done like that?
        try:
            profile = config.analysis.profile.str
        except exceptions.MissingEntryError:
            profile = None
        try:
            id_ = config.analysis.id.str
        except exceptions.MissingEntryError:
            id_ = None
        self.rundate = rundate
        self.pipeline = pipeline
        # Empty datasets; filled lazily via the `dset_raw`/`dset_edit`
        # properties.
        self._dset_raw = dataset.Dataset(
            rundate=rundate,
            pipeline=pipeline,
            stage=self.name,
            label="raw",
            profile=profile,
            id=id_,
        )
        self._dset_edit = dataset.Dataset(
            rundate=rundate,
            pipeline=pipeline,
            stage=self.name,
            label="edit",
            profile=profile,
            id=id_,
        )
        self._dset = None

    @property
    def dset_raw(self) -> "Dataset":
        """Dataset representing raw data from apriori orbit files
        Reads data if the raw data are not already present.
        """
        if not self._dset_raw.num_obs:
            self._read(self._dset_raw)
        return self._dset_raw

    @property
    def dset_edit(self) -> "Dataset":
        """Dataset representing edit data from apriori orbit files
        Edits data if the edit data are not already present.
        """
        if not self._dset_edit.num_obs:
            self._edit(self._dset_edit)
        return self._dset_edit

    @property
    def dset(self) -> "Dataset":
        """Dataset representing calculated apriori orbit
        Calculates data from `dset_edit` if the data are not already present.
        """
        # BUGFIX: use an identity check instead of `== None`. The Dataset
        # class may overload `__eq__`, in which case the equality comparison
        # would not yield a plain boolean.
        if self._dset is None:
            self._dset = dataset.Dataset(rundate=self.rundate, pipeline=self.pipeline, stage=self.name, label="orbit")
            self._calculate(self._dset, self._dset_edit)
        return self._dset

    def calculate_orbit(self, dset: "Dataset", time: str = "time") -> None:
        """Set Dataset representing calculated apriori orbit
        Args:
            dset: A dataset containing the data.
            time: Define time fields to be used. It can be for example 'time' or 'sat_time'. 'time' is related to
                  observation time and 'sat_time' to satellite transmission time.
        """
        if not dset.num_obs:
            log.fatal(f"Dataset is empty. No observation epochs given for calculating orbits.")
        # TODO: Getting of 'id' and 'profile' -> Should it be done like that?
        try:
            profile = config.analysis.profile.str
        except exceptions.MissingEntryError:
            profile = None
        try:
            id_ = config.analysis.id.str
        except exceptions.MissingEntryError:
            id_ = None
        self._dset = dataset.Dataset(
            rundate=self.rundate,
            pipeline=self.pipeline,
            stage=self.name,
            label="orbit",
            profile=profile,
            id=id_,
        )
        self._calculate(self._dset, dset, time=time)

    #
    # Abstract methods
    #
    def _read(self, dset_raw: "Dataset"):
        """Read raw data
        """
        util.not_implemented()

    def _edit(self, dset_edit: "Dataset"):
        """Edit raw data
        """
        util.not_implemented()

    def _calculate(self, dset: "Dataset"):
        """Calculate orbit data

        NOTE(review): callers in this class invoke `_calculate` with extra
        arguments (a second dataset and a `time` keyword) — subclasses are
        expected to accept them; confirm against the implementations.
        """
        util.not_implemented()

    #
    # Common methods for all apriori orbits
    #
    def relativistic_clock_correction(self, sat_pos: np.ndarray, sat_vel: np.ndarray) -> np.ndarray:
        """Determine relativistic clock correction due to orbit eccentricity
        The correction is caluclated for precise and broadcast orbits after Eq. 10.10 and 10.11 in :cite:`iers2010`.
        TODO: This routine should be placed in Midgard, e.g. models/ or math/?
        Args:
            sat_pos: Array with satellite positions.
            sat_vel: Array with satellite velocities.
        Returns:
            Relativistic clock correction due to orbit eccentricity corrections for each observation
        """
        # Row-wise dot product of position and velocity for each epoch.
        return -2.0 / constant.c * np.einsum("ij,ij->i", sat_pos, sat_vel)
        # return -2 / constant.c * (sat_pos[:, None, :] @
        #                          sat_vel[:, :, None])[:, 0, 0]

    def satellite_clock_correction_com(
        self, antex: "AntennaCorrection", sys_freq: Dict[str, Dict[str, str]]
    ) -> np.ndarray:
        """Determine satellite clock correction related to center of mass (CoM)
        The satellite clock correction is based on Section 20.3.3.3.3.1 in :cite:`is-gps-200h`.
        Args:
            antex:      Antenna correction object based including ANTEX file data
            sys_freq:   Dictionary with frequency or frequency combination given for GNSS identifier:
                        sys_freq = { <sys_id>: <freq> }  (e.g. sys_freq = {'E': 'E1',  'G': 'L1_L2'} )
        Returns:
            GNSS satellite clock corrections for each observation in [m] related to CoM (Note: without relativistic
            orbit eccentricity correction)
        """
        correction = antex.satellite_phase_center_offset(self.dset, sys_freq)
        # `satellite_clock_correction` is provided by concrete subclasses.
        return self.satellite_clock_correction(self.dset) + correction.yaw.z
| [
"where.lib.util.not_implemented",
"where.lib.log.fatal",
"where.data.dataset3.Dataset",
"numpy.einsum"
] | [((1710, 1821), 'where.data.dataset3.Dataset', 'dataset.Dataset', ([], {'rundate': 'rundate', 'pipeline': 'pipeline', 'stage': 'self.name', 'label': '"""raw"""', 'profile': 'profile', 'id': 'id_'}), "(rundate=rundate, pipeline=pipeline, stage=self.name, label=\n 'raw', profile=profile, id=id_)\n", (1725, 1821), True, 'from where.data import dataset3 as dataset\n'), ((2073, 2185), 'where.data.dataset3.Dataset', 'dataset.Dataset', ([], {'rundate': 'rundate', 'pipeline': 'pipeline', 'stage': 'self.name', 'label': '"""edit"""', 'profile': 'profile', 'id': 'id_'}), "(rundate=rundate, pipeline=pipeline, stage=self.name, label=\n 'edit', profile=profile, id=id_)\n", (2088, 2185), True, 'from where.data import dataset3 as dataset\n'), ((4372, 4495), 'where.data.dataset3.Dataset', 'dataset.Dataset', ([], {'rundate': 'self.rundate', 'pipeline': 'self.pipeline', 'stage': 'self.name', 'label': '"""orbit"""', 'profile': 'profile', 'id': 'id_'}), "(rundate=self.rundate, pipeline=self.pipeline, stage=self.\n name, label='orbit', profile=profile, id=id_)\n", (4387, 4495), True, 'from where.data import dataset3 as dataset\n'), ((4849, 4871), 'where.lib.util.not_implemented', 'util.not_implemented', ([], {}), '()\n', (4869, 4871), False, 'from where.lib import util\n'), ((4961, 4983), 'where.lib.util.not_implemented', 'util.not_implemented', ([], {}), '()\n', (4981, 4983), False, 'from where.lib import util\n'), ((5080, 5102), 'where.lib.util.not_implemented', 'util.not_implemented', ([], {}), '()\n', (5100, 5102), False, 'from where.lib import util\n'), ((3293, 3391), 'where.data.dataset3.Dataset', 'dataset.Dataset', ([], {'rundate': 'self.rundate', 'pipeline': 'self.pipeline', 'stage': 'self.name', 'label': '"""orbit"""'}), "(rundate=self.rundate, pipeline=self.pipeline, stage=self.\n name, label='orbit')\n", (3308, 3391), True, 'from where.data import dataset3 as dataset\n'), ((3929, 4017), 'where.lib.log.fatal', 'log.fatal', (['f"""Dataset is empty. 
No observation epochs given for calculating orbits."""'], {}), "(\n f'Dataset is empty. No observation epochs given for calculating orbits.')\n", (3938, 4017), False, 'from where.lib import log\n'), ((5826, 5865), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'sat_pos', 'sat_vel'], {}), "('ij,ij->i', sat_pos, sat_vel)\n", (5835, 5865), True, 'import numpy as np\n')] |
"""
Usage:
$(cdips) python merge_for_exofoptess.py
"""
import os, socket
from glob import glob
import numpy as np, pandas as pd
from numpy import array as nparr
from scipy.optimize import curve_fit
from astrobase import imageutils as iu
from astrobase.timeutils import get_epochs_given_midtimes_and_period
from cdips.utils import today_YYYYMMDD
from cdips.utils import find_rvs as fr
from cdips.utils import get_vizier_catalogs as gvc
from cdips.utils import collect_cdips_lightcurves as ccl
from cdips.utils.pipelineutils import save_status, load_status
##########
# config #
##########
# Verbose diagnostic output toggle (used in the multi-sector ephemeris fit
# inside `main`).
DEBUG = 1
# Resolve machine-specific paths to the MCMC fit results (`fitdir`) and the
# exoFOP upload staging directory (`exofopdir`).
hostname = socket.gethostname()
if 'phtess1' in hostname or 'phtess2' in hostname:
    fitdir = "/home/lbouma/proj/cdips/results/fit_gold"
    exofopdir = "/home/lbouma/proj/cdips/data/exoFOP_uploads"
elif 'brik' in hostname:
    fitdir = "/home/luke/Dropbox/proj/cdips/results/fit_gold"
    exofopdir = "/home/luke/Dropbox/proj/cdips/data/exoFOP_uploads"
else:
    raise ValueError('where is fit_gold directory on {}?'.format(hostname))
# Decimal places used to round each column of the exoFOP-TESS upload table
# (passed to `DataFrame.round`; -1 rounds to the nearest ten).
FORMATDICT = {
    'period': 8,
    'period_unc': 8,
    'epoch': 8,
    'epoch_unc': 8,
    'depth': -1,
    'depth_unc': -1,
    'duration': 3,
    'duration_unc': 3,
    'inc': 1,
    'inc_unc': 1,
    'imp': 3,
    'imp_unc': 3,
    'r_planet': 5,
    'r_planet_unc': 5,
    'ar_star': 2,
    'a_rstar_unc': 2,
    'radius': 2,
    'radius_unc': 2,
    'mass': 2,
    'mass_unc': 2,
    'temp': 2,
    'temp_unc': 2,
    'insol': 2,
    'insol_unc': 2,
    'dens': 2,
    'dens_unc': 2,
    'sma': 2,
    'sma_unc': 2,
    'ecc': 2,
    'ecc_unc': 2,
    'arg_peri': 2,
    'arg_peri_unc': 2,
    'time_peri': 2,
    'time_peri_unc': 2,
    'vsa': 2,
    'vsa_unc': 2,
}
# Column order of the exoFOP-TESS bulk-upload planet-parameter file written
# by `main` (the trailing `source_id` is dropped before the final upload file).
COLUMN_ORDER = ['target', 'flag', 'disp', 'period', 'period_unc', 'epoch',
                'epoch_unc', 'depth', 'depth_unc', 'duration', 'duration_unc',
                'inc', 'inc_unc', 'imp', 'imp_unc', 'r_planet', 'r_planet_unc',
                'ar_star', 'a_rstar_unc', 'radius', 'radius_unc', 'mass',
                'mass_unc', 'temp', 'temp_unc', 'insol', 'insol_unc', 'dens',
                'dens_unc', 'sma', 'sma_unc', 'ecc', 'ecc_unc', 'arg_peri',
                'arg_peri_unc', 'time_peri', 'time_peri_unc', 'vsa', 'vsa_unc',
                'tag', 'group', 'prop_period', 'notes', 'source_id']
####################
# helper functions #
####################
def linear_model(xdata, m, b):
    """Straight line ``m * xdata + b``; model for the least-squares ephemeris fit."""
    slope_term = m * xdata
    return slope_term + b
########
# main #
########
def main(is_dayspecific_exofop_upload=1, cdipssource_vnum=0.4,
         uploadnamestr='sectors_12_thru_13_clear_threshold'):
    """
    Put together a few useful CSV candidate summaries:
    * bulk uploads to exofop/tess
    * observer info sparse (focus on TICIDs, gaia mags, positions on sky, etc)
    * observer info full (stellar rvs for membership assessment; ephemeris
    information)
    * merge of everything (exoFOP upload, + the subset of gaia information
    useful to observers)
    ----------
    Args:
    is_dayspecific_exofop_upload: if True, reads in the manually-written (from
    google spreadsheet) comments and source_ids, and writes those to a
    special "TO_EXOFOP" csv file.
    uploadnamestr: used as unique identifying string in file names
    """
    #
    # Read in the results from the fits
    #
    paramglob = os.path.join(
        fitdir, "sector-*_CLEAR_THRESHOLD/fitresults/hlsp_*gaiatwo*_llc/*fitparameters.csv"
    )
    parampaths = glob(paramglob)
    statusglob = os.path.join(
        fitdir, "sector-*_CLEAR_THRESHOLD/fitresults/hlsp_*gaiatwo*_llc/*.stat"
    )
    statuspaths = glob(statusglob)
    statuses = [dict(load_status(f)['fivetransitparam_fit'])
                for f in statuspaths]
    # Concatenate all per-candidate fit-parameter CSVs into one table and
    # write the merged file.
    param_df = pd.concat((pd.read_csv(f, sep='|') for f in parampaths))
    outpath = os.path.join(
        fitdir, "{}_{}_mergedfitparams.csv".
        format(today_YYYYMMDD(), uploadnamestr)
    )
    param_df['param_path'] = parampaths
    param_df.to_csv(outpath, index=False, sep='|')
    print('made {}'.format(outpath))
    status_df = pd.DataFrame(statuses)
    status_df['statuspath'] = statuspaths
    # Gaia DR2 source_ids are zero-padded into the 'gaiatwo...' fit directory
    # names; strip the padding and parse them back to integers.
    status_gaiaids = list(map(
        lambda x: int(
            os.path.dirname(x).split('gaiatwo')[1].split('-')[0].lstrip('0')
        ), statuspaths
    ))
    status_df['source_id'] = status_gaiaids
    if is_dayspecific_exofop_upload:
        #
        # Manually commented candidates are the only ones we're uploading.
        #
        manual_comment_df = pd.read_csv(
            '/nfs/phtess2/ar0/TESS/PROJ/lbouma/cdips/data/exoFOP_uploads/{}_cdips_candidate_upload.csv'.
            format(today_YYYYMMDD()), sep=","
        )
        common = status_df.merge(manual_comment_df,
                                 on='source_id',
                                 how='inner')
        sel_status_df = status_df[status_df.source_id.isin(common.source_id)]
        #
        # WARN: the MCMC fits should have converged before uploading.
        # (20190918 had two exceptions, where the fit looked fine.)
        #
        if len(sel_status_df[sel_status_df['is_converged']=='False'])>0:
            print('\nWRN! THE FOLLOWING CANDIDATES ARE NOT CONVERGED')
            print(sel_status_df[sel_status_df['is_converged']=='False'])
        param_gaiaids = list(map(
            lambda x: int(
                os.path.basename(x).split('gaiatwo')[1].split('-')[0].lstrip('0')
            ), parampaths
        ))
        param_df['source_id'] = param_gaiaids
        #
        # Require that you actually have a parameter file (...).
        #
        _df = sel_status_df.merge(param_df, on='source_id', how='inner')
        to_exofop_df = param_df[param_df.source_id.isin(_df.source_id)]
        if len(to_exofop_df) != len(manual_comment_df):
            print('\nWRN! {} CANDIDATES DID NOT HAVE PARAMETERS'.format(
                len(manual_comment_df) - len(to_exofop_df)
            ))
            print('They are...')
            print(
                manual_comment_df[
                    ~manual_comment_df.source_id.isin(to_exofop_df.source_id)
                ]
            )
            print('\n')
        #
        # Duplicate entries in "to_exofop_df" are multi-sector. Average their
        # parameters (really will end up just being durations) across sectors,
        # and then remove the duplicate multi-sector rows using the "groupby"
        # aggregator. This removes the string-based columns, which we can
        # reclaim by a "drop_duplicates" call, since they don't have
        # sector-specific information. Then, assign comments and format as
        # appropriate for ExoFop-TESS. Unique tag for the entire upload.
        #
        to_exofop_df['source_id'] = to_exofop_df['source_id'].astype(str)
        mean_val_to_exofop_df = to_exofop_df.groupby('target').mean().reset_index()
        string_cols = ['target', 'flag', 'disp', 'tag', 'group', 'notes',
                       'source_id']
        dup_dropped_str_df = (
            to_exofop_df.drop_duplicates(
                subset=['target'], keep='first', inplace=False
            )[string_cols]
        )
        out_df = mean_val_to_exofop_df.merge(
            dup_dropped_str_df, how='left', on='target'
        )
        #
        # The above procedure got the epochs on multisector planets wrong.
        # Determine (t0,P) by fitting a line to entries with >=3 sectors
        # instead. For the two-sector case, due to bad covariance matrices,
        # just use the newest ephemeris.
        #
        multisector_df = (
            to_exofop_df[to_exofop_df.target.groupby(to_exofop_df.target).
                         transform('value_counts') > 1]
        )
        u_multisector_df = out_df[out_df.target.isin(multisector_df.target)]
        # temporarily drop the multisector rows from out_df (they will be
        # re-merged)
        out_df = out_df.drop(
            np.argwhere(out_df.target.isin(multisector_df.target)).flatten(),
            axis=0
        )
        # Refit (t0, P) per multi-sector target: midtimes vs. epoch number
        # should be a straight line with slope = period.
        ephem_d = {}
        for ix, t in enumerate(np.unique(multisector_df.target)):
            sel = (multisector_df.target == t)
            tmid = nparr(multisector_df[sel].epoch)
            tmid_err = nparr(multisector_df[sel].epoch_unc)
            init_period = nparr(multisector_df[sel].period.mean())
            E, init_t0 = get_epochs_given_midtimes_and_period(
                tmid, init_period, verbose=False
            )
            popt, pcov = curve_fit(
                linear_model, E, tmid, p0=(init_period, init_t0), sigma=tmid_err
            )
            if np.all(np.isinf(pcov)):
                # if least-squares doesn't give good error (i.e., just two
                # epochs), take the most recent epoch.
                s = np.argmax(tmid)
                use_t0 = tmid[s]
                use_t0_err = tmid_err[s]
                use_period = nparr(multisector_df[sel].period)[s]
                use_period_err = nparr(multisector_df[sel].period_unc)[s]
            else:
                use_t0 = popt[1]
                use_t0_err = pcov[1,1]**0.5
                use_period = popt[0]
                use_period_err = pcov[0,0]**0.5
            if DEBUG:
                print(
                    'init tmid {}, tmiderr {}\nperiod {}, perioderr {}'.
                    format(tmid, tmid_err, nparr(multisector_df[sel].period),
                           nparr(multisector_df[sel].period_unc))
                )
                print(
                    'use tmid {}, tmiderr {}\nperiod {}, perioderr {}'.
                    format(use_t0, use_t0_err, use_period, use_period_err)
                )
                print(10*'-')
            ephem_d[ix] = {
                'target': t, 'epoch': use_t0, 'epoch_unc': use_t0_err,
                'period': use_period, 'period_unc': use_period_err
            }
        ephem_df = pd.DataFrame(ephem_d).T
        # Overwrite the averaged ephemerides with the refit ones, then
        # re-merge the multi-sector rows into the output table.
        mdf = ephem_df.merge(u_multisector_df, how='left', on='target',
                             suffixes=('','_DEPRECATED'))
        mdf = mdf.drop([c for c in mdf.columns if 'DEPRECATED' in c],
                       axis=1, inplace=False)
        temp_df = out_df.append(mdf, ignore_index=True, sort=False)
        out_df = temp_df
        to_exofop_df = out_df[COLUMN_ORDER]
        # to_exofop_df = mdf[COLUMN_ORDER] # special behavior for 2020/02/07 fix
        # to_exofop_df['flag'] = 'newparams'
        _df = manual_comment_df[
            manual_comment_df.source_id.isin(to_exofop_df.source_id)
        ]
        comments = list(_df['comment'])
        # comments = 'Fixed ephemeris bug. (Old epoch was erroneous).' # #2020/02/07
        # exoFOP-TESS caps the notes field length; fail loudly if exceeded.
        for c in comments:
            assert len(c)<=119
        to_exofop_df = to_exofop_df.sort_values(by="source_id")
        _df = _df.sort_values(by="source_id")
        to_exofop_df['notes'] = comments
        to_exofop_df['tag'] = (
            '{}_bouma_cdips-v01_00001'.format(today_YYYYMMDD())
        )
        # Targets not named 'TIC...' are TOI numbers; prefix them accordingly.
        istoi = ~to_exofop_df['target'].astype(str).str.startswith('TIC')
        if np.any(istoi):
            newtargetname = 'TOI'+to_exofop_df[istoi].target.astype(str)
            to_exofop_df.loc[istoi, 'target'] = newtargetname
        outpath = os.path.join(
            exofopdir, "{}_{}_w_sourceid.csv".
            format(today_YYYYMMDD(), uploadnamestr)
        )
        to_exofop_df.to_csv(outpath, index=False, sep='|')
        print('made {}'.format(outpath))
        # The actual upload file must not contain the source_id column.
        to_exofop_df = to_exofop_df.drop(['source_id'], axis=1)
        outpath = os.path.join(
            exofopdir, "params_planet_{}_001.txt".
            format(today_YYYYMMDD())
        )
        for c in ['epoch','epoch_unc','period','period_unc']:
            to_exofop_df[c] = to_exofop_df[c].astype(float)
        to_exofop_df = to_exofop_df.round(FORMATDICT)
        to_exofop_df['depth'] = to_exofop_df['depth'].astype(int)
        to_exofop_df['depth_unc'] = to_exofop_df['depth_unc'].astype(int)
        to_exofop_df.to_csv(outpath, index=False, sep='|', header=False)
        print('made {}'.format(outpath))
        # manually check these...
        print('\n'+42*'='+'\n')
        print('\nPeriod uncertainties [minutes]')
        print(to_exofop_df['period_unc']*24*60)
        print('\nEpoch uncertainties [minutes]')
        print(to_exofop_df['epoch_unc']*24*60)
        print('\nPlanet radii [Rearth]')
        print(to_exofop_df[['radius','radius_unc','notes']])
        print('\n'+42*'='+'\n')
    #
    # above is the format exofop-TESS wants. however it's not particularly
    # useful for followup. for that, we want: gaia IDs, magnitudes, ra, dec.
    #
    gaiaids = list(map(
        lambda x: int(
            os.path.basename(x).split('gaiatwo')[1].split('-')[0].lstrip('0')
        ), parampaths
    ))
    lcnames = list(map(
        lambda x: os.path.basename(x).replace('_fitparameters.csv','.fits'),
        parampaths
    ))
    lcdir = '/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-*/cam?_ccd?/'
    lcpaths = [ glob(os.path.join(lcdir,lcn))[0] for lcn in lcnames ]
    # now get the header values
    kwlist = ['RA_OBJ','DEC_OBJ','CDIPSREF','CDCLSTER','phot_g_mean_mag',
              'phot_bp_mean_mag','phot_rp_mean_mag',
              'TESSMAG','Gaia-ID','TICID','TICTEFF','TICRAD','TICMASS']
    for k in kwlist:
        thislist = []
        for l in lcpaths:
            thislist.append(iu.get_header_keyword(l, k, ext=0))
        param_df[k] = np.array(thislist)
    # now search for stellar RV xmatch
    res = [fr.get_rv_xmatch(ra, dec, G_mag=gmag, dr2_sourceid=s)
           for ra, dec, gmag, s in
           zip(list(param_df['RA_OBJ']), list(param_df['DEC_OBJ']),
               list(param_df['phot_g_mean_mag']), list(param_df['Gaia-ID']))
    ]
    res = np.array(res)
    param_df['stellar_rv'] = res[:,0]
    param_df['stellar_rv_unc'] = res[:,1]
    param_df['stellar_rv_provenance'] = res[:,2]
    # make column showing whether there are ESO spectra available
    res = [fr.wrangle_eso_for_rv_availability(ra, dec)
           for ra, dec in
           zip(list(param_df['RA_OBJ']), list(param_df['DEC_OBJ']))
    ]
    param_df['eso_rv_availability'] = nparr(res)[:,2]
    #
    # try to get cluster RV. first from Soubiran, then from Kharchenko.
    # to do this, load in CDIPS target catalog. merging the CDCLSTER name
    # (comma-delimited string) against the target catalog on source identifiers
    # allows unique cluster name identification, since I already did that,
    # earlier.
    #
    cdips_df = ccl.get_cdips_pub_catalog(ver=cdipssource_vnum)
    dcols = 'cluster;reference;source_id;unique_cluster_name'
    ccdf = cdips_df[dcols.split(';')]
    ccdf['source_id'] = ccdf['source_id'].astype(np.int64)
    mdf = param_df.merge(ccdf, how='left', left_on='source_id', right_on='source_id')
    param_df['unique_cluster_name'] = nparr(mdf['unique_cluster_name'])
    s19 = gvc.get_soubiran_19_rv_table()
    k13_param = gvc.get_k13_param_table()
    # Per-candidate cluster RV lookup: Soubiran+19 first, Kharchenko+13 as a
    # fallback, NaN/empty-provenance if neither catalog has the cluster.
    c_rvs, c_err_rvs, c_rv_nstar, c_rv_prov = [], [], [], []
    for ix, row in param_df.iterrows():
        if row['unique_cluster_name'] in nparr(s19['ID']):
            sel = (s19['ID'] == row['unique_cluster_name'])
            c_rvs.append(float(s19[sel]['RV'].iloc[0]))
            c_err_rvs.append(float(s19[sel]['e_RV'].iloc[0]))
            c_rv_nstar.append(int(s19[sel]['Nsele'].iloc[0]))
            c_rv_prov.append('Soubiran+19')
            continue
        elif row['unique_cluster_name'] in nparr(k13_param['Name']):
            sel = (k13_param['Name'] == row['unique_cluster_name'])
            c_rvs.append(float(k13_param[sel]['RV'].iloc[0]))
            c_err_rvs.append(float(k13_param[sel]['e_RV'].iloc[0]))
            c_rv_nstar.append(int(k13_param[sel]['o_RV'].iloc[0]))
            c_rv_prov.append('Kharchenko+13')
            continue
        else:
            c_rvs.append(np.nan)
            c_err_rvs.append(np.nan)
            c_rv_nstar.append(np.nan)
            c_rv_prov.append('')
    param_df['cluster_rv'] = c_rvs
    param_df['cluster_err_rv'] = c_err_rvs
    param_df['cluster_rv_nstar'] = c_rv_nstar
    param_df['cluster_rv_provenance'] = c_rv_prov
    #
    # finally, begin writing the output
    #
    outpath = ("/home/lbouma/proj/cdips/results/fit_gold/"
               "{}_{}_fitparams_plus_observer_info.csv".
               format(today_YYYYMMDD(), uploadnamestr))
    param_df.to_csv(outpath, index=False, sep='|')
    print('made {}'.format(outpath))
    #
    # sparse observer info cut
    #
    scols = ['target', 'flag', 'disp','tag', 'group', 'RA_OBJ', 'DEC_OBJ',
             'CDIPSREF', 'CDCLSTER', 'phot_g_mean_mag', 'phot_bp_mean_mag',
             'phot_rp_mean_mag', 'TICID', 'TESSMAG', 'TICTEFF', 'TICRAD',
             'TICMASS', 'Gaia-ID'
    ]
    sparam_df = param_df[scols]
    outpath = ("/home/lbouma/proj/cdips/results/fit_gold/"
               "{}_{}_observer_info_sparse.csv".
               format(today_YYYYMMDD(), uploadnamestr))
    sparam_df.to_csv(outpath, index=False, sep='|')
    print('made {}'.format(outpath))
    #
    # full observer info cut
    #
    scols = ['target', 'flag', 'disp','tag', 'group', 'RA_OBJ', 'DEC_OBJ',
             'CDIPSREF', 'CDCLSTER', 'phot_g_mean_mag', 'phot_bp_mean_mag',
             'phot_rp_mean_mag', 'TICID', 'TESSMAG', 'TICTEFF', 'TICRAD',
             'TICMASS', 'Gaia-ID',
             'period', 'period_unc', 'epoch', 'epoch_unc', 'depth',
             'depth_unc', 'duration', 'duration_unc', 'radius', 'radius_unc',
             'stellar_rv', 'stellar_rv_unc', 'stellar_rv_provenance',
             'eso_rv_availability', 'cluster_rv', 'cluster_err_rv',
             'cluster_rv_nstar', 'cluster_rv_provenance'
    ]
    sparam_df = param_df[scols]
    outpath = ("/home/lbouma/proj/cdips/results/fit_gold/"
               "{}_{}_observer_info_full.csv".
               format(today_YYYYMMDD(), uploadnamestr))
    sparam_df.to_csv(outpath, index=False, sep='|')
    print('made {}'.format(outpath))
# Allow running this module as a standalone script.
if __name__=="__main__":
    main()
| [
"cdips.utils.get_vizier_catalogs.get_k13_param_table",
"numpy.argmax",
"pandas.read_csv",
"cdips.utils.pipelineutils.load_status",
"glob.glob",
"os.path.join",
"astrobase.imageutils.get_header_keyword",
"numpy.unique",
"pandas.DataFrame",
"os.path.dirname",
"cdips.utils.collect_cdips_lightcurves... | [((615, 635), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (633, 635), False, 'import os, socket\n'), ((3347, 3453), 'os.path.join', 'os.path.join', (['fitdir', '"""sector-*_CLEAR_THRESHOLD/fitresults/hlsp_*gaiatwo*_llc/*fitparameters.csv"""'], {}), "(fitdir,\n 'sector-*_CLEAR_THRESHOLD/fitresults/hlsp_*gaiatwo*_llc/*fitparameters.csv'\n )\n", (3359, 3453), False, 'import os, socket\n'), ((3476, 3491), 'glob.glob', 'glob', (['paramglob'], {}), '(paramglob)\n', (3480, 3491), False, 'from glob import glob\n'), ((3509, 3598), 'os.path.join', 'os.path.join', (['fitdir', '"""sector-*_CLEAR_THRESHOLD/fitresults/hlsp_*gaiatwo*_llc/*.stat"""'], {}), "(fitdir,\n 'sector-*_CLEAR_THRESHOLD/fitresults/hlsp_*gaiatwo*_llc/*.stat')\n", (3521, 3598), False, 'import os, socket\n'), ((3627, 3643), 'glob.glob', 'glob', (['statusglob'], {}), '(statusglob)\n', (3631, 3643), False, 'from glob import glob\n'), ((4090, 4112), 'pandas.DataFrame', 'pd.DataFrame', (['statuses'], {}), '(statuses)\n', (4102, 4112), True, 'import numpy as np, pandas as pd\n'), ((13838, 13851), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (13846, 13851), True, 'import numpy as np, pandas as pd\n'), ((14607, 14654), 'cdips.utils.collect_cdips_lightcurves.get_cdips_pub_catalog', 'ccl.get_cdips_pub_catalog', ([], {'ver': 'cdipssource_vnum'}), '(ver=cdipssource_vnum)\n', (14632, 14654), True, 'from cdips.utils import collect_cdips_lightcurves as ccl\n'), ((14938, 14971), 'numpy.array', 'nparr', (["mdf['unique_cluster_name']"], {}), "(mdf['unique_cluster_name'])\n", (14943, 14971), True, 'from numpy import array as nparr\n'), ((14983, 15013), 'cdips.utils.get_vizier_catalogs.get_soubiran_19_rv_table', 'gvc.get_soubiran_19_rv_table', ([], {}), '()\n', (15011, 15013), True, 'from cdips.utils import get_vizier_catalogs as gvc\n'), ((15030, 15055), 'cdips.utils.get_vizier_catalogs.get_k13_param_table', 'gvc.get_k13_param_table', ([], {}), '()\n', 
(15053, 15055), True, 'from cdips.utils import get_vizier_catalogs as gvc\n'), ((11114, 11127), 'numpy.any', 'np.any', (['istoi'], {}), '(istoi)\n', (11120, 11127), True, 'import numpy as np, pandas as pd\n'), ((13511, 13529), 'numpy.array', 'np.array', (['thislist'], {}), '(thislist)\n', (13519, 13529), True, 'import numpy as np, pandas as pd\n'), ((13581, 13634), 'cdips.utils.find_rvs.get_rv_xmatch', 'fr.get_rv_xmatch', (['ra', 'dec'], {'G_mag': 'gmag', 'dr2_sourceid': 's'}), '(ra, dec, G_mag=gmag, dr2_sourceid=s)\n', (13597, 13634), True, 'from cdips.utils import find_rvs as fr\n'), ((14059, 14102), 'cdips.utils.find_rvs.wrangle_eso_for_rv_availability', 'fr.wrangle_eso_for_rv_availability', (['ra', 'dec'], {}), '(ra, dec)\n', (14093, 14102), True, 'from cdips.utils import find_rvs as fr\n'), ((14247, 14257), 'numpy.array', 'nparr', (['res'], {}), '(res)\n', (14252, 14257), True, 'from numpy import array as nparr\n'), ((16448, 16464), 'cdips.utils.today_YYYYMMDD', 'today_YYYYMMDD', ([], {}), '()\n', (16462, 16464), False, 'from cdips.utils import today_YYYYMMDD\n'), ((17050, 17066), 'cdips.utils.today_YYYYMMDD', 'today_YYYYMMDD', ([], {}), '()\n', (17064, 17066), False, 'from cdips.utils import today_YYYYMMDD\n'), ((17991, 18007), 'cdips.utils.today_YYYYMMDD', 'today_YYYYMMDD', ([], {}), '()\n', (18005, 18007), False, 'from cdips.utils import today_YYYYMMDD\n'), ((3771, 3794), 'pandas.read_csv', 'pd.read_csv', (['f'], {'sep': '"""|"""'}), "(f, sep='|')\n", (3782, 3794), True, 'import numpy as np, pandas as pd\n'), ((3906, 3922), 'cdips.utils.today_YYYYMMDD', 'today_YYYYMMDD', ([], {}), '()\n', (3920, 3922), False, 'from cdips.utils import today_YYYYMMDD\n'), ((8112, 8144), 'numpy.unique', 'np.unique', (['multisector_df.target'], {}), '(multisector_df.target)\n', (8121, 8144), True, 'import numpy as np, pandas as pd\n'), ((8213, 8245), 'numpy.array', 'nparr', (['multisector_df[sel].epoch'], {}), '(multisector_df[sel].epoch)\n', (8218, 8245), True, 'from numpy 
import array as nparr\n'), ((8269, 8305), 'numpy.array', 'nparr', (['multisector_df[sel].epoch_unc'], {}), '(multisector_df[sel].epoch_unc)\n', (8274, 8305), True, 'from numpy import array as nparr\n'), ((8399, 8469), 'astrobase.timeutils.get_epochs_given_midtimes_and_period', 'get_epochs_given_midtimes_and_period', (['tmid', 'init_period'], {'verbose': '(False)'}), '(tmid, init_period, verbose=False)\n', (8435, 8469), False, 'from astrobase.timeutils import get_epochs_given_midtimes_and_period\n'), ((8526, 8601), 'scipy.optimize.curve_fit', 'curve_fit', (['linear_model', 'E', 'tmid'], {'p0': '(init_period, init_t0)', 'sigma': 'tmid_err'}), '(linear_model, E, tmid, p0=(init_period, init_t0), sigma=tmid_err)\n', (8535, 8601), False, 'from scipy.optimize import curve_fit\n'), ((9933, 9954), 'pandas.DataFrame', 'pd.DataFrame', (['ephem_d'], {}), '(ephem_d)\n', (9945, 9954), True, 'import numpy as np, pandas as pd\n'), ((11000, 11016), 'cdips.utils.today_YYYYMMDD', 'today_YYYYMMDD', ([], {}), '()\n', (11014, 11016), False, 'from cdips.utils import today_YYYYMMDD\n'), ((15200, 15216), 'numpy.array', 'nparr', (["s19['ID']"], {}), "(s19['ID'])\n", (15205, 15216), True, 'from numpy import array as nparr\n'), ((3666, 3680), 'cdips.utils.pipelineutils.load_status', 'load_status', (['f'], {}), '(f)\n', (3677, 3680), False, 'from cdips.utils.pipelineutils import save_status, load_status\n'), ((4662, 4678), 'cdips.utils.today_YYYYMMDD', 'today_YYYYMMDD', ([], {}), '()\n', (4676, 4678), False, 'from cdips.utils import today_YYYYMMDD\n'), ((8655, 8669), 'numpy.isinf', 'np.isinf', (['pcov'], {}), '(pcov)\n', (8663, 8669), True, 'import numpy as np, pandas as pd\n'), ((8822, 8837), 'numpy.argmax', 'np.argmax', (['tmid'], {}), '(tmid)\n', (8831, 8837), True, 'import numpy as np, pandas as pd\n'), ((11363, 11379), 'cdips.utils.today_YYYYMMDD', 'today_YYYYMMDD', ([], {}), '()\n', (11377, 11379), False, 'from cdips.utils import today_YYYYMMDD\n'), ((11674, 11690), 
'cdips.utils.today_YYYYMMDD', 'today_YYYYMMDD', ([], {}), '()\n', (11688, 11690), False, 'from cdips.utils import today_YYYYMMDD\n'), ((13074, 13098), 'os.path.join', 'os.path.join', (['lcdir', 'lcn'], {}), '(lcdir, lcn)\n', (13086, 13098), False, 'import os, socket\n'), ((13453, 13487), 'astrobase.imageutils.get_header_keyword', 'iu.get_header_keyword', (['l', 'k'], {'ext': '(0)'}), '(l, k, ext=0)\n', (13474, 13487), True, 'from astrobase import imageutils as iu\n'), ((15567, 15591), 'numpy.array', 'nparr', (["k13_param['Name']"], {}), "(k13_param['Name'])\n", (15572, 15591), True, 'from numpy import array as nparr\n'), ((8941, 8974), 'numpy.array', 'nparr', (['multisector_df[sel].period'], {}), '(multisector_df[sel].period)\n', (8946, 8974), True, 'from numpy import array as nparr\n'), ((9011, 9048), 'numpy.array', 'nparr', (['multisector_df[sel].period_unc'], {}), '(multisector_df[sel].period_unc)\n', (9016, 9048), True, 'from numpy import array as nparr\n'), ((9395, 9428), 'numpy.array', 'nparr', (['multisector_df[sel].period'], {}), '(multisector_df[sel].period)\n', (9400, 9428), True, 'from numpy import array as nparr\n'), ((9457, 9494), 'numpy.array', 'nparr', (['multisector_df[sel].period_unc'], {}), '(multisector_df[sel].period_unc)\n', (9462, 9494), True, 'from numpy import array as nparr\n'), ((12889, 12908), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (12905, 12908), False, 'import os, socket\n'), ((4223, 4241), 'os.path.dirname', 'os.path.dirname', (['x'], {}), '(x)\n', (4238, 4241), False, 'import os, socket\n'), ((12751, 12770), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (12767, 12770), False, 'import os, socket\n'), ((5379, 5398), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (5395, 5398), False, 'import os, socket\n')] |
"""
Test routine to test some of the model reduction routines
"""
import numpy as np
import scipy as sp
import nose
from amfe import modal_assurance, principal_angles
def test_mac_diag_ones():
    """
    The MAC of a mode matrix with itself must be exactly one on the
    diagonal, since every mode correlates perfectly with itself.
    """
    n_dofs, n_modes = 100, 10
    modes = np.random.rand(n_dofs, n_modes)
    mac = modal_assurance(modes, modes)
    np.testing.assert_allclose(np.diag(mac), np.ones(n_modes))
def test_mac_symmetric():
    """
    The MAC matrix of a mode set with itself must be symmetric.
    """
    n_dofs, n_modes = 100, 10
    modes = np.random.rand(n_dofs, n_modes)
    mac = modal_assurance(modes, modes)
    np.testing.assert_allclose(mac - mac.T, np.zeros((n_modes, n_modes)))
def test_mac_identity():
    """
    An orthonormal basis (economic QR of a random matrix) must yield the
    identity as its MAC matrix.
    """
    n_dofs, n_modes = 100, 10
    basis, _ = sp.linalg.qr(np.random.rand(n_dofs, n_modes), mode='economic')
    mac = modal_assurance(basis, basis)
    np.testing.assert_allclose(mac, np.eye(n_modes), atol=1E-14)
def test_principal_angles():
    """
    Two random 3-dimensional subspaces of R^5 generically intersect in a
    2*3 - 5 = 1 dimensional subspace; the principal vectors restricted to
    that overlap must be orthonormal, agree between both subspaces, and
    have principal-angle cosines of one.
    """
    n_vec = 3
    n_dim = 5
    n_overlap = 2*n_vec - n_dim
    subspace_a = np.random.rand(n_dim, n_vec)
    subspace_b = np.random.rand(n_dim, n_vec)
    gamma, F1, F2 = principal_angles(subspace_a, subspace_b, principal_vectors=True)
    eye = np.eye(n_overlap)
    V1 = F1[:, :n_overlap]
    V2 = F2[:, :n_overlap]
    # both sets of principal vectors are orthonormal on the overlap
    np.testing.assert_almost_equal(V1.T @ V1, eye)
    np.testing.assert_almost_equal(V2.T @ V2, eye)
    # both span the same intersecting subspace
    np.testing.assert_almost_equal(V2.T @ V1, eye)
    # principal angle cosines of the intersecting subspace are one
    np.testing.assert_almost_equal(gamma[:n_overlap], np.ones(n_overlap))
| [
"numpy.eye",
"amfe.modal_assurance",
"numpy.zeros",
"numpy.ones",
"scipy.linalg.qr",
"numpy.random.rand",
"numpy.diag",
"amfe.principal_angles"
] | [((333, 353), 'numpy.random.rand', 'np.random.rand', (['N', 'n'], {}), '(N, n)\n', (347, 353), True, 'import numpy as np\n'), ((368, 389), 'amfe.modal_assurance', 'modal_assurance', (['A', 'A'], {}), '(A, A)\n', (383, 389), False, 'from amfe import modal_assurance, principal_angles\n'), ((525, 545), 'numpy.random.rand', 'np.random.rand', (['N', 'n'], {}), '(N, n)\n', (539, 545), True, 'import numpy as np\n'), ((560, 581), 'amfe.modal_assurance', 'modal_assurance', (['A', 'A'], {}), '(A, A)\n', (575, 581), False, 'from amfe import modal_assurance, principal_angles\n'), ((729, 749), 'numpy.random.rand', 'np.random.rand', (['N', 'n'], {}), '(N, n)\n', (743, 749), True, 'import numpy as np\n'), ((762, 794), 'scipy.linalg.qr', 'sp.linalg.qr', (['A'], {'mode': '"""economic"""'}), "(A, mode='economic')\n", (774, 794), True, 'import scipy as sp\n'), ((809, 830), 'amfe.modal_assurance', 'modal_assurance', (['Q', 'Q'], {}), '(Q, Q)\n', (824, 830), False, 'from amfe import modal_assurance, principal_angles\n'), ((993, 1021), 'numpy.random.rand', 'np.random.rand', (['n_dim', 'n_vec'], {}), '(n_dim, n_vec)\n', (1007, 1021), True, 'import numpy as np\n'), ((1030, 1058), 'numpy.random.rand', 'np.random.rand', (['n_dim', 'n_vec'], {}), '(n_dim, n_vec)\n', (1044, 1058), True, 'import numpy as np\n'), ((1080, 1126), 'amfe.principal_angles', 'principal_angles', (['A', 'B'], {'principal_vectors': '(True)'}), '(A, B, principal_vectors=True)\n', (1096, 1126), False, 'from amfe import modal_assurance, principal_angles\n'), ((421, 437), 'numpy.diag', 'np.diag', (['macvals'], {}), '(macvals)\n', (428, 437), True, 'import numpy as np\n'), ((439, 449), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (446, 449), True, 'import numpy as np\n'), ((654, 670), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (662, 670), True, 'import numpy as np\n'), ((871, 880), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (877, 880), True, 'import numpy as np\n'), ((1268, 1285), 'numpy.eye', 
'np.eye', (['n_overlap'], {}), '(n_overlap)\n', (1274, 1285), True, 'import numpy as np\n'), ((1427, 1444), 'numpy.eye', 'np.eye', (['n_overlap'], {}), '(n_overlap)\n', (1433, 1444), True, 'import numpy as np\n'), ((1617, 1634), 'numpy.eye', 'np.eye', (['n_overlap'], {}), '(n_overlap)\n', (1623, 1634), True, 'import numpy as np\n'), ((1750, 1768), 'numpy.ones', 'np.ones', (['n_overlap'], {}), '(n_overlap)\n', (1757, 1768), True, 'import numpy as np\n')] |
# 1. Import packages
import numpy as np
import pandas as pd
import os
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.preprocessing.sequence import pad_sequences
from gensim.models import word2vec
import logging
import utils.stemming as stemming
import utils.tokens as tokens
import utils.longest as longest
# Configure root logging so gensim's training progress messages are visible.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',level=logging.INFO)
# 2. User setting 1: Import data
# Important: data needs to be stored in directory 'data' in parent folder of current working directory
# NOTE(review): chdir to the current working directory is a no-op; presumably a
# project-specific path was intended here -- confirm.
path = os.getcwd()
os.chdir(path)
# Only the first 1000 rows are loaded -- a development-size subset, not the full dataset.
train_df = pd.read_csv("data/train_data.csv", nrows=1000, delimiter=',')
test_df = pd.read_csv("data/test_data.csv", nrows=1000, delimiter=',')
train_labels = pd.read_csv("data/train_labels.csv", nrows=1000, delimiter=',')
# Keep only the binary duplicate label column.
train_labels = train_labels['is_duplicate']
# 3. Split text
# Tokenize each question pair row-wise (tokens.word_tokens is a project helper).
train_tokenized = train_df.apply(tokens.word_tokens, axis=1, raw=True)
test_tokenized = test_df.apply(tokens.word_tokens, axis=1, raw=True)
# 5. Stemming
# Reduce every token to its stem, again row-wise.
train_stemmed = train_tokenized.apply(stemming.stemming_row, axis=1, raw=True)
test_stemmed = test_tokenized.apply(stemming.stemming_row, axis=1, raw=True)
# 6. Zeropadding
def zero_padding(sequences, max_length):
    """Right-pad every token sequence with zeros up to ``max_length``."""
    padded = pad_sequences(sequences, maxlen=max_length, padding='post')
    return padded
# Bug fix: longestWordLength was used but never defined (NameError at runtime).
# Pad all four question columns to the single longest tokenized question so the
# train/test matrices have a common width.
# NOTE(review): assumes the stemmed frames expose question1/question2 columns of
# token lists -- confirm against tokens.word_tokens / stemming.stemming_row.
longestWordLength = max(
    questions.map(len).max()
    for questions in (train_stemmed.question1, train_stemmed.question2,
                      test_stemmed.question1, test_stemmed.question2)
)
train_padded1 = zero_padding(train_stemmed.question1, longestWordLength)
train_padded2 = zero_padding(train_stemmed.question2, longestWordLength)
test_padded1 = zero_padding(test_stemmed.question1, longestWordLength)
test_padded2 = zero_padding(test_stemmed.question2, longestWordLength)
# 6. Set vocabulary/Dictionary
# Bug fix: np.concatenate takes a *sequence* of arrays as its first argument;
# the previous call np.concatenate(a, b, axis=1) passed the second array as the
# axis argument and raised a TypeError.
sentences = np.concatenate((train_padded1, train_padded2), axis=1)
test_sentences = np.concatenate((test_padded1, test_padded2), axis=1)
num_features = 300    # Word vector dimensionality
min_word_count = 40   # Minimum word count
num_workers = 4       # Number of threads to run in parallel
context = 10          # Context window size
downsampling = 1e-3   # Downsample setting for frequent words
# Train one embedding model per split (size=5-dimensional vectors).
trainWordModel = word2vec.Word2Vec(sentences, size=5, window=context, min_count=1, workers=num_workers, sample=downsampling)
testWordModel = word2vec.Word2Vec(test_sentences, size=5, window=context, min_count=1, workers=num_workers, sample=downsampling)
# Persist the models and reload them (round-trip kept from the original flow).
trainWordModel.save("trainWordModel")
testWordModel.save("testWordModel")
trainWords = word2vec.Word2Vec.load("trainWordModel")
testWords = word2vec.Word2Vec.load("testWordModel")
# Word-vector matrices, one row per vocabulary entry.
x_train = trainWords[trainWords.wv.vocab]
x_test = testWords[testWords.wv.vocab]
print('xtrain_length:', len(x_train))
print('ytrain_length:', len(train_labels))
#print('similarity-test: ',wordModel.wv.most_similar(positive=['muslim'], negative=['man']))
# 7. TF IDF
# 8. RNN (LSTM)
def trainNeuralNet(x_train, y_train):
    """Build, compile and fit a small dense binary classifier.

    Bug fix: the fitted model is now returned -- previously it was discarded,
    so callers had no access to the trained weights. Returning it is backward
    compatible (existing callers simply ignore the return value).

    Args:
        x_train: feature matrix with 5 columns (matches input_dim=5).
        y_train: binary target vector.

    Returns:
        The trained Keras ``Sequential`` model.
    """
    model = Sequential()
    model.add(Dense(64, input_dim=5, activation='relu'))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    model.fit(x_train, y_train, epochs=10, batch_size=32)
    return model
# Train the classifier on the padded token matrix.
# NOTE(review): trainNeuralNet does not hand back its model, so the fitted
# weights are discarded here while a fresh, untrained model of the same
# architecture is built below for prediction -- confirm this is intended.
trainNeuralNet(sentences, train_labels)
#def testNeuralNet(x_test):
model = Sequential()
model.add(Dense(64, input_dim=5, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
output = model.predict(x_test, batch_size=32)
# NOTE(review): dtype=np.uint truncates the sigmoid probabilities in `output`
# to integers when assigned below -- verify this is intended.
submission_df = pd.DataFrame(index=test_df.test_id, columns=['is_duplicate'], dtype=np.uint)
submission_df.index.name = 'test_id'
submission_df.is_duplicate = output
submission_df.to_csv('data/submission.csv')
#testNeuralNet(testWordModel) | [
"pandas.DataFrame",
"logging.basicConfig",
"os.getcwd",
"pandas.read_csv",
"keras.preprocessing.sequence.pad_sequences",
"gensim.models.word2vec.Word2Vec",
"keras.layers.Dense",
"gensim.models.word2vec.Word2Vec.load",
"keras.models.Sequential",
"os.chdir",
"numpy.concatenate"
] | [((350, 445), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.INFO)\n", (369, 445), False, 'import logging\n'), ((586, 597), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (595, 597), False, 'import os\n'), ((598, 612), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (606, 612), False, 'import os\n'), ((624, 685), 'pandas.read_csv', 'pd.read_csv', (['"""data/train_data.csv"""'], {'nrows': '(1000)', 'delimiter': '""","""'}), "('data/train_data.csv', nrows=1000, delimiter=',')\n", (635, 685), True, 'import pandas as pd\n'), ((696, 756), 'pandas.read_csv', 'pd.read_csv', (['"""data/test_data.csv"""'], {'nrows': '(1000)', 'delimiter': '""","""'}), "('data/test_data.csv', nrows=1000, delimiter=',')\n", (707, 756), True, 'import pandas as pd\n'), ((772, 835), 'pandas.read_csv', 'pd.read_csv', (['"""data/train_labels.csv"""'], {'nrows': '(1000)', 'delimiter': '""","""'}), "('data/train_labels.csv', nrows=1000, delimiter=',')\n", (783, 835), True, 'import pandas as pd\n'), ((1676, 1728), 'numpy.concatenate', 'np.concatenate', (['train_padded1', 'train_padded2'], {'axis': '(1)'}), '(train_padded1, train_padded2, axis=1)\n', (1690, 1728), True, 'import numpy as np\n'), ((1747, 1797), 'numpy.concatenate', 'np.concatenate', (['test_padded1', 'test_padded2'], {'axis': '(1)'}), '(test_padded1, test_padded2, axis=1)\n', (1761, 1797), True, 'import numpy as np\n'), ((2209, 2321), 'gensim.models.word2vec.Word2Vec', 'word2vec.Word2Vec', (['sentences'], {'size': '(5)', 'window': 'context', 'min_count': '(1)', 'workers': 'num_workers', 'sample': 'downsampling'}), '(sentences, size=5, window=context, min_count=1, workers=\n num_workers, sample=downsampling)\n', (2226, 2321), False, 'from gensim.models import word2vec\n'), ((2335, 2451), 'gensim.models.word2vec.Word2Vec', 'word2vec.Word2Vec', (['test_sentences'], {'size': 
'(5)', 'window': 'context', 'min_count': '(1)', 'workers': 'num_workers', 'sample': 'downsampling'}), '(test_sentences, size=5, window=context, min_count=1,\n workers=num_workers, sample=downsampling)\n', (2352, 2451), False, 'from gensim.models import word2vec\n'), ((2607, 2647), 'gensim.models.word2vec.Word2Vec.load', 'word2vec.Word2Vec.load', (['"""trainWordModel"""'], {}), "('trainWordModel')\n", (2629, 2647), False, 'from gensim.models import word2vec\n'), ((2660, 2699), 'gensim.models.word2vec.Word2Vec.load', 'word2vec.Word2Vec.load', (['"""testWordModel"""'], {}), "('testWordModel')\n", (2682, 2699), False, 'from gensim.models import word2vec\n'), ((3494, 3506), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3504, 3506), False, 'from keras.models import Sequential\n'), ((3873, 3949), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'test_df.test_id', 'columns': "['is_duplicate']", 'dtype': 'np.uint'}), "(index=test_df.test_id, columns=['is_duplicate'], dtype=np.uint)\n", (3885, 3949), True, 'import pandas as pd\n'), ((1280, 1339), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['sequences'], {'maxlen': 'max_length', 'padding': '"""post"""'}), "(sequences, maxlen=max_length, padding='post')\n", (1293, 1339), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((3033, 3045), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3043, 3045), False, 'from keras.models import Sequential\n'), ((3517, 3558), 'keras.layers.Dense', 'Dense', (['(64)'], {'input_dim': '(5)', 'activation': '"""relu"""'}), "(64, input_dim=5, activation='relu')\n", (3522, 3558), False, 'from keras.layers import Dense, Activation\n'), ((3570, 3598), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (3575, 3598), False, 'from keras.layers import Dense, Activation\n'), ((3610, 3638), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", 
(3615, 3638), False, 'from keras.layers import Dense, Activation\n'), ((3650, 3678), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (3655, 3678), False, 'from keras.layers import Dense, Activation\n'), ((3690, 3720), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (3695, 3720), False, 'from keras.layers import Dense, Activation\n'), ((3058, 3099), 'keras.layers.Dense', 'Dense', (['(64)'], {'input_dim': '(5)', 'activation': '"""relu"""'}), "(64, input_dim=5, activation='relu')\n", (3063, 3099), False, 'from keras.layers import Dense, Activation\n'), ((3113, 3141), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (3118, 3141), False, 'from keras.layers import Dense, Activation\n'), ((3155, 3183), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (3160, 3183), False, 'from keras.layers import Dense, Activation\n'), ((3197, 3225), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (3202, 3225), False, 'from keras.layers import Dense, Activation\n'), ((3239, 3269), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (3244, 3269), False, 'from keras.layers import Dense, Activation\n')] |
from ldpc.encoder import EncoderG, EncoderTriangularH
import pytest
from ldpc.utils import NonBinaryMatrix, AList, IncorrectLength
import numpy as np
from bitstring import Bits
import numpy.typing as npt
class TestEncoderG:
    """Tests for the generator-matrix encoder of the (7, 4) Hamming code."""

    _G_PATH = "tests/test_data/Hamming_7_4_g.alist"

    def test_non_binary_matrix(self) -> None:
        """A matrix with entries other than 0/1 must be rejected."""
        non_binary = np.arange(1, 5).reshape((2, 2))
        with pytest.raises(NonBinaryMatrix):
            EncoderG(non_binary)

    def test_params(self) -> None:
        """Code length, dimension and stored generator match the input."""
        g = AList.from_file(self._G_PATH).to_array()
        enc = EncoderG(g)
        assert (enc.n, enc.k) == (7, 4)
        np.testing.assert_array_equal(g, enc.generator)  # type: ignore

    def test_encoding(self) -> None:
        """Encoding equals the matrix product of the message with G."""
        g = AList.from_file(self._G_PATH).to_array()
        enc = EncoderG(g)
        word: npt.NDArray[np.int_] = np.array([1, 1, 0, 1])
        expected = np.matmul(word, g)
        assert enc.encode(Bits(word)) == Bits(expected)

    def test_incorrect_length(self) -> None:
        """Messages shorter than k must raise IncorrectLength."""
        enc = EncoderG(AList.from_file(self._G_PATH).to_array())
        short_word: npt.NDArray[np.int_] = np.array([1, 1, 0])
        with pytest.raises(IncorrectLength):
            enc.encode(Bits(short_word))
enc.encode(Bits(bits))
class TestEncoderTriangularH:
    """Tests for the encoder built from a triangular parity-check matrix."""

    _H_PATH = "tests/test_data/systematic_4098_3095.alist"

    def test_non_binary_matrix(self) -> None:
        """A matrix with entries other than 0/1 must be rejected."""
        non_binary = np.arange(1, 5).reshape((2, 2))
        with pytest.raises(NonBinaryMatrix):
            EncoderTriangularH(non_binary)

    def test_params(self) -> None:
        """Code length, number of checks, dimension and stored H match."""
        h = AList.from_file(self._H_PATH).to_array()
        enc = EncoderTriangularH(h)
        assert (enc.n, enc.m, enc.k) == (4098, 3095, 4098-3095)
        np.testing.assert_array_equal(h, enc.h)  # type: ignore

    def test_encoding(self) -> None:
        """Every codeword must have an all-zero syndrome H @ c mod 2."""
        h = AList.from_file(self._H_PATH).to_array()
        enc = EncoderTriangularH(h)
        word: npt.NDArray[np.int_] = np.random.randint(2, size=enc.k)
        codeword = enc.encode(Bits(word))
        syndrome = np.mod(np.matmul(h, codeword), 2)
        assert syndrome.max() == 0
        assert syndrome.min() == 0

    def test_incorrect_length(self) -> None:
        """Messages shorter than k must raise IncorrectLength."""
        enc = EncoderTriangularH(AList.from_file(self._H_PATH).to_array())
        short_word: npt.NDArray[np.int_] = np.array([1, 1, 0])
        with pytest.raises(IncorrectLength):
            enc.encode(Bits(short_word))
| [
"ldpc.utils.AList.from_file",
"numpy.testing.assert_array_equal",
"ldpc.encoder.EncoderTriangularH",
"pytest.raises",
"numpy.random.randint",
"numpy.array",
"ldpc.encoder.EncoderG",
"numpy.matmul",
"bitstring.Bits",
"numpy.arange"
] | [((517, 528), 'ldpc.encoder.EncoderG', 'EncoderG', (['g'], {}), '(g)\n', (525, 528), False, 'from ldpc.encoder import EncoderG, EncoderTriangularH\n'), ((589, 636), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['g', 'enc.generator'], {}), '(g, enc.generator)\n', (618, 636), True, 'import numpy as np\n'), ((783, 794), 'ldpc.encoder.EncoderG', 'EncoderG', (['g'], {}), '(g)\n', (791, 794), False, 'from ldpc.encoder import EncoderG, EncoderTriangularH\n'), ((832, 854), 'numpy.array', 'np.array', (['[1, 1, 0, 1]'], {}), '([1, 1, 0, 1])\n', (840, 854), True, 'import numpy as np\n'), ((873, 891), 'numpy.matmul', 'np.matmul', (['bits', 'g'], {}), '(bits, g)\n', (882, 891), True, 'import numpy as np\n'), ((1103, 1114), 'ldpc.encoder.EncoderG', 'EncoderG', (['g'], {}), '(g)\n', (1111, 1114), False, 'from ldpc.encoder import EncoderG, EncoderTriangularH\n'), ((1152, 1171), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (1160, 1171), True, 'import numpy as np\n'), ((1592, 1613), 'ldpc.encoder.EncoderTriangularH', 'EncoderTriangularH', (['h'], {}), '(h)\n', (1610, 1613), False, 'from ldpc.encoder import EncoderG, EncoderTriangularH\n'), ((1714, 1753), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['h', 'enc.h'], {}), '(h, enc.h)\n', (1743, 1753), True, 'import numpy as np\n'), ((1907, 1928), 'ldpc.encoder.EncoderTriangularH', 'EncoderTriangularH', (['h'], {}), '(h)\n', (1925, 1928), False, 'from ldpc.encoder import EncoderG, EncoderTriangularH\n'), ((1966, 1998), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'enc.k'}), '(2, size=enc.k)\n', (1983, 1998), True, 'import numpy as np\n'), ((2286, 2307), 'ldpc.encoder.EncoderTriangularH', 'EncoderTriangularH', (['h'], {}), '(h)\n', (2304, 2307), False, 'from ldpc.encoder import EncoderG, EncoderTriangularH\n'), ((2345, 2364), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (2353, 2364), True, 'import numpy as np\n'), ((331, 361), 
'pytest.raises', 'pytest.raises', (['NonBinaryMatrix'], {}), '(NonBinaryMatrix)\n', (344, 361), False, 'import pytest\n'), ((375, 388), 'ldpc.encoder.EncoderG', 'EncoderG', (['mat'], {}), '(mat)\n', (383, 388), False, 'from ldpc.encoder import EncoderG, EncoderTriangularH\n'), ((917, 927), 'bitstring.Bits', 'Bits', (['bits'], {}), '(bits)\n', (921, 927), False, 'from bitstring import Bits\n'), ((951, 964), 'bitstring.Bits', 'Bits', (['encoded'], {}), '(encoded)\n', (955, 964), False, 'from bitstring import Bits\n'), ((1185, 1215), 'pytest.raises', 'pytest.raises', (['IncorrectLength'], {}), '(IncorrectLength)\n', (1198, 1215), False, 'import pytest\n'), ((1389, 1419), 'pytest.raises', 'pytest.raises', (['NonBinaryMatrix'], {}), '(NonBinaryMatrix)\n', (1402, 1419), False, 'import pytest\n'), ((1433, 1456), 'ldpc.encoder.EncoderTriangularH', 'EncoderTriangularH', (['mat'], {}), '(mat)\n', (1451, 1456), False, 'from ldpc.encoder import EncoderG, EncoderTriangularH\n'), ((2028, 2038), 'bitstring.Bits', 'Bits', (['bits'], {}), '(bits)\n', (2032, 2038), False, 'from bitstring import Bits\n'), ((2059, 2080), 'numpy.matmul', 'np.matmul', (['h', 'encoded'], {}), '(h, encoded)\n', (2068, 2080), True, 'import numpy as np\n'), ((2378, 2408), 'pytest.raises', 'pytest.raises', (['IncorrectLength'], {}), '(IncorrectLength)\n', (2391, 2408), False, 'import pytest\n'), ((286, 301), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (295, 301), True, 'import numpy as np\n'), ((437, 491), 'ldpc.utils.AList.from_file', 'AList.from_file', (['"""tests/test_data/Hamming_7_4_g.alist"""'], {}), "('tests/test_data/Hamming_7_4_g.alist')\n", (452, 491), False, 'from ldpc.utils import NonBinaryMatrix, AList, IncorrectLength\n'), ((703, 757), 'ldpc.utils.AList.from_file', 'AList.from_file', (['"""tests/test_data/Hamming_7_4_g.alist"""'], {}), "('tests/test_data/Hamming_7_4_g.alist')\n", (718, 757), False, 'from ldpc.utils import NonBinaryMatrix, AList, IncorrectLength\n'), ((1023, 
1077), 'ldpc.utils.AList.from_file', 'AList.from_file', (['"""tests/test_data/Hamming_7_4_g.alist"""'], {}), "('tests/test_data/Hamming_7_4_g.alist')\n", (1038, 1077), False, 'from ldpc.utils import NonBinaryMatrix, AList, IncorrectLength\n'), ((1240, 1250), 'bitstring.Bits', 'Bits', (['bits'], {}), '(bits)\n', (1244, 1250), False, 'from bitstring import Bits\n'), ((1344, 1359), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (1353, 1359), True, 'import numpy as np\n'), ((1505, 1566), 'ldpc.utils.AList.from_file', 'AList.from_file', (['"""tests/test_data/systematic_4098_3095.alist"""'], {}), "('tests/test_data/systematic_4098_3095.alist')\n", (1520, 1566), False, 'from ldpc.utils import NonBinaryMatrix, AList, IncorrectLength\n'), ((1820, 1881), 'ldpc.utils.AList.from_file', 'AList.from_file', (['"""tests/test_data/systematic_4098_3095.alist"""'], {}), "('tests/test_data/systematic_4098_3095.alist')\n", (1835, 1881), False, 'from ldpc.utils import NonBinaryMatrix, AList, IncorrectLength\n'), ((2199, 2260), 'ldpc.utils.AList.from_file', 'AList.from_file', (['"""tests/test_data/systematic_4098_3095.alist"""'], {}), "('tests/test_data/systematic_4098_3095.alist')\n", (2214, 2260), False, 'from ldpc.utils import NonBinaryMatrix, AList, IncorrectLength\n'), ((2433, 2443), 'bitstring.Bits', 'Bits', (['bits'], {}), '(bits)\n', (2437, 2443), False, 'from bitstring import Bits\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 24 23:27:19 2021
@author: <NAME>
"""
from scipy.io import loadmat
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse import save_npz, load_npz
import multiprocessing as mp
import itertools
import time
import os
from gidmutils import *
import gc
# Directory where the per-cell sparse matrices are written as .npz files.
fpath = '../TestData/Cancer/outputmat/'
# Create the output directory (and any parents) if it does not exist yet.
os.makedirs(os.path.dirname(fpath), exist_ok=True)
def fill_sparse(i):
    """Build and save the sparse pairwise matrix for cell ``i``.

    For every pair of nonzero genes (g1 < g2) of the cell's expression row,
    the entry is 0.5 * rho[g1, g2] * (sample[g1] + sample[g2]).  The result
    is stored as ``fpath + str(i) + '.npz'``.  Reads the module globals
    ``data``, ``rho`` and ``fpath``; returns 1 on success, None on error
    (the exception is only printed, matching the worker-pool usage).
    """
    try:
        sample = data[i, :]
        rows = []
        cols = []
        values = []
        nz = np.nonzero(sample)[0]
        for k in range(len(nz) - 1):
            partners = nz[k + 1:]
            anchors = [nz[k]] * len(partners)
            rows.extend(anchors)
            cols.append(partners)
            # pairwise fancy indexing: one value per (anchor, partner) pair
            values.append(0.5 * rho[anchors, partners] * (sample[anchors] + sample[partners]))
        cols = list(itertools.chain(*cols))
        values = list(itertools.chain(*values))
        u = csr_matrix((values, (rows, cols)), shape=(data.shape[1], data.shape[1]), dtype=np.float32)
        save_npz(fpath + str(i) + '.npz', u)
        return 1
    except Exception as e:
        print(e)
def get_distance(i, j):
    """Return ``[i, j, d]`` where d is the L1 distance between the stored
    sparse matrices of cells ``i`` and ``j``.

    Loads the two .npz files written by ``fill_sparse`` from the module-level
    ``fpath``.  On error the exception is printed and None is returned.
    """
    try:
        mat_i = load_npz(fpath + str(i) + '.npz')
        mat_j = load_npz(fpath + str(j) + '.npz')
        delta = mat_i - mat_j
        distance = np.sum(np.abs(delta.data))
        return [i, j, distance]
    except Exception as e:
        print(e)
# Load the expression matrix from MATLAB format.
scData_m = loadmat('../TestData/Cancer/breast_cancer_5000.mat')
data = scData_m['A'] # Format should be: n_cells x m_genes
# Gene-gene Spearman correlation (genes become rows via the transpose),
# then processed/thresholded by the gidmutils helper with parameter 0.37.
rho = compute_spearman(data.T)
rho = process_corr_matrix_using_adjacency_matrix(rho, 0.37)
try:
    #The next line is to set the number of CPUs
    #Change it accordingly if you are using a SLURM managed cluster
    N_CPUs =4#int(os.getenv('SLURM_CPUS_ON_NODE'))
    # Phase 1: build and save one sparse gene-gene matrix per cell, in parallel.
    pool = mp.Pool(N_CPUs)
    result_objects = []
    print("Computing sparse matrices...")
    tic = time.perf_counter()
    for i in range(data.shape[0]):
        result_objects.append(pool.apply_async(fill_sparse, args=(i,)))
    pool.close()
    pool.join()
    toc = time.perf_counter()
    # Retrieve worker return values (surfaces any worker-side problems), then
    # drop them -- only the side-effect .npz files are needed afterwards.
    results = [r.get() for r in result_objects]
    results=[]
    result_objects=[]
    print(f"Done: {toc - tic:0.4f} seconds")
    # Free the large correlation matrix before the second phase.
    del rho
    gc.collect()
    # Phase 2: pairwise L1 distances between all per-cell sparse matrices.
    pool = mp.Pool(N_CPUs)
    tic = time.perf_counter()
    print("Computing distances...")
    for i in range(data.shape[0]-1):
        for j in range(i+1, data.shape[0]):
            result_objects.append(pool.apply_async(get_distance, args=(i, j)))
    pool.close()
    pool.join()
    results = [r.get() for r in result_objects]
    # Fill the upper triangle of the symmetric distance matrix.
    D = np.zeros((data.shape[0], data.shape[0]))
    for r in results:
        D[r[0], r[1]] = r[2]
    toc = time.perf_counter()
    print(f"Distances done: {toc - tic:0.4f} seconds")
except Exception as e:
    print(e)
# NOTE(review): if an exception occurs before D is assigned inside the try
# block, the save below fails with a NameError -- consider guarding this.
print("Saving distance matrix as cancerMP_21_signed")
np.save('../TestData/Cancer/cancerMP_21_signed.npy', D)
print("Cancer completed!")
| [
"numpy.save",
"numpy.abs",
"numpy.sum",
"scipy.io.loadmat",
"os.path.dirname",
"numpy.zeros",
"time.perf_counter",
"numpy.nonzero",
"gc.collect",
"scipy.sparse.csr_matrix",
"scipy.sparse.save_npz",
"multiprocessing.Pool",
"itertools.chain"
] | [((1588, 1640), 'scipy.io.loadmat', 'loadmat', (['"""../TestData/Cancer/breast_cancer_5000.mat"""'], {}), "('../TestData/Cancer/breast_cancer_5000.mat')\n", (1595, 1640), False, 'from scipy.io import loadmat\n'), ((3036, 3091), 'numpy.save', 'np.save', (['"""../TestData/Cancer/cancerMP_21_signed.npy"""', 'D'], {}), "('../TestData/Cancer/cancerMP_21_signed.npy', D)\n", (3043, 3091), True, 'import numpy as np\n'), ((391, 413), 'os.path.dirname', 'os.path.dirname', (['fpath'], {}), '(fpath)\n', (406, 413), False, 'import os\n'), ((1981, 1996), 'multiprocessing.Pool', 'mp.Pool', (['N_CPUs'], {}), '(N_CPUs)\n', (1988, 1996), True, 'import multiprocessing as mp\n'), ((2073, 2092), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2090, 2092), False, 'import time\n'), ((2243, 2262), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2260, 2262), False, 'import time\n'), ((2413, 2425), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2423, 2425), False, 'import gc\n'), ((2437, 2452), 'multiprocessing.Pool', 'mp.Pool', (['N_CPUs'], {}), '(N_CPUs)\n', (2444, 2452), True, 'import multiprocessing as mp\n'), ((2463, 2482), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2480, 2482), False, 'import time\n'), ((2768, 2808), 'numpy.zeros', 'np.zeros', (['(data.shape[0], data.shape[0])'], {}), '((data.shape[0], data.shape[0]))\n', (2776, 2808), True, 'import numpy as np\n'), ((2870, 2889), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2887, 2889), False, 'import time\n'), ((556, 574), 'numpy.nonzero', 'np.nonzero', (['sample'], {}), '(sample)\n', (566, 574), True, 'import numpy as np\n'), ((1074, 1179), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(vals, (gene_idx_1, gene_idx_2))'], {'shape': '(data.shape[1], data.shape[1])', 'dtype': 'np.float32'}), '((vals, (gene_idx_1, gene_idx_2)), shape=(data.shape[1], data.\n shape[1]), dtype=np.float32)\n', (1084, 1179), False, 'from scipy.sparse import csr_matrix\n'), ((1220, 1241), 
'scipy.sparse.save_npz', 'save_npz', (['filename', 'u'], {}), '(filename, u)\n', (1228, 1241), False, 'from scipy.sparse import save_npz, load_npz\n'), ((1465, 1482), 'numpy.abs', 'np.abs', (['diff.data'], {}), '(diff.data)\n', (1471, 1482), True, 'import numpy as np\n'), ((1493, 1507), 'numpy.sum', 'np.sum', (['diff_v'], {}), '(diff_v)\n', (1499, 1507), True, 'import numpy as np\n'), ((938, 966), 'itertools.chain', 'itertools.chain', (['*gene_idx_1'], {}), '(*gene_idx_1)\n', (953, 966), False, 'import itertools\n'), ((992, 1020), 'itertools.chain', 'itertools.chain', (['*gene_idx_2'], {}), '(*gene_idx_2)\n', (1007, 1020), False, 'import itertools\n'), ((1040, 1062), 'itertools.chain', 'itertools.chain', (['*vals'], {}), '(*vals)\n', (1055, 1062), False, 'import itertools\n'), ((777, 799), 'itertools.chain', 'itertools.chain', (['*temp'], {}), '(*temp)\n', (792, 799), False, 'import itertools\n')] |
from typing import Dict, List, Iterable, Optional, Tuple
import numpy as np
from yarll.memory.experiences_memory import Experience
class PreAllocMemory:
    """Fixed-capacity replay buffer of transitions in pre-allocated numpy arrays.

    Rows are written ring-buffer style: ``_pointer`` is the next row index to
    (over)write and ``n_entries`` is the number of rows currently holding
    valid data (at most ``max_size``).
    """

    def __init__(self, max_size: int, observation_shape: Tuple[int, ...], action_shape: Tuple[int, ...], states_dtype: type = np.float32):
        self.max_size = max_size
        self._pointer = 0  # where to start writing new data
        self.n_entries = 0  # Number of filled rows
        self._data = {
            "states0": np.empty((self.max_size, *observation_shape), dtype=states_dtype),
            "actions": np.empty((self.max_size, *action_shape), dtype=np.float32),
            "rewards": np.empty((self.max_size, 1), dtype=np.float32),
            "states1": np.empty((self.max_size, *observation_shape), dtype=states_dtype),
            "terminals1": np.empty((self.max_size, 1), dtype=np.float32)
        }
        self._keys = list(self._data.keys())

    def reallocate(self, new_max_size: int) -> None:
        """Resize the buffer to ``new_max_size`` rows, keeping stored data.

        Bug fixes vs. the previous version:
        - only the valid rows (``v[:self.n_entries]``) are copied; assigning
          the whole raw buffer raised a shape mismatch whenever the buffer
          was not completely full;
        - ``n_entries`` is clamped when shrinking, so sampling can no longer
          index past the end of the new arrays.
        """
        if new_max_size == self.max_size:
            return
        ids = np.arange(self.n_entries) % new_max_size
        for k, v in self._data.items():
            new_arr = np.empty((new_max_size, *v.shape[1:]), dtype=v.dtype)
            # when shrinking, duplicate indices in `ids` keep the later rows
            new_arr[ids] = v[:self.n_entries]
            self._data[k] = new_arr
        self._pointer = self._pointer % new_max_size
        self.n_entries = min(self.n_entries, new_max_size)
        self.max_size = new_max_size

    def get_batch(self, batch_size: int, keys: Optional[Iterable[str]] = None) -> Dict[str, np.ndarray]:
        """Sample ``batch_size`` transitions uniformly at random (with replacement).

        Raises ValueError (from np.random.randint) if the buffer is empty.
        """
        if keys is None:
            keys = self._keys
        ids = np.random.randint(0, self.n_entries, batch_size)
        return {k: v[ids] for k, v in self.get_by_keys(keys).items()}

    def get_by_keys(self, keys: Iterable[str]) -> Dict[str, np.ndarray]:
        """Return views/copies of the valid rows for the requested keys."""
        result = {}
        for k in keys:
            x = self._data[k][:self.n_entries]
            if k == "terminals1":
                x = x.astype(np.float32)
            result[k] = x
        return result

    def get_all(self) -> Dict[str, np.ndarray]:
        """Return all stored transitions for every key."""
        return self.get_by_keys(self._keys)

    def _update(self, n_samples: int) -> None:
        """Advance the write pointer and the fill counter after a write."""
        self._pointer = (self._pointer + n_samples) % self.max_size
        self.n_entries = min(self.n_entries + n_samples, self.max_size)

    def add(self, state: np.ndarray, action: np.ndarray, reward: float, new_state: np.ndarray, terminal: bool) -> None:
        """Store a single transition, overwriting the oldest row when full."""
        np.copyto(self._data["states0"][self._pointer], state)
        np.copyto(self._data["actions"][self._pointer], action)
        np.copyto(self._data["rewards"][self._pointer], reward)
        np.copyto(self._data["states1"][self._pointer], new_state)
        np.copyto(self._data["terminals1"][self._pointer], terminal)
        self._update(1)

    def add_by_experiences(self, experiences: List["Experience"]) -> None:
        """Store a list of ``Experience`` objects one by one.

        The annotation is a string forward reference so the class does not
        require ``Experience`` to be importable at definition time.
        """
        for experience in experiences:
            self.add(experience.state, experience.action, experience.reward,
                     experience.next_state, experience.terminal)

    def add_by_arrays(self,
                      states: np.ndarray,
                      actions: np.ndarray,
                      rewards: np.ndarray,
                      new_states: np.ndarray,
                      terminals: np.ndarray) -> None:
        """Store a batch of transitions at once, wrapping around when needed."""
        n_samples = states.shape[0]
        ids = np.arange(self._pointer, self._pointer + n_samples) % self.max_size
        self._data["states0"][ids] = states
        self._data["actions"][ids] = actions
        self._data["rewards"][ids] = rewards
        self._data["states1"][ids] = new_states
        self._data["terminals1"][ids] = terminals
        self._update(n_samples)

    def erase(self) -> None:
        """Logically clear the buffer (arrays are kept allocated)."""
        self._pointer = 0
        self.n_entries = 0
| [
"numpy.empty",
"numpy.copyto",
"numpy.random.randint",
"numpy.arange"
] | [((1566, 1614), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.n_entries', 'batch_size'], {}), '(0, self.n_entries, batch_size)\n', (1583, 1614), True, 'import numpy as np\n'), ((2374, 2428), 'numpy.copyto', 'np.copyto', (["self._data['states0'][self._pointer]", 'state'], {}), "(self._data['states0'][self._pointer], state)\n", (2383, 2428), True, 'import numpy as np\n'), ((2437, 2492), 'numpy.copyto', 'np.copyto', (["self._data['actions'][self._pointer]", 'action'], {}), "(self._data['actions'][self._pointer], action)\n", (2446, 2492), True, 'import numpy as np\n'), ((2501, 2556), 'numpy.copyto', 'np.copyto', (["self._data['rewards'][self._pointer]", 'reward'], {}), "(self._data['rewards'][self._pointer], reward)\n", (2510, 2556), True, 'import numpy as np\n'), ((2565, 2623), 'numpy.copyto', 'np.copyto', (["self._data['states1'][self._pointer]", 'new_state'], {}), "(self._data['states1'][self._pointer], new_state)\n", (2574, 2623), True, 'import numpy as np\n'), ((2632, 2692), 'numpy.copyto', 'np.copyto', (["self._data['terminals1'][self._pointer]", 'terminal'], {}), "(self._data['terminals1'][self._pointer], terminal)\n", (2641, 2692), True, 'import numpy as np\n'), ((473, 538), 'numpy.empty', 'np.empty', (['(self.max_size, *observation_shape)'], {'dtype': 'states_dtype'}), '((self.max_size, *observation_shape), dtype=states_dtype)\n', (481, 538), True, 'import numpy as np\n'), ((563, 621), 'numpy.empty', 'np.empty', (['(self.max_size, *action_shape)'], {'dtype': 'np.float32'}), '((self.max_size, *action_shape), dtype=np.float32)\n', (571, 621), True, 'import numpy as np\n'), ((646, 692), 'numpy.empty', 'np.empty', (['(self.max_size, 1)'], {'dtype': 'np.float32'}), '((self.max_size, 1), dtype=np.float32)\n', (654, 692), True, 'import numpy as np\n'), ((717, 782), 'numpy.empty', 'np.empty', (['(self.max_size, *observation_shape)'], {'dtype': 'states_dtype'}), '((self.max_size, *observation_shape), dtype=states_dtype)\n', (725, 782), True, 'import 
numpy as np\n'), ((810, 856), 'numpy.empty', 'np.empty', (['(self.max_size, 1)'], {'dtype': 'np.float32'}), '((self.max_size, 1), dtype=np.float32)\n', (818, 856), True, 'import numpy as np\n'), ((1033, 1058), 'numpy.arange', 'np.arange', (['self.n_entries'], {}), '(self.n_entries)\n', (1042, 1058), True, 'import numpy as np\n'), ((1136, 1189), 'numpy.empty', 'np.empty', (['(new_max_size, *v.shape[1:])'], {'dtype': 'v.dtype'}), '((new_max_size, *v.shape[1:]), dtype=v.dtype)\n', (1144, 1189), True, 'import numpy as np\n'), ((3279, 3330), 'numpy.arange', 'np.arange', (['self._pointer', '(self._pointer + n_samples)'], {}), '(self._pointer, self._pointer + n_samples)\n', (3288, 3330), True, 'import numpy as np\n')] |
"""Base ModelServer test class."""
import json
import numpy as np
from serveit.server import ModelServer
class ModelServerTest(object):
    """Base class to test the prediction server.

    ModelServerTest should be inherited by a class that has a `model` attribute,
    and calls `ModelServerTest._setup()` after instantiation. That class should
    also inherit from `unittest.TestCase` to ensure tests are executed.
    """

    def _setup(self, model, fit, data, predict=None, **kwargs):
        """Set up method to be called before each unit test.

        Arguments:
            - model: the model under test.
              NOTE(review): this argument is currently unused; subclasses are
              expected to assign ``self.model`` before calling ``_setup`` --
              consider ``self.model = model`` here.
            - fit (callable): model training method; must accept args (data, target)
            - data: dataset object exposing ``data`` and ``target`` attributes
            - predict (callable): prediction method; defaults to ``self.model.predict``
            - kwargs: extra keyword arguments forwarded to ``ModelServer``
        """
        self.data = data
        fit(self.data.data, self.data.target)
        self.predict = predict or self.model.predict
        self.server_kwargs = kwargs
        self.server = ModelServer(self.model, self.predict, **kwargs)
        self.app = self.server.app.test_client()

    @staticmethod
    def _prediction_post(app, data):
        """Make a POST request to `app` with JSON body `data`."""
        return app.post(
            '/predictions',
            headers={'Content-Type': 'application/json'},
            data=json.dumps(data),
        )

    def _get_sample_data(self, n=100):
        """Return a random sample of size n of self.data (with replacement)."""
        sample_idx = np.random.randint(self.data.data.shape[0], size=n)
        return self.data.data[sample_idx, :]

    def _check_predictions(self, response_data, sample_data):
        """Shared assertions on a predictions response.

        For multiclass targets every prediction must be a known training
        label; for regression the mean prediction over the sample should
        approximate the population target mean.
        """
        self.assertEqual(len(response_data), len(sample_data))
        if self.data.target.ndim > 1:
            # for multiclass each prediction should be one of the training labels
            for prediction in response_data:
                self.assertIn(prediction, self.data.target)
        else:
            # the average regression prediction for a sample of data should be similar
            # to the population mean
            # TODO: remove variance from this test (i.e., no chance of false negative)
            pred_pct_diff = np.array(response_data).mean() / self.data.target.mean() - 1
            self.assertAlmostEqual(pred_pct_diff / 1e4, 0, places=1)

    def test_404_media(self):
        """Make sure API serves 404 response with JSON."""
        response = self.app.get('/fake-endpoint')
        self.assertEqual(response.status_code, 404)
        response_data_raw = response.get_data()
        self.assertIsNotNone(response_data_raw)
        response_data = json.loads(response_data_raw)
        self.assertGreater(len(response_data), 0)

    def test_features_info_none(self):
        """Verify 404 response if '/info/features' endpoint not yet created."""
        response = self.app.get('/info/features')
        self.assertEqual(response.status_code, 404)

    def test_features_info(self):
        """Test features info endpoint."""
        self.server.create_info_endpoint('features', self.data.feature_names)
        app = self.server.app.test_client()
        response = app.get('/info/features')
        response_data = json.loads(response.get_data())
        self.assertEqual(len(response_data), self.data.data.shape[1])
        try:
            self.assertCountEqual(response_data, self.data.feature_names)
        except AttributeError:  # Python 2
            self.assertItemsEqual(response_data, self.data.feature_names)

    def test_target_labels_info_none(self):
        """Verify 404 response if '/info/target_labels' endpoint not yet created."""
        response = self.app.get('/info/target_labels')
        self.assertEqual(response.status_code, 404)

    def test_target_labels_info(self):
        """Test target labels info endpoint (skipped for datasets without labels)."""
        if not hasattr(self.data, 'target_names'):
            return
        self.server.create_info_endpoint('target_labels', self.data.target_names.tolist())
        app = self.server.app.test_client()
        response = app.get('/info/target_labels')
        response_data = json.loads(response.get_data())
        self.assertEqual(len(response_data), self.data.target_names.shape[0])
        try:
            self.assertCountEqual(response_data, self.data.target_names)
        except AttributeError:  # Python 2
            self.assertItemsEqual(response_data, self.data.target_names)

    def test_predictions(self):
        """Test predictions endpoint."""
        sample_data = self._get_sample_data()
        response = self._prediction_post(self.app, sample_data.tolist())
        response_data = json.loads(response.get_data())
        self._check_predictions(response_data, sample_data)

    def test_input_validation(self):
        """Add simple input validator and make sure it triggers."""
        # model input validator
        def feature_count_check(data):
            try:
                # convert PyTorch variables to numpy arrays
                data = data.data.numpy()
            except AttributeError:
                # plain numpy input (or list) -- nothing to convert
                pass
            # check num dims
            if data.ndim != 2:
                return False, 'Data should have two dimensions.'
            # check number of columns
            if data.shape[1] != self.data.data.shape[1]:
                # "required" is the model's expected feature count,
                # "provided" is what the request actually contained
                reason = '{} features required, {} features provided'.format(
                    self.data.data.shape[1], data.shape[1])
                return False, reason
            # validation passed
            return True, None
        # set up test server
        server = ModelServer(self.model, self.predict, feature_count_check, **self.server_kwargs)
        app = server.app.test_client()
        # generate sample data
        sample_data = self._get_sample_data()
        # post good data, verify 200 response
        response = self._prediction_post(app, sample_data.tolist())
        self.assertEqual(response.status_code, 200)
        # post bad data (drop a single column), verify 400 response
        response = self._prediction_post(app, sample_data[:, :-1].tolist())
        self.assertEqual(response.status_code, 400)
        response_data = json.loads(response.get_data())
        expected_reason = '{} features required, {} features provided'.format(
            self.data.data.shape[1], self.data.data.shape[1] - 1)
        self.assertIn(expected_reason, response_data['message'])

    def test_model_info(self):
        """Test model info endpoint."""
        response = self.app.get('/info/model')
        response_data = json.loads(response.get_data())
        self.assertGreater(len(response_data), 3)  # TODO: expand test scope

    def test_data_loader(self):
        """Test model prediction with a custom data loader callback."""
        # TODO: test alternative request method (e.g., URL params)
        # define custom data loader
        def read_json_from_dict():
            from flask import request
            # read data as the value of the 'data' key
            data = request.get_json()
            return np.array(data['data'])
        # create test client
        server = ModelServer(self.model, self.predict, data_loader=read_json_from_dict, **self.server_kwargs)
        app = server.app.test_client()
        # generate sample data, and wrap in dict keyed by 'data'
        sample_data = self._get_sample_data()
        data_dict = dict(data=sample_data.tolist())
        response = self._prediction_post(app, data_dict)
        response_data = json.loads(response.get_data())
        self._check_predictions(response_data, sample_data)

    def _update_kwargs_item(self, item, key_name, position='first'):
        """Prepend (or append) a callback to the existing chain under
        ``key_name`` in self's server kwargs and return the kwargs dict."""
        kwargs = self.server_kwargs
        if key_name in self.server_kwargs:
            existing_items = kwargs[key_name]
            if not isinstance(existing_items, (list, tuple)):
                existing_items = [existing_items]
        else:
            existing_items = []
        if position == 'first':
            kwargs[key_name] = [item] + existing_items
        if position == 'last':
            kwargs[key_name] = existing_items + [item]
        return kwargs

    def test_preprocessing(self):
        """Test predictions endpoint with custom preprocessing callback."""
        # create test client with preprocessor that unwraps data from a dict
        # as the value of the 'data' key
        kwargs = self._update_kwargs_item(lambda d: d['data'], 'preprocessor')
        server = ModelServer(self.model, self.predict, **kwargs)
        app = server.app.test_client()
        # generate sample data, and wrap in dict keyed by 'data'
        sample_data = self._get_sample_data()
        data_dict = dict(data=sample_data.tolist())
        response = self._prediction_post(app, data_dict)
        response_data = json.loads(response.get_data())
        self._check_predictions(response_data, sample_data)

    def test_preprocessing_list(self):
        """Test predictions endpoint with chained preprocessing callbacks."""
        # two chained preprocessors: unwrap 'data2', then unwrap 'data'
        kwargs = self._update_kwargs_item(lambda d: d['data'], 'preprocessor')
        kwargs['preprocessor'] = [lambda d: d['data2']] + kwargs['preprocessor']
        server = ModelServer(
            self.model,
            self.predict,
            **kwargs
        )
        app = server.app.test_client()
        # generate sample data, and wrap in a nested dict
        sample_data = self._get_sample_data()
        data_dict = dict(data2=dict(data=sample_data.tolist()))
        response = self._prediction_post(app, data_dict)
        response_data = json.loads(response.get_data())
        self._check_predictions(response_data, sample_data)

    def test_postprocessing(self):
        """Test predictions endpoint with custom postprocessing callback."""
        # create test client with postprocessor that wraps predictions in a dictionary
        kwargs = self._update_kwargs_item(lambda x: dict(prediction=x.tolist()), 'postprocessor', 'last')
        server = ModelServer(self.model, self.predict, **kwargs)
        app = server.app.test_client()
        # generate sample data
        sample_data = self._get_sample_data()
        response = self._prediction_post(app, sample_data.tolist())
        response_data = json.loads(response.get_data())['prediction']  # predictions are nested under 'prediction' key
        self._check_predictions(response_data, sample_data)

    def test_get_app(self):
        """Make sure get_app method returns the same app."""
        self.assertEqual(self.server.get_app(), self.server.app)

    def test_400_no_content_type(self):
        """Check 400 response if no Content-Type header specified."""
        response = self.app.post(
            '/predictions',
        )
        self.assertEqual(response.status_code, 400)
        response_body = json.loads(response.get_data())
        self.assertEqual(response_body['message'], 'Unable to fetch data')
        self.assertGreaterEqual(len(response_body['details']), 2)
| [
"serveit.server.ModelServer",
"json.loads",
"json.dumps",
"numpy.random.randint",
"numpy.array",
"flask.request.get_json"
] | [((855, 902), 'serveit.server.ModelServer', 'ModelServer', (['self.model', 'self.predict'], {}), '(self.model, self.predict, **kwargs)\n', (866, 902), False, 'from serveit.server import ModelServer\n'), ((1345, 1395), 'numpy.random.randint', 'np.random.randint', (['self.data.data.shape[0]'], {'size': 'n'}), '(self.data.data.shape[0], size=n)\n', (1362, 1395), True, 'import numpy as np\n'), ((1753, 1782), 'json.loads', 'json.loads', (['response_data_raw'], {}), '(response_data_raw)\n', (1763, 1782), False, 'import json\n'), ((5306, 5391), 'serveit.server.ModelServer', 'ModelServer', (['self.model', 'self.predict', 'feature_count_check'], {}), '(self.model, self.predict, feature_count_check, **self.server_kwargs\n )\n', (5317, 5391), False, 'from serveit.server import ModelServer\n'), ((6849, 6946), 'serveit.server.ModelServer', 'ModelServer', (['self.model', 'self.predict'], {'data_loader': 'read_json_from_dict'}), '(self.model, self.predict, data_loader=read_json_from_dict, **\n self.server_kwargs)\n', (6860, 6946), False, 'from serveit.server import ModelServer\n'), ((8894, 8941), 'serveit.server.ModelServer', 'ModelServer', (['self.model', 'self.predict'], {}), '(self.model, self.predict, **kwargs)\n', (8905, 8941), False, 'from serveit.server import ModelServer\n'), ((10333, 10380), 'serveit.server.ModelServer', 'ModelServer', (['self.model', 'self.predict'], {}), '(self.model, self.predict, **kwargs)\n', (10344, 10380), False, 'from serveit.server import ModelServer\n'), ((11750, 11797), 'serveit.server.ModelServer', 'ModelServer', (['self.model', 'self.predict'], {}), '(self.model, self.predict, **kwargs)\n', (11761, 11797), False, 'from serveit.server import ModelServer\n'), ((6741, 6759), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (6757, 6759), False, 'from flask import request\n'), ((6779, 6801), 'numpy.array', 'np.array', (["data['data']"], {}), "(data['data'])\n", (6787, 6801), True, 'import numpy as np\n'), ((1202, 1218), 
'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1212, 1218), False, 'import json\n'), ((4336, 4359), 'numpy.array', 'np.array', (['response_data'], {}), '(response_data)\n', (4344, 4359), True, 'import numpy as np\n'), ((7800, 7823), 'numpy.array', 'np.array', (['response_data'], {}), '(response_data)\n', (7808, 7823), True, 'import numpy as np\n'), ((9800, 9823), 'numpy.array', 'np.array', (['response_data'], {}), '(response_data)\n', (9808, 9823), True, 'import numpy as np\n'), ((11297, 11320), 'numpy.array', 'np.array', (['response_data'], {}), '(response_data)\n', (11305, 11320), True, 'import numpy as np\n'), ((12644, 12667), 'numpy.array', 'np.array', (['response_data'], {}), '(response_data)\n', (12652, 12667), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Author : <NAME>
# e-mail : <EMAIL>
# Powered by Seculayer © 2021 Service Model Team, R&D Center.
import tensorflow as tf
from mlps.common.Common import Common
from mlps.common.exceptions.ParameterError import ParameterError
from mlps.core.apeflow.api.algorithms.tf.keras.TFKerasAlgAbstract import TFKerasAlgAbstract
from mlps.core.apeflow.interface.utils.tf.TFUtils import TFUtils
class KCNN(TFKerasAlgAbstract):
    """Keras convolutional network (1D/2D/3D) for classification or regression.

    Builds a stack of conv + pooling layers followed by an MLP head, with the
    layer constructors resolved from the ``Common`` code dictionaries.
    """

    # MODEL INFORMATION
    ALG_CODE = "KCNN"
    ALG_TYPE = ["Classifier", "Regressor"]
    DATA_TYPE = ["Single"]
    VERSION = "1.0.0"

    def __init__(self, param_dict, ext_data=None):
        super(KCNN, self).__init__(param_dict, ext_data)

    def _check_parameter(self, param_dict):
        """Validate and coerce the KCNN hyper-parameters.

        Returns the parameter dict produced by the base class, extended with
        the KCNN-specific entries coerced to their proper types.

        Raises:
            ParameterError: if a required key is missing or a value cannot be
                coerced to the expected type.
        """
        _param_dict = super(KCNN, self)._check_parameter(param_dict)
        # Parameter Setting
        try:
            _param_dict["hidden_units"] = list(map(int, str(param_dict["hidden_units"]).split(",")))
            _param_dict["act_fn"] = str(param_dict["act_fn"])
            _param_dict["algorithm_type"] = str(param_dict["algorithm_type"])
            _param_dict["filter_sizes"] = list(map(int, str(param_dict["filter_sizes"]).split(",")))
            _param_dict["pool_sizes"] = list(map(int, str(param_dict["pool_sizes"]).split(",")))
            _param_dict["num_filters"] = int(param_dict["num_filters"])
            _param_dict["dropout_prob"] = float(param_dict["dropout_prob"])
            _param_dict["pooling_fn"] = str(param_dict["pooling_fn"])
            _param_dict["conv_fn"] = str(param_dict["conv_fn"])
            _param_dict["optimizer_fn"] = str(param_dict["optimizer_fn"])
            _param_dict["learning_rate"] = float(param_dict["learning_rate"])
        except (KeyError, TypeError, ValueError) as err:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; chain the cause for easier debugging.
            raise ParameterError from err
        return _param_dict

    def _build(self):
        """Assemble and compile the Keras model from the checked parameters."""
        # Parameter Setting
        input_units = self.param_dict["input_units"]
        output_units = self.param_dict["output_units"]
        hidden_units = self.param_dict["hidden_units"]
        act_fn = self.param_dict["act_fn"]
        model_nm = self.param_dict["model_nm"]
        alg_sn = self.param_dict["alg_sn"]
        filter_sizes = self.param_dict["filter_sizes"]
        pool_sizes = self.param_dict["pool_sizes"]
        num_filters = self.param_dict["num_filters"]
        dropout_prob = self.param_dict["dropout_prob"]
        pooling_fn = self.param_dict["pooling_fn"]
        conv_fn = self.param_dict["conv_fn"]
        optimizer_fn = self.param_dict["optimizer_fn"]
        learning_rate = self.param_dict["learning_rate"]

        # NOTE(review): `eval` is used to resolve layer/optimizer classes from
        # the internal Common code dictionaries. Values come from project
        # constants, not user input, but a direct class mapping would be safer.
        activation = eval(Common.ACTIVATE_FN_CODE_DICT[act_fn])

        # Generate to Keras Model
        self.model = tf.keras.Sequential()
        self.inputs = tf.keras.Input(shape=input_units, name="{}_{}_X".format(model_nm, alg_sn))
        self.model.add(self.inputs)

        if "1D" in conv_fn:
            # input: text (None, n_feature, 1)
            conv_stride = 1
            pooling_stride = 2
            self.model.add(
                tf.keras.layers.Reshape(
                    input_units + (1,),  # tuple operation ex) (1,2) + (3,) = (1,2,3)
                    name="{}_{}_input_reshape".format(model_nm, alg_sn)
                )
            )
        elif "2D" in conv_fn:
            # input: image (None, width, height, channel)
            conv_stride = [1, 1]
            pooling_stride = [2, 2]
            if len(input_units) == 2:
                # add the missing channel dimension for grayscale-style input
                self.model.add(
                    tf.keras.layers.Reshape(
                        input_units + (1,),  # tuple operation ex) (1,2) + (3,) = (1,2,3)
                        name="{}_{}_input_reshape".format(model_nm, alg_sn)
                    )
                )
        else:  # 3D
            conv_stride = [1, 1, 1]
            pooling_stride = [2, 2, 2]

        # One conv + pooling pair per entry in filter_sizes.
        for i, filter_size in enumerate(filter_sizes):
            # Convolution Layer
            conv_cls = eval(Common.CONV_FN_CODE_DICT[conv_fn])(
                kernel_size=filter_size,
                filters=num_filters,
                strides=conv_stride,
                padding="SAME",
                activation=str.lower(act_fn),
                name="{}_{}_conv_{}".format(model_nm, alg_sn, i)
            )
            self.model.add(conv_cls)
            # Pooling Layer
            pooled_cls = eval(Common.POOLING_FN_CODE_DICT[pooling_fn])(
                pool_size=pool_sizes[i],
                strides=pooling_stride,
                padding='SAME',
                name="{}_{}_pool_{}".format(model_nm, alg_sn, i))
            self.model.add(pooled_cls)
        #####################################################################################
        flatten_cls = tf.keras.layers.Flatten()
        self.model.add(flatten_cls)

        self.model.add(
            tf.keras.layers.Dropout(
                dropout_prob
            )
        )
        # MLP head sized from the flattened conv output down to output_units.
        units = TFUtils.get_units(self.model.output_shape[1], hidden_units, output_units)

        TFUtils.tf_keras_mlp_block_v2(
            self.model, units, activation, dropout_prob=self.param_dict["dropout_prob"],
            name="{}_{}".format(model_nm, alg_sn), alg_type=self.param_dict["algorithm_type"]
        )
        self.predicts = self.model.get_layer(index=-1)

        # MAKE TRAINING METRICS
        if self.param_dict["algorithm_type"] == "Classifier":
            self.model.compile(
                loss='categorical_crossentropy',
                optimizer=eval(Common.OPTIMIZER_FN_CODE_DICT[optimizer_fn])(learning_rate),
                metrics=['accuracy']
            )
        elif self.param_dict["algorithm_type"] == "Regressor":
            self.model.compile(
                loss="mse",
                optimizer=eval(Common.OPTIMIZER_FN_CODE_DICT[optimizer_fn])(learning_rate),
            )

        # NOTE(review): summary is only printed when dropout_prob != 0 --
        # looks unintentional; confirm whether it should be unconditional.
        if dropout_prob != 0.:
            self.model.summary(print_fn=self.LOGGER.info)
if __name__ == '__main__':
    # CLASSIFIER
    # Enable on-demand GPU memory growth so TF does not pre-allocate all VRAM.
    gpus = tf.config.list_physical_devices('GPU')
    print("physical devices: ", gpus)
    for device in gpus:
        tf.config.experimental.set_memory_growth(device, True)

    import numpy as np

    # Demo hyper-parameter configuration for a 1D-conv regressor.
    conf = {
        "algorithm_code": "KCNN",
        "algorithm_type": "Regressor",
        "data_type": "Single",
        "method_type": "Basic",
        "input_units": (2,),
        "output_units": "2",
        "hidden_units": "64,32,4",
        "global_step": "100",
        "dropout_prob": "0.2",
        "optimizer_fn": "Adadelta",
        "model_nm": "KCNN-1111111111111112234",
        "alg_sn": "0",
        "job_type": "learn",
        "depth": "0",
        "global_sn": "0",
        "learning_rate": "0.001",
        "num_layer": "5",
        "act_fn": "Sigmoid",
        "filter_sizes": "2,2,2",
        "pool_sizes": "2,2,2",
        "num_filters": "64",
        "pooling_fn": "Average1D",
        "conv_fn": "Conv1D",
        "early_type": "0",
        "minsteps": "10",
        "early_key": "accuracy",
        "early_value": "0.98",
        "num_workers": "1"
    }

    # Tiny toy dataset: 4 training points, 5 evaluation points.
    dataset = {
        "x": np.array([[-1., -1.], [-2., -1.], [1., 1.], [2., 1.]]),
        "y": np.array([[0.5, 0.5], [0.8, 0.2], [0.3, 0.7], [0.1, 0.9]]),
    }
    eval_data = {
        "x": np.array([[3., 2.], [-1., -1.], [-2., -1.], [1., 1.], [2., 1.]]),
        "y": np.array([[0., 1.], [0.5, 0.5], [0.8, 0.2], [0.3, 0.7], [0.1, 0.9]]),
    }

    # Build, train, evaluate and persist the model.
    model = KCNN(conf)
    model._build()
    model.learn(dataset=dataset)
    model.eval(eval_data)
    print(model.predict(eval_data["x"]))
    model.saved_model()

    # Round-trip: reload the saved model and evaluate it again.
    restored = KCNN(conf)
    restored.load_model()
    restored.eval(eval_data)
    print(restored.predict(eval_data["x"]))
| [
"tensorflow.keras.layers.Dropout",
"tensorflow.config.list_physical_devices",
"tensorflow.config.experimental.set_memory_growth",
"numpy.array",
"tensorflow.keras.Sequential",
"mlps.core.apeflow.interface.utils.tf.TFUtils.TFUtils.get_units",
"tensorflow.keras.layers.Flatten"
] | [((5970, 6008), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (6001, 6008), True, 'import tensorflow as tf\n'), ((2685, 2706), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (2704, 2706), True, 'import tensorflow as tf\n'), ((4707, 4732), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (4730, 4732), True, 'import tensorflow as tf\n'), ((4900, 4973), 'mlps.core.apeflow.interface.utils.tf.TFUtils.TFUtils.get_units', 'TFUtils.get_units', (['self.model.output_shape[1]', 'hidden_units', 'output_units'], {}), '(self.model.output_shape[1], hidden_units, output_units)\n', (4917, 4973), False, 'from mlps.core.apeflow.interface.utils.tf.TFUtils import TFUtils\n'), ((6100, 6151), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (6140, 6151), True, 'import tensorflow as tf\n'), ((7101, 7163), 'numpy.array', 'np.array', (['[[-1.0, -1.0], [-2.0, -1.0], [1.0, 1.0], [2.0, 1.0]]'], {}), '([[-1.0, -1.0], [-2.0, -1.0], [1.0, 1.0], [2.0, 1.0]])\n', (7109, 7163), True, 'import numpy as np\n'), ((7170, 7228), 'numpy.array', 'np.array', (['[[0.5, 0.5], [0.8, 0.2], [0.3, 0.7], [0.1, 0.9]]'], {}), '([[0.5, 0.5], [0.8, 0.2], [0.3, 0.7], [0.1, 0.9]])\n', (7178, 7228), True, 'import numpy as np\n'), ((7341, 7415), 'numpy.array', 'np.array', (['[[3.0, 2.0], [-1.0, -1.0], [-2.0, -1.0], [1.0, 1.0], [2.0, 1.0]]'], {}), '([[3.0, 2.0], [-1.0, -1.0], [-2.0, -1.0], [1.0, 1.0], [2.0, 1.0]])\n', (7349, 7415), True, 'import numpy as np\n'), ((7429, 7499), 'numpy.array', 'np.array', (['[[0.0, 1.0], [0.5, 0.5], [0.8, 0.2], [0.3, 0.7], [0.1, 0.9]]'], {}), '([[0.0, 1.0], [0.5, 0.5], [0.8, 0.2], [0.3, 0.7], [0.1, 0.9]])\n', (7437, 7499), True, 'import numpy as np\n'), ((4805, 4842), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout_prob'], {}), '(dropout_prob)\n', (4828, 
4842), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 17 02:03:03 2021
@author: J76747
"""
import json
import os
import re
import sys
import urllib
import urllib.request  # `import urllib` alone does not load the request submodule
from timeit import default_timer as dt

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score, accuracy_score
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler

from tsdst.utils import updateProgBar
def drop_extras(data):
    '''
    Drop dataframe columns that contain only zeros. This is meant to be
    used on dummy encoded variables, or any variable where a zero value
    represents the non-interesting case.

    Note: a column is dropped when its mean is exactly 0; for dummy (0/1)
    columns that is equivalent to "all zeros".

    Parameters
    ----------
    data : dataframe
        A pandas dataframe.

    Returns
    -------
    subx : dataframe
        A new dataframe with the offending columns dropped (the input is
        not modified).
    '''
    # Fix: the original body referenced an undefined name `subx` instead of
    # the `data` parameter, raising NameError on every call.
    subx = data
    cols_to_drop = []
    for col in subx.columns:
        if subx[col].mean() == 0:
            cols_to_drop.append(col)
    print('Dropped: ', cols_to_drop)
    subx = subx.drop(cols_to_drop, axis=1)
    return subx
def data_pipeline(data, estimator, parameters, target_var, scaler=None,
                  test_size=0.25, random_state=42, cv=5, scoring='f1',
                  verbose=10, n_jobs=1):
    '''
    Data pipeline performs scaling and generates an arbitrary model.

    Parameters
    ----------
    data : dataframe
        The data you want to model.
    estimator : sklearn estimator
        An estimator that is similar to sklearn estimator objects.
    parameters : dict
        A dictionary of parameters to gridsearch as part of the pipeline.
    target_var : str
        The target variable in the dataset.
    scaler : sklearn object, None
        An object that fits/transforms data, default is minmax scaler.
        Follows sklearn API.
    test_size : float
        The size of the test/hold-out set
    random_state : int
        Random seed for reproducing data splits
    cv : int
        Number of (Stratified) K-folds in cross-validation
    scoring : str, list, or sklearn scorer object
        Function to use in scoring gridsearch
    verbose : str
        Print progress of gridsearch. Higher number means more output
    n_jobs : int
        Number of cores to use in gridsearch processing

    Returns
    -------
    The fitted GridSearchCV object.
    '''
    # Default to min-max scaling, as documented. Created per call so the
    # default is not a shared mutable object.
    if scaler is None:
        scaler = MinMaxScaler()
    # List of training columns
    train_cols = [col for col in data.columns if col != target_var]
    # Split data into hold-out/training set
    X_train, X_test, y_train, y_test = train_test_split(data[train_cols],
                                                        data[target_var],
                                                        test_size=test_size,
                                                        random_state=random_state)
    # Initialize pipeline
    pipe = Pipeline(steps=[('scaler', scaler), ('estimator', estimator)])
    # Initialize gridsearch
    clf = GridSearchCV(estimator=pipe, param_grid=parameters, cv=cv,
                       refit=True, scoring=scoring, verbose=verbose,
                       n_jobs=n_jobs)
    # fit gridsearch
    clf.fit(X_train, y_train.values.reshape(-1, ))
    # Print CV results, sorted best-first
    cv_results = pd.concat([pd.DataFrame(clf.cv_results_["params"]),
                            pd.DataFrame(clf.cv_results_["mean_test_score"],
                                         columns=["F1 Score"])],
                           axis=1).sort_values('F1 Score', ascending=False)
    print(cv_results)
    # Prediction on hold-out set
    Y_pred = clf.predict(X_test.values)
    # Print hold-out results
    print('F1: ', f1_score(y_test.values.reshape(-1, ), Y_pred))
    print('Accuracy: ', accuracy_score(y_test.values.reshape(-1, ), Y_pred))
    return clf
# The url for retrieving the data
url = 'https://udacity-dsnd.s3.amazonaws.com/sparkify/sparkify_event_data.json'
# I found that downloading the data to my hardrive made the rest of the steps
# faster for me, so this was worth it to me. If you just want to pull straight
# from the website, just replace the references of 'large_sparkify' to the url,
# etc.
urllib.request.urlretrieve(url, 'large_sparkify.json')
# First, generate a list of all the unique userId's so that the data
# can be divided into smaller, loadable chunks. The goal is to have several
# small json files where a single user's data only appears in one file (i.e.
# a user's data is grouped together in the same file). This will run very quick
ids = []
with open('large_sparkify.json') as f:
u_key = 'userId'
with open('new_large_sparkify.json', 'a') as f2:
for i, line in enumerate(f):
match = re.search('\"' + u_key + '\".*?, \"', line).group(0)
ids.append(match)
if i % 100000 == 0: print(i)
f2.write(line)
# generate unique set of userId's
unique_ids = list(set(ids))
number_of_files = 10
# establish the group size per file (number of users in each file)
group_size = int(len(unique_ids)/number_of_files)+1
# create the unique_groups of userId's
id_groups = [unique_ids[(i*group_size):((i+1)*group_size)] for i in range(number_of_files)]
# initialize and open files for each group
group_files = [open('sparkify_group_' + str(i) + '.json', 'a') for i in range(number_of_files)]
# Divide and write users to their respective files
with open('large_sparkify.json') as f:
key = 'userId'
for i, line in enumerate(f):
if i % 100000 == 0: print(i)
match = re.search('\"' + key + '\".*?, \"', line).group(0)
for i, group in enumerate(group_files):
if match in id_groups[i]:
group.write(line)
for group in group_files:
group.close()
# Load the mini files that were created
files = ['sparkify_group_'+ str(i) + '.json' for i in range(number_of_files)]
# Create functions for future data aggregations
longest_song = lambda x: 0 if pd.isnull(np.max(x)) else np.max(x)
listening_time = lambda x: 0 if pd.isnull(np.sum(x)) else np.sum(x)
number_of_songs = lambda x: 0 if pd.isnull(x.count()) else x.count()
minmax = lambda x: np.max(x)-np.min(x)
# Initialize dictionaries for translating complicated userAgent labels
ua_value = {'userAgent_mozilla macintosh intel mac os applewebkit khtml like gecko chrome safari': 0}
ua_dict = {'userAgent_mozilla macintosh intel mac os applewebkit khtml like gecko chrome safari': 'userAgent_0'}
# Initialize dictionaries for performing aggregations, i.e. assigns functions
# to columns
agg1_dict = {'itemInSession': 'max',
'length': [longest_song, listening_time],
'song': number_of_songs,
'ts': ['min', 'max', minmax],
'registration': 'min',
'cancelled': 'max',
'gender_F': 'max',
'gender_M': 'max',
'gender_Unknown': 'max',
'status_200': 'sum',
'status_307': 'sum',
'status_404': 'sum',
'level_paid': 'sum',
'level_free': 'sum',
'method_PUT': 'sum',
'method_GET': 'sum',
'page_NextSong': 'sum',
'page_Thumbs Up': 'sum',
'page_Home': 'sum',
'page_Add to Playlist': 'sum',
'page_Add Friend': 'sum',
'page_Roll Advert': 'sum',
'page_Register': 'sum',
'page_Submit Registration': 'sum',
'page_Login': 'sum',
'page_Logout': 'sum',
'page_Thumbs Down': 'sum',
'page_Downgrade': 'sum',
'page_Settings': 'sum',
'page_Help': 'sum',
'page_Upgrade': 'sum',
'page_About': 'sum',
'page_Save Settings': 'sum',
'page_Error': 'sum',
'page_Submit Upgrade': 'sum',
'page_Submit Downgrade': 'sum',
}
agg2_dict = {'itemInSessionmax': 'mean',
'length<lambda_0>': 'max',
'length<lambda_1>': ['mean', 'sum'],
'sessionId': 'count',
'song<lambda>': 'sum',
'ts<lambda_0>': ['mean', 'sum'],
'tsmin': 'min',
'tsmax': 'max',
'registrationmin': 'min',
'cancelledmax': 'max',
'gender_Fmax': 'mean',
'gender_Mmax': 'mean',
'gender_Unknownmax': 'mean',
'status_200sum': 'mean',
'status_307sum': 'mean',
'status_404sum': 'mean',
'level_paidsum': 'mean',
'level_freesum': 'mean',
'method_PUTsum': 'mean',
'method_GETsum': 'mean',
'page_NextSongsum': 'mean',
'page_Thumbs Upsum': 'mean',
'page_Homesum': 'mean',
'page_Add to Playlistsum': 'mean',
'page_Add Friendsum': 'mean',
'page_Roll Advertsum': 'mean',
'page_Registersum': 'mean',
'page_Submit Registrationsum': 'mean',
'page_Loginsum': 'mean',
'page_Logoutsum': 'mean',
'page_Thumbs Downsum': 'mean',
'page_Downgradesum': 'mean',
'page_Settingssum': 'mean',
'page_Helpsum': 'mean',
'page_Upgradesum': 'mean',
'page_Aboutsum': 'mean',
'page_Save Settingssum': 'mean',
'page_Errorsum': 'mean',
'page_Submit Upgradesum': 'mean',
'page_Submit Downgradesum': 'mean',
}
X_final = None
X_list = []
#initialize start time for progress tracking
t0 = dt()
# For each file generated earlier
for i, file in enumerate(files):
print('File ', i)
print(' read in data')
# Load the data in that file
X = pd.read_json(file,
lines=True,
convert_dates=False,
dtype={
#'ts': np.int16,
'userId': object,
'sessionId': object,
'page': object,
'auth': object,
'method': object,
'status': object,
'level': object,
#'itemInSession': np.int8,
#'location': str,
'userAgent': object,
#'lastName': str,
#'firstName': str,
#'registration': np.int16,
'gender': object,
#'artist': str,
'song': object,
#'length': np.float16
}
)
print(' edit data')
# Drop na values
X = X.dropna(subset=["userId", "sessionId"])
X = X[X['userId'] != ""]
# Drop columns that will not be used in the analysis
X = X.drop(['location', 'lastName', 'auth', 'artist', 'firstName'], axis=1)
# Reduce timestamps to seconds (for comparing to length variable)
X['ts'] = X['ts']/1000
X['registration'] = X['registration']/1000
# Reduce the complexity of userAgent variable through string manipulation.
# This allows it to be dummy encoded in a more practical way later
X['userAgent'] = X['userAgent'].str.replace(r'[^a-zA-Z]', ' ', regex=True)
X['userAgent'] = X['userAgent'].str.replace(r'\s+', ' ', regex=True).str.lower().str.strip()
X['userAgent'] = X['userAgent'].str.replace(r'\s[a-z]\s', ' ', regex=True)
# Fill gender/userAgent NA values with a category, if present
X[['userAgent', 'gender']] = X[['userAgent','gender']].fillna(value='Unknown')
X.registration.fillna(X.ts, inplace=True)
print(' create dummy data')
# Generate the dummy encoding
X = pd.get_dummies(X, columns=['gender', 'level', 'method', 'userAgent', 'page', 'status'])
X = X.drop(['page_Cancel'], axis=1)
# Define the truth label (user cancellations)
X = X.rename({'page_Cancellation Confirmation': 'cancelled'}, axis=1)
# Generate list of userAgent columns from the dummy encoded data
userAgent_columns = [col for col in X.columns if 'userAgent' in col]
# Update the userAgent dictionaries
for col in userAgent_columns:
if col not in list(ua_value.keys()):
ua_value.update({col: max(ua_value.values()) + 1})
ua_dict[col] = 'userAgent_'+str(ua_value[col])
# Rename the columns of X with the new userAgent column names, and
# update the list of names
X = X.rename(ua_dict, axis=1)
userAgent_columns = [col for col in X.columns if 'userAgent' in col]
# Update the first aggregation dict with any additional userAgent columns
agg1_dict.update({col: 'sum' for col in userAgent_columns})
all_agg_cols = [col for col in X.columns if col != 'userId' and col != 'sessionId']
sub_agg1 = {k: agg1_dict[k] for k in all_agg_cols}
print(' aggregate data 1')
# Perform aggregation at the sessionId level. Remove multilevel index.
session_summary = X.groupby(['userId', 'sessionId'], as_index=False).agg(sub_agg1)
session_summary.columns = session_summary.columns.map(''.join)
# Update the second aggregation dict with any additional userAgent columns
agg2_dict.update({col+'sum': 'mean' for col in userAgent_columns})
all_agg_cols2 = [col for col in session_summary.columns if col != 'userId']
sub_agg2 = {k: agg2_dict[k] for k in all_agg_cols2}
print(' aggregate data 2')
# Perform aggregation at the userId level
user_summary = session_summary.groupby(['userId'], as_index=False).agg(sub_agg2)
user_summary.columns = user_summary.columns.map(''.join)
# Append data to list for concatenation later
X_list.append(user_summary.copy(deep=True))
# garbage collect
session_summary = []
user_summary = []
updateProgBar(i+1, len(files), t0)
# Generate complete list of columns, and perpare the dataframes for merging.
# For each dataframe, check if the column exists, and if it doesn't, create
# the column and initialize it with 0.
final_columns = []
for df in X_list:
final_columns = final_columns + list(df.columns)
final_columns = list(set(final_columns))
for df in X_list:
for col in final_columns:
if col not in df.columns:
df[col] = 0
# Ensure all dataframe have same order of columns
df = df[final_columns]
# Concatenate all the aggregated dataframes into one.
X_final = pd.concat(X_list, ignore_index=True)
# Dictionary to rename columns in X_final
rename_keys = {
'userId': 'userId',
'gender_Fmaxmean': 'avg_gender_F',
'gender_Mmaxmean': 'avg_gender_M',
'gender_Unknownmaxmean': 'avg_gender_Unknown',
'itemInSessionmaxmean': 'avg_num_items_in_session',
'length<lambda_0>max': 'longest_song',
'length<lambda_1>mean': 'longest_song_per_session',
'length<lambda_1>sum': 'total_session_listening_time',
'sessionIdcount': 'total_number_of_sessions',
'registrationminmin': 'registration',
'tsminmin': 'min_session_begin',
'tsmaxmax': 'max_session_end',
'ts<lambda_0>sum': 'total_session_length',
'ts<lambda_0>mean': 'avg_session_length',
'song<lambda>sum': 'number_of_songs',
'level_freesummean': 'avg_num_free_interactions',
'level_paidsummean': 'avg_num_paid_interactions',
'method_GETsummean': 'avg_num_get_interactions',
'method_PUTsummean': 'avg_num_put_interactions',
'page_Aboutsummean': 'avg_num_about_visits',
'page_Add Friendsummean': 'avg_num_addfriend_clicks',
'page_Add to Playlistsummean': 'avg_num_addtoplaylist_clicks',
'page_Downgradesummean': 'avg_num_downgrade_visits',
'page_Errorsummean': 'avg_num_errors',
'page_Helpsummean': 'avg_num_help_visits',
'page_Homesummean': 'avg_num_home_visits',
'page_Loginsummean': 'avg_num_login_visits',
'page_Logoutsummean': 'avg_num_logout_visits',
'page_NextSongsummean': 'avg_num_nextsong_clicks',
'page_Roll Advertsummean': 'avg_num_roll_advert_visits',
'page_Save Settingssummean': 'avg_num_savesettings_clicks',
'page_Settingssummean': 'avg_num_settings_visits',
'page_Submit Downgradesummean': 'avg_num_downgrade_clicks',
'page_Submit Upgradesummean': 'avg_num_upgrade_clicks',
'page_Submit Registrationsummean': 'avg_num_submitreg_clicks',
'page_Registersummean': 'avg_num_register_visits',
'page_Thumbs Downsummean': 'avg_num_thumbsdown_clicks',
'page_Thumbs Upsummean': 'avg_num_thumbsup_clicks',
'page_Upgradesummean': 'avg_num_upgrade_visits',
'status_200summean': 'avg_status_200',
'status_307summean': 'avg_status_307',
'status_404summean': 'avg_status_404',
'userAgent_0summean': 'avg_userAgent_0_interactions',
'userAgent_1summean': 'avg_userAgent_1_interactions',
'userAgent_2summean': 'avg_userAgent_2_interactions',
'userAgent_3summean': 'avg_userAgent_3_interactions',
'userAgent_4summean': 'avg_userAgent_4_interactions',
'userAgent_5summean': 'avg_userAgent_5_interactions',
'userAgent_6summean': 'avg_userAgent_6_interactions',
'userAgent_7summean': 'avg_userAgent_7_interactions',
'userAgent_8summean': 'avg_userAgent_8_interactions',
'userAgent_9summean': 'avg_userAgent_9_interactions',
'userAgent_10summean': 'avg_userAgent_10_interactions',
'userAgent_11summean': 'avg_userAgent_11_interactions',
'userAgent_12summean': 'avg_userAgent_12_interactions',
'userAgent_13summean': 'avg_userAgent_13_interactions',
'userAgent_14summean': 'avg_userAgent_14_interactions',
'userAgent_15summean': 'avg_userAgent_15_interactions',
'userAgent_16summean': 'avg_userAgent_16_interactions',
'userAgent_17summean': 'avg_userAgent_17_interactions',
'userAgent_18summean': 'avg_userAgent_18_interactions',
'cancelledmaxmax': 'cancelled'
}
# rename columns
X_final.rename(rename_keys, axis=1, inplace=True)
# Additional feature engineering
X_final['listening_time_per_session'] = X_final['total_session_listening_time']/X_final['total_number_of_sessions']
X_final['avg_num_songs_per_session'] = X_final['number_of_songs']/X_final['total_number_of_sessions']
X_final['avg_song_length'] = X_final['total_session_listening_time']/X_final['number_of_songs']
X_final['time_since_joined'] = X_final['max_session_end'] - X_final['registration']
X_final['time_to_first_session'] = X_final['min_session_begin'] - X_final['registration']
X_final['avg_time_between_sessions'] = ((X_final['max_session_end'] - X_final['min_session_begin']) - X_final['total_session_length'])/(X_final['total_number_of_sessions']-1)
# fill in any null values after creating new features
X_final = X_final.fillna(0)
# drop bad userId
X_final = X_final[X_final['userId'] != '1261737']
# drop any columns that have all zeros
X_final = drop_extras(data=X_final)
# drop any last columns from the data (these were used in feature engineering
# and aren't particularily useful anymore)
X_final = X_final.drop(['max_session_end', 'min_session_begin',
'total_session_length', 'registration',
'total_session_listening_time',
'number_of_songs', 'userId'], axis=1)
data = X_final.copy(deep=True)
# Prepare the estimators and parameter dictionaries
lr_estimator = LogisticRegression(penalty='elasticnet', max_iter=10000,
solver='saga')
lr_parameters = {
'estimator__C': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100],
'estimator__l1_ratio': [0, 0.25, 0.4, 0.5, 0.6, 0.75, 1]
}
gb_estimator = GradientBoostingClassifier(max_depth=5, min_samples_split=2)
gb_parameters = {
'estimator__max_depth': [2, 5, 10, 20, 100],
'estimator__min_samples_split': [2, 8, 16, 32]
}
# Loop through each estimator type
target_var = 'cancelled'
for i, (estimator, params) in enumerate(zip([lr_estimator, gb_estimator],
[lr_parameters, gb_parameters])):
clf = data_pipeline(data, estimator, params, target_var)
if i == 0:
# Build coefficient matrix
coef = pd.DataFrame(list(clf.best_estimator_.steps[1][1].coef_[0]),
index=X_train.columns,
columns=["Coefficients"])
coef['Abs. Value Coefficients'] = np.abs(coef['Coefficients'])
# sort coefficients, assign color to classes
sorted_coef = coef.sort_values(['Abs. Value Coefficients'], ascending=True)
sorted_coef['color'] = ['red' if x == -1 else 'blue' for x in np.sign(sorted_coef['Coefficients'])]
# Plot the sorted coefficients
plt.figure(figsize=(15, 15))
plt.barh(range(len(sorted_coef.index)), sorted_coef['Abs. Value Coefficients'],
color=sorted_coef['color'])
plt.title('Coefficient Rankings')
plt.yticks(range(len(sorted_coef.index)), sorted_coef.index)
| [
"matplotlib.pyplot.title",
"sklearn.model_selection.GridSearchCV",
"pandas.DataFrame",
"numpy.sum",
"numpy.abs",
"timeit.default_timer",
"sklearn.model_selection.train_test_split",
"pandas.get_dummies",
"pandas.read_json",
"sklearn.ensemble.GradientBoostingClassifier",
"urllib.request.urlretriev... | [((4461, 4515), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url', '"""large_sparkify.json"""'], {}), "(url, 'large_sparkify.json')\n", (4487, 4515), False, 'import urllib\n'), ((10173, 10177), 'timeit.default_timer', 'dt', ([], {}), '()\n', (10175, 10177), True, 'from timeit import default_timer as dt\n'), ((15354, 15390), 'pandas.concat', 'pd.concat', (['X_list'], {'ignore_index': '(True)'}), '(X_list, ignore_index=True)\n', (15363, 15390), True, 'import pandas as pd\n'), ((20298, 20369), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""elasticnet"""', 'max_iter': '(10000)', 'solver': '"""saga"""'}), "(penalty='elasticnet', max_iter=10000, solver='saga')\n", (20316, 20369), False, 'from sklearn.linear_model import LogisticRegression\n'), ((20597, 20657), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'max_depth': '(5)', 'min_samples_split': '(2)'}), '(max_depth=5, min_samples_split=2)\n', (20623, 20657), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((2813, 2917), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data[train_cols]', 'data[target_var]'], {'test_size': 'test_size', 'random_state': 'random_state'}), '(data[train_cols], data[target_var], test_size=test_size,\n random_state=random_state)\n', (2829, 2917), False, 'from sklearn.model_selection import train_test_split, GridSearchCV\n'), ((3130, 3192), 'sklearn.pipeline.Pipeline', 'Pipeline', ([], {'steps': "[('scaler', scaler), ('estimator', estimator)]"}), "(steps=[('scaler', scaler), ('estimator', estimator)])\n", (3138, 3192), False, 'from sklearn.pipeline import Pipeline\n'), ((3239, 3362), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'pipe', 'param_grid': 'parameters', 'cv': 'cv', 'refit': '(True)', 'scoring': 'scoring', 'verbose': 'verbose', 'n_jobs': 'n_jobs'}), '(estimator=pipe, param_grid=parameters, cv=cv, 
refit=True,\n scoring=scoring, verbose=verbose, n_jobs=n_jobs)\n', (3251, 3362), False, 'from sklearn.model_selection import train_test_split, GridSearchCV\n'), ((10352, 10602), 'pandas.read_json', 'pd.read_json', (['file'], {'lines': '(True)', 'convert_dates': '(False)', 'dtype': "{'userId': object, 'sessionId': object, 'page': object, 'auth': object,\n 'method': object, 'status': object, 'level': object, 'userAgent':\n object, 'gender': object, 'song': object}"}), "(file, lines=True, convert_dates=False, dtype={'userId': object,\n 'sessionId': object, 'page': object, 'auth': object, 'method': object,\n 'status': object, 'level': object, 'userAgent': object, 'gender':\n object, 'song': object})\n", (10364, 10602), True, 'import pandas as pd\n'), ((12549, 12640), 'pandas.get_dummies', 'pd.get_dummies', (['X'], {'columns': "['gender', 'level', 'method', 'userAgent', 'page', 'status']"}), "(X, columns=['gender', 'level', 'method', 'userAgent', 'page',\n 'status'])\n", (12563, 12640), True, 'import pandas as pd\n'), ((6363, 6372), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (6369, 6372), True, 'import numpy as np\n'), ((6432, 6441), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (6438, 6441), True, 'import numpy as np\n'), ((6532, 6541), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (6538, 6541), True, 'import numpy as np\n'), ((6542, 6551), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (6548, 6551), True, 'import numpy as np\n'), ((21351, 21379), 'numpy.abs', 'np.abs', (["coef['Coefficients']"], {}), "(coef['Coefficients'])\n", (21357, 21379), True, 'import numpy as np\n'), ((21697, 21725), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (21707, 21725), True, 'import matplotlib.pyplot as plt\n'), ((21870, 21903), 'matplotlib.pyplot.title', 'plt.title', (['"""Coefficient Rankings"""'], {}), "('Coefficient Rankings')\n", (21879, 21903), True, 'import matplotlib.pyplot as plt\n'), ((6347, 6356), 'numpy.max', 'np.max', 
(['x'], {}), '(x)\n', (6353, 6356), True, 'import numpy as np\n'), ((6416, 6425), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (6422, 6425), True, 'import numpy as np\n'), ((21600, 21636), 'numpy.sign', 'np.sign', (["sorted_coef['Coefficients']"], {}), "(sorted_coef['Coefficients'])\n", (21607, 21636), True, 'import numpy as np\n'), ((3546, 3585), 'pandas.DataFrame', 'pd.DataFrame', (["clf.cv_results_['params']"], {}), "(clf.cv_results_['params'])\n", (3558, 3585), True, 'import pandas as pd\n'), ((3616, 3686), 'pandas.DataFrame', 'pd.DataFrame', (["clf.cv_results_['mean_test_score']"], {'columns': "['F1 Score']"}), "(clf.cv_results_['mean_test_score'], columns=['F1 Score'])\n", (3628, 3686), True, 'import pandas as pd\n')] |
# Import Modules
from keras.models import Sequential
from keras.layers import MaxPooling2D
from keras.layers import Conv2D
from keras.layers import Activation, Dropout, Flatten, Dense
import numpy as np
# Set Categories
categories = ["원피스", "블라우스", "코트", "롱자켓", "패딩", "티셔츠", "맨투맨", "니트", "자켓", "가디건",
"점퍼", "뷔스티", "스웨터", "남방", "스커트", "슬랙스", "린넨팬츠", "데님팬츠"]
num_cat = len(categories)
# Set Image Size
image_w = 64
image_h = 128
# Set train, test dataSet
train_X, test_X, train_Y, test_Y = np.load("./detailer_data.npy", allow_pickle=True)
train_X = train_X.astype("float") / 256
test_X = test_X.astype("float") / 256
# Construct CNN Model
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=train_X.shape[1:], padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_cat))
model.add(Activation('softmax'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# Save weight file
hdf5_file = "./detailer_model.hdf5"
model.fit(train_X, train_Y, batch_size=32, nb_epoch=10)
model.save_weights(hdf5_file)
# Evaluation of model
score = model.evaluate(test_X, test_Y)
print('loss=', score[0]) # loss
print('accuracy=', score[1]) # acc
| [
"numpy.load",
"keras.layers.Activation",
"keras.layers.Dropout",
"keras.layers.Flatten",
"keras.layers.Dense",
"keras.layers.Conv2D",
"keras.models.Sequential",
"keras.layers.MaxPooling2D"
] | [((505, 554), 'numpy.load', 'np.load', (['"""./detailer_data.npy"""'], {'allow_pickle': '(True)'}), "('./detailer_data.npy', allow_pickle=True)\n", (512, 554), True, 'import numpy as np\n'), ((664, 676), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (674, 676), False, 'from keras.models import Sequential\n'), ((687, 752), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'input_shape': 'train_X.shape[1:]', 'padding': '"""same"""'}), "(32, (3, 3), input_shape=train_X.shape[1:], padding='same')\n", (693, 752), False, 'from keras.layers import Conv2D\n'), ((764, 782), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (774, 782), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((794, 824), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (806, 824), False, 'from keras.layers import MaxPooling2D\n'), ((836, 849), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (843, 849), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((861, 895), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (867, 895), False, 'from keras.layers import Conv2D\n'), ((907, 925), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (917, 925), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((937, 955), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (943, 955), False, 'from keras.layers import Conv2D\n'), ((967, 997), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (979, 997), False, 'from keras.layers import MaxPooling2D\n'), ((1009, 1022), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (1016, 1022), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((1034, 1043), 'keras.layers.Flatten', 'Flatten', 
([], {}), '()\n', (1041, 1043), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((1055, 1065), 'keras.layers.Dense', 'Dense', (['(512)'], {}), '(512)\n', (1060, 1065), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((1077, 1095), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1087, 1095), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((1107, 1119), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1114, 1119), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((1131, 1145), 'keras.layers.Dense', 'Dense', (['num_cat'], {}), '(num_cat)\n', (1136, 1145), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((1157, 1178), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (1167, 1178), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n')] |
import numpy as np
import sys
from .reference_signal import *
from .helper import *
class Amplifier:
"""
A software Lock-in Amplifier
"""
def __init__(self, cutoff, pbar = True):
"""
Takes in a cutoff frequency (float) as an input
as well as whether or not to display the progress
bar.
"""
self.cutoff = cutoff
self.pbar = pbar
def update_cutoff(self, new_cutoff):
"""
Changes cutoff frequency to new value and returns
new value
"""
self.cutoff = new_cutoff
return new_cutoff
def amplify(self, references, signal_input, fit_ref = True,
num_windows = 1, window_size = 1, interpolate = False):
"""
Performs simultaneous lock-in. See the docstrings in helper.py and
the tutorial example for a more detailed description of the input
parameters and outputs. The docstring for the lock_in function in
helper.py might be helpful.
"""
#Fits the reference signals to sine waves.
if fit_ref:
ref_vals = fit(references)
magnitudes = []
angles = []
mag_errors = []
ang_errors = []
fit_vals = {'frequencies' : [], 'phases' : []}
for fit_params in ref_vals:
est_freq, est_phase, est_offset, est_amp = fit_params[0],\
fit_params[1], fit_params[2], fit_params[3]
fit_vals['frequencies'].append(est_freq)
fit_vals['phases'].append(est_phase)
#Timestamps
time = np.asarray(signal_input['time'])
signal = np.asarray(signal_input['signal'])
size = signal.shape
dim = len(signal.shape)
#Reshaping the n-dimensional input into a 1D array for each timestamp
arr_len = 1
for i in range(1, dim):
arr_len *= size[i]
signal = np.reshape(signal, (size[0], arr_len))
#Applies lock-in with errorbars
curr_magnitudes, curr_angles, curr_mag_err, curr_phase_err, indices = lock_in(self, signal,
time, est_freq, est_phase, num_windows, window_size, interpolate)
#Applies lock-in for results - only necessary if there is more than one window.
if num_windows != 1:
curr_magnitudes, curr_angles, _, _, _ = lock_in(self,signal,
time, est_freq, est_phase, num_windows = 1, window_size = 1,
interpolate = interpolate)
magnitudes.append(curr_magnitudes)
angles.append(curr_angles)
mag_errors.append(curr_mag_err)
ang_errors.append(curr_phase_err)
i = 0
out = {'ref. fit params' : fit_vals}
if num_windows != 1:
out['indices'] = indices
while i < len(magnitudes):
label = 'reference ' + str(i + 1)
#reshaping output into their original form without the time dependence
mags = np.reshape(magnitudes[i], size[1: dim])
phases = np.reshape(angles[i], size[1: dim])
out[label] = {'magnitudes' : mags.tolist(), 'phases' : phases.tolist()}
if num_windows != 1:
magnitude_stds = np.reshape(mag_errors[i], size[1: dim])
phase_stds = np.reshape(ang_errors[i], size[1: dim])
out[label]['magnitude stds'] = magnitude_stds.tolist()
out[label]['phase stds'] = phase_stds.tolist()
i += 1
else:
magnitudes = []
angles = []
mag_errors = []
ang_errors = []
for ref in references:
ref_time = np.asarray(ref['time'])
ref_sig = np.asarray(ref['signal'])
sig_time = np.asarray(signal_input['time'])
signal = np.asarray(signal_input['signal'])
size = signal.shape
dim = len(signal.shape)
#Reshaping the n-dimensional input into a 1D array for each timestamp
arr_len = 1
for i in range(1, dim):
arr_len *= size[i]
signal = np.reshape(signal, (size[0], arr_len))
#Applies lock-in with errorbars
curr_magnitudes, curr_mag_err, indices = lock_in_no_fit(self, signal, sig_time, ref_sig,
ref_time, num_windows, window_size, interpolate)
#Applies lock-in for results - only necessary if there is more than one window.
if num_windows != 1:
curr_magnitudes, _, _ = lock_in_no_fit(self,signal, sig_time, ref_sig,
ref_time, num_windows = 1, window_size = 1,
interpolate = interpolate)
magnitudes.append(curr_magnitudes)
mag_errors.append(curr_mag_err)
i = 0
out = {}
if num_windows != 1:
out['indices'] = indices
while i < len(magnitudes):
label = 'reference ' + str(i + 1)
#reshaping output into their original form without the time dependence
mags = np.reshape(magnitudes[i], size[1: dim])
out[label] = {'magnitudes' : mags.tolist()}
if num_windows != 1:
magnitude_stds = np.reshape(mag_errors[i], size[1: dim])
out[label]['magnitude stds'] = magnitude_stds.tolist()
i += 1
return out | [
"numpy.asarray",
"numpy.reshape"
] | [((1351, 1383), 'numpy.asarray', 'np.asarray', (["signal_input['time']"], {}), "(signal_input['time'])\n", (1361, 1383), True, 'import numpy as np\n'), ((1397, 1431), 'numpy.asarray', 'np.asarray', (["signal_input['signal']"], {}), "(signal_input['signal'])\n", (1407, 1431), True, 'import numpy as np\n'), ((1640, 1678), 'numpy.reshape', 'np.reshape', (['signal', '(size[0], arr_len)'], {}), '(signal, (size[0], arr_len))\n', (1650, 1678), True, 'import numpy as np\n'), ((2562, 2600), 'numpy.reshape', 'np.reshape', (['magnitudes[i]', 'size[1:dim]'], {}), '(magnitudes[i], size[1:dim])\n', (2572, 2600), True, 'import numpy as np\n'), ((2615, 2649), 'numpy.reshape', 'np.reshape', (['angles[i]', 'size[1:dim]'], {}), '(angles[i], size[1:dim])\n', (2625, 2649), True, 'import numpy as np\n'), ((3121, 3144), 'numpy.asarray', 'np.asarray', (["ref['time']"], {}), "(ref['time'])\n", (3131, 3144), True, 'import numpy as np\n'), ((3159, 3184), 'numpy.asarray', 'np.asarray', (["ref['signal']"], {}), "(ref['signal'])\n", (3169, 3184), True, 'import numpy as np\n'), ((3200, 3232), 'numpy.asarray', 'np.asarray', (["signal_input['time']"], {}), "(signal_input['time'])\n", (3210, 3232), True, 'import numpy as np\n'), ((3246, 3280), 'numpy.asarray', 'np.asarray', (["signal_input['signal']"], {}), "(signal_input['signal'])\n", (3256, 3280), True, 'import numpy as np\n'), ((3489, 3527), 'numpy.reshape', 'np.reshape', (['signal', '(size[0], arr_len)'], {}), '(signal, (size[0], arr_len))\n', (3499, 3527), True, 'import numpy as np\n'), ((4288, 4326), 'numpy.reshape', 'np.reshape', (['magnitudes[i]', 'size[1:dim]'], {}), '(magnitudes[i], size[1:dim])\n', (4298, 4326), True, 'import numpy as np\n'), ((2774, 2812), 'numpy.reshape', 'np.reshape', (['mag_errors[i]', 'size[1:dim]'], {}), '(mag_errors[i], size[1:dim])\n', (2784, 2812), True, 'import numpy as np\n'), ((2832, 2870), 'numpy.reshape', 'np.reshape', (['ang_errors[i]', 'size[1:dim]'], {}), '(ang_errors[i], size[1:dim])\n', (2842, 
2870), True, 'import numpy as np\n'), ((4423, 4461), 'numpy.reshape', 'np.reshape', (['mag_errors[i]', 'size[1:dim]'], {}), '(mag_errors[i], size[1:dim])\n', (4433, 4461), True, 'import numpy as np\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
def _assert_variables(
test_case, expected_global=None, expected_model=None,
expected_trainable=None):
test_case.assertItemsEqual(
[] if expected_global is None else expected_global,
[k.name for k in tf.global_variables()])
test_case.assertItemsEqual(
[] if expected_model is None else expected_model,
[k.name for k in tf.model_variables()])
test_case.assertItemsEqual(
[] if expected_trainable is None else expected_trainable,
[k.name for k in tf.trainable_variables()])
def _assert_no_variables(test_case):
_assert_variables(test_case, set([]), set([]), set([]))
class RegressionModelHeadTest(tf.test.TestCase):
def _assert_metrics(self, model_fn_ops):
self.assertItemsEqual((
"loss",
), six.iterkeys(model_fn_ops.eval_metric_ops))
# TODO(zakaria): test multilabel regresssion.
def testRegression(self):
head = head_lib._regression_head()
with tf.Graph().as_default(), tf.Session() as sess:
prediction = tf.constant([[1.], [1.], [3.]])
labels = tf.constant([[0.], [1.], [1.]])
model_fn_ops = head.head_ops({}, labels,
tf.contrib.learn.ModeKeys.TRAIN,
_noop_train_op, logits=prediction)
self._assert_metrics(model_fn_ops)
_assert_no_variables(self)
self.assertAlmostEqual(5. / 3, sess.run(model_fn_ops.loss))
model_fn_ops = head.head_ops({}, labels,
tf.contrib.learn.ModeKeys.EVAL,
_noop_train_op, logits=prediction)
self.assertIsNone(model_fn_ops.train_op)
def testRegressionWithWeights(self):
head = head_lib._regression_head(
weight_column_name="label_weight")
with tf.Graph().as_default(), tf.Session() as sess:
features = {"label_weight": tf.constant([[2.], [5.], [0.]])}
prediction = tf.constant([[1.], [1.], [3.]])
labels = tf.constant([[0.], [1.], [1.]])
model_fn_ops = head.head_ops(features, labels,
tf.contrib.learn.ModeKeys.TRAIN,
_noop_train_op, logits=prediction)
self._assert_metrics(model_fn_ops)
_assert_no_variables(self)
self.assertAlmostEqual(2. / 3, sess.run(model_fn_ops.loss), places=3)
def testRegressionWithCenteredBias(self):
head = head_lib._regression_head(
weight_column_name="label_weight", enable_centered_bias=True)
with tf.Graph().as_default(), tf.Session() as sess:
features = {"label_weight": tf.constant([[2.], [5.], [0.]])}
prediction = tf.constant([[1.], [1.], [3.]])
labels = tf.constant([[0.], [1.], [1.]])
model_fn_ops = head.head_ops(features, labels,
tf.contrib.learn.ModeKeys.TRAIN,
_noop_train_op, logits=prediction)
self._assert_metrics(model_fn_ops)
_assert_variables(self, expected_global=(
"centered_bias_weight:0",
"centered_bias_weight/Adagrad:0",
), expected_trainable=(
"centered_bias_weight:0",
))
tf.global_variables_initializer().run()
self.assertAlmostEqual(2. / 3, sess.run(model_fn_ops.loss), places=3)
def testErrorInSparseTensorLabels(self):
head = head_lib._regression_head()
with tf.Graph().as_default():
prediction = tf.constant([[1.], [1.], [3.]])
labels = tf.SparseTensor(
indices=tf.constant([[0, 0], [1, 0], [2, 0]], dtype=tf.int64),
values=tf.constant([0., 1., 1.]),
shape=[3, 1])
with self.assertRaisesRegexp(
ValueError, "SparseTensor is not supported as labels."):
head.head_ops({}, labels, tf.contrib.learn.ModeKeys.TRAIN,
_noop_train_op, logits=prediction)
class MultiLabelModelHeadTest(tf.test.TestCase):
def _assert_metrics(self, model_fn_ops):
self.assertItemsEqual((
"accuracy",
"loss",
), six.iterkeys(model_fn_ops.eval_metric_ops))
def testMultiLabel(self):
head = head_lib._multi_label_head(n_classes=3)
with tf.Graph().as_default(), tf.Session() as sess:
logits = tf.constant([[1., 0., 0.]])
labels = tf.constant([[0, 0, 1]])
model_fn_ops = head.head_ops({}, labels,
tf.contrib.learn.ModeKeys.TRAIN,
_noop_train_op, logits=logits)
self._assert_metrics(model_fn_ops)
_assert_no_variables(self)
self.assertAlmostEqual(0.89985204, sess.run(model_fn_ops.loss))
model_fn_ops = head.head_ops({}, labels,
tf.contrib.learn.ModeKeys.EVAL,
_noop_train_op, logits=logits)
self.assertIsNone(model_fn_ops.train_op)
def testMultiLabelWithWeight(self):
head = head_lib._multi_label_head(
n_classes=3, weight_column_name="label_weight")
with tf.Graph().as_default(), tf.Session() as sess:
features = {"label_weight": tf.constant([0.1])}
logits = tf.constant([[1., 0., 0.]])
labels = tf.constant([[0, 0, 1]])
model_fn_ops = head.head_ops(features, labels,
tf.contrib.learn.ModeKeys.TRAIN,
_noop_train_op, logits=logits)
self._assert_metrics(model_fn_ops)
_assert_no_variables(self)
self.assertAlmostEqual(0.089985214, sess.run(model_fn_ops.loss))
def testMultiLabelWithCenteredBias(self):
head = head_lib._multi_label_head(n_classes=3, enable_centered_bias=True)
with tf.Graph().as_default(), tf.Session() as sess:
logits = tf.constant([[1., 0., 0.]])
labels = tf.constant([[0, 0, 1]])
model_fn_ops = head.head_ops({}, labels,
tf.contrib.learn.ModeKeys.TRAIN,
_noop_train_op, logits=logits)
self._assert_metrics(model_fn_ops)
_assert_variables(self, expected_global=(
"centered_bias_weight:0",
"centered_bias_weight/Adagrad:0",
), expected_trainable=(
"centered_bias_weight:0",
))
tf.global_variables_initializer().run()
self.assertAlmostEqual(0.89985204, sess.run(model_fn_ops.loss))
class MultiClassModelHeadTest(tf.test.TestCase):
  """Tests for the multi-class head (binary sigmoid or n-way softmax)."""

  def _assert_binary_metrics(self, model_fn_ops):
    # The binary (n_classes=2) head exposes the full binary-metric suite.
    self.assertItemsEqual((
        "accuracy",
        "accuracy/baseline_label_mean",
        "accuracy/threshold_0.500000_mean",
        "auc",
        "labels/actual_label_mean",
        "labels/prediction_mean",
        "loss",
        "precision/positive_threshold_0.500000_mean",
        "recall/positive_threshold_0.500000_mean",
    ), six.iterkeys(model_fn_ops.eval_metric_ops))

  def testBinaryClassification(self):
    """TRAIN mode yields the expected logloss; EVAL creates no train op."""
    head = head_lib._multi_class_head(n_classes=2)
    with tf.Graph().as_default(), tf.Session() as sess:
      logits = tf.constant([[1.], [1.]])
      labels = tf.constant([[1.], [0.]])
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.head_ops({}, labels,
                                   tf.contrib.learn.ModeKeys.TRAIN,
                                   _noop_train_op, logits=logits)
      self._assert_binary_metrics(model_fn_ops)
      _assert_no_variables(self)
      self.assertAlmostEqual(0.81326175, sess.run(model_fn_ops.loss),
                             delta=1e-6)
      model_fn_ops = head.head_ops({}, labels,
                                   tf.contrib.learn.ModeKeys.EVAL,
                                   _noop_train_op, logits=logits)
      self.assertIsNone(model_fn_ops.train_op)

  def testErrorInSparseTensorLabels(self):
    """SparseTensor labels are rejected with a ValueError."""
    head = head_lib._multi_class_head(n_classes=2)
    with tf.Graph().as_default():
      prediction = tf.constant([[1.], [1.], [3.]])
      labels = tf.SparseTensor(
          indices=tf.constant([[0, 0], [1, 0], [2, 0]], dtype=tf.int64),
          values=tf.constant([0, 1, 1]),
          shape=[3, 1])
      with self.assertRaisesRegexp(
          ValueError, "SparseTensor is not supported as labels."):
        head.head_ops({}, labels, tf.contrib.learn.ModeKeys.TRAIN,
                      _noop_train_op, logits=prediction)

  def testBinaryClassificationWithWeights(self):
    """Loss is the weighted sum divided by the batch size (weights 1 and 0)."""
    head = head_lib._multi_class_head(
        n_classes=2, weight_column_name="label_weight")
    with tf.Graph().as_default(), tf.Session() as sess:
      features = {"label_weight": tf.constant([[1.], [0.]])}
      logits = tf.constant([[1.], [1.]])
      labels = tf.constant([[1.], [0.]])
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.head_ops(features, labels,
                                   tf.contrib.learn.ModeKeys.TRAIN,
                                   _noop_train_op, logits=logits)
      self._assert_binary_metrics(model_fn_ops)
      _assert_no_variables(self)
      # Only the first example contributes; its logloss averaged over 2.
      self.assertAlmostEqual(.31326166 / 2, sess.run(model_fn_ops.loss),
                             delta=1e-6)

  def testBinaryClassificationWithCenteredBias(self):
    """Centered bias adds a trainable bias variable plus its Adagrad slot."""
    head = head_lib._multi_class_head(n_classes=2, enable_centered_bias=True)
    with tf.Graph().as_default(), tf.Session() as sess:
      logits = tf.constant([[1.], [1.]])
      labels = tf.constant([[1.], [0.]])
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.head_ops({}, labels,
                                   tf.contrib.learn.ModeKeys.TRAIN,
                                   _noop_train_op, logits=logits)
      self._assert_binary_metrics(model_fn_ops)
      _assert_variables(self, expected_global=(
          "centered_bias_weight:0",
          "centered_bias_weight/Adagrad:0",
      ), expected_trainable=(
          "centered_bias_weight:0",
      ))
      tf.global_variables_initializer().run()
      self.assertAlmostEqual(0.81326175, sess.run(model_fn_ops.loss),
                             delta=1e-6)

  def _assert_multi_class_metrics(self, model_fn_ops):
    # The n>2 (softmax) head exposes only these eval metrics.
    self.assertItemsEqual((
        "accuracy",
        "loss",
    ), six.iterkeys(model_fn_ops.eval_metric_ops))

  def testMultiClass(self):
    """n_classes=3 softmax head: expected loss; EVAL creates no train op."""
    n_classes = 3
    head = head_lib._multi_class_head(n_classes=n_classes)
    with tf.Graph().as_default(), tf.Session() as sess:
      logits = tf.constant([[1., 0., 0.]])
      labels = tf.constant([2])
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.head_ops({}, labels,
                                   tf.contrib.learn.ModeKeys.TRAIN,
                                   _noop_train_op, logits=logits)
      self._assert_multi_class_metrics(model_fn_ops)
      _assert_no_variables(self)
      self.assertAlmostEqual(1.5514446, sess.run(model_fn_ops.loss))
      model_fn_ops = head.head_ops({}, labels,
                                   tf.contrib.learn.ModeKeys.EVAL,
                                   _noop_train_op, logits=logits)
      self.assertIsNone(model_fn_ops.train_op)

  def testMultiClassWithWeight(self):
    """A per-example weight column scales the softmax loss linearly."""
    n_classes = 3
    head = head_lib._multi_class_head(
        n_classes=n_classes, weight_column_name="label_weight")
    with tf.Graph().as_default(), tf.Session() as sess:
      features = {"label_weight": tf.constant([0.1])}
      logits = tf.constant([[1., 0., 0.]])
      labels = tf.constant([2])
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.head_ops(features, labels,
                                   tf.contrib.learn.ModeKeys.TRAIN,
                                   _noop_train_op, logits=logits)
      self._assert_multi_class_metrics(model_fn_ops)
      _assert_no_variables(self)
      # 0.1 * the unweighted loss asserted in testMultiClass.
      self.assertAlmostEqual(.15514446, sess.run(model_fn_ops.loss))

  def testInvalidNClasses(self):
    """n_classes must be > 1; None/negative/0/1 all raise ValueError."""
    for n_classes in (None, -1, 0, 1):
      with self.assertRaisesRegexp(ValueError, "n_classes must be > 1"):
        head_lib._multi_class_head(n_classes=n_classes)
class BinarySvmModelHeadTest(tf.test.TestCase):
  """Tests for the binary SVM head (hinge loss)."""

  def setUp(self):
    # Prediction for first example is in the right side of the hyperplane
    # (i.e., < 0) but it is within the [-1,1] margin. There is a 0.5 loss
    # incurred by this example. The 2nd prediction is outside the margin so it
    # incurs no loss at all.
    self._predictions = ((-0.5,), (1.2,))
    self._labels = (0, 1)
    self._expected_losses = (0.5, 0.0)

  def _assert_metrics(self, model_fn_ops):
    # The SVM head exposes exactly these eval metrics.
    self.assertItemsEqual((
        "accuracy",
        "loss",
    ), six.iterkeys(model_fn_ops.eval_metric_ops))

  def testBinarySVMDefaultWeights(self):
    """Unweighted loss is the mean of the per-example hinge losses."""
    head = head_lib._binary_svm_head()
    with tf.Graph().as_default(), tf.Session():
      predictions = tf.constant(self._predictions)
      labels = tf.constant(self._labels)
      model_fn_ops = head.head_ops({}, labels,
                                   tf.contrib.learn.ModeKeys.TRAIN,
                                   _noop_train_op, logits=predictions)
      self._assert_metrics(model_fn_ops)
      _assert_no_variables(self)
      self.assertAlmostEqual(
          np.average(self._expected_losses), model_fn_ops.loss.eval())
      model_fn_ops = head.head_ops({}, labels,
                                   tf.contrib.learn.ModeKeys.EVAL,
                                   _noop_train_op, logits=predictions)
      # EVAL mode must not create a train op.
      self.assertIsNone(model_fn_ops.train_op)

  def testBinarySVMWithWeights(self):
    """Weighted loss is sum(weight_i * loss_i) / batch_size."""
    head = head_lib._binary_svm_head(weight_column_name="weights")
    with tf.Graph().as_default(), tf.Session():
      predictions = tf.constant(self._predictions)
      labels = tf.constant(self._labels)
      weights = (7.0, 11.0)
      features = {"weights": tf.constant(weights)}
      model_fn_ops = head.head_ops(features, labels,
                                   tf.contrib.learn.ModeKeys.TRAIN,
                                   _noop_train_op, logits=predictions)
      self._assert_metrics(model_fn_ops)
      _assert_no_variables(self)
      self.assertAlmostEqual(
          np.sum(np.multiply(weights, self._expected_losses)) / 2.0,
          model_fn_ops.loss.eval())

  def testBinarySVMWithCenteredBias(self):
    """Centered bias adds a trainable bias variable plus its Adagrad slot."""
    head = head_lib._binary_svm_head(enable_centered_bias=True)
    with tf.Graph().as_default(), tf.Session():
      predictions = tf.constant(self._predictions)
      labels = tf.constant(self._labels)
      model_fn_ops = head.head_ops({}, labels,
                                   tf.contrib.learn.ModeKeys.TRAIN,
                                   _noop_train_op, logits=predictions)
      self._assert_metrics(model_fn_ops)
      _assert_variables(self, expected_global=(
          "centered_bias_weight:0",
          "centered_bias_weight/Adagrad:0",
      ), expected_trainable=(
          "centered_bias_weight:0",
      ))
      # The bias variable must be initialized before the loss can be run.
      tf.global_variables_initializer().run()
      self.assertAlmostEqual(
          np.average(self._expected_losses), model_fn_ops.loss.eval())
def _noop_train_op(unused_loss):
  """Train-op factory passed to head_ops in tests; ignores the loss."""
  return tf.no_op()
# Run all test cases in this module when executed directly.
if __name__ == "__main__":
  tf.test.main()
| [
"tensorflow.test.main",
"tensorflow.contrib.learn.python.learn.estimators.head._regression_head",
"numpy.average",
"tensorflow.contrib.learn.python.learn.estimators.head._binary_svm_head",
"tensorflow.trainable_variables",
"six.iterkeys",
"tensorflow.global_variables_initializer",
"tensorflow.contrib.... | [((16038, 16048), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (16046, 16048), True, 'import tensorflow as tf\n'), ((16079, 16093), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (16091, 16093), True, 'import tensorflow as tf\n'), ((1857, 1884), 'tensorflow.contrib.learn.python.learn.estimators.head._regression_head', 'head_lib._regression_head', ([], {}), '()\n', (1882, 1884), True, 'from tensorflow.contrib.learn.python.learn.estimators import head as head_lib\n'), ((2647, 2707), 'tensorflow.contrib.learn.python.learn.estimators.head._regression_head', 'head_lib._regression_head', ([], {'weight_column_name': '"""label_weight"""'}), "(weight_column_name='label_weight')\n", (2672, 2707), True, 'from tensorflow.contrib.learn.python.learn.estimators import head as head_lib\n'), ((3335, 3426), 'tensorflow.contrib.learn.python.learn.estimators.head._regression_head', 'head_lib._regression_head', ([], {'weight_column_name': '"""label_weight"""', 'enable_centered_bias': '(True)'}), "(weight_column_name='label_weight',\n enable_centered_bias=True)\n", (3360, 3426), True, 'from tensorflow.contrib.learn.python.learn.estimators import head as head_lib\n'), ((4265, 4292), 'tensorflow.contrib.learn.python.learn.estimators.head._regression_head', 'head_lib._regression_head', ([], {}), '()\n', (4290, 4292), True, 'from tensorflow.contrib.learn.python.learn.estimators import head as head_lib\n'), ((5028, 5067), 'tensorflow.contrib.learn.python.learn.estimators.head._multi_label_head', 'head_lib._multi_label_head', ([], {'n_classes': '(3)'}), '(n_classes=3)\n', (5054, 5067), True, 'from tensorflow.contrib.learn.python.learn.estimators import head as head_lib\n'), ((5810, 5884), 'tensorflow.contrib.learn.python.learn.estimators.head._multi_label_head', 'head_lib._multi_label_head', ([], {'n_classes': '(3)', 'weight_column_name': '"""label_weight"""'}), "(n_classes=3, weight_column_name='label_weight')\n", (5836, 5884), True, 'from 
tensorflow.contrib.learn.python.learn.estimators import head as head_lib\n'), ((6475, 6541), 'tensorflow.contrib.learn.python.learn.estimators.head._multi_label_head', 'head_lib._multi_label_head', ([], {'n_classes': '(3)', 'enable_centered_bias': '(True)'}), '(n_classes=3, enable_centered_bias=True)\n', (6501, 6541), True, 'from tensorflow.contrib.learn.python.learn.estimators import head as head_lib\n'), ((7763, 7802), 'tensorflow.contrib.learn.python.learn.estimators.head._multi_class_head', 'head_lib._multi_class_head', ([], {'n_classes': '(2)'}), '(n_classes=2)\n', (7789, 7802), True, 'from tensorflow.contrib.learn.python.learn.estimators import head as head_lib\n'), ((8692, 8731), 'tensorflow.contrib.learn.python.learn.estimators.head._multi_class_head', 'head_lib._multi_class_head', ([], {'n_classes': '(2)'}), '(n_classes=2)\n', (8718, 8731), True, 'from tensorflow.contrib.learn.python.learn.estimators import head as head_lib\n'), ((9275, 9349), 'tensorflow.contrib.learn.python.learn.estimators.head._multi_class_head', 'head_lib._multi_class_head', ([], {'n_classes': '(2)', 'weight_column_name': '"""label_weight"""'}), "(n_classes=2, weight_column_name='label_weight')\n", (9301, 9349), True, 'from tensorflow.contrib.learn.python.learn.estimators import head as head_lib\n'), ((10102, 10168), 'tensorflow.contrib.learn.python.learn.estimators.head._multi_class_head', 'head_lib._multi_class_head', ([], {'n_classes': '(2)', 'enable_centered_bias': '(True)'}), '(n_classes=2, enable_centered_bias=True)\n', (10128, 10168), True, 'from tensorflow.contrib.learn.python.learn.estimators import head as head_lib\n'), ((11221, 11268), 'tensorflow.contrib.learn.python.learn.estimators.head._multi_class_head', 'head_lib._multi_class_head', ([], {'n_classes': 'n_classes'}), '(n_classes=n_classes)\n', (11247, 11268), True, 'from tensorflow.contrib.learn.python.learn.estimators import head as head_lib\n'), ((12127, 12214), 
'tensorflow.contrib.learn.python.learn.estimators.head._multi_class_head', 'head_lib._multi_class_head', ([], {'n_classes': 'n_classes', 'weight_column_name': '"""label_weight"""'}), "(n_classes=n_classes, weight_column_name=\n 'label_weight')\n", (12153, 12214), True, 'from tensorflow.contrib.learn.python.learn.estimators import head as head_lib\n'), ((13689, 13716), 'tensorflow.contrib.learn.python.learn.estimators.head._binary_svm_head', 'head_lib._binary_svm_head', ([], {}), '()\n', (13714, 13716), True, 'from tensorflow.contrib.learn.python.learn.estimators import head as head_lib\n'), ((14493, 14548), 'tensorflow.contrib.learn.python.learn.estimators.head._binary_svm_head', 'head_lib._binary_svm_head', ([], {'weight_column_name': '"""weights"""'}), "(weight_column_name='weights')\n", (14518, 14548), True, 'from tensorflow.contrib.learn.python.learn.estimators import head as head_lib\n'), ((15224, 15276), 'tensorflow.contrib.learn.python.learn.estimators.head._binary_svm_head', 'head_lib._binary_svm_head', ([], {'enable_centered_bias': '(True)'}), '(enable_centered_bias=True)\n', (15249, 15276), True, 'from tensorflow.contrib.learn.python.learn.estimators import head as head_lib\n'), ((1725, 1767), 'six.iterkeys', 'six.iterkeys', (['model_fn_ops.eval_metric_ops'], {}), '(model_fn_ops.eval_metric_ops)\n', (1737, 1767), False, 'import six\n'), ((1919, 1931), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1929, 1931), True, 'import tensorflow as tf\n'), ((1960, 1994), 'tensorflow.constant', 'tf.constant', (['[[1.0], [1.0], [3.0]]'], {}), '([[1.0], [1.0], [3.0]])\n', (1971, 1994), True, 'import tensorflow as tf\n'), ((2007, 2041), 'tensorflow.constant', 'tf.constant', (['[[0.0], [1.0], [1.0]]'], {}), '([[0.0], [1.0], [1.0]])\n', (2018, 2041), True, 'import tensorflow as tf\n'), ((2751, 2763), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2761, 2763), True, 'import tensorflow as tf\n'), ((2859, 2893), 'tensorflow.constant', 'tf.constant', 
(['[[1.0], [1.0], [3.0]]'], {}), '([[1.0], [1.0], [3.0]])\n', (2870, 2893), True, 'import tensorflow as tf\n'), ((2906, 2940), 'tensorflow.constant', 'tf.constant', (['[[0.0], [1.0], [1.0]]'], {}), '([[0.0], [1.0], [1.0]])\n', (2917, 2940), True, 'import tensorflow as tf\n'), ((3466, 3478), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3476, 3478), True, 'import tensorflow as tf\n'), ((3574, 3608), 'tensorflow.constant', 'tf.constant', (['[[1.0], [1.0], [3.0]]'], {}), '([[1.0], [1.0], [3.0]])\n', (3585, 3608), True, 'import tensorflow as tf\n'), ((3621, 3655), 'tensorflow.constant', 'tf.constant', (['[[0.0], [1.0], [1.0]]'], {}), '([[0.0], [1.0], [1.0]])\n', (3632, 3655), True, 'import tensorflow as tf\n'), ((4346, 4380), 'tensorflow.constant', 'tf.constant', (['[[1.0], [1.0], [3.0]]'], {}), '([[1.0], [1.0], [3.0]])\n', (4357, 4380), True, 'import tensorflow as tf\n'), ((4944, 4986), 'six.iterkeys', 'six.iterkeys', (['model_fn_ops.eval_metric_ops'], {}), '(model_fn_ops.eval_metric_ops)\n', (4956, 4986), False, 'import six\n'), ((5102, 5114), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5112, 5114), True, 'import tensorflow as tf\n'), ((5139, 5169), 'tensorflow.constant', 'tf.constant', (['[[1.0, 0.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0]])\n', (5150, 5169), True, 'import tensorflow as tf\n'), ((5182, 5206), 'tensorflow.constant', 'tf.constant', (['[[0, 0, 1]]'], {}), '([[0, 0, 1]])\n', (5193, 5206), True, 'import tensorflow as tf\n'), ((5928, 5940), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5938, 5940), True, 'import tensorflow as tf\n'), ((6019, 6049), 'tensorflow.constant', 'tf.constant', (['[[1.0, 0.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0]])\n', (6030, 6049), True, 'import tensorflow as tf\n'), ((6062, 6086), 'tensorflow.constant', 'tf.constant', (['[[0, 0, 1]]'], {}), '([[0, 0, 1]])\n', (6073, 6086), True, 'import tensorflow as tf\n'), ((6576, 6588), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (6586, 6588), True, 'import 
tensorflow as tf\n'), ((6613, 6643), 'tensorflow.constant', 'tf.constant', (['[[1.0, 0.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0]])\n', (6624, 6643), True, 'import tensorflow as tf\n'), ((6656, 6680), 'tensorflow.constant', 'tf.constant', (['[[0, 0, 1]]'], {}), '([[0, 0, 1]])\n', (6667, 6680), True, 'import tensorflow as tf\n'), ((7669, 7711), 'six.iterkeys', 'six.iterkeys', (['model_fn_ops.eval_metric_ops'], {}), '(model_fn_ops.eval_metric_ops)\n', (7681, 7711), False, 'import six\n'), ((7837, 7849), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7847, 7849), True, 'import tensorflow as tf\n'), ((7874, 7901), 'tensorflow.constant', 'tf.constant', (['[[1.0], [1.0]]'], {}), '([[1.0], [1.0]])\n', (7885, 7901), True, 'import tensorflow as tf\n'), ((7915, 7942), 'tensorflow.constant', 'tf.constant', (['[[1.0], [0.0]]'], {}), '([[1.0], [0.0]])\n', (7926, 7942), True, 'import tensorflow as tf\n'), ((8785, 8819), 'tensorflow.constant', 'tf.constant', (['[[1.0], [1.0], [3.0]]'], {}), '([[1.0], [1.0], [3.0]])\n', (8796, 8819), True, 'import tensorflow as tf\n'), ((9393, 9405), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (9403, 9405), True, 'import tensorflow as tf\n'), ((9491, 9518), 'tensorflow.constant', 'tf.constant', (['[[1.0], [1.0]]'], {}), '([[1.0], [1.0]])\n', (9502, 9518), True, 'import tensorflow as tf\n'), ((9532, 9559), 'tensorflow.constant', 'tf.constant', (['[[1.0], [0.0]]'], {}), '([[1.0], [0.0]])\n', (9543, 9559), True, 'import tensorflow as tf\n'), ((10203, 10215), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (10213, 10215), True, 'import tensorflow as tf\n'), ((10240, 10267), 'tensorflow.constant', 'tf.constant', (['[[1.0], [1.0]]'], {}), '([[1.0], [1.0]])\n', (10251, 10267), True, 'import tensorflow as tf\n'), ((10281, 10308), 'tensorflow.constant', 'tf.constant', (['[[1.0], [0.0]]'], {}), '([[1.0], [0.0]])\n', (10292, 10308), True, 'import tensorflow as tf\n'), ((11119, 11161), 'six.iterkeys', 'six.iterkeys', 
(['model_fn_ops.eval_metric_ops'], {}), '(model_fn_ops.eval_metric_ops)\n', (11131, 11161), False, 'import six\n'), ((11303, 11315), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (11313, 11315), True, 'import tensorflow as tf\n'), ((11340, 11370), 'tensorflow.constant', 'tf.constant', (['[[1.0, 0.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0]])\n', (11351, 11370), True, 'import tensorflow as tf\n'), ((11383, 11399), 'tensorflow.constant', 'tf.constant', (['[2]'], {}), '([2])\n', (11394, 11399), True, 'import tensorflow as tf\n'), ((12253, 12265), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (12263, 12265), True, 'import tensorflow as tf\n'), ((12344, 12374), 'tensorflow.constant', 'tf.constant', (['[[1.0, 0.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0]])\n', (12355, 12374), True, 'import tensorflow as tf\n'), ((12387, 12403), 'tensorflow.constant', 'tf.constant', (['[2]'], {}), '([2])\n', (12398, 12403), True, 'import tensorflow as tf\n'), ((13592, 13634), 'six.iterkeys', 'six.iterkeys', (['model_fn_ops.eval_metric_ops'], {}), '(model_fn_ops.eval_metric_ops)\n', (13604, 13634), False, 'import six\n'), ((13751, 13763), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (13761, 13763), True, 'import tensorflow as tf\n'), ((13785, 13815), 'tensorflow.constant', 'tf.constant', (['self._predictions'], {}), '(self._predictions)\n', (13796, 13815), True, 'import tensorflow as tf\n'), ((13831, 13856), 'tensorflow.constant', 'tf.constant', (['self._labels'], {}), '(self._labels)\n', (13842, 13856), True, 'import tensorflow as tf\n'), ((14583, 14595), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (14593, 14595), True, 'import tensorflow as tf\n'), ((14617, 14647), 'tensorflow.constant', 'tf.constant', (['self._predictions'], {}), '(self._predictions)\n', (14628, 14647), True, 'import tensorflow as tf\n'), ((14663, 14688), 'tensorflow.constant', 'tf.constant', (['self._labels'], {}), '(self._labels)\n', (14674, 14688), True, 'import tensorflow as tf\n'), ((15311, 
15323), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (15321, 15323), True, 'import tensorflow as tf\n'), ((15345, 15375), 'tensorflow.constant', 'tf.constant', (['self._predictions'], {}), '(self._predictions)\n', (15356, 15375), True, 'import tensorflow as tf\n'), ((15391, 15416), 'tensorflow.constant', 'tf.constant', (['self._labels'], {}), '(self._labels)\n', (15402, 15416), True, 'import tensorflow as tf\n'), ((1182, 1203), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (1201, 1203), True, 'import tensorflow as tf\n'), ((1315, 1335), 'tensorflow.model_variables', 'tf.model_variables', ([], {}), '()\n', (1333, 1335), True, 'import tensorflow as tf\n'), ((1455, 1479), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (1477, 1479), True, 'import tensorflow as tf\n'), ((2807, 2841), 'tensorflow.constant', 'tf.constant', (['[[2.0], [5.0], [0.0]]'], {}), '([[2.0], [5.0], [0.0]])\n', (2818, 2841), True, 'import tensorflow as tf\n'), ((3522, 3556), 'tensorflow.constant', 'tf.constant', (['[[2.0], [5.0], [0.0]]'], {}), '([[2.0], [5.0], [0.0]])\n', (3533, 3556), True, 'import tensorflow as tf\n'), ((5984, 6002), 'tensorflow.constant', 'tf.constant', (['[0.1]'], {}), '([0.1])\n', (5995, 6002), True, 'import tensorflow as tf\n'), ((9449, 9476), 'tensorflow.constant', 'tf.constant', (['[[1.0], [0.0]]'], {}), '([[1.0], [0.0]])\n', (9460, 9476), True, 'import tensorflow as tf\n'), ((12309, 12327), 'tensorflow.constant', 'tf.constant', (['[0.1]'], {}), '([0.1])\n', (12320, 12327), True, 'import tensorflow as tf\n'), ((12996, 13043), 'tensorflow.contrib.learn.python.learn.estimators.head._multi_class_head', 'head_lib._multi_class_head', ([], {'n_classes': 'n_classes'}), '(n_classes=n_classes)\n', (13022, 13043), True, 'from tensorflow.contrib.learn.python.learn.estimators import head as head_lib\n'), ((14157, 14190), 'numpy.average', 'np.average', (['self._expected_losses'], {}), '(self._expected_losses)\n', (14167, 
14190), True, 'import numpy as np\n'), ((14746, 14766), 'tensorflow.constant', 'tf.constant', (['weights'], {}), '(weights)\n', (14757, 14766), True, 'import tensorflow as tf\n'), ((15933, 15966), 'numpy.average', 'np.average', (['self._expected_losses'], {}), '(self._expected_losses)\n', (15943, 15966), True, 'import numpy as np\n'), ((1894, 1904), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1902, 1904), True, 'import tensorflow as tf\n'), ((2726, 2736), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2734, 2736), True, 'import tensorflow as tf\n'), ((3441, 3451), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3449, 3451), True, 'import tensorflow as tf\n'), ((4094, 4127), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4125, 4127), True, 'import tensorflow as tf\n'), ((4302, 4312), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4310, 4312), True, 'import tensorflow as tf\n'), ((4428, 4481), 'tensorflow.constant', 'tf.constant', (['[[0, 0], [1, 0], [2, 0]]'], {'dtype': 'tf.int64'}), '([[0, 0], [1, 0], [2, 0]], dtype=tf.int64)\n', (4439, 4481), True, 'import tensorflow as tf\n'), ((4500, 4528), 'tensorflow.constant', 'tf.constant', (['[0.0, 1.0, 1.0]'], {}), '([0.0, 1.0, 1.0])\n', (4511, 4528), True, 'import tensorflow as tf\n'), ((5077, 5087), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5085, 5087), True, 'import tensorflow as tf\n'), ((5903, 5913), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5911, 5913), True, 'import tensorflow as tf\n'), ((6551, 6561), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (6559, 6561), True, 'import tensorflow as tf\n'), ((7112, 7145), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7143, 7145), True, 'import tensorflow as tf\n'), ((7812, 7822), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (7820, 7822), True, 'import tensorflow as tf\n'), ((8741, 8751), 'tensorflow.Graph', 'tf.Graph', ([], {}), 
'()\n', (8749, 8751), True, 'import tensorflow as tf\n'), ((8867, 8920), 'tensorflow.constant', 'tf.constant', (['[[0, 0], [1, 0], [2, 0]]'], {'dtype': 'tf.int64'}), '([[0, 0], [1, 0], [2, 0]], dtype=tf.int64)\n', (8878, 8920), True, 'import tensorflow as tf\n'), ((8939, 8961), 'tensorflow.constant', 'tf.constant', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (8950, 8961), True, 'import tensorflow as tf\n'), ((9368, 9378), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (9376, 9378), True, 'import tensorflow as tf\n'), ((10178, 10188), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (10186, 10188), True, 'import tensorflow as tf\n'), ((10841, 10874), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (10872, 10874), True, 'import tensorflow as tf\n'), ((11278, 11288), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (11286, 11288), True, 'import tensorflow as tf\n'), ((12228, 12238), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (12236, 12238), True, 'import tensorflow as tf\n'), ((13726, 13736), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (13734, 13736), True, 'import tensorflow as tf\n'), ((14558, 14568), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (14566, 14568), True, 'import tensorflow as tf\n'), ((15286, 15296), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (15294, 15296), True, 'import tensorflow as tf\n'), ((15853, 15886), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (15884, 15886), True, 'import tensorflow as tf\n'), ((15081, 15124), 'numpy.multiply', 'np.multiply', (['weights', 'self._expected_losses'], {}), '(weights, self._expected_losses)\n', (15092, 15124), True, 'import numpy as np\n')] |
#!/usr/bin/env python
'''
This script starts an object detection service which uses the filtered pcl data
to detect and recognize the object
'''
from pcl_helper import *
from transform_helper import Transformer
from image_helper import ImagePub
import pcl
import rospy
from sensor_msgs.msg import PointCloud2
from geometry_msgs.msg import Point
from std_msgs.msg import Header
from object_msgs.msg import ObjectPose
import numpy as np
from std_srvs.srv import Empty, EmptyResponse
#SVM
from svmClassifier import Classifier
# 3x4 camera projection (intrinsics) matrix: fx = fy = 554.38...,
# principal point (cx, cy) = (320.5, 240.5) — i.e. a 640x480 image.
# Used by mapToImage() to project 3-D camera-frame points to pixels.
camera_matrix = np.array([[554.3827128226441, 0.0, 320.5, 0],\
                [0.0, 554.3827128226441, 240.5, 0.0],\
                [0.0, 0.0, 1.0, 0.0]])
def euclidiean_cluster(cloud_objects):
    """Cluster a colored point cloud using Euclidean cluster extraction.

    Args:
        cloud_objects: pcl XYZRGB point cloud of the segmented objects.

    Returns:
        A list of clusters, each a list of point indices into cloud_objects.
    """
    # Strip the RGB channel: the k-d tree / cluster extraction work on XYZ only.
    white_cloud = pcl.PointCloud()
    white_cloud.from_list([[p[0], p[1], p[2]] for p in cloud_objects])
    tree = white_cloud.make_kdtree()
    # Create a cluster extraction object; tolerance and size bounds are tuned
    # for the table-top objects this node detects.
    ec = white_cloud.make_EuclideanClusterExtraction()
    ec.set_ClusterTolerance(0.015)
    ec.set_MinClusterSize(25)
    ec.set_MaxClusterSize(2000)
    ec.set_SearchMethod(tree)  # Search the k-d tree for clusters
    # Extract indices for each of the discovered clusters
    return ec.Extract()
def mapToImage(x, y, z, matrix=None):
    """Project a 3-D camera-frame point to integer pixel coordinates.

    Args:
        x, y, z: point coordinates in the camera frame.
        matrix: optional 3x4 projection matrix; defaults to the module-level
            camera_matrix (backward-compatible with the old 3-arg signature).

    Returns:
        Tuple (u, v) of integer pixel coordinates.
    """
    if matrix is None:
        matrix = camera_matrix
    point_3d = np.array([(x, y, z, 1.0)])        # homogeneous row vector
    point_2d = np.matmul(matrix, point_3d.T)     # (3, 1) homogeneous pixel
    # Perspective divide; .item() replaces np.asscalar, which was deprecated
    # and removed in NumPy >= 1.23.
    u = int((point_2d[0] / point_2d[2]).item())
    v = int((point_2d[1] / point_2d[2]).item())
    return u, v
class DetectObject:
    """ROS service node: detect, classify and publish poses of objects.

    Subscribes to the filtered object cloud on /pcl_objects; when the
    'detect' service is called, clusters the cached cloud, classifies each
    cluster with the SVM, draws labelled boxes on the camera image and
    publishes one ObjectPose (in base_link) per object on /detection_info.

    NOTE(review): relies on module-level globals created in __main__
    (transformer, classifier, imagePub); detect() assumes at least one
    cloud message has arrived before the service is called — confirm.
    """
    def __init__(self):
        rospy.Subscriber('/pcl_objects', PointCloud2, self.callback, queue_size=1)
        self.detection_info_pub = rospy.Publisher("/detection_info", ObjectPose, latch=True, queue_size=1)
        self.service = rospy.Service('detect', Empty, self.detect)
    def callback(self, msg):
        # Cache the most recent filtered cloud for the next 'detect' call.
        self.ros_cloud = msg
    def detect(self, req):
        # Service handler for 'detect' (std_srvs/Empty).
        cloud_objects = ros_to_pcl(self.ros_cloud)
        cluster_indices = euclidiean_cluster(cloud_objects)
        imagePub.capture_image()
        for index , pts_list in enumerate(cluster_indices):
            pcl_cluster = cloud_objects.extract(pts_list)
            pcl_cluster_arr = pcl_cluster.to_array()
            # Cluster centroid (x, y, z) in the camera frame.
            centroid = np.mean(pcl_cluster_arr , axis=0)[:3]
            centroid_point = Point()
            centroid_point.x = np.asscalar(centroid[0])
            centroid_point.y = np.asscalar(centroid[1])
            centroid_point.z = np.asscalar(centroid[2])
            # Axis-aligned XY extent, projected at the centroid depth to get
            # a 2-D bounding box in image coordinates.
            max_val = np.max(pcl_cluster_arr, axis=0)[:2]
            min_val = np.min(pcl_cluster_arr, axis=0)[:2]
            x1, y1 = mapToImage(min_val[0], min_val[1], centroid[2])
            x2, y2 = mapToImage(max_val[0], max_val[1], centroid[2])
            # Classify the cluster with the pre-trained SVM.
            label = classifier.predict(pcl_cluster)
            rospy.loginfo("DETECTED " + label)
            imagePub.draw_rectangle_with_label([x1, y1, x2, y2], label)
            imagePub.publish_image()
            # Transform the centroid to base_link
            centroid_point = transformer.transform_point(centroid_point, 'camera_rgb_frame2' , 'base_link')
            objPoseMsg = ObjectPose()
            objPoseMsg.name = label
            objPoseMsg.pose.header.frame_id = 'base_link'
            objPoseMsg.pose.header.stamp = rospy.Time.now()
            objPoseMsg.pose.pose.position = centroid_point
            # TODO: remove before submission — debug hack smuggling the box
            # width/height through the (otherwise unused) orientation fields.
            width = np.asscalar(max_val[0] - min_val[0])
            objPoseMsg.pose.pose.orientation.w = width
            hight = np.asscalar(max_val[1] - min_val[1])
            objPoseMsg.pose.pose.orientation.z = hight
            self.detection_info_pub.publish(objPoseMsg)
            rospy.sleep(0.1)  # throttle so subscribers see every message
        #print("%d, %d, %d, %d %0.5f" % (x1, y1, x2, y2, width))
        return EmptyResponse()
class DetectObjectTrain:
    """Detection helper for the training pipeline.

    Same clustering as DetectObject, but detect() only returns the 2-D
    bounding boxes of the clusters (no classification, no publishing).
    """
    def __init__(self):
        # Cache the latest filtered object cloud from the segmentation node.
        rospy.Subscriber('/pcl_objects', PointCloud2, self.callback, queue_size=1)
    def callback(self, msg):
        # Most recent PointCloud2; detect() assumes at least one arrived.
        self.ros_cloud = msg
    def detect(self):
        """Cluster the cached cloud and return a list of [x1, y1, x2, y2]
        image-space bounding boxes, one per detected cluster."""
        self.pcl_cloud = ros_to_pcl(self.ros_cloud)
        cluster_indices = euclidiean_cluster(self.pcl_cloud)
        detected_obj = []
        for pts_list in cluster_indices:
            pcl_cluster_arr = self.pcl_cloud.extract(pts_list).to_array()
            # Only the centroid depth (z) is needed for pixel projection;
            # the old dead Point()/np.asscalar locals were removed.
            centroid = np.mean(pcl_cluster_arr, axis=0)[:3]
            max_val = np.max(pcl_cluster_arr, axis=0)[:2]
            min_val = np.min(pcl_cluster_arr, axis=0)[:2]
            x1, y1 = mapToImage(min_val[0], min_val[1], centroid[2])
            x2, y2 = mapToImage(max_val[0], max_val[1], centroid[2])
            detected_obj.append([x1, y1, x2, y2])
        return detected_obj
def obj_detect_train():
    """DEPRECATED - was used for tensorflow classification.

    Blocks for one filtered cloud on /pcl_objects, clusters it, and returns
    a list of [x1, y1, x2, y2] image-space bounding boxes, one per cluster.
    Function only used for the training script.
    """
    cloud_objects = ros_to_pcl(rospy.wait_for_message('/pcl_objects', PointCloud2))
    cluster_indices = euclidiean_cluster(cloud_objects)
    detected_obj = []
    for pts_list in cluster_indices:
        pcl_cluster_arr = cloud_objects.extract(pts_list).to_array()
        # Only the centroid depth (z) is needed for pixel projection; the
        # old dead Point()/np.asscalar locals were removed.
        centroid = np.mean(pcl_cluster_arr, axis=0)[:3]
        max_val = np.max(pcl_cluster_arr, axis=0)[:2]
        min_val = np.min(pcl_cluster_arr, axis=0)[:2]
        x1, y1 = mapToImage(min_val[0], min_val[1], centroid[2])
        x2, y2 = mapToImage(max_val[0], max_val[1], centroid[2])
        detected_obj.append([x1, y1, x2, y2])
    return detected_obj
if __name__ == '__main__':
    rospy.init_node('obj_detection')
    # Module-level singletons; DetectObject.detect() uses them as globals.
    transformer = Transformer()
    classifier = Classifier()
    objDetector = DetectObject()
    imagePub = ImagePub()
    rospy.loginfo("Started Object detection")
    # Hand control to ROS: callbacks and the 'detect' service run from here.
    rospy.spin()
| [
"rospy.Subscriber",
"transform_helper.Transformer",
"numpy.mean",
"numpy.asscalar",
"std_srvs.srv.EmptyResponse",
"image_helper.ImagePub",
"rospy.Time.now",
"numpy.max",
"rospy.init_node",
"pcl.PointCloud",
"svmClassifier.Classifier",
"rospy.loginfo",
"numpy.min",
"rospy.Service",
"rospy... | [((547, 658), 'numpy.array', 'np.array', (['[[554.3827128226441, 0.0, 320.5, 0], [0.0, 554.3827128226441, 240.5, 0.0],\n [0.0, 0.0, 1.0, 0.0]]'], {}), '([[554.3827128226441, 0.0, 320.5, 0], [0.0, 554.3827128226441, \n 240.5, 0.0], [0.0, 0.0, 1.0, 0.0]])\n', (555, 658), True, 'import numpy as np\n'), ((774, 790), 'pcl.PointCloud', 'pcl.PointCloud', ([], {}), '()\n', (788, 790), False, 'import pcl\n'), ((1337, 1363), 'numpy.array', 'np.array', (['[(x, y, z, 1.0)]'], {}), '([(x, y, z, 1.0)])\n', (1345, 1363), True, 'import numpy as np\n'), ((1379, 1415), 'numpy.matmul', 'np.matmul', (['camera_matrix', 'point_3d.T'], {}), '(camera_matrix, point_3d.T)\n', (1388, 1415), True, 'import numpy as np\n'), ((6442, 6474), 'rospy.init_node', 'rospy.init_node', (['"""obj_detection"""'], {}), "('obj_detection')\n", (6457, 6474), False, 'import rospy\n'), ((6493, 6506), 'transform_helper.Transformer', 'Transformer', ([], {}), '()\n', (6504, 6506), False, 'from transform_helper import Transformer\n'), ((6524, 6536), 'svmClassifier.Classifier', 'Classifier', ([], {}), '()\n', (6534, 6536), False, 'from svmClassifier import Classifier\n'), ((6585, 6595), 'image_helper.ImagePub', 'ImagePub', ([], {}), '()\n', (6593, 6595), False, 'from image_helper import ImagePub\n'), ((6600, 6641), 'rospy.loginfo', 'rospy.loginfo', (['"""Started Object detection"""'], {}), "('Started Object detection')\n", (6613, 6641), False, 'import rospy\n'), ((6646, 6658), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (6656, 6658), False, 'import rospy\n'), ((1428, 1466), 'numpy.asscalar', 'np.asscalar', (['(point_2d[0] / point_2d[2])'], {}), '(point_2d[0] / point_2d[2])\n', (1439, 1466), True, 'import numpy as np\n'), ((1480, 1518), 'numpy.asscalar', 'np.asscalar', (['(point_2d[1] / point_2d[2])'], {}), '(point_2d[1] / point_2d[2])\n', (1491, 1518), True, 'import numpy as np\n'), ((1589, 1663), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/pcl_objects"""', 'PointCloud2', 'self.callback'], 
{'queue_size': '(1)'}), "('/pcl_objects', PointCloud2, self.callback, queue_size=1)\n", (1605, 1663), False, 'import rospy\n'), ((1698, 1770), 'rospy.Publisher', 'rospy.Publisher', (['"""/detection_info"""', 'ObjectPose'], {'latch': '(True)', 'queue_size': '(1)'}), "('/detection_info', ObjectPose, latch=True, queue_size=1)\n", (1713, 1770), False, 'import rospy\n'), ((1794, 1837), 'rospy.Service', 'rospy.Service', (['"""detect"""', 'Empty', 'self.detect'], {}), "('detect', Empty, self.detect)\n", (1807, 1837), False, 'import rospy\n'), ((3872, 3887), 'std_srvs.srv.EmptyResponse', 'EmptyResponse', ([], {}), '()\n', (3885, 3887), False, 'from std_srvs.srv import Empty, EmptyResponse\n'), ((3946, 4020), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/pcl_objects"""', 'PointCloud2', 'self.callback'], {'queue_size': '(1)'}), "('/pcl_objects', PointCloud2, self.callback, queue_size=1)\n", (3962, 4020), False, 'import rospy\n'), ((5457, 5508), 'rospy.wait_for_message', 'rospy.wait_for_message', (['"""/pcl_objects"""', 'PointCloud2'], {}), "('/pcl_objects', PointCloud2)\n", (5479, 5508), False, 'import rospy\n'), ((5783, 5790), 'geometry_msgs.msg.Point', 'Point', ([], {}), '()\n', (5788, 5790), False, 'from geometry_msgs.msg import Point\n'), ((5818, 5842), 'numpy.asscalar', 'np.asscalar', (['centroid[0]'], {}), '(centroid[0])\n', (5829, 5842), True, 'import numpy as np\n'), ((5870, 5894), 'numpy.asscalar', 'np.asscalar', (['centroid[1]'], {}), '(centroid[1])\n', (5881, 5894), True, 'import numpy as np\n'), ((5922, 5946), 'numpy.asscalar', 'np.asscalar', (['centroid[2]'], {}), '(centroid[2])\n', (5933, 5946), True, 'import numpy as np\n'), ((2337, 2344), 'geometry_msgs.msg.Point', 'Point', ([], {}), '()\n', (2342, 2344), False, 'from geometry_msgs.msg import Point\n'), ((2376, 2400), 'numpy.asscalar', 'np.asscalar', (['centroid[0]'], {}), '(centroid[0])\n', (2387, 2400), True, 'import numpy as np\n'), ((2432, 2456), 'numpy.asscalar', 'np.asscalar', (['centroid[1]'], {}), 
'(centroid[1])\n', (2443, 2456), True, 'import numpy as np\n'), ((2488, 2512), 'numpy.asscalar', 'np.asscalar', (['centroid[2]'], {}), '(centroid[2])\n', (2499, 2512), True, 'import numpy as np\n'), ((2870, 2904), 'rospy.loginfo', 'rospy.loginfo', (["('DETECTED ' + label)"], {}), "('DETECTED ' + label)\n", (2883, 2904), False, 'import rospy\n'), ((3200, 3212), 'object_msgs.msg.ObjectPose', 'ObjectPose', ([], {}), '()\n', (3210, 3212), False, 'from object_msgs.msg import ObjectPose\n'), ((3350, 3366), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (3364, 3366), False, 'import rospy\n'), ((3496, 3532), 'numpy.asscalar', 'np.asscalar', (['(max_val[0] - min_val[0])'], {}), '(max_val[0] - min_val[0])\n', (3507, 3532), True, 'import numpy as np\n'), ((3608, 3644), 'numpy.asscalar', 'np.asscalar', (['(max_val[1] - min_val[1])'], {}), '(max_val[1] - min_val[1])\n', (3619, 3644), True, 'import numpy as np\n'), ((3769, 3785), 'rospy.sleep', 'rospy.sleep', (['(0.1)'], {}), '(0.1)\n', (3780, 3785), False, 'import rospy\n'), ((4632, 4639), 'geometry_msgs.msg.Point', 'Point', ([], {}), '()\n', (4637, 4639), False, 'from geometry_msgs.msg import Point\n'), ((4671, 4695), 'numpy.asscalar', 'np.asscalar', (['centroid[0]'], {}), '(centroid[0])\n', (4682, 4695), True, 'import numpy as np\n'), ((4727, 4751), 'numpy.asscalar', 'np.asscalar', (['centroid[1]'], {}), '(centroid[1])\n', (4738, 4751), True, 'import numpy as np\n'), ((4783, 4807), 'numpy.asscalar', 'np.asscalar', (['centroid[2]'], {}), '(centroid[2])\n', (4794, 4807), True, 'import numpy as np\n'), ((5720, 5752), 'numpy.mean', 'np.mean', (['pcl_cluster_arr'], {'axis': '(0)'}), '(pcl_cluster_arr, axis=0)\n', (5727, 5752), True, 'import numpy as np\n'), ((5993, 6024), 'numpy.max', 'np.max', (['pcl_cluster_arr'], {'axis': '(0)'}), '(pcl_cluster_arr, axis=0)\n', (5999, 6024), True, 'import numpy as np\n'), ((6047, 6078), 'numpy.min', 'np.min', (['pcl_cluster_arr'], {'axis': '(0)'}), '(pcl_cluster_arr, axis=0)\n', (6053, 
6078), True, 'import numpy as np\n'), ((2270, 2302), 'numpy.mean', 'np.mean', (['pcl_cluster_arr'], {'axis': '(0)'}), '(pcl_cluster_arr, axis=0)\n', (2277, 2302), True, 'import numpy as np\n'), ((2536, 2567), 'numpy.max', 'np.max', (['pcl_cluster_arr'], {'axis': '(0)'}), '(pcl_cluster_arr, axis=0)\n', (2542, 2567), True, 'import numpy as np\n'), ((2594, 2625), 'numpy.min', 'np.min', (['pcl_cluster_arr'], {'axis': '(0)'}), '(pcl_cluster_arr, axis=0)\n', (2600, 2625), True, 'import numpy as np\n'), ((4565, 4597), 'numpy.mean', 'np.mean', (['pcl_cluster_arr'], {'axis': '(0)'}), '(pcl_cluster_arr, axis=0)\n', (4572, 4597), True, 'import numpy as np\n'), ((4862, 4893), 'numpy.max', 'np.max', (['pcl_cluster_arr'], {'axis': '(0)'}), '(pcl_cluster_arr, axis=0)\n', (4868, 4893), True, 'import numpy as np\n'), ((4920, 4951), 'numpy.min', 'np.min', (['pcl_cluster_arr'], {'axis': '(0)'}), '(pcl_cluster_arr, axis=0)\n', (4926, 4951), True, 'import numpy as np\n')] |
"""
Author: Tong
Time: --2021
"""
import json
import numpy as np
with open("data/webred/webred_21.json", "r") as file_in:
original_data = json.load(file_in)
# process data into <x, y>
_pair_data = []
for item in original_data:
_pair_data.append([item['sentence'], item['relation_name']])
pass
len_ = []
for i, sent in enumerate(_pair_data):
len_.append(len(sent[0].split()))
len_ = np.array(len_)
length = len(len_)
print(np.max(len_))
print(np.size(len_))
print("200: ", float(len(np.where(len_>200)[0]) / length))
print("100: ", float(len(np.where(len_>100)[0]) / length))
print("80: ", float(len(np.where(len_>80)[0]) / length))
print("70: ", float(len(np.where(len_>70)[0]) / length))
print("60: ", float(len(np.where(len_>60)[0]) / length))
print("50: ", float(len(np.where(len_>50)[0]) / length))
| [
"numpy.size",
"json.load",
"numpy.max",
"numpy.where",
"numpy.array"
] | [((400, 414), 'numpy.array', 'np.array', (['len_'], {}), '(len_)\n', (408, 414), True, 'import numpy as np\n'), ((146, 164), 'json.load', 'json.load', (['file_in'], {}), '(file_in)\n', (155, 164), False, 'import json\n'), ((442, 454), 'numpy.max', 'np.max', (['len_'], {}), '(len_)\n', (448, 454), True, 'import numpy as np\n'), ((462, 475), 'numpy.size', 'np.size', (['len_'], {}), '(len_)\n', (469, 475), True, 'import numpy as np\n'), ((502, 522), 'numpy.where', 'np.where', (['(len_ > 200)'], {}), '(len_ > 200)\n', (510, 522), True, 'import numpy as np\n'), ((561, 581), 'numpy.where', 'np.where', (['(len_ > 100)'], {}), '(len_ > 100)\n', (569, 581), True, 'import numpy as np\n'), ((619, 638), 'numpy.where', 'np.where', (['(len_ > 80)'], {}), '(len_ > 80)\n', (627, 638), True, 'import numpy as np\n'), ((676, 695), 'numpy.where', 'np.where', (['(len_ > 70)'], {}), '(len_ > 70)\n', (684, 695), True, 'import numpy as np\n'), ((733, 752), 'numpy.where', 'np.where', (['(len_ > 60)'], {}), '(len_ > 60)\n', (741, 752), True, 'import numpy as np\n'), ((790, 809), 'numpy.where', 'np.where', (['(len_ > 50)'], {}), '(len_ > 50)\n', (798, 809), True, 'import numpy as np\n')] |
from torch.utils.data import Dataset, DataLoader
from tqdm.autonotebook import tqdm
import torch
from PIL import Image
import numpy as np
from torch.nn.utils.rnn import pad_sequence
"""
Write your own dataset here.
The __getitem__ method must return a dict with the following key, value pairs:
'ques': tensor of ints, representing the index of words in the vocab
'ans': tensor of int, representing the index of word answer
'img': tensor representing the image
Get Images for the dataset:
! wget http://datasets.d2.mpi-inf.mpg.de/mateusz14visual-turing/nyu_depth_images.tar
Get Train question & ans:
! wget https://raw.githubusercontent.com/jayantk/lsp/master/data/daquar/reduced/qa.37.raw.train.txt
DAQAR dataset:
https://github.com/jayantk/lsp/tree/master/data/daquar
"""
class VQADataset(Dataset):
    """Question/answer/image triples for visual question answering (DAQUAR).

    The question file alternates question and answer lines: the question on
    even lines and its answer on the following odd line. Each question ends
    with the image name (without the '.png' extension).

    __getitem__ returns a dict with keys:
        'ques': list of int tensors (clamped token ids of the question)
        'ans':  single-element list holding the answer token id tensor
        'img':  float tensor of shape (C, 448, 448), pixel values in [0, 1]
    plus 'ques_str', 'ans_str' and 'img_file_name' for debugging.
    """

    def __init__(self, ques_file, image_dir, tokenizer, max_len=30):
        # BUGFIX: the original called super(Dataset, self).__init__(),
        # which skips Dataset in the MRO; super() is the correct form.
        super().__init__()
        self.ques_file = ques_file
        self.img_dir = image_dir
        self.tokenizer = tokenizer
        self.max_sentence_len = max_len
        self.data = []
        self.load_data()

    def load_data(self):
        """Parse the question file into self.data (a list of sample dicts)."""
        with open(self.ques_file, 'r') as f:
            data = f.readlines()
        for index, line in tqdm(enumerate(data[::2]), desc='Iterating over questions'):
            # The last word of the question names the image file.
            img = line.replace('?', '').strip(' ').split()[-1] + '.png'
            # Clamp every token id into the vocabulary range.
            # NOTE(review): `vocab_size` is a global defined outside this
            # file (e.g. in the driving notebook) -- confirm it is in scope.
            ques = [x for x in self.tokenizer.encode(line)]
            ques = [torch.tensor(min(x, vocab_size - 1)) for x in ques]
            ans = self.tokenizer.convert_tokens_to_ids([data[2 * index + 1].strip()])
            ans = [torch.tensor(min(vocab_size - 1, ans[0]))]
            dct = {
                'ques': ques,
                'ques_str': line,
                'ans_str': data[2 * index + 1],
                'ans': ans,
                'img_file_name': img
            }
            # Keep only samples with a single-token answer.
            if len(dct['ans']) == 1:
                self.data.append(dct)

    def __len__(self):
        return len(self.data)  # // 10

    def __getitem__(self, idx):
        dct = self.data[idx]
        # Crop to the fixed size expected by VGG-style image encoders.
        img = Image.open(self.img_dir + dct['img_file_name']).crop((0, 0, 448, 448))
        # Normalize image pixels to [0, 1].
        img = np.array(img, dtype=np.uint8) / 255
        # (H, W, C) -> (C, H, W)
        img = np.moveaxis(img, -1, 0)
        dct['img'] = torch.from_numpy(img)
        return dct
def pad_collate(batch):
    """Collate function that zero-pads every question in *batch* to equal length.

    Args:
        batch (list[dict]): Sample dicts as produced by VQADataset; each must
            contain a 'ques' entry convertible to a 1-D tensor.

    Returns:
        list[dict]: The same dicts, mutated in place so each 'ques' is the
        corresponding zero-padded row of the batch tensor.
    """
    question_tensors = [torch.tensor(sample['ques']) for sample in batch]
    padded = pad_sequence(question_tensors, batch_first=True)
    for sample, row in zip(batch, padded):
        sample['ques'] = row
    return batch
if __name__ == '__main__':
    # Smoke test: build a DataLoader over the DAQUAR questions/images.
    # NOTE(review): `tokenizer` must already exist at module scope (it is
    # not created anywhere in this file) -- confirm against the caller.
    dl = DataLoader(VQADataset(ques_file='/content/qa.37.raw.train.txt', image_dir='/content/nyu_depth_images/', tokenizer=tokenizer), batch_size=2, collate_fn=pad_collate)
"numpy.moveaxis",
"PIL.Image.open",
"numpy.array",
"torch.nn.utils.rnn.pad_sequence",
"torch.tensor",
"torch.from_numpy"
] | [((2633, 2669), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['ques'], {'batch_first': '(True)'}), '(ques, batch_first=True)\n', (2645, 2669), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((2180, 2203), 'numpy.moveaxis', 'np.moveaxis', (['img', '(-1)', '(0)'], {}), '(img, -1, 0)\n', (2191, 2203), True, 'import numpy as np\n'), ((2221, 2242), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (2237, 2242), False, 'import torch\n'), ((2582, 2605), 'torch.tensor', 'torch.tensor', (["x['ques']"], {}), "(x['ques'])\n", (2594, 2605), False, 'import torch\n'), ((2105, 2134), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (2113, 2134), True, 'import numpy as np\n'), ((1995, 2042), 'PIL.Image.open', 'Image.open', (["(self.img_dir + dct['img_file_name'])"], {}), "(self.img_dir + dct['img_file_name'])\n", (2005, 2042), False, 'from PIL import Image\n')] |
import cv2
import numpy as np
from skimage.measure import compare_ssim
import matplotlib.pyplot as plt
import numpy as np
import cv2
import imutils
def warp_flow(img, flow):
    """Warp *img* backwards along the optical-flow field *flow*.

    Builds an absolute sampling map (pixel coordinate minus displacement)
    and bilinearly resamples the image with cv2.remap. The caller's flow
    array is left unmodified: a negated copy is used for the map.
    """
    height, width = flow.shape[:2]
    # Each output pixel (y, x) samples the input at (x - u, y - v).
    sample_map = -flow
    sample_map[:, :, 0] += np.arange(width)
    sample_map[:, :, 1] += np.arange(height)[:, np.newaxis]
    return cv2.remap(img, sample_map, None, cv2.INTER_LINEAR)
def readFlow(fn):
    """Read a .flo optical-flow file in Middlebury format.

    Args:
        fn (str): Path to the .flo file.

    Returns:
        numpy.ndarray of shape (height, width, 2) holding the interleaved
        (u, v) displacement field, or None if the magic number is wrong.
        Summary statistics of both components are printed as a side effect.
    """
    with open(fn, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)
        # 202021.25 is the Middlebury sanity-check constant ("PIEH").
        if 202021.25 != magic:
            print('Magic number incorrect. Invalid .flo file')
            return None
        w = np.fromfile(f, np.int32, count=1)
        h = np.fromfile(f, np.int32, count=1)
        data = np.fromfile(f, np.float32, count=2 * int(w) * int(h))
    # Reshape into (rows, cols, 2); the original layout on disk is (w, h, 2).
    x = np.resize(data, (int(h), int(w), 2))
    u = x[:, :, 0]
    v = x[:, :, 1]
    # Diagnostic statistics for each flow component.
    print("u mean : " + str(np.mean(u)))
    print("v mean : " + str(np.mean(v)))
    print("u std : " + str(np.std(u)))
    print("v std : " + str(np.std(v)))
    print("u max : " + str(np.max(u)))
    print("u min : " + str(np.min(u)))
    print("v max : " + str(np.max(v)))
    print("v min : " + str(np.min(v)))
    return x
# Load a precomputed optical-flow field and the two frames it relates.
flow = readFlow("/home/nudlesoup/Research/flownet2-pytorch/rangetest/flownet2-catch37/flow/000204.flo")
im1 = np.asarray(cv2.imread("/home/nudlesoup/Research/flownet2-pytorch/rangetest/inference/imxx205.png"))
im2 = np.asarray(cv2.imread("/home/nudlesoup/Research/flownet2-pytorch/rangetest/inference/imxx206.png"))
# Warp frame 1 by the flow; the zero-flow warp is an identity baseline.
wrap1 = warp_flow(im1, flow)
wrap1_fake = warp_flow(im1, np.zeros_like(flow))
# cv2.imwrite("/home/nudlesoup/Research/flownet2-pytorch/rangetest/ex4/im1*255.jpg", im1*255)
# cv2.imwrite("/home/nudlesoup/Research/flownet2-pytorch/rangetest/inference/imxx1-flow-warping.jpg", wrap1)
#cv2.imwrite("/home/nudlesoup/Research/flownet2-pytorch/rangetest/ex3/ssim/flownetSD400/warping-fake.jpg", wrap1_fake)
# Compare the two raw frames (swap the commented pair below to instead
# compare the warped frame against frame 2).
# imageA=wrap1
# imageB=im2
imageA=im1
imageB=im2
# convert the images to grayscale
grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)
# Structural similarity; full=True also returns the per-pixel diff map.
(score, diff) = compare_ssim(grayA, grayB, full=True)
diff = (diff * 255).astype("uint8")
print("SSIM: {}".format(score))
# Otsu threshold of the (inverted) diff map marks the changed regions.
thresh = cv2.threshold(diff, 0, 255,
    cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
for c in cnts:
    # compute the bounding box of the contour and then draw the
    # bounding box on both input images to represent where the two
    # images differ
    (x, y, w, h) = cv2.boundingRect(c)
    cv2.rectangle(imageA, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.rectangle(imageB, (x, y), (x + w, y + h), (0, 0, 255), 2)
# show the output images
# cv2.imwrite("/home/nudlesoup/Research/flownet2-pytorch/rangetest/ex3/ssim/flownetSD400/original-frame2-0.jpg", imageA)
# cv2.imwrite("/home/nudlesoup/Research/flownet2-pytorch/rangetest/ex3/ssim/flownetSD400/modified-frame2-0.jpg", imageB)
# cv2.imwrite("/home/nudlesoup/Research/flownet2-pytorch/rangetest/ex3/ssim/flownetSD400/diff-0.jpg", diff)
# invert = cv2.bitwise_not(diff)
# cv2.imwrite("/home/nudlesoup/Research/flownet2-pytorch/rangetest/ex3/ssim/flownetSD400/diff-invert-0.jpg", invert)
# cv2.imwrite("/home/nudlesoup/Research/flownet2-pytorch/rangetest/inference/unet.jpg", thresh)
| [
"skimage.measure.compare_ssim",
"numpy.zeros_like",
"cv2.cvtColor",
"numpy.fromfile",
"cv2.threshold",
"numpy.std",
"cv2.remap",
"cv2.rectangle",
"cv2.imread",
"numpy.max",
"numpy.mean",
"numpy.arange",
"numpy.min",
"imutils.grab_contours",
"cv2.boundingRect"
] | [((2354, 2394), 'cv2.cvtColor', 'cv2.cvtColor', (['imageA', 'cv2.COLOR_BGR2GRAY'], {}), '(imageA, cv2.COLOR_BGR2GRAY)\n', (2366, 2394), False, 'import cv2\n'), ((2403, 2443), 'cv2.cvtColor', 'cv2.cvtColor', (['imageB', 'cv2.COLOR_BGR2GRAY'], {}), '(imageB, cv2.COLOR_BGR2GRAY)\n', (2415, 2443), False, 'import cv2\n'), ((2461, 2498), 'skimage.measure.compare_ssim', 'compare_ssim', (['grayA', 'grayB'], {'full': '(True)'}), '(grayA, grayB, full=True)\n', (2473, 2498), False, 'from skimage.measure import compare_ssim\n'), ((2741, 2768), 'imutils.grab_contours', 'imutils.grab_contours', (['cnts'], {}), '(cnts)\n', (2762, 2768), False, 'import imutils\n'), ((238, 250), 'numpy.arange', 'np.arange', (['w'], {}), '(w)\n', (247, 250), True, 'import numpy as np\n'), ((307, 351), 'cv2.remap', 'cv2.remap', (['img', 'flow', 'None', 'cv2.INTER_LINEAR'], {}), '(img, flow, None, cv2.INTER_LINEAR)\n', (316, 351), False, 'import cv2\n'), ((1664, 1761), 'cv2.imread', 'cv2.imread', (['"""/home/nudlesoup/Research/flownet2-pytorch/rangetest/inference/imxx205.png"""'], {}), "(\n '/home/nudlesoup/Research/flownet2-pytorch/rangetest/inference/imxx205.png'\n )\n", (1674, 1761), False, 'import cv2\n'), ((1770, 1867), 'cv2.imread', 'cv2.imread', (['"""/home/nudlesoup/Research/flownet2-pytorch/rangetest/inference/imxx206.png"""'], {}), "(\n '/home/nudlesoup/Research/flownet2-pytorch/rangetest/inference/imxx206.png'\n )\n", (1780, 1867), False, 'import cv2\n'), ((1917, 1936), 'numpy.zeros_like', 'np.zeros_like', (['flow'], {}), '(flow)\n', (1930, 1936), True, 'import numpy as np\n'), ((2577, 2645), 'cv2.threshold', 'cv2.threshold', (['diff', '(0)', '(255)', '(cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)'], {}), '(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n', (2590, 2645), False, 'import cv2\n'), ((2942, 2961), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (2958, 2961), False, 'import cv2\n'), ((2963, 3024), 'cv2.rectangle', 'cv2.rectangle', (['imageA', '(x, y)', '(x + 
w, y + h)', '(0, 0, 255)', '(2)'], {}), '(imageA, (x, y), (x + w, y + h), (0, 0, 255), 2)\n', (2976, 3024), False, 'import cv2\n'), ((3026, 3087), 'cv2.rectangle', 'cv2.rectangle', (['imageB', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(2)'], {}), '(imageB, (x, y), (x + w, y + h), (0, 0, 255), 2)\n', (3039, 3087), False, 'import cv2\n'), ((270, 282), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (279, 282), True, 'import numpy as np\n'), ((479, 514), 'numpy.fromfile', 'np.fromfile', (['f', 'np.float32'], {'count': '(1)'}), '(f, np.float32, count=1)\n', (490, 514), True, 'import numpy as np\n'), ((663, 696), 'numpy.fromfile', 'np.fromfile', (['f', 'np.int32'], {'count': '(1)'}), '(f, np.int32, count=1)\n', (674, 696), True, 'import numpy as np\n'), ((713, 746), 'numpy.fromfile', 'np.fromfile', (['f', 'np.int32'], {'count': '(1)'}), '(f, np.int32, count=1)\n', (724, 746), True, 'import numpy as np\n'), ((1177, 1187), 'numpy.mean', 'np.mean', (['u'], {}), '(u)\n', (1184, 1187), True, 'import numpy as np\n'), ((1226, 1236), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (1233, 1236), True, 'import numpy as np\n'), ((1274, 1283), 'numpy.std', 'np.std', (['u'], {}), '(u)\n', (1280, 1283), True, 'import numpy as np\n'), ((1321, 1330), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (1327, 1330), True, 'import numpy as np\n'), ((1368, 1377), 'numpy.max', 'np.max', (['u'], {}), '(u)\n', (1374, 1377), True, 'import numpy as np\n'), ((1415, 1424), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (1421, 1424), True, 'import numpy as np\n'), ((1462, 1471), 'numpy.max', 'np.max', (['v'], {}), '(v)\n', (1468, 1471), True, 'import numpy as np\n'), ((1509, 1518), 'numpy.min', 'np.min', (['v'], {}), '(v)\n', (1515, 1518), True, 'import numpy as np\n')] |
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from freeenergyframework import stats
import pandas as pd
class Result(object):
    """A single relative free-energy edge between two ligands.

    Holds the experimental and calculated DDG values (with errors) for the
    transformation ligandA -> ligandB. The total calculated uncertainty is
    the sum of the MBAR and "other" error components.
    """

    def __init__(self, ligandA, ligandB,
                 exp_DDG, exp_dDDG,
                 calc_DDG, mbar_error, other_error):
        self.ligandA = str(ligandA).strip()
        self.ligandB = str(ligandB).strip()
        self.exp_DDG = float(exp_DDG)
        self.dexp_DDG = float(exp_dDDG)
        # scope for an experimental dDDG?
        self.calc_DDG = float(calc_DDG)
        self.mbar_dDDG = float(mbar_error)
        self.other_dDDG = float(other_error)
        # Total calculated error; assumed to be additive across components.
        self.dcalc_DDG = self.mbar_dDDG + self.other_dDDG

    def toDF(self):
        """Return this edge as a one-row pandas DataFrame indexed 'ligandA_ligandB'."""
        row = {
            'ligandA': self.ligandA,
            'ligandB': self.ligandB,
            'exp_DDG': self.exp_DDG,
            'exp_dDDG': self.dexp_DDG,
            'calc_DDG': self.calc_DDG,
            'mbar_dDDG': self.mbar_dDDG,
            'other_dDDG': self.other_dDDG,
            'dcalc_DDG': self.dcalc_DDG,
        }
        return pd.DataFrame(row, index=[f'{self.ligandA}_{self.ligandB}'])
class FEMap(object):
    """A free-energy map: a directed graph of relative DDG measurements.

    Nodes are ligands (integer ids), edges carry the experimental and
    calculated DDG values of each transformation. If the graph is weakly
    connected, per-ligand absolute free energies are estimated by
    maximum likelihood (stats.mle) and stored on the nodes.
    """

    def __init__(self, csv):
        # `csv` is a path to a comma-separated results file; see read_csv.
        self.results = read_csv(csv)
        self.graph = nx.DiGraph()
        self.n_edges = len(self.results)
        self.generate_graph_from_results()
        # check the graph has minimal connectivity

    def generate_graph_from_results(self):
        """Build the DiGraph from self.results; compute absolute values if connected."""
        # Map ligand names to consecutive integer node ids.
        self._name_to_id = {}
        id = 0  # NOTE(review): shadows the builtin `id`
        for result in self.results:
            if result.ligandA not in self._name_to_id.keys():
                self._name_to_id[result.ligandA] = id
                id += 1
            if result.ligandB not in self._name_to_id.keys():
                self._name_to_id[result.ligandB] = id
                id += 1
            # TODO need some exp error for mle to converge for exp... this is a horrible hack
            # (mutates the Result object in place)
            if result.dexp_DDG == 0.0:
                result.dexp_DDG = 0.01
            self.graph.add_edge(self._name_to_id[result.ligandA], self._name_to_id[result.ligandB],
                                exp_DDG=result.exp_DDG, dexp_DDG=result.dexp_DDG,
                                calc_DDG=result.calc_DDG, dcalc_DDG=result.dcalc_DDG)
        self.n_ligands = self.graph.number_of_nodes()
        # Average number of edges per ligand.
        self.degree = self.graph.number_of_edges() / self.n_ligands
        # check the graph has minimal connectivity
        self.check_weakly_connected()
        if not self.weakly_connected:
            print('Graph is not connected enough to compute absolute values')
        else:
            self.generate_absolute_values()

    def check_weakly_connected(self):
        """Set and return whether the graph is connected when treated as undirected."""
        undirected_graph = self.graph.to_undirected()
        self.weakly_connected = nx.is_connected(undirected_graph)
        # NOTE(review): nx.is_connected is computed twice; the cached
        # attribute above could be returned instead.
        return nx.is_connected(undirected_graph)

    def generate_absolute_values(self):
        """MLE-estimate absolute free energies (and errors) per node, exp and calc."""
        if self.weakly_connected:
            f_i_exp, C_exp = stats.mle(self.graph, factor='exp_DDG')
            # Per-node uncertainty is the square root of the covariance diagonal.
            variance = np.diagonal(C_exp)
            for i, (f_i, df_i) in enumerate(zip(f_i_exp, variance**0.5)):
                self.graph.nodes[i]['f_i_exp'] = f_i
                self.graph.nodes[i]['df_i_exp'] = df_i
            f_i_calc, C_calc = stats.mle(self.graph, factor='calc_DDG')
            variance = np.diagonal(C_calc)
            for i, (f_i, df_i) in enumerate(zip(f_i_calc, variance**0.5)):
                self.graph.nodes[i]['f_i_calc'] = f_i
                self.graph.nodes[i]['df_i_calc'] = df_i

    def draw_graph(self, title='', filename=None):
        """Draw the map as a circular graph; show it, or save it if *filename* is given."""
        plt.figure(figsize=(10, 10))
        # Invert the name->id map so nodes are labelled by ligand name.
        self._id_to_name = {}
        for i, j in self._name_to_id.items():
            self._id_to_name[j] = i
        nx.draw_circular(self.graph, labels=self._id_to_name, node_color='hotpink', node_size=250)
        long_title = f'{title} \n Nedges={self.n_edges} \n Nligands={self.n_ligands} \n Degree={self.degree:.2f}'
        plt.title(long_title)
        if filename is None:
            plt.show()
        else:
            plt.savefig(filename, bbox_inches='tight')
def read_csv(filename):
    """Parse a comma-separated results file into a list of Result objects.

    Lines beginning with '#' are treated as comments and skipped; every
    other line is split on commas and passed positionally to Result.
    """
    results = []
    with open(filename, 'r') as handle:
        for raw_line in handle:
            if raw_line[0] == '#':
                continue
            results.append(Result(*raw_line.split(',')))
    return results
| [
"pandas.DataFrame",
"matplotlib.pyplot.title",
"networkx.draw_circular",
"matplotlib.pyplot.show",
"networkx.is_connected",
"matplotlib.pyplot.figure",
"freeenergyframework.stats.mle",
"networkx.DiGraph",
"matplotlib.pyplot.savefig",
"numpy.diagonal"
] | [((874, 1160), 'pandas.DataFrame', 'pd.DataFrame', (["{'ligandA': self.ligandA, 'ligandB': self.ligandB, 'exp_DDG': self.exp_DDG,\n 'exp_dDDG': self.dexp_DDG, 'calc_DDG': self.calc_DDG, 'mbar_dDDG': self\n .mbar_dDDG, 'other_dDDG': self.other_dDDG, 'dcalc_DDG': self.dcalc_DDG}"], {'index': "[f'{self.ligandA}_{self.ligandB}']"}), "({'ligandA': self.ligandA, 'ligandB': self.ligandB, 'exp_DDG':\n self.exp_DDG, 'exp_dDDG': self.dexp_DDG, 'calc_DDG': self.calc_DDG,\n 'mbar_dDDG': self.mbar_dDDG, 'other_dDDG': self.other_dDDG, 'dcalc_DDG':\n self.dcalc_DDG}, index=[f'{self.ligandA}_{self.ligandB}'])\n", (886, 1160), True, 'import pandas as pd\n'), ((1464, 1476), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1474, 1476), True, 'import networkx as nx\n'), ((2971, 3004), 'networkx.is_connected', 'nx.is_connected', (['undirected_graph'], {}), '(undirected_graph)\n', (2986, 3004), True, 'import networkx as nx\n'), ((3020, 3053), 'networkx.is_connected', 'nx.is_connected', (['undirected_graph'], {}), '(undirected_graph)\n', (3035, 3053), True, 'import networkx as nx\n'), ((3783, 3811), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (3793, 3811), True, 'import matplotlib.pyplot as plt\n'), ((3932, 4026), 'networkx.draw_circular', 'nx.draw_circular', (['self.graph'], {'labels': 'self._id_to_name', 'node_color': '"""hotpink"""', 'node_size': '(250)'}), "(self.graph, labels=self._id_to_name, node_color='hotpink',\n node_size=250)\n", (3948, 4026), True, 'import networkx as nx\n'), ((4145, 4166), 'matplotlib.pyplot.title', 'plt.title', (['long_title'], {}), '(long_title)\n', (4154, 4166), True, 'import matplotlib.pyplot as plt\n'), ((3158, 3197), 'freeenergyframework.stats.mle', 'stats.mle', (['self.graph'], {'factor': '"""exp_DDG"""'}), "(self.graph, factor='exp_DDG')\n", (3167, 3197), False, 'from freeenergyframework import stats\n'), ((3221, 3239), 'numpy.diagonal', 'np.diagonal', (['C_exp'], {}), '(C_exp)\n', (3232, 
3239), True, 'import numpy as np\n'), ((3454, 3494), 'freeenergyframework.stats.mle', 'stats.mle', (['self.graph'], {'factor': '"""calc_DDG"""'}), "(self.graph, factor='calc_DDG')\n", (3463, 3494), False, 'from freeenergyframework import stats\n'), ((3518, 3537), 'numpy.diagonal', 'np.diagonal', (['C_calc'], {}), '(C_calc)\n', (3529, 3537), True, 'import numpy as np\n'), ((4208, 4218), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4216, 4218), True, 'import matplotlib.pyplot as plt\n'), ((4245, 4287), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'bbox_inches': '"""tight"""'}), "(filename, bbox_inches='tight')\n", (4256, 4287), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
from allennlp.training.metrics import Metric
from overrides import overrides
from typing import Dict
@Metric.register('cross-entropy')
class CrossEntropyMetric(Metric):
    """Accumulates token-level loss and reports cross-entropy and perplexity."""

    def __init__(self) -> None:
        # Running totals over all __call__ invocations since the last reset.
        self.total_loss = 0
        self.total_num_tokens = 0

    @overrides
    def __call__(self, loss: float, num_tokens: int) -> None:
        """Accumulate one batch's summed loss and its token count."""
        self.total_loss += loss
        self.total_num_tokens += num_tokens

    @overrides
    def get_metric(self, reset: bool = False) -> Dict[str, float]:
        """Return mean loss per token ('cross-entropy') and exp of it ('perplexity')."""
        mean_loss = self.total_loss / self.total_num_tokens
        metrics = {
            'cross-entropy': mean_loss,
            'perplexity': np.exp(mean_loss),
        }
        if reset:
            self.total_loss = 0
            self.total_num_tokens = 0
        return metrics
| [
"numpy.exp",
"allennlp.training.metrics.Metric.register"
] | [((123, 155), 'allennlp.training.metrics.Metric.register', 'Metric.register', (['"""cross-entropy"""'], {}), "('cross-entropy')\n", (138, 155), False, 'from allennlp.training.metrics import Metric\n'), ((606, 627), 'numpy.exp', 'np.exp', (['cross_entropy'], {}), '(cross_entropy)\n', (612, 627), True, 'import numpy as np\n')] |
"""
Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-ND 4.0 license (https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode).
"""
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import math
from torch.nn.modules.utils import _triple
import numpy as np
# Alias the tensor namespace so later code can allocate on the GPU when one
# is available: T.FloatTensor(...) creates CUDA tensors if CUDA is present,
# plain CPU tensors otherwise.
if torch.cuda.is_available():
    T = torch.cuda
else:
    T = torch
class Noise(nn.Module):
    """Optionally adds Gaussian noise (std = sigma) to its input.

    When ``use_noise`` is False this module is the identity.
    """

    def __init__(self, use_noise, sigma=0.2):
        super(Noise, self).__init__()
        self.use_noise = use_noise
        self.sigma = sigma

    def forward(self, x):
        if not self.use_noise:
            return x
        # Fresh standard-normal noise of the same shape on every call;
        # excluded from the autograd graph.
        noise = Variable(T.FloatTensor(x.size()).normal_(), requires_grad=False)
        return x + self.sigma * noise
class ImageDiscriminator(nn.Module):
    """DCGAN-style discriminator for single images.

    Four stride-2 conv blocks (each preceded by optional input noise)
    downsample the image; a final 4x4 conv produces one logit per sample.
    """

    def __init__(self, n_channels, ndf=64, use_noise=False, noise_sigma=None):
        super(ImageDiscriminator, self).__init__()
        self.use_noise = use_noise

        layers = [
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv2d(n_channels, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Three more downsampling blocks, doubling the channel count each time.
        for mult in (1, 2, 4):
            layers += [
                Noise(use_noise, sigma=noise_sigma),
                nn.Conv2d(ndf * mult, ndf * mult * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * mult * 2),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        # Final 4x4 conv collapses the feature map to a single logit.
        layers.append(nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False))
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        """Return (logits, None); logits are squeezed of singleton dims."""
        h = self.main(input).squeeze()
        return h, None
class PatchImageDiscriminator(nn.Module):
    """Patch-based image discriminator: emits a grid of logits, one per patch."""

    def __init__(self, n_channels, ndf=64, use_noise=False, noise_sigma=None):
        super(PatchImageDiscriminator, self).__init__()
        self.use_noise = use_noise

        blocks = [
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv2d(n_channels, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Two channel-doubling downsampling blocks with batch norm.
        for mult in (1, 2):
            blocks += [
                Noise(use_noise, sigma=noise_sigma),
                nn.Conv2d(ndf * mult, ndf * mult * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * mult * 2),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        # Final stride-2 conv keeps a spatial extent: one logit per patch.
        blocks += [
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv2d(ndf * 4, 1, 4, 2, 1, bias=False),
        ]
        self.main = nn.Sequential(*blocks)

    def forward(self, input):
        """Return (patch logits, None)."""
        h = self.main(input).squeeze()
        return h, None
class PatchVideoDiscriminator(nn.Module):
    """Patch-based video discriminator built from 3D convolutions.

    Every conv keeps the temporal resolution (stride 1 in time, no
    temporal padding) while halving the spatial resolution.
    """

    def __init__(self, n_channels, n_output_neurons=1, bn_use_gamma=True, use_noise=False, noise_sigma=None, ndf=64):
        super(PatchVideoDiscriminator, self).__init__()
        self.n_channels = n_channels
        self.n_output_neurons = n_output_neurons
        self.use_noise = use_noise
        self.bn_use_gamma = bn_use_gamma

        down_stride = (1, 2, 2)   # keep time, halve height/width
        down_pad = (0, 1, 1)      # pad only spatially
        layers = [
            Noise(use_noise, sigma=noise_sigma),
            nn.Conv3d(n_channels, ndf, 4, stride=down_stride, padding=down_pad, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Two channel-doubling blocks with batch norm.
        for mult in (1, 2):
            layers += [
                Noise(use_noise, sigma=noise_sigma),
                nn.Conv3d(ndf * mult, ndf * mult * 2, 4, stride=down_stride, padding=down_pad, bias=False),
                nn.BatchNorm3d(ndf * mult * 2),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        # Final conv emits one logit per spatio-temporal patch.
        layers.append(nn.Conv3d(ndf * 4, 1, 4, stride=down_stride, padding=down_pad, bias=False))
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        """Return (patch logits, None)."""
        h = self.main(input).squeeze()
        return h, None
class SpatioTemporalConv(nn.Module):
    r"""Factored (2+1)D convolution.

    Instead of a full 3D convolution, applies a 2D convolution over the
    spatial axes into an intermediate channel space, then a 1D convolution
    over the temporal axis. The intermediate width M is chosen (paper
    section 3.5) so the parameter count matches the full 3D conv. The
    spatial conv is followed by batch norm and LeakyReLU; the temporal
    conv's norm/activation are intentionally left to the caller so this
    module can stand in for a plain Conv3d.

    Args:
        in_channels (int): Number of channels in the input tensor
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to the sides of the input. Default: 0
        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
        super(SpatioTemporalConv, self).__init__()
        # Normalize scalar arguments to (time, height, width) triples.
        kt, kh, kw = _triple(kernel_size)
        st, sh, sw = _triple(stride)
        pt, ph, pw = _triple(padding)

        # Intermediate channel count M from section 3.5 of the paper.
        intermed_channels = int(math.floor(
            (kt * kh * kw * in_channels * out_channels) /
            (kh * kw * in_channels + kt * out_channels)))

        # Effectively a 2D conv over H and W (kernel/stride/pad are 1/1/0
        # along time), followed by batch norm and LeakyReLU.
        self.spatial_conv = nn.Conv3d(in_channels, intermed_channels,
                                      (1, kh, kw), stride=(1, sh, sw),
                                      padding=(0, ph, pw), bias=bias)
        self.bn = nn.BatchNorm3d(intermed_channels)
        self.leakyrelu = nn.LeakyReLU()

        # Effectively a 1D conv along time (kernel/stride/pad are 1/1/0
        # over space).
        self.temporal_conv = nn.Conv3d(intermed_channels, out_channels,
                                       (kt, 1, 1), stride=(st, 1, 1),
                                       padding=(pt, 0, 0), bias=bias)

    def forward(self, x):
        x = self.leakyrelu(self.bn(self.spatial_conv(x)))
        return self.temporal_conv(x)
class VideoDiscriminator(nn.Module):
    """Video discriminator built from factored (2+1)D convolutions.

    Four downsampling blocks (time preserved, space halved) followed by a
    final 4x4x4 convolution producing ``n_output_neurons`` logits.
    """

    def __init__(self, n_channels, n_output_neurons=1, bn_use_gamma=True, use_noise=False, noise_sigma=None, ndf=64):
        super(VideoDiscriminator, self).__init__()
        self.n_channels = n_channels
        self.n_output_neurons = n_output_neurons
        self.use_noise = use_noise
        self.bn_use_gamma = bn_use_gamma

        down_stride = [1, 2, 2]   # keep time, halve height/width
        down_pad = [0, 1, 1]      # pad only spatially
        layers = [
            Noise(use_noise, sigma=noise_sigma),
            SpatioTemporalConv(n_channels, ndf, 4, stride=down_stride, padding=down_pad, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Three channel-doubling blocks with batch norm.
        for mult in (1, 2, 4):
            layers += [
                Noise(use_noise, sigma=noise_sigma),
                SpatioTemporalConv(ndf * mult, ndf * mult * 2, 4, stride=down_stride, padding=down_pad, bias=False),
                nn.BatchNorm3d(ndf * mult * 2),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        # Final conv collapses the feature map to n_output_neurons logits.
        layers.append(SpatioTemporalConv(ndf * 8, n_output_neurons, 4, stride=1, padding=0, bias=False))
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        """Return (logits, None); logits are squeezed of singleton dims."""
        h = self.main(input).squeeze()
        return h, None
class CategoricalVideoDiscriminator(VideoDiscriminator):
    """VideoDiscriminator whose output is widened to also carry category logits."""

    def __init__(self, n_channels, dim_categorical, n_output_neurons=1, use_noise=False, noise_sigma=None):
        # Reserve dim_categorical extra output neurons for the category logits.
        super(CategoricalVideoDiscriminator, self).__init__(
            n_channels=n_channels,
            n_output_neurons=n_output_neurons + dim_categorical,
            use_noise=use_noise,
            noise_sigma=noise_sigma)
        self.dim_categorical = dim_categorical

    def split(self, input):
        """Split the joint output into (realism scores, category logits)."""
        cut = input.size(1) - self.dim_categorical
        return input[:, :cut], input[:, cut:]

    def forward(self, input):
        h, _ = super(CategoricalVideoDiscriminator, self).forward(input)
        labels, categ = self.split(h)
        return labels, categ
class embedding_f(nn.Module):
    """Embedding helper for the U-Net: maps integer labels to dense x-dim vectors."""

    def __init__(self, n_class, x):
        super().__init__()
        self.n_class = n_class
        self.categ_embed = nn.Embedding(self.n_class, x)

    def forward(self, z_category):
        # Cast to long so float (e.g. one-hot) inputs are accepted as indices.
        return self.categ_embed(z_category.long())
class UNet(nn.Module):
    """U-Net style frame generator.

    The encoder embeds the input image; the embedding is concatenated with the
    noise, motion and category codes and decoded back into an image, with skip
    connections from every encoder stage.  A learned category embedding is also
    injected (as extra channels) at every decoder resolution.
    """

    def __init__(self, n_class, n_channels, z_motion, batch_size, video_length):
        super().__init__()
        self.n_class = n_class
        # Fixed Gaussian noise drawn once per model instance: (batch, 100).
        self.z_noise = torch.from_numpy(np.random.normal(0, 1, (batch_size, 100)).astype(np.float32))
        self.z_motion = z_motion
        self.n_channels = n_channels
        self.video_length = video_length
        # Category embeddings, one per decoder stage; each is reshaped into a
        # channel map matching that stage's spatial size in forward().
        self.embedding_c1 = embedding_f(self.n_class, 16)
        self.embedding_c2 = embedding_f(self.n_class, 64)
        self.embedding_c3 = embedding_f(self.n_class, 256)
        self.embedding_c4 = embedding_f(self.n_class, 1024)
        # Encoder: 3x64x64 -> 16x32x32 -> 32x16x16 -> 64x8x8 -> 128x4x4.
        self.conv_down1 = nn.utils.spectral_norm(nn.Conv2d(3, 16, 4, stride=2, padding=1))
        self.conv_down_acf1 = nn.LeakyReLU(inplace=True)
        self.conv_down2 = nn.utils.spectral_norm(nn.Conv2d(16, 32, 4, stride=2, padding=1))
        self.conv_down_acf2 = nn.LeakyReLU(inplace=True)
        self.conv_down3 = nn.utils.spectral_norm(nn.Conv2d(32, 64, 4, stride=2, padding=1))
        self.conv_down_acf3 = nn.LeakyReLU(inplace=True)
        self.conv_down4 = nn.utils.spectral_norm(nn.Conv2d(64, 128, 4, stride=2, padding=1))
        self.conv_down_acf4 = nn.LeakyReLU(inplace=True)
        self.flatten = nn.Flatten()
        self.linear = nn.Linear(2048, 200)  # image embedding dim = 200
        # Decoder: the first stage consumes the 316-dim latent as a 1x1 map
        # (200 image + 100 noise + motion/category codes -- 13 + 3 in the
        # original configuration; verify against VideoGenerator's dims).
        self.conv_up4 = nn.utils.spectral_norm(nn.ConvTranspose2d(316, 128, 4, 1, padding=0))  # -> 4x4
        self.conv_up_acf4 = nn.LeakyReLU(inplace=True)
        self.conv_up3 = nn.utils.spectral_norm(nn.ConvTranspose2d(259, 64, 4, 2, padding=1))  # -> 8x8
        self.conv_up_acf3 = nn.LeakyReLU(inplace=True)
        self.conv_up2 = nn.utils.spectral_norm(nn.ConvTranspose2d(131, 32, 4, 2, padding=1))  # -> 16x16
        self.conv_up_acf2 = nn.LeakyReLU(inplace=True)
        self.conv_up1 = nn.utils.spectral_norm(nn.ConvTranspose2d(67, 16, 4, 2, padding=1))  # -> 32x32
        self.conv_up_acf1 = nn.LeakyReLU(inplace=True)
        self.conv_last = nn.ConvTranspose2d(35, self.n_channels, 4, 2, padding=1)  # -> 64x64

    def forward(self, image, z_category):
        # ---- encoder ----
        conv1 = self.conv_down_acf1(self.conv_down1(image))
        conv2 = self.conv_down_acf2(self.conv_down2(conv1))
        conv3 = self.conv_down_acf3(self.conv_down3(conv2))
        conv4 = self.conv_down_acf4(self.conv_down4(conv3))
        x = self.linear(self.flatten(conv4))
        if torch.cuda.is_available():
            z_category = z_category.cuda()
            self.z_motion = self.z_motion.cuda()
            x = x.cuda()
            self.z_noise = self.z_noise.cuda()
        # Replicate the per-image codes across all video frames.
        x = x.repeat(self.video_length, 1)
        z_noise_1 = self.z_noise.repeat(self.video_length, 1)
        p = torch.cat([z_category, self.z_motion, x, z_noise_1], dim=1)
        p = p.view(p.size(0), p.size(1), 1, 1)  # (B, 316, 1, 1)
        # ---- decoder with skip connections + per-stage category maps ----
        u_conv4 = self.conv_up4(p)
        x = self.conv_up_acf4(u_conv4)  # C = 128
        conv4 = conv4.repeat(self.video_length, 1, 1, 1)
        categ_embedding_1 = self.embedding_c1(z_category)
        categ_embedding_1 = categ_embedding_1.reshape(categ_embedding_1.size(0), categ_embedding_1.size(1), x.size(2), x.size(3))
        x = torch.cat([x, conv4, categ_embedding_1], dim=1)
        # BUG FIX: u_conv3 was referenced without ever being computed (NameError
        # at runtime); apply conv_up3 here, mirroring the conv_up2/conv_up1 stages.
        u_conv3 = self.conv_up3(x)
        x = self.conv_up_acf3(u_conv3)
        conv3 = conv3.repeat(self.video_length, 1, 1, 1)
        categ_embedding_2 = self.embedding_c2(z_category)
        categ_embedding_2 = categ_embedding_2.reshape(categ_embedding_2.size(0), categ_embedding_2.size(1), x.size(2), x.size(3))
        x = torch.cat([x, conv3, categ_embedding_2], dim=1)
        u_conv2 = self.conv_up2(x)
        x = self.conv_up_acf2(u_conv2)
        conv2 = conv2.repeat(self.video_length, 1, 1, 1)
        categ_embedding_3 = self.embedding_c3(z_category)
        categ_embedding_3 = categ_embedding_3.reshape(categ_embedding_3.size(0), categ_embedding_3.size(1), x.size(2), x.size(3))
        x = torch.cat([x, conv2, categ_embedding_3], dim=1)
        u_conv1 = self.conv_up1(x)
        x = self.conv_up_acf1(u_conv1)
        conv1 = conv1.repeat(self.video_length, 1, 1, 1)
        categ_embedding_4 = self.embedding_c4(z_category)
        categ_embedding_4 = categ_embedding_4.reshape(categ_embedding_4.size(0), categ_embedding_4.size(1), x.size(2), x.size(3))
        x = torch.cat([x, conv1, categ_embedding_4], dim=1)
        out = self.conv_last(x)
        return out
class VideoGenerator(nn.Module):
    """Video generator: samples motion/category codes and renders frames with a U-Net.

    A GRU cell turns per-frame Gaussian noise into a temporally correlated
    motion trajectory; a one-hot category code is sampled once per video and
    repeated over its frames.
    """

    def __init__(self, n_class, n_channels, dim_z_category, dim_z_motion,
                 video_length, ngf=64):
        super(VideoGenerator, self).__init__()
        self.n_class = n_class
        self.n_channels = n_channels
        self.dim_z_category = dim_z_category
        self.dim_z_motion = dim_z_motion
        self.video_length = video_length
        # Recurrent cell producing the frame-to-frame motion trajectory.
        self.recurrent = nn.GRUCell(dim_z_motion, dim_z_motion)

    def sample_z_m(self, num_samples, video_len=None):
        """Sample motion codes of shape (num_samples * video_len, dim_z_motion)."""
        video_len = video_len if video_len is not None else self.video_length
        h_t = [self.get_gru_initial_state(num_samples)]
        for frame_num in range(video_len):
            e_t = self.get_iteration_noise(num_samples)
            h_t.append(self.recurrent(e_t, h_t[-1]))
        # Drop the initial state, then flatten (samples, frames) into rows.
        z_m_t = [h_k.view(-1, 1, self.dim_z_motion) for h_k in h_t]
        z_m = torch.cat(z_m_t[1:], dim=1).view(-1, self.dim_z_motion)
        return z_m

    def sample_z_categ(self, num_samples, video_len):
        """Sample one-hot category codes (repeated per frame) and their integer labels.

        Returns (None, zeros) when dim_z_category <= 0.  The labels are used by
        the categorical classification loss.
        """
        video_len = video_len if video_len is not None else self.video_length
        if self.dim_z_category <= 0:
            return None, np.zeros(num_samples)
        classes_to_generate = np.random.randint(self.dim_z_category, size=num_samples)
        one_hot = np.zeros((num_samples, self.dim_z_category), dtype=np.float32)
        one_hot[np.arange(num_samples), classes_to_generate] = 1
        # One category per video, repeated for every frame.
        one_hot_video = np.repeat(one_hot, video_len, axis=0)
        one_hot_video = torch.from_numpy(one_hot_video)
        if torch.cuda.is_available():
            one_hot_video = one_hot_video.cuda()
        return Variable(one_hot_video), torch.from_numpy(classes_to_generate)

    def sample_z_video(self, num_samples, video_len=None):
        """Return (z, z_category, z_category_labels); z concatenates category and motion codes."""
        z_category, z_category_labels = self.sample_z_categ(num_samples, video_len)
        z_motion = self.sample_z_m(num_samples, video_len)
        if z_category is not None:
            z = torch.cat([z_category, z_motion], dim=1)
        else:
            z = z_motion
        return z, z_category, z_category_labels

    def sample_videos(self, image, num_samples, target_class=None, video_len=None):
        """Generate videos with the U-Net; returns (videos, category labels).

        Output shape is (num_samples, n_channels, video_len, H, W); the reshape
        assumes square frames (H == W).
        """
        video_len = video_len if video_len is not None else self.video_length
        z, z_category, z_category_labels = self.sample_z_video(num_samples, video_len)
        if target_class is not None:  # inference: force the requested category
            print("inference")
            z_category = target_class
        main = UNet(self.n_class, self.n_channels, z, num_samples, video_len)
        if torch.cuda.is_available():
            main.cuda()
        h = main(image, z_category)
        # (N*T, C, H, W) -> (N, T, C, H, W) -> (N, C, T, H, W).
        h = h.view(int(h.size(0) / video_len), int(video_len), self.n_channels, h.size(3), h.size(3))
        h = h.permute(0, 2, 1, 3, 4)
        return h, Variable(z_category_labels, requires_grad=False)

    def sample_images(self, image, num_samples, video_len=None):
        """Generate videos and keep one random frame per sample; returns (frames, None)."""
        video_len = video_len if video_len is not None else self.video_length
        # Note: a redundant sample_z_video call was removed here -- sample_videos
        # draws its own latent codes, so the extra draw only wasted RNG state.
        h, _ = self.sample_videos(image, num_samples, None, video_len)
        h_result = []
        for i in range(num_samples):
            j = np.random.randint(video_len)
            img_frame = h[i, :, j, :, :]
            h_result.append(img_frame)
        h_result = torch.stack(h_result)
        return h_result, None

    def get_gru_initial_state(self, num_samples):
        """Initial GRU hidden state: standard normal, (num_samples, dim_z_motion)."""
        return Variable(T.FloatTensor(num_samples, self.dim_z_motion).normal_())

    def get_iteration_noise(self, num_samples):
        """Per-frame GRU input noise: standard normal, (num_samples, dim_z_motion)."""
        return Variable(T.FloatTensor(num_samples, self.dim_z_motion).normal_())
| [
"torch.nn.Embedding",
"torch.cat",
"numpy.random.randint",
"numpy.arange",
"numpy.random.normal",
"torch.nn.BatchNorm3d",
"torch.nn.Conv3d",
"torch.nn.Linear",
"torch.nn.GRUCell",
"numpy.repeat",
"torch.autograd.Variable",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.cuda.is_available... | [((376, 401), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (399, 401), False, 'import torch\n'), ((5321, 5341), 'torch.nn.modules.utils._triple', '_triple', (['kernel_size'], {}), '(kernel_size)\n', (5328, 5341), False, 'from torch.nn.modules.utils import _triple\n'), ((5359, 5374), 'torch.nn.modules.utils._triple', '_triple', (['stride'], {}), '(stride)\n', (5366, 5374), False, 'from torch.nn.modules.utils import _triple\n'), ((5393, 5409), 'torch.nn.modules.utils._triple', '_triple', (['padding'], {}), '(padding)\n', (5400, 5409), False, 'from torch.nn.modules.utils import _triple\n'), ((6508, 6634), 'torch.nn.Conv3d', 'nn.Conv3d', (['in_channels', 'intermed_channels', 'spatial_kernel_size'], {'stride': 'spatial_stride', 'padding': 'spatial_padding', 'bias': 'bias'}), '(in_channels, intermed_channels, spatial_kernel_size, stride=\n spatial_stride, padding=spatial_padding, bias=bias)\n', (6517, 6634), True, 'import torch.nn as nn\n'), ((6684, 6717), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['intermed_channels'], {}), '(intermed_channels)\n', (6698, 6717), True, 'import torch.nn as nn\n'), ((6743, 6757), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (6755, 6757), True, 'import torch.nn as nn\n'), ((7118, 7248), 'torch.nn.Conv3d', 'nn.Conv3d', (['intermed_channels', 'out_channels', 'temporal_kernel_size'], {'stride': 'temporal_stride', 'padding': 'temporal_padding', 'bias': 'bias'}), '(intermed_channels, out_channels, temporal_kernel_size, stride=\n temporal_stride, padding=temporal_padding, bias=bias)\n', (7127, 7248), True, 'import torch.nn as nn\n'), ((10540, 10569), 'torch.nn.Embedding', 'nn.Embedding', (['self.n_class', 'x'], {}), '(self.n_class, x)\n', (10552, 10569), True, 'import torch.nn as nn\n'), ((12090, 12116), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (12102, 12116), True, 'import torch.nn as nn\n'), ((12252, 12278), 'torch.nn.LeakyReLU', 
'nn.LeakyReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (12264, 12278), True, 'import torch.nn as nn\n'), ((12411, 12437), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (12423, 12437), True, 'import torch.nn as nn\n'), ((12571, 12597), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (12583, 12597), True, 'import torch.nn as nn\n'), ((12637, 12649), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (12647, 12649), True, 'import torch.nn as nn\n'), ((12673, 12693), 'torch.nn.Linear', 'nn.Linear', (['(2048)', '(200)'], {}), '(2048, 200)\n', (12682, 12693), True, 'import torch.nn as nn\n'), ((12911, 12937), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (12923, 12937), True, 'import torch.nn as nn\n'), ((13088, 13114), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (13100, 13114), True, 'import torch.nn as nn\n'), ((13266, 13292), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (13278, 13292), True, 'import torch.nn as nn\n'), ((13450, 13476), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (13462, 13476), True, 'import torch.nn as nn\n'), ((13503, 13559), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(35)', 'self.n_channels', '(4)', '(2)'], {'padding': '(1)'}), '(35, self.n_channels, 4, 2, padding=1)\n', (13521, 13559), True, 'import torch.nn as nn\n'), ((14067, 14092), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14090, 14092), False, 'import torch\n'), ((14375, 14434), 'torch.cat', 'torch.cat', (['[z_category, self.z_motion, x, z_noise_1]'], {'dim': '(1)'}), '([z_category, self.z_motion, x, z_noise_1], dim=1)\n', (14384, 14434), False, 'import torch\n'), ((14888, 14935), 'torch.cat', 'torch.cat', (['[x, conv4, categ_embedding_1]'], {'dim': '(1)'}), '([x, conv4, categ_embedding_1], 
dim=1)\n', (14897, 14935), False, 'import torch\n'), ((15236, 15283), 'torch.cat', 'torch.cat', (['[x, conv3, categ_embedding_2]'], {'dim': '(1)'}), '([x, conv3, categ_embedding_2], dim=1)\n', (15245, 15283), False, 'import torch\n'), ((15619, 15666), 'torch.cat', 'torch.cat', (['[x, conv2, categ_embedding_3]'], {'dim': '(1)'}), '([x, conv2, categ_embedding_3], dim=1)\n', (15628, 15666), False, 'import torch\n'), ((16003, 16050), 'torch.cat', 'torch.cat', (['[x, conv1, categ_embedding_4]'], {'dim': '(1)'}), '([x, conv1, categ_embedding_4], dim=1)\n', (16012, 16050), False, 'import torch\n'), ((16568, 16606), 'torch.nn.GRUCell', 'nn.GRUCell', (['dim_z_motion', 'dim_z_motion'], {}), '(dim_z_motion, dim_z_motion)\n', (16578, 16606), True, 'import torch.nn as nn\n'), ((17471, 17527), 'numpy.random.randint', 'np.random.randint', (['self.dim_z_category'], {'size': 'num_samples'}), '(self.dim_z_category, size=num_samples)\n', (17488, 17527), True, 'import numpy as np\n'), ((17546, 17608), 'numpy.zeros', 'np.zeros', (['(num_samples, self.dim_z_category)'], {'dtype': 'np.float32'}), '((num_samples, self.dim_z_category), dtype=np.float32)\n', (17554, 17608), True, 'import numpy as np\n'), ((17698, 17735), 'numpy.repeat', 'np.repeat', (['one_hot', 'video_len'], {'axis': '(0)'}), '(one_hot, video_len, axis=0)\n', (17707, 17735), True, 'import numpy as np\n'), ((17761, 17792), 'torch.from_numpy', 'torch.from_numpy', (['one_hot_video'], {}), '(one_hot_video)\n', (17777, 17792), False, 'import torch\n'), ((17805, 17830), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (17828, 17830), False, 'import torch\n'), ((19000, 19025), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (19023, 19025), False, 'import torch\n'), ((19791, 19812), 'torch.stack', 'torch.stack', (['h_result'], {}), '(h_result)\n', (19802, 19812), False, 'import torch\n'), ((1088, 1135), 'torch.nn.Conv2d', 'nn.Conv2d', (['n_channels', 'ndf', '(4)', '(2)', '(1)'], 
{'bias': '(False)'}), '(n_channels, ndf, 4, 2, 1, bias=False)\n', (1097, 1135), True, 'import torch.nn as nn\n'), ((1149, 1180), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1161, 1180), True, 'import torch.nn as nn\n'), ((1244, 1288), 'torch.nn.Conv2d', 'nn.Conv2d', (['ndf', '(ndf * 2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ndf, ndf * 2, 4, 2, 1, bias=False)\n', (1253, 1288), True, 'import torch.nn as nn\n'), ((1302, 1325), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ndf * 2)'], {}), '(ndf * 2)\n', (1316, 1325), True, 'import torch.nn as nn\n'), ((1339, 1370), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1351, 1370), True, 'import torch.nn as nn\n'), ((1434, 1482), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * 2)', '(ndf * 4)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ndf * 2, ndf * 4, 4, 2, 1, bias=False)\n', (1443, 1482), True, 'import torch.nn as nn\n'), ((1496, 1519), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ndf * 4)'], {}), '(ndf * 4)\n', (1510, 1519), True, 'import torch.nn as nn\n'), ((1533, 1564), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1545, 1564), True, 'import torch.nn as nn\n'), ((1628, 1676), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * 4)', '(ndf * 8)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ndf * 4, ndf * 8, 4, 2, 1, bias=False)\n', (1637, 1676), True, 'import torch.nn as nn\n'), ((1690, 1713), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ndf * 8)'], {}), '(ndf * 8)\n', (1704, 1713), True, 'import torch.nn as nn\n'), ((1727, 1758), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1739, 1758), True, 'import torch.nn as nn\n'), ((1773, 1815), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * 8)', '(1)', '(4)', '(1)', '(0)'], {'bias': '(False)'}), '(ndf * 8, 1, 4, 1, 0, bias=False)\n', (1782, 1815), True, 'import torch.nn 
as nn\n'), ((2232, 2279), 'torch.nn.Conv2d', 'nn.Conv2d', (['n_channels', 'ndf', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(n_channels, ndf, 4, 2, 1, bias=False)\n', (2241, 2279), True, 'import torch.nn as nn\n'), ((2293, 2324), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (2305, 2324), True, 'import torch.nn as nn\n'), ((2388, 2432), 'torch.nn.Conv2d', 'nn.Conv2d', (['ndf', '(ndf * 2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ndf, ndf * 2, 4, 2, 1, bias=False)\n', (2397, 2432), True, 'import torch.nn as nn\n'), ((2446, 2469), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ndf * 2)'], {}), '(ndf * 2)\n', (2460, 2469), True, 'import torch.nn as nn\n'), ((2483, 2514), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (2495, 2514), True, 'import torch.nn as nn\n'), ((2578, 2626), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * 2)', '(ndf * 4)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ndf * 2, ndf * 4, 4, 2, 1, bias=False)\n', (2587, 2626), True, 'import torch.nn as nn\n'), ((2640, 2663), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ndf * 4)'], {}), '(ndf * 4)\n', (2654, 2663), True, 'import torch.nn as nn\n'), ((2677, 2708), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (2689, 2708), True, 'import torch.nn as nn\n'), ((2772, 2814), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * 4)', '(1)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ndf * 4, 1, 4, 2, 1, bias=False)\n', (2781, 2814), True, 'import torch.nn as nn\n'), ((3396, 3474), 'torch.nn.Conv3d', 'nn.Conv3d', (['n_channels', 'ndf', '(4)'], {'stride': '(1, 2, 2)', 'padding': '(0, 1, 1)', 'bias': '(False)'}), '(n_channels, ndf, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False)\n', (3405, 3474), True, 'import torch.nn as nn\n'), ((3488, 3519), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (3500, 3519), 
True, 'import torch.nn as nn\n'), ((3583, 3658), 'torch.nn.Conv3d', 'nn.Conv3d', (['ndf', '(ndf * 2)', '(4)'], {'stride': '(1, 2, 2)', 'padding': '(0, 1, 1)', 'bias': '(False)'}), '(ndf, ndf * 2, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False)\n', (3592, 3658), True, 'import torch.nn as nn\n'), ((3672, 3695), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(ndf * 2)'], {}), '(ndf * 2)\n', (3686, 3695), True, 'import torch.nn as nn\n'), ((3709, 3740), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (3721, 3740), True, 'import torch.nn as nn\n'), ((3804, 3883), 'torch.nn.Conv3d', 'nn.Conv3d', (['(ndf * 2)', '(ndf * 4)', '(4)'], {'stride': '(1, 2, 2)', 'padding': '(0, 1, 1)', 'bias': '(False)'}), '(ndf * 2, ndf * 4, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False)\n', (3813, 3883), True, 'import torch.nn as nn\n'), ((3897, 3920), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(ndf * 4)'], {}), '(ndf * 4)\n', (3911, 3920), True, 'import torch.nn as nn\n'), ((3934, 3965), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (3946, 3965), True, 'import torch.nn as nn\n'), ((3980, 4053), 'torch.nn.Conv3d', 'nn.Conv3d', (['(ndf * 4)', '(1)', '(4)'], {'stride': '(1, 2, 2)', 'padding': '(0, 1, 1)', 'bias': '(False)'}), '(ndf * 4, 1, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False)\n', (3989, 4053), True, 'import torch.nn as nn\n'), ((6149, 6329), 'math.floor', 'math.floor', (['(kernel_size[0] * kernel_size[1] * kernel_size[2] * in_channels *\n out_channels / (kernel_size[1] * kernel_size[2] * in_channels + \n kernel_size[0] * out_channels))'], {}), '(kernel_size[0] * kernel_size[1] * kernel_size[2] * in_channels *\n out_channels / (kernel_size[1] * kernel_size[2] * in_channels + \n kernel_size[0] * out_channels))\n', (6159, 6329), False, 'import math\n'), ((8138, 8169), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (8150, 
8169), True, 'import torch.nn as nn\n'), ((8421, 8444), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(ndf * 2)'], {}), '(ndf * 2)\n', (8435, 8444), True, 'import torch.nn as nn\n'), ((8458, 8489), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (8470, 8489), True, 'import torch.nn as nn\n'), ((8748, 8771), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(ndf * 4)'], {}), '(ndf * 4)\n', (8762, 8771), True, 'import torch.nn as nn\n'), ((8785, 8816), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (8797, 8816), True, 'import torch.nn as nn\n'), ((9076, 9099), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(ndf * 8)'], {}), '(ndf * 8)\n', (9090, 9099), True, 'import torch.nn as nn\n'), ((9113, 9144), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (9125, 9144), True, 'import torch.nn as nn\n'), ((11961, 12001), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(4)'], {'stride': '(2)', 'padding': '(1)'}), '(3, 16, 4, stride=2, padding=1)\n', (11970, 12001), True, 'import torch.nn as nn\n'), ((12167, 12208), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(32)', '(4)'], {'stride': '(2)', 'padding': '(1)'}), '(16, 32, 4, stride=2, padding=1)\n', (12176, 12208), True, 'import torch.nn as nn\n'), ((12329, 12370), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)', '(4)'], {'stride': '(2)', 'padding': '(1)'}), '(32, 64, 4, stride=2, padding=1)\n', (12338, 12370), True, 'import torch.nn as nn\n'), ((12488, 12530), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(4)'], {'stride': '(2)', 'padding': '(1)'}), '(64, 128, 4, stride=2, padding=1)\n', (12497, 12530), True, 'import torch.nn as nn\n'), ((12808, 12853), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(316)', '(128)', '(4)', '(1)'], {'padding': '(0)'}), '(316, 128, 4, 1, padding=0)\n', (12826, 12853), True, 'import torch.nn as nn\n'), ((12986, 13030), 
'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(259)', '(64)', '(4)', '(2)'], {'padding': '(1)'}), '(259, 64, 4, 2, padding=1)\n', (13004, 13030), True, 'import torch.nn as nn\n'), ((13163, 13207), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(131)', '(32)', '(4)', '(2)'], {'padding': '(1)'}), '(131, 32, 4, 2, padding=1)\n', (13181, 13207), True, 'import torch.nn as nn\n'), ((13349, 13392), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(67)', '(16)', '(4)', '(2)'], {'padding': '(1)'}), '(67, 16, 4, 2, padding=1)\n', (13367, 13392), True, 'import torch.nn as nn\n'), ((17897, 17920), 'torch.autograd.Variable', 'Variable', (['one_hot_video'], {}), '(one_hot_video)\n', (17905, 17920), False, 'from torch.autograd import Variable\n'), ((17922, 17959), 'torch.from_numpy', 'torch.from_numpy', (['classes_to_generate'], {}), '(classes_to_generate)\n', (17938, 17959), False, 'import torch\n'), ((18354, 18394), 'torch.cat', 'torch.cat', (['[z_category, z_motion]'], {'dim': '(1)'}), '([z_category, z_motion], dim=1)\n', (18363, 18394), False, 'import torch\n'), ((19241, 19289), 'torch.autograd.Variable', 'Variable', (['z_category_labels'], {'requires_grad': '(False)'}), '(z_category_labels, requires_grad=False)\n', (19249, 19289), False, 'from torch.autograd import Variable\n'), ((19671, 19699), 'numpy.random.randint', 'np.random.randint', (['video_len'], {}), '(video_len)\n', (19688, 19699), True, 'import numpy as np\n'), ((17060, 17087), 'torch.cat', 'torch.cat', (['z_m_t[1:]'], {'dim': '(1)'}), '(z_m_t[1:], dim=1)\n', (17069, 17087), False, 'import torch\n'), ((17418, 17439), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (17426, 17439), True, 'import numpy as np\n'), ((17625, 17647), 'numpy.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (17634, 17647), True, 'import numpy as np\n'), ((11094, 11135), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(batch_size, 100)'], {}), '(0, 1, (batch_size, 100))\n', 
(11110, 11135), True, 'import numpy as np\n')] |
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM
from keras.utils.np_utils import to_categorical
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import ModelCheckpoint, EarlyStopping
import pandas as pd
import numpy as np
import re
import pickle as pkl
from sklearn import preprocessing
import json
import random
# load the user configs
with open('config.json') as f:
    config = json.load(f)
# Seed the RNG so runs are reproducible.
random.seed(config["seed"])
# Hyperparameters and file paths pulled from config.json.
MAX_NB_WORDS = config["MAX_NB_WORDS"]
MAX_SEQUENCE_LENGTH=config["MAX_SEQUENCE_LENGTH"]
VALIDATION_SPLIT=config["VALIDATION_SPLIT"]
EMBEDDING_DIM=300  # must match the dimensionality of the GloVe vectors
LSTM_OUT=config["LSTM_OUT"]
BATCH_SIZE=config["BATCH_SIZE"]
EPOCHS=config["EPOCHS"]
GLOVE_EMBEDDING_PATH=config["GLOVE_EMBEDDING_PATH"]
VECTORIZER_PATH=config["VECTORIZER_PATH"]
LABEL_ENCODER_PATH=config["LABEL_ENCODER_PATH"]
model_json_file=config["model_json_file"]
weights=config["weights"]
input_file=config["input_file"]
# Load the question/type dataset; the multi-character separator requires the
# python parsing engine (made explicit to silence pandas' fallback warning).
df = pd.read_csv(input_file, sep=",,,", header=None, names=['question', 'type'], engine='python')
df['type'] = df['type'].str.strip()
# Normalize questions: lowercase, then keep only alphanumerics and whitespace.
# BUG FIX: the original pattern '[^a-zA-z0-9\s]' used 'A-z', which also matches
# the punctuation characters between 'Z' and 'a' ([ \ ] ^ _ `), so those were
# never stripped; 'A-Z' (in a raw string) is the intended range.
df['question'] = df['question'].apply(lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x.lower()))
NUM_CLASSES = len(df['type'].unique())
print(df['type'].value_counts())
# Fit the tokenizer on the cleaned questions and convert them to padded
# integer-id sequences of uniform length.
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, split=' ')
tokenizer.fit_on_texts(df['question'].values)
X = tokenizer.texts_to_sequences(df['question'].values)
X = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH)
Y = df['type']
# Persist the tokenizer so inference can reproduce the same vectorization.
with open(VECTORIZER_PATH, 'wb') as fil:
    pkl.dump(tokenizer, fil)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
# Encode the string labels as integers, then one-hot them for
# categorical_crossentropy.
le = preprocessing.LabelEncoder()
le.fit(Y)
Y=le.transform(Y)
labels = to_categorical(np.asarray(Y))
# Persist the label encoder so predictions can be mapped back to class names.
with open(LABEL_ENCODER_PATH, 'wb') as fil:
    pkl.dump(le, fil)
# split the data into a training set and a validation set
indices = np.arange(X.shape[0])
np.random.shuffle(indices)
X = X[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * X.shape[0])
x_train = X[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_val = X[-nb_validation_samples:]
y_val = labels[-nb_validation_samples:]
# Build the word -> vector map from the GloVe text file (one "word v1 v2 ..."
# entry per line).  A context manager replaces the original open()/close()
# pair so the file handle cannot leak if a line fails to parse.
embeddings_index = {}
with open(GLOVE_EMBEDDING_PATH, encoding="utf8") as f:
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs
print('Found %s word vectors.' % len(embeddings_index))
# Rows are tokenizer word indices (index 0 is unused); words without a GloVe
# vector keep an all-zero row.
embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
for word, i in word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[i] = embedding_vector
# Frozen embedding layer initialised from the pretrained matrix.
embedding_layer = Embedding(len(word_index) + 1,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=False)
# Model: frozen GloVe embedding -> LSTM -> softmax over question types.
model = Sequential()
model.add(embedding_layer)
# NOTE(review): dropout_U/dropout_W are the Keras 1.x argument names
# (renamed to recurrent_dropout/dropout in Keras 2) -- confirm the
# installed Keras version supports them.
model.add(LSTM(LSTM_OUT, dropout_U=0.25, dropout_W=0.25))
model.add(Dense(NUM_CLASSES,activation='softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
print(model.summary())
# Keep only the best weights (by validation accuracy) and stop training
# after 10 epochs without improvement.
checkpoint = ModelCheckpoint(weights, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode='auto')
model.fit(x_train, y_train,
         batch_size=BATCH_SIZE,
         epochs=EPOCHS,
         validation_data=(x_val, y_val),
         callbacks = [checkpoint,early])
# serialize model to JSON
model_json = model.to_json()
with open(model_json_file, "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
# model.save_weights("model.h5")
print("Saved model to disk")
| [
"pickle.dump",
"json.load",
"pandas.read_csv",
"keras.preprocessing.sequence.pad_sequences",
"keras.callbacks.ModelCheckpoint",
"numpy.asarray",
"keras.layers.LSTM",
"sklearn.preprocessing.LabelEncoder",
"keras.preprocessing.text.Tokenizer",
"random.seed",
"numpy.arange",
"keras.callbacks.Earl... | [((521, 548), 'random.seed', 'random.seed', (["config['seed']"], {}), "(config['seed'])\n", (532, 548), False, 'import random\n'), ((1048, 1123), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {'sep': '""",,,"""', 'header': 'None', 'names': "['question', 'type']"}), "(input_file, sep=',,,', header=None, names=['question', 'type'])\n", (1059, 1123), True, 'import pandas as pd\n'), ((1387, 1431), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'MAX_NB_WORDS', 'split': '""" """'}), "(num_words=MAX_NB_WORDS, split=' ')\n", (1396, 1431), False, 'from keras.preprocessing.text import Tokenizer\n'), ((1541, 1585), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['X'], {'maxlen': 'MAX_SEQUENCE_LENGTH'}), '(X, maxlen=MAX_SEQUENCE_LENGTH)\n', (1554, 1585), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((1773, 1801), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (1799, 1801), False, 'from sklearn import preprocessing\n'), ((2012, 2033), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (2021, 2033), True, 'import numpy as np\n'), ((2035, 2061), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (2052, 2061), True, 'import numpy as np\n'), ((3202, 3214), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3212, 3214), False, 'from keras.models import Sequential\n'), ((3485, 3612), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['weights'], {'monitor': '"""val_acc"""', 'verbose': '(1)', 'save_best_only': '(True)', 'save_weights_only': '(False)', 'mode': '"""auto"""', 'period': '(1)'}), "(weights, monitor='val_acc', verbose=1, save_best_only=True,\n save_weights_only=False, mode='auto', period=1)\n", (3500, 3612), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((3618, 3705), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_acc"""', 
'min_delta': '(0)', 'patience': '(10)', 'verbose': '(1)', 'mode': '"""auto"""'}), "(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode=\n 'auto')\n", (3631, 3705), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((505, 517), 'json.load', 'json.load', (['f'], {}), '(f)\n', (514, 517), False, 'import json\n'), ((1649, 1673), 'pickle.dump', 'pkl.dump', (['tokenizer', 'fil'], {}), '(tokenizer, fil)\n', (1657, 1673), True, 'import pickle as pkl\n'), ((1857, 1870), 'numpy.asarray', 'np.asarray', (['Y'], {}), '(Y)\n', (1867, 1870), True, 'import numpy as np\n'), ((1922, 1939), 'pickle.dump', 'pkl.dump', (['le', 'fil'], {}), '(le, fil)\n', (1930, 1939), True, 'import pickle as pkl\n'), ((2476, 2515), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (2486, 2515), True, 'import numpy as np\n'), ((3254, 3300), 'keras.layers.LSTM', 'LSTM', (['LSTM_OUT'], {'dropout_U': '(0.25)', 'dropout_W': '(0.25)'}), '(LSTM_OUT, dropout_U=0.25, dropout_W=0.25)\n', (3258, 3300), False, 'from keras.layers import Dense, Embedding, LSTM\n'), ((3313, 3353), 'keras.layers.Dense', 'Dense', (['NUM_CLASSES'], {'activation': '"""softmax"""'}), "(NUM_CLASSES, activation='softmax')\n", (3318, 3353), False, 'from keras.layers import Dense, Embedding, LSTM\n'), ((1266, 1298), 're.sub', 're.sub', (['"""[^a-zA-z0-9\\\\s]"""', '""""""', 'x'], {}), "('[^a-zA-z0-9\\\\s]', '', x)\n", (1272, 1298), False, 'import re\n')] |
# Author: bbrighttaer
# Project: irelease
# Date: 7/15/2020
# Time: 11:59 AM
# File: internal_diversity.py
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import rdkit.Chem as Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
def verify_sequence(smile):
    """Return True when `smile` is a non-empty SMILES string that parses
    into a molecule with more than one atom."""
    if smile == '':
        return False
    parsed = Chem.MolFromSmiles(smile)
    return parsed is not None and parsed.GetNumAtoms() > 1
def batch_internal_diversity(smiles):
    """
    Calculates internal diversity of the given compounds.

    See http://arxiv.org/abs/1708.08227

    :param smiles: iterable of SMILES strings
    :return: mean Tanimoto-distance-based diversity over all valid compounds
             (invalid / trivial SMILES contribute 0.0)
    """
    mols = [Chem.MolFromSmiles(smi) for smi in smiles]
    fingerprints = [
        AllChem.GetMorganFingerprintAsBitVect(mol, 4, nBits=2048) for mol in mols
    ]
    scores = []
    for smi in smiles:
        if verify_sequence(smi):
            scores.append(bulk_tanimoto_distance(smi, fingerprints))
        else:
            scores.append(0.0)
    return np.mean(scores)
def bulk_tanimoto_distance(smile, fps):
    """Tanimoto distances between `smile` and every fingerprint in `fps`
    (Morgan fingerprints, radius 4, 2048 bits)."""
    reference = AllChem.GetMorganFingerprintAsBitVect(
        Chem.MolFromSmiles(smile), 4, nBits=2048
    )
    return DataStructs.BulkTanimotoSimilarity(reference, fps, returnDistance=True)
if __name__ == '__main__':
    # Smoke test: print the internal diversity of a handful of drug-like SMILES.
    test_smiles = [
        'COc1cc(Cc2ccccc2Cl)ncc1NC(=N)Nc1ccc(C(=O)N=C2NCCN2C)cc1',
        'Oc1ccc2ccccc2c1-c1nc2ccccc2[nH]1',
        'CN(C)CCn1c(Nc2ccccc2)nc2ccc(NC3CCCC3)cc21',
        'CCCCCCCCCCCCCC(=O)N(C(=O)CCCCCCCC(C)C)C(C)C',
        'COc1ccc(-c2ncnc(N3CCCC3C(=O)NC3CCOC3)n2)cc1',
    ]
    print(batch_internal_diversity(test_smiles))
| [
"numpy.mean",
"rdkit.DataStructs.BulkTanimotoSimilarity",
"rdkit.Chem.MolFromSmiles",
"rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect"
] | [((338, 363), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smile'], {}), '(smile)\n', (356, 363), True, 'import rdkit.Chem as Chem\n'), ((888, 901), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (895, 901), True, 'import numpy as np\n'), ((958, 983), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smile'], {}), '(smile)\n', (976, 983), True, 'import rdkit.Chem as Chem\n'), ((998, 1059), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', (['ref_mol', '(4)'], {'nBits': '(2048)'}), '(ref_mol, 4, nBits=2048)\n', (1035, 1059), False, 'from rdkit.Chem import AllChem\n'), ((1071, 1140), 'rdkit.DataStructs.BulkTanimotoSimilarity', 'DataStructs.BulkTanimotoSimilarity', (['ref_fps', 'fps'], {'returnDistance': '(True)'}), '(ref_fps, fps, returnDistance=True)\n', (1105, 1140), False, 'from rdkit import DataStructs\n'), ((660, 681), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['s'], {}), '(s)\n', (678, 681), True, 'import rdkit.Chem as Chem\n'), ((710, 765), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', (['m', '(4)'], {'nBits': '(2048)'}), '(m, 4, nBits=2048)\n', (747, 765), False, 'from rdkit.Chem import AllChem\n')] |
import argparse
from datetime import datetime
from pathlib import Path
from collections import defaultdict
import numpy as np
import torch as th
import torch.nn as nn
from torch.nn.utils import clip_grad_norm_
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import Transcriber, Transcriber_CRNN, Transcriber_ONF, Transcriber_RNN
from dataset import MAESTRO_small, allocate_batch
from evaluate import evaluate
from constants import HOP_SIZE
def cycle(iterable):
    """Yield items from `iterable` forever, restarting it after each pass.

    Unlike itertools.cycle this does not cache items: the iterable is
    re-iterated every pass, so a shuffling DataLoader produces a fresh
    order each epoch.
    """
    while True:
        yield from iterable
def train(model_type, logdir, batch_size, iterations, validation_interval, sequence_length, learning_rate, weight_decay, cnn_unit, fc_unit, debug=False, save_midi=False):
    """Train a piano-transcription model on MAESTRO, checkpointing at each
    validation, then evaluate on the test split and write results.txt.

    model_type selects one of 'baseline'/'rnn'/'crnn'/'ONF'; logdir defaults
    to a timestamped directory under runs/. sequence_length is rounded down
    to a multiple of HOP_SIZE.
    """
    if logdir is None:
        # Timestamped run directory, e.g. runs/exp_240101-120000
        logdir = Path('runs') / ('exp_' + datetime.now().strftime('%y%m%d-%H%M%S'))
    Path(logdir).mkdir(parents=True, exist_ok=True)
    if sequence_length % HOP_SIZE != 0:
        # Round down to a whole number of hops so frames align with audio.
        adj_length = sequence_length // HOP_SIZE * HOP_SIZE
        print(f'sequence_length: {sequence_length} is not divide by {HOP_SIZE}.\n \
            adjusted into : {adj_length}')
        sequence_length = adj_length
    if debug:
        # Tiny run: same data for train and validation, few iterations.
        dataset = MAESTRO_small(groups=['debug'], sequence_length=sequence_length, hop_size=HOP_SIZE, random_sample=True)
        valid_dataset = dataset
        iterations = 100
        validation_interval = 10
    else:
        dataset = MAESTRO_small(groups=['train'], sequence_length=sequence_length, hop_size=HOP_SIZE, random_sample=True)
        valid_dataset = MAESTRO_small(groups=['validation'], sequence_length=sequence_length, hop_size=HOP_SIZE, random_sample=False)
    loader = DataLoader(dataset, batch_size, shuffle=True)
    device = th.device('cuda') if th.cuda.is_available() else th.device('cpu')
    # Model selection; note an unknown model_type would leave `model` unbound.
    if model_type == 'baseline':
        model = Transcriber(cnn_unit=cnn_unit, fc_unit=fc_unit)
    elif model_type == 'rnn':
        model = Transcriber_RNN(cnn_unit=cnn_unit, fc_unit=fc_unit)
    elif model_type == 'crnn':
        model = Transcriber_CRNN(cnn_unit=cnn_unit, fc_unit=fc_unit)
    elif model_type == 'ONF':
        model = Transcriber_ONF(cnn_unit=cnn_unit, fc_unit=fc_unit)
    optimizer = th.optim.Adam(model.parameters(), learning_rate, weight_decay=weight_decay)
    scheduler = StepLR(optimizer, step_size=1000, gamma=0.98)
    criterion = nn.BCEWithLogitsLoss()
    model = model.to(device)
    loop = tqdm(range(1, iterations+1))
    # cycle(loader) restarts the (shuffling) train loader whenever it is exhausted.
    for step, batch in zip(loop, cycle(loader)):
        optimizer.zero_grad()
        batch = allocate_batch(batch, device)
        frame_logit, onset_logit = model(batch['audio'])
        frame_loss = criterion(frame_logit, batch['frame'])
        onset_loss = criterion(onset_logit, batch['onset'])
        loss = onset_loss + frame_loss
        loss.mean().backward()
        # Gradients are clipped per-parameter (each tensor individually) to norm 3.
        for parameter in model.parameters():
            clip_grad_norm_([parameter], 3.0)
        optimizer.step()
        scheduler.step()
        loop.set_postfix_str("loss: {:.3e}".format(loss.mean()))
        if step % validation_interval == 0:
            # Validation pass; rebinding `loader` here does not affect the
            # training iterator, which holds a reference to the original object.
            model.eval()
            with th.no_grad():
                loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)
                metrics = defaultdict(list)
                for batch in loader:
                    batch_results = evaluate(model, batch, device)
                    for key, value in batch_results.items():
                        metrics[key].extend(value)
                print('')
                for key, value in metrics.items():
                    if key[-2:] == 'f1' or 'loss' in key:
                        print(f'{key:27} : {np.mean(value):.4f}')
                model.train()
            # Checkpoint after every validation.
            th.save({'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'step' : step,
                    'cnn_unit' : cnn_unit,
                    'fc_unit' : fc_unit
                    },
                    Path(logdir) / f'model-{step}.pt')
    del dataset, valid_dataset
    # Final evaluation on the test split (batch size 1; optionally saves MIDI).
    test_dataset = MAESTRO_small(groups=['test'], hop_size=HOP_SIZE, random_sample=False)
    model.eval()
    with th.no_grad():
        loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
        metrics = defaultdict(list)
        for batch in loader:
            batch_results = evaluate(model, batch, device, save=save_midi, save_path=logdir)
            for key, value in batch_results.items():
                metrics[key].extend(value)
    print('')
    for key, value in metrics.items():
        if key[-2:] == 'f1' or 'loss' in key:
            print(f'{key} : {np.mean(value)}')
    # Persist all test metrics as "category name: mean +- std" lines.
    with open(Path(logdir) / 'results.txt', 'w') as f:
        for key, values in metrics.items():
            _, category, name = key.split('/')
            metric_string = f'{category:>32} {name:26}: {np.mean(values):.3f} +- {np.std(values):.3f}'
            print(metric_string)
            f.write(metric_string + '\n')
if __name__ == '__main__':
    # Command-line entry point; every flag maps 1:1 onto a train() parameter.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_type', default='baseline', type=str)
    parser.add_argument('--logdir', default=None, type=str)
    parser.add_argument('-v', '--sequence_length', default=102400, type=int)
    parser.add_argument('-lr', '--learning_rate', default=6e-4, type=float)
    parser.add_argument('-b', '--batch_size', default=16, type=int)
    parser.add_argument('-i', '--iterations', default=10000, type=int)
    parser.add_argument('-vi', '--validation_interval', default=1000, type=int)
    parser.add_argument('-wd', '--weight_decay', default=0)
    parser.add_argument('-cnn', '--cnn_unit', default=48, type=int)
    parser.add_argument('-fc', '--fc_unit', default=256, type=int)
    parser.add_argument('--save_midi', action='store_true')
    parser.add_argument('--debug', action='store_true')
    # NOTE(review): parse_args() is called twice and `args` is unused —
    # consider train(**vars(args)) on the next line.
    args = parser.parse_args()
    train(**vars(parser.parse_args())) | [
"torch.optim.lr_scheduler.StepLR",
"argparse.ArgumentParser",
"collections.defaultdict",
"pathlib.Path",
"model.Transcriber_ONF",
"numpy.mean",
"torch.device",
"torch.no_grad",
"torch.utils.data.DataLoader",
"numpy.std",
"model.Transcriber_CRNN",
"datetime.datetime.now",
"model.Transcriber_R... | [((1710, 1755), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset', 'batch_size'], {'shuffle': '(True)'}), '(dataset, batch_size, shuffle=True)\n', (1720, 1755), False, 'from torch.utils.data import DataLoader\n'), ((2338, 2383), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['optimizer'], {'step_size': '(1000)', 'gamma': '(0.98)'}), '(optimizer, step_size=1000, gamma=0.98)\n', (2344, 2383), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((2400, 2422), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (2420, 2422), True, 'import torch.nn as nn\n'), ((4088, 4158), 'dataset.MAESTRO_small', 'MAESTRO_small', ([], {'groups': "['test']", 'hop_size': 'HOP_SIZE', 'random_sample': '(False)'}), "(groups=['test'], hop_size=HOP_SIZE, random_sample=False)\n", (4101, 4158), False, 'from dataset import MAESTRO_small, allocate_batch\n'), ((5041, 5066), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5064, 5066), False, 'import argparse\n'), ((1237, 1345), 'dataset.MAESTRO_small', 'MAESTRO_small', ([], {'groups': "['debug']", 'sequence_length': 'sequence_length', 'hop_size': 'HOP_SIZE', 'random_sample': '(True)'}), "(groups=['debug'], sequence_length=sequence_length, hop_size=\n HOP_SIZE, random_sample=True)\n", (1250, 1345), False, 'from dataset import MAESTRO_small, allocate_batch\n'), ((1459, 1567), 'dataset.MAESTRO_small', 'MAESTRO_small', ([], {'groups': "['train']", 'sequence_length': 'sequence_length', 'hop_size': 'HOP_SIZE', 'random_sample': '(True)'}), "(groups=['train'], sequence_length=sequence_length, hop_size=\n HOP_SIZE, random_sample=True)\n", (1472, 1567), False, 'from dataset import MAESTRO_small, allocate_batch\n'), ((1587, 1700), 'dataset.MAESTRO_small', 'MAESTRO_small', ([], {'groups': "['validation']", 'sequence_length': 'sequence_length', 'hop_size': 'HOP_SIZE', 'random_sample': '(False)'}), "(groups=['validation'], sequence_length=sequence_length,\n hop_size=HOP_SIZE, 
random_sample=False)\n", (1600, 1700), False, 'from dataset import MAESTRO_small, allocate_batch\n'), ((1791, 1813), 'torch.cuda.is_available', 'th.cuda.is_available', ([], {}), '()\n', (1811, 1813), True, 'import torch as th\n'), ((1770, 1787), 'torch.device', 'th.device', (['"""cuda"""'], {}), "('cuda')\n", (1779, 1787), True, 'import torch as th\n'), ((1819, 1835), 'torch.device', 'th.device', (['"""cpu"""'], {}), "('cpu')\n", (1828, 1835), True, 'import torch as th\n'), ((1886, 1933), 'model.Transcriber', 'Transcriber', ([], {'cnn_unit': 'cnn_unit', 'fc_unit': 'fc_unit'}), '(cnn_unit=cnn_unit, fc_unit=fc_unit)\n', (1897, 1933), False, 'from model import Transcriber, Transcriber_CRNN, Transcriber_ONF, Transcriber_RNN\n'), ((2594, 2623), 'dataset.allocate_batch', 'allocate_batch', (['batch', 'device'], {}), '(batch, device)\n', (2608, 2623), False, 'from dataset import MAESTRO_small, allocate_batch\n'), ((4185, 4197), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (4195, 4197), True, 'import torch as th\n'), ((4216, 4269), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(test_dataset, batch_size=1, shuffle=False)\n', (4226, 4269), False, 'from torch.utils.data import DataLoader\n'), ((4288, 4305), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4299, 4305), False, 'from collections import defaultdict\n'), ((816, 828), 'pathlib.Path', 'Path', (['"""runs"""'], {}), "('runs')\n", (820, 828), False, 'from pathlib import Path\n'), ((887, 899), 'pathlib.Path', 'Path', (['logdir'], {}), '(logdir)\n', (891, 899), False, 'from pathlib import Path\n'), ((1980, 2031), 'model.Transcriber_RNN', 'Transcriber_RNN', ([], {'cnn_unit': 'cnn_unit', 'fc_unit': 'fc_unit'}), '(cnn_unit=cnn_unit, fc_unit=fc_unit)\n', (1995, 2031), False, 'from model import Transcriber, Transcriber_CRNN, Transcriber_ONF, Transcriber_RNN\n'), ((2931, 2964), 'torch.nn.utils.clip_grad_norm_', 'clip_grad_norm_', 
(['[parameter]', '(3.0)'], {}), '([parameter], 3.0)\n', (2946, 2964), False, 'from torch.nn.utils import clip_grad_norm_\n'), ((3998, 4010), 'pathlib.Path', 'Path', (['logdir'], {}), '(logdir)\n', (4002, 4010), False, 'from pathlib import Path\n'), ((4363, 4427), 'evaluate.evaluate', 'evaluate', (['model', 'batch', 'device'], {'save': 'save_midi', 'save_path': 'logdir'}), '(model, batch, device, save=save_midi, save_path=logdir)\n', (4371, 4427), False, 'from evaluate import evaluate\n'), ((2079, 2131), 'model.Transcriber_CRNN', 'Transcriber_CRNN', ([], {'cnn_unit': 'cnn_unit', 'fc_unit': 'fc_unit'}), '(cnn_unit=cnn_unit, fc_unit=fc_unit)\n', (2095, 2131), False, 'from model import Transcriber, Transcriber_CRNN, Transcriber_ONF, Transcriber_RNN\n'), ((3168, 3180), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (3178, 3180), True, 'import torch as th\n'), ((3207, 3270), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(valid_dataset, batch_size=batch_size, shuffle=False)\n', (3217, 3270), False, 'from torch.utils.data import DataLoader\n'), ((3297, 3314), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3308, 3314), False, 'from collections import defaultdict\n'), ((4685, 4697), 'pathlib.Path', 'Path', (['logdir'], {}), '(logdir)\n', (4689, 4697), False, 'from pathlib import Path\n'), ((2178, 2229), 'model.Transcriber_ONF', 'Transcriber_ONF', ([], {'cnn_unit': 'cnn_unit', 'fc_unit': 'fc_unit'}), '(cnn_unit=cnn_unit, fc_unit=fc_unit)\n', (2193, 2229), False, 'from model import Transcriber, Transcriber_CRNN, Transcriber_ONF, Transcriber_RNN\n'), ((3388, 3418), 'evaluate.evaluate', 'evaluate', (['model', 'batch', 'device'], {}), '(model, batch, device)\n', (3396, 3418), False, 'from evaluate import evaluate\n'), ((4874, 4889), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (4881, 4889), True, 'import numpy as np\n'), ((4899, 4913), 'numpy.std', 'np.std', (['values'], 
{}), '(values)\n', (4905, 4913), True, 'import numpy as np\n'), ((841, 855), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (853, 855), False, 'from datetime import datetime\n'), ((4652, 4666), 'numpy.mean', 'np.mean', (['value'], {}), '(value)\n', (4659, 4666), True, 'import numpy as np\n'), ((3715, 3729), 'numpy.mean', 'np.mean', (['value'], {}), '(value)\n', (3722, 3729), True, 'import numpy as np\n')] |
import shapely
import numpy as np
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
class Obstacle(object):
    """Axis-aligned rectangular obstacle attached to a reference `frame`.

    The rectangle is stored as a unit square scaled by (width, height) in
    local coordinates; `frame` maps those points into world coordinates.
    Collision tests use shapely; drawing uses matplotlib patches.
    """

    def __init__(self, frame, width, height):
        # frame: coordinate frame object providing origin() and
        # transform_points() — assumed local->world transform; TODO confirm.
        self.frame = frame
        self.width = width
        self.height = height
        # Unit square corners (counter-clockwise from top-left), then scaled.
        self.shape = np.array([ [-0.5, 0.5],[0.5, 0.5], [0.5, -0.5], [-0.5, -0.5] ])
        self.shape *= np.array([width, height])
        # Matplotlib artists created by draw(), kept so they can be removed.
        self.ax_artists = []
    def get_position(self):
        """World-frame position of the obstacle's center (frame origin)."""
        return self.frame.origin()
    def get_points(self):
        """Corner points transformed into the frame's coordinate system."""
        return self.frame.transform_points(self.shape)
    def get_collider(self):
        """Shapely polygon over the transformed corners, for collision tests."""
        return shapely.geometry.Polygon(self.get_points())
    def point_collides(self, x, y):
        """True if point (x, y) lies inside or on the obstacle's polygon."""
        point = shapely.geometry.Point(x, y)
        collider = self.get_collider()
        return point.intersects(collider)
    def draw(self, ax, color='red', clear=True):
        """Render the obstacle as a filled polygon on matplotlib axes `ax`.

        When `clear` is True, artists from previous draw() calls are removed
        first so the obstacle does not leave a trail when it moves.
        """
        if clear:
            for a in self.ax_artists:
                a.remove()
            self.ax_artists = []
        # Second positional arg is `closed=True` for the matplotlib Polygon.
        poly = Polygon(self.get_points(), True)
        p = PatchCollection([poly], alpha=1.0, facecolors=color)
        #p.set_array(np.array(colors))
        #ax.add_collection(p)
        self.ax_artists.append(ax.add_collection(p)) | [
"matplotlib.collections.PatchCollection",
"shapely.geometry.Point",
"numpy.array"
] | [((300, 362), 'numpy.array', 'np.array', (['[[-0.5, 0.5], [0.5, 0.5], [0.5, -0.5], [-0.5, -0.5]]'], {}), '([[-0.5, 0.5], [0.5, 0.5], [0.5, -0.5], [-0.5, -0.5]])\n', (308, 362), True, 'import numpy as np\n'), ((386, 411), 'numpy.array', 'np.array', (['[width, height]'], {}), '([width, height])\n', (394, 411), True, 'import numpy as np\n'), ((728, 756), 'shapely.geometry.Point', 'shapely.geometry.Point', (['x', 'y'], {}), '(x, y)\n', (750, 756), False, 'import shapely\n'), ((1065, 1117), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['[poly]'], {'alpha': '(1.0)', 'facecolors': 'color'}), '([poly], alpha=1.0, facecolors=color)\n', (1080, 1117), False, 'from matplotlib.collections import PatchCollection\n')] |
import os
import pprint as pp
import random
import sys
import numpy as np
import networkx as nx
# argv[1]: name (substring) of the application whose samples are held out
# as the test split; see the train/test assignment in the main loop below.
test_app = sys.argv[1]
def convert(integer, length):
    """One-hot encode `integer` as a list of `length` ints (1 at that index)."""
    return [1 if position == integer else 0 for position in range(length)]
def maxminnor(cols):
    """Min-max normalize a list of hex strings to floats in [0, 1],
    preserving the input order.

    Bug fix: the original aliased its working list (`temp1 = temp`) and then
    called `temp1.sort()`, which sorted the list in place — so the returned
    values were normalized but in SORTED order, not the order of `cols`.
    Here min/max are computed without mutating the value list.

    :param cols: iterable of hexadecimal strings (e.g. 'ff', '0a')
    :return: list of floats, same order as `cols`
    :raises ZeroDivisionError: if all values are equal (unchanged behavior)
    """
    values = [int(col, 16) for col in cols]
    min_value = min(values)
    max_value = max(values)
    span = max_value - min_value
    return [(value - min_value) / span for value in values]
# Build one big directed graph over all fault-injection samples, assign each
# node a label (0=Masked, 1=SDC, 2=other/Detected) and a train/test split flag,
# then one-hot encode per-node attributes into a feature matrix saved as .npy.
labels = []
train = []
node_list = []
node_type = []
bit_loc = []
op_type =[]
G = nx.DiGraph()
gl_dir = os.environ.get('GRAPHLEARN')
save_dir = gl_dir + '/gdata/'
temp_file = open( save_dir + 'bugfile', 'w')
# argv[2]: number of sample graphs to scan.
SAMPLES = int(sys.argv[2])
TRAIN_SAMPLES = int(SAMPLES * 1)
# Map file: one line per sample, first token is the sample's app name.
map_file = open(gl_dir+'/source_data/num_source/nfile_efile_glearn.map', 'r')
l = map_file.readlines()
map_file.close()
sample_list = [l_.split()[0] for l_ in l]
print(len(sample_list))
print(sample_list)
app_map = {}
feats_part =[]
#feats_6=[]
#feats_7=[]
#feats_8=[]
#feats_9=[]
for j in range(0, SAMPLES):
    print(j)
    #if 'TRIT_' not in sample_list[j] and 'RS232_' not in sample_list[j]:
    # continue
    if os.path.exists((gl_dir+'/source_data/num_source/{}.nodelist').format(j)):
        nodefile = (gl_dir+'/source_data/num_source/{}.nodelist').format(j)
        edgefile = (gl_dir+'/source_data/num_source/{}.edgelist').format(j)
        # Offset so node ids from this sample don't collide with earlier samples.
        num_nodes = len(node_list)
        id_map = {}
        with open(nodefile) as ff:
            for i, line in enumerate(ff):
                info = line.split()
                id_map[info[0]] = i
                if i==0 :
                    #app_map[sample_list[j]] = num_nodes
                    app_map[num_nodes] = sample_list[j]
                #print('%d %s \n' % (i+num_nodes, info[0]))
                #attr = maxminnor(info[1])
                #G.add_node(i+num_nodes, attribute=info[1]) #4:-1
                G.add_node(i+num_nodes, attribute=info[4:-1]) #4:-1
                feats_part.append(info[7:-1])
                #if (info[6] != '0' and info[6] != '1') or (info[7] != '0' and info[7] != '1') or (info[8] != '0' and info[8] != '1') or (info[8] != '0' and info[9] != '1'):
                # Flag nodes whose outcome string is none of the expected kinds.
                if ('SDC' not in info[-1]) and ('Masked' not in info[-1]) and ('Detected' not in info[-1]):
                    #temp_file.write(info[0] + '\n')
                    print(info[0])
                # Label: 0 = Masked, 1 = SDC variants, 2 = everything else.
                if info[-1] == 'Masked':
                    label = 0
                elif(info[-1] == 'SDC:Eggregious' or info[-1] == 'SDC:Eggregious-pixel_mismatch' or info[-1] == 'SDC:Eggregious-line_num_mismatch' or info[-1] == 'SDC:Tolerable'):
                    label = 1
                else:
                    label = 2
                labels.append(label)
                #if 'blackscholes_1_bit' in sample_list[j] or 'blackscholes_2_bit' in sample_list[j] or 'blackscholes_3_bit' in sample_list[j] or 'blackscholes_bit' in sample_list[j] or 'blackscholes_16_bit' in sample_list[j]:
                # Held-out app goes to the test split (flag 0); the rest train (1).
                if test_app in sample_list[j]:
                    train.append(0)
                else:#elif 'lu_cb_bit' in sample_list[j]:
                    train.append(1)
                #id_map[info[0]] = int(info[0])+num_nodes
                node_list.append(i+num_nodes)
                #node_type.append(info[4]) # bit-level graph
                op_type.append(info[4])
                node_type.append(info[5])
                bit_loc.append(info[6])
                temp_file.write(info[-1] + '\n')
        with open(edgefile) as ff:
            for i, line in enumerate(ff):
                info = line.split()
                G.add_edge(id_map[info[0]]+num_nodes, id_map[info[1]]+num_nodes)
        #print('Working on Sample ID: ', j)
#temp_file.write(node_type)
print('Train size: ' + str(len(train)))
print('Labels size: ' + str(len(labels)))
nx.write_edgelist(G, save_dir+test_app+"_all.edgelist")
#nx.write_nodelist(G, save_dir+"all.nodelist")
# Labels file: one line per node — "<node_id> <label> <train_flag>".
with open(save_dir+test_app+"_labels.txt", "w") as ff:
    for i in range(len(labels)):
        ff.write(str(i))
        ff.write(" ")
        ff.write(str(labels[i]))
        ff.write(" ")
        ff.write(str(train[i]))
        ff.write("\n")
# Build vocabularies (value -> index) for each categorical node attribute.
all_op = {}# reg type in x86
for x in op_type:
    if x not in all_op:
        all_op[x] = len(all_op)
num_ops = len(all_op)
#print(all_op)
print('Size of opcode type vocubulary; ', num_ops)
all_type = {}# reg type in x86
for x in node_type:
    if x not in all_type:
        all_type[x] = len(all_type)
num_types = len(all_type)
#print(all_type)
print('Size of reg type vocubulary; ', num_types)
all_loc = {}# bit localation
for x in bit_loc:
    if x not in all_loc:
        all_loc[x] = len(all_loc)
num_loc = len(all_loc)
#print('Size of bit loc vocubulary; ', num_loc)
# One-hot encode each attribute via convert() defined above.
feats0 = []
feats1 = []
feats2 = []
#feats = maxminnor(node_type)
#print(feats)
for x in op_type:
    feats0.append(convert(all_op[x], num_ops))
for x in node_type:
    feats1.append(convert(all_type[x], num_types))
#print(feats)
for x in bit_loc:
    feats2.append(convert(all_loc[x],num_loc))
#features = np.array(feats)
features0 = np.array([np.array(x) for x in feats0])
features1 = np.array([np.array(x) for x in feats1])
features2 = np.array([np.array(x) for x in feats2])
#features6 = np.array( feats_6)
#features7 = np.array( feats_7)
#features8 = np.array( feats_8)
#features9 = np.array( feats_9)
feats_part_new = []
for x in feats_part:
    feats_part_new.append([int(a) for a in x])
features_part = np.array([np.array(x) for x in feats_part_new])
#print(features_part.ndim)
print(features_part.shape)
# Concatenate all one-hot blocks plus raw numeric features into one matrix.
features1 = np.concatenate((features0,features1),axis=1)
features1 = np.concatenate((features1,features2),axis=1)
features1 = np.concatenate((features1,features_part),axis=1)
np.save(save_dir+test_app+"_feats.npy", features1)
#print(len(node_list))
#print(labels)
#print(node_type)
#print(len(G.edges()))
| [
"numpy.save",
"networkx.write_edgelist",
"os.environ.get",
"numpy.array",
"networkx.DiGraph",
"numpy.concatenate"
] | [((653, 665), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (663, 665), True, 'import networkx as nx\n'), ((675, 703), 'os.environ.get', 'os.environ.get', (['"""GRAPHLEARN"""'], {}), "('GRAPHLEARN')\n", (689, 703), False, 'import os\n'), ((3954, 4013), 'networkx.write_edgelist', 'nx.write_edgelist', (['G', "(save_dir + test_app + '_all.edgelist')"], {}), "(G, save_dir + test_app + '_all.edgelist')\n", (3971, 4013), True, 'import networkx as nx\n'), ((5720, 5766), 'numpy.concatenate', 'np.concatenate', (['(features0, features1)'], {'axis': '(1)'}), '((features0, features1), axis=1)\n', (5734, 5766), True, 'import numpy as np\n'), ((5777, 5823), 'numpy.concatenate', 'np.concatenate', (['(features1, features2)'], {'axis': '(1)'}), '((features1, features2), axis=1)\n', (5791, 5823), True, 'import numpy as np\n'), ((5834, 5884), 'numpy.concatenate', 'np.concatenate', (['(features1, features_part)'], {'axis': '(1)'}), '((features1, features_part), axis=1)\n', (5848, 5884), True, 'import numpy as np\n'), ((5884, 5938), 'numpy.save', 'np.save', (["(save_dir + test_app + '_feats.npy')", 'features1'], {}), "(save_dir + test_app + '_feats.npy', features1)\n", (5891, 5938), True, 'import numpy as np\n'), ((5230, 5241), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5238, 5241), True, 'import numpy as np\n'), ((5282, 5293), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5290, 5293), True, 'import numpy as np\n'), ((5334, 5345), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5342, 5345), True, 'import numpy as np\n'), ((5616, 5627), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5624, 5627), True, 'import numpy as np\n')] |
from numpy import finfo
from ._make_annotations import _make_annotations
from ._process_target_or_features_for_plotting import (
_process_target_or_features_for_plotting,
)
from ._style import (
ANNOTATION_FONT_SIZE,
ANNOTATION_WIDTH,
LAYOUT_SIDE_MARGIN,
LAYOUT_WIDTH,
ROW_HEIGHT,
)
from .plot.plot.plot_and_save import plot_and_save
from .support.support.iterable import make_object_int_mapping
# Machine epsilon; used below to snap near-zero axis-domain bounds to exactly 0.
eps = finfo(float).eps
def make_summary_match_panel(
    target,
    data_dicts,
    score_moe_p_value_fdr,
    plot_only_columns_shared_by_target_and_all_features=False,
    target_ascending=True,
    target_type="continuous",
    plot_std=None,
    title="Summary Match Panel",
    layout_width=LAYOUT_WIDTH,
    row_height=ROW_HEIGHT,
    layout_side_margin=LAYOUT_SIDE_MARGIN,
    annotation_font_size=ANNOTATION_FONT_SIZE,
    html_file_path=None,
    plotly_file_path=None,
):
    """Plot a stacked heatmap panel: one target row on top, then one heatmap
    block per entry of `data_dicts`, each annotated with its score/MoE/p-value/
    FDR columns from `score_moe_p_value_fdr`, and save via plot_and_save.

    `target` is assumed to be a pandas Series and each data_dict["df"] a
    DataFrame whose columns align with target.index — TODO confirm.
    """
    if target.name is None:
        target_name = "Target"
    else:
        target_name = target.name
    if plot_only_columns_shared_by_target_and_all_features:
        # Restrict the target to columns present in every feature DataFrame.
        for data_dict in data_dicts.values():
            target = target.loc[target.index & data_dict["df"].columns]
    if target.dtype == "O":
        # Object dtype: map category values to ints so they can be heatmapped.
        target = target.map(make_object_int_mapping(target)[0])
    if target_ascending is not None:
        target.sort_values(ascending=target_ascending, inplace=True)
    target, target_plot_min, target_plot_max, target_colorscale = _process_target_or_features_for_plotting(
        target, target_type, plot_std
    )
    # Total rows: the target row, one spacer per data block, and all features.
    n_row = 1 + len(data_dicts)
    for data_dict in data_dicts.values():
        n_row += data_dict["df"].shape[0]
    layout = dict(
        width=layout_width,
        margin=dict(l=layout_side_margin, r=layout_side_margin),
        xaxis=dict(anchor="y"),
        height=row_height / 2 * max(10, n_row),
        title=title,
        annotations=[],
    )
    row_fraction = 1 / n_row
    # Axes are numbered top-down; plotly spells "yaxis1" as just "yaxis".
    yaxis_name = "yaxis{}".format(len(data_dicts) + 1).replace("axis1", "axis")
    domain_end = 1
    domain_start = domain_end - row_fraction
    if abs(domain_start) <= eps:
        domain_start = 0
    layout[yaxis_name] = dict(
        domain=(domain_start, domain_end), tickfont=dict(size=annotation_font_size)
    )
    # First trace: the target itself as a single-row heatmap.
    data = [
        dict(
            yaxis=yaxis_name.replace("axis", ""),
            type="heatmap",
            z=target.to_frame().T.values,
            x=target.index,
            y=(target_name,),
            text=(target.index,),
            zmin=target_plot_min,
            zmax=target_plot_max,
            colorscale=target_colorscale,
            showscale=False,
        )
    ]
    for data_name_index, (data_name, data_dict) in enumerate(data_dicts.items()):
        print("Making match panel for {} ...".format(data_name))
        df = data_dict["df"]
        features_to_plot = df[df.columns & target.index]
        # Order features by Score; "emphasis" == "low" flips to ascending.
        score_moe_p_value_fdr_to_plot = score_moe_p_value_fdr.loc[
            features_to_plot.index
        ].sort_values("Score", ascending=data_dict.get("emphasis", "high") == "low")
        features_to_plot = features_to_plot.loc[score_moe_p_value_fdr_to_plot.index]
        annotations = _make_annotations(
            score_moe_p_value_fdr_to_plot.dropna(axis=1, how="all")
        )
        features_to_plot, features_plot_min, features_plot_max, features_colorscale = _process_target_or_features_for_plotting(
            features_to_plot, data_dict["data_type"], plot_std
        )
        yaxis_name = "yaxis{}".format(len(data_dicts) - data_name_index).replace(
            "axis1", "axis"
        )
        # Walk the y-domain downward, snapping near-zero bounds to exactly 0.
        domain_end = domain_start - row_fraction
        if abs(domain_end) <= eps:
            domain_end = 0
        domain_start = domain_end - data_dict["df"].shape[0] * row_fraction
        if abs(domain_start) <= eps:
            domain_start = 0
        layout[yaxis_name] = dict(
            domain=(domain_start, domain_end),
            dtick=1,
            tickfont=dict(size=annotation_font_size),
        )
        # Feature rows are reversed so the best-scoring feature plots on top.
        data.append(
            dict(
                yaxis=yaxis_name.replace("axis", ""),
                type="heatmap",
                z=features_to_plot.values[::-1],
                x=features_to_plot.columns,
                y=features_to_plot.index[::-1],
                zmin=features_plot_min,
                zmax=features_plot_max,
                colorscale=features_colorscale,
                showscale=False,
            )
        )
        layout_annotation_template = dict(
            xref="paper",
            yref="paper",
            yanchor="middle",
            font=dict(size=annotation_font_size),
            showarrow=False,
        )
        # Block title centered above the block's heatmap.
        layout["annotations"].append(
            dict(
                xanchor="center",
                x=0.5,
                y=domain_end + (row_fraction / 2),
                text="<b>{}</b>".format(data_name),
                **layout_annotation_template,
            )
        )
        layout_annotation_template.update(dict(xanchor="left", width=ANNOTATION_WIDTH))
        # One annotation column (score, MoE, ...) per loop; columns are laid out
        # to the right of the heatmaps at x > 1 in paper coordinates.
        for (
            annotation_index,
            (annotation_column_name, annotation_column_strs),
        ) in enumerate(annotations.items()):
            x = 1.0016 + annotation_index / 10
            if data_name_index == 0:
                # Column headers are written once, aligned with the target row.
                layout["annotations"].append(
                    dict(
                        x=x,
                        y=1 - (row_fraction / 2),
                        text="<b>{}</b>".format(annotation_column_name),
                        **layout_annotation_template,
                    )
                )
            y = domain_end - (row_fraction / 2)
            for str_ in annotation_column_strs:
                layout["annotations"].append(
                    dict(
                        x=x,
                        y=y,
                        text="<b>{}</b>".format(str_),
                        **layout_annotation_template,
                    )
                )
                y -= row_fraction
    plot_and_save(dict(layout=layout, data=data), html_file_path, plotly_file_path)
| [
"numpy.finfo"
] | [((428, 440), 'numpy.finfo', 'finfo', (['float'], {}), '(float)\n', (433, 440), False, 'from numpy import finfo\n')] |
import os
import random
import numpy as np
import torch.utils.data
import misc
from data.structure import RelationType, ObjectType, AttributeType, Triple, Structure
class PreprocessDataset(torch.utils.data.Dataset):
    """Map-style dataset that lazily loads one array (`key`) from each file.

    This is only used to load data for preprocessing purposes.
    """

    def __init__(self, data_files, key):
        self.data_files = data_files
        self.key = key

    def __getitem__(self, ind):
        # Load the archive on demand and return just the requested entry.
        archive = np.load(self.data_files[ind])
        return archive[self.key]

    def __len__(self):
        return len(self.data_files)
def parse_structure(structure_arr):
    """Decode an array of byte-string triples into a Structure.

    Each row holds (object, attribute, relation) names which are decoded,
    upper-cased, and looked up in the corresponding enums to build a Triple.
    """
    structure = Structure()
    for raw_triple in structure_arr:
        fields = [entry.decode('UTF-8').upper() for entry in raw_triple]
        structure.add(
            Triple(RelationType[fields[2]], ObjectType[fields[0]], AttributeType[fields[1]])
        )
    return structure
def save_structure_to_files(regime):
    '''Parse the metadata to compute a mapping between structures and their corresponding datapoints.

    Scans every datapoint under ../data/<regime>/, groups the file paths by the
    string form of their relation structure, caches the mapping at
    save_state/<regime>/structure_to_files.pkl, and returns it.
    '''
    print('Saving structure metadata')
    data_dir = '../data/' + regime + '/'
    data_files = [data_dir + data_file for data_file in os.listdir(data_dir)]
    structure_to_files = {}
    for count, data_file in enumerate(data_files):
        if count % 10000 == 0:  # progress indicator
            print(count, '/', len(data_files))
        data = np.load(data_file)
        # TODO Serialize structure instead of using string representation
        structure = parse_structure(data['relation_structure']).to_str()
        structure_to_files.setdefault(structure, []).append(data_file)
    # Bug fix: the original ran `if os.path.exists(...): os.mkdir(...)`, which
    # raised FileExistsError when the directory already existed and never
    # created it when absent. makedirs(exist_ok=True) handles both cases and
    # also creates the 'save_state' parent directory if needed.
    os.makedirs('save_state/' + regime, exist_ok=True)
    misc.save_file(structure_to_files, 'save_state/'+regime+'/structure_to_files.pkl')
    return structure_to_files
def compute_data_files(regime, n, args):
    '''Sort the data files in increasing order of complexity, then return the n least complex datapoints.

    Special values of n select different file subsets:
      n == -1: every file in ../data/<regime>/.
      n == -2: per-structure train files (capped at 5000 each, ordered by
               structure complexity) plus all test files.
      n == -3: only structures with 10000-20000 datapoints.
      n == -4: files from a hard-coded external directory.
      otherwise: structures filtered by args.image_type (image/shape_im/line_im),
               excluding multi-triple structures (those containing "),(").
    '''
    data_dir = '../data/'+regime+'/'
    if n == -1:
        data_files = os.listdir(data_dir)
        data_files = [data_dir + data_file for data_file in data_files]
        return data_files
    elif n == -2:
        test_files = [data_dir + data_file for data_file in os.listdir(data_dir) if 'test' in data_file]
        data_files = []
        # Reuse the cached structure->files mapping when available.
        if os.path.exists('save_state/' + regime + '/structure_to_files.pkl'):
            print('Loading structure metadata')
            structure_to_files = misc.load_file('save_state/' + regime + '/structure_to_files.pkl')
        else:
            structure_to_files = save_structure_to_files(regime)
        all_structure_strs = list(structure_to_files.keys())
        # Comma count in the structure string is a proxy for complexity.
        all_structure_strs.sort(key=lambda x: x.count(','))
        for structure_str in all_structure_strs:
            data_i=structure_to_files[structure_str]
            if len(data_i)>5000:
                data_i=data_i[:5000]
            data_files.extend(data_i)
            #print(structure_str, len(structure_to_files[structure_str]))
        data_files=[data_file for data_file in data_files if 'train' in data_file]
        data_files.extend(test_files)
        return data_files
    elif n == -3:
        data_files = []
        if os.path.exists('save_state/' + regime + '/structure_to_files.pkl'):
            print('Loading structure metadata')
            structure_to_files = misc.load_file('save_state/' + regime + '/structure_to_files.pkl')
        else:
            structure_to_files = save_structure_to_files(regime)
        all_structure_strs = list(structure_to_files.keys())
        all_structure_strs.sort(key=lambda x: x.count(','))
        for structure_str in all_structure_strs:
            data_i=structure_to_files[structure_str]
            # Keep only mid-sized structures (10000 <= count <= 20000).
            if len(data_i)>20000 or len(data_i)<10000:
                continue
            data_files.extend(data_i)
            print(structure_str, len(structure_to_files[structure_str]))
        return data_files
    elif n==-4:
        data_files = os.listdir("/home/zkc/reason/andshapecolormask/")
        data_files = ["/home/zkc/reason/andshapecolormask/" + data_file for data_file in data_files]
        return data_files
    else:
        data_files = []
        if os.path.exists('save_state/'+regime+'/structure_to_files.pkl'):
            print('Loading structure metadata')
            structure_to_files = misc.load_file('save_state/'+regime+'/structure_to_files.pkl')
        else:
            structure_to_files = save_structure_to_files(regime)
        all_structure_strs = list(structure_to_files.keys())
        # The number of commas in the structure_str is used as a proxy for complexity
        i=0
        all_structure_strs.sort(key=lambda x: x.count(','))
        for structure_str in all_structure_strs:
            data_i=structure_to_files[structure_str]
            # Single-triple structures only ("),(" marks multi-triple ones).
            if args.image_type=="image":
                if "SHAPE" in structure_str and "),("not in structure_str:
                    data_files.extend(data_i)
                    print(structure_str, ":", len(data_i))
                if "LINE" in structure_str and "),("not in structure_str:
                    data_files.extend(data_i)
                    print(structure_str, ":", len(data_i))
            elif args.image_type=="shape_im":
                if "SHAPE" in structure_str and "),("not in structure_str:
                    data_files.extend(data_i)
                    print(structure_str, ":", len(data_i))
            elif args.image_type=="line_im":
                if "LINE" in structure_str and "),("not in structure_str:
                    data_files.extend(data_i)
                    print(structure_str, ":", len(data_i))
        return data_files
'''
class RelationType(Enum):
PROGRESSION = 1
XOR = 2
OR = 3
AND = 4
CONSISTENT_UNION = 5
class ObjectType(Enum):
SHAPE = 1
LINE = 2
class AttributeType(Enum):
SIZE = 1
TYPE = 2
COLOR = 3
POSITION = 4
NUMBER = 5
'''
def provide_data(regime, n, a, s, base=4000,
                 data_dir='/home/lab/zkc/reason/process_data/reason_data/reason_data/RAVEN-10000/'):
    """Collect RAVEN training .npz files and sample a mixture across n buckets.

    Parameters
    ----------
    regime :
        Unused here; kept for interface compatibility with the other loaders.
    n : int
        Number of buckets. Digit fields embedded in each file name index
        into these buckets.
    a : sequence of float
        Per-bucket sampling fractions; bucket i contributes int(base * a[i])
        files to the output.
    s :
        Unused here; kept for interface compatibility.
    base : int, optional
        Base sample count per bucket (previously hard-coded to 4000).
    data_dir : str, optional
        Root directory containing the RAVEN subdirectories (previously
        hard-coded).

    Returns
    -------
    list of str
        Sampled file paths (each bucket shuffled before truncation).
    """
    # Gather every training .npz file under every subdirectory.
    data_files = [
        data_dir + subdir + "/" + filename
        for subdir in os.listdir(data_dir)
        for filename in os.listdir(data_dir + subdir)
        if "npz" in filename and "train" in filename
    ]
    # Bucket each file by the digit fields of its name: fields 3+ of the
    # underscore-split stem (extension stripped). A file may land in
    # several buckets if its name carries several digit fields.
    train_files = [[] for _ in range(n)]
    for data_file in data_files:
        name_ = data_file[:-4].split("/")[-1].split("_")[3:]
        for number_ in name_:
            train_files[int(number_)].append(data_file)
    # Shuffle each bucket and keep the requested fraction of it.
    df = []
    for i in range(n):
        random.shuffle(train_files[i])
        df.extend(train_files[i][:int(base * a[i])])
    return df
def save_normalization_stats(regime, batch_size=100):
    """Compute and persist the global mean/std of the training images.

    Makes two passes over the data: the first accumulates the global mean,
    the second the variance about that mean. Statistics are pooled jointly
    across all channels/pixels and saved to
    ``save_state/<regime>/normalization_stats.pkl``.

    Parameters
    ----------
    regime : str
        Data regime; selects ``../data/<regime>/`` as the input directory
        and the save_state subdirectory for the output.
    batch_size : int, optional
        DataLoader batch size (default 100).

    Returns
    -------
    (float, float)
        ``(x_mean, x_sd)``.
    """
    print('Saving normalization stats')
    data_dir = '../data/' + regime + '/'
    entries = os.listdir(data_dir)
    data_files = [data_dir + entry for entry in entries]
    # Keep only paths mentioning 'train' (matches against the full path).
    train_files = [path for path in data_files if 'train' in path]
    loader = torch.utils.data.DataLoader(PreprocessDataset(train_files, 'image'), batch_size=batch_size)
    print('Computing x_mean')
    # NOTE: renamed from `sum`, which shadowed the builtin.
    total = 0
    n = 0
    count = 0
    for x in loader:
        total += x.sum()
        n += x.numel()
        count += batch_size
        # NOTE(review): `count` counts samples while len(train_files) counts
        # files; the progress ratio is only meaningful if they correspond 1:1.
        if count % 100000 == 0:
            print(count, '/', len(train_files))
    x_mean = float(total / n)
    print('Computing x_sd')
    total = 0
    n = 0
    count = 0
    for x in loader:
        total += ((x - x_mean)**2).sum()
        n += x.numel()
        count += batch_size
        if count % 100000 == 0:
            print(count, '/', len(train_files))
    x_sd = float(np.sqrt(total / n))
    misc.save_file((x_mean, x_sd), 'save_state/'+regime+'/normalization_stats.pkl')
    return x_mean, x_sd
"os.mkdir",
"numpy.load",
"misc.load_file",
"random.shuffle",
"os.path.exists",
"misc.save_file",
"data.structure.Structure",
"data.structure.Triple",
"os.listdir",
"numpy.sqrt"
] | [((641, 652), 'data.structure.Structure', 'Structure', ([], {}), '()\n', (650, 652), False, 'from data.structure import RelationType, ObjectType, AttributeType, Triple, Structure\n'), ((1251, 1271), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (1261, 1271), False, 'import os\n'), ((1862, 1900), 'os.path.exists', 'os.path.exists', (["('save_state/' + regime)"], {}), "('save_state/' + regime)\n", (1876, 1900), False, 'import os\n'), ((1943, 2033), 'misc.save_file', 'misc.save_file', (['structure_to_files', "('save_state/' + regime + '/structure_to_files.pkl')"], {}), "(structure_to_files, 'save_state/' + regime +\n '/structure_to_files.pkl')\n", (1957, 2033), False, 'import misc\n'), ((6527, 6547), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (6537, 6547), False, 'import os\n'), ((7351, 7371), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (7361, 7371), False, 'import os\n'), ((8190, 8277), 'misc.save_file', 'misc.save_file', (['(x_mean, x_sd)', "('save_state/' + regime + '/normalization_stats.pkl')"], {}), "((x_mean, x_sd), 'save_state/' + regime +\n '/normalization_stats.pkl')\n", (8204, 8277), False, 'import misc\n'), ((479, 497), 'numpy.load', 'np.load', (['data_file'], {}), '(data_file)\n', (486, 497), True, 'import numpy as np\n'), ((871, 967), 'data.structure.Triple', 'Triple', (['RelationType[triple_arr[2]]', 'ObjectType[triple_arr[0]]', 'AttributeType[triple_arr[1]]'], {}), '(RelationType[triple_arr[2]], ObjectType[triple_arr[0]],\n AttributeType[triple_arr[1]])\n', (877, 967), False, 'from data.structure import RelationType, ObjectType, AttributeType, Triple, Structure\n'), ((1511, 1529), 'numpy.load', 'np.load', (['data_file'], {}), '(data_file)\n', (1518, 1529), True, 'import numpy as np\n'), ((1908, 1940), 'os.mkdir', 'os.mkdir', (["('save_state/' + regime)"], {}), "('save_state/' + regime)\n", (1916, 1940), False, 'import os\n'), ((2281, 2301), 'os.listdir', 'os.listdir', (['data_dir'], {}), 
'(data_dir)\n', (2291, 2301), False, 'import os\n'), ((6574, 6603), 'os.listdir', 'os.listdir', (['(data_dir + subdir)'], {}), '(data_dir + subdir)\n', (6584, 6603), False, 'import os\n'), ((6990, 7020), 'random.shuffle', 'random.shuffle', (['train_files[i]'], {}), '(train_files[i])\n', (7004, 7020), False, 'import random\n'), ((8168, 8184), 'numpy.sqrt', 'np.sqrt', (['(sum / n)'], {}), '(sum / n)\n', (8175, 8184), True, 'import numpy as np\n'), ((2558, 2624), 'os.path.exists', 'os.path.exists', (["('save_state/' + regime + '/structure_to_files.pkl')"], {}), "('save_state/' + regime + '/structure_to_files.pkl')\n", (2572, 2624), False, 'import os\n'), ((2707, 2773), 'misc.load_file', 'misc.load_file', (["('save_state/' + regime + '/structure_to_files.pkl')"], {}), "('save_state/' + regime + '/structure_to_files.pkl')\n", (2721, 2773), False, 'import misc\n'), ((3459, 3525), 'os.path.exists', 'os.path.exists', (["('save_state/' + regime + '/structure_to_files.pkl')"], {}), "('save_state/' + regime + '/structure_to_files.pkl')\n", (3473, 3525), False, 'import os\n'), ((2478, 2498), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (2488, 2498), False, 'import os\n'), ((3608, 3674), 'misc.load_file', 'misc.load_file', (["('save_state/' + regime + '/structure_to_files.pkl')"], {}), "('save_state/' + regime + '/structure_to_files.pkl')\n", (3622, 3674), False, 'import misc\n'), ((4232, 4281), 'os.listdir', 'os.listdir', (['"""/home/zkc/reason/andshapecolormask/"""'], {}), "('/home/zkc/reason/andshapecolormask/')\n", (4242, 4281), False, 'import os\n'), ((4455, 4521), 'os.path.exists', 'os.path.exists', (["('save_state/' + regime + '/structure_to_files.pkl')"], {}), "('save_state/' + regime + '/structure_to_files.pkl')\n", (4469, 4521), False, 'import os\n'), ((4600, 4666), 'misc.load_file', 'misc.load_file', (["('save_state/' + regime + '/structure_to_files.pkl')"], {}), "('save_state/' + regime + '/structure_to_files.pkl')\n", (4614, 4666), False, 'import 
misc\n')] |
import numpy as np
from .kernels import gaussian
from .utils import estimate_bandwidth
class Ckde:
    """Conditional kernel density estimator for p(y | w).

    Fits a Gaussian product-kernel density over training pairs (y, w) and
    scores test targets conditioned on test w-values, weighting each
    training sample by its w-kernel responsibility.
    """

    def __init__(self):
        # All attributes are populated by fit() / score_samples().
        self.y_train = None       # copied target samples
        self.w_train = None       # copied conditioning samples
        self.m_train = None       # number of training samples
        self.n_y = None           # dimensionality of y
        self.n_w = None           # dimensionality of w
        self.bandwidth_y = None   # per-dimension bandwidths for y
        self.bandwidth_w = None   # per-dimension bandwidths for w
        self.s = None             # per-sample bandwidth scale factors
        self.d = None             # per-sample conditioning weights
        self.kernel = gaussian

    def fit(self, y_train, w_train, bandwidth=None):
        """Store training data and select bandwidths.

        Parameters
        ----------
        y_train, w_train : ndarray
            Training targets and conditioning variables; rows are samples.
        bandwidth : ndarray or None
            Joint bandwidth vector of length n_y + n_w. Estimated from the
            concatenated data when omitted; every entry must be strictly
            positive when supplied.

        Returns
        -------
        self
        """
        self.y_train = np.copy(y_train)
        self.w_train = np.copy(w_train)
        self.m_train = self.y_train.shape[0]
        self.n_y, self.n_w = self.y_train.shape[1], self.w_train.shape[1]
        if bandwidth is None:
            x_train = np.concatenate((self.y_train, self.w_train), axis=1)
            bandwidth = estimate_bandwidth(x_train)
        else:
            # BUG FIX: the original `bandwidth.any() > 0` only checked that
            # at least one entry was non-zero; require every entry positive.
            assert (bandwidth > 0).all(), f'Bandwidth needs to be greater than zero. Got {bandwidth}.'
        # Split the joint bandwidth into y- and w-parts (this slicing was
        # previously duplicated in both branches above).
        self.bandwidth_y = bandwidth[:self.n_y]
        self.bandwidth_w = bandwidth[self.n_y:]
        self.s = np.ones(self.m_train)
        return self

    def score_samples(self, y_test, w_test):
        """Evaluate the conditional density estimate at (y_test, w_test).

        The w-kernels are normalized across training samples into the
        responsibility weights ``self.d``; those weights then scale the
        y-kernel contributions. Shapes follow NumPy broadcasting against
        the ``[:, None]``-expanded training arrays — assumed one test row
        per query; confirm against callers.
        """
        kernel_w_values = self.kernel((w_test - self.w_train[:, None]) / (self.bandwidth_w * self.s[:, None, None]))
        self.d = np.prod(kernel_w_values, axis=1)
        self.d = self.m_train * self.d / np.sum(self.d, axis=0)
        kernel_y_values = self.kernel((y_test - self.y_train[:, None]) / (self.bandwidth_y * self.s[:, None, None]))
        scores = 1 / (self.m_train * np.prod(self.bandwidth_y)) * np.sum(
            (self.d / (self.s[:, None] ** self.n_y)) * np.prod(kernel_y_values, axis=2), axis=0)
        return scores
| [
"numpy.sum",
"numpy.copy",
"numpy.ones",
"numpy.prod",
"numpy.concatenate"
] | [((475, 491), 'numpy.copy', 'np.copy', (['y_train'], {}), '(y_train)\n', (482, 491), True, 'import numpy as np\n'), ((515, 531), 'numpy.copy', 'np.copy', (['w_train'], {}), '(w_train)\n', (522, 531), True, 'import numpy as np\n'), ((1150, 1171), 'numpy.ones', 'np.ones', (['self.m_train'], {}), '(self.m_train)\n', (1157, 1171), True, 'import numpy as np\n'), ((1372, 1404), 'numpy.prod', 'np.prod', (['kernel_w_values'], {'axis': '(1)'}), '(kernel_w_values, axis=1)\n', (1379, 1404), True, 'import numpy as np\n'), ((704, 756), 'numpy.concatenate', 'np.concatenate', (['(self.y_train, self.w_train)'], {'axis': '(1)'}), '((self.y_train, self.w_train), axis=1)\n', (718, 756), True, 'import numpy as np\n'), ((1446, 1468), 'numpy.sum', 'np.sum', (['self.d'], {'axis': '(0)'}), '(self.d, axis=0)\n', (1452, 1468), True, 'import numpy as np\n'), ((1624, 1649), 'numpy.prod', 'np.prod', (['self.bandwidth_y'], {}), '(self.bandwidth_y)\n', (1631, 1649), True, 'import numpy as np\n'), ((1716, 1748), 'numpy.prod', 'np.prod', (['kernel_y_values'], {'axis': '(2)'}), '(kernel_y_values, axis=2)\n', (1723, 1748), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Plot the per-iteration optimization error parsed from `iteration_error.txt`.

Handles both g2o- and gtsam-style log lines, which place the error value in
different columns.

Created on Mon Aug 5 16:52:31 2019
@author: amber
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys

# Split on ESC characters, spaces, or '=' so both log formats tokenize into
# a fixed set of 12 columns.
df = pd.read_csv("iteration_error.txt", sep = "\x1b| |=", header = None, names = range(12), engine='python') #
for col in (3, 4, 5, 6):
    print(df.iloc[0, col])

# g2o logs carry the token 'nodes,' in column 6 and put the error in
# column 10; gtsam logs put it in column 9.
err_col = 10 if df.iloc[0, 6] == 'nodes,' else 9
error = np.array(df[err_col], dtype=float)

fig, ax = plt.subplots()
plt.plot(error, 'b')
plt.savefig('iteration_error')
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((571, 585), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (583, 585), True, 'import matplotlib.pyplot as plt\n'), ((587, 607), 'matplotlib.pyplot.plot', 'plt.plot', (['error', '"""b"""'], {}), "(error, 'b')\n", (595, 607), True, 'import matplotlib.pyplot as plt\n'), ((608, 638), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""iteration_error"""'], {}), "('iteration_error')\n", (619, 638), True, 'import matplotlib.pyplot as plt\n'), ((639, 649), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (647, 649), True, 'import matplotlib.pyplot as plt\n'), ((460, 489), 'numpy.array', 'np.array', (['df[10]'], {'dtype': 'float'}), '(df[10], dtype=float)\n', (468, 489), True, 'import numpy as np\n'), ((530, 558), 'numpy.array', 'np.array', (['df[9]'], {'dtype': 'float'}), '(df[9], dtype=float)\n', (538, 558), True, 'import numpy as np\n')] |
'''
<NAME>
<EMAIL>
October 28, 2017
'''
import sys
import random
import numpy as np
import scipy as sc
import matplotlib as mlp
import matplotlib.pyplot as plt
from matplotlib import rc
from scipy import special
from scipy import stats
def readFileData(fileName):
    '''
    Reads numeric data (one value per line) from a text file.
    @params:
        fileName (string): File name
    Returns:
        data (np.array): array of floats read from the file, or None when
        reading/parsing fails (an error message is printed instead).
    '''
    try:
        # BUG FIX: use a context manager so the file handle is closed;
        # the original left the handle from open(...) dangling.
        with open(str(fileName), "r") as file_object:
            lines = file_object.read().splitlines()
        # Strip whitespace and parse every line as a float.
        data_list = [float(my_string.strip()) for my_string in lines]
    except OSError as err:
        print("OS error: {0}".format(err))
        return
    except IOError as err:
        # NOTE: IOError is an alias of OSError in Python 3, so this branch
        # is unreachable; kept to preserve the original structure.
        print("File read error: {0}".format(err))
        return
    except:
        # Deliberate catch-all: also covers float() parse errors.
        print("Unexpected error:{0}".format(sys.exc_info()[0]))
        return
    data = np.asarray(data_list)
    return data
def gaussian1D(x, mu, var):
    """
    Evaluate a 1D Gaussian density at x.

    Args:
        x (float or ndarray) = evaluation point(s)
        mu (float) = mean
        var (float) = variance (a small regularizer guards the division
                      against var == 0)
    """
    eps = 1e-8  # keeps the exponent finite when var is zero
    residual = x - mu
    exponent = residual * (1 / (var + eps)) * residual
    normalizer = 1.0 / np.power(2 * np.pi * var, 0.5)
    return normalizer * np.exp(-0.5 * exponent)
def resample(weight, N):
    '''
    Standard multinomial resampling: draw N uniforms and map each onto the
    cumulative distribution of the sorted weights.
    Useful Reference: http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=1521264
    Params:
        weight (nd.array) = 1xN array of particle weights
        N (int) = number of particles
    Returns:
        y (nd.array) = 1xN array of indexes of particles to keep
    '''
    sorted_w = np.sort(weight)
    sorted_idx = np.argsort(weight)
    # Pre-filled fallback: an entry keeps its argsort value if the uniform
    # draw exceeds the running weight sum (float round-off).
    chosen = np.argsort(weight)
    for i in range(N):
        u = np.random.uniform()
        running = 0
        for k in range(N):
            running = running + sorted_w[k]
            if u < running:
                chosen[i] = sorted_idx[k]
                break
    return chosen
def resampleSystematic(weight, N):
    '''
    Systematic resampling: N evenly spaced partition points share a single
    random offset, and each point selects the particle whose cumulative
    weight first exceeds it.
    Params:
        weight (nd.array) = 1xN array of weights (particle values here)
        N (int) = number of weights or particles
    Returns:
        y (nd.array) = 1xN array of indexes of particles to keep
    '''
    sorted_w = np.sort(weight)
    sorted_idx = np.argsort(weight)
    # Pre-filled fallback: an entry keeps its argsort value if its partition
    # point exceeds the running weight sum (float round-off).
    chosen = np.argsort(weight)
    # Evenly spaced partition points, jittered by one shared offset.
    offsets = np.array([float(i) / N for i in range(0, N)])
    jitter = np.random.uniform(0, 1.0 / N)
    offsets = offsets + jitter
    for i in range(N):
        running = 0
        for k in range(N):
            running = running + sorted_w[k]
            if offsets[i] < running:
                chosen[i] = sorted_idx[k]
                break
    return chosen
def bootStrapFilter(y, nsteps, N, phi = 0.98, sigma = 0.16, beta = 0.70, ess = float("inf"), resamp = 'standard'):
    '''
    Executes a bootstrap particle filter for a stochastic-volatility-style
    state-space model (log-variance state x, observation variance beta^2*exp(x)).
    Args:
        y (nd.array) = [D] array of observation data
        nsteps (int) = number of timesteps
        N (int) = number of particles
        phi, sigma, beta (float) = hyperparameters
        ess (float) = ESS trigger (default is inf, so resample every timestep)
        resamp (string) = resampling method (standard, systematic)
    Returns:
        x (nd.array) = [nsteps, D] array of states
        w_hist (nd.array) = [nsteps, D] array of filtering distributions g(y|x)
    '''
    # `small` regularizes log() against zero likelihoods.
    small = 1e-5
    x = np.zeros((nsteps, N)) + small
    w_log = np.zeros((nsteps, N)) + np.log(1.0/N)
    w = np.zeros((nsteps, N))
    w_hist = np.zeros((nsteps, N))
    #Initialize x, weights, log-weights
    x[0,:] = np.random.normal(phi*x[0,:], 2*sigma, N) #Initialize on a normal with 0 mean
    # Weights are kept in log space and re-centered by their max each step
    # for numerical stability before exponentiation.
    w_log[0,:] = np.log(gaussian1D(y[0], 0, beta*beta*np.exp(x[0,:])) + small)
    w_log[0,:] = w_log[0,:] - np.max(w_log[0,:])
    w[0,:] = np.exp(w_log[0,:])/np.sum(np.exp(w_log[0,:]))
    w_hist[0,:] = gaussian1D(y[0], 0, beta*beta*np.exp(x[0,:]))
    #Iterate over timesteps
    for i in range(1,nsteps):
        #First, sample particles for states
        x[i,:] = np.random.normal(phi*x[i-1,:], sigma, N)
        #Second update the importance weights
        w_log[i,:] = w_log[i-1,:] + np.log(gaussian1D(y[i], 0, beta*beta*np.exp(x[i,:])) + small)
        # w_hist stores the (unnormalized) per-step likelihood increment.
        w_hist[i,:] = np.exp(w_log[i,:] - w_log[i-1,:])
        w_log[i,:] = w_log[i,:] - np.max(w_log[i,:])
        w[i,:] = np.exp(w_log[i,:])/np.sum(np.exp(w_log[i,:]))
        #Calculate Kish's effective sample size
        neff = 1.0/np.sum(np.power(w[i,:],2))
        #ESS trigger
        if(neff < ess):
            #Third resample the points
            if(resamp == 'systematic'):
                ind = resampleSystematic(w[i,:],N)
            else: #Standard resampling
                ind = resample(w[i,:],N)
            # Resampling rewrites the full particle history (axis 1) and
            # resets the weights to uniform.
            x = np.take(x, ind, 1)
            w[i,:] = 1.0/N + np.zeros((N))
            w_log[i,:] = np.log(w[i,:])
    return x, w_hist
if __name__ == '__main__':
    plt.close('all')
    mlp.rcParams['font.family'] = ['times new roman'] # default is sans-serif
    rc('text', usetex=True)
    #========== Read in data from file ==========
    y = readFileData("logreturns2012to2014.csv")
    #========== Problem 1 (a) ==========
    #Set up subplots
    f, ax = plt.subplots(1, 1, figsize=(7, 6))
    f.suptitle('Homework 5 Problem 1(a)', fontsize=14)
    #Params
    nsteps = 500 #Timesteps
    N = 40 #Particles
    phi = 0.98; sigma = 0.16; beta = 0.70
    x, w_hist = bootStrapFilter(y, nsteps, N, 0.98, 0.16, 0.70)
    t = range(0,nsteps)
    # Plot the particle mean against the raw observations.
    ax.plot(t, np.sum(x[:,:],1)/N, 'k', label='x mean')
    ax.plot(t, y[:nsteps], 'o', markersize=3.5, label='observation')
    ax.set_xlabel('t')
    ax.legend()
    #========== Problem 1 (b) ==========
    #Set up subplots
    f, ax = plt.subplots(1, 1, figsize=(7, 6))
    f.suptitle('Homework 5 Problem 1(b)', fontsize=14)
    #Params
    N = 20
    nsteps = 500 #Timesteps
    B = 10 #Number of betas
    # 10 repeated runs per beta value -> box plot of log marginal likelihoods.
    beta_like = np.zeros((10,B))
    for j, beta in enumerate(np.linspace(0.25,2,B)):
        for i in range(10):
            #Bootstrap filter
            x, w_hist = bootStrapFilter(y, nsteps, N, 0.98, 0.16, beta)
            #Compute log marginal likelihood
            w_sum = np.sum(w_hist, 1) + 1e-5
            beta_like[i,j] = np.sum(np.log(w_sum) - np.log(N))
    ax.boxplot(beta_like)
    ax.set_xticklabels(["%.2f" % num for num in np.linspace(0.25,2,B)])
    ax.set_xlabel(r'$\beta$')
    ax.set_ylabel(r'$log(p)$')
    #========== Problem 1 (c) ==========
    #Set up subplots
    f, ax = plt.subplots(2, 2, figsize=(9, 9))
    f.suptitle('Homework 5 Problem 1(c)', fontsize=14)
    #Params
    nsteps = 500 #Timesteps
    N = 10 #Particles
    phi = 0.98; sigma = 0.16; beta = 0.70
    # Four filter variants: multinomial vs systematic resampling, each with
    # ESS trigger at Np (always resample) and at 0.5*Np.
    x, w_hist = bootStrapFilter(y, nsteps, N, 0.98, 0.16, 0.70)
    ess_x, ess_w_hist = bootStrapFilter(y, nsteps, N, 0.98, 0.16, 0.70, 0.5*N)
    syst_x, syst_w_hist = bootStrapFilter(y, nsteps, N, 0.98, 0.16, 0.70, N, resamp = 'systematic')
    syst_x2, syst_w_hist2 = bootStrapFilter(y, nsteps, N, 0.98, 0.16, 0.70, 0.5*N, resamp = 'systematic')
    t = range(0,nsteps)
    ax[0,0].set_title("Multinomial Resampling [ESS = Np]")
    for i in range(1,N):
        ax[0,0].plot(t, x[:,i], linewidth=1.0)
    ax[0,1].set_title("Multinomial Resampling [ESS = 0.5*Np]")
    for i in range(1,N):
        ax[0,1].plot(t, ess_x[:,i], linewidth=1.0)
    ax[1,0].set_title("Systematic Resampling [ESS = Np]")
    for i in range(1,N):
        ax[1,0].plot(t, syst_x[:,i], linewidth=1.0)
    ax[1,1].set_title("Systematic Resampling [ESS = 0.5*Np]")
    for i in range(1,N):
        ax[1,1].plot(t, syst_x2[:,i], linewidth=1.0)
    for ax0 in ax.reshape(-1):
        ax0.set_xlim([400, 500])
        ax0.set_xlabel('t')
        # NOTE(review): this second set_xlabel overwrites the first;
        # it was presumably meant to be ax0.set_ylabel('x').
        ax0.set_xlabel('x')
    plt.tight_layout(rect=[0,0, 1.0, 0.93])
plt.show() | [
"matplotlib.rc",
"numpy.sum",
"numpy.argsort",
"numpy.exp",
"numpy.random.normal",
"sys.exc_info",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.close",
"numpy.power",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.asarray",
"numpy.so... | [((977, 998), 'numpy.asarray', 'np.asarray', (['data_list'], {}), '(data_list)\n', (987, 998), True, 'import numpy as np\n'), ((1284, 1300), 'numpy.exp', 'np.exp', (['(-0.5 * e)'], {}), '(-0.5 * e)\n', (1290, 1300), True, 'import numpy as np\n'), ((1733, 1748), 'numpy.sort', 'np.sort', (['weight'], {}), '(weight)\n', (1740, 1748), True, 'import numpy as np\n'), ((1761, 1779), 'numpy.argsort', 'np.argsort', (['weight'], {}), '(weight)\n', (1771, 1779), True, 'import numpy as np\n'), ((1788, 1806), 'numpy.argsort', 'np.argsort', (['weight'], {}), '(weight)\n', (1798, 1806), True, 'import numpy as np\n'), ((2357, 2372), 'numpy.sort', 'np.sort', (['weight'], {}), '(weight)\n', (2364, 2372), True, 'import numpy as np\n'), ((2385, 2403), 'numpy.argsort', 'np.argsort', (['weight'], {}), '(weight)\n', (2395, 2403), True, 'import numpy as np\n'), ((2412, 2430), 'numpy.argsort', 'np.argsort', (['weight'], {}), '(weight)\n', (2422, 2430), True, 'import numpy as np\n'), ((2562, 2591), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1.0 / N)'], {}), '(0, 1.0 / N)\n', (2579, 2591), True, 'import numpy as np\n'), ((3628, 3649), 'numpy.zeros', 'np.zeros', (['(nsteps, N)'], {}), '((nsteps, N))\n', (3636, 3649), True, 'import numpy as np\n'), ((3663, 3684), 'numpy.zeros', 'np.zeros', (['(nsteps, N)'], {}), '((nsteps, N))\n', (3671, 3684), True, 'import numpy as np\n'), ((3739, 3784), 'numpy.random.normal', 'np.random.normal', (['(phi * x[0, :])', '(2 * sigma)', 'N'], {}), '(phi * x[0, :], 2 * sigma, N)\n', (3755, 3784), True, 'import numpy as np\n'), ((5067, 5083), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5076, 5083), True, 'import matplotlib.pyplot as plt\n'), ((5166, 5189), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (5168, 5189), False, 'from matplotlib import rc\n'), ((5365, 5399), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(7, 6)'}), '(1, 1, 
figsize=(7, 6))\n', (5377, 5399), True, 'import matplotlib.pyplot as plt\n'), ((5893, 5927), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(7, 6)'}), '(1, 1, figsize=(7, 6))\n', (5905, 5927), True, 'import matplotlib.pyplot as plt\n'), ((6079, 6096), 'numpy.zeros', 'np.zeros', (['(10, B)'], {}), '((10, B))\n', (6087, 6096), True, 'import numpy as np\n'), ((6681, 6715), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(9, 9)'}), '(2, 2, figsize=(9, 9))\n', (6693, 6715), True, 'import matplotlib.pyplot as plt\n'), ((7925, 7965), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[0, 0, 1.0, 0.93]'}), '(rect=[0, 0, 1.0, 0.93])\n', (7941, 7965), True, 'import matplotlib.pyplot as plt\n'), ((7969, 7979), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7977, 7979), True, 'import matplotlib.pyplot as plt\n'), ((1846, 1865), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1863, 1865), True, 'import numpy as np\n'), ((3540, 3561), 'numpy.zeros', 'np.zeros', (['(nsteps, N)'], {}), '((nsteps, N))\n', (3548, 3561), True, 'import numpy as np\n'), ((3582, 3603), 'numpy.zeros', 'np.zeros', (['(nsteps, N)'], {}), '((nsteps, N))\n', (3590, 3603), True, 'import numpy as np\n'), ((3606, 3621), 'numpy.log', 'np.log', (['(1.0 / N)'], {}), '(1.0 / N)\n', (3612, 3621), True, 'import numpy as np\n'), ((3925, 3944), 'numpy.max', 'np.max', (['w_log[0, :]'], {}), '(w_log[0, :])\n', (3931, 3944), True, 'import numpy as np\n'), ((3957, 3976), 'numpy.exp', 'np.exp', (['w_log[0, :]'], {}), '(w_log[0, :])\n', (3963, 3976), True, 'import numpy as np\n'), ((4187, 4232), 'numpy.random.normal', 'np.random.normal', (['(phi * x[i - 1, :])', 'sigma', 'N'], {}), '(phi * x[i - 1, :], sigma, N)\n', (4203, 4232), True, 'import numpy as np\n'), ((4395, 4432), 'numpy.exp', 'np.exp', (['(w_log[i, :] - w_log[i - 1, :])'], {}), '(w_log[i, :] - w_log[i - 1, :])\n', (4401, 4432), True, 'import numpy as np\n'), 
((6126, 6149), 'numpy.linspace', 'np.linspace', (['(0.25)', '(2)', 'B'], {}), '(0.25, 2, B)\n', (6137, 6149), True, 'import numpy as np\n'), ((1316, 1346), 'numpy.power', 'np.power', (['(2 * np.pi * var)', '(0.5)'], {}), '(2 * np.pi * var, 0.5)\n', (1324, 1346), True, 'import numpy as np\n'), ((3983, 4002), 'numpy.exp', 'np.exp', (['w_log[0, :]'], {}), '(w_log[0, :])\n', (3989, 4002), True, 'import numpy as np\n'), ((4051, 4066), 'numpy.exp', 'np.exp', (['x[0, :]'], {}), '(x[0, :])\n', (4057, 4066), True, 'import numpy as np\n'), ((4463, 4482), 'numpy.max', 'np.max', (['w_log[i, :]'], {}), '(w_log[i, :])\n', (4469, 4482), True, 'import numpy as np\n'), ((4499, 4518), 'numpy.exp', 'np.exp', (['w_log[i, :]'], {}), '(w_log[i, :])\n', (4505, 4518), True, 'import numpy as np\n'), ((4911, 4929), 'numpy.take', 'np.take', (['x', 'ind', '(1)'], {}), '(x, ind, 1)\n', (4918, 4929), True, 'import numpy as np\n'), ((4998, 5013), 'numpy.log', 'np.log', (['w[i, :]'], {}), '(w[i, :])\n', (5004, 5013), True, 'import numpy as np\n'), ((5665, 5683), 'numpy.sum', 'np.sum', (['x[:, :]', '(1)'], {}), '(x[:, :], 1)\n', (5671, 5683), True, 'import numpy as np\n'), ((4525, 4544), 'numpy.exp', 'np.exp', (['w_log[i, :]'], {}), '(w_log[i, :])\n', (4531, 4544), True, 'import numpy as np\n'), ((4620, 4640), 'numpy.power', 'np.power', (['w[i, :]', '(2)'], {}), '(w[i, :], 2)\n', (4628, 4640), True, 'import numpy as np\n'), ((4959, 4970), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (4967, 4970), True, 'import numpy as np\n'), ((6358, 6375), 'numpy.sum', 'np.sum', (['w_hist', '(1)'], {}), '(w_hist, 1)\n', (6364, 6375), True, 'import numpy as np\n'), ((6521, 6544), 'numpy.linspace', 'np.linspace', (['(0.25)', '(2)', 'B'], {}), '(0.25, 2, B)\n', (6532, 6544), True, 'import numpy as np\n'), ((3870, 3885), 'numpy.exp', 'np.exp', (['x[0, :]'], {}), '(x[0, :])\n', (3876, 3885), True, 'import numpy as np\n'), ((6419, 6432), 'numpy.log', 'np.log', (['w_sum'], {}), '(w_sum)\n', (6425, 6432), True, 
'import numpy as np\n'), ((6435, 6444), 'numpy.log', 'np.log', (['N'], {}), '(N)\n', (6441, 6444), True, 'import numpy as np\n'), ((930, 944), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (942, 944), False, 'import sys\n'), ((4348, 4363), 'numpy.exp', 'np.exp', (['x[i, :]'], {}), '(x[i, :])\n', (4354, 4363), True, 'import numpy as np\n')] |
import numpy as np
# np.histogram on uint8 data with bins=256 aliases neighbouring intensity
# values into shared bins. Spelling out one unit-width bin per uint8 value
# (edges at -0.5, 0.5, ..., 255.5) avoids that.
_DEFAULT_ENTROPY_BINS = tuple(np.arange(-0.5, 255.51, 1))


def cross_entropy(image, threshold, bins=_DEFAULT_ENTROPY_BINS):
    """Cross-entropy between the pixel populations below and above a threshold.

    This is the objective minimized by Li's minimum cross-entropy
    thresholding (Li & Lee 1993, eq. 8; Li & Tam 1998, eqs. 1-3).

    Parameters
    ----------
    image : array
        The input array of values.
    threshold : float
        The value dividing the foreground and background in ``image``.
    bins : int or array of float, optional
        Bin specification forwarded to ``np.histogram``. For an exact
        calculation each unique value needs its own bin; the default gives
        every uint8 value a unit-width bin (``bins=256`` would alias).

    Returns
    -------
    nu : float
        The cross-entropy target value.
    """
    hist, bin_edges = np.histogram(image, bins=bins, density=True)
    # Midpoint of each consecutive pair of edges.
    bin_centers = np.convolve(bin_edges, [0.5, 0.5], mode='valid')
    # Index of the first bin lying above the threshold.
    split = np.flatnonzero(bin_centers > threshold)[0]
    # Zeroth and first moments of the background / foreground populations.
    area_bg = np.sum(hist[:split])
    area_fg = np.sum(hist[split:])
    mass_bg = np.sum(hist[:split] * bin_centers[:split])
    mass_fg = np.sum(hist[split:] * bin_centers[split:])
    mean_bg = mass_bg / area_bg
    mean_fg = mass_fg / area_fg
    return -mass_bg * np.log(mean_bg) - mass_fg * np.log(mean_fg)
def threshold_li(image, *, tolerance=None):
    """Threshold an image with Li's iterative minimum cross-entropy method.

    Parameters
    ----------
    image : ndarray
        Input image.
    tolerance : float, optional
        Stop iterating once the threshold changes by less than this amount.
        Defaults to half the image's intensity range divided by 256.

    Returns
    -------
    threshold : float
        Upper threshold value: pixels with a strictly higher intensity are
        considered foreground.

    References
    ----------
    Li & Lee (1993) :DOI:`10.1016/0031-3203(93)90115-D`;
    Li & Tam (1998) :DOI:`10.1016/S0167-8655(98)00057-9`;
    Sezgin & Sankur (2004) :DOI:`10.1117/1.1631315`;
    ImageJ AutoThresholder, http://fiji.sc/wiki/index.php/Auto_Threshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_li(image)
    >>> binary = image > thresh
    """
    intensity_span = np.max(image) - np.min(image)
    # `or` (not an `is None` test) on purpose: an explicit tolerance of 0
    # also falls back to the default, matching the original behavior.
    tolerance = tolerance or 0.5 * intensity_span / 256

    # Start from the global mean; the offset forces at least one iteration
    # unless the tolerance is zero (constant image).
    t_curr = np.mean(image)
    t_next = t_curr + 2 * tolerance

    # Fixed-point iteration: split at the current threshold, then recompute
    # the threshold from the two class means.
    while abs(t_next - t_curr) > tolerance:
        t_curr = t_next
        is_foreground = image > t_curr
        mean_fore = np.mean(image[is_foreground])
        mean_back = np.mean(image[~is_foreground])
        t_next = ((mean_back - mean_fore) /
                  (np.log(mean_back) - np.log(mean_fore)))
    return t_next
| [
"numpy.sum",
"numpy.log",
"numpy.flatnonzero",
"numpy.histogram",
"numpy.mean",
"numpy.arange",
"numpy.max",
"numpy.min",
"numpy.convolve"
] | [((269, 295), 'numpy.arange', 'np.arange', (['(-0.5)', '(255.51)', '(1)'], {}), '(-0.5, 255.51, 1)\n', (278, 295), True, 'import numpy as np\n'), ((1814, 1858), 'numpy.histogram', 'np.histogram', (['image'], {'bins': 'bins', 'density': '(True)'}), '(image, bins=bins, density=True)\n', (1826, 1858), True, 'import numpy as np\n'), ((1877, 1925), 'numpy.convolve', 'np.convolve', (['bin_edges', '[0.5, 0.5]'], {'mode': '"""valid"""'}), "(bin_edges, [0.5, 0.5], mode='valid')\n", (1888, 1925), True, 'import numpy as np\n'), ((1987, 2008), 'numpy.sum', 'np.sum', (['histogram[:t]'], {}), '(histogram[:t])\n', (1993, 2008), True, 'import numpy as np\n'), ((2045, 2066), 'numpy.sum', 'np.sum', (['histogram[t:]'], {}), '(histogram[t:])\n', (2051, 2066), True, 'import numpy as np\n'), ((2077, 2116), 'numpy.sum', 'np.sum', (['(histogram[:t] * bin_centers[:t])'], {}), '(histogram[:t] * bin_centers[:t])\n', (2083, 2116), True, 'import numpy as np\n'), ((2153, 2192), 'numpy.sum', 'np.sum', (['(histogram[t:] * bin_centers[t:])'], {}), '(histogram[t:] * bin_centers[t:])\n', (2159, 2192), True, 'import numpy as np\n'), ((3937, 3951), 'numpy.mean', 'np.mean', (['image'], {}), '(image)\n', (3944, 3951), True, 'import numpy as np\n'), ((1934, 1973), 'numpy.flatnonzero', 'np.flatnonzero', (['(bin_centers > threshold)'], {}), '(bin_centers > threshold)\n', (1948, 1973), True, 'import numpy as np\n'), ((3817, 3830), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (3823, 3830), True, 'import numpy as np\n'), ((3833, 3846), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (3839, 3846), True, 'import numpy as np\n'), ((4235, 4261), 'numpy.mean', 'np.mean', (['image[foreground]'], {}), '(image[foreground])\n', (4242, 4261), True, 'import numpy as np\n'), ((4282, 4309), 'numpy.mean', 'np.mean', (['image[~foreground]'], {}), '(image[~foreground])\n', (4289, 4309), True, 'import numpy as np\n'), ((2275, 2286), 'numpy.log', 'np.log', (['mua'], {}), '(mua)\n', (2281, 2286), True, 'import 
numpy as np\n'), ((2295, 2306), 'numpy.log', 'np.log', (['mub'], {}), '(mub)\n', (2301, 2306), True, 'import numpy as np\n'), ((4374, 4391), 'numpy.log', 'np.log', (['mean_back'], {}), '(mean_back)\n', (4380, 4391), True, 'import numpy as np\n'), ((4394, 4411), 'numpy.log', 'np.log', (['mean_fore'], {}), '(mean_fore)\n', (4400, 4411), True, 'import numpy as np\n')] |
"""
Introduction to Radar Course
Authors
=======
<NAME>, <NAME>, <NAME>
MIT Lincoln Laboratory
Lexington, MA 02421
Distribution Statement
======================
DISTRIBUTION STATEMENT A. Approved for public release. Distribution is unlimited.
This material is based upon work supported by the United States Air Force under Air
Force Contract No. FA8702-15-D-0001. Any opinions, findings, conclusions or
recommendations expressed in this material are those of the author(s) and do not
necessarily reflect the views of the United States Air Force.
© 2021 Massachusetts Institute of Technology.
The software/firmware is provided to you on an As-Is basis
Delivered to the U.S. Government with Unlimited Rights, as defined in DFARS Part
252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S.
Government rights in this work are defined by DFARS 252.227-7013 or
DFARS 252.227-7014 as detailed above. Use of this work other than as specifically
authorized by the U.S. Government may violate any copyrights that exist in this work.
RAMS ID: 1016938
"""
from IPython.display import display, HTML
import ipywidgets as wdg
import matplotlib.patches as ptch
import matplotlib.pyplot as pyp
import rad.air as air
import rad.plot as plt
import rad.const as cnst
import rad.radar as rd
import rad.robby as rby
import rad.toys as ts
import math
import numpy as np
#-------Lab 1.1: Introduction to Labs-------
# Example 1.1.1
def ex_1_1_1():
    """Run the sine-wave demo with amp=1, freq=1, phase=0.

    Rendering and widget behavior live in ts.sine (rad.toys) -- presumably
    an interactive ipywidgets plot; not verifiable from this file.
    """
    ts.sine(
        amp = 1,
        freq = 1,
        phase = 0
    )
# Example 1.1.2
def ex_1_1_2():
    """Run the sine-wave demo with amp=3, freq=2, phase=0 and no widgets."""
    ts.sine(
        amp=3,
        freq=2,
        phase=0,
        widgets=[]
    )
#-------Lab 1.2: Introduction to Radar-------
# Example 1.2.1
def ex_1_2_1():
    """Run the basic wave demo (ts.wave, default settings)."""
    ts.wave()
# Example 1.2.2
def ex_1_2_2():
    """Run the propagation-loss demo (ts.prop_loss, default settings)."""
    ts.prop_loss()
# Example 1.2.3
def ex_1_2_3():
    """Run the generic propagating-sine demo (ts.sine_prop_generic)."""
    ts.sine_prop_generic()
# Example 1.2.4
def ex_1_2_4():
    """Ranging demo: omni TX and RX, visible target at (50, 50);
    run/position widgets enabled."""
    ts.ranging(
        rx_omni=True,
        tx_omni=True,
        tgt_hide=False,
        tgt_x = 50,
        tgt_y = 50,
        widgets=['run', 'x', 'y']
    )
# Example 1.2.5
def ex_1_2_5():
    """Ranging demo: omni TX/RX, hidden target at (75, -100),
    max_range=400; detection/run widgets only."""
    ts.ranging(
        rx_omni=True,
        tx_omni=True,
        max_range=400,
        tgt_hide=True,
        tgt_x=75,
        tgt_y=-100,
        widgets=['dets', 'run']
    )
# Example 1.2.6
def ex_1_2_6():
    """Ranging demo: directional TX, omni RX, visible target at (50, 50);
    TX steering/beamwidth widgets enabled."""
    ts.ranging(
        rx_omni=True,
        tx_omni=False,
        tgt_hide=False,
        tgt_x=50,
        tgt_y=50,
        widgets=['tx_az', 'tx_beamw', 'dets', 'run', 'x', 'y']
    )
# Example 1.2.7
def ex_1_2_7():
    """Same as ex_1_2_6 but with the target hidden (search exercise)."""
    ts.ranging(
        rx_omni=True,
        tx_omni=False,
        tgt_hide=True,
        tgt_x=50,
        tgt_y=50,
        widgets=['tx_az', 'tx_beamw', 'dets', 'run']
    )
# Example 1.2.8
def ex_1_2_8():
    """Ranging demo: omni TX, directional RX, visible target at (50, 50);
    RX steering/beamwidth widgets enabled."""
    ts.ranging(
        rx_omni=False,
        tx_omni=True,
        tgt_hide=False,
        tgt_x=50,
        tgt_y=50,
        widgets=['rx_az', 'rx_beamw', 'dets', 'run', 'x', 'y']
    )
# Example 1.2.9
def ex_1_2_9():
    """Same as ex_1_2_8 but with a hidden target at (40, -20)."""
    ts.ranging(
        rx_omni=False,
        tx_omni=True,
        tgt_hide=True,
        tgt_x=40,
        tgt_y=-20,
        widgets=['rx_az', 'rx_beamw', 'dets', 'run']
    )
# Example 1.2.10
def ex_1_2_10():
    """Run the dish antenna pattern demo (ts.dish_pat)."""
    ts.dish_pat()
# Example 1.2.11
def ex_1_2_11():
    """Phased-array demo: 7 elements with spacing dx=4; steering widgets."""
    ts.array(
        num_elem=7,
        dx=4,
        widgets=['tx_az', 'run', 'x', 'y']
    )
# Example 1.2.12
def ex_1_2_12():
    """Run the Doppler demo (ts.doppler, default settings)."""
    ts.doppler()
# Example 1.2.13
def ex_1_2_13():
    """Run the radar waveform demo (ts.radar_wave, default settings)."""
    ts.radar_wave()
#-------Lab 2.1: Radar Range Equation-------
# NOTE(review): the "# Example" labels below drift out of sync with the
# function-name suffixes (e.g. "# Example 2.1.2" precedes ex_2_1_1b) —
# confirm which numbering the course notebooks reference.
# Example 2.1.1
def ex_2_1_1a():
    ts.snr(
        noise_energy=-20,
        show_snr=False,
        signal_energy=10,
        widgets=[]
    )
# Example 2.1.2
def ex_2_1_1b():
    ts.snr(
        noise_energy=0,
        show_snr=False,
        signal_energy=0,
        widgets=[]
    )
# Example 2.1.3
def ex_2_1_2():
    ts.snr()
# Example 2.1.4
def ex_2_1_3():
    ts.sine_prop()
# Example 2.1.5
def ex_2_1_4():
    ts.friis()
# Example 2.1.6
def ex_2_1_5():
    ts.radar_range_power()
# Example 2.1.7
def ex_2_1_6():
    ts.radar_range_energy()
# Example 2.1.8
def ex_2_1_7():
    ts.radar_range_snr()
# Example 2.1.9
def ex_2_1_8():
    ts.radar_range_det()
#-------Lab 2.2: Basic Radar Design-------
# Example 2.2.1
def ex_2_2_1():
    ts.radar_range_det()
# Example 2.2.2
def ex_2_2_2():
    ts.design(
        max_rcs=-5,
        min_range=100,
        min_snr=15,
        metrics=['price', 'range', 'rcs', 'snr'],
        widgets=['freq', 'energy', 'noise_temp', 'r', 'radius', 'rcs']
    )
# Example 2.2.3
def ex_2_2_3():
    ts.dish_pat(show_beamw=True)
# Example 2.2.4
def ex_2_2_4():
    ts.rect_pat(show_beamw=True)
# Example 2.2.5
def ex_2_2_5():
    ts.design(
        max_beamw=3,
        max_price=110000,
        max_rcs=-10,
        min_range=120,
        min_snr=14,
        metrics=['beamw', 'price', 'range', 'rcs', 'snr'],
        widgets=['freq', 'energy', 'noise_temp', 'r', 'radius', 'rcs']
    )
# Example 2.2.6
def ex_2_2_6():
    """Example 2.2.6: place one rcs=5 target at range 6E3, angle pi/4, then run robby."""
    test_tgt = rd.Target()
    test_tgt.rcs = 5
    r = 6E3
    az = math.pi/2 - math.pi/4
    # Convert (range, angle) to x/y via cos/sin of the angle.
    test_tgt.pos = np.array([r*math.cos(az), r*math.sin(az)])
    rby.robby(targets=[test_tgt], reset=False, widgets=['freq', 'energy', 'noise_temp', 'radius'])
#-------Lab 3.1: Radar Transmissions and Receptions-------
# Example 3.1.1
def ex_3_1_1():
    ts.sine_pulse(
        freq=1,
        prf=0.1,
        widgets=['energy', 'freq', 'pulsewidth']
    )
# Example 3.1.2
def ex_3_1_2():
    ts.sine_pulse(
        show_duty=True,
        show_pri=True,
        widgets=['energy', 'freq', 'prf', 'pulsewidth']
    )
# Example 3.1.3
def ex_3_1_3():
    ts.lfm()
# Example 3.1.4
def ex_3_1_4():
    ts.dish_pat()
# Example 3.1.5
def ex_3_1_5():
    ts.array()
# Example 3.1.6
def ex_3_1_6():
    ts.delay_steer()
# Example 3.1.7
def ex_3_1_7():
    ts.phase_steer()
# Example 3.1.8
def ex_3_1_8():
    ts.pol()
# Example 3.1.9
def ex_3_1_9():
    ts.matched_filter(
        start_freq=1,
        stop_freq=1,
        widgets=['delay', 'pulsewidth']
    )
# Example 3.1.10
def ex_3_1_10():
    ts.range_res(
        start_freq=1,
        stop_freq=1,
        widgets=['range', 'pulsewidth']
    )
# Example 3.1.11
def ex_3_1_11():
    ts.matched_filter()
# Example 3.1.12
def ex_3_1_12():
    ts.range_res()
# Example 3.1.13
def ex_3_1_13():
    """Example 3.1.13: same rcs=5 target at 6E3 / pi/4 as ex_2_2_6, with waveform widgets."""
    test_tgt = rd.Target()
    test_tgt.rcs = 5
    r = 6E3
    az = math.pi/2 - math.pi/4
    test_tgt.pos = np.array([r*math.cos(az), r*math.sin(az)])
    rby.robby(targets=[test_tgt], reset=False, widgets=['bandw', 'coherent', 'freq', 'energy', 'noise_temp', 'num_integ', 'radius'])
#-------Lab 3.2: Detection-------
# Example 3.2.1
def ex_3_2_1():
    ts.radar_range_det()
# Example 3.2.2
def ex_3_2_2():
    ts.radar_range_det(highlight=False)
# Example 3.2.3
def ex_3_2_3():
    ts.detect_game()
# Example 3.2.4
def ex_3_2_4():
    ts.threshold()
# Example 3.2.5
def ex_3_2_5():
    ts.roc()
# Example 3.2.6
def ex_3_2_6():
    """Example 3.2.6: rcs=5 target at 6E3 / pi/4; robby with detections shown."""
    test_tgt = rd.Target()
    test_tgt.rcs = 5
    r = 6E3
    az = math.pi/2 - math.pi/4
    test_tgt.pos = np.array([r*math.cos(az), r*math.sin(az)])
    rby.robby(
        targets=[test_tgt],
        dets=True,
        reset=False,
        widgets=['bandw', 'coherent', 'det_thresh', 'freq', 'energy', 'noise_temp', 'num_integ', 'radius']
    )
#-------Lab 4.1: Target Parameter Estimation-------
# Example 4.1.1
def ex_4_1_1():
    ts.dish_pat()
# Example 4.1.2
def ex_4_1_2():
    """Example 4.1.2: rcs=10 target at range 5E3, angle pi/4; robby with fixed settings."""
    test_tgt = rd.Target()
    test_tgt.rcs = 10
    r = 5E3
    az = math.pi/2 - math.pi/4
    test_tgt.pos = np.array([r*math.cos(az), r*math.sin(az)])
    rby.robby(
        targets=[test_tgt],
        energy=0.8,
        freq=4E3,
        radius=0.8,
        reset=False,
        widgets=[]
    )
# Example 4.1.3
def ex_4_1_3():
    ts.cross_range()
# Example 4.1.4
def ex_4_1_4():
    ts.doppler()
# Example 4.1.5
def ex_4_1_5():
    ts.cw()
# Example 4.1.6
def ex_4_1_6():
    ts.cw(
        freq=2E3,
        dr=55,
        integ_time=15,
        targ_line=False,
        widgets=[]
    )
# Example 4.1.7
def ex_4_1_7():
    ts.rdi()
#-------Lab 4.2: Target Tracking-------
# Example 4.2.1
def ex_4_2_1():
    """Example 4.2.1: build one straight-line route and run robby on it."""
    # Test route: straight line from (-10E3, 0) to (0, 10E3) over 100 time units.
    test_route = air.Route()
    test_route.start = np.array([-10E3, 0])
    test_route.end = np.array([0, 10E3])
    test_route.lifetime = 100
    test_route.vel = (test_route.end - test_route.start)/test_route.lifetime
    test_route.speed = np.sqrt(test_route.vel[0]**2 + test_route.vel[1]**2)
    test_route.max_range = 10E3
    # Plot and return
    rby.robby(
        targets=air.to_target([test_route]),
        radius=1.0,
        freq=5E3,
        reset=True,
        dets=True,
        scan_rate=8,
        bandw=2,
        widgets=['det_thresh']
    )
# Example 4.2.2
def ex_4_2_2():
    ts.propagation()
# Example 4.2.3
def ex_4_2_3():
    ts.gnn()
# Example 4.2.4
def ex_4_2_4():
    ts.ekf()
# Example 4.2.5
def ex_4_2_5():
    """Example 4.2.5: six random routes via air.routes; robby with pulses shown."""
    # Test route
    routes = air.routes(6, 10E3, 200)
    targets = air.to_target(routes)
    # Plot and return
    rby.robby(
        targets=targets,
        radius=1.0,
        freq=5E3,
        reset=True,
        dets=True,
        pulses=True,
        scan_rate=8,
        bandw=2
    )
#-------Lab 5.1: Radar Design Revisited-------
# Example 5.1.1
def ex_5_1_1():
    """Example 5.1.1: generate and plot six routes; return (routes, targets)."""
    routes = air.routes(6, 10E3, 150)
    air.plot_routes(routes)
    targets = air.to_target(routes)
    return routes, targets
# Example 5.1.2
def ex_5_1_2():
    ts.design(
        bandw=0.5,
        bandw_lim=[0.1, 3],
        energy=1,
        energy_lim=[0, 1],
        freq=500,
        freq_lim=[100, 3000],
        max_price=50E3,
        noise_temp=1000,
        noise_temp_lim=[500, 1200],
        num_integ=1,
        num_integ_lim=[1, 10],
        r=1,
        r_lim=[1, 20],
        radius=0.1,
        radius_lim=[0.1, 0.5],
        rcs=10,
        rcs_lim=[-10, 25],
        scan_rate=1,
        scan_rate_lim=[1, 10],
        metrics=['price'],
        widgets=['bandw', 'coherent', 'freq', 'energy', 'noise_temp', 'num_integ', 'radius', 'scan_rate']
    )
# Example 5.1.3
def ex_5_1_2b():
    # Same design setup as ex_5_1_2, plus min_snr and the 'snr' metric/widgets.
    ts.design(
        bandw=0.5,
        bandw_lim=[0.1, 3],
        energy=1,
        energy_lim=[0, 1],
        freq=500,
        freq_lim=[100, 3000],
        max_price=50E3,
        min_snr=5,
        noise_temp=1000,
        noise_temp_lim=[500, 1200],
        num_integ=1,
        num_integ_lim=[1, 10],
        r=1,
        r_lim=[1, 20],
        radius=0.1,
        radius_lim=[0.1, 0.5],
        rcs=10,
        rcs_lim=[-10, 25],
        scan_rate=1,
        scan_rate_lim=[1, 10],
        metrics=['price', 'snr'],
        widgets=['bandw', 'coherent', 'freq', 'energy', 'min_snr', 'noise_temp', 'num_integ', 'r', 'radius', 'rcs', 'scan_rate']
    )
# Example 5.1.4
def ex_5_1_3a():
    """Example 5.1.4: build, plot, and return a single straight-line test route."""
    # Test route: straight line from (-10E3, 0) to (0, 10E3) over 100 time units.
    test_route = air.Route()
    test_route.start = np.array([-10E3, 0])
    test_route.end = np.array([0, 10E3])
    test_route.lifetime = 100
    test_route.vel = (test_route.end - test_route.start)/test_route.lifetime
    test_route.speed = np.sqrt(test_route.vel[0]**2 + test_route.vel[1]**2)
    test_route.max_range = 10E3
    # Plot and return
    air.plot_routes([test_route])
    return air.to_target([test_route])
# Example 5.1.5
def ex_5_1_3b(test_tgt):
    """Example 5.1.5: run robby on the target(s) produced by ex_5_1_3a."""
    rby.robby(targets=test_tgt, max_price=50E3, reset=True, show_price=True)
# Example 5.1.6
def ex_5_1_4a(routes, targets):
    """Example 5.1.6: plot the routes from ex_5_1_1 (targets unused here)."""
    air.plot_routes(routes)
# Example 5.1.7
def ex_5_1_4b(routes, targets):
rby.robby(targets=targets, max_price=50E3, reset=True, show_price=True) | [
"rad.toys.cross_range",
"rad.toys.rect_pat",
"rad.toys.rdi",
"rad.toys.roc",
"rad.air.plot_routes",
"rad.toys.sine_prop",
"rad.toys.radar_range_power",
"rad.toys.doppler",
"rad.toys.array",
"rad.radar.Target",
"rad.toys.delay_steer",
"rad.toys.detect_game",
"rad.toys.phase_steer",
"rad.toy... | [((1469, 1500), 'rad.toys.sine', 'ts.sine', ([], {'amp': '(1)', 'freq': '(1)', 'phase': '(0)'}), '(amp=1, freq=1, phase=0)\n', (1476, 1500), True, 'import rad.toys as ts\n'), ((1574, 1617), 'rad.toys.sine', 'ts.sine', ([], {'amp': '(3)', 'freq': '(2)', 'phase': '(0)', 'widgets': '[]'}), '(amp=3, freq=2, phase=0, widgets=[])\n', (1581, 1617), True, 'import rad.toys as ts\n'), ((1744, 1753), 'rad.toys.wave', 'ts.wave', ([], {}), '()\n', (1751, 1753), True, 'import rad.toys as ts\n'), ((1791, 1805), 'rad.toys.prop_loss', 'ts.prop_loss', ([], {}), '()\n', (1803, 1805), True, 'import rad.toys as ts\n'), ((1847, 1869), 'rad.toys.sine_prop_generic', 'ts.sine_prop_generic', ([], {}), '()\n', (1867, 1869), True, 'import rad.toys as ts\n'), ((1911, 2016), 'rad.toys.ranging', 'ts.ranging', ([], {'rx_omni': '(True)', 'tx_omni': '(True)', 'tgt_hide': '(False)', 'tgt_x': '(50)', 'tgt_y': '(50)', 'widgets': "['run', 'x', 'y']"}), "(rx_omni=True, tx_omni=True, tgt_hide=False, tgt_x=50, tgt_y=50,\n widgets=['run', 'x', 'y'])\n", (1921, 2016), True, 'import rad.toys as ts\n'), ((2108, 2228), 'rad.toys.ranging', 'ts.ranging', ([], {'rx_omni': '(True)', 'tx_omni': '(True)', 'max_range': '(400)', 'tgt_hide': '(True)', 'tgt_x': '(75)', 'tgt_y': '(-100)', 'widgets': "['dets', 'run']"}), "(rx_omni=True, tx_omni=True, max_range=400, tgt_hide=True, tgt_x=\n 75, tgt_y=-100, widgets=['dets', 'run'])\n", (2118, 2228), True, 'import rad.toys as ts\n'), ((2327, 2462), 'rad.toys.ranging', 'ts.ranging', ([], {'rx_omni': '(True)', 'tx_omni': '(False)', 'tgt_hide': '(False)', 'tgt_x': '(50)', 'tgt_y': '(50)', 'widgets': "['tx_az', 'tx_beamw', 'dets', 'run', 'x', 'y']"}), "(rx_omni=True, tx_omni=False, tgt_hide=False, tgt_x=50, tgt_y=50,\n widgets=['tx_az', 'tx_beamw', 'dets', 'run', 'x', 'y'])\n", (2337, 2462), True, 'import rad.toys as ts\n'), ((2550, 2674), 'rad.toys.ranging', 'ts.ranging', ([], {'rx_omni': '(True)', 'tx_omni': '(False)', 'tgt_hide': '(True)', 'tgt_x': '(50)', 
'tgt_y': '(50)', 'widgets': "['tx_az', 'tx_beamw', 'dets', 'run']"}), "(rx_omni=True, tx_omni=False, tgt_hide=True, tgt_x=50, tgt_y=50,\n widgets=['tx_az', 'tx_beamw', 'dets', 'run'])\n", (2560, 2674), True, 'import rad.toys as ts\n'), ((2766, 2901), 'rad.toys.ranging', 'ts.ranging', ([], {'rx_omni': '(False)', 'tx_omni': '(True)', 'tgt_hide': '(False)', 'tgt_x': '(50)', 'tgt_y': '(50)', 'widgets': "['rx_az', 'rx_beamw', 'dets', 'run', 'x', 'y']"}), "(rx_omni=False, tx_omni=True, tgt_hide=False, tgt_x=50, tgt_y=50,\n widgets=['rx_az', 'rx_beamw', 'dets', 'run', 'x', 'y'])\n", (2776, 2901), True, 'import rad.toys as ts\n'), ((2993, 3118), 'rad.toys.ranging', 'ts.ranging', ([], {'rx_omni': '(False)', 'tx_omni': '(True)', 'tgt_hide': '(True)', 'tgt_x': '(40)', 'tgt_y': '(-20)', 'widgets': "['rx_az', 'rx_beamw', 'dets', 'run']"}), "(rx_omni=False, tx_omni=True, tgt_hide=True, tgt_x=40, tgt_y=-20,\n widgets=['rx_az', 'rx_beamw', 'dets', 'run'])\n", (3003, 3118), True, 'import rad.toys as ts\n'), ((3212, 3225), 'rad.toys.dish_pat', 'ts.dish_pat', ([], {}), '()\n', (3223, 3225), True, 'import rad.toys as ts\n'), ((3269, 3331), 'rad.toys.array', 'ts.array', ([], {'num_elem': '(7)', 'dx': '(4)', 'widgets': "['tx_az', 'run', 'x', 'y']"}), "(num_elem=7, dx=4, widgets=['tx_az', 'run', 'x', 'y'])\n", (3277, 3331), True, 'import rad.toys as ts\n'), ((3409, 3421), 'rad.toys.doppler', 'ts.doppler', ([], {}), '()\n', (3419, 3421), True, 'import rad.toys as ts\n'), ((3465, 3480), 'rad.toys.radar_wave', 'ts.radar_wave', ([], {}), '()\n', (3478, 3480), True, 'import rad.toys as ts\n'), ((3569, 3639), 'rad.toys.snr', 'ts.snr', ([], {'noise_energy': '(-20)', 'show_snr': '(False)', 'signal_energy': '(10)', 'widgets': '[]'}), '(noise_energy=-20, show_snr=False, signal_energy=10, widgets=[])\n', (3575, 3639), True, 'import rad.toys as ts\n'), ((3716, 3783), 'rad.toys.snr', 'ts.snr', ([], {'noise_energy': '(0)', 'show_snr': '(False)', 'signal_energy': '(0)', 'widgets': '[]'}), 
'(noise_energy=0, show_snr=False, signal_energy=0, widgets=[])\n', (3722, 3783), True, 'import rad.toys as ts\n'), ((3859, 3867), 'rad.toys.snr', 'ts.snr', ([], {}), '()\n', (3865, 3867), True, 'import rad.toys as ts\n'), ((3909, 3923), 'rad.toys.sine_prop', 'ts.sine_prop', ([], {}), '()\n', (3921, 3923), True, 'import rad.toys as ts\n'), ((3965, 3975), 'rad.toys.friis', 'ts.friis', ([], {}), '()\n', (3973, 3975), True, 'import rad.toys as ts\n'), ((4017, 4039), 'rad.toys.radar_range_power', 'ts.radar_range_power', ([], {}), '()\n', (4037, 4039), True, 'import rad.toys as ts\n'), ((4081, 4104), 'rad.toys.radar_range_energy', 'ts.radar_range_energy', ([], {}), '()\n', (4102, 4104), True, 'import rad.toys as ts\n'), ((4146, 4166), 'rad.toys.radar_range_snr', 'ts.radar_range_snr', ([], {}), '()\n', (4164, 4166), True, 'import rad.toys as ts\n'), ((4204, 4224), 'rad.toys.radar_range_det', 'ts.radar_range_det', ([], {}), '()\n', (4222, 4224), True, 'import rad.toys as ts\n'), ((4310, 4330), 'rad.toys.radar_range_det', 'ts.radar_range_det', ([], {}), '()\n', (4328, 4330), True, 'import rad.toys as ts\n'), ((4368, 4530), 'rad.toys.design', 'ts.design', ([], {'max_rcs': '(-5)', 'min_range': '(100)', 'min_snr': '(15)', 'metrics': "['price', 'range', 'rcs', 'snr']", 'widgets': "['freq', 'energy', 'noise_temp', 'r', 'radius', 'rcs']"}), "(max_rcs=-5, min_range=100, min_snr=15, metrics=['price', 'range',\n 'rcs', 'snr'], widgets=['freq', 'energy', 'noise_temp', 'r', 'radius',\n 'rcs'])\n", (4377, 4530), True, 'import rad.toys as ts\n'), ((4606, 4634), 'rad.toys.dish_pat', 'ts.dish_pat', ([], {'show_beamw': '(True)'}), '(show_beamw=True)\n', (4617, 4634), True, 'import rad.toys as ts\n'), ((4672, 4700), 'rad.toys.rect_pat', 'ts.rect_pat', ([], {'show_beamw': '(True)'}), '(show_beamw=True)\n', (4683, 4700), True, 'import rad.toys as ts\n'), ((4738, 4942), 'rad.toys.design', 'ts.design', ([], {'max_beamw': '(3)', 'max_price': '(110000)', 'max_rcs': '(-10)', 'min_range': '(120)', 
'min_snr': '(14)', 'metrics': "['beamw', 'price', 'range', 'rcs', 'snr']", 'widgets': "['freq', 'energy', 'noise_temp', 'r', 'radius', 'rcs']"}), "(max_beamw=3, max_price=110000, max_rcs=-10, min_range=120,\n min_snr=14, metrics=['beamw', 'price', 'range', 'rcs', 'snr'], widgets=\n ['freq', 'energy', 'noise_temp', 'r', 'radius', 'rcs'])\n", (4747, 4942), True, 'import rad.toys as ts\n'), ((5049, 5060), 'rad.radar.Target', 'rd.Target', ([], {}), '()\n', (5058, 5060), True, 'import rad.radar as rd\n'), ((5196, 5294), 'rad.robby.robby', 'rby.robby', ([], {'targets': '[test_tgt]', 'reset': '(False)', 'widgets': "['freq', 'energy', 'noise_temp', 'radius']"}), "(targets=[test_tgt], reset=False, widgets=['freq', 'energy',\n 'noise_temp', 'radius'])\n", (5205, 5294), True, 'import rad.robby as rby\n'), ((5392, 5464), 'rad.toys.sine_pulse', 'ts.sine_pulse', ([], {'freq': '(1)', 'prf': '(0.1)', 'widgets': "['energy', 'freq', 'pulsewidth']"}), "(freq=1, prf=0.1, widgets=['energy', 'freq', 'pulsewidth'])\n", (5405, 5464), True, 'import rad.toys as ts\n'), ((5532, 5629), 'rad.toys.sine_pulse', 'ts.sine_pulse', ([], {'show_duty': '(True)', 'show_pri': '(True)', 'widgets': "['energy', 'freq', 'prf', 'pulsewidth']"}), "(show_duty=True, show_pri=True, widgets=['energy', 'freq',\n 'prf', 'pulsewidth'])\n", (5545, 5629), True, 'import rad.toys as ts\n'), ((5697, 5705), 'rad.toys.lfm', 'ts.lfm', ([], {}), '()\n', (5703, 5705), True, 'import rad.toys as ts\n'), ((5747, 5760), 'rad.toys.dish_pat', 'ts.dish_pat', ([], {}), '()\n', (5758, 5760), True, 'import rad.toys as ts\n'), ((5798, 5808), 'rad.toys.array', 'ts.array', ([], {}), '()\n', (5806, 5808), True, 'import rad.toys as ts\n'), ((5850, 5866), 'rad.toys.delay_steer', 'ts.delay_steer', ([], {}), '()\n', (5864, 5866), True, 'import rad.toys as ts\n'), ((5908, 5924), 'rad.toys.phase_steer', 'ts.phase_steer', ([], {}), '()\n', (5922, 5924), True, 'import rad.toys as ts\n'), ((5966, 5974), 'rad.toys.pol', 'ts.pol', ([], {}), '()\n', 
(5972, 5974), True, 'import rad.toys as ts\n'), ((6016, 6093), 'rad.toys.matched_filter', 'ts.matched_filter', ([], {'start_freq': '(1)', 'stop_freq': '(1)', 'widgets': "['delay', 'pulsewidth']"}), "(start_freq=1, stop_freq=1, widgets=['delay', 'pulsewidth'])\n", (6033, 6093), True, 'import rad.toys as ts\n'), ((6169, 6241), 'rad.toys.range_res', 'ts.range_res', ([], {'start_freq': '(1)', 'stop_freq': '(1)', 'widgets': "['range', 'pulsewidth']"}), "(start_freq=1, stop_freq=1, widgets=['range', 'pulsewidth'])\n", (6181, 6241), True, 'import rad.toys as ts\n'), ((6317, 6336), 'rad.toys.matched_filter', 'ts.matched_filter', ([], {}), '()\n', (6334, 6336), True, 'import rad.toys as ts\n'), ((6380, 6394), 'rad.toys.range_res', 'ts.range_res', ([], {}), '()\n', (6392, 6394), True, 'import rad.toys as ts\n'), ((6449, 6460), 'rad.radar.Target', 'rd.Target', ([], {}), '()\n', (6458, 6460), True, 'import rad.radar as rd\n'), ((6596, 6728), 'rad.robby.robby', 'rby.robby', ([], {'targets': '[test_tgt]', 'reset': '(False)', 'widgets': "['bandw', 'coherent', 'freq', 'energy', 'noise_temp', 'num_integ', 'radius']"}), "(targets=[test_tgt], reset=False, widgets=['bandw', 'coherent',\n 'freq', 'energy', 'noise_temp', 'num_integ', 'radius'])\n", (6605, 6728), True, 'import rad.robby as rby\n'), ((6801, 6821), 'rad.toys.radar_range_det', 'ts.radar_range_det', ([], {}), '()\n', (6819, 6821), True, 'import rad.toys as ts\n'), ((6863, 6898), 'rad.toys.radar_range_det', 'ts.radar_range_det', ([], {'highlight': '(False)'}), '(highlight=False)\n', (6881, 6898), True, 'import rad.toys as ts\n'), ((6940, 6956), 'rad.toys.detect_game', 'ts.detect_game', ([], {}), '()\n', (6954, 6956), True, 'import rad.toys as ts\n'), ((6998, 7012), 'rad.toys.threshold', 'ts.threshold', ([], {}), '()\n', (7010, 7012), True, 'import rad.toys as ts\n'), ((7054, 7062), 'rad.toys.roc', 'ts.roc', ([], {}), '()\n', (7060, 7062), True, 'import rad.toys as ts\n'), ((7115, 7126), 'rad.radar.Target', 'rd.Target', ([], 
{}), '()\n', (7124, 7126), True, 'import rad.radar as rd\n'), ((7262, 7423), 'rad.robby.robby', 'rby.robby', ([], {'targets': '[test_tgt]', 'dets': '(True)', 'reset': '(False)', 'widgets': "['bandw', 'coherent', 'det_thresh', 'freq', 'energy', 'noise_temp',\n 'num_integ', 'radius']"}), "(targets=[test_tgt], dets=True, reset=False, widgets=['bandw',\n 'coherent', 'det_thresh', 'freq', 'energy', 'noise_temp', 'num_integ',\n 'radius'])\n", (7271, 7423), True, 'import rad.robby as rby\n'), ((7551, 7564), 'rad.toys.dish_pat', 'ts.dish_pat', ([], {}), '()\n', (7562, 7564), True, 'import rad.toys as ts\n'), ((7614, 7625), 'rad.radar.Target', 'rd.Target', ([], {}), '()\n', (7623, 7625), True, 'import rad.radar as rd\n'), ((7762, 7858), 'rad.robby.robby', 'rby.robby', ([], {'targets': '[test_tgt]', 'energy': '(0.8)', 'freq': '(4000.0)', 'radius': '(0.8)', 'reset': '(False)', 'widgets': '[]'}), '(targets=[test_tgt], energy=0.8, freq=4000.0, radius=0.8, reset=\n False, widgets=[])\n', (7771, 7858), True, 'import rad.robby as rby\n'), ((7944, 7960), 'rad.toys.cross_range', 'ts.cross_range', ([], {}), '()\n', (7958, 7960), True, 'import rad.toys as ts\n'), ((7998, 8010), 'rad.toys.doppler', 'ts.doppler', ([], {}), '()\n', (8008, 8010), True, 'import rad.toys as ts\n'), ((8048, 8055), 'rad.toys.cw', 'ts.cw', ([], {}), '()\n', (8053, 8055), True, 'import rad.toys as ts\n'), ((8093, 8162), 'rad.toys.cw', 'ts.cw', ([], {'freq': '(2000.0)', 'dr': '(55)', 'integ_time': '(15)', 'targ_line': '(False)', 'widgets': '[]'}), '(freq=2000.0, dr=55, integ_time=15, targ_line=False, widgets=[])\n', (8098, 8162), True, 'import rad.toys as ts\n'), ((8243, 8251), 'rad.toys.rdi', 'ts.rdi', ([], {}), '()\n', (8249, 8251), True, 'import rad.toys as ts\n'), ((8369, 8380), 'rad.air.Route', 'air.Route', ([], {}), '()\n', (8378, 8380), True, 'import rad.air as air\n'), ((8404, 8427), 'numpy.array', 'np.array', (['[-10000.0, 0]'], {}), '([-10000.0, 0])\n', (8412, 8427), True, 'import numpy as np\n'), 
((8446, 8468), 'numpy.array', 'np.array', (['[0, 10000.0]'], {}), '([0, 10000.0])\n', (8454, 8468), True, 'import numpy as np\n'), ((8596, 8652), 'numpy.sqrt', 'np.sqrt', (['(test_route.vel[0] ** 2 + test_route.vel[1] ** 2)'], {}), '(test_route.vel[0] ** 2 + test_route.vel[1] ** 2)\n', (8603, 8652), True, 'import numpy as np\n'), ((8958, 8974), 'rad.toys.propagation', 'ts.propagation', ([], {}), '()\n', (8972, 8974), True, 'import rad.toys as ts\n'), ((9012, 9020), 'rad.toys.gnn', 'ts.gnn', ([], {}), '()\n', (9018, 9020), True, 'import rad.toys as ts\n'), ((9058, 9066), 'rad.toys.ekf', 'ts.ekf', ([], {}), '()\n', (9064, 9066), True, 'import rad.toys as ts\n'), ((9135, 9162), 'rad.air.routes', 'air.routes', (['(6)', '(10000.0)', '(200)'], {}), '(6, 10000.0, 200)\n', (9145, 9162), True, 'import rad.air as air\n'), ((9174, 9195), 'rad.air.to_target', 'air.to_target', (['routes'], {}), '(routes)\n', (9187, 9195), True, 'import rad.air as air\n'), ((9227, 9340), 'rad.robby.robby', 'rby.robby', ([], {'targets': 'targets', 'radius': '(1.0)', 'freq': '(5000.0)', 'reset': '(True)', 'dets': '(True)', 'pulses': '(True)', 'scan_rate': '(8)', 'bandw': '(2)'}), '(targets=targets, radius=1.0, freq=5000.0, reset=True, dets=True,\n pulses=True, scan_rate=8, bandw=2)\n', (9236, 9340), True, 'import rad.robby as rby\n'), ((9503, 9530), 'rad.air.routes', 'air.routes', (['(6)', '(10000.0)', '(150)'], {}), '(6, 10000.0, 150)\n', (9513, 9530), True, 'import rad.air as air\n'), ((9532, 9555), 'rad.air.plot_routes', 'air.plot_routes', (['routes'], {}), '(routes)\n', (9547, 9555), True, 'import rad.air as air\n'), ((9570, 9591), 'rad.air.to_target', 'air.to_target', (['routes'], {}), '(routes)\n', (9583, 9591), True, 'import rad.air as air\n'), ((9661, 10124), 'rad.toys.design', 'ts.design', ([], {'bandw': '(0.5)', 'bandw_lim': '[0.1, 3]', 'energy': '(1)', 'energy_lim': '[0, 1]', 'freq': '(500)', 'freq_lim': '[100, 3000]', 'max_price': '(50000.0)', 'noise_temp': '(1000)', 'noise_temp_lim': 
'[500, 1200]', 'num_integ': '(1)', 'num_integ_lim': '[1, 10]', 'r': '(1)', 'r_lim': '[1, 20]', 'radius': '(0.1)', 'radius_lim': '[0.1, 0.5]', 'rcs': '(10)', 'rcs_lim': '[-10, 25]', 'scan_rate': '(1)', 'scan_rate_lim': '[1, 10]', 'metrics': "['price']", 'widgets': "['bandw', 'coherent', 'freq', 'energy', 'noise_temp', 'num_integ', 'radius',\n 'scan_rate']"}), "(bandw=0.5, bandw_lim=[0.1, 3], energy=1, energy_lim=[0, 1], freq=\n 500, freq_lim=[100, 3000], max_price=50000.0, noise_temp=1000,\n noise_temp_lim=[500, 1200], num_integ=1, num_integ_lim=[1, 10], r=1,\n r_lim=[1, 20], radius=0.1, radius_lim=[0.1, 0.5], rcs=10, rcs_lim=[-10,\n 25], scan_rate=1, scan_rate_lim=[1, 10], metrics=['price'], widgets=[\n 'bandw', 'coherent', 'freq', 'energy', 'noise_temp', 'num_integ',\n 'radius', 'scan_rate'])\n", (9670, 10124), True, 'import rad.toys as ts\n'), ((10312, 10818), 'rad.toys.design', 'ts.design', ([], {'bandw': '(0.5)', 'bandw_lim': '[0.1, 3]', 'energy': '(1)', 'energy_lim': '[0, 1]', 'freq': '(500)', 'freq_lim': '[100, 3000]', 'max_price': '(50000.0)', 'min_snr': '(5)', 'noise_temp': '(1000)', 'noise_temp_lim': '[500, 1200]', 'num_integ': '(1)', 'num_integ_lim': '[1, 10]', 'r': '(1)', 'r_lim': '[1, 20]', 'radius': '(0.1)', 'radius_lim': '[0.1, 0.5]', 'rcs': '(10)', 'rcs_lim': '[-10, 25]', 'scan_rate': '(1)', 'scan_rate_lim': '[1, 10]', 'metrics': "['price', 'snr']", 'widgets': "['bandw', 'coherent', 'freq', 'energy', 'min_snr', 'noise_temp',\n 'num_integ', 'r', 'radius', 'rcs', 'scan_rate']"}), "(bandw=0.5, bandw_lim=[0.1, 3], energy=1, energy_lim=[0, 1], freq=\n 500, freq_lim=[100, 3000], max_price=50000.0, min_snr=5, noise_temp=\n 1000, noise_temp_lim=[500, 1200], num_integ=1, num_integ_lim=[1, 10], r\n =1, r_lim=[1, 20], radius=0.1, radius_lim=[0.1, 0.5], rcs=10, rcs_lim=[\n -10, 25], scan_rate=1, scan_rate_lim=[1, 10], metrics=['price', 'snr'],\n widgets=['bandw', 'coherent', 'freq', 'energy', 'min_snr', 'noise_temp',\n 'num_integ', 'r', 'radius', 'rcs', 
'scan_rate'])\n", (10321, 10818), True, 'import rad.toys as ts\n'), ((11047, 11058), 'rad.air.Route', 'air.Route', ([], {}), '()\n', (11056, 11058), True, 'import rad.air as air\n'), ((11082, 11105), 'numpy.array', 'np.array', (['[-10000.0, 0]'], {}), '([-10000.0, 0])\n', (11090, 11105), True, 'import numpy as np\n'), ((11124, 11146), 'numpy.array', 'np.array', (['[0, 10000.0]'], {}), '([0, 10000.0])\n', (11132, 11146), True, 'import numpy as np\n'), ((11274, 11330), 'numpy.sqrt', 'np.sqrt', (['(test_route.vel[0] ** 2 + test_route.vel[1] ** 2)'], {}), '(test_route.vel[0] ** 2 + test_route.vel[1] ** 2)\n', (11281, 11330), True, 'import numpy as np\n'), ((11390, 11419), 'rad.air.plot_routes', 'air.plot_routes', (['[test_route]'], {}), '([test_route])\n', (11405, 11419), True, 'import rad.air as air\n'), ((11431, 11458), 'rad.air.to_target', 'air.to_target', (['[test_route]'], {}), '([test_route])\n', (11444, 11458), True, 'import rad.air as air\n'), ((11505, 11580), 'rad.robby.robby', 'rby.robby', ([], {'targets': 'test_tgt', 'max_price': '(50000.0)', 'reset': '(True)', 'show_price': '(True)'}), '(targets=test_tgt, max_price=50000.0, reset=True, show_price=True)\n', (11514, 11580), True, 'import rad.robby as rby\n'), ((11631, 11654), 'rad.air.plot_routes', 'air.plot_routes', (['routes'], {}), '(routes)\n', (11646, 11654), True, 'import rad.air as air\n'), ((11708, 11782), 'rad.robby.robby', 'rby.robby', ([], {'targets': 'targets', 'max_price': '(50000.0)', 'reset': '(True)', 'show_price': '(True)'}), '(targets=targets, max_price=50000.0, reset=True, show_price=True)\n', (11717, 11782), True, 'import rad.robby as rby\n'), ((8739, 8766), 'rad.air.to_target', 'air.to_target', (['[test_route]'], {}), '([test_route])\n', (8752, 8766), True, 'import rad.air as air\n'), ((5156, 5168), 'math.cos', 'math.cos', (['az'], {}), '(az)\n', (5164, 5168), False, 'import math\n'), ((5172, 5184), 'math.sin', 'math.sin', (['az'], {}), '(az)\n', (5180, 5184), False, 'import math\n'), 
((6556, 6568), 'math.cos', 'math.cos', (['az'], {}), '(az)\n', (6564, 6568), False, 'import math\n'), ((6572, 6584), 'math.sin', 'math.sin', (['az'], {}), '(az)\n', (6580, 6584), False, 'import math\n'), ((7222, 7234), 'math.cos', 'math.cos', (['az'], {}), '(az)\n', (7230, 7234), False, 'import math\n'), ((7238, 7250), 'math.sin', 'math.sin', (['az'], {}), '(az)\n', (7246, 7250), False, 'import math\n'), ((7722, 7734), 'math.cos', 'math.cos', (['az'], {}), '(az)\n', (7730, 7734), False, 'import math\n'), ((7738, 7750), 'math.sin', 'math.sin', (['az'], {}), '(az)\n', (7746, 7750), False, 'import math\n')] |
# Function to create a potential profile
import numpy as np
def wire_profile(x, param):
    """Logarithmic potential of a single wire gate.

    Evaluates V(x) = peak * ln(sqrt(h^2 + (x - mean)^2) / rho) on the grid.

    Parameters
    ----------
    x : array_like
        1d linear grid of positions.
    param : tuple
        Gate parameters (peak, mean, h, rho).

    Returns
    -------
    ndarray : potential values at each grid point.
    """
    peak, mean, h, rho = param
    # Distance from the wire (offset h above the grid line).
    radial = np.sqrt((x - mean) ** 2 + h ** 2)
    return peak * np.log((1.0 / rho) * radial)
def V_x_wire(x, list_b):
    """Total potential profile from a set of wire gates.

    Parameters
    ----------
    x : array_like
        1d linear grid.
    list_b : list of tuple
        Gate parameters, each (V, mu, h, rho) for
        V(x) = V * ln(sqrt(h^2 + (x - mu)^2) / rho).

    Returns
    -------
    ndarray : sum of the individual gate profiles over the grid.
    """
    per_gate = [wire_profile(x, gate) for gate in list_b]
    total = np.sum(per_gate, axis=0)
    return total
| [
"numpy.sum",
"numpy.sqrt"
] | [((534, 563), 'numpy.sum', 'np.sum', (['wire_profiles'], {'axis': '(0)'}), '(wire_profiles, axis=0)\n', (540, 563), True, 'import numpy as np\n'), ((218, 251), 'numpy.sqrt', 'np.sqrt', (['((x - mean) ** 2 + h ** 2)'], {}), '((x - mean) ** 2 + h ** 2)\n', (225, 251), True, 'import numpy as np\n')] |
import sys
import numpy as np
import cv2
import time
import argparse
import yolov2tiny
# Maps the command-line precision choice to the numpy dtype it denotes.
np_dtype = {
    'FP32': np.float32,
    'FP16': np.float16,
    'INT8': np.int8,
}
def resize_input(im):
    """Prepare one image for the network.

    Resizes to 416x416, scales pixel values into [0, 1], reverses the
    channel axis (BGR -> RGB for OpenCV-loaded images), and returns float32.
    """
    scaled = cv2.resize(im, (416, 416)) / 255.0
    rgb = scaled[:, :, ::-1]
    return np.asarray(rgb, dtype=np.float32)
def image_object_detection(in_image, out_image, precision):
    """Run YOLOv2-tiny on one image and write the annotated result.

    Parameters
    ----------
    in_image : str
        Path to the input jpg.
    out_image : str
        Path where the annotated jpg is written.
    precision : str
        Precision key ('FP32'/'FP16'/'INT8') passed to the network; also
        used as the filename for the raw-output .npy debug dump.

    Prints the inference-only and end-to-end wall-clock times.
    """
    frame = cv2.imread(in_image)
    y2t = yolov2tiny.YOLO2_TINY(
        [1, 416, 416, 3], "./y2t_weights.onnx", precision)

    t_end2end = time.time()
    batch = np.expand_dims(resize_input(frame), axis=0)

    t_inference = time.time()
    tout = y2t.inference(batch)
    t_inference = time.time() - t_inference

    tout = np.squeeze(tout)
    # Debug dump of the raw network output, named after the precision key.
    np.save(precision, tout)
    resized = cv2.resize(frame, (416, 416), interpolation=cv2.INTER_CUBIC)
    frame = yolov2tiny.postprocessing(tout.astype(np.float32), resized)
    t_end2end = time.time() - t_end2end

    cv2.imwrite(out_image, frame)
    print("DNN inference elapsed time: %.3f" % t_inference)
    print("End-to-end elapsed time : %.3f" % t_end2end)
def main():
    """Parse command-line arguments and run detection on a single image."""
    ap = argparse.ArgumentParser()
    ap.add_argument("IN_IMAGE", help="path to the input jpg")
    ap.add_argument("OUT_IMAGE", help="path to the output jpg")
    ap.add_argument("PRECISION", choices=np_dtype.keys(),
                    help="Precision used for convolution")
    opts = ap.parse_args()
    image_object_detection(opts.IN_IMAGE, opts.OUT_IMAGE, opts.PRECISION)


if __name__ == "__main__":
    main()
| [
"yolov2tiny.YOLO2_TINY",
"numpy.save",
"argparse.ArgumentParser",
"cv2.imwrite",
"numpy.asarray",
"numpy.expand_dims",
"time.time",
"cv2.imread",
"numpy.squeeze",
"cv2.resize"
] | [((208, 234), 'cv2.resize', 'cv2.resize', (['im', '(416, 416)'], {}), '(im, (416, 416))\n', (218, 234), False, 'import cv2\n'), ((298, 332), 'numpy.asarray', 'np.asarray', (['imsz'], {'dtype': 'np.float32'}), '(imsz, dtype=np.float32)\n', (308, 332), True, 'import numpy as np\n'), ((407, 427), 'cv2.imread', 'cv2.imread', (['in_image'], {}), '(in_image)\n', (417, 427), False, 'import cv2\n'), ((439, 511), 'yolov2tiny.YOLO2_TINY', 'yolov2tiny.YOLO2_TINY', (['[1, 416, 416, 3]', '"""./y2t_weights.onnx"""', 'precision'], {}), "([1, 416, 416, 3], './y2t_weights.onnx', precision)\n", (460, 511), False, 'import yolov2tiny\n'), ((538, 549), 'time.time', 'time.time', ([], {}), '()\n', (547, 549), False, 'import time\n'), ((597, 627), 'numpy.expand_dims', 'np.expand_dims', (['_frame'], {'axis': '(0)'}), '(_frame, axis=0)\n', (611, 627), True, 'import numpy as np\n'), ((647, 658), 'time.time', 'time.time', ([], {}), '()\n', (656, 658), False, 'import time\n'), ((748, 764), 'numpy.squeeze', 'np.squeeze', (['tout'], {}), '(tout)\n', (758, 764), True, 'import numpy as np\n'), ((818, 842), 'numpy.save', 'np.save', (['precision', 'tout'], {}), '(precision, tout)\n', (825, 842), True, 'import numpy as np\n'), ((1044, 1073), 'cv2.imwrite', 'cv2.imwrite', (['out_image', 'frame'], {}), '(out_image, frame)\n', (1055, 1073), False, 'import cv2\n'), ((1220, 1245), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1243, 1245), False, 'import argparse\n'), ((710, 721), 'time.time', 'time.time', ([], {}), '()\n', (719, 721), False, 'import time\n'), ((932, 992), 'cv2.resize', 'cv2.resize', (['frame', '(416, 416)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(frame, (416, 416), interpolation=cv2.INTER_CUBIC)\n', (942, 992), False, 'import cv2\n'), ((1015, 1026), 'time.time', 'time.time', ([], {}), '()\n', (1024, 1026), False, 'import time\n')] |
# pyphenocam
import os as _os
from . import config
import numpy as np
from bs4 import BeautifulSoup as _BS
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import re
import requests
import pandas as pd
from . import utils
from . import imageprocessing
__all__ = ['_get_lines', 'parse_fname', 'process_files']
def get_sites_df():
    """Return a pandas DataFrame listing all PhenoCam network sites.

    Fetches the network table page and parses its first HTML table.
    Columns are renamed to: site, lat, lon, elevation, description.
    Requires network access; raises on request/parsing failure.
    """
    # Fix: the original bound an unused http:// URL to `url` while
    # requesting a hard-coded https:// URL — use one named URL for both.
    url = "https://phenocam.sr.unh.edu/webcam/network/table"
    r = requests.get(url)
    # The first table on the page holds the site listing.
    df = pd.read_html(r.text)[0]
    df.columns = ['site', 'lat', 'lon', 'elevation', 'description']
    return df
# Fetched once at import time — importing this module requires network access.
SITES_DF = get_sites_df()
def get_sitenames():
    """Return the names of all sites in the PhenoCam network as a list.

    Reads from the module-level SITES_DF cache populated at import time.
    """
    return list(SITES_DF.site)
def get_site(sitename='harvard', cache_dname=None, load_all=False):
    """Build a ``SiteData`` object for a single PhenoCam site.

    Parameters
    ----------
    sitename : str
        Site name; must match one of the names from ``get_sitenames()``.
    cache_dname : str, optional
        Directory used to cache downloaded data.  When falsy, the
        configured default cache directory is used instead.
    load_all : bool
        Passed through to ``SiteData``.

    Raises
    ------
    Exception
        If ``sitename`` is not a known network site.
    """
    if sitename not in get_sitenames():
        raise Exception("Site {} not in network".format(sitename))
    # Fall back to the configured cache directory when none is supplied,
    # then record the choice in the shared config.
    site_dname = cache_dname if cache_dname else config.get_cache_dname()
    config.set_cache_dname(site_dname)
    return SiteData(sitename, site_dname, load_all=load_all)
# Mapping of the two-letter ROI (region of interest) vegetation-type codes
# used in PhenoCam ROI file names to human-readable descriptions.
ROI_TYPES = {'AG': 'Agriculture',
             'DB': 'Deciduous Broadleaf',
             'EB': 'Evergreen Broadleaf',
             'DN': 'Deciduous Needleaf',
             'EN': 'Evergreen Needleleaf',
             'GR': 'Grassland',
             'NV': 'Non-vegetated',
             'RF': 'Reference Panel',
             'SH': 'Shrub',
             'UN': 'Understory',
             'WL': 'Wetland',
             'XX': 'Mixed/Canopy/Other'}
import ssl
# SSL context used for every urllib request below.
# NOTE(review): hostname checking and certificate verification are both
# disabled here, so HTTPS connections made with this context are NOT
# authenticated.  This avoids failures on hosts with certificate problems
# but is insecure — confirm this is intentional.
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
class SiteData():
    """SiteData is the main class that encapsulates the data for a single site

    On construction it scrapes the site's ROI archive directory and browse
    pages, indexes the available mask/CSV products, and caches downloaded
    files under ``site_dname``.
    """
    def __init__(self, sitename, site_dname, load_all=False):
        """sitename -> is the name of the site to initialize,
        this must be an exact match for one of the strings in get_sitenames
        site_dname -> path to the local directory we'll be saving phenocam
        images and data into
        load_all -> boolean flag indicating whether to download all data
        immediately. By default data are downloaded as needed
        """
        # Make sure the local cache directory exists.
        if not _os.path.exists(site_dname):
            _os.makedirs(site_dname)
        self.site_dname = site_dname
        self.sitename = sitename
        self.csvs = []
        self.pngs = []
        self.tifs = []
        self.others = []
        self.roi_url = "http://phenocam.sr.unh.edu/data/archive/{}/ROI".format(
            sitename)
        # Scrape the ROI archive directory listing and bucket every linked
        # file by its extension.
        html_page = urllib.request.urlopen(self.roi_url, context=ctx)
        soup = _BS(html_page, "lxml")
        for link in soup.findAll('a'):
            href = link.get('href')
            if href.endswith('.csv'):
                self.csvs.append(href)
            elif href.endswith('.png'):
                self.pngs.append(href)
            elif href.endswith('.tif'):
                self.tifs.append(href)
            else:
                self.others.append(href)
        # Build the nested lookup
        # rois[roi_type]['mask'][sequence][mask_index] -> mask .tif name
        # from file names of the form site_TYPE_sequence_index.tif.
        self.rois = {}
        for tif in self.tifs:
            try:
                sitename_, roitype, roisequence, maskindex = tif.split('_')
                self.rois[roitype] = self.rois.get(roitype, {})
                self.rois[roitype]['mask'] = self.rois[roitype].get('mask', {})
                self.rois[roitype]['mask'][roisequence] = \
                    self.rois[roitype]['mask'].get(roisequence, {})
                self.rois[roitype]['mask'][roisequence][maskindex[:-4]] = \
                    self.rois[roitype]['mask'][roisequence].get(maskindex[:-4], {})
                self.rois[roitype]['mask'][roisequence][maskindex[:-4]] = tif
            except ValueError:
                pass #old style name schema, ignoring
        # Register each CSV product (timeseries / gcc90 / roi definition)
        # under the matching ROI type and sequence.
        for csv in self.csvs:
            if csv.startswith(sitename):
                parts = csv.split('_')
                if len(parts) < 4:
                    pass
                elif parts[3] == 'timeseries':
                    sitename_, roitype, roisequence, which, v = parts
                elif parts[3] == 'gcc90':
                    sitename_, roitype, roisequence, which, length, v = parts
                elif len(parts) == 5:
                    sitename_, roitype, roisequence, length, v = parts
                    which = 'gcc90'
                if len(parts) == 4:
                    sitename_, roitype, roisequence, roi = parts
                    self.rois[roitype]['roicsv'] = self.rois[
                        parts[1]].get('roicsv', {})
                    self.rois[roitype]['roicsv'][roisequence] = csv
                else:
                    self.rois[roitype][which] = self.rois[
                        roitype].get(which, {})
                    self.rois[roitype][which][length] = self.rois[
                        roitype][which].get(length, {})
                    self.rois[roitype][which][length][
                        v[:-4]] = self.rois[roitype][which][length].get(v[:-4], {})
                    self.rois[roitype][which][length][v[:-4]] = csv
        # Load the 1-day summary product, collect the midday image file
        # names, and pull the full image list from the latest timeseries CSV.
        try:
            one_day_fname = [c for c in self.csvs if "_1day" in c][0]
            url=self.roi_url + '/' + one_day_fname
            self.one_day = utils.pd_read_csv(url, comment="#", parse_dates=['date'])
            # self.one_day = pd.read_csv(
            #     self.roi_url + '/' + one_day_fname, comment="#", parse_dates=['date'])
            self.one_day.index = self.one_day.date
            self.midday_fnames = self.one_day.midday_filename.tolist()
            self.midday_fnames = [
                value for value in self.midday_fnames if not str(value) == 'nan']
            timeseries_fname = [c for c in self.csvs if "_timeseries_" in c][-1]
            timeseries = pd.read_csv(
                self.roi_url + '/' + timeseries_fname, comment="#", parse_dates=['date'])
            timeseries.index = timeseries.date
            self.all_fnames = timeseries.filename
            self.all_fnames = [
                value for value in self.all_fnames if not str(value) == 'nan']
        # NOTE(review): this bare except silently ignores *any* failure, so a
        # site without these products ends up without one_day/midday_fnames
        # and later attribute access will raise AttributeError.
        except:
            pass
        # Scrape the browse page to learn which years/months have data;
        # per-day image counts are filled in lazily by get_days().
        url = "http://phenocam.sr.unh.edu/webcam/browse/{}/".format(self.sitename)
        html_page = urllib.request.urlopen(url, context=ctx)
        soup = _BS(html_page, "lxml")
        years = {}
        for y in soup.find_all('div', {"class":"yearsummary"}):
            year = int(y.text.split()[1])
            years[year] = {}
            for table in y.find_all('table'):
                for a in table.find_all('a'):
                    month = int(a.get('href').split('/')[-2])
                    years[year][month] = {}
        self.data = years
        allsites = SITES_DF # get_sites_df()
        # Latitude/longitude of this site, looked up from the site table
        # (columns 1 and 2 are lat and lon).
        self.y, self.x = allsites[allsites.site == sitename].values[0][1:3]
    def get_days(self, dt):
        """Return a {day-of-month: image count} dict for the month
        containing `dt`, scraped from the site's browse calendar page.
        """
        fnameurl = config.get_url('FNAMEURL_BROWSE')
        url = fnameurl.format(self.sitename, dt.year, dt.month, dt.day)[:-3]
        html_page = urllib.request.urlopen(url, context=ctx)
        soup = _BS(html_page, "lxml")
        ir_lookup = {}
        days = {}
        for link in soup.findAll("div", {"class":"calday"}):
            monthday = link.findAll("div", {"class":"monthday"})
            if monthday:
                day = int(monthday[0].findAll('strong')[0].text)
                images = int(link.findAll("div", {"class":"imagecount"})[0].text.split('=')[1])
                days[day] = images
        return days
    def get_closest_fname(self, dt):
        """Return the archive file name of the image closest in time to `dt`.

        Searches outward from dt.day for a day with at least one image,
        then picks the file whose timestamp is nearest to `dt`.
        """
        fnameurl = config.get_url('FNAMEURL_BROWSE')
        if dt.year not in self.data:
            raise Exception("data for year {} not available".format(dt.year))
        if dt.month not in self.data[dt.year]:
            raise Exception("data for year/month {}/{} not available".format(dt.year, dt.month))
        if not self.data[dt.year][dt.month]:
            self.data[dt.year][dt.month] = self.get_days(dt)
        # Interleaved day offsets 0, 1, -1, 2, -2, ... to search outward
        # from dt.day.
        day_search_order = np.vstack((np.arange(30), np.arange(30)*-1)).reshape((-1,),order='F')[1:]
        for offset in day_search_order:
            d = dt.day + offset
            if d in self.data[dt.year][dt.month] and self.data[dt.year][dt.month][d] > 0:
                break
        url = fnameurl.format(self.sitename, dt.year, dt.month, dt.day+offset)
        html_page = urllib.request.urlopen(url, context=ctx)
        soup = _BS(html_page, "lxml")
        ir_lookup = {}
        for link in soup.findAll('a'):
            href = link.get('href')
            if href and href.endswith('jpg'):
                ir_fname = href.split('/')[-1]
                ir_dt = utils.parse_fname(ir_fname)[-1]
                ir_lookup[ir_dt] = ir_fname
        return ir_lookup[utils.nearest_date(list(ir_lookup.keys()), dt)]
    def list_rois(self,):
        """Returns a list of the rois associated with this site"""
        return list(self.rois.keys())
    def get_roi_fname(self, roi_type=None, roi_sequence=None, roi_num=None):
        """Returns a local filename to the roi request
        Downloads a local copy if one doesn't exist"""
        # Each unspecified level defaults to the first available entry.
        if not roi_type:
            roi_type = list(self.rois.keys())[0]
        if not roi_sequence:
            roi_sequence = list(self.rois[roi_type]['mask'].keys())[0]
        if not roi_num:
            roi_num = list(self.rois[roi_type]['mask'][roi_sequence].keys())[0]
        roi_fname = self.rois[roi_type]['mask'][roi_sequence][roi_num]
        local_fname = _os.path.join(self.site_dname, self.sitename, roi_fname)
        # print local_fname
        if not _os.path.exists(local_fname):
            roi_tif_url = self.roi_url + "/" + roi_fname
            utils.get_url_file(roi_tif_url, local_fname)
        return local_fname
    def get_roi(self, roi_type=None, roi_sequence=None, roi_num=None, masked=False):
        """Returns a boolean numpy array for a specified ROI """
        fname = self.get_roi_fname(roi_type, roi_sequence, roi_num)
        roi = imageprocessing.get_boolean_photo_array(fname)
        # Normalize mask polarity: invert the array when the top-left pixel
        # is False, so roi[0, 0] is always True afterwards.
        if not roi[0, 0]:
            roi = np.logical_not(roi)
        if masked:
            roi = np.ma.masked_where(roi == 0, roi)
        return roi
    def convert_fname_to_url(self, fname, IR=False):
        """returns the full url to a specified fname
        """
        # fname is expected to split on '_' into five parts; the names below
        # suggest site_year_month_day_time — TODO confirm the exact schema.
        site, year, month, day, time = fname.split('_')
        url = "http://phenocam.sr.unh.edu/data/archive/{}/{}/{}/{}".format(
            site, year, month, fname)
        return url
    def convert_fname_to_cachefname(self, fname):
        """Return the local cache path (site/year/month/fname) for `fname`."""
        site, year, month, day, time = fname.split('_')
        cache_fname = _os.path.join(self.site_dname, site, year, month, fname)
        return cache_fname
    def get_local_image_fname(self, fname, IR=False):
        """if it hasn't been previously downloaded a local copy of the file
        with a name of fname will be downloaded
        if IR is True it will also download the cooresponding IR image
        """
        url = self.convert_fname_to_url(fname)
        print(url)
        local_fname = self.convert_fname_to_cachefname(fname)
        local_dname = _os.path.split(local_fname)[0]
        if not _os.path.exists(local_dname):
            _os.makedirs(local_dname)
        if not _os.path.exists(local_fname):
            utils.get_url_file(url, local_fname)
        if IR:
            ir_fname = utils.convert_fname_to_ir(fname)
            local_ir_fname = utils.convert_fname_to_ir(local_fname)
            if not _os.path.exists(local_ir_fname):
                # NOTE(review): this downloads from the *visible-image* URL
                # (fname, not ir_fname) into the IR cache path — confirm
                # that is intended.
                ir_url = self.convert_fname_to_url(fname)
                print(ir_url)
                utils.get_url_file(ir_url, local_ir_fname)
        if IR:
            return local_ir_fname
        else:
            return local_fname
    def get_local_image(self, fname, IR=False):
        """if it hasn't been previously downloaded a local copy of the file
        with a name of fname will be downloaded
        if IR is True it will also download the cooresponding IR image
        """
        if IR:
            local_ir_fname = self.get_local_image_fname(fname, IR)
            return imageprocessing.get_photo_array(local_ir_fname)
        else:
            local_fname = self.get_local_image_fname(fname, IR)
            return imageprocessing.get_photo_array(local_fname)
    def get_midday_image(self, which):
        """Return the midday image selected by `which`: an int indexes into
        the midday file list, anything else is matched against the 1-day
        table via utils.fcl.
        """
        if type(which) == int:
            midday_fname = self.midday_fnames[which]
        else:
            midday_fname = utils.fcl(self.one_day, which).midday_filename
        return self.get_local_image(midday_fname)
    def get_data(self, roi_type=None, which='gcc90', length='1day', version='max'):
        """Return the requested CSV product as a DataFrame indexed by date.

        version='max' picks the highest available version string.
        """
        if not roi_type:
            roi_type = list(self.rois.keys())[0]
        if version == 'max':
            version = max(list(self.rois[roi_type][which][length].keys()))
        df = utils.pd_read_csv(
            self.roi_url + '/' + self.rois[roi_type][which][length][version], comment="#", parse_dates=['date'])
        df.index = df.date
        return df
| [
"pandas.read_html",
"os.makedirs",
"numpy.ma.masked_where",
"pandas.read_csv",
"ssl.create_default_context",
"numpy.logical_not",
"os.path.exists",
"numpy.arange",
"requests.get",
"bs4.BeautifulSoup",
"os.path.split",
"os.path.join"
] | [((2275, 2303), 'ssl.create_default_context', 'ssl.create_default_context', ([], {}), '()\n', (2301, 2303), False, 'import ssl\n'), ((562, 626), 'requests.get', 'requests.get', (['"""https://phenocam.sr.unh.edu/webcam/network/table"""'], {}), "('https://phenocam.sr.unh.edu/webcam/network/table')\n", (574, 626), False, 'import requests\n'), ((637, 657), 'pandas.read_html', 'pd.read_html', (['r.text'], {}), '(r.text)\n', (649, 657), True, 'import pandas as pd\n'), ((3427, 3449), 'bs4.BeautifulSoup', '_BS', (['html_page', '"""lxml"""'], {}), "(html_page, 'lxml')\n", (3430, 3449), True, 'from bs4 import BeautifulSoup as _BS\n'), ((7136, 7158), 'bs4.BeautifulSoup', '_BS', (['html_page', '"""lxml"""'], {}), "(html_page, 'lxml')\n", (7139, 7158), True, 'from bs4 import BeautifulSoup as _BS\n'), ((7921, 7943), 'bs4.BeautifulSoup', '_BS', (['html_page', '"""lxml"""'], {}), "(html_page, 'lxml')\n", (7924, 7943), True, 'from bs4 import BeautifulSoup as _BS\n'), ((9286, 9308), 'bs4.BeautifulSoup', '_BS', (['html_page', '"""lxml"""'], {}), "(html_page, 'lxml')\n", (9289, 9308), True, 'from bs4 import BeautifulSoup as _BS\n'), ((10414, 10470), 'os.path.join', '_os.path.join', (['self.site_dname', 'self.sitename', 'roi_fname'], {}), '(self.site_dname, self.sitename, roi_fname)\n', (10427, 10470), True, 'import os as _os\n'), ((11592, 11648), 'os.path.join', '_os.path.join', (['self.site_dname', 'site', 'year', 'month', 'fname'], {}), '(self.site_dname, site, year, month, fname)\n', (11605, 11648), True, 'import os as _os\n'), ((2993, 3020), 'os.path.exists', '_os.path.exists', (['site_dname'], {}), '(site_dname)\n', (3008, 3020), True, 'import os as _os\n'), ((3035, 3059), 'os.makedirs', '_os.makedirs', (['site_dname'], {}), '(site_dname)\n', (3047, 3059), True, 'import os as _os\n'), ((6621, 6711), 'pandas.read_csv', 'pd.read_csv', (["(self.roi_url + '/' + timeseries_fname)"], {'comment': '"""#"""', 'parse_dates': "['date']"}), "(self.roi_url + '/' + timeseries_fname, 
comment='#', parse_dates\n =['date'])\n", (6632, 6711), True, 'import pandas as pd\n'), ((10518, 10546), 'os.path.exists', '_os.path.exists', (['local_fname'], {}), '(local_fname)\n', (10533, 10546), True, 'import os as _os\n'), ((11025, 11044), 'numpy.logical_not', 'np.logical_not', (['roi'], {}), '(roi)\n', (11039, 11044), True, 'import numpy as np\n'), ((11084, 11117), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(roi == 0)', 'roi'], {}), '(roi == 0, roi)\n', (11102, 11117), True, 'import numpy as np\n'), ((12104, 12131), 'os.path.split', '_os.path.split', (['local_fname'], {}), '(local_fname)\n', (12118, 12131), True, 'import os as _os\n'), ((12153, 12181), 'os.path.exists', '_os.path.exists', (['local_dname'], {}), '(local_dname)\n', (12168, 12181), True, 'import os as _os\n'), ((12196, 12221), 'os.makedirs', '_os.makedirs', (['local_dname'], {}), '(local_dname)\n', (12208, 12221), True, 'import os as _os\n'), ((12240, 12268), 'os.path.exists', '_os.path.exists', (['local_fname'], {}), '(local_fname)\n', (12255, 12268), True, 'import os as _os\n'), ((12484, 12515), 'os.path.exists', '_os.path.exists', (['local_ir_fname'], {}), '(local_ir_fname)\n', (12499, 12515), True, 'import os as _os\n'), ((8873, 8886), 'numpy.arange', 'np.arange', (['(30)'], {}), '(30)\n', (8882, 8886), True, 'import numpy as np\n'), ((8888, 8901), 'numpy.arange', 'np.arange', (['(30)'], {}), '(30)\n', (8897, 8901), True, 'import numpy as np\n')] |
import os
# Presumably numba reads this env var at import time, so it must be set
# before pyequion (which may use numba) is imported.  # reconsider
os.environ["NUMBA_DISABLE_JIT"] = "1" # reconsider
import pyequion
from dataclasses import dataclass
from dataclasses_json import dataclass_json
import numpy as np
import simplejson
import json
# When True, a local Flask app is created for debugging; when False the
# module is used as a cloud-function entry point (`pyequion_api`).
IS_LOCAL = False
# Flask application - For local debugging
if IS_LOCAL:
    from flask import Flask
    from flask_jsonrpc import JSONRPC
    from flask_cors import CORS
    from flask import request # TEST
    app = Flask(__name__)
    CORS(app)
"""
Testing:

{
	"endpoint": "App.create_equilibrium",
	"params": {
		"compounds": ["NaCl"],
		"closingEqType": 0,
		"initial_feed_mass_balance": ["Cl-"]
	}
}

{
    "concentrations": [10],
    "temperature": 25.0,
    "extraParameter": 0.0034,
    "allowPrecipitation": false,
    "nonidealityType": 0,
}
"""
@dataclass_json
@dataclass
class EquilibriumModel:
    """JSON-serializable response payload describing a created equilibrium
    system (returned by the App.create_equilibrium endpoint).
    """
    # Reaction storage as returned by pyequion.
    reactions: list
    # Aqueous reactions formatted as LaTeX/mhchem strings.
    reactionsLatex: list
    # Candidate solid-phase reactions formatted as LaTeX/mhchem strings.
    solidReactionsLatex: list
    # Serialized EquilibriumSystem (dict form), echoed back to the client so
    # later App.solve_equilibrium calls can reconstruct it.
    sys_eq: dict
@dataclass_json
@dataclass
class SolutionResult:
    """JSON-serializable copy of pyequion's solution result, returned by
    the App.solve_equilibrium endpoint.  Field names mirror the attributes
    of pyequion's own SolutionResult object.
    """
    c_molal: list
    gamma: list
    pH: float
    I: float
    sc: float
    DIC: float
    solid_names: list
    specie_names: list
    saturation_index: dict
    preciptation_conc: dict
    ionic_activity_prod: dict
    log_K_solubility: dict
    idx: list
    reactions: list
# sys_eq = None
# @conditional_decorator(app.route('/api', methods = ['POST']), IS_LOCAL)
# @app.route('/api', methods = ['POST'])
def pyequion_api(request):
    # def pyequion_api():
    """HTTP entry point dispatching on the JSON-RPC style "method" field.

    Supported methods:
    - "App.startup": liveness check.
    - "App.create_equilibrium": build an equilibrium system from feed
      compounds and return it (serialized) plus formatted reactions.
    - "App.solve_equilibrium": solve a previously created (serialized)
      system for given concentrations/temperature.

    Returns a (body, status, headers) tuple with permissive CORS headers.
    """
    # request = #TESTING
    # global sys_eq
    # Set CORS headers for the preflight request
    if request.method == "OPTIONS":
        # Allows GET requests from any origin with the Content-Type
        # header and caches preflight response for an 3600s
        headers = {
            "Access-Control-Allow-Origin": "*", # https://caiofcm.github.io/ todo
            "Access-Control-Allow-Methods": "GET",
            "Access-Control-Allow-Headers": "Content-Type",
            "Access-Control-Max-Age": "3600",
        }
        return ("", 204, headers)
    # Set CORS headers for the main request
    headers = {"Access-Control-Allow-Origin": "*"}
    request_json = request.get_json()
    print(request_json)
    # Reject payloads without a "method" field.
    if "method" not in request_json:
        json_resp = json.dumps(
            {
                "status": "fail",
                "error": {
                    "message": "Request endpoint should be <App.create_equilibrium> or <App.solve_equilibrium> "
                },
            }
        )
        return (json_resp, 200, headers)
    endpoint = request_json["method"]
    as_json_str = ""
    if endpoint == "App.startup":
        return (json.dumps({"result": "Started OK"}), 200, headers)
    elif endpoint == "App.create_equilibrium":
        params = request_json["params"]
        compounds = params["compounds"]
        closingEqType = params["closingEqType"]
        initial_feed_mass_balance = params["initial_feed_mass_balance"]
        # Creating the Equilibrium System
        sys_eq = pyequion.create_equilibrium( # PASSING ALLOW_PRECIPITATION IS WRONG! Such as polymorph formation, cannot precipitation all phases
            feed_compounds=compounds,
            # allow_precipitation=allowPrecipitation,
            closing_equation_type=closingEqType,
            initial_feed_mass_balance=initial_feed_mass_balance, # REMOVE ME
        )
        # Serializing EquilibriumSystem
        # Round-trip through JSON so numpy values become plain Python types.
        stringfied_sys_eq = json.dumps(
            sys_eq, cls=pyequion.utils_api.NpEncoder
        )
        as_dict_sys_eq = json.loads(stringfied_sys_eq)
        # Getting Solid Reactions
        if sys_eq.solid_reactions_but_not_equation is not None:
            solid_possible_reactions = (
                pyequion.rbuilder.conv_reaction_engine_to_db_like(
                    sys_eq.solid_reactions_but_not_equation
                )
            )
            pyequion.rbuilder.fill_reactions_with_solid_name_underscore(
                solid_possible_reactions
            )
            # for item in reactions:
            # solid_possible_reactions = [{k:float(v) for k,v in item.items() if k[0].isupper()} for item in reactions]
            solid_possible_reactions_latex = (
                pyequion.rbuilder.format_reaction_list_as_latex_mhchem(
                    solid_possible_reactions
                )
            )
        else:
            solid_possible_reactions_latex = []
        # Getting Aqueous Reactions
        latex_reactions = (
            pyequion.rbuilder.format_reaction_list_as_latex_mhchem(
                sys_eq.reactionsStorage
            )
        )
        # Creating API Response
        resp = EquilibriumModel(
            reactions=sys_eq.reactionsStorage,
            reactionsLatex=latex_reactions,
            solidReactionsLatex=solid_possible_reactions_latex,
            sys_eq=as_dict_sys_eq,
        )
        # cache.set('eq_sys', resp, timeout=5 * 60)
        print(resp)
        eq_out = {"result": resp.to_dict()}
        # FIX THIS: Stupid way to remove NaN:
        as_json_str = simplejson.dumps(eq_out, ignore_nan=True)
        # back_to_dict = json.loads(as_json_str)
    elif endpoint == "App.solve_equilibrium":
        params = request_json["params"]
        concentrations = params["concentrations"]
        temperature = params["temperature"]
        extraParameter = params["extraParameter"]
        allowPrecipitation = params["allowPrecipitation"]
        nonidealityType = params["nonidealityType"]
        sys_eq_serialized = params["sys_eq"]
        # Rebuild the EquilibriumSystem from the dict the client echoed back.
        sys_eq_deslrd = pyequion.utils_api.create_eq_sys_from_serialized(
            sys_eq_serialized
        )
        solResult = solve_equilibrium(
            sys_eq_deslrd,
            concentrations,
            temperature,
            extraParameter,
            allowPrecipitation,
            nonidealityType,
        )
        eq_sol_out = {"result": solResult.to_dict()}
        as_json_str = simplejson.dumps(eq_sol_out, ignore_nan=True)
    else:
        # raise ValueError('Unknown method')
        as_json_str = json.dumps(
            {
                "status": "fail",
                "error": {
                    "message": "Request endpoint should be <App.create_equilibrium> or <App.solve_equilibrium> "
                },
            }
        )
    return (as_json_str, 200, headers)
def solve_equilibrium(
    sys_eq,
    concentrations,
    temperature,
    extraParameter,
    allowPrecipitation,
    nonidealityType,
):
    """Solve the chemical equilibrium for the given feed conditions.

    Parameters
    ----------
    sys_eq : pyequion EquilibriumSystem
        The (deserialized) equilibrium system to solve.
    concentrations : list of float
        Feed concentrations; multiplied by 1e-3 before being passed to
        pyequion (presumably mM -> molal — confirm).
    temperature : float
        Temperature in degrees Celsius (converted to Kelvin here).
    extraParameter : float or None
        Closing-equation parameter.  ``None`` means "not provided" and is
        mapped to NaN.
    allowPrecipitation : bool
        Whether precipitation of solid phases is allowed.
    nonidealityType : str
        Name of a ``pyequion.TypeActivityCalculation`` member.

    Returns
    -------
    SolutionResult
        Local JSON-serializable dataclass mirroring pyequion's result.

    Raises
    ------
    ValueError
        If ``sys_eq`` is not supplied.
    """
    # rv = cache.get('eq_sys')
    if not sys_eq:
        raise ValueError("Equilibrium System not defined")
    TK = temperature + 273.15
    # BUG FIX: the previous test (`if extraParameter`) also treated a
    # legitimate value of 0 as "missing"; only None means missing.
    extra_param = extraParameter if extraParameter is not None else np.nan
    if (
        sys_eq.closing_equation_type
        == pyequion.ClosingEquationType.CARBON_TOTAL
    ):
        # Same 1e-3 scaling applied to the concentrations below.
        extra_param *= 1e-3
    args = (np.array(concentrations) * 1e-3, TK, extra_param)
    xguess = None  # np.ones(sys_eq.idx_control.idx['size'])*1e-1
    # Open systems use the Peng-Robinson ("pr") fugacity model; otherwise
    # ideal-gas fugacity.
    fugacity_calc = (
        "pr"
        if sys_eq.closing_equation_type == pyequion.ClosingEquationType.OPEN
        else "ideal"
    )
    solResult_pyEq = pyequion.solve_equilibrium(
        sys_eq,
        args=args,
        x_guess=xguess,
        allow_precipitation=allowPrecipitation,
        activity_model_type=pyequion.TypeActivityCalculation[nonidealityType],
        fugacity_calculation=fugacity_calc,
    )
    print("DONE")
    # print(solResult_pyEq)
    # Repackage into the local dataclass so the result is JSON-serializable
    # via dataclasses_json.
    solResult = SolutionResult(
        solResult_pyEq.c_molal,
        solResult_pyEq.gamma,
        solResult_pyEq.pH,
        solResult_pyEq.I,
        solResult_pyEq.sc,
        solResult_pyEq.DIC,
        # 2.0,
        solResult_pyEq.solid_names,
        solResult_pyEq.specie_names,
        solResult_pyEq.saturation_index,
        solResult_pyEq.preciptation_conc,
        solResult_pyEq.ionic_activity_prod,
        solResult_pyEq.log_K_solubility,
        solResult_pyEq.idx,
        solResult_pyEq.reactions,
    )
    return solResult  # .to_dict()
def pyequion_api_local_flask():
    """Flask view wrapper: forwards flask's thread-local `request` object to
    the cloud-function style handler ``pyequion_api``.
    """
    return pyequion_api(request)
# Register the Flask route only in local-debug mode.
# NOTE(review): when IS_LOCAL is False, `app` is never defined, so running
# this module directly would raise NameError at app.run below — confirm the
# __main__ path is only ever used with IS_LOCAL = True.
if IS_LOCAL:
    app.route("/api", methods=["POST"])(pyequion_api_local_flask)
if __name__ == "__main__":
    app.run(debug=True)
| [
"pyequion.create_equilibrium",
"json.loads",
"pyequion.rbuilder.fill_reactions_with_solid_name_underscore",
"flask_cors.CORS",
"simplejson.dumps",
"flask.Flask",
"pyequion.utils_api.create_eq_sys_from_serialized",
"json.dumps",
"pyequion.solve_equilibrium",
"numpy.array",
"pyequion.rbuilder.form... | [((426, 441), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (431, 441), False, 'from flask import Flask\n'), ((447, 456), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (451, 456), False, 'from flask_cors import CORS\n'), ((2226, 2244), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2242, 2244), False, 'from flask import request\n'), ((7272, 7494), 'pyequion.solve_equilibrium', 'pyequion.solve_equilibrium', (['sys_eq'], {'args': 'args', 'x_guess': 'xguess', 'allow_precipitation': 'allowPrecipitation', 'activity_model_type': 'pyequion.TypeActivityCalculation[nonidealityType]', 'fugacity_calculation': 'fugacity_calc'}), '(sys_eq, args=args, x_guess=xguess,\n allow_precipitation=allowPrecipitation, activity_model_type=pyequion.\n TypeActivityCalculation[nonidealityType], fugacity_calculation=\n fugacity_calc)\n', (7298, 7494), False, 'import pyequion\n'), ((2326, 2470), 'json.dumps', 'json.dumps', (["{'status': 'fail', 'error': {'message':\n 'Request endpoint should be <App.create_equilibrium> or <App.solve_equilibrium> '\n }}"], {}), "({'status': 'fail', 'error': {'message':\n 'Request endpoint should be <App.create_equilibrium> or <App.solve_equilibrium> '\n }})\n", (2336, 2470), False, 'import json\n'), ((2722, 2758), 'json.dumps', 'json.dumps', (["{'result': 'Started OK'}"], {}), "({'result': 'Started OK'})\n", (2732, 2758), False, 'import json\n'), ((3081, 3229), 'pyequion.create_equilibrium', 'pyequion.create_equilibrium', ([], {'feed_compounds': 'compounds', 'closing_equation_type': 'closingEqType', 'initial_feed_mass_balance': 'initial_feed_mass_balance'}), '(feed_compounds=compounds, closing_equation_type\n =closingEqType, initial_feed_mass_balance=initial_feed_mass_balance)\n', (3108, 3229), False, 'import pyequion\n'), ((3509, 3561), 'json.dumps', 'json.dumps', (['sys_eq'], {'cls': 'pyequion.utils_api.NpEncoder'}), '(sys_eq, cls=pyequion.utils_api.NpEncoder)\n', (3519, 3561), False, 'import 
json\n'), ((3609, 3638), 'json.loads', 'json.loads', (['stringfied_sys_eq'], {}), '(stringfied_sys_eq)\n', (3619, 3638), False, 'import json\n'), ((4558, 4637), 'pyequion.rbuilder.format_reaction_list_as_latex_mhchem', 'pyequion.rbuilder.format_reaction_list_as_latex_mhchem', (['sys_eq.reactionsStorage'], {}), '(sys_eq.reactionsStorage)\n', (4612, 4637), False, 'import pyequion\n'), ((5130, 5171), 'simplejson.dumps', 'simplejson.dumps', (['eq_out'], {'ignore_nan': '(True)'}), '(eq_out, ignore_nan=True)\n', (5146, 5171), False, 'import simplejson\n'), ((6996, 7020), 'numpy.array', 'np.array', (['concentrations'], {}), '(concentrations)\n', (7004, 7020), True, 'import numpy as np\n'), ((3795, 3890), 'pyequion.rbuilder.conv_reaction_engine_to_db_like', 'pyequion.rbuilder.conv_reaction_engine_to_db_like', (['sys_eq.solid_reactions_but_not_equation'], {}), '(sys_eq.\n solid_reactions_but_not_equation)\n', (3844, 3890), False, 'import pyequion\n'), ((3950, 4040), 'pyequion.rbuilder.fill_reactions_with_solid_name_underscore', 'pyequion.rbuilder.fill_reactions_with_solid_name_underscore', (['solid_possible_reactions'], {}), '(\n solid_possible_reactions)\n', (4009, 4040), False, 'import pyequion\n'), ((4286, 4371), 'pyequion.rbuilder.format_reaction_list_as_latex_mhchem', 'pyequion.rbuilder.format_reaction_list_as_latex_mhchem', (['solid_possible_reactions'], {}), '(solid_possible_reactions\n )\n', (4340, 4371), False, 'import pyequion\n'), ((5627, 5694), 'pyequion.utils_api.create_eq_sys_from_serialized', 'pyequion.utils_api.create_eq_sys_from_serialized', (['sys_eq_serialized'], {}), '(sys_eq_serialized)\n', (5675, 5694), False, 'import pyequion\n'), ((6010, 6055), 'simplejson.dumps', 'simplejson.dumps', (['eq_sol_out'], {'ignore_nan': '(True)'}), '(eq_sol_out, ignore_nan=True)\n', (6026, 6055), False, 'import simplejson\n'), ((6133, 6277), 'json.dumps', 'json.dumps', (["{'status': 'fail', 'error': {'message':\n 'Request endpoint should be <App.create_equilibrium> or 
<App.solve_equilibrium> '\n }}"], {}), "({'status': 'fail', 'error': {'message':\n 'Request endpoint should be <App.create_equilibrium> or <App.solve_equilibrium> '\n }})\n", (6143, 6277), False, 'import json\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 25 09:44:15 2019
@author: dberke
Code to define a class for a model fit to an absorption line.
"""
import matplotlib
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from scipy.optimize import curve_fit, OptimizeWarning
from tqdm import tqdm
import unyt as u
from varconlib.exceptions import PositiveAmplitudeError
from varconlib.fitting import gaussian, integrated_gaussian
from varconlib.miscellaneous import (shift_wavelength, velocity2wavelength,
wavelength2index, wavelength2velocity)
# This line prevents the wavelength formatting from being in the form of
# scientific notation.
matplotlib.rcParams['axes.formatter.useoffset'] = False
# Don't use TeX for font rendering, as these are just diagnostic plots and it
# slows everything way down.
matplotlib.rcParams['text.usetex'] = False
class GaussianFit(object):
"""A class to fit an absorption line and store information about the fit.
"""
    def __init__(self, transition, observation, order, radial_velocity=None,
                 close_up_plot_path=None, context_plot_path=None,
                 integrated=True, verbose=False):
        """Construct a fit to an absorption feature using a Gaussian or
        integrated Gaussian.

        Parameters
        ----------
        transition : `transition_line.Transition` object
            A `Transition` object representing the absorption feature to fit.
        observation : `obs2d.HARPSFile2DScience` object
            A `HARPSFile2DScience` object to find the absorption feature in.
        order : int
            The order in the e2ds file to fit the transition in. Zero-indexed,
            so ranging from [0-71].

        Optional
        --------
        radial_velocity : `unyt.unyt_quantity`
            A radial velocity (dimensions of length / time) for the object in
            the observation. Most of the time the radial velocity should be
            picked up from the observation itself, but for certain objects
            such as asteroids the supplied radial velocity may not be correct.
            In such cases, this parameter can be used to override the given
            radial velocity.
        close_up_plot_path : string or `pathlib.Path`
            The file name to save a close-up plot of the fit to.
        context_plot_path : string or `pathlib.Path`
            The file name to save a wider context plot (±25 km/s) around the
            fitted feature to.
        integrated : bool, Default : True
            Controls whether to attempt to fit a feature with an integrated
            Gaussian instead of a Gaussian.
        verbose : bool, Default : False
            Whether to print out extra diagnostic information while running
            the function.

        Raises
        ------
        PositiveAmplitudeError
            If the fitted amplitude is positive (emission-like), which is
            treated as a failed fit for an absorption feature.
        """
        # Store the transition.
        self.transition = transition
        # Grab some observation-specific information from the observation.
        self.dateObs = observation.dateObs
        self.BERV = observation.BERV
        self.airmass = observation.airmass
        self.exptime = observation.exptime
        self.calibrationFile = observation.calibrationFile
        self.calibrationSource = observation.calibrationSource
        self.order = int(order)
        # Store the plot paths.
        self.close_up_plot_path = close_up_plot_path
        self.context_plot_path = context_plot_path
        # Define some useful numbers and variables.
        # The ranges in velocity space to search around to find the minimum of
        # an absorption line.
        search_range_vel = 5 * u.km / u.s
        # The range in velocity space to consider to find the continuum.
        continuum_range_vel = 25 * u.km / u.s
        # The number of pixels either side of the flux minimum to use in the
        # fit.
        pixel_range = 3
        # If no radial velocity is given, use the radial velocity from the
        # supplied observation. This is mostly for use with things like
        # asteroids that might not have a radial velocity assigned.
        if radial_velocity is None:
            radial_velocity = observation.radialVelocity
        # Shift the wavelength being searched for to correct for the radial
        # velocity of the star.
        nominal_wavelength = self.transition.wavelength.to(u.angstrom)
        self.correctedWavelength = shift_wavelength(nominal_wavelength,
                                                    radial_velocity)
        if verbose:
            tqdm.write('Given RV {:.2f}: line {:.3f} should be at {:.3f}'.
                       format(radial_velocity,
                              nominal_wavelength.to(u.angstrom),
                              self.correctedWavelength.to(u.angstrom)))
        # Per-order slices of the observation's 2D arrays.
        self.baryArray = observation.barycentricArray[self.order]
        self.fluxArray = observation.photonFluxArray[self.order]
        self.errorArray = observation.errorArray[self.order]
        # Figure out the range in wavelength space to search around the nominal
        # wavelength for the flux minimum, as well as the range to take for
        # measuring the continuum.
        search_range = velocity2wavelength(search_range_vel,
                                            self.correctedWavelength)
        self.continuumRange = velocity2wavelength(continuum_range_vel,
                                                  self.correctedWavelength)
        low_search_index = wavelength2index(self.correctedWavelength -
                                            search_range,
                                            self.baryArray)
        high_search_index = wavelength2index(self.correctedWavelength +
                                             search_range,
                                             self.baryArray)
        self.lowContinuumIndex = wavelength2index(self.correctedWavelength
                                                  - self.continuumRange,
                                                  self.baryArray)
        self.highContinuumIndex = wavelength2index(self.correctedWavelength
                                                   + self.continuumRange,
                                                   self.baryArray)
        # Flux minimum within the ±5 km/s search window locates the line.
        self.centralIndex = low_search_index + \
            self.fluxArray[low_search_index:high_search_index].argmin()
        # Continuum level: maximum flux within the wider ±25 km/s window.
        self.continuumLevel = self.fluxArray[self.lowContinuumIndex:
                                             self.highContinuumIndex].max()
        self.fluxMinimum = self.fluxArray[self.centralIndex]
        self.lowFitIndex = self.centralIndex - pixel_range
        self.highFitIndex = self.centralIndex + pixel_range + 1
        # Grab the wavelengths, fluxes, and errors from the region to be fit.
        self.wavelengths = self.baryArray[self.lowFitIndex:self.highFitIndex]
        self.fluxes = self.fluxArray[self.lowFitIndex:self.highFitIndex]
        self.errors = self.errorArray[self.lowFitIndex:self.highFitIndex]
        self.lineDepth = self.continuumLevel - self.fluxMinimum
        self.normalizedLineDepth = self.lineDepth / self.continuumLevel
        # Initial parameter guess: (amplitude, mean, sigma, baseline).
        # Amplitude is negative since this is an absorption feature.
        self.initial_guess = (self.lineDepth * -1,
                              self.correctedWavelength.to(u.angstrom).value,
                              0.05,
                              self.continuumLevel)
        if verbose:
            tqdm.write('Attempting to fit line at {:.4f} with initial guess:'.
                       format(self.correctedWavelength))
        if verbose:
            tqdm.write('Initial parameters are:\n{}\n{}\n{}\n{}'.format(
                *self.initial_guess))
        # Do the fitting:
        try:
            if integrated:
                # Fit an integrated Gaussian over each pixel's wavelength
                # span, using the pixel-edge arrays from the observation.
                wavelengths_lower = observation.pixelLowerArray
                wavelengths_upper = observation.pixelUpperArray
                pixel_edges_lower = wavelengths_lower[self.order,
                                                      self.lowFitIndex:
                                                      self.highFitIndex]
                pixel_edges_upper = wavelengths_upper[self.order,
                                                      self.lowFitIndex:
                                                      self.highFitIndex]
                self.popt, self.pcov = curve_fit(integrated_gaussian,
                                                 (pixel_edges_lower.value,
                                                  pixel_edges_upper.value),
                                                 self.fluxes,
                                                 sigma=self.errors,
                                                 absolute_sigma=True,
                                                 p0=self.initial_guess,
                                                 method='lm', maxfev=10000)
            else:
                self.popt, self.pcov = curve_fit(gaussian,
                                                 self.wavelengths.value,
                                                 self.fluxes,
                                                 sigma=self.errors,
                                                 absolute_sigma=True,
                                                 p0=self.initial_guess,
                                                 method='lm', maxfev=10000)
        except (OptimizeWarning, RuntimeError):
            # On fit failure, dump diagnostics and save the plots (without a
            # fit curve) before re-raising.
            print(self.continuumLevel)
            print(self.lineDepth)
            print(self.initial_guess)
            self.plotFit(close_up_plot_path, context_plot_path,
                         plot_fit=False, verbose=True)
            raise
        if verbose:
            print(self.popt)
            print(self.pcov)
        # Recover the fitted values for the parameters:
        self.amplitude = self.popt[0]
        self.mean = self.popt[1] * u.angstrom
        self.sigma = self.popt[2] * u.angstrom
        # A positive amplitude would mean an emission-like fit; treat it as
        # a failure for an absorption feature.
        if self.amplitude > 0:
            err_msg = ('Fit for'
                       f' {self.transition.wavelength.to(u.angstrom)}'
                       ' has a positive amplitude.')
            tqdm.write(err_msg)
            self.plotFit(close_up_plot_path, context_plot_path,
                         plot_fit=True, verbose=verbose)
            raise PositiveAmplitudeError(err_msg)
        # Find 1-σ errors from the covariance matrix:
        self.perr = np.sqrt(np.diag(self.pcov))
        self.amplitudeErr = self.perr[0]
        self.meanErr = self.perr[1] * u.angstrom
        self.meanErrVel = abs(wavelength2velocity(self.mean,
                                                  self.mean +
                                                  self.meanErr))
        self.sigmaErr = self.perr[2] * u.angstrom
        # Inflate the mean error when the reduced chi-squared exceeds 1.
        if (self.chiSquaredNu > 1):
            self.meanErr *= np.sqrt(self.chiSquaredNu)
        if verbose:
            tqdm.write('χ^2_ν = {}'.format(self.chiSquaredNu))
        # Find the full width at half max.
        # 2.354820 ≈ 2 * sqrt(2 * ln(2)), the relationship of FWHM to the
        # standard deviation of a Gaussian.
        self.FWHM = 2.354820 * self.sigma
        self.FWHMErr = 2.354820 * self.sigmaErr
        self.velocityFWHM = wavelength2velocity(self.mean,
                                                self.mean +
                                                self.FWHM).to(u.km/u.s)
        self.velocityFWHMErr = wavelength2velocity(self.mean,
                                                   self.mean +
                                                   self.FWHMErr).to(u.km/u.s)
        # Compute the offset between the input wavelength and the wavelength
        # found in the fit.
        self.offset = self.correctedWavelength - self.mean
        self.offsetErr = self.meanErr
        self.velocityOffset = wavelength2velocity(self.correctedWavelength,
                                                  self.mean)
        self.velocityOffsetErr = wavelength2velocity(self.mean,
                                                     self.mean +
                                                     self.offsetErr)
        if verbose:
            print(self.continuumLevel)
            print(self.fluxMinimum)
            print(self.wavelengths)
@property
def chiSquared(self):
if not hasattr(self, '_chiSquared'):
residuals = self.fluxes - gaussian(self.wavelengths.value,
*self.popt)
self._chiSquared = sum((residuals / self.errors) ** 2)
return self._chiSquared
@property
def chiSquaredNu(self):
return self.chiSquared / 3 # ν = 7 (pixels) - 4 (params)
@property
def label(self):
return self.transition.label + '_' + str(self.order)
def getFitInformation(self):
"""Return a list of information about the fit which can be written as
a CSV file.
Returns
-------
list
A list containing the following information about the fit:
1. Observation date, in ISO format
2. The amplitude of the fit (in photons)
3. The error on the amplitude (in photons)
4. The mean of the fit (in Å)
5. The error on the mean (in Å)
6. The error on the mean (in m/s in velocity space)
7. The sigma of the fitted Gaussian (in Å)
8. The error on the sigma (in Å)
9. The offset from expected wavelength (in m/s)
10. The error on the offset (in m/s)
11. The FWHM (in velocity space)
12. The error on the FWHM (in m/s)
13. The chi-squared-nu value
14. The order the fit was made on (starting at 0, so in [0, 71].
15. The mean airmass of the observation.
"""
return [self.dateObs.isoformat(timespec='milliseconds'),
self.amplitude,
self.amplitudeErr,
self.mean.value,
self.meanErr.value,
self.meanErrVel.value,
self.sigma.value,
self.sigmaErr.value,
self.velocityOffset.to(u.m/u.s).value,
self.velocityOffsetErr.to(u.m/u.s).value,
self.velocityFWHM.to(u.m/u.s).value,
self.velocityFWHMErr.to(u.m/u.s).value,
self.chiSquaredNu,
self.order,
self.airmass]
    def plotFit(self, close_up_plot_path=None,
                context_plot_path=None,
                plot_fit=True,
                verbose=False):
        """Plot a graph of this fit.
        This method will produce a 'close-up' plot of just the fitted region
        itself, in order to check that the fit has worked out, and a wider
        'context' plot of the area around the feature. Both plots are saved
        as PNG files; nothing is returned.
        Optional
        --------
        close_up_plot_path : string or `pathlib.Path`
            The file name to save a close-up plot of the fit to. If not given,
            will default to using the value provided when initializing the
            fit.
        context_plot_path : string or `pathlib.Path`
            The file name to save a wider context plot (±25 km/s) around the
            fitted feature to. If not given, will default to using the value
            provided when initializing the fit.
        plot_fit : bool, Default : True
            If *True*, plot the mean of the fit and the fitted Gaussian.
            Otherwise, don't plot those two things. This allows creating plots
            of failed fits to see the context of the data.
        verbose : bool, Default : False
            If *True*, the function will print out additional information as it
            runs.
        """
        # Pixel columns marked with short ticks below — presumably detector
        # block boundaries / stitching seams (TODO confirm with instrument
        # documentation).
        edge_pixels = (509, 510, 1021, 1022, 1533, 1534, 2045, 2046,
                       2557, 2558, 3069, 3070, 3581, 3582)
        # If no plot paths are given, assume we want to use the ones given
        # when initializing the fit.
        if close_up_plot_path is None:
            close_up_plot_path = self.close_up_plot_path
        if context_plot_path is None:
            context_plot_path = self.context_plot_path
        # Set up the figure: main flux panel (ax1) over a residuals strip
        # (ax2) that shares the wavelength axis.
        fig = plt.figure(figsize=(7, 5), dpi=100, tight_layout=True)
        gs = GridSpec(nrows=2, ncols=1, height_ratios=[4, 1], hspace=0)
        ax1 = fig.add_subplot(gs[0])
        ax2 = fig.add_subplot(gs[1], sharex=ax1)
        ax1.tick_params(axis='x', direction='in')
        plt.setp(ax1.get_xticklabels(), visible=False)
        ax2.set_ylim(bottom=-3, top=3)
        ax2.yaxis.set_major_locator(ticker.FixedLocator([-2, -1, 0, 1, 2]))
        # Mark the detector edge pixels with short green dashed ticks.
        for pixel in edge_pixels:
            ax1.axvline(x=self.baryArray[pixel-1], ymin=0, ymax=0.2,
                        color='LimeGreen',
                        linestyle='--')
        ax1.set_ylabel('Flux (photo-electrons)')
        ax2.set_xlabel('Wavelength ($\\AA$)')
        ax2.set_ylabel('Residuals\n($\\sigma$)')
        ax1.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:.2f}'))
        ax1.yaxis.set_major_formatter(ticker.StrMethodFormatter('{x:>7.1e}'))
        plt.xticks(horizontalalignment='right')
        # Context view: span the full continuum window around the feature.
        ax1.set_xlim(left=self.correctedWavelength - self.continuumRange,
                     right=self.correctedWavelength + self.continuumRange)
        # Set y-limits so a fit doesn't balloon the plot scale out.
        ax1.set_ylim(top=self.continuumLevel * 1.25,
                     bottom=self.fluxMinimum * 0.93)
        # Plot the expected and measured wavelengths.
        ax1.axvline(self.correctedWavelength.to(u.angstrom),
                    color='LightSteelBlue', linestyle=':', alpha=0.8,
                    label=r'RV-corrected $\lambda=${:.3f}'.format(
                        self.correctedWavelength.to(u.angstrom)))
        # Don't plot the mean if this is a failed fit.
        if hasattr(self, 'mean') and hasattr(self, 'velocityOffset'):
            ax1.axvline(self.mean.to(u.angstrom),
                        color='IndianRed', alpha=0.7,
                        label='Mean ({:.4f}, {:+.2f})'.
                        format(self.mean.to(u.angstrom),
                               self.velocityOffset.to(u.m/u.s)),
                        linestyle='-')
        # Plot the actual data.
        ax1.errorbar(self.baryArray[self.lowContinuumIndex - 1:
                                    self.highContinuumIndex + 1],
                     self.fluxArray[self.lowContinuumIndex - 1:
                                    self.highContinuumIndex + 1],
                     yerr=self.errorArray[self.lowContinuumIndex - 1:
                                          self.highContinuumIndex + 1],
                     color='SandyBrown', ecolor='Sienna',
                     marker='o', markersize=5,
                     label='Flux', barsabove=True)
        # Generate some x-values across the plot range.
        x = np.linspace(self.baryArray[self.lowContinuumIndex].value,
                        self.baryArray[self.highContinuumIndex].value, 1000)
        # Plot the initial guess for the gaussian.
        ax1.plot(x, gaussian(x, *self.initial_guess),
                 color='SlateGray', label='Initial guess',
                 linestyle='--', alpha=0.5)
        # Plot the fitted gaussian, unless this is a failed fit attempt.
        if plot_fit:
            ax1.plot(x, gaussian(x, *self.popt),
                     color='DarkGreen', alpha=0.5,
                     linestyle='-.',
                     label=r'Fit ($\chi^2_\nu=${:.3f}, $\sigma=${:.4f})'.
                     format(self.chiSquaredNu, self.sigma))
        # Replace underscore in label so LaTeX won't crash on it.
        ax1.legend(loc='upper center', framealpha=0.6, fontsize=9,
                   ncol=2,
                   title=self.label.replace('_', r'\_') if\
                   matplotlib.rcParams['text.usetex'] else self.label,
                   title_fontsize=10,
                   labelspacing=0.4)
        # Add in some guidelines (±1σ dashed, ±2σ dotted).
        ax2.axhline(color='Gray', linestyle='-')
        ax2.axhline(y=1, color='SkyBlue', linestyle='--')
        ax2.axhline(y=-1, color='SkyBlue', linestyle='--')
        ax2.axhline(y=2, color='LightSteelBlue', linestyle=':')
        ax2.axhline(y=-2, color='LightSteelBlue', linestyle=':')
        # Plot the residuals on the lower axis.
        residuals = (self.fluxes - gaussian(self.wavelengths.value,
                                            *self.popt)) / self.errors
        ax2.plot(self.wavelengths, residuals, color='Navy', alpha=0.6,
                 linestyle='', marker='D', linewidth=1.5, markersize=5)
        # Save the resultant plot.
        fig.savefig(str(context_plot_path), format="png")
        if verbose:
            tqdm.write('Created wider context plot at {}'.format(
                context_plot_path))
        # Now create a close-in version to focus on the fit: reuse the same
        # figure with tighter axis limits before saving again.
        ax1.set_xlim(left=self.baryArray[self.lowFitIndex - 1],
                     right=self.baryArray[self.highFitIndex])
        ax1.set_ylim(top=self.fluxes.max() * 1.15,
                     bottom=self.fluxes.min() * 0.95)
        fig.savefig(str(close_up_plot_path), format="png")
        if verbose:
            tqdm.write('Created close up plot at {}'.format(
                close_up_plot_path))
        plt.close(fig)
| [
"tqdm.tqdm.write",
"matplotlib.ticker.StrMethodFormatter",
"varconlib.miscellaneous.wavelength2index",
"matplotlib.pyplot.close",
"varconlib.fitting.gaussian",
"matplotlib.ticker.FixedLocator",
"scipy.optimize.curve_fit",
"matplotlib.pyplot.figure",
"numpy.linspace",
"varconlib.exceptions.Positive... | [((4497, 4550), 'varconlib.miscellaneous.shift_wavelength', 'shift_wavelength', (['nominal_wavelength', 'radial_velocity'], {}), '(nominal_wavelength, radial_velocity)\n', (4513, 4550), False, 'from varconlib.miscellaneous import shift_wavelength, velocity2wavelength, wavelength2index, wavelength2velocity\n'), ((5290, 5353), 'varconlib.miscellaneous.velocity2wavelength', 'velocity2wavelength', (['search_range_vel', 'self.correctedWavelength'], {}), '(search_range_vel, self.correctedWavelength)\n', (5309, 5353), False, 'from varconlib.miscellaneous import shift_wavelength, velocity2wavelength, wavelength2index, wavelength2velocity\n'), ((5428, 5494), 'varconlib.miscellaneous.velocity2wavelength', 'velocity2wavelength', (['continuum_range_vel', 'self.correctedWavelength'], {}), '(continuum_range_vel, self.correctedWavelength)\n', (5447, 5494), False, 'from varconlib.miscellaneous import shift_wavelength, velocity2wavelength, wavelength2index, wavelength2velocity\n'), ((5573, 5646), 'varconlib.miscellaneous.wavelength2index', 'wavelength2index', (['(self.correctedWavelength - search_range)', 'self.baryArray'], {}), '(self.correctedWavelength - search_range, self.baryArray)\n', (5589, 5646), False, 'from varconlib.miscellaneous import shift_wavelength, velocity2wavelength, wavelength2index, wavelength2velocity\n'), ((5764, 5837), 'varconlib.miscellaneous.wavelength2index', 'wavelength2index', (['(self.correctedWavelength + search_range)', 'self.baryArray'], {}), '(self.correctedWavelength + search_range, self.baryArray)\n', (5780, 5837), False, 'from varconlib.miscellaneous import shift_wavelength, velocity2wavelength, wavelength2index, wavelength2velocity\n'), ((5962, 6047), 'varconlib.miscellaneous.wavelength2index', 'wavelength2index', (['(self.correctedWavelength - self.continuumRange)', 'self.baryArray'], {}), '(self.correctedWavelength - self.continuumRange, self.baryArray\n )\n', (5978, 6047), False, 'from 
varconlib.miscellaneous import shift_wavelength, velocity2wavelength, wavelength2index, wavelength2velocity\n'), ((6177, 6262), 'varconlib.miscellaneous.wavelength2index', 'wavelength2index', (['(self.correctedWavelength + self.continuumRange)', 'self.baryArray'], {}), '(self.correctedWavelength + self.continuumRange, self.baryArray\n )\n', (6193, 6262), False, 'from varconlib.miscellaneous import shift_wavelength, velocity2wavelength, wavelength2index, wavelength2velocity\n'), ((11899, 11955), 'varconlib.miscellaneous.wavelength2velocity', 'wavelength2velocity', (['self.correctedWavelength', 'self.mean'], {}), '(self.correctedWavelength, self.mean)\n', (11918, 11955), False, 'from varconlib.miscellaneous import shift_wavelength, velocity2wavelength, wavelength2index, wavelength2velocity\n'), ((12040, 12098), 'varconlib.miscellaneous.wavelength2velocity', 'wavelength2velocity', (['self.mean', '(self.mean + self.offsetErr)'], {}), '(self.mean, self.mean + self.offsetErr)\n', (12059, 12098), False, 'from varconlib.miscellaneous import shift_wavelength, velocity2wavelength, wavelength2index, wavelength2velocity\n'), ((16312, 16366), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 5)', 'dpi': '(100)', 'tight_layout': '(True)'}), '(figsize=(7, 5), dpi=100, tight_layout=True)\n', (16322, 16366), True, 'import matplotlib.pyplot as plt\n'), ((16380, 16438), 'matplotlib.gridspec.GridSpec', 'GridSpec', ([], {'nrows': '(2)', 'ncols': '(1)', 'height_ratios': '[4, 1]', 'hspace': '(0)'}), '(nrows=2, ncols=1, height_ratios=[4, 1], hspace=0)\n', (16388, 16438), False, 'from matplotlib.gridspec import GridSpec\n'), ((17242, 17281), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'horizontalalignment': '"""right"""'}), "(horizontalalignment='right')\n", (17252, 17281), True, 'import matplotlib.pyplot as plt\n'), ((19034, 19149), 'numpy.linspace', 'np.linspace', (['self.baryArray[self.lowContinuumIndex].value', 'self.baryArray[self.highContinuumIndex].value', 
'(1000)'], {}), '(self.baryArray[self.lowContinuumIndex].value, self.baryArray[\n self.highContinuumIndex].value, 1000)\n', (19045, 19149), True, 'import numpy as np\n'), ((21479, 21493), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (21488, 21493), True, 'import matplotlib.pyplot as plt\n'), ((10222, 10241), 'tqdm.tqdm.write', 'tqdm.write', (['err_msg'], {}), '(err_msg)\n', (10232, 10241), False, 'from tqdm import tqdm\n'), ((10381, 10412), 'varconlib.exceptions.PositiveAmplitudeError', 'PositiveAmplitudeError', (['err_msg'], {}), '(err_msg)\n', (10403, 10412), False, 'from varconlib.exceptions import PositiveAmplitudeError\n'), ((10496, 10514), 'numpy.diag', 'np.diag', (['self.pcov'], {}), '(self.pcov)\n', (10503, 10514), True, 'import numpy as np\n'), ((10637, 10693), 'varconlib.miscellaneous.wavelength2velocity', 'wavelength2velocity', (['self.mean', '(self.mean + self.meanErr)'], {}), '(self.mean, self.mean + self.meanErr)\n', (10656, 10693), False, 'from varconlib.miscellaneous import shift_wavelength, velocity2wavelength, wavelength2index, wavelength2velocity\n'), ((10910, 10936), 'numpy.sqrt', 'np.sqrt', (['self.chiSquaredNu'], {}), '(self.chiSquaredNu)\n', (10917, 10936), True, 'import numpy as np\n'), ((16707, 16745), 'matplotlib.ticker.FixedLocator', 'ticker.FixedLocator', (['[-2, -1, 0, 1, 2]'], {}), '([-2, -1, 0, 1, 2])\n', (16726, 16745), True, 'import matplotlib.ticker as ticker\n'), ((17118, 17154), 'matplotlib.ticker.StrMethodFormatter', 'ticker.StrMethodFormatter', (['"""{x:.2f}"""'], {}), "('{x:.2f}')\n", (17143, 17154), True, 'import matplotlib.ticker as ticker\n'), ((17194, 17232), 'matplotlib.ticker.StrMethodFormatter', 'ticker.StrMethodFormatter', (['"""{x:>7.1e}"""'], {}), "('{x:>7.1e}')\n", (17219, 17232), True, 'import matplotlib.ticker as ticker\n'), ((19240, 19272), 'varconlib.fitting.gaussian', 'gaussian', (['x', '*self.initial_guess'], {}), '(x, *self.initial_guess)\n', (19248, 19272), False, 'from varconlib.fitting 
import gaussian, integrated_gaussian\n'), ((8430, 8625), 'scipy.optimize.curve_fit', 'curve_fit', (['integrated_gaussian', '(pixel_edges_lower.value, pixel_edges_upper.value)', 'self.fluxes'], {'sigma': 'self.errors', 'absolute_sigma': '(True)', 'p0': 'self.initial_guess', 'method': '"""lm"""', 'maxfev': '(10000)'}), "(integrated_gaussian, (pixel_edges_lower.value, pixel_edges_upper.\n value), self.fluxes, sigma=self.errors, absolute_sigma=True, p0=self.\n initial_guess, method='lm', maxfev=10000)\n", (8439, 8625), False, 'from scipy.optimize import curve_fit, OptimizeWarning\n'), ((9017, 9167), 'scipy.optimize.curve_fit', 'curve_fit', (['gaussian', 'self.wavelengths.value', 'self.fluxes'], {'sigma': 'self.errors', 'absolute_sigma': '(True)', 'p0': 'self.initial_guess', 'method': '"""lm"""', 'maxfev': '(10000)'}), "(gaussian, self.wavelengths.value, self.fluxes, sigma=self.errors,\n absolute_sigma=True, p0=self.initial_guess, method='lm', maxfev=10000)\n", (9026, 9167), False, 'from scipy.optimize import curve_fit, OptimizeWarning\n'), ((11300, 11353), 'varconlib.miscellaneous.wavelength2velocity', 'wavelength2velocity', (['self.mean', '(self.mean + self.FWHM)'], {}), '(self.mean, self.mean + self.FWHM)\n', (11319, 11353), False, 'from varconlib.miscellaneous import shift_wavelength, velocity2wavelength, wavelength2index, wavelength2velocity\n'), ((11494, 11550), 'varconlib.miscellaneous.wavelength2velocity', 'wavelength2velocity', (['self.mean', '(self.mean + self.FWHMErr)'], {}), '(self.mean, self.mean + self.FWHMErr)\n', (11513, 11550), False, 'from varconlib.miscellaneous import shift_wavelength, velocity2wavelength, wavelength2index, wavelength2velocity\n'), ((12461, 12505), 'varconlib.fitting.gaussian', 'gaussian', (['self.wavelengths.value', '*self.popt'], {}), '(self.wavelengths.value, *self.popt)\n', (12469, 12505), False, 'from varconlib.fitting import gaussian, integrated_gaussian\n'), ((19495, 19518), 'varconlib.fitting.gaussian', 'gaussian', (['x', 
'*self.popt'], {}), '(x, *self.popt)\n', (19503, 19518), False, 'from varconlib.fitting import gaussian, integrated_gaussian\n'), ((20527, 20571), 'varconlib.fitting.gaussian', 'gaussian', (['self.wavelengths.value', '*self.popt'], {}), '(self.wavelengths.value, *self.popt)\n', (20535, 20571), False, 'from varconlib.fitting import gaussian, integrated_gaussian\n')] |
import numpy as np
import torch
def permute(x, n_in, kernels_layer, num_channels_to_permute, verbose=False):
    """Randomly shuffle channels of `x` among kernels, per channel offset.

    The channel axis (size n_in * kernels_layer) is viewed as kernels_layer
    groups of n_in channels each.  For every offset i in
    [0, num_channels_to_permute), the kernels_layer channels found at
    position i of each group are permuted among themselves, so each offset
    keeps the same multiset of channels.

    Parameters
    ----------
    x : torch.Tensor
        4-D tensor (N, C, H, W) with C == n_in * kernels_layer — presumably
        convolution weights/activations; confirm with callers.
    n_in : int
        Number of channels per group (stride along the channel axis).
    kernels_layer : int
        Number of groups, i.e. channels sharing each offset.
    num_channels_to_permute : int
        How many offsets (0..n_in-1) to shuffle; must be <= n_in.
    verbose : bool, default False
        When True, print the permutation indices.  The original version
        printed them unconditionally (leftover debug output); printing is
        now opt-in.

    Returns
    -------
    torch.Tensor
        A new tensor with shuffled channels; `x` itself is not modified.
    """
    permuted = torch.clone(x)
    for offset in range(num_channels_to_permute):
        # Pick a random ordering of the channels living at this offset:
        # offset, n_in + offset, 2 * n_in + offset, ...
        idx = (torch.randperm(kernels_layer) * n_in) + offset
        if verbose:
            print(idx)
            print(np.arange(offset, x.shape[1], n_in))
        permuted[:, offset:x.shape[1]:n_in, :, :] = x[:, idx, :, :]
    return permuted
# Demo: 20 channels = 5 kernels (filters) x 4 input channels each.
demo_input = torch.zeros(100, 20, 5, 5)
permute(demo_input, 4, 5, 4)
# 0 1 2 3
# 4 5 6 7
# 8 9 10 11
# 12 13 14 15
# 16 17 18 19 | [
"torch.zeros",
"numpy.arange",
"torch.randperm",
"torch.clone"
] | [((402, 428), 'torch.zeros', 'torch.zeros', (['(100)', '(20)', '(5)', '(5)'], {}), '(100, 20, 5, 5)\n', (413, 428), False, 'import torch\n'), ((108, 122), 'torch.clone', 'torch.clone', (['x'], {}), '(x)\n', (119, 122), False, 'import torch\n'), ((182, 211), 'torch.randperm', 'torch.randperm', (['kernels_layer'], {}), '(kernels_layer)\n', (196, 211), False, 'import torch\n'), ((276, 306), 'numpy.arange', 'np.arange', (['i', 'x.shape[1]', 'n_in'], {}), '(i, x.shape[1], n_in)\n', (285, 306), True, 'import numpy as np\n')] |
import numpy as np
from pathlib import Path
import gym
from envs.atari.get_avg_score import get_average_score
from algorithms.discrete_policy import DiscretePolicyParams
from algorithms.utils import generate_save_location
def main():
    """Evaluate saved behavioural-cloning policies and print score summaries.

    For each environment and each number of demonstrations, loads the BC
    policy trained with every seed, measures its average score over 25
    trials, and prints LaTeX-ready "mean \\pm std" summary lines.
    """
    hidden_layers = (32, 32)
    date = "09-05-2020"
    discrete_policy_params = DiscretePolicyParams(
        actor_layers=hidden_layers, actor_activation="tanh"
    )
    save_base_path = Path("data")

    env_names = ["CartPole-v1", "Acrobot-v1", "MountainCar-v0"]
    num_epochs = [50, 30, 30]
    num_seeds = 10
    demo_nums = [1, 3, 10, 30, 100]

    overall_list = []
    for env_name, epoch in zip(env_names, num_epochs):
        overall_list.append(env_name)
        env_list = []
        for demo_num in demo_nums:
            means, std_devs = [], []
            for seed in range(num_seeds):
                save_location = generate_save_location(
                    save_base_path,
                    discrete_policy_params.actor_layers,
                    "BC",
                    env_name,
                    seed,
                    f"demos-{demo_num}",
                    date,
                )
                env = gym.make(env_name).env
                mean_score, std_dev = get_average_score(
                    network_load=save_location / f"BC-{epoch}-epochs.pth",
                    env=env,
                    episode_timeout=10000,
                    num_trials=25,
                    params=discrete_policy_params,
                    chooser_params=(None, None, None),
                )
                means.append(mean_score)
                std_devs.append(std_dev)
            # Combine per-seed standard deviations as an RMS — assumes the
            # runs are independent.
            demo_std_dev = np.sqrt(np.mean(np.array(std_devs) ** 2))
            # NOTE: "\\pm" (previously written "\pm", an invalid escape
            # sequence) renders literally for LaTeX tables.
            env_list.append(
                f"Num demos: {demo_num}\t {np.round(np.mean(means), 1)}"
                f" \\pm {np.round(demo_std_dev, 1)}"
            )
        overall_list.append(env_list)

    for entry in overall_list:
        print(entry)


if __name__ == "__main__":
    main()
| [
"algorithms.utils.generate_save_location",
"gym.make",
"pathlib.Path",
"algorithms.discrete_policy.DiscretePolicyParams",
"numpy.array",
"numpy.mean",
"envs.atari.get_avg_score.get_average_score",
"numpy.round"
] | [((331, 404), 'algorithms.discrete_policy.DiscretePolicyParams', 'DiscretePolicyParams', ([], {'actor_layers': 'hidden_layers', 'actor_activation': '"""tanh"""'}), "(actor_layers=hidden_layers, actor_activation='tanh')\n", (351, 404), False, 'from algorithms.discrete_policy import DiscretePolicyParams\n'), ((443, 455), 'pathlib.Path', 'Path', (['"""data"""'], {}), "('data')\n", (447, 455), False, 'from pathlib import Path\n'), ((899, 1028), 'algorithms.utils.generate_save_location', 'generate_save_location', (['save_base_path', 'discrete_policy_params.actor_layers', 'f"""BC"""', 'env_name', 'seed', 'f"""demos-{demo_num}"""', 'date'], {}), "(save_base_path, discrete_policy_params.actor_layers,\n f'BC', env_name, seed, f'demos-{demo_num}', date)\n", (921, 1028), False, 'from algorithms.utils import generate_save_location\n'), ((1277, 1471), 'envs.atari.get_avg_score.get_average_score', 'get_average_score', ([], {'network_load': "(save_location / f'BC-{epoch}-epochs.pth')", 'env': 'env', 'episode_timeout': '(10000)', 'num_trials': '(25)', 'params': 'discrete_policy_params', 'chooser_params': '(None, None, None)'}), "(network_load=save_location / f'BC-{epoch}-epochs.pth',\n env=env, episode_timeout=10000, num_trials=25, params=\n discrete_policy_params, chooser_params=(None, None, None))\n", (1294, 1471), False, 'from envs.atari.get_avg_score import get_average_score\n'), ((1215, 1233), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (1223, 1233), False, 'import gym\n'), ((1737, 1755), 'numpy.array', 'np.array', (['std_devs'], {}), '(std_devs)\n', (1745, 1755), True, 'import numpy as np\n'), ((1853, 1878), 'numpy.round', 'np.round', (['demo_std_dev', '(1)'], {}), '(demo_std_dev, 1)\n', (1861, 1878), True, 'import numpy as np\n'), ((1828, 1842), 'numpy.mean', 'np.mean', (['means'], {}), '(means)\n', (1835, 1842), True, 'import numpy as np\n')] |
import numpy
import setuptools
from Cython.Build import cythonize
# When True, emit annotated HTML reports highlighting Python interactions
# in the Cython sources.
ANNOTATE = False
# When True, turn off every Cython compiler optimization (debug builds).
DEBUG = False

# Cython translation units and native-library build/link settings.
sources = ['**/*.pyx']
library_dirs = ['../cpp/build/subprojects/common',
                '../cpp/build/subprojects/tsa']
runtime_library_dirs = ['cpp/build/subprojects/common',
                        'cpp/build/subprojects/tsa']
libraries = ['rlcommon', 'rltsa']
include_dirs = ['../cpp/subprojects/common/include',
                '../cpp/subprojects/tsa/include']
define_macros = [("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")]

# Safety checks only in debug builds; C division semantics in release.
compiler_directives = {
    'boundscheck': DEBUG,
    'wraparound': DEBUG,
    'cdivision': not DEBUG,
    'initializedcheck': DEBUG
}

extensions = [
    setuptools.Extension(name='*', language='c++', sources=sources,
                         library_dirs=library_dirs, libraries=libraries,
                         runtime_library_dirs=runtime_library_dirs,
                         include_dirs=include_dirs,
                         define_macros=define_macros)
]

setuptools.setup(
    name='syndrome-learner',
    version='0.1.0',
    description='A rule learning algorithm for learning syndrome definitions',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    packages=['rl.common', 'rl.tsa', 'rl.testbed'],
    install_requires=[
        'numpy>=1.21.0',
        'scipy>=1.7.0',
        'Cython>=0.29.0',
        'scikit-learn>=0.24.0',
        'liac-arff>=2.5.0',
        'pandas>=1.2.0'
    ],
    python_requires='>=3.7',
    ext_modules=cythonize(extensions, language_level='3', annotate=ANNOTATE,
                          compiler_directives=compiler_directives),
    include_dirs=[numpy.get_include()])
| [
"setuptools.Extension",
"Cython.Build.cythonize",
"numpy.get_include"
] | [((850, 1074), 'setuptools.Extension', 'setuptools.Extension', ([], {'name': '"""*"""', 'language': '"""c++"""', 'sources': 'sources', 'library_dirs': 'library_dirs', 'libraries': 'libraries', 'runtime_library_dirs': 'runtime_library_dirs', 'include_dirs': 'include_dirs', 'define_macros': 'define_macros'}), "(name='*', language='c++', sources=sources,\n library_dirs=library_dirs, libraries=libraries, runtime_library_dirs=\n runtime_library_dirs, include_dirs=include_dirs, define_macros=\n define_macros)\n", (870, 1074), False, 'import setuptools\n'), ((1615, 1720), 'Cython.Build.cythonize', 'cythonize', (['extensions'], {'language_level': '"""3"""', 'annotate': 'ANNOTATE', 'compiler_directives': 'compiler_directives'}), "(extensions, language_level='3', annotate=ANNOTATE,\n compiler_directives=compiler_directives)\n", (1624, 1720), False, 'from Cython.Build import cythonize\n'), ((1736, 1755), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1753, 1755), False, 'import numpy\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This module contains several metric functions.
"""
import numpy as np
from skimage.metrics import structural_similarity as ssim
def SNR(xhat, xref):
    """Computes the SNR metric.
    Arguments
    ---------
    xhat: numpy array
        The noised data.
    xref: numpy array
        The noise-free image.
    Returns
    -------
    float
        The SNR value in dB.
    """
    signal_power = np.mean(xref ** 2)
    noise_power = np.mean((xref - xhat) ** 2)
    return 10 * np.log10(signal_power / noise_power)
def NMSE(xhat, xref):
    """Computes the normalized mean square metric.
    Arguments
    ---------
    xhat: numpy array
        The noised data.
    xref: numpy array
        The noise-free image.
    Returns
    -------
    float
        The NMSE value.
    """
    error_energy = np.linalg.norm(xref - xhat) ** 2
    reference_energy = np.linalg.norm(xref) ** 2
    return error_energy / reference_energy
def aSAD(xhat, xref):
    """Computes the averaged Spectral Angle Distance metric.
    The input data number of dimensions can be:
    * 1: the data are spectra,
    * 2: the data are matrices of shape (n, M),
    * 3: the data are matrices of shape (m, n, M)
    where M is the spectrum size.
    Arguments
    ---------
    xhat: numpy array
        The noised data.
    xref: numpy array
        The noise-free image.
    Returns
    -------
    float
        The (mean) aSAD value.
    Raises
    ------
    ValueError
        If the inputs have more than 3 dimensions.
    """
    if xref.ndim == 1:
        # Clip the cosine into [-1, 1]: floating-point rounding can push it
        # slightly past 1 for (nearly) identical spectra, which would make
        # arccos return NaN.
        cos_angle = np.dot(xhat.T, xref) / (
            np.linalg.norm(xhat) * np.linalg.norm(xref))
        return float(np.arccos(np.clip(cos_angle, -1.0, 1.0)))
    elif xref.ndim == 2:
        # Average the per-spectrum angles over the rows.
        tmp = np.zeros(xref.shape[0])
        for cnt in range(xref.shape[0]):
            tmp[cnt] = aSAD(xhat=xhat[cnt, :], xref=xref[cnt, :])
        return tmp.mean()
    elif xref.ndim == 3:
        # Flatten the two spatial dimensions and reuse the 2-D path.
        return aSAD(
            xhat=xhat.reshape((-1, xhat.shape[2])),
            xref=xref.reshape((-1, xhat.shape[2])))
    # Previously any other ndim silently returned None.
    raise ValueError('Invalid data number of dimension.')
def SSIM(xhat, xref):
    """Computes the structural similarity index.
    Arguments
    ---------
    xhat: numpy array
        The noised data.
    xref: numpy array
        The noise-free image.
    Returns
    -------
    float
        The (mean) SSIM value.
    """
    if xref.ndim not in (2, 3):
        raise ValueError('Invalid data number of dimension.')
    # A third axis is interpreted as the channel (color) dimension.
    is_multichannel = xref.ndim == 3
    return ssim(xref, xhat, multichannel=is_multichannel)
| [
"numpy.zeros",
"skimage.metrics.structural_similarity",
"numpy.linalg.norm",
"numpy.mean",
"numpy.dot"
] | [((2324, 2367), 'skimage.metrics.structural_similarity', 'ssim', (['xref', 'xhat'], {'multichannel': 'multichannel'}), '(xref, xhat, multichannel=multichannel)\n', (2328, 2367), True, 'from skimage.metrics import structural_similarity as ssim\n'), ((780, 807), 'numpy.linalg.norm', 'np.linalg.norm', (['(xref - xhat)'], {}), '(xref - xhat)\n', (794, 807), True, 'import numpy as np\n'), ((813, 833), 'numpy.linalg.norm', 'np.linalg.norm', (['xref'], {}), '(xref)\n', (827, 833), True, 'import numpy as np\n'), ((1550, 1573), 'numpy.zeros', 'np.zeros', (['xref.shape[0]'], {}), '(xref.shape[0])\n', (1558, 1573), True, 'import numpy as np\n'), ((457, 475), 'numpy.mean', 'np.mean', (['(xref ** 2)'], {}), '(xref ** 2)\n', (464, 475), True, 'import numpy as np\n'), ((474, 501), 'numpy.mean', 'np.mean', (['((xref - xhat) ** 2)'], {}), '((xref - xhat) ** 2)\n', (481, 501), True, 'import numpy as np\n'), ((1422, 1442), 'numpy.dot', 'np.dot', (['xhat.T', 'xref'], {}), '(xhat.T, xref)\n', (1428, 1442), True, 'import numpy as np\n'), ((1465, 1485), 'numpy.linalg.norm', 'np.linalg.norm', (['xhat'], {}), '(xhat)\n', (1479, 1485), True, 'import numpy as np\n'), ((1486, 1506), 'numpy.linalg.norm', 'np.linalg.norm', (['xref'], {}), '(xref)\n', (1500, 1506), True, 'import numpy as np\n')] |
import numpy as np
from plots import plot_legendre_polynomials
# Evaluate the first Legendre polynomials on a dense grid over [-1, 1].
sample_points = np.linspace(-1, 1, 1001)
plot_legendre_polynomials(sample_points, n=5)
| [
"plots.plot_legendre_polynomials",
"numpy.linspace"
] | [((69, 93), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(1001)'], {}), '(-1, 1, 1001)\n', (80, 93), True, 'import numpy as np\n'), ((94, 127), 'plots.plot_legendre_polynomials', 'plot_legendre_polynomials', (['x'], {'n': '(5)'}), '(x, n=5)\n', (119, 127), False, 'from plots import plot_legendre_polynomials\n')] |
import cv2
import math
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
import numpy as np
from keras.wrappers.scikit_learn import KerasClassifier
import keras
from keras.utils import np_utils
from keras.utils import to_categorical
from skimage.transform import resize
from sklearn.model_selection import train_test_split, GridSearchCV
import PIL
from PIL import Image
from keras.layers import Dense, BatchNormalization, Conv2D, Conv3D, Flatten
from keras import Model, Input
import os
import pandas as pd
from pandas import read_csv
from keras.models import model_from_json
import random
import imutils
#movies_directory contains the directory of movie files
#frames_directory is the directory that will contain multiple directories
#Each of these directories will have individual frames from each movie
#Will additionally output a .csv file with three columns:
#Movie Name, First Frame, Number of Frames
#This csv defines movie clips.
#Example: ants4-2 2400 10 is the movie clip from ants4-2 starting at frame 2400 that is 10 frames long
def movies_to_frames(movies_directory, frames_directory, length, height, clip_length, out_csv=None, frames_flag=0):
    """Split every .mp4 in movies_directory into 60-FPS frames and index clips in a CSV.

    For each movie a sub-directory of frames_directory is created; when
    frames_flag == 1, resized frames are written there for the original
    orientation plus 55 randomly rotated augmentations.  A CSV describing
    fixed-length clips is written with columns: Movie Name, First Frame,
    Clip Length (e.g. "ants4-2_orig 2400 10" is a 10-frame clip starting at
    frame 2400).

    Parameters
    ----------
    movies_directory : str
        Directory containing the source .mp4 files (must end with '/').
    frames_directory : str
        Directory under which per-movie frame folders are created.
    length, height : int
        Target frame size passed to cv2.resize as (length, height).
        NOTE(review): cv2.resize expects (width, height), so 'length' is
        presumably the width — confirm with callers.
    clip_length : int
        Number of frames per clip recorded in the CSV.
    out_csv : str, optional
        Output CSV path; defaults to a file in the current directory.
    frames_flag : int, default 0
        When 1, frame images are actually written to disk; otherwise only
        the CSV is produced.

    Returns
    -------
    str
        Path of the CSV file that was written.
    """
    cwd = os.getcwd()
    # Parallel columns of the clip-index CSV.
    movie_names = []
    first_frames = []
    clip_lengths = []
    for movie in os.listdir(movies_directory):
        # 55 distinct random rotation angles (degrees) per movie for
        # data augmentation.
        aug_nums = 55
        randangles = random.sample(range(1, 359), aug_nums)
        movie_path = movies_directory + movie  # source video file
        movie_name_orig, extension = os.path.splitext(movie)
        print(movie_name_orig)
        if extension != ".mp4":
            continue
        # Per-movie output folder; frames are written with relative paths,
        # so work inside that folder.
        movie_dir_name = frames_directory + movie_name_orig
        if not os.path.isdir(movie_dir_name):
            os.mkdir(movie_dir_name)
        os.chdir(movie_dir_name)
        cap = cv2.VideoCapture(movie_path)
        framerate = cap.get(5)  # CAP_PROP_FPS
        # Force ~60 FPS: some videos are recorded at (multiples of) higher
        # rates, so keep only every Nth frame for those.
        FPS_flag = 0
        frame_every = 0
        if framerate > 70:
            FPS_flag = 1
            frame_every = int(math.floor(framerate / 60))
        frame_num = 0
        resize_dims = (length, height)
        while cap.isOpened():
            frame_ID = cap.get(1)  # CAP_PROP_POS_FRAMES
            ret, frame = cap.read()
            if not ret:
                break
            if FPS_flag == 0 or (FPS_flag == 1 and frame_ID % frame_every == 0):
                # A full clip has just been completed: record it for the
                # original orientation and every augmentation.
                # NOTE(review): a clip ending exactly at the video's last
                # frame is never recorded — confirm this is intended.
                if frame_num != 0 and frame_num % clip_length == 0:
                    movie_names.append(movie_name_orig + "_orig")
                    for i in range(aug_nums):
                        movie_names.append(movie_name_orig + "_rot{}".format(i + 1))
                    first_frames.extend([frame_num - clip_length] * (aug_nums + 1))
                    clip_lengths.extend([clip_length] * (aug_nums + 1))
                file_name_orig = movie_name_orig + "_orig_frame{}.jpg".format(frame_num)
                file_names_rot = [
                    movie_name_orig + "_rot{}_frame{}.jpg".format(i + 1, frame_num)
                    for i in range(aug_nums)]
                frame_num += 1
                if frames_flag == 1:
                    # Resize once, write the original, then each rotation.
                    resized_frame = cv2.resize(frame, resize_dims)
                    cv2.imwrite(file_name_orig, resized_frame)
                    for ang, file_name in zip(randangles, file_names_rot):
                        rotated_frame = imutils.rotate(resized_frame, angle=ang)
                        cv2.imwrite(file_name, rotated_frame)
        cap.release()
    os.chdir(cwd)
    if out_csv is None:
        out_csv = cwd + "/Frame_Parameters_%d_Frame_Clips.csv" % clip_length
    # 'records' replaces the original local name 'dict', which shadowed the
    # builtin; the original also built this DataFrame twice (dead code).
    records = {'Movie Name': movie_names, 'First Frame': first_frames,
               'Clip Length': clip_lengths}
    df = pd.DataFrame(records)
    df.to_csv(out_csv, index=False)
    return out_csv
def compute_optical_flow(movie_name, first_index, num_frames, movies_directory):
    """Average Farneback optical flow over a clip of consecutive frames.

    Frames are read from <movies_directory>/<movie_folder>/, where
    movie_folder is the part of movie_name before its first underscore
    (augmented clips such as "ants4-2_rot3" share the folder "ants4-2").

    Parameters
    ----------
    movie_name : str
        Clip name, e.g. "ants4-2_orig"; frame files are named
        "<movie_name>_frame<i>.jpg".
    first_index : int
        Index of the clip's first frame.
    num_frames : int
        Number of frames in the clip (yields num_frames - 1 flow fields).
    movies_directory : str
        Directory containing the per-movie frame folders.

    Returns
    -------
    tuple of float
        (mean_x, mean_y): flow components averaged over all pixels and all
        frame pairs — the clip's global motion estimate.

    Raises
    ------
    FileNotFoundError
        If the movie folder or any required frame image is missing.
    """
    # Augmented clip names look like "<movie>_<suffix>"; the frames live in
    # a folder named after the original movie only.
    underscore_idx = movie_name.find('_')
    movie_folder = movie_name[0:underscore_idx]
    if movies_directory[-1] == '/':
        frames_directory = movies_directory + movie_folder + '/'
    else:
        frames_directory = movies_directory + '/' + movie_folder + '/'
    if not os.path.isdir(frames_directory):
        # The original used a bare `raise` with no active exception here,
        # which surfaced as an unrelated RuntimeError.
        raise FileNotFoundError(
            "{} is not a valid movie or {} does not contain this movie".format(
                movie_name, movies_directory))
    first_frame_name = frames_directory + movie_name + "_frame{}.jpg".format(first_index)
    first_frame = cv2.imread(first_frame_name, cv2.IMREAD_GRAYSCALE)
    # cv2.imread returns None (it does not raise) when the file is absent,
    # so the original try/except never caught this case.
    if first_frame is None:
        raise FileNotFoundError(
            "First frame number ({}) is more than number of frames".format(first_index))
    prev_frame = first_frame
    height = first_frame.shape[0]
    length = first_frame.shape[1]
    num_flows = num_frames - 1
    flow_shape = (num_flows, height, length)
    final_index = first_index + num_frames - 1
    # One flow field per consecutive frame pair; each field assigns an
    # (x, y) vector to every pixel, hence shape
    # (num_frames - 1, height, length) per component.
    x_coords = np.zeros(flow_shape)
    y_coords = np.zeros(flow_shape)
    for frame_index in range(first_index, final_index):
        curr_frame_name = frames_directory + movie_name + "_frame{}.jpg".format(frame_index + 1)
        curr_frame = cv2.imread(curr_frame_name, cv2.IMREAD_GRAYSCALE)
        if curr_frame is None:
            raise FileNotFoundError(
                "Frame {} is out of bounds. Movie {} has {} frames".format(
                    first_index, movie_name, len(os.listdir(frames_directory))))
        # Farneback flow between CONSECUTIVE frames; the global motion is
        # obtained below by averaging the vectors over space and time.
        flow = cv2.calcOpticalFlowFarneback(prev_frame, curr_frame, flow=None,
                                            pyr_scale=0.5, levels=3,
                                            winsize=15, iterations=3,
                                            poly_n=5, poly_sigma=1.2, flags=0)
        x_coords[frame_index - first_index] = flow[..., 0]
        y_coords[frame_index - first_index] = flow[..., 1]
        prev_frame = curr_frame
    mean_x = np.mean(x_coords)
    mean_y = np.mean(y_coords)
    return mean_x, mean_y
#The input is a .csv file and an output path
#The input csv file should contain a 3 columns:
#Movie Name,First Frame, Clip Length
def optical_flows(csv_file, movies_directory, out_csv=None):
    """Compute the mean optical flow for every clip listed in a parameters CSV.

    Parameters
    ----------
    csv_file : str
        CSV with columns 'Movie Name', 'First Frame', 'Clip Length'.
    movies_directory : str
        Root directory containing one folder of frames per movie.
    out_csv : str or None
        Output path; defaults to './flows_<clip_length>_frame_clips.csv'.

    Returns
    -------
    str
        Path of the written CSV (input columns plus 'x' and 'y' mean flow).
    """
    movie_names = []
    first_indexes = []
    clip_lengths = []
    x_directions = []
    y_directions = []
    df = pd.read_csv(csv_file)
    num_movies = len(df['Movie Name'])
    for i in range(num_movies):  # rows include orig, fliphoriz, flipvert, and rotate clips
        movie_name = df['Movie Name'][i]
        first_index = df['First Frame'][i]
        clip_length = df['Clip Length'][i]
        x, y = compute_optical_flow(movie_name, first_index, clip_length, movies_directory)
        movie_names.append(movie_name)
        first_indexes.append(first_index)
        clip_lengths.append(clip_length)
        x_directions.append(x)
        y_directions.append(y)
    # Idiom fixes: compare to None with 'is'; don't shadow the builtin 'dict'.
    # NOTE(review): an empty input CSV would make clip_lengths[0] raise
    # IndexError here -- confirm empty inputs are impossible upstream.
    if out_csv is None:
        out_csv = os.getcwd() + "/flows_%d_frame_clips.csv" % clip_lengths[0]
    columns = {'Movie Name': movie_names, 'First Frame': first_indexes,
               'Clip Length': clip_lengths,
               'x': x_directions, 'y': y_directions}
    df = pd.DataFrame(columns)
    df.to_csv(out_csv, index=False)
    return out_csv
def create_csvs():
    """Run the frame-extraction + optical-flow pipeline in the current directory.

    Expects a 'Movies/' folder under the CWD; extracted frames go to
    'Frames/' and the parameter/flow CSVs are written to disk.
    """
    cwd = os.getcwd()
    # os.path.join handles the separator, so no need to special-case a
    # trailing '/' on the CWD (os.getcwd() essentially never ends in one).
    input_movies_directory = os.path.join(cwd, 'Movies') + '/'
    frames_directory = os.path.join(cwd, 'Frames') + '/'
    frame_parameters_csv = movies_to_frames(input_movies_directory, frames_directory, 64, 36, 240, frames_flag=1)
    # frame_parameters_csv = "/home/macleanlab/josh/NaturalMotionCNN/Frame_Parameters_240_Frame_Clips.csv"
    flows_csv = optical_flows(frame_parameters_csv, frames_directory)
create_csvs()
#------------------DATA PREPARATION---------------
#Converts movie clips to numpy arrays
#Elements of movies_directory should be directories
#Inside each directory will contain frames of a movie
#Output is tuple (movie_name, frames of the movie in order in a np array of dimensionality (num_frames, 36,64,3))
def movie_clip_to_arr(movies_directory, movie_name, first_index, height, length, clip_length):
    """Load a clip's frames from disk into a uint8 array.

    Parameters
    ----------
    movies_directory : str
        Root directory (with trailing '/'); frames live in '<root><folder>/'
        where <folder> is the part of movie_name before its first underscore.
    movie_name : str
        Clip name used in the frame filenames.
    first_index : int
        Number of the first frame to load.
    height, length : int
        Expected frame dimensions in pixels.
    clip_length : int
        Number of consecutive frames to load.

    Returns
    -------
    (str, ndarray)
        The clip name and a (clip_length, height, length, 3) uint8 array.
    """
    movie_folder = movie_name[0:movie_name.find('_')]
    frame_directory = movies_directory + movie_folder
    # Allocate uint8 directly instead of float64 zeros + astype('uint8'):
    # identical values, one eighth the memory and no extra conversion pass.
    movie_data = np.zeros((clip_length, height, length, 3), dtype='uint8')
    for frame_num in range(first_index, first_index + clip_length):
        frame_path = frame_directory + "/" + movie_name + "_frame%d.jpg" % frame_num
        frame = Image.open(frame_path, mode='r')
        movie_data[frame_num - first_index] = np.asarray(frame, dtype='uint8')
    # movie_data = movie_data/255 #Normalization
    return (movie_name, movie_data)
#Creates the dataset, assuming that the frames have been processed and that the parameters file has been created
#x[i] will have shape (num_frames=240, 36, 64, 3)
#Therefore, x will have shape (num_movies, 240, 36, 64, 3)
#y has shape (num_movies, 2)
#y[i] = [x_direction, y_direction] of movie indexed with i
def regression_dataset(movies_directory, parameters_file, height, length, clip_length):
    """Build a regression dataset mapping each clip to its mean flow vector.

    Parameters
    ----------
    movies_directory : str
        Root directory of per-movie frame folders.
    parameters_file : str
        CSV with 'Movie Name', 'First Frame', 'Clip Length', 'x', 'y' columns.
    height, length, clip_length : int
        Frame height/width in pixels and number of frames per clip.

    Returns
    -------
    (ndarray, ndarray)
        x of shape (num_movies, clip_length, height, length, 3) uint8 and
        y of shape (num_movies, 2) with [x_direction, y_direction] per clip.
    """
    df = pd.read_csv(parameters_file)
    num_movies = len(df['Movie Name'])
    x = np.zeros((num_movies, clip_length, height, length, 3), dtype='uint8')
    y = np.zeros((num_movies, 2))
    for i in range(num_movies):
        movie_name = df['Movie Name'][i]
        x_dir = df['x'][i]
        y_dir = df['y'][i]
        first_index = df['First Frame'][i]
        # NOTE(review): the per-row 'Clip Length' column is ignored and the
        # clip_length argument is used for every clip (the original read it
        # into an unused local) -- confirm this is intended.
        _, movie_data = movie_clip_to_arr(movies_directory, movie_name, first_index, height, length, clip_length)
        x[i] = movie_data
        y[i] = np.array([x_dir, y_dir])
    return x, y
# 1: 0; 2: 90; 3: 180; 4: 270
def categorical_dataset(movies_directory, parameters_file, height, length,clip_length):
df = pd.read_csv(parameters_file)
num_movies = len(df['Movie Name'])
x = np.zeros((num_movies, clip_length,height,length,3), dtype='uint8')
y = np.zeros(num_movies)
for i in range(num_movies):
movie_name = df['Movie Name'][i]
x_dir = df['x'][i]
y_dir = df['y'][i]
first_index = df['First Frame'][i]
clip_len = df['Clip Length'][i]
_, movie_data = movie_clip_to_arr(movies_directory, movie_name, first_index, height, length, clip_length)
x[i] = movie_data
theta = np.arctan2(y_dir,x_dir)
if theta >= -np.pi/8 and theta < np.pi/8:
category = 0
elif theta >= np.pi/8 and theta < 3*np.pi/8:
category = 1
elif theta >= 3*np.pi/8 and theta < 5*np.pi/8:
category = 2
elif theta >= 5*np.pi/8 and theta < 7*np.pi/8:
category = 3
elif theta >= 7*np.pi/16 or theta < -7*np.pi/8:
category = 4
elif theta >= -7*np.pi/8 and theta < -5*np.pi/8:
category = 5
elif theta >= -5*np.pi/8 and theta < -3*np.pi/8:
category = 6
elif theta >= -3*np.pi/8 and theta < -np.pi/8:
category = 7
y[i] = category
return x,y
# Script entry: build the regression dataset from the extracted frames and
# the flow CSV in the current working directory, then save it compressed.
cwd = os.getcwd()
# NOTE(review): os.getcwd() essentially never ends in '/' (only at the
# filesystem root), so the first branch is effectively dead -- confirm intent.
if(cwd[-1]=='/'):
    frames_directory = os.getcwd() + 'Frames/'
    flows_csv = os.getcwd() + 'flows_240_frame_clips.csv'
else:
    frames_directory = os.getcwd() + '/Frames/'
    flows_csv = os.getcwd() + '/flows_240_frame_clips.csv'
# #-----------------SETTING UP THE DATASET-----------------------
# #Use this if you want to try testing only for direction
# #x,y = categorical_dataset(frames_directory, flows_csv, 36, 64, 240)
x,y = regression_dataset(frames_directory, flows_csv, 36, 64, 240)
# x_train, x_val, y_train, y_val = train_test_split(x,y, test_size=0.25, random_state=42)
# np.save('x_all', x)
# np.save('y_all', y)
# Compressed save keeps the (num_movies, 240, 36, 64, 3) uint8 array manageable.
np.savez_compressed('natural_data', x=x, y=y)
| [
"os.mkdir",
"numpy.arctan2",
"pandas.read_csv",
"numpy.savez_compressed",
"numpy.mean",
"cv2.calcOpticalFlowFarneback",
"os.chdir",
"pandas.DataFrame",
"cv2.imwrite",
"cv2.resize",
"numpy.asarray",
"imutils.rotate",
"os.listdir",
"os.getcwd",
"os.path.isdir",
"numpy.zeros",
"math.flo... | [((13327, 13338), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13336, 13338), False, 'import os\n'), ((13975, 14020), 'numpy.savez_compressed', 'np.savez_compressed', (['"""natural_data"""'], {'x': 'x', 'y': 'y'}), "('natural_data', x=x, y=y)\n", (13994, 14020), True, 'import numpy as np\n'), ((1190, 1201), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1199, 1201), False, 'import os\n'), ((1344, 1372), 'os.listdir', 'os.listdir', (['movies_directory'], {}), '(movies_directory)\n', (1354, 1372), False, 'import os\n'), ((5230, 5248), 'pandas.DataFrame', 'pd.DataFrame', (['dict'], {}), '(dict)\n', (5242, 5248), True, 'import pandas as pd\n'), ((5254, 5267), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (5262, 5267), False, 'import os\n'), ((5380, 5398), 'pandas.DataFrame', 'pd.DataFrame', (['dict'], {}), '(dict)\n', (5392, 5398), True, 'import pandas as pd\n'), ((6964, 6984), 'numpy.zeros', 'np.zeros', (['flow_shape'], {}), '(flow_shape)\n', (6972, 6984), True, 'import numpy as np\n'), ((7000, 7020), 'numpy.zeros', 'np.zeros', (['flow_shape'], {}), '(flow_shape)\n', (7008, 7020), True, 'import numpy as np\n'), ((8072, 8089), 'numpy.mean', 'np.mean', (['x_coords'], {}), '(x_coords)\n', (8079, 8089), True, 'import numpy as np\n'), ((8103, 8120), 'numpy.mean', 'np.mean', (['y_coords'], {}), '(y_coords)\n', (8110, 8120), True, 'import numpy as np\n'), ((8460, 8481), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (8471, 8481), True, 'import pandas as pd\n'), ((9274, 9292), 'pandas.DataFrame', 'pd.DataFrame', (['dict'], {}), '(dict)\n', (9286, 9292), True, 'import pandas as pd\n'), ((10468, 10510), 'numpy.zeros', 'np.zeros', (['(clip_length, height, length, 3)'], {}), '((clip_length, height, length, 3))\n', (10476, 10510), True, 'import numpy as np\n'), ((11356, 11384), 'pandas.read_csv', 'pd.read_csv', (['parameters_file'], {}), '(parameters_file)\n', (11367, 11384), True, 'import pandas as pd\n'), ((11432, 11501), 'numpy.zeros', 
'np.zeros', (['(num_movies, clip_length, height, length, 3)'], {'dtype': '"""uint8"""'}), "((num_movies, clip_length, height, length, 3), dtype='uint8')\n", (11440, 11501), True, 'import numpy as np\n'), ((11507, 11532), 'numpy.zeros', 'np.zeros', (['(num_movies, 2)'], {}), '((num_movies, 2))\n', (11515, 11532), True, 'import numpy as np\n'), ((12068, 12096), 'pandas.read_csv', 'pd.read_csv', (['parameters_file'], {}), '(parameters_file)\n', (12079, 12096), True, 'import pandas as pd\n'), ((12144, 12213), 'numpy.zeros', 'np.zeros', (['(num_movies, clip_length, height, length, 3)'], {'dtype': '"""uint8"""'}), "((num_movies, clip_length, height, length, 3), dtype='uint8')\n", (12152, 12213), True, 'import numpy as np\n'), ((12219, 12239), 'numpy.zeros', 'np.zeros', (['num_movies'], {}), '(num_movies)\n', (12227, 12239), True, 'import numpy as np\n'), ((1639, 1662), 'os.path.splitext', 'os.path.splitext', (['movie'], {}), '(movie)\n', (1655, 1662), False, 'import os\n'), ((1999, 2023), 'os.chdir', 'os.chdir', (['movie_dir_name'], {}), '(movie_dir_name)\n', (2007, 2023), False, 'import os\n'), ((2039, 2067), 'cv2.VideoCapture', 'cv2.VideoCapture', (['movie_path'], {}), '(movie_path)\n', (2055, 2067), False, 'import cv2\n'), ((6043, 6074), 'os.path.isdir', 'os.path.isdir', (['frames_directory'], {}), '(frames_directory)\n', (6056, 6074), False, 'import os\n'), ((6328, 6378), 'cv2.imread', 'cv2.imread', (['first_frame_name', 'cv2.IMREAD_GRAYSCALE'], {}), '(first_frame_name, cv2.IMREAD_GRAYSCALE)\n', (6338, 6378), False, 'import cv2\n'), ((7663, 7817), 'cv2.calcOpticalFlowFarneback', 'cv2.calcOpticalFlowFarneback', (['prev_frame', 'curr_frame'], {'flow': 'None', 'pyr_scale': '(0.5)', 'levels': '(3)', 'winsize': '(15)', 'iterations': '(3)', 'poly_n': '(5)', 'poly_sigma': '(1.2)', 'flags': '(0)'}), '(prev_frame, curr_frame, flow=None, pyr_scale=\n 0.5, levels=3, winsize=15, iterations=3, poly_n=5, poly_sigma=1.2, flags=0)\n', (7691, 7817), False, 'import cv2\n'), ((10677, 
10709), 'PIL.Image.open', 'Image.open', (['frame_path'], {'mode': '"""r"""'}), "(frame_path, mode='r')\n", (10687, 10709), False, 'from PIL import Image\n'), ((10731, 10763), 'numpy.asarray', 'np.asarray', (['frame'], {'dtype': '"""uint8"""'}), "(frame, dtype='uint8')\n", (10741, 10763), True, 'import numpy as np\n'), ((11900, 11924), 'numpy.array', 'np.array', (['[x_dir, y_dir]'], {}), '([x_dir, y_dir])\n', (11908, 11924), True, 'import numpy as np\n'), ((12609, 12633), 'numpy.arctan2', 'np.arctan2', (['y_dir', 'x_dir'], {}), '(y_dir, x_dir)\n', (12619, 12633), True, 'import numpy as np\n'), ((13381, 13392), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13390, 13392), False, 'import os\n'), ((13421, 13432), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13430, 13432), False, 'import os\n'), ((13492, 13503), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13501, 13503), False, 'import os\n'), ((13533, 13544), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13542, 13544), False, 'import os\n'), ((1923, 1952), 'os.path.isdir', 'os.path.isdir', (['movie_dir_name'], {}), '(movie_dir_name)\n', (1936, 1952), False, 'import os\n'), ((1966, 1990), 'os.mkdir', 'os.mkdir', (['movie_dir_name'], {}), '(movie_dir_name)\n', (1974, 1990), False, 'import os\n'), ((7215, 7264), 'cv2.imread', 'cv2.imread', (['curr_frame_name', 'cv2.IMREAD_GRAYSCALE'], {}), '(curr_frame_name, cv2.IMREAD_GRAYSCALE)\n', (7225, 7264), False, 'import cv2\n'), ((9055, 9066), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9064, 9066), False, 'import os\n'), ((9377, 9388), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9386, 9388), False, 'import os\n'), ((9435, 9446), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9444, 9446), False, 'import os\n'), ((9486, 9497), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9495, 9497), False, 'import os\n'), ((9554, 9565), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9563, 9565), False, 'import os\n'), ((9606, 9617), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9615, 9617), False, 
'import os\n'), ((2404, 2430), 'math.floor', 'math.floor', (['(framerate / 60)'], {}), '(framerate / 60)\n', (2414, 2430), False, 'import math\n'), ((4788, 4818), 'cv2.resize', 'cv2.resize', (['frame', 'resize_dims'], {}), '(frame, resize_dims)\n', (4798, 4818), False, 'import cv2\n'), ((4839, 4881), 'cv2.imwrite', 'cv2.imwrite', (['file_name_orig', 'resized_frame'], {}), '(file_name_orig, resized_frame)\n', (4850, 4881), False, 'import cv2\n'), ((4997, 5037), 'imutils.rotate', 'imutils.rotate', (['resized_frame'], {'angle': 'ang'}), '(resized_frame, angle=ang)\n', (5011, 5037), False, 'import imutils\n'), ((5062, 5099), 'cv2.imwrite', 'cv2.imwrite', (['file_name', 'rotated_frame'], {}), '(file_name, rotated_frame)\n', (5073, 5099), False, 'import cv2\n'), ((7388, 7416), 'os.listdir', 'os.listdir', (['frames_directory'], {}), '(frames_directory)\n', (7398, 7416), False, 'import os\n')] |
from __future__ import absolute_import, print_function, division
# Standard
import sys
from os.path import realpath, join, split
from vtool.tests import grabdata
# Scientific
import numpy as np
import cv2
# TPL
import pyhesaff
import utool
utool.inject_colored_exceptions()
def get_test_image():
    """Return the absolute path of the CLI-selected test image.

    Defaults to zebra.jpg; '--lena.png' / '--jeff.png' flags switch images.
    Flags are checked in a fixed order, so a later-listed flag wins when
    several are given (same precedence as the original if-chain).
    """
    img_fname = 'zebra.jpg'
    flag_to_fname = (('--zebra.png', 'zebra.jpg'),
                     ('--lena.png', 'lena.jpg'),
                     ('--jeff.png', 'jeff.png'))
    for flag, fname in flag_to_fname:
        if flag in sys.argv:
            img_fname = fname
    imgdir = grabdata.get_testdata_dir()
    return realpath(join(imgdir, img_fname))
def load_test_data(short=False, n=0, use_cpp=False, **kwargs):
    """Detect features on the test image and return all locals as a dict.

    Runs pyhesaff feature detection on the image chosen by get_test_image()
    and returns locals() (image buffers, kpts, desc, parameters, ...).
    When short is True and n > 0, keypoints/descriptors are subsampled to
    roughly n spaced entries.
    """
    # NOTE(review): 'short' is a parameter, so it is always present in
    # vars() inside this function -- this guard looks like dead code.
    if 'short' not in vars():
        short = False
    # Read Image
    #ellipse.rrr()
    nScales = 4
    nSamples = 16
    img_fpath = get_test_image()
    imgBGR = cv2.imread(img_fpath)
    imgLAB = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2LAB)
    imgL = imgLAB[:, :, 0]
    detect_kwargs = {
        'scale_min': 20,
        'scale_max': 100
    }
    detect_kwargs.update(kwargs)
    if not use_cpp:
        kpts, desc = pyhesaff.detect_feats(img_fpath, **detect_kwargs)
    else:
        # Try the new C++ code
        [kpts], [desc] = pyhesaff.detect_feats_list([img_fpath], **detect_kwargs)
    if short and n > 0:
        extra_fxs = []
        # NOTE(review): get_test_image() returns '*.jpg' filenames, so this
        # '.png' comparison can apparently never match -- verify.
        if split(img_fpath)[1] == 'zebra.png':
            extra_fxs = [374, 520, 880][0:1]
        fxs = np.array(spaced_elements2(kpts, n).tolist() + extra_fxs)
        kpts = kpts[fxs]
        desc = desc[fxs]
    # Return every local (images, kpts, desc, params) as a dict.
    test_data = locals()
    return test_data
def spaced_elements2(list_, n):
    """Return indices of roughly n evenly spaced elements of list_.

    Parameters
    ----------
    list_ : sequence
        Any sized sequence; only its length is used.
    n : int or None
        Target number of indices. None returns every index; 0 returns an
        empty array.

    Returns
    -------
    ndarray
        Strided index array (the last index is never included).
    """
    if n is None:
        return np.arange(len(list_))
    if n == 0:
        return np.empty(0)
    indexes = np.arange(len(list_))
    # BUG FIX: when n > len(list_) the integer division gave stride 0 and
    # the slice raised "slice step cannot be zero"; clamp to at least 1.
    stride = max(1, len(indexes) // n)
    return indexes[0:-1:stride]
def spaced_elements(list_, n):
    """Return roughly n evenly spaced elements of list_.

    list_ must support fancy indexing with an integer ndarray (i.e. be a
    numpy array), since the spaced indices are applied directly.

    NOTE(review): n=None returns the literal string 'list', which looks
    like a placeholder; kept for backward compatibility with any caller
    that checks for it.
    """
    if n is None:
        return 'list'
    indexes = np.arange(len(list_))
    # BUG FIX: clamp the stride to >= 1 so n > len(list_) no longer raises
    # "slice step cannot be zero".
    stride = max(1, len(indexes) // n)
    return list_[indexes[0:-1:stride]]
| [
"cv2.cvtColor",
"numpy.empty",
"pyhesaff.detect_feats_list",
"utool.inject_colored_exceptions",
"vtool.tests.grabdata.get_testdata_dir",
"cv2.imread",
"pyhesaff.detect_feats",
"os.path.split",
"os.path.join"
] | [((240, 273), 'utool.inject_colored_exceptions', 'utool.inject_colored_exceptions', ([], {}), '()\n', (271, 273), False, 'import utool\n'), ((533, 560), 'vtool.tests.grabdata.get_testdata_dir', 'grabdata.get_testdata_dir', ([], {}), '()\n', (558, 560), False, 'from vtool.tests import grabdata\n'), ((865, 886), 'cv2.imread', 'cv2.imread', (['img_fpath'], {}), '(img_fpath)\n', (875, 886), False, 'import cv2\n'), ((900, 939), 'cv2.cvtColor', 'cv2.cvtColor', (['imgBGR', 'cv2.COLOR_BGR2LAB'], {}), '(imgBGR, cv2.COLOR_BGR2LAB)\n', (912, 939), False, 'import cv2\n'), ((586, 609), 'os.path.join', 'join', (['imgdir', 'img_fname'], {}), '(imgdir, img_fname)\n', (590, 609), False, 'from os.path import realpath, join, split\n'), ((1119, 1168), 'pyhesaff.detect_feats', 'pyhesaff.detect_feats', (['img_fpath'], {}), '(img_fpath, **detect_kwargs)\n', (1140, 1168), False, 'import pyhesaff\n'), ((1235, 1291), 'pyhesaff.detect_feats_list', 'pyhesaff.detect_feats_list', (['[img_fpath]'], {}), '([img_fpath], **detect_kwargs)\n', (1261, 1291), False, 'import pyhesaff\n'), ((1718, 1729), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (1726, 1729), True, 'import numpy as np\n'), ((1351, 1367), 'os.path.split', 'split', (['img_fpath'], {}), '(img_fpath)\n', (1356, 1367), False, 'from os.path import realpath, join, split\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 18 16:38:05 2019
@author: Peter
"""
import os
import numpy as np
import h5py, PIL
from uclahedp.tools import hdf as hdftools
from uclahedp.tools import csv as csvtools
from uclahedp.tools import util
#Used for natural sorting filenames
import re
#Nice regex natural sorting algorithm found on stack overflow:
#https://stackoverflow.com/questions/4836710/does-python-have-a-built-in-function-for-string-natural-sort
def natural_sort(l):
    """Sort strings so embedded integers compare numerically ('f2' < 'f10').

    Each string is split on digit runs; numeric chunks compare as ints and
    the rest compare case-insensitively.
    """
    def _token(chunk):
        return int(chunk) if chunk.isdigit() else chunk.lower()

    def _key(text):
        return [_token(chunk) for chunk in re.split('([0-9]+)', text)]

    return sorted(l, key=_key)
def imgDirToRaw(run, probe, img_dir, dest, csv_dir, verbose=False):
    """Convert a directory of camera frames into a raw HDF5 dataset.

    Reads per-run metadata from the CSV directory, collects and
    naturally sorts all image files in the run's folder, and writes them
    (rotated 270 degrees per channel) into a gzip-compressed HDF5 dataset
    of shape (frames, xpixels, ypixels, chan), together with axis datasets
    and the metadata attributes.

    Parameters
    ----------
    run, probe:
        Identify the metadata row to load via csvtools.
    img_dir:
        Root directory containing one folder of frames per run.
    dest:
        hdftools path object with .file and .group members (destination).
    csv_dir:
        Directory holding the metadata CSV files.
    verbose:
        Accepted for interface consistency; currently unused here.

    Returns
    -------
    dest, for chaining.
    """
    #Import attributes for this run/probe
    attrs = csvtools.getAllAttrs(csv_dir, run, probe)
    #Check for keys always required by this function
    req_keys = [ 'run_folder']
    csvtools.missingKeys(attrs, req_keys, fatal_error=True)
    run_folder = attrs['run_folder'][0]
    src = os.path.join(img_dir, run_folder)
    #Go through the directory and find all the image files
    imgfiles = []
    for root, dirs, files in os.walk(src):
        files = [f for f in files if f[0] != '.'] #Exclude files beginning in .
        for file in files:
            imgfiles.append(os.path.join(src, file))
    #Natural-sort the images by filename
    imgfiles = natural_sort(imgfiles)
    nframes = len(imgfiles)
    #remove files if they already exist
    if os.path.exists(dest.file):
        os.remove(dest.file)
    #Create the destination file
    with h5py.File(dest.file, "a") as df:
        #Assume all images are the same shape, load the first one to figure
        #out the array dimensions
        img = PIL.Image.open(imgfiles[0])
        nxpx, nypx = img.size
        #Bands will include the names of the different channels
        nchan = len(img.getbands())
        #Create the dest group, throw error if it exists
        if dest.group != '/' and dest.group in df.keys():
            raise hdftools.hdfGroupExists(dest)
        grp = df[dest.group]
        #Initialize the output data array
        if 'data' in grp.keys():
            raise hdftools.hdfDatasetExists(str(dest) + ' -> ' + "'data'")
        #Create the dataset + associated attributes
        #Chunked one frame (and one channel) at a time to match the write
        #pattern below.
        grp.require_dataset("data", (nframes, nxpx, nypx, nchan), np.float32,
                            chunks=(1, nxpx, nypx, 1),
                            compression='gzip')
        grp['data'].attrs['unit'] = ''
        #Initialize time-remaining printout
        tr = util.timeRemaining(nframes, reportevery=5)
        #Actually put the images into the file
        for i,f in enumerate(imgfiles):
            tr.updateTimeRemaining(i)
            img = np.array(PIL.Image.open(f))
            img = np.reshape(img, [nxpx, nypx, nchan])
            #Rotate images (270 degrees, per channel)
            for chan in range(nchan):
                img[:,:,chan] = np.rot90(img[:,:,chan], k=3)
            grp['data'][i, :,:,:] = img
        dimlabels = ['frames', 'xpixels', 'ypixels', 'chan']
        grp['data'].attrs['dimensions'] = [s.encode('utf-8') for s in dimlabels]
        #Write the attrs dictionary into attributes of the new data group
        hdftools.writeAttrs(attrs, grp)
        #Create the axes (plain index axes; units intentionally blank)
        grp.require_dataset('frames', (nframes,), np.float32, chunks=True)[:] = np.arange(nframes)
        grp['frames'].attrs['unit'] = ''
        grp.require_dataset('xpixels', (nxpx,), np.float32, chunks=True)[:] = np.arange(nxpx)
        grp['xpixels'].attrs['unit'] = ''
        grp.require_dataset('ypixels', (nypx,), np.float32, chunks=True)[:] = np.arange(nypx)
        grp['ypixels'].attrs['unit'] = ''
        grp.require_dataset('chan', (nchan,), np.float32, chunks=True)[:] = np.arange(nchan)
        grp['chan'].attrs['unit'] = ''
    return dest
if __name__ == "__main__":
#data_dir = os.path.join("F:","LAPD_Jul2019")
data_dir = os.path.join("/Volumes", "PVH_DATA","LAPD_Sept2019")
img_dir = os.path.join(data_dir, 'PIMAX')
dest = hdftools.hdfPath(os.path.join(data_dir ,"RAW", "run22.01_pimax4.hdf5"))
csv_dir = os.path.join(data_dir,"METADATA")
run = 22.01
probe = 'pimax4'
imgDirToRaw(run, probe, img_dir, dest, csv_dir, verbose=False) | [
"uclahedp.tools.csv.missingKeys",
"uclahedp.tools.csv.getAllAttrs",
"os.remove",
"h5py.File",
"re.split",
"uclahedp.tools.hdf.writeAttrs",
"os.walk",
"os.path.exists",
"PIL.Image.open",
"uclahedp.tools.util.timeRemaining",
"numpy.rot90",
"numpy.arange",
"numpy.reshape",
"uclahedp.tools.hdf... | [((813, 854), 'uclahedp.tools.csv.getAllAttrs', 'csvtools.getAllAttrs', (['csv_dir', 'run', 'probe'], {}), '(csv_dir, run, probe)\n', (833, 854), True, 'from uclahedp.tools import csv as csvtools\n'), ((959, 1014), 'uclahedp.tools.csv.missingKeys', 'csvtools.missingKeys', (['attrs', 'req_keys'], {'fatal_error': '(True)'}), '(attrs, req_keys, fatal_error=True)\n', (979, 1014), True, 'from uclahedp.tools import csv as csvtools\n'), ((1073, 1106), 'os.path.join', 'os.path.join', (['img_dir', 'run_folder'], {}), '(img_dir, run_folder)\n', (1085, 1106), False, 'import os\n'), ((1222, 1234), 'os.walk', 'os.walk', (['src'], {}), '(src)\n', (1229, 1234), False, 'import os\n'), ((1580, 1605), 'os.path.exists', 'os.path.exists', (['dest.file'], {}), '(dest.file)\n', (1594, 1605), False, 'import os\n'), ((4221, 4274), 'os.path.join', 'os.path.join', (['"""/Volumes"""', '"""PVH_DATA"""', '"""LAPD_Sept2019"""'], {}), "('/Volumes', 'PVH_DATA', 'LAPD_Sept2019')\n", (4233, 4274), False, 'import os\n'), ((4307, 4338), 'os.path.join', 'os.path.join', (['data_dir', '"""PIMAX"""'], {}), "(data_dir, 'PIMAX')\n", (4319, 4338), False, 'import os\n'), ((4438, 4472), 'os.path.join', 'os.path.join', (['data_dir', '"""METADATA"""'], {}), "(data_dir, 'METADATA')\n", (4450, 4472), False, 'import os\n'), ((1615, 1635), 'os.remove', 'os.remove', (['dest.file'], {}), '(dest.file)\n', (1624, 1635), False, 'import os\n'), ((1686, 1711), 'h5py.File', 'h5py.File', (['dest.file', '"""a"""'], {}), "(dest.file, 'a')\n", (1695, 1711), False, 'import h5py, PIL\n'), ((1862, 1889), 'PIL.Image.open', 'PIL.Image.open', (['imgfiles[0]'], {}), '(imgfiles[0])\n', (1876, 1889), False, 'import h5py, PIL\n'), ((2735, 2777), 'uclahedp.tools.util.timeRemaining', 'util.timeRemaining', (['nframes'], {'reportevery': '(5)'}), '(nframes, reportevery=5)\n', (2753, 2777), False, 'from uclahedp.tools import util\n'), ((3463, 3494), 'uclahedp.tools.hdf.writeAttrs', 'hdftools.writeAttrs', (['attrs', 
'grp'], {}), '(attrs, grp)\n', (3482, 3494), True, 'from uclahedp.tools import hdf as hdftools\n'), ((3601, 3619), 'numpy.arange', 'np.arange', (['nframes'], {}), '(nframes)\n', (3610, 3619), True, 'import numpy as np\n'), ((3748, 3763), 'numpy.arange', 'np.arange', (['nxpx'], {}), '(nxpx)\n', (3757, 3763), True, 'import numpy as np\n'), ((3893, 3908), 'numpy.arange', 'np.arange', (['nypx'], {}), '(nypx)\n', (3902, 3908), True, 'import numpy as np\n'), ((4037, 4053), 'numpy.arange', 'np.arange', (['nchan'], {}), '(nchan)\n', (4046, 4053), True, 'import numpy as np\n'), ((4368, 4421), 'os.path.join', 'os.path.join', (['data_dir', '"""RAW"""', '"""run22.01_pimax4.hdf5"""'], {}), "(data_dir, 'RAW', 'run22.01_pimax4.hdf5')\n", (4380, 4421), False, 'import os\n'), ((2163, 2192), 'uclahedp.tools.hdf.hdfGroupExists', 'hdftools.hdfGroupExists', (['dest'], {}), '(dest)\n', (2186, 2192), True, 'from uclahedp.tools import hdf as hdftools\n'), ((2982, 3018), 'numpy.reshape', 'np.reshape', (['img', '[nxpx, nypx, nchan]'], {}), '(img, [nxpx, nypx, nchan])\n', (2992, 3018), True, 'import numpy as np\n'), ((617, 642), 're.split', 're.split', (['"""([0-9]+)"""', 'key'], {}), "('([0-9]+)', key)\n", (625, 642), False, 'import re\n'), ((1376, 1399), 'os.path.join', 'os.path.join', (['src', 'file'], {}), '(src, file)\n', (1388, 1399), False, 'import os\n'), ((2932, 2949), 'PIL.Image.open', 'PIL.Image.open', (['f'], {}), '(f)\n', (2946, 2949), False, 'import h5py, PIL\n'), ((3125, 3155), 'numpy.rot90', 'np.rot90', (['img[:, :, chan]'], {'k': '(3)'}), '(img[:, :, chan], k=3)\n', (3133, 3155), True, 'import numpy as np\n')] |
#persistence_plot_widget.py
import time
import collections
from PySide import QtGui
from PySide import QtCore
import numpy as np
import pyqtgraph as pg
from waterfall_widget import WaterfallModel
#inject a familiar color scheme into pyqtgraph...
# - this makes it available in the stock gradient editor schemes.
# - we also want it at the top of the gradient editors... there's no stock
#   way in python to insert at the top of an ordereddict, so we rebuild it.
newGradients = collections.OrderedDict()
newGradients["rycb"] = {'ticks': [(0.00, (  0,   0,   0, 255)),
                                  (0.15, (  0,   0, 255, 255)),
                                  (0.33, (  0, 255, 255, 255)),
                                  (0.66, (255, 255,   0, 255)),
                                  (1.00, (255,   0,   0, 255))],
                        'mode': 'rgb'}
# PORTABILITY FIX: dict.iteritems() is Python-2-only; items() iterates the
# same pairs on both Python 2 and 3.
for k, v in pg.graphicsItems.GradientEditorItem.Gradients.items():
    newGradients[k] = v
pg.graphicsItems.GradientEditorItem.Gradients = newGradients
# Decay-timing identifiers: decay may be driven per data row or by wall time.
DECAY_TYPE_LINEAR_WITH_DATA = "linear_data_decay"
DECAY_WITH_DATA = "decay_with_data"
DECAY_WITH_TIME = "decay_with_time"
ALL_DECAY_TIMING = [DECAY_WITH_DATA,
                    #DECAY_WITH_TIME,
                    ]
def decay_fn_EXPONENTIAL(t_now, t_prev, decay_args, img_data):
    """Exponentially decay img_data in place over the elapsed ticks.

    't' is either wall time or an array count -- whichever unit the caller
    tracks ("ticks"); decay_args[0] is the half-life in those same units.
    Returns the (mutated) img_data for convenience.
    """
    half_life = decay_args[0]
    elapsed_ticks = float(t_now - t_prev)
    # Halve the image intensity once per half-life of elapsed ticks.
    img_data *= 0.5 ** (elapsed_ticks / half_life)
    return img_data
def decay_fn_LINEAR(t_now, t_prev, decay_args, img_data):
    """Linearly decay img_data in place over the elapsed ticks.

    't' is either wall time or an array count ("ticks"). decay_args[0] is a
    half-life, which is an odd fit for linear decay, but matches the
    exponential variant's interface: full decay takes 2 * half_life ticks.
    The caller clips any resulting negative values.
    """
    half_life = decay_args[0]
    # A unit of intensity drains completely over 2 * half_life ticks.
    per_tick = 1.0 / (2 * half_life)
    img_data -= (t_now - t_prev) * per_tick
    return img_data
def rgba_tuple_to_int(rgba_tuple):
    """Pack a 4-element (R, G, B, A) byte tuple into a single uint32.

    The four uint8 values are reinterpreted in place (native byte order),
    matching how 32-bit QImage pixel words are compared elsewhere.
    """
    packed_bytes = np.asarray(rgba_tuple, dtype=np.uint8)
    return packed_bytes.view(np.uint32)[0]
class _PersistentImage(pg.ImageItem):
    """This subclass exists solely to set the alpha on the background color to
    zero so that rendering looks correct (with gridlines and such) in the final
    plot.

    This is a hack in order to preserve the convenient use of pyqtgraph's
    setImage function, which makes great use of fn.makeARGB() and the lut usage
    (unfortunately the lut does not currently support alpha).

    NOTE: originally I was thinking on using zorder (zValue) instead of
    background transparency, and drawing the image before the gridlines.
    However, it was proving very difficult/annoying to get this to happen, and
    would also have resulted in gridlines on top of the image, so a transparent
    background approach was chosen instead.
    """
    def __init__(self, bg_color):
        """Create the image item.

        bg_color is an (r, g, b) 3-tuple (no alpha); pixels rendered in
        exactly this color get their alpha forced to zero in render().
        """
        super(_PersistentImage, self).__init__()
        assert len(bg_color) == 3 #no alpha
        rgba_match = bg_color + (255, )
        rgba_no_alpha = bg_color + (0, )
        # Packed 32-bit pixel words: the fully-opaque background color to
        # search for, and its fully-transparent replacement.
        self._rgba_match = rgba_tuple_to_int(rgba_match)
        self._bg_no_alpha = rgba_tuple_to_int(rgba_no_alpha)
    def render(self, *args, **kwargs):
        """Render normally, then knock the alpha out of background pixels."""
        super(_PersistentImage, self).render(*args, **kwargs)
        #fully rendered array->image now exists in self.qimage. We want to
        #assign alpha=0 to all pixels that have the background color.
        #View the image as a numpy array again...
        ptr = self.qimage.constBits()
        w = self.qimage.width()
        h = self.qimage.height()
        # NOTE(review): np.fromstring is deprecated in newer numpy releases
        # (np.frombuffer is the replacement) -- fine on the numpy this code
        # was written against.
        img_array = np.fromstring(ptr, dtype = np.uint32, count=(w*h))
        #knock out the alpha for anywhere where there is a bg color...
        img_array[img_array == self._rgba_match] = self._bg_no_alpha
        #convert back to an image...
        img_array = img_array.view(np.uint8).reshape((h, w, 4))
        self.qimage = pg.functions.makeQImage(img_array,
                                                alpha=True,
                                                transpose=False)
class PersistencePlotWidget(pg.PlotWidget):
    """Persistence plot widget: each new trace is blended onto a slowly
    decaying image of all previous traces (oscilloscope-style persistence)."""
    def __init__(self, parent=None, background='default',
                 decay_timing = DECAY_WITH_DATA,
                 decay_fn = decay_fn_LINEAR,
                 decay_args = [10, ], #10 arrays until 0.5 decay (20 for full)
                 data_model = None, #a WaterfallModel (for now)
                 **kargs):
        # NOTE(review): decay_args=[10, ] is a mutable default argument --
        # shared across instances if any caller mutates it. Confirm no
        # caller does before changing.
        self._init_complete = False #base class init below triggers a lot
        pg.PlotWidget.__init__(self, parent, background, **kargs)
        #grab the rgb of the background color for palette matching later...
        self._bg_color = self.backgroundBrush().color().toTuple()[:3]
        self.setMenuEnabled(False)
        # Persistence plots are not meant to be panned/zoomed by the mouse.
        self.plotItem.getViewBox().setMouseEnabled(x = False, y = False)
        if decay_timing not in ALL_DECAY_TIMING:
            raise ValueError("Unsupported decay timing: %s" % decay_timing)
        self._decay_timing = decay_timing
        self._decay_fn = decay_fn
        self._decay_args = decay_args
        self._data_model = data_model
        self._persistent_img = None #the persistent image
        self._img_array = None #the persistent data (same shape as image)
        #The value of self._prev_t doesn't matter for the first round since the
        #first plot has nothing to decay...
        self._prev_t = 0
        #We will always have a gradient editor for providing our LUT, but it
        #may not be visible. It can be referenced for layout, though... just grab
        #it after initializing the PersistencePlotWidget.
        self.gradient_editor = pg.GradientWidget(parent = self,
                                                orientation = "left")
        self.gradient_editor.setStyleSheet('background-color: black')
        self.gradient_editor.loadPreset("rycb") #we injected this scheme
        self.gradient_editor.sigGradientChanged.connect(self._onGradientChange)
        self._LUT_PTS = 256
        self._latest_lut = self._get_lut()
        if self._data_model:
            assert isinstance(data_model, WaterfallModel)
            try:
                self._data_model.sigNewDataRow.connect(self.onNewModelData)
            except AttributeError:
                raise ValueError("data_model must be a WaterfallModel") #for now
        self._reset_requested = False
        self._init_complete = True
def _onGradientChange(self):
if self._persistent_img:
self._persistent_img.setLookupTable(self._get_lut())
def onNewModelData(self, data):
(time_s, y_data, metadata) = data
x_data = self._data_model.get_x_data()
self.plot(x = x_data, y = y_data)
def _get_lut(self):
lut = self.gradient_editor.getLookupTable(self._LUT_PTS)
#make sure that the lowest value drops to the background...
lut[0] = self._bg_color
self._latest_lut = lut
return lut
    def _InitPersistentImage(self):
        """Create the persistent image item, or blank the existing one.

        Called on first use and whenever a reset has been requested.
        """
        if self._persistent_img is None:
            self._persistent_img = _PersistentImage(self._bg_color)
            self._persistent_img.setLookupTable(self._get_lut())
        else:
            #if we already have a persistent image, we need to explicitly clear
            #it due to pytgraph/PySide/Qt's (?) frustrating memory preservation
            #(somehow)...
            if self._persistent_img.qimage:
                # Fill with the fully-opaque packed background color.
                bg = rgba_tuple_to_int(self._bg_color + (255, ))
                self._persistent_img.qimage.fill(bg)
    def _UpdatePersistentImage(self):
        """Core of the persistence effect.

        Renders the current plot to an offscreen image, crops out the plot
        area, decays the accumulated float image by the elapsed "ticks",
        blends the new trace in, and re-draws the persistent image over
        the exact data range of the plot.
        """
        #safety check: if we have zero height or width we can't make an image...
        if min(self.size().toTuple()) <= 0:
            return
        #Make sure we have an image to start with!
        if (self._persistent_img is None) or self._reset_requested:
            self._InitPersistentImage()
            self._reset_requested = False
        #Make a temporary blank image canvas to render the new plot to...
        img_size = self.size()
        #img_size = self.plotItem.vb.size().toSize()
        tmp_plt_img = QtGui.QImage(img_size, QtGui.QImage.Format_RGB32)
        #Draw the new plot to the temporary image...
        # - assumes it has already been plotted correctly (with plot())
        painter = QtGui.QPainter(tmp_plt_img)
        self.render(painter)
        #Now crop out the plot area...
        # - we only decay the plot area itself
        crop_rect = self.plotItem.vb.geometry().toRect()
        cropped_img = tmp_plt_img.copy(crop_rect)
        #Get a pointer to the start of the 32-bit (RGB32) image data...
        ptr = cropped_img.constBits()
        #Convert the image array to a numpy array...
        # NOTE(review): np.fromstring is deprecated in newer numpy
        # (np.frombuffer is the replacement).
        w = cropped_img.width()
        h = cropped_img.height()
        new_img_array = np.fromstring(ptr,
                                      dtype = np.int32,
                                      count=(w*h))
        new_img_array = new_img_array.reshape(h, w)
        #Get rid of the temporary QPainter and QImage...
        # - removing the painter explicitly resolves a troubling segfault
        # issue that Qt/PySide was having.
        del painter # <-- segfaults happen without this!
        del tmp_plt_img
        del cropped_img
        #Fix the array orientation...
        new_img_array = np.rot90(new_img_array, 3)
        #Normalize the array (0->1) for easy color scaling...
        new_img_array -= new_img_array.min()
        new_img_array /= new_img_array.max()
        #new_img_array *= (new_img_array > 0.5) #deletes old records
        #Figure out what period we are decaying over...
        # - DECAY_WITH_DATA counts plotted arrays; otherwise wall time is used.
        t_prev = self._prev_t
        if self._decay_timing == DECAY_WITH_DATA:
            t_now = self._prev_t + 1
        else:
            t_now = time.time()
        self._prev_t = t_now
        #Initialize the persistent image if it does not exist...
        if self._img_array is None:
            self._img_array = np.zeros((w, h))
        else:
            #decay the old image...
            self._img_array = self._decay_fn(t_now,
                                             t_prev,
                                             self._decay_args,
                                             self._img_array)
        #Add the shiny new signal...
        self._img_array += new_img_array
        #Ensure we don't oversaturate...
        # - also clips any negatives produced by the linear decay function.
        self._img_array = self._img_array.clip(0, 1)
        self._persistent_img.setImage(self._img_array)
        #Get the exact range in use by the plot to avoid image scaling...
        # - there is probably a cleaner way to get this vs digging down to the
        #   ViewBox state, but I can't find it at the moment.
        # - note that below we're using the (left_x, top_y, width, height) version
        #   of the QRectF constructor.
        # - top *should* be ymax... but only ymin works. This is odd!!
        (xmin, xmax), (ymin, ymax) = self.plotItem.vb.state["viewRange"]
        x = xmin
        y = ymin #should be ymax (!? - see above)
        width = (xmax - xmin)
        height = (ymax - ymin)
        #we clear every time, so need to re-add the image every time...
        self.addItem(self._persistent_img, ignoreBounds = True)
        self._persistent_img.setRect(pg.QtCore.QRectF(x, y, width, height))
def plot(self, *args, **kwargs):
    """Clear-and-plot, then refresh the persistence image.

    pyqtgraph's PlotWidget forwards attribute access to its PlotItem via
    __getattr__, which breaks inheritance, so the PlotItem is called
    directly here.
    """
    kwargs["clear"] = True
    result = self.plotItem.plot(*args, **kwargs)
    self._UpdatePersistentImage()
    return result
def resizeEvent(self, event):
    """Qt resize hook: restart persistence accumulation from scratch.

    The decay history lives entirely in a fixed-size image, so a resize
    invalidates it; the base-class handler then performs the normal
    widget resize.
    """
    #Our persistence is entirely contained within the image, so on resize
    #we can only really restart from scratch...
    self.reset_plot()
    super(PersistencePlotWidget, self).resizeEvent(event)
def reset_plot(self):
    """Flag the persistence image for a rebuild and drop the accumulator.

    No-op until construction has finished (``_init_complete``).
    """
    if self._init_complete:
        self._reset_requested = True
        self._img_array = None
| [
"pyqtgraph.functions.makeQImage",
"pyqtgraph.GradientWidget",
"pyqtgraph.QtCore.QRectF",
"numpy.zeros",
"time.time",
"PySide.QtGui.QPainter",
"PySide.QtGui.QImage",
"numpy.rot90",
"numpy.array",
"collections.OrderedDict",
"pyqtgraph.PlotWidget.__init__",
"numpy.fromstring",
"pyqtgraph.graphi... | [((483, 508), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (506, 508), False, 'import collections\n'), ((876, 933), 'pyqtgraph.graphicsItems.GradientEditorItem.Gradients.iteritems', 'pg.graphicsItems.GradientEditorItem.Gradients.iteritems', ([], {}), '()\n', (931, 933), True, 'import pyqtgraph as pg\n'), ((3796, 3844), 'numpy.fromstring', 'np.fromstring', (['ptr'], {'dtype': 'np.uint32', 'count': '(w * h)'}), '(ptr, dtype=np.uint32, count=w * h)\n', (3809, 3844), True, 'import numpy as np\n'), ((4110, 4173), 'pyqtgraph.functions.makeQImage', 'pg.functions.makeQImage', (['img_array'], {'alpha': '(True)', 'transpose': '(False)'}), '(img_array, alpha=True, transpose=False)\n', (4133, 4173), True, 'import pyqtgraph as pg\n'), ((4751, 4808), 'pyqtgraph.PlotWidget.__init__', 'pg.PlotWidget.__init__', (['self', 'parent', 'background'], {}), '(self, parent, background, **kargs)\n', (4773, 4808), True, 'import pyqtgraph as pg\n'), ((5908, 5958), 'pyqtgraph.GradientWidget', 'pg.GradientWidget', ([], {'parent': 'self', 'orientation': '"""left"""'}), "(parent=self, orientation='left')\n", (5925, 5958), True, 'import pyqtgraph as pg\n'), ((8426, 8475), 'PySide.QtGui.QImage', 'QtGui.QImage', (['img_size', 'QtGui.QImage.Format_RGB32'], {}), '(img_size, QtGui.QImage.Format_RGB32)\n', (8438, 8475), False, 'from PySide import QtGui\n'), ((8628, 8655), 'PySide.QtGui.QPainter', 'QtGui.QPainter', (['tmp_plt_img'], {}), '(tmp_plt_img)\n', (8642, 8655), False, 'from PySide import QtGui\n'), ((9156, 9203), 'numpy.fromstring', 'np.fromstring', (['ptr'], {'dtype': 'np.int32', 'count': '(w * h)'}), '(ptr, dtype=np.int32, count=w * h)\n', (9169, 9203), True, 'import numpy as np\n'), ((9696, 9722), 'numpy.rot90', 'np.rot90', (['new_img_array', '(3)'], {}), '(new_img_array, 3)\n', (9704, 9722), True, 'import numpy as np\n'), ((10178, 10189), 'time.time', 'time.time', ([], {}), '()\n', (10187, 10189), False, 'import time\n'), ((10360, 10376), 
'numpy.zeros', 'np.zeros', (['(w, h)'], {}), '((w, h))\n', (10368, 10376), True, 'import numpy as np\n'), ((11719, 11756), 'pyqtgraph.QtCore.QRectF', 'pg.QtCore.QRectF', (['x', 'y', 'width', 'height'], {}), '(x, y, width, height)\n', (11735, 11756), True, 'import pyqtgraph as pg\n'), ((2187, 2217), 'numpy.array', 'np.array', (['rgba_tuple', 'np.uint8'], {}), '(rgba_tuple, np.uint8)\n', (2195, 2217), True, 'import numpy as np\n')] |
from utilities import get_spherical_distance
import uuid
import os
import numpy as np
from settings import *
def create_skeleton(gps, skel_folder='skeletons'):
    """Subsample a GPS trail into a "skeleton" file of waypoints.

    Walks consecutive GPS fixes, writing a point every time at least
    ``skip_rate`` of distance has accumulated, and creates a per-route
    folder under ``data_location``/routes.

    :param gps: sequence of (lat, lon) pairs.
    :param skel_folder: directory the skeleton file is written into.
    :return: the route id (str), or None if the trail is shorter than
        ``min_route_length`` (the skeleton file is removed in that case).
    """
    accumulated = 0
    trail_length = 0
    # 'route_id' instead of 'id': avoids shadowing the builtin id().
    route_id = uuid.uuid4()
    path = os.path.join(skel_folder, str(route_id))
    print(path)
    # Context manager guarantees the file is closed even if
    # get_spherical_distance raises part-way through the trail.
    with open(path, 'w') as skel_file:
        for i in range(len(gps) - 1):
            step = get_spherical_distance(gps[i][0], gps[i][1],
                                           gps[i + 1][0], gps[i + 1][1])
            accumulated += step
            trail_length += step
            if accumulated > skip_rate:
                skel_file.write("{},{}\n".format(gps[i + 1][0], gps[i + 1][1]))
                accumulated = 0
    routes_folder = os.path.join(data_location, 'routes')
    os.makedirs(os.path.join(routes_folder, str(route_id)))
    if trail_length < min_route_length:
        os.remove(path)
        return None
    return str(route_id)
def create_skeleton_from_file_path(file_path, usecols=(0, 1), folder='skeletons'):
    """Load lat/lon columns from a CSV trail file and build its skeleton.

    :param file_path: path of the comma-separated trail file (one header row).
    :param usecols: column indices holding latitude and longitude.
    :param folder: directory passed through to create_skeleton().
    :return: the new route id, or None for trails shorter than the minimum.
    """
    # 'with' fixes the original resource leak: the file handle was opened
    # but never closed.
    with open(file_path, 'rb') as trail_file:
        gps = np.loadtxt(trail_file, delimiter=',', usecols=usecols, skiprows=1)
    return create_skeleton(gps, folder)
if __name__ == '__main__':
create_skeleton_from_file_path('up_1.txt') | [
"os.remove",
"uuid.uuid4",
"numpy.loadtxt",
"os.path.join",
"utilities.get_spherical_distance"
] | [((200, 212), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (210, 212), False, 'import uuid\n'), ((582, 619), 'os.path.join', 'os.path.join', (['data_location', '"""routes"""'], {}), "(data_location, 'routes')\n", (594, 619), False, 'import os\n'), ((926, 986), 'numpy.loadtxt', 'np.loadtxt', (['file'], {'delimiter': '""","""', 'usecols': 'usecols', 'skiprows': '(1)'}), "(file, delimiter=',', usecols=usecols, skiprows=1)\n", (936, 986), True, 'import numpy as np\n'), ((347, 421), 'utilities.get_spherical_distance', 'get_spherical_distance', (['gps[i][0]', 'gps[i][1]', 'gps[i + 1][0]', 'gps[i + 1][1]'], {}), '(gps[i][0], gps[i][1], gps[i + 1][0], gps[i + 1][1])\n', (369, 421), False, 'from utilities import get_spherical_distance\n'), ((755, 770), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (764, 770), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
from scipy.spatial import distance
from scipy.spatial.distance import euclidean
from fastdtw import fastdtw
sns.set()
def plot_dtw_dist(x, y, figsize=(12, 12), annotation=True, col_map="PuOr"):
    """Plot the pairwise-distance heatmap of two series with the DTW
    warping path highlighted in bold, plus the series themselves drawn
    along the top and left margins.

    Both inputs must be column arrays (shapes (n, 1) and (m, 1)).
    """
    assert x.shape[1] == 1, \
        "first array needs to be a column array of shape (n,1)"
    assert y.shape[1] == 1, \
        "second array needs to be a column array of shape (m,1)"
    _dist, path = fastdtw(x, y, dist=euclidean)
    len_x = len(x)
    len_y = len(y)
    # Binary mask of the warping path, flipped to match the heatmap layout.
    path_mask = np.zeros((len_x, len_y))
    for step in tuple(path)[::-1]:
        path_mask[step] = 1
    path_mask = np.transpose(path_mask)[::-1]
    dist_matrix = np.transpose(distance.cdist(x, y, 'euclidean'))[::-1]
    fig, ax_main = plt.subplots(figsize=figsize)
    # Full distance heatmap, then the path cells re-drawn in bold.
    sns.heatmap(dist_matrix, annot=annotation, ax=ax_main,
                cbar=False, cmap=col_map)
    sns.heatmap(dist_matrix, annot=annotation, ax=ax_main,
                cbar=False, mask=path_mask < 1,
                annot_kws={"weight": "bold"},
                cmap=sns.dark_palette((210, 90, 0), n_colors=2, input="husl"))
    divider = make_axes_locatable(ax_main)
    ax_top = divider.append_axes("top", 1, pad=0.1, sharex=ax_main)
    ax_left = divider.append_axes("left", 1, pad=0.1, sharey=ax_main)
    # Hide all tick labels: the marginal plots share the heatmap's axes.
    for axis in (ax_main, ax_top, ax_left):
        axis.xaxis.set_tick_params(labelbottom=False)
        axis.yaxis.set_tick_params(labelleft=False)
    ax_top.plot(x)
    ax_left.plot(y, range(len(y)))
    plt.show()
| [
"mpl_toolkits.axes_grid1.make_axes_locatable",
"scipy.spatial.distance.cdist",
"seaborn.heatmap",
"matplotlib.pyplot.show",
"seaborn.dark_palette",
"numpy.zeros",
"numpy.transpose",
"matplotlib.pyplot.subplots",
"fastdtw.fastdtw",
"seaborn.set"
] | [((239, 248), 'seaborn.set', 'sns.set', ([], {}), '()\n', (246, 248), True, 'import seaborn as sns\n'), ((624, 653), 'fastdtw.fastdtw', 'fastdtw', (['x', 'y'], {'dist': 'euclidean'}), '(x, y, dist=euclidean)\n', (631, 653), False, 'from fastdtw import fastdtw\n'), ((729, 771), 'numpy.zeros', 'np.zeros', (['(n_timestamps_1, n_timestamps_2)'], {}), '((n_timestamps_1, n_timestamps_2))\n', (737, 771), True, 'import numpy as np\n'), ((976, 1005), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (988, 1005), True, 'import matplotlib.pyplot as plt\n'), ((1011, 1098), 'seaborn.heatmap', 'sns.heatmap', (['matrix_dist'], {'annot': 'annotation', 'ax': 'axScatter', 'cbar': '(False)', 'cmap': 'col_map'}), '(matrix_dist, annot=annotation, ax=axScatter, cbar=False, cmap=\n col_map)\n', (1022, 1098), True, 'import seaborn as sns\n'), ((1361, 1391), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['axScatter'], {}), '(axScatter)\n', (1380, 1391), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((1946, 1956), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1954, 1956), True, 'import matplotlib.pyplot as plt\n'), ((850, 875), 'numpy.transpose', 'np.transpose', (['matrix_path'], {}), '(matrix_path)\n', (862, 875), True, 'import numpy as np\n'), ((913, 946), 'scipy.spatial.distance.cdist', 'distance.cdist', (['x', 'y', '"""euclidean"""'], {}), "(x, y, 'euclidean')\n", (927, 946), False, 'from scipy.spatial import distance\n'), ((1288, 1344), 'seaborn.dark_palette', 'sns.dark_palette', (['(210, 90, 0)'], {'n_colors': '(2)', 'input': '"""husl"""'}), "((210, 90, 0), n_colors=2, input='husl')\n", (1304, 1344), True, 'import seaborn as sns\n')] |
"""
@author: <NAME> (2017, Vrije Universiteit Brussel)
Experiments for figure 4 in the paper.
"""
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
sys.path.insert(0, '.')
sys.path.insert(0, '..')
from gp_utilities import utils_experiment, utils_parameters
# Experiment configuration for the figure-4 runs.
start_seed = 13  # base RNG seed; bumped by one per repetition
num_queries = 25  # preference queries asked per experiment run
num_iter = 100  # repetitions averaged per curve
num_obj = 5  # number of objectives in the utility function
plt.figure(figsize=(10.3, 5))
plt_idx = 1  # 1-based subplot index, advanced once per curve group
# Sweep noise levels (only 0.01 for this figure).  For each of the three
# curve groups below, draw one subplot comparing prior / reference-point
# configurations averaged over all query types and repetitions.
for noise_level in [0.01]:
    plt.title('{} obj., {} noise, avg over {}'.format(num_obj, noise_level, num_iter))
    # Each inner list is one subplot: three [prior mean, [ref-min, ref-max]]
    # configurations plotted together.
    for s in [
        [
            ['linear-zero', [False, False]],
            ['linear', [False, False]],
            ['zero', [False, False]],
        ],
        [
            ['zero', ['beginning', 'beginning']],
            ['zero', ['full', 'full']],
            ['zero', [False, False]],
        ],
        [
            ['linear-zero', ['full', 'full']],
            ['linear-zero', [False, False]],
            ['zero', ['full', 'full']],
        ],
    ]:
        for [prior_type, reference_points] in s:
            plt.subplot(1, 3, plt_idx)
            # 5 is the number of diff query types;
            # 50 is the number of queries we ask
            all_query_types = ['pairwise', 'clustering', 'ranking', 'top_rank']
            utl_vals = np.zeros((len(all_query_types) * num_iter, num_queries))
            iter_idx = 0
            # Run num_iter seeded repetitions for every query type, collecting
            # one utility trajectory per run (seed increments each repetition).
            for query_type in all_query_types:
                params = utils_parameters.get_parameter_dict(query_type=query_type, num_objectives=num_obj, utility_noise=noise_level)
                params['reference min'] = reference_points[0]
                params['reference max'] = reference_points[1]
                params['gp prior mean'] = prior_type
                params['num queries'] = num_queries
                params['seed'] = start_seed
                for _ in range(num_iter):
                    experiment = utils_experiment.Experiment(params)
                    result = experiment.run(recalculate=False)
                    utl_vals[iter_idx] = result[0]
                    params['seed'] += 1
                    iter_idx += 1
            # Pick line style/color from the (prior, reference-point) combination.
            style = '-'
            color = 'limegreen'
            if params['gp prior mean'] == 'zero' and (params['reference min'] == False and params['reference max'] == False):
                color = 'black'
                style = '-'
            elif params['gp prior mean'] == 'linear' and (params['reference min'] == False and params['reference max'] == False):
                color = 'maroon'
                style = ':'
            elif params['gp prior mean'] == 'linear-zero' and (params['reference min'] == False and params['reference max'] == False):
                color = 'darkorange'
                style ='--'
            elif params['gp prior mean'] == 'zero' and (params['reference min'] == 'beginning' or params['reference max'] == 'beginning'):
                color = 'turquoise'
            elif params['gp prior mean'] == 'zero' and (params['reference min'] == 'full' or params['reference max'] == 'full'):
                color = 'royalblue'
                style = ':'
            elif params['gp prior mean'] == 'linear-zero' and (params['reference min'] == 'beginning' or params['reference max'] == 'beginning'):
                color = 'limegreen'
                style = '--'
            else:
                print("you forgot something....")
                print(params['gp prior mean'])
                print(params['reference min'])
            # Rewrite the raw config values into human-readable legend text.
            if params['gp prior mean'] == 'linear-zero':
                params['gp prior mean'] = 'lin. prior (start)'
            elif params['gp prior mean'] == 'linear':
                params['gp prior mean'] = 'lin. prior (full)'
            else:
                params['gp prior mean'] = 'zero prior'
            if params['reference min'] == 'beginning':
                params['reference min'] = 'start'
            if params['reference max'] == 'beginning':
                params['reference max'] = 'start'
            # Legend wording differs per subplot (prior only / refs only / both).
            if plt_idx == 1:
                label = '{}'.format(params['gp prior mean'], fontsize=15)
            elif plt_idx == 2:
                if params['reference min'] or params['reference max']:
                    label = 'ref. points ({})'.format(params['reference min']) if params['reference max']==False else 'ref. points ({})'.format(params['reference max'])
                else:
                    label = 'no ref. points'
            else:
                if params['reference min'] != False or params['reference max'] != False:
                    label = '{}, \nref. points ({})'.format(params['gp prior mean'], params['reference min'])
                else:
                    label = '{}, \nno ref. points'.format(params['gp prior mean'], params['reference min'])
            plt.plot(range(1, num_queries+1), np.mean(utl_vals, axis=0), style, color=color, label=label, linewidth=3)
            # Only the leftmost subplot keeps y tick labels and the axis label.
            if plt_idx > 1:
                plt.yticks([])
            else:
                plt.yticks([0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], fontsize=15)
                plt.ylabel('utility', fontsize=20)
        plt.ylim([0.4, 0.95])
        plt.xlim([1, num_queries])
        plt.xticks([1, 5, 10, 15, 20, 25], fontsize=15)
        plt.xlabel('query', fontsize=20)
        plt.legend(fontsize=13, loc=4)
        plt.gca().set_ylim(top=1.0)
        plt_idx += 1
# Finalize layout and persist the figure to disk.
plt.tight_layout(rect=(-0.015, -0.02, 1.015, 1.02))
dir_plots = './result_plots'
# makedirs(..., exist_ok=True) replaces the racy exists()/mkdir() pair:
# it cannot fail if another process creates the folder in between.
os.makedirs(dir_plots, exist_ok=True)
plt.savefig(os.path.join(dir_plots, 'mono_prior+refpoints_{}'.format(num_iter)))
plt.show()
| [
"os.mkdir",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"os.path.exists",
"sys.path.insert",
"gp_utilities.utils_experiment.Experiment",
"matplotlib.pyplot.figure",
"numpy.mea... | [((171, 194), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""."""'], {}), "(0, '.')\n", (186, 194), False, 'import sys\n'), ((195, 219), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (210, 219), False, 'import sys\n'), ((342, 371), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10.3, 5)'}), '(figsize=(10.3, 5))\n', (352, 371), True, 'import matplotlib.pyplot as plt\n'), ((5355, 5406), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '(-0.015, -0.02, 1.015, 1.02)'}), '(rect=(-0.015, -0.02, 1.015, 1.02))\n', (5371, 5406), True, 'import matplotlib.pyplot as plt\n'), ((5575, 5585), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5583, 5585), True, 'import matplotlib.pyplot as plt\n'), ((5443, 5468), 'os.path.exists', 'os.path.exists', (['dir_plots'], {}), '(dir_plots)\n', (5457, 5468), False, 'import os\n'), ((5474, 5493), 'os.mkdir', 'os.mkdir', (['dir_plots'], {}), '(dir_plots)\n', (5482, 5493), False, 'import os\n'), ((5104, 5125), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.4, 0.95]'], {}), '([0.4, 0.95])\n', (5112, 5125), True, 'import matplotlib.pyplot as plt\n'), ((5134, 5160), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[1, num_queries]'], {}), '([1, num_queries])\n', (5142, 5160), True, 'import matplotlib.pyplot as plt\n'), ((5169, 5216), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[1, 5, 10, 15, 20, 25]'], {'fontsize': '(15)'}), '([1, 5, 10, 15, 20, 25], fontsize=15)\n', (5179, 5216), True, 'import matplotlib.pyplot as plt\n'), ((5225, 5257), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""query"""'], {'fontsize': '(20)'}), "('query', fontsize=20)\n", (5235, 5257), True, 'import matplotlib.pyplot as plt\n'), ((5266, 5296), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(13)', 'loc': '(4)'}), '(fontsize=13, loc=4)\n', (5276, 5296), True, 'import matplotlib.pyplot as plt\n'), ((1031, 1057), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', 
'plt_idx'], {}), '(1, 3, plt_idx)\n', (1042, 1057), True, 'import matplotlib.pyplot as plt\n'), ((4946, 4960), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4956, 4960), True, 'import matplotlib.pyplot as plt\n'), ((4987, 5047), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]'], {'fontsize': '(15)'}), '([0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], fontsize=15)\n', (4997, 5047), True, 'import matplotlib.pyplot as plt\n'), ((5060, 5094), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""utility"""'], {'fontsize': '(20)'}), "('utility', fontsize=20)\n", (5070, 5094), True, 'import matplotlib.pyplot as plt\n'), ((1418, 1532), 'gp_utilities.utils_parameters.get_parameter_dict', 'utils_parameters.get_parameter_dict', ([], {'query_type': 'query_type', 'num_objectives': 'num_obj', 'utility_noise': 'noise_level'}), '(query_type=query_type, num_objectives=\n num_obj, utility_noise=noise_level)\n', (1453, 1532), False, 'from gp_utilities import utils_experiment, utils_parameters\n'), ((4836, 4861), 'numpy.mean', 'np.mean', (['utl_vals'], {'axis': '(0)'}), '(utl_vals, axis=0)\n', (4843, 4861), True, 'import numpy as np\n'), ((5305, 5314), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5312, 5314), True, 'import matplotlib.pyplot as plt\n'), ((1879, 1914), 'gp_utilities.utils_experiment.Experiment', 'utils_experiment.Experiment', (['params'], {}), '(params)\n', (1906, 1914), False, 'from gp_utilities import utils_experiment, utils_parameters\n')] |
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
# --- Parse the measured coverage percentages -------------------------------
# Each tab-separated line of output_c3.txt: "<prefix>_<condition>.TIF",
# two unused fields, then the covered fraction in field 3.
# np.chararray is deprecated; a plain list holds the labels just as well,
# and 'with' closes the file handle (the original leaked it).
names = []
percent = np.zeros(24)
with open('output_c3.txt') as results:
    for i, line in enumerate(results):
        fields = line.split('\t')
        stem = fields[0]
        # Bug fix: the original .strip('.TIF') strips any of the characters
        # '.', 'T', 'I', 'F' from BOTH ends of the name, not the literal
        # suffix; remove the extension explicitly instead.
        if stem.endswith('.TIF'):
            stem = stem[:-len('.TIF')]
        condition = stem.split('_')[1]
        names.append(condition)
        percent[i] = float(fields[3])
        print(condition, percent[i])

# --- Group replicate pairs by treatment ------------------------------------
# (The original assigned these twice; the first, odd-index set was dead
# code, immediately overwritten by the even-index set kept here.)
PTX20 = percent[[0, 12]]
PTX10 = percent[[2, 14]]
TGFB2 = percent[[4, 16]]
TGFB1 = percent[[6, 18]]
untreated = percent[[8, 20]]
stopper = percent[[10, 22]]

# --- Bar chart with individual replicate points ----------------------------
x = np.array([1, 2, 3, 4, 5, 6])
width = 0.8
groups = [untreated, stopper, TGFB1, TGFB2, PTX10, PTX20]
means = [np.average(g) for g in groups]
var = [np.var(g) for g in groups]
sem = [stats.sem(g) for g in groups]

# Overlay both replicates of each condition as black points inside its bar.
for i in [0, 1]:
    for pos, g in zip(x, groups):
        plt.errorbar(x=pos + width * 0.125 + width * (i * 0.25), y=g[i], fmt='ko')

# plt.bar's 'left' keyword was renamed 'x' in matplotlib 2.2 and removed in
# 3.0; pass the positions positionally.
plt.bar(x, height=means, width=width)
ax = plt.gca()
ax.set_xticks(x)
ax.set_xticklabels(('Untreated', 'Stopper', 'TGFB1', 'TGFB2', 'PTX10', 'PTX20'))
# xmin/xmax keywords of plt.xlim were removed; pass the limits positionally.
plt.xlim(0.2, 6 + width)
vals = ax.get_yticks()
# Loop variable renamed: the original comprehension shadowed 'x'.
ax.set_yticklabels(['{:4.0f}%'.format(v) for v in vals])
plt.ylabel('Migration region covered by cells')
plt.xlabel('Experimental condition')
plt.savefig('comparison.png')

# --- Welch's unequal-variance t-tests against the untreated control --------
# Group order: 0=untreated 1=stopper 2=TGFB1 3=TGFB2 4=PTX10 5=PTX20
n = 2  # replicates per condition

def welch_vs_untreated(idx, mean_diff):
    """One-sided Welch t-test of group `idx` against untreated (group 0).

    `mean_diff` carries the sign convention of the original script
    (control - treatment for TGFB, treatment - control for PTX).
    Returns (SE, DF, t, p).
    """
    se = np.sqrt(var[0] / n + var[idx] / n)
    # Welch-Satterthwaite degrees of freedom, rounded as before.
    df = np.round((var[0] / n + var[idx] / n) ** 2 /
                  ((var[0] / n) ** 2 / 3 + (var[idx] / n) ** 2 / 3))
    t = mean_diff / se
    return se, df, t, stats.t.cdf(x=t, df=df)

# The four comparisons were copy-pasted in the original; a single helper
# removes the duplication while printing the same lines in the same order.
for cond_name, idx, diff in [
    ('TGFB1', 2, means[0] - means[2]),
    ('TGFB2', 3, means[0] - means[3]),
    ('PTX10', 4, means[4] - means[0]),
    ('PTX20', 5, means[5] - means[0]),
]:
    se, df, t, p = welch_vs_untreated(idx, diff)
    print(cond_name)
    print(se, df, t, p)

print(means)
print(sem)
| [
"matplotlib.pyplot.xlim",
"numpy.average",
"matplotlib.pyplot.bar",
"numpy.zeros",
"matplotlib.pyplot.ylabel",
"numpy.var",
"matplotlib.pyplot.errorbar",
"numpy.array",
"scipy.stats.sem",
"matplotlib.pyplot.gca",
"numpy.chararray",
"matplotlib.pyplot.xlabel",
"numpy.round",
"scipy.stats.t.... | [((85, 128), 'numpy.chararray', 'np.chararray', (['(24)'], {'unicode': '(True)', 'itemsize': '(15)'}), '(24, unicode=True, itemsize=15)\n', (97, 128), True, 'import numpy as np\n'), ((139, 151), 'numpy.zeros', 'np.zeros', (['(24)'], {}), '(24)\n', (147, 151), True, 'import numpy as np\n'), ((756, 784), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (764, 784), True, 'import numpy as np\n'), ((1621, 1663), 'matplotlib.pyplot.bar', 'plt.bar', ([], {'left': 'x', 'height': 'means', 'width': 'width'}), '(left=x, height=means, width=width)\n', (1628, 1663), True, 'import matplotlib.pyplot as plt\n'), ((1667, 1676), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1674, 1676), True, 'import matplotlib.pyplot as plt\n'), ((1773, 1807), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'xmin': '(0.2)', 'xmax': '(6 + width)'}), '(xmin=0.2, xmax=6 + width)\n', (1781, 1807), True, 'import matplotlib.pyplot as plt\n'), ((1888, 1935), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Migration region covered by cells"""'], {}), "('Migration region covered by cells')\n", (1898, 1935), True, 'import matplotlib.pyplot as plt\n'), ((1936, 1972), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Experimental condition"""'], {}), "('Experimental condition')\n", (1946, 1972), True, 'import matplotlib.pyplot as plt\n'), ((1973, 2002), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""comparison.png"""'], {}), "('comparison.png')\n", (1984, 2002), True, 'import matplotlib.pyplot as plt\n'), ((2095, 2127), 'numpy.sqrt', 'np.sqrt', (['(var[0] / n + var[2] / n)'], {}), '(var[0] / n + var[2] / n)\n', (2102, 2127), True, 'import numpy as np\n'), ((2133, 2227), 'numpy.round', 'np.round', (['((var[0] / n + var[2] / n) ** 2 / ((var[0] / n) ** 2 / 3 + (var[2] / n) ** \n 2 / 3))'], {}), '((var[0] / n + var[2] / n) ** 2 / ((var[0] / n) ** 2 / 3 + (var[2] /\n n) ** 2 / 3))\n', (2141, 2227), True, 'import numpy as np\n'), ((2255, 2290), 
'scipy.stats.t.cdf', 'stats.t.cdf', ([], {'x': 't_TGFB1', 'df': 'DF_TGFB1'}), '(x=t_TGFB1, df=DF_TGFB1)\n', (2266, 2290), False, 'from scipy import stats\n'), ((2358, 2390), 'numpy.sqrt', 'np.sqrt', (['(var[0] / n + var[3] / n)'], {}), '(var[0] / n + var[3] / n)\n', (2365, 2390), True, 'import numpy as np\n'), ((2396, 2490), 'numpy.round', 'np.round', (['((var[0] / n + var[3] / n) ** 2 / ((var[0] / n) ** 2 / 3 + (var[3] / n) ** \n 2 / 3))'], {}), '((var[0] / n + var[3] / n) ** 2 / ((var[0] / n) ** 2 / 3 + (var[3] /\n n) ** 2 / 3))\n', (2404, 2490), True, 'import numpy as np\n'), ((2518, 2553), 'scipy.stats.t.cdf', 'stats.t.cdf', ([], {'x': 't_TGFB2', 'df': 'DF_TGFB2'}), '(x=t_TGFB2, df=DF_TGFB2)\n', (2529, 2553), False, 'from scipy import stats\n'), ((2621, 2653), 'numpy.sqrt', 'np.sqrt', (['(var[0] / n + var[4] / n)'], {}), '(var[0] / n + var[4] / n)\n', (2628, 2653), True, 'import numpy as np\n'), ((2659, 2753), 'numpy.round', 'np.round', (['((var[0] / n + var[4] / n) ** 2 / ((var[0] / n) ** 2 / 3 + (var[4] / n) ** \n 2 / 3))'], {}), '((var[0] / n + var[4] / n) ** 2 / ((var[0] / n) ** 2 / 3 + (var[4] /\n n) ** 2 / 3))\n', (2667, 2753), True, 'import numpy as np\n'), ((2781, 2816), 'scipy.stats.t.cdf', 'stats.t.cdf', ([], {'x': 't_PTX10', 'df': 'DF_PTX10'}), '(x=t_PTX10, df=DF_PTX10)\n', (2792, 2816), False, 'from scipy import stats\n'), ((2884, 2916), 'numpy.sqrt', 'np.sqrt', (['(var[0] / n + var[5] / n)'], {}), '(var[0] / n + var[5] / n)\n', (2891, 2916), True, 'import numpy as np\n'), ((2922, 3016), 'numpy.round', 'np.round', (['((var[0] / n + var[5] / n) ** 2 / ((var[0] / n) ** 2 / 3 + (var[5] / n) ** \n 2 / 3))'], {}), '((var[0] / n + var[5] / n) ** 2 / ((var[0] / n) ** 2 / 3 + (var[5] /\n n) ** 2 / 3))\n', (2930, 3016), True, 'import numpy as np\n'), ((3044, 3079), 'scipy.stats.t.cdf', 'stats.t.cdf', ([], {'x': 't_PTX20', 'df': 'DF_PTX20'}), '(x=t_PTX20, df=DF_PTX20)\n', (3055, 3079), False, 'from scipy import stats\n'), ((801, 822), 'numpy.average', 
'np.average', (['untreated'], {}), '(untreated)\n', (811, 822), True, 'import numpy as np\n'), ((823, 842), 'numpy.average', 'np.average', (['stopper'], {}), '(stopper)\n', (833, 842), True, 'import numpy as np\n'), ((843, 860), 'numpy.average', 'np.average', (['TGFB1'], {}), '(TGFB1)\n', (853, 860), True, 'import numpy as np\n'), ((861, 878), 'numpy.average', 'np.average', (['TGFB2'], {}), '(TGFB2)\n', (871, 878), True, 'import numpy as np\n'), ((879, 896), 'numpy.average', 'np.average', (['PTX10'], {}), '(PTX10)\n', (889, 896), True, 'import numpy as np\n'), ((897, 914), 'numpy.average', 'np.average', (['PTX20'], {}), '(PTX20)\n', (907, 914), True, 'import numpy as np\n'), ((924, 941), 'numpy.var', 'np.var', (['untreated'], {}), '(untreated)\n', (930, 941), True, 'import numpy as np\n'), ((942, 957), 'numpy.var', 'np.var', (['stopper'], {}), '(stopper)\n', (948, 957), True, 'import numpy as np\n'), ((958, 971), 'numpy.var', 'np.var', (['TGFB1'], {}), '(TGFB1)\n', (964, 971), True, 'import numpy as np\n'), ((972, 985), 'numpy.var', 'np.var', (['TGFB2'], {}), '(TGFB2)\n', (978, 985), True, 'import numpy as np\n'), ((986, 999), 'numpy.var', 'np.var', (['PTX10'], {}), '(PTX10)\n', (992, 999), True, 'import numpy as np\n'), ((1000, 1013), 'numpy.var', 'np.var', (['PTX20'], {}), '(PTX20)\n', (1006, 1013), True, 'import numpy as np\n'), ((1022, 1042), 'scipy.stats.sem', 'stats.sem', (['untreated'], {}), '(untreated)\n', (1031, 1042), False, 'from scipy import stats\n'), ((1043, 1061), 'scipy.stats.sem', 'stats.sem', (['stopper'], {}), '(stopper)\n', (1052, 1061), False, 'from scipy import stats\n'), ((1062, 1078), 'scipy.stats.sem', 'stats.sem', (['TGFB1'], {}), '(TGFB1)\n', (1071, 1078), False, 'from scipy import stats\n'), ((1079, 1095), 'scipy.stats.sem', 'stats.sem', (['TGFB2'], {}), '(TGFB2)\n', (1088, 1095), False, 'from scipy import stats\n'), ((1096, 1112), 'scipy.stats.sem', 'stats.sem', (['PTX10'], {}), '(PTX10)\n', (1105, 1112), False, 'from scipy import 
stats\n'), ((1113, 1129), 'scipy.stats.sem', 'stats.sem', (['PTX20'], {}), '(PTX20)\n', (1122, 1129), False, 'from scipy import stats\n'), ((1152, 1237), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': '(1 + width * 0.125 + width * (i * 0.25))', 'y': 'untreated[i]', 'fmt': '"""ko"""'}), "(x=1 + width * 0.125 + width * (i * 0.25), y=untreated[i], fmt='ko'\n )\n", (1164, 1237), True, 'import matplotlib.pyplot as plt\n'), ((1225, 1303), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': '(2 + width * 0.125 + width * (i * 0.25))', 'y': 'stopper[i]', 'fmt': '"""ko"""'}), "(x=2 + width * 0.125 + width * (i * 0.25), y=stopper[i], fmt='ko')\n", (1237, 1303), True, 'import matplotlib.pyplot as plt\n'), ((1296, 1372), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': '(3 + width * 0.125 + width * (i * 0.25))', 'y': 'TGFB1[i]', 'fmt': '"""ko"""'}), "(x=3 + width * 0.125 + width * (i * 0.25), y=TGFB1[i], fmt='ko')\n", (1308, 1372), True, 'import matplotlib.pyplot as plt\n'), ((1365, 1441), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': '(4 + width * 0.125 + width * (i * 0.25))', 'y': 'TGFB2[i]', 'fmt': '"""ko"""'}), "(x=4 + width * 0.125 + width * (i * 0.25), y=TGFB2[i], fmt='ko')\n", (1377, 1441), True, 'import matplotlib.pyplot as plt\n'), ((1434, 1510), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': '(5 + width * 0.125 + width * (i * 0.25))', 'y': 'PTX10[i]', 'fmt': '"""ko"""'}), "(x=5 + width * 0.125 + width * (i * 0.25), y=PTX10[i], fmt='ko')\n", (1446, 1510), True, 'import matplotlib.pyplot as plt\n'), ((1503, 1579), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': '(6 + width * 0.125 + width * (i * 0.25))', 'y': 'PTX20[i]', 'fmt': '"""ko"""'}), "(x=6 + width * 0.125 + width * (i * 0.25), y=PTX20[i], fmt='ko')\n", (1515, 1579), True, 'import matplotlib.pyplot as plt\n')] |
"""Nuts and bolts of arim.scat.crack_2d_scat"""
import numpy as np
import numba
import scipy.integrate as si
from numpy.core.umath import sin, cos, pi, exp, sqrt
import ctypes
import scipy
@numba.njit(cache=True)
def basis_function(k):
    """Evaluate the crack basis function, switching to a small-argument
    series where the closed form is numerically unstable."""
    small = 1e-1
    if abs(k) > small:
        return 105.0 / k ** 7 * (k * (k * k - 15) * cos(k) - (6 * k * k - 15) * sin(k))
    return 1 - 1 / 18 * k ** 2 + 1 / 792 * k ** 4
@numba.njit(cache=True)
def sigma(k, k0):
    """Return conj(sqrt(k**2 - k0**2)) evaluated in the complex plane.

    The argument is promoted to complex so that the square root of a
    negative difference yields the imaginary branch rather than nan.
    """
    # complex() replaces np.complex, which was deprecated in NumPy 1.20
    # and removed in 1.24; the builtin is the documented substitute.
    return sqrt(complex(k * k - k0 * k0)).conjugate()
@numba.njit(cache=True)
def F(xi, xi2, h, beta):
    """Squared basis-function contribution times the L2 kernel
    (float inputs, complex result)."""
    shape = basis_function(xi2 * h * beta)
    kernel = -((beta ** 2 - 0.5) ** 2) + beta ** 2 * sigma(beta, xi) * sigma(beta, 1)
    return shape ** 2 * kernel
@numba.njit(cache=True)
def P(k):
    """Squared complex basis transform (float input, complex result);
    uses a series expansion for small |k|."""
    threshold = 1e-1
    if abs(k) > threshold:
        sk = (exp(2j * k) - 1) / 2j
        ck = (exp(2j * k) + 1) / 2
        value = 105 / k ** 7 * (k * (k ** 2 - 15) * ck - (6 * k ** 2 - 15) * sk)
    else:
        value = 1 + 1j * k - 5 / 9 * k ** 2 - 2j / 9 * k ** 3
    return value ** 2
@numba.njit(cache=True)
def A_x_F1(x, xi, xi2, h_nodes, z):
    """Integrand term with beta = 1 - x**2."""
    beta = 1 - x ** 2
    return F(xi, xi2, h_nodes, beta) / sqrt(2 - x ** 2) * cos(beta * z)
@numba.cfunc("f8(f8, voidptr)", cache=True)
def A_x_F1_real(x, data):
xi, xi2, h_nodes, z = numba.carray(data, 4, dtype=numba.float64)
return A_x_F1(x, xi, xi2, h_nodes, z).real
@numba.cfunc("f8(f8, voidptr)", cache=True)
def A_x_F1_imag(x, data):
xi, xi2, h_nodes, z = numba.carray(data, 4, dtype=numba.float64)
return A_x_F1(x, xi, xi2, h_nodes, z).imag
@numba.njit(cache=True)
def A_x_F2(x, xi, xi2, h_nodes, z):
    """Integrand term with beta = 1 + x**2."""
    beta = 1 + x ** 2
    return F(xi, xi2, h_nodes, beta) / sqrt(2 + x ** 2) * cos(beta * z)
@numba.cfunc("f8(f8, voidptr)", cache=True)
def A_x_F2_real(x, data):
    # C-callback wrapper for scipy.integrate.quad via scipy.LowLevelCallable:
    # unpack (xi, xi2, h_nodes, z) and return the real part of A_x_F2.
    xi, xi2, h_nodes, z = numba.carray(data, 4, dtype=numba.float64)
    return A_x_F2(x, xi, xi2, h_nodes, z).real
@numba.cfunc("f8(f8, voidptr)", cache=True)
def A_x_F2_imag(x, data):
    # Imaginary-part counterpart of A_x_F2_real.
    xi, xi2, h_nodes, z = numba.carray(data, 4, dtype=numba.float64)
    return A_x_F2(x, xi, xi2, h_nodes, z).imag
@numba.njit(cache=True)
def A_x_F(x, xi, xi2, h_nodes, z):
    # Integrand used by A_x for the far nodes (i >= 2): the kernel is
    # evaluated at complex arguments (1 + 1j*x**2) and (xi + 1j*x**2),
    # i.e. along a deformed contour in the complex plane (presumably a
    # steepest-descent deformation -- confirm against the derivation).
    # The trailing real exponential damps the integrand for
    # z > 2 * xi2 * h_nodes, which makes the quadrature over [0, 70]
    # converge. Returns a complex value.
    return (
        -(((1 + 1j * x ** 2) ** 2 - 0.5) ** 2)
        * P(xi2 * h_nodes * (1 + 1j * x ** 2))
        / sqrt(2 + 1j * x ** 2)
        * exp(-1j * pi / 4)
        * exp(1j * (z - 2 * xi2 * h_nodes))
        + (xi + 1j * x ** 2) ** 2
        * P(xi2 * h_nodes * (xi + 1j * x ** 2))
        * sqrt(2 * xi + 1j * x ** 2)
        * x ** 2
        * exp(1j * pi / 4)
        * exp(1j * xi * (z - 2 * xi2 * h_nodes))
    ) * exp(-(z - 2 * xi2 * h_nodes) * x ** 2)
@numba.cfunc("f8(f8, voidptr)", cache=True)
def A_x_F_real(x, data):
    # C-callback wrapper for scipy.integrate.quad via scipy.LowLevelCallable:
    # unpack (xi, xi2, h_nodes, z) and return the real part of A_x_F.
    xi, xi2, h_nodes, z = numba.carray(data, 4, dtype=numba.float64)
    return A_x_F(x, xi, xi2, h_nodes, z).real
@numba.cfunc("f8(f8, voidptr)", cache=True)
def A_x_F_imag(x, data):
    # Imaginary-part counterpart of A_x_F_real.
    xi, xi2, h_nodes, z = numba.carray(data, 4, dtype=numba.float64)
    return A_x_F(x, xi, xi2, h_nodes, z).imag
def A_x(xi, xi2, h_nodes, num_nodes):
    """Assemble the (num_nodes, num_nodes) x-direction system matrix.

    From fn_Qx_matrix. The matrix is Toeplitz: entry (i, j) depends only on
    i - j, so only the 2*num_nodes - 1 distinct coefficients are computed
    (vector ``I_12``) and then scattered with fancy indexing.

    Notes: the longest in this function is the numerical integration with
    scipy.integrate.quad. To speed it up, the integrand is compiled with
    numba. The quad function requires a function f8(f8, voidptr) so the
    arguments xi, xi2, h_nodes and z, needed for the calculation of the
    integrand, are packed.
    """
    # integral around branch point
    z_1 = xi2 * h_nodes * np.arange(num_nodes)
    # Pack arguments for LowLevelCallable.
    # data is updated at every loop. The main loop is NOT thread-safe. If the main loop
    # becomes parallel some day, make "data" local.
    data = np.array([xi, xi2, h_nodes, 0.0])
    data_ptr = ctypes.cast(data.ctypes, ctypes.c_void_p)
    quad_args = dict(limit=200)
    # For num_nodes = 4, I_12 looks like [a3, a2, a1, a0, a1, a2, a3] (size: 2*num_nodes-1)
    # Build the second half first, then copy it to the first half.
    # Fix: np.complex (alias of builtin complex) was removed in NumPy 1.24;
    # np.complex128 is the dtype it always resolved to here.
    I_12 = np.zeros(2 * num_nodes - 1, np.complex128)
    for i, z in enumerate(z_1):
        data[3] = z
        if i < 2:  # two first iterations, coefficients a0 and a1
            int_F1_real = scipy.LowLevelCallable(A_x_F1_real.ctypes, data_ptr)
            int_F1_imag = scipy.LowLevelCallable(A_x_F1_imag.ctypes, data_ptr)
            int_F2_real = scipy.LowLevelCallable(A_x_F2_real.ctypes, data_ptr)
            int_F2_imag = scipy.LowLevelCallable(A_x_F2_imag.ctypes, data_ptr)
            I_12[i + num_nodes - 1] = 4j * (
                si.quad(int_F1_real, 0, 1, **quad_args)[0]
                + 1j * si.quad(int_F1_imag, 0, 1, **quad_args)[0]
            ) + 4 * (
                si.quad(int_F2_real, 0, 50, **quad_args)[0]
                + 1j * si.quad(int_F2_imag, 0, 50, **quad_args)[0]
            )
        else:
            int_F_real = scipy.LowLevelCallable(A_x_F_real.ctypes, data_ptr)
            int_F_imag = scipy.LowLevelCallable(A_x_F_imag.ctypes, data_ptr)
            I_12[i + num_nodes - 1] = 4j * (
                si.quad(int_F_real, 0, 70, **quad_args)[0]
                + 1j * si.quad(int_F_imag, 0, 70, **quad_args)[0]
            )
    # Mirror the second half onto the first (the coefficients are symmetric).
    I_12[: num_nodes - 1] = I_12[: num_nodes - 1 : -1]
    # m_ind[i, j] = (num_nodes - 1) + i - j, i.e. a Toeplitz index map.
    v_ind = np.arange(num_nodes)
    m_ind = (
        np.full((num_nodes, num_nodes), num_nodes - 1) + v_ind[:, np.newaxis] - v_ind
    )
    return I_12[m_ind]
@numba.njit(cache=True)
def A_z_F1(x, xi, xi2, h_nodes, z):
    """First integrand used by ``A_z`` (finite segment, beta = xi - x**2)."""
    beta = xi - x ** 2
    return F(xi, xi2, h_nodes, beta) / sqrt(2 * xi - x ** 2) * cos(beta * z)
@numba.cfunc("f8(f8, voidptr)", cache=True)
def A_z_F1_real(x, data):
    # C-callback wrapper for scipy.integrate.quad via scipy.LowLevelCallable:
    # unpack (xi, xi2, h_nodes, z) and return the real part of A_z_F1.
    xi, xi2, h_nodes, z = numba.carray(data, 4, dtype=numba.float64)
    return A_z_F1(x, xi, xi2, h_nodes, z).real
@numba.cfunc("f8(f8, voidptr)", cache=True)
def A_z_F1_imag(x, data):
    # Imaginary-part counterpart of A_z_F1_real.
    xi, xi2, h_nodes, z = numba.carray(data, 4, dtype=numba.float64)
    return A_z_F1(x, xi, xi2, h_nodes, z).imag
@numba.njit(cache=True)
def A_z_F2(x, xi, xi2, h_nodes, z):
    """Second integrand used by ``A_z`` (semi-infinite segment, beta = xi + x**2)."""
    beta = xi + x ** 2
    return F(xi, xi2, h_nodes, beta) / sqrt(2 * xi + x ** 2) * cos(beta * z)
@numba.cfunc("f8(f8, voidptr)", cache=True)
def A_z_F2_real(x, data):
    # C-callback wrapper for scipy.integrate.quad via scipy.LowLevelCallable:
    # unpack (xi, xi2, h_nodes, z) and return the real part of A_z_F2.
    xi, xi2, h_nodes, z = numba.carray(data, 4, dtype=numba.float64)
    return A_z_F2(x, xi, xi2, h_nodes, z).real
@numba.cfunc("f8(f8, voidptr)", cache=True)
def A_z_F2_imag(x, data):
    # Imaginary-part counterpart of A_z_F2_real.
    xi, xi2, h_nodes, z = numba.carray(data, 4, dtype=numba.float64)
    return A_z_F2(x, xi, xi2, h_nodes, z).imag
@numba.njit(cache=True)
def A_z_F(x, xi, xi2, h_nodes, z):
    # Integrand used by A_z for the far nodes (i >= 2): same structure as
    # A_x_F but with the roles of the (xi + 1j*x**2) and (1 + 1j*x**2)
    # contour arguments exchanged. The trailing real exponential damps the
    # integrand for z > 2 * xi2 * h_nodes. Returns a complex value.
    return (
        -(((xi + 1j * x ** 2) ** 2 - 0.5) ** 2)
        * P(xi2 * h_nodes * (xi + 1j * x ** 2))
        / sqrt(2 * xi + 1j * x ** 2)
        * exp(-1j * pi / 4)
        * exp(xi * 1j * (z - 2 * xi2 * h_nodes))
        + (1 + 1j * x ** 2) ** 2
        * P(xi2 * h_nodes * (1 + 1j * x ** 2))
        * sqrt(2 + 1j * x ** 2)
        * x ** 2
        * exp(1j * pi / 4)
        * exp(1j * (z - 2 * xi2 * h_nodes))
    ) * exp(-(z - 2 * xi2 * h_nodes) * x ** 2)
@numba.cfunc("f8(f8, voidptr)", cache=True)
def A_z_F_real(x, data):
    # C-callback wrapper for scipy.integrate.quad via scipy.LowLevelCallable:
    # unpack (xi, xi2, h_nodes, z) and return the real part of A_z_F.
    xi, xi2, h_nodes, z = numba.carray(data, 4, dtype=numba.float64)
    return A_z_F(x, xi, xi2, h_nodes, z).real
@numba.cfunc("f8(f8, voidptr)", cache=True)
def A_z_F_imag(x, data):
    # Imaginary-part counterpart of A_z_F_real.
    xi, xi2, h_nodes, z = numba.carray(data, 4, dtype=numba.float64)
    return A_z_F(x, xi, xi2, h_nodes, z).imag
def A_z(xi, xi2, h_nodes, num_nodes):
    """Assemble the (num_nodes, num_nodes) z-direction system matrix.

    From fn_Qz_matrix. Same structure as ``A_x``: a Toeplitz matrix built
    from 2*num_nodes - 1 coefficients obtained by numerical integration of
    numba-compiled integrands (A_z_F1/A_z_F2 for the two first coefficients,
    A_z_F for the rest).
    """
    # integral around branch point
    z_1 = xi2 * h_nodes * np.arange(num_nodes)
    # Pack arguments for LowLevelCallable.
    # data is updated at every loop. The main loop is NOT thread-safe. If the main loop
    # becomes parallel some day, make "data" local.
    data = np.array([xi, xi2, h_nodes, 0.0])
    data_ptr = ctypes.cast(data.ctypes, ctypes.c_void_p)
    quad_args = dict(limit=200)
    # For num_nodes = 4, I_12 looks like [a3, a2, a1, a0, a1, a2, a3] (size: 2*num_nodes-1)
    # Build the second half first, then copy it to the first half.
    # Fix: np.complex (alias of builtin complex) was removed in NumPy 1.24;
    # np.complex128 is the dtype it always resolved to here.
    I_12 = np.zeros(2 * num_nodes - 1, np.complex128)
    for i, z in enumerate(z_1):
        data[3] = z
        if i < 2:  # two first iterations, coefficients a0 and a1
            int_F1_real = scipy.LowLevelCallable(A_z_F1_real.ctypes, data_ptr)
            int_F1_imag = scipy.LowLevelCallable(A_z_F1_imag.ctypes, data_ptr)
            int_F2_real = scipy.LowLevelCallable(A_z_F2_real.ctypes, data_ptr)
            int_F2_imag = scipy.LowLevelCallable(A_z_F2_imag.ctypes, data_ptr)
            I_12[i + num_nodes - 1] = 4j * (
                si.quad(int_F1_real, 0, sqrt(xi), **quad_args)[0]
                + 1j * si.quad(int_F1_imag, 0, sqrt(xi), **quad_args)[0]
            ) + 4 * (
                si.quad(int_F2_real, 0, 50, **quad_args)[0]
                + 1j * si.quad(int_F2_imag, 0, 50, **quad_args)[0]
            )
        else:
            int_F_real = scipy.LowLevelCallable(A_z_F_real.ctypes, data_ptr)
            int_F_imag = scipy.LowLevelCallable(A_z_F_imag.ctypes, data_ptr)
            I_12[i + num_nodes - 1] = 4j * (
                si.quad(int_F_real, 0, 70, **quad_args)[0]
                + 1j * si.quad(int_F_imag, 0, 70, **quad_args)[0]
            )
    # Mirror the second half onto the first (the coefficients are symmetric).
    I_12[: num_nodes - 1] = I_12[: num_nodes - 1 : -1]
    # m_ind[i, j] = (num_nodes - 1) + i - j, i.e. a Toeplitz index map.
    v_ind = np.arange(num_nodes)
    m_ind = (
        np.full((num_nodes, num_nodes), num_nodes - 1) + v_ind[:, np.newaxis] - v_ind
    )
    return I_12[m_ind]
@numba.jit(nopython=True, nogil=True, cache=True)
def crack_2d_scat_kernel(
    phi_in,
    phi_out_array,
    vel_L,
    vel_T,
    density,
    frequency,
    use_incident_L,
    use_incident_T,
    x_nodes,
    h_nodes,
    A_x,
    A_z,
    S_LL,
    S_LT,
    S_TL,
    S_TT,
):
    """
    work on one incident angle in order to cache the results of two to four
    linear solve

    Computes the 2D crack scattering amplitudes for a single incident angle
    ``phi_in`` and all outgoing angles in ``phi_out_array``, writing into
    ``S_LL``, ``S_LT``, ``S_TL``, ``S_TT`` in place (one entry per outgoing
    angle) and also returning them. L/T denote the longitudinal/transverse
    wave modes. The boundary linear systems (matrices ``A_x``/``A_z``) are
    solved at most once per incident mode and the solutions are reused for
    every outgoing angle.

    NOTE(review): x_nodes/h_nodes appear to be the crack discretisation
    (node positions and spacing) -- confirm against the caller.
    """
    # Lamé coefficients, see http://subsurfwiki.org/wiki/Template:Elastic_modulus
    lame_lambda = density * (vel_L ** 2 - 2 * vel_T ** 2)
    lame_mu = density * vel_T ** 2
    omega = 2 * pi * frequency
    # Wavenumbers of the L and T modes.
    xi1 = 2 * pi * frequency / vel_L
    xi2 = 2 * pi * frequency / vel_T
    # Wavelengths of the L and T modes.
    lambda_L = vel_L / frequency
    lambda_T = vel_T / frequency
    xi = vel_T / vel_L
    k_L = xi1 # alias
    k_T = xi2 # alias
    a_L = -1j * k_L * pi / xi2 ** 2 # incident L wave
    a_T = -1j * k_T * pi / xi2 ** 2 # incident S wave
    # normal vector to the crack
    nv = np.array([0.0, 1.0], np.complex128) # force to complex to please numba
    # Incident propagation direction (unit vector) ...
    sv = np.array(
        [-sin(phi_in), -cos(phi_in)], np.complex128
    ) # force to complex to please numba
    # ... and the vector perpendicular to it.
    tv = np.array([sv[1], -sv[0]], np.complex128)
    if use_incident_L:
        # Right-hand sides for an incident L wave, then solve the boundary
        # systems once; vxL/vzL are reused for every outgoing angle below.
        b_L = exp(1j * k_L * x_nodes * sv[0]) * basis_function(-k_L * h_nodes * sv[0])
        b_x = -2 * sv[0] * sv[1] * b_L
        b_z = -(1 / xi ** 2 - 2 * sv[0] ** 2) * b_L
        vxL = np.linalg.solve(A_x, b_x)
        vzL = np.linalg.solve(A_z, b_z)
    if use_incident_T:
        # Same for an incident T (shear) wave; tv here is still the vector
        # perpendicular to the incident direction.
        b_T = exp(1j * k_T * x_nodes * sv[0]) * basis_function(-k_T * h_nodes * sv[0])
        b_x = -(tv[0] * sv[1] + tv[1] * sv[0]) * b_T
        b_z = -2 * tv[1] * sv[1] * b_T
        vxT = np.linalg.solve(A_x, b_x)
        vzT = np.linalg.solve(A_z, b_z)
    for j, phi_out in enumerate(phi_out_array):
        # Outgoing direction and its perpendicular (tv is rebound here).
        ev = np.array([sin(phi_out), cos(phi_out)], np.complex128)
        tv = np.array([ev[1], -ev[0]], np.complex128)
        c_L = basis_function(xi1 * h_nodes * ev[0]) * exp(-1j * xi1 * ev[0] * x_nodes)
        c_T = basis_function(xi2 * h_nodes * ev[0]) * exp(-1j * xi2 * ev[0] * x_nodes)
        if use_incident_L:
            v_L = np.array([a_L * np.dot(vxL, c_L), a_L * np.dot(vzL, c_L)])
            v_T = np.array([a_L * np.dot(vxL, c_T), a_L * np.dot(vzL, c_T)])
            S_LL[j] = (
                1
                / 4
                * sqrt(2 / pi)
                * exp(-1j * pi / 4)
                * xi1 ** (5 / 2)
                * (
                    lame_lambda / (density * omega ** 2) * (np.dot(v_L, nv))
                    + 2
                    * lame_mu
                    / (density * omega ** 2)
                    * np.dot(v_L, ev)
                    * np.dot(ev, nv)
                )
                / sqrt(lambda_L)
            )
            S_LT[j] = (
                1
                / 4
                * sqrt(2 / pi)
                * exp(-1j * pi / 4)
                * xi2 ** (5 / 2)
                * lame_mu
                / (density * omega ** 2)
                * (np.dot(v_T, tv) * np.dot(ev, nv) + np.dot(v_T, ev) * np.dot(tv, nv))
                / sqrt(lambda_T)
            )
        if use_incident_T:
            v_L = np.array([a_T * np.dot(vxT, c_L), a_T * np.dot(vzT, c_L)])
            v_T = np.array([a_T * np.dot(vxT, c_T), a_T * np.dot(vzT, c_T)])
            # This is the same expression as for LL and LT but v_L and v_T are
            # different.
            # Add a minus sign compared to the original code because change of
            # polarisation.
            S_TL[j] = -(
                1
                / 4
                * sqrt(2 / pi)
                * exp(-1j * pi / 4)
                * xi1 ** (5 / 2)
                * (
                    lame_lambda / (density * omega ** 2) * (np.dot(v_L, nv))
                    + 2
                    * lame_mu
                    / (density * omega ** 2)
                    * np.dot(v_L, ev)
                    * np.dot(ev, nv)
                )
                / sqrt(lambda_L)
            )
            S_TT[j] = -(
                1
                / 4
                * sqrt(2 / pi)
                * exp(-1j * pi / 4)
                * xi2 ** (5 / 2)
                * lame_mu
                / (density * omega ** 2)
                * (np.dot(v_T, tv) * np.dot(ev, nv) + np.dot(v_T, ev) * np.dot(tv, nv))
                / sqrt(lambda_T)
            )
    return S_LL, S_LT, S_TL, S_TT
@numba.jit(nopython=True, nogil=True, cache=True, parallel=False)
def crack_2d_scat_matrix(
    phi_in_vect,
    phi_out_array,
    vel_L,
    vel_T,
    density,
    frequency,
    use_incident_L,
    use_incident_T,
    x_nodes,
    h_nodes,
    A_x,
    A_z,
    S_LL,
    S_LT,
    S_TL,
    S_TT,
):
    """
    call the kernel in the case where there is one phi_in for many phi_out
    (use optimised kernel)

    phi_in_vect is 1D (one incident angle per column of the 2D arrays);
    column i of phi_out_array / S_LL / S_LT / S_TL / S_TT corresponds to
    phi_in_vect[i]. The output arrays are filled in place and returned.
    """
    # todo: set parallel=True if one day numba supports cache=True with this flag.
    assert phi_in_vect.ndim == 1
    assert phi_out_array.ndim == 2
    for i in range(phi_in_vect.shape[0]):
        # One kernel call per incident angle; the kernel caches the linear
        # solves and reuses them for the whole column of outgoing angles.
        crack_2d_scat_kernel(
            phi_in_vect[i],
            phi_out_array[:, i],
            vel_L,
            vel_T,
            density,
            frequency,
            use_incident_L,
            use_incident_T,
            x_nodes,
            h_nodes,
            A_x,
            A_z,
            S_LL[:, i],
            S_LT[:, i],
            S_TL[:, i],
            S_TT[:, i],
        )
    return S_LL, S_LT, S_TL, S_TT
@numba.jit(nopython=True, nogil=True, cache=True, parallel=False)
def crack_2d_scat_general(
    phi_in_array,
    phi_out_array,
    vel_L,
    vel_T,
    density,
    frequency,
    use_incident_L,
    use_incident_T,
    x_nodes,
    h_nodes,
    A_x,
    A_z,
    S_LL,
    S_LT,
    S_TL,
    S_TT,
):
    """
    call the kernel in the case where there is one phi_in for one phi_out
    (no optimisation available)

    phi_in_array and phi_out_array are paired element-wise (same 2D shape
    assumed); the kernel is invoked once per (i, j) pair, so the cached
    linear solves cannot be shared. The output arrays are filled in place
    and returned.
    """
    # todo: set parallel=True if one day numba supports cache=True with this flag.
    for i in range(phi_in_array.shape[0]):
        for j in range(phi_out_array.shape[1]):
            # pass a slice which is writeable
            crack_2d_scat_kernel(
                phi_in_array[i, j],
                phi_out_array[i, j : j + 1],
                vel_L,
                vel_T,
                density,
                frequency,
                use_incident_L,
                use_incident_T,
                x_nodes,
                h_nodes,
                A_x,
                A_z,
                S_LL[i, j : j + 1],
                S_LT[i, j : j + 1],
                S_TL[i, j : j + 1],
                S_TT[i, j : j + 1],
            )
    return S_LL, S_LT, S_TL, S_TT
| [
"numpy.core.umath.cos",
"numpy.full",
"numba.carray",
"scipy.integrate.quad",
"numba.njit",
"numpy.zeros",
"numpy.core.umath.sin",
"scipy.LowLevelCallable",
"numpy.core.umath.exp",
"ctypes.cast",
"numpy.array",
"numba.jit",
"numpy.arange",
"numpy.dot",
"numpy.linalg.solve",
"numpy.core... | [((193, 215), 'numba.njit', 'numba.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (203, 215), False, 'import numba\n'), ((433, 455), 'numba.njit', 'numba.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (443, 455), False, 'import numba\n'), ((534, 556), 'numba.njit', 'numba.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (544, 556), False, 'import numba\n'), ((822, 844), 'numba.njit', 'numba.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (832, 844), False, 'import numba\n'), ((1169, 1191), 'numba.njit', 'numba.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (1179, 1191), False, 'import numba\n'), ((1317, 1359), 'numba.cfunc', 'numba.cfunc', (['"""f8(f8, voidptr)"""'], {'cache': '(True)'}), "('f8(f8, voidptr)', cache=True)\n", (1328, 1359), False, 'import numba\n'), ((1505, 1547), 'numba.cfunc', 'numba.cfunc', (['"""f8(f8, voidptr)"""'], {'cache': '(True)'}), "('f8(f8, voidptr)', cache=True)\n", (1516, 1547), False, 'import numba\n'), ((1693, 1715), 'numba.njit', 'numba.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (1703, 1715), False, 'import numba\n'), ((1841, 1883), 'numba.cfunc', 'numba.cfunc', (['"""f8(f8, voidptr)"""'], {'cache': '(True)'}), "('f8(f8, voidptr)', cache=True)\n", (1852, 1883), False, 'import numba\n'), ((2029, 2071), 'numba.cfunc', 'numba.cfunc', (['"""f8(f8, voidptr)"""'], {'cache': '(True)'}), "('f8(f8, voidptr)', cache=True)\n", (2040, 2071), False, 'import numba\n'), ((2217, 2239), 'numba.njit', 'numba.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (2227, 2239), False, 'import numba\n'), ((2748, 2790), 'numba.cfunc', 'numba.cfunc', (['"""f8(f8, voidptr)"""'], {'cache': '(True)'}), "('f8(f8, voidptr)', cache=True)\n", (2759, 2790), False, 'import numba\n'), ((2934, 2976), 'numba.cfunc', 'numba.cfunc', (['"""f8(f8, voidptr)"""'], {'cache': '(True)'}), "('f8(f8, voidptr)', cache=True)\n", (2945, 2976), False, 'import numba\n'), ((5457, 5479), 'numba.njit', 'numba.njit', ([], {'cache': '(True)'}), 
'(cache=True)\n', (5467, 5479), False, 'import numba\n'), ((5644, 5686), 'numba.cfunc', 'numba.cfunc', (['"""f8(f8, voidptr)"""'], {'cache': '(True)'}), "('f8(f8, voidptr)', cache=True)\n", (5655, 5686), False, 'import numba\n'), ((5832, 5874), 'numba.cfunc', 'numba.cfunc', (['"""f8(f8, voidptr)"""'], {'cache': '(True)'}), "('f8(f8, voidptr)', cache=True)\n", (5843, 5874), False, 'import numba\n'), ((6020, 6042), 'numba.njit', 'numba.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (6030, 6042), False, 'import numba\n'), ((6207, 6249), 'numba.cfunc', 'numba.cfunc', (['"""f8(f8, voidptr)"""'], {'cache': '(True)'}), "('f8(f8, voidptr)', cache=True)\n", (6218, 6249), False, 'import numba\n'), ((6395, 6437), 'numba.cfunc', 'numba.cfunc', (['"""f8(f8, voidptr)"""'], {'cache': '(True)'}), "('f8(f8, voidptr)', cache=True)\n", (6406, 6437), False, 'import numba\n'), ((6583, 6605), 'numba.njit', 'numba.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (6593, 6605), False, 'import numba\n'), ((7114, 7156), 'numba.cfunc', 'numba.cfunc', (['"""f8(f8, voidptr)"""'], {'cache': '(True)'}), "('f8(f8, voidptr)', cache=True)\n", (7125, 7156), False, 'import numba\n'), ((7300, 7342), 'numba.cfunc', 'numba.cfunc', (['"""f8(f8, voidptr)"""'], {'cache': '(True)'}), "('f8(f8, voidptr)', cache=True)\n", (7311, 7342), False, 'import numba\n'), ((9515, 9563), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'nogil': '(True)', 'cache': '(True)'}), '(nopython=True, nogil=True, cache=True)\n', (9524, 9563), False, 'import numba\n'), ((13959, 14023), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'nogil': '(True)', 'cache': '(True)', 'parallel': '(False)'}), '(nopython=True, nogil=True, cache=True, parallel=False)\n', (13968, 14023), False, 'import numba\n'), ((15022, 15086), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'nogil': '(True)', 'cache': '(True)', 'parallel': '(False)'}), '(nopython=True, nogil=True, cache=True, parallel=False)\n', (15031, 15086), False, 
'import numba\n'), ((1412, 1454), 'numba.carray', 'numba.carray', (['data', '(4)'], {'dtype': 'numba.float64'}), '(data, 4, dtype=numba.float64)\n', (1424, 1454), False, 'import numba\n'), ((1600, 1642), 'numba.carray', 'numba.carray', (['data', '(4)'], {'dtype': 'numba.float64'}), '(data, 4, dtype=numba.float64)\n', (1612, 1642), False, 'import numba\n'), ((1936, 1978), 'numba.carray', 'numba.carray', (['data', '(4)'], {'dtype': 'numba.float64'}), '(data, 4, dtype=numba.float64)\n', (1948, 1978), False, 'import numba\n'), ((2124, 2166), 'numba.carray', 'numba.carray', (['data', '(4)'], {'dtype': 'numba.float64'}), '(data, 4, dtype=numba.float64)\n', (2136, 2166), False, 'import numba\n'), ((2842, 2884), 'numba.carray', 'numba.carray', (['data', '(4)'], {'dtype': 'numba.float64'}), '(data, 4, dtype=numba.float64)\n', (2854, 2884), False, 'import numba\n'), ((3028, 3070), 'numba.carray', 'numba.carray', (['data', '(4)'], {'dtype': 'numba.float64'}), '(data, 4, dtype=numba.float64)\n', (3040, 3070), False, 'import numba\n'), ((3781, 3814), 'numpy.array', 'np.array', (['[xi, xi2, h_nodes, 0.0]'], {}), '([xi, xi2, h_nodes, 0.0])\n', (3789, 3814), True, 'import numpy as np\n'), ((3830, 3871), 'ctypes.cast', 'ctypes.cast', (['data.ctypes', 'ctypes.c_void_p'], {}), '(data.ctypes, ctypes.c_void_p)\n', (3841, 3871), False, 'import ctypes\n'), ((4075, 4114), 'numpy.zeros', 'np.zeros', (['(2 * num_nodes - 1)', 'np.complex'], {}), '(2 * num_nodes - 1, np.complex)\n', (4083, 4114), True, 'import numpy as np\n'), ((5304, 5324), 'numpy.arange', 'np.arange', (['num_nodes'], {}), '(num_nodes)\n', (5313, 5324), True, 'import numpy as np\n'), ((5739, 5781), 'numba.carray', 'numba.carray', (['data', '(4)'], {'dtype': 'numba.float64'}), '(data, 4, dtype=numba.float64)\n', (5751, 5781), False, 'import numba\n'), ((5927, 5969), 'numba.carray', 'numba.carray', (['data', '(4)'], {'dtype': 'numba.float64'}), '(data, 4, dtype=numba.float64)\n', (5939, 5969), False, 'import numba\n'), ((6302, 
6344), 'numba.carray', 'numba.carray', (['data', '(4)'], {'dtype': 'numba.float64'}), '(data, 4, dtype=numba.float64)\n', (6314, 6344), False, 'import numba\n'), ((6490, 6532), 'numba.carray', 'numba.carray', (['data', '(4)'], {'dtype': 'numba.float64'}), '(data, 4, dtype=numba.float64)\n', (6502, 6532), False, 'import numba\n'), ((7208, 7250), 'numba.carray', 'numba.carray', (['data', '(4)'], {'dtype': 'numba.float64'}), '(data, 4, dtype=numba.float64)\n', (7220, 7250), False, 'import numba\n'), ((7394, 7436), 'numba.carray', 'numba.carray', (['data', '(4)'], {'dtype': 'numba.float64'}), '(data, 4, dtype=numba.float64)\n', (7406, 7436), False, 'import numba\n'), ((7825, 7858), 'numpy.array', 'np.array', (['[xi, xi2, h_nodes, 0.0]'], {}), '([xi, xi2, h_nodes, 0.0])\n', (7833, 7858), True, 'import numpy as np\n'), ((7874, 7915), 'ctypes.cast', 'ctypes.cast', (['data.ctypes', 'ctypes.c_void_p'], {}), '(data.ctypes, ctypes.c_void_p)\n', (7885, 7915), False, 'import ctypes\n'), ((8119, 8158), 'numpy.zeros', 'np.zeros', (['(2 * num_nodes - 1)', 'np.complex'], {}), '(2 * num_nodes - 1, np.complex)\n', (8127, 8158), True, 'import numpy as np\n'), ((9362, 9382), 'numpy.arange', 'np.arange', (['num_nodes'], {}), '(num_nodes)\n', (9371, 9382), True, 'import numpy as np\n'), ((10477, 10512), 'numpy.array', 'np.array', (['[0.0, 1.0]', 'np.complex128'], {}), '([0.0, 1.0], np.complex128)\n', (10485, 10512), True, 'import numpy as np\n'), ((10672, 10712), 'numpy.array', 'np.array', (['[sv[1], -sv[0]]', 'np.complex128'], {}), '([sv[1], -sv[0]], np.complex128)\n', (10680, 10712), True, 'import numpy as np\n'), ((1292, 1313), 'numpy.core.umath.cos', 'cos', (['((1 - x ** 2) * z)'], {}), '((1 - x ** 2) * z)\n', (1295, 1313), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((1816, 1837), 'numpy.core.umath.cos', 'cos', (['((1 + x ** 2) * z)'], {}), '((1 + x ** 2) * z)\n', (1819, 1837), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((2706, 2744), 
'numpy.core.umath.exp', 'exp', (['(-(z - 2 * xi2 * h_nodes) * x ** 2)'], {}), '(-(z - 2 * xi2 * h_nodes) * x ** 2)\n', (2709, 2744), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((3565, 3585), 'numpy.arange', 'np.arange', (['num_nodes'], {}), '(num_nodes)\n', (3574, 3585), True, 'import numpy as np\n'), ((5612, 5634), 'numpy.core.umath.cos', 'cos', (['((xi - x ** 2) * z)'], {}), '((xi - x ** 2) * z)\n', (5615, 5634), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((6175, 6197), 'numpy.core.umath.cos', 'cos', (['((xi + x ** 2) * z)'], {}), '((xi + x ** 2) * z)\n', (6178, 6197), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((7072, 7110), 'numpy.core.umath.exp', 'exp', (['(-(z - 2 * xi2 * h_nodes) * x ** 2)'], {}), '(-(z - 2 * xi2 * h_nodes) * x ** 2)\n', (7075, 7110), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((7609, 7629), 'numpy.arange', 'np.arange', (['num_nodes'], {}), '(num_nodes)\n', (7618, 7629), True, 'import numpy as np\n'), ((10928, 10953), 'numpy.linalg.solve', 'np.linalg.solve', (['A_x', 'b_x'], {}), '(A_x, b_x)\n', (10943, 10953), True, 'import numpy as np\n'), ((10968, 10993), 'numpy.linalg.solve', 'np.linalg.solve', (['A_z', 'b_z'], {}), '(A_z, b_z)\n', (10983, 10993), True, 'import numpy as np\n'), ((11210, 11235), 'numpy.linalg.solve', 'np.linalg.solve', (['A_x', 'b_x'], {}), '(A_x, b_x)\n', (11225, 11235), True, 'import numpy as np\n'), ((11250, 11275), 'numpy.linalg.solve', 'np.linalg.solve', (['A_z', 'b_z'], {}), '(A_z, b_z)\n', (11265, 11275), True, 'import numpy as np\n'), ((11405, 11445), 'numpy.array', 'np.array', (['[ev[1], -ev[0]]', 'np.complex128'], {}), '([ev[1], -ev[0]], np.complex128)\n', (11413, 11445), True, 'import numpy as np\n'), ((1273, 1289), 'numpy.core.umath.sqrt', 'sqrt', (['(2 - x ** 2)'], {}), '(2 - x ** 2)\n', (1277, 1289), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((1797, 1813), 'numpy.core.umath.sqrt', 'sqrt', (['(2 
+ x ** 2)'], {}), '(2 + x ** 2)\n', (1801, 1813), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((4260, 4312), 'scipy.LowLevelCallable', 'scipy.LowLevelCallable', (['A_x_F1_real.ctypes', 'data_ptr'], {}), '(A_x_F1_real.ctypes, data_ptr)\n', (4282, 4312), False, 'import scipy\n'), ((4339, 4391), 'scipy.LowLevelCallable', 'scipy.LowLevelCallable', (['A_x_F1_imag.ctypes', 'data_ptr'], {}), '(A_x_F1_imag.ctypes, data_ptr)\n', (4361, 4391), False, 'import scipy\n'), ((4418, 4470), 'scipy.LowLevelCallable', 'scipy.LowLevelCallable', (['A_x_F2_real.ctypes', 'data_ptr'], {}), '(A_x_F2_real.ctypes, data_ptr)\n', (4440, 4470), False, 'import scipy\n'), ((4497, 4549), 'scipy.LowLevelCallable', 'scipy.LowLevelCallable', (['A_x_F2_imag.ctypes', 'data_ptr'], {}), '(A_x_F2_imag.ctypes, data_ptr)\n', (4519, 4549), False, 'import scipy\n'), ((4922, 4973), 'scipy.LowLevelCallable', 'scipy.LowLevelCallable', (['A_x_F_real.ctypes', 'data_ptr'], {}), '(A_x_F_real.ctypes, data_ptr)\n', (4944, 4973), False, 'import scipy\n'), ((4999, 5050), 'scipy.LowLevelCallable', 'scipy.LowLevelCallable', (['A_x_F_imag.ctypes', 'data_ptr'], {}), '(A_x_F_imag.ctypes, data_ptr)\n', (5021, 5050), False, 'import scipy\n'), ((5347, 5393), 'numpy.full', 'np.full', (['(num_nodes, num_nodes)', '(num_nodes - 1)'], {}), '((num_nodes, num_nodes), num_nodes - 1)\n', (5354, 5393), True, 'import numpy as np\n'), ((5580, 5601), 'numpy.core.umath.sqrt', 'sqrt', (['(2 * xi - x ** 2)'], {}), '(2 * xi - x ** 2)\n', (5584, 5601), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((6143, 6164), 'numpy.core.umath.sqrt', 'sqrt', (['(2 * xi + x ** 2)'], {}), '(2 * xi + x ** 2)\n', (6147, 6164), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((8303, 8355), 'scipy.LowLevelCallable', 'scipy.LowLevelCallable', (['A_z_F1_real.ctypes', 'data_ptr'], {}), '(A_z_F1_real.ctypes, data_ptr)\n', (8325, 8355), False, 'import scipy\n'), ((8382, 8434), 'scipy.LowLevelCallable', 
'scipy.LowLevelCallable', (['A_z_F1_imag.ctypes', 'data_ptr'], {}), '(A_z_F1_imag.ctypes, data_ptr)\n', (8404, 8434), False, 'import scipy\n'), ((8461, 8513), 'scipy.LowLevelCallable', 'scipy.LowLevelCallable', (['A_z_F2_real.ctypes', 'data_ptr'], {}), '(A_z_F2_real.ctypes, data_ptr)\n', (8483, 8513), False, 'import scipy\n'), ((8540, 8592), 'scipy.LowLevelCallable', 'scipy.LowLevelCallable', (['A_z_F2_imag.ctypes', 'data_ptr'], {}), '(A_z_F2_imag.ctypes, data_ptr)\n', (8562, 8592), False, 'import scipy\n'), ((8980, 9031), 'scipy.LowLevelCallable', 'scipy.LowLevelCallable', (['A_z_F_real.ctypes', 'data_ptr'], {}), '(A_z_F_real.ctypes, data_ptr)\n', (9002, 9031), False, 'import scipy\n'), ((9057, 9108), 'scipy.LowLevelCallable', 'scipy.LowLevelCallable', (['A_z_F_imag.ctypes', 'data_ptr'], {}), '(A_z_F_imag.ctypes, data_ptr)\n', (9079, 9108), False, 'import scipy\n'), ((9405, 9451), 'numpy.full', 'np.full', (['(num_nodes, num_nodes)', '(num_nodes - 1)'], {}), '((num_nodes, num_nodes), num_nodes - 1)\n', (9412, 9451), True, 'import numpy as np\n'), ((10750, 10783), 'numpy.core.umath.exp', 'exp', (['(1.0j * k_L * x_nodes * sv[0])'], {}), '(1.0j * k_L * x_nodes * sv[0])\n', (10753, 10783), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((11031, 11064), 'numpy.core.umath.exp', 'exp', (['(1.0j * k_T * x_nodes * sv[0])'], {}), '(1.0j * k_T * x_nodes * sv[0])\n', (11034, 11064), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((11500, 11534), 'numpy.core.umath.exp', 'exp', (['(-1.0j * xi1 * ev[0] * x_nodes)'], {}), '(-1.0j * xi1 * ev[0] * x_nodes)\n', (11503, 11534), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((11587, 11621), 'numpy.core.umath.exp', 'exp', (['(-1.0j * xi2 * ev[0] * x_nodes)'], {}), '(-1.0j * xi2 * ev[0] * x_nodes)\n', (11590, 11621), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((490, 517), 'numpy.complex', 'np.complex', (['(k * k - k0 * k0)'], {}), '(k * k - k0 * k0)\n', 
(500, 517), True, 'import numpy as np\n'), ((1014, 1027), 'numpy.core.umath.exp', 'exp', (['(2.0j * k)'], {}), '(2.0j * k)\n', (1017, 1027), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((1050, 1063), 'numpy.core.umath.exp', 'exp', (['(2.0j * k)'], {}), '(2.0j * k)\n', (1053, 1063), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((2452, 2487), 'numpy.core.umath.exp', 'exp', (['(1.0j * (z - 2 * xi2 * h_nodes))'], {}), '(1.0j * (z - 2 * xi2 * h_nodes))\n', (2455, 2487), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((2659, 2699), 'numpy.core.umath.exp', 'exp', (['(1.0j * xi * (z - 2 * xi2 * h_nodes))'], {}), '(1.0j * xi * (z - 2 * xi2 * h_nodes))\n', (2662, 2699), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((6825, 6865), 'numpy.core.umath.exp', 'exp', (['(xi * 1.0j * (z - 2 * xi2 * h_nodes))'], {}), '(xi * 1.0j * (z - 2 * xi2 * h_nodes))\n', (6828, 6865), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((7030, 7065), 'numpy.core.umath.exp', 'exp', (['(1.0j * (z - 2 * xi2 * h_nodes))'], {}), '(1.0j * (z - 2 * xi2 * h_nodes))\n', (7033, 7065), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((10579, 10590), 'numpy.core.umath.sin', 'sin', (['phi_in'], {}), '(phi_in)\n', (10582, 10590), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((10593, 10604), 'numpy.core.umath.cos', 'cos', (['phi_in'], {}), '(phi_in)\n', (10596, 10604), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((11348, 11360), 'numpy.core.umath.sin', 'sin', (['phi_out'], {}), '(phi_out)\n', (11351, 11360), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((11362, 11374), 'numpy.core.umath.cos', 'cos', (['phi_out'], {}), '(phi_out)\n', (11365, 11374), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((12272, 12286), 'numpy.core.umath.sqrt', 'sqrt', (['lambda_L'], {}), '(lambda_L)\n', (12276, 12286), False, 'from 
numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((12636, 12650), 'numpy.core.umath.sqrt', 'sqrt', (['lambda_T'], {}), '(lambda_T)\n', (12640, 12650), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((394, 400), 'numpy.core.umath.cos', 'cos', (['k'], {}), '(k)\n', (397, 400), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((422, 428), 'numpy.core.umath.sin', 'sin', (['k'], {}), '(k)\n', (425, 428), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((2424, 2443), 'numpy.core.umath.exp', 'exp', (['(-1.0j * pi / 4)'], {}), '(-1.0j * pi / 4)\n', (2427, 2443), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((2632, 2650), 'numpy.core.umath.exp', 'exp', (['(1.0j * pi / 4)'], {}), '(1.0j * pi / 4)\n', (2635, 2650), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((6797, 6816), 'numpy.core.umath.exp', 'exp', (['(-1.0j * pi / 4)'], {}), '(-1.0j * pi / 4)\n', (6800, 6816), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((7003, 7021), 'numpy.core.umath.exp', 'exp', (['(1.0j * pi / 4)'], {}), '(1.0j * pi / 4)\n', (7006, 7021), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((13528, 13542), 'numpy.core.umath.sqrt', 'sqrt', (['lambda_L'], {}), '(lambda_L)\n', (13532, 13542), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((13893, 13907), 'numpy.core.umath.sqrt', 'sqrt', (['lambda_T'], {}), '(lambda_T)\n', (13897, 13907), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((2392, 2415), 'numpy.core.umath.sqrt', 'sqrt', (['(2 + 1.0j * x ** 2)'], {}), '(2 + 1.0j * x ** 2)\n', (2396, 2415), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((5113, 5152), 'scipy.integrate.quad', 'si.quad', (['int_F_real', '(0)', '(70)'], {}), '(int_F_real, 0, 70, **quad_args)\n', (5120, 5152), True, 'import scipy.integrate as si\n'), ((6760, 6788), 'numpy.core.umath.sqrt', 'sqrt', (['(2 * xi + 1.0j * x ** 2)'], {}), '(2 * 
xi + 1.0j * x ** 2)\n', (6764, 6788), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((9171, 9210), 'scipy.integrate.quad', 'si.quad', (['int_F_real', '(0)', '(70)'], {}), '(int_F_real, 0, 70, **quad_args)\n', (9178, 9210), True, 'import scipy.integrate as si\n'), ((11682, 11698), 'numpy.dot', 'np.dot', (['vxL', 'c_L'], {}), '(vxL, c_L)\n', (11688, 11698), True, 'import numpy as np\n'), ((11706, 11722), 'numpy.dot', 'np.dot', (['vzL', 'c_L'], {}), '(vzL, c_L)\n', (11712, 11722), True, 'import numpy as np\n'), ((11759, 11775), 'numpy.dot', 'np.dot', (['vxL', 'c_T'], {}), '(vxL, c_T)\n', (11765, 11775), True, 'import numpy as np\n'), ((11783, 11799), 'numpy.dot', 'np.dot', (['vzL', 'c_T'], {}), '(vzL, c_T)\n', (11789, 11799), True, 'import numpy as np\n'), ((12726, 12742), 'numpy.dot', 'np.dot', (['vxT', 'c_L'], {}), '(vxT, c_L)\n', (12732, 12742), True, 'import numpy as np\n'), ((12750, 12766), 'numpy.dot', 'np.dot', (['vzT', 'c_L'], {}), '(vzT, c_L)\n', (12756, 12766), True, 'import numpy as np\n'), ((12803, 12819), 'numpy.dot', 'np.dot', (['vxT', 'c_T'], {}), '(vxT, c_T)\n', (12809, 12819), True, 'import numpy as np\n'), ((12827, 12843), 'numpy.dot', 'np.dot', (['vzT', 'c_T'], {}), '(vzT, c_T)\n', (12833, 12843), True, 'import numpy as np\n'), ((2578, 2606), 'numpy.core.umath.sqrt', 'sqrt', (['(2 * xi + 1.0j * x ** 2)'], {}), '(2 * xi + 1.0j * x ** 2)\n', (2582, 2606), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((4611, 4650), 'scipy.integrate.quad', 'si.quad', (['int_F1_real', '(0)', '(1)'], {}), '(int_F1_real, 0, 1, **quad_args)\n', (4618, 4650), True, 'import scipy.integrate as si\n'), ((4758, 4798), 'scipy.integrate.quad', 'si.quad', (['int_F2_real', '(0)', '(50)'], {}), '(int_F2_real, 0, 50, **quad_args)\n', (4765, 4798), True, 'import scipy.integrate as si\n'), ((5179, 5218), 'scipy.integrate.quad', 'si.quad', (['int_F_imag', '(0)', '(70)'], {}), '(int_F_imag, 0, 70, **quad_args)\n', (5186, 5218), True, 'import 
scipy.integrate as si\n'), ((6954, 6977), 'numpy.core.umath.sqrt', 'sqrt', (['(2 + 1.0j * x ** 2)'], {}), '(2 + 1.0j * x ** 2)\n', (6958, 6977), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((8816, 8856), 'scipy.integrate.quad', 'si.quad', (['int_F2_real', '(0)', '(50)'], {}), '(int_F2_real, 0, 50, **quad_args)\n', (8823, 8856), True, 'import scipy.integrate as si\n'), ((9237, 9276), 'scipy.integrate.quad', 'si.quad', (['int_F_imag', '(0)', '(70)'], {}), '(int_F_imag, 0, 70, **quad_args)\n', (9244, 9276), True, 'import scipy.integrate as si\n'), ((11914, 11933), 'numpy.core.umath.exp', 'exp', (['(-1.0j * pi / 4)'], {}), '(-1.0j * pi / 4)\n', (11917, 11933), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((12045, 12060), 'numpy.dot', 'np.dot', (['v_L', 'nv'], {}), '(v_L, nv)\n', (12051, 12060), True, 'import numpy as np\n'), ((12221, 12235), 'numpy.dot', 'np.dot', (['ev', 'nv'], {}), '(ev, nv)\n', (12227, 12235), True, 'import numpy as np\n'), ((12549, 12564), 'numpy.dot', 'np.dot', (['v_T', 'tv'], {}), '(v_T, tv)\n', (12555, 12564), True, 'import numpy as np\n'), ((12567, 12581), 'numpy.dot', 'np.dot', (['ev', 'nv'], {}), '(ev, nv)\n', (12573, 12581), True, 'import numpy as np\n'), ((12584, 12599), 'numpy.dot', 'np.dot', (['v_T', 'ev'], {}), '(v_T, ev)\n', (12590, 12599), True, 'import numpy as np\n'), ((12602, 12616), 'numpy.dot', 'np.dot', (['tv', 'nv'], {}), '(tv, nv)\n', (12608, 12616), True, 'import numpy as np\n'), ((4677, 4716), 'scipy.integrate.quad', 'si.quad', (['int_F1_imag', '(0)', '(1)'], {}), '(int_F1_imag, 0, 1, **quad_args)\n', (4684, 4716), True, 'import scipy.integrate as si\n'), ((4825, 4865), 'scipy.integrate.quad', 'si.quad', (['int_F2_imag', '(0)', '(50)'], {}), '(int_F2_imag, 0, 50, **quad_args)\n', (4832, 4865), True, 'import scipy.integrate as si\n'), ((8679, 8687), 'numpy.core.umath.sqrt', 'sqrt', (['xi'], {}), '(xi)\n', (8683, 8687), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), 
((8883, 8923), 'scipy.integrate.quad', 'si.quad', (['int_F2_imag', '(0)', '(50)'], {}), '(int_F2_imag, 0, 50, **quad_args)\n', (8890, 8923), True, 'import scipy.integrate as si\n'), ((11883, 11895), 'numpy.core.umath.sqrt', 'sqrt', (['(2 / pi)'], {}), '(2 / pi)\n', (11887, 11895), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((12183, 12198), 'numpy.dot', 'np.dot', (['v_L', 'ev'], {}), '(v_L, ev)\n', (12189, 12198), True, 'import numpy as np\n'), ((13170, 13189), 'numpy.core.umath.exp', 'exp', (['(-1.0j * pi / 4)'], {}), '(-1.0j * pi / 4)\n', (13173, 13189), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((13301, 13316), 'numpy.dot', 'np.dot', (['v_L', 'nv'], {}), '(v_L, nv)\n', (13307, 13316), True, 'import numpy as np\n'), ((13477, 13491), 'numpy.dot', 'np.dot', (['ev', 'nv'], {}), '(ev, nv)\n', (13483, 13491), True, 'import numpy as np\n'), ((13806, 13821), 'numpy.dot', 'np.dot', (['v_T', 'tv'], {}), '(v_T, tv)\n', (13812, 13821), True, 'import numpy as np\n'), ((13824, 13838), 'numpy.dot', 'np.dot', (['ev', 'nv'], {}), '(ev, nv)\n', (13830, 13838), True, 'import numpy as np\n'), ((13841, 13856), 'numpy.dot', 'np.dot', (['v_T', 'ev'], {}), '(v_T, ev)\n', (13847, 13856), True, 'import numpy as np\n'), ((13859, 13873), 'numpy.dot', 'np.dot', (['tv', 'nv'], {}), '(tv, nv)\n', (13865, 13873), True, 'import numpy as np\n'), ((8752, 8760), 'numpy.core.umath.sqrt', 'sqrt', (['xi'], {}), '(xi)\n', (8756, 8760), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((12412, 12431), 'numpy.core.umath.exp', 'exp', (['(-1.0j * pi / 4)'], {}), '(-1.0j * pi / 4)\n', (12415, 12431), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((13139, 13151), 'numpy.core.umath.sqrt', 'sqrt', (['(2 / pi)'], {}), '(2 / pi)\n', (13143, 13151), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((13439, 13454), 'numpy.dot', 'np.dot', (['v_L', 'ev'], {}), '(v_L, ev)\n', (13445, 13454), True, 'import numpy as 
np\n'), ((12381, 12393), 'numpy.core.umath.sqrt', 'sqrt', (['(2 / pi)'], {}), '(2 / pi)\n', (12385, 12393), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((13669, 13688), 'numpy.core.umath.exp', 'exp', (['(-1.0j * pi / 4)'], {}), '(-1.0j * pi / 4)\n', (13672, 13688), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n'), ((13638, 13650), 'numpy.core.umath.sqrt', 'sqrt', (['(2 / pi)'], {}), '(2 / pi)\n', (13642, 13650), False, 'from numpy.core.umath import sin, cos, pi, exp, sqrt\n')] |
import torch
from torch import Tensor
import torch.nn as nn
import numpy as np
import signal_perceptron as sp
from utils import *
import time
#Train loops for first set of experiments (check exp1.py)
def train_pytorch(x_train,y_train,model,PATH,epochs,optimizer,loss_fn):
    """Train ``model`` from the same initial weights on each target in ``y_train``.

    For every target function in ``y_train`` the model is reset to the state
    dict stored at ``PATH`` and trained for ``epochs`` full-batch steps.

    Args:
        x_train: input tensor shared by all targets.
        y_train: iterable of 1-D target tensors (one per function to learn).
        model: torch module whose weights are reloaded from ``PATH`` before
            each target so every run starts identically.
        PATH: path of the saved ``state_dict`` used to reset the model.
        epochs: number of optimization steps per target.
        optimizer: torch optimizer bound to ``model``'s parameters.
        loss_fn: loss with signature ``loss_fn(pred, target)``.

    Returns:
        tuple ``(total_hist, avg_fl, learned_epochs, total_time)`` where
        ``total_hist[k]`` is a list of ``[epoch, loss]`` pairs for target k,
        ``avg_fl`` is the mean final loss over all targets,
        ``learned_epochs[k]`` holds the first epoch at which loss <= 1e-3
        (empty if never reached), and ``total_time[k]`` is a per-epoch array
        of wall-clock step times.
    """
    total_hist=[]
    final_loss=[]
    learned_epochs=[]
    total_time=[]
    for i in y_train:
        # Reset to the saved initial weights so every target starts from
        # the same parameters.
        model.load_state_dict(torch.load(PATH))
        i = i.unsqueeze(1)
        history_train=[]
        learned_epoch=[]
        time_backward=np.zeros(epochs)
        for j in range(0,epochs):
            start=time.time()
            pred=model(x_train)
            loss = loss_fn(pred,i)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            end = time.time()-start
            time_backward[j]=end
            # Detach before storing: keeping the live tensor retains the
            # whole autograd graph of every epoch (memory leak). This also
            # matches train_mh_pytorch, which stores detached losses.
            history_train.append([j,loss.detach()])
            if not bool(learned_epoch):
                if loss<=.001:
                    learned_epoch.append(j)
        learned_epochs.append(learned_epoch)
        final_loss.append(loss.detach().numpy())
        total_hist.append(history_train)
        total_time.append(time_backward)
    l=0
    for i in final_loss:
        l=l+i
    avg_fl=l/len(final_loss)
    return total_hist,avg_fl,learned_epochs,total_time
def train_numpy(x_train,y_train,model,epochs,learning_rate,loss_fn):
    """Fit a numpy signal perceptron to each target in ``y_train`` with
    plain gradient descent.

    The model parameters are re-initialised (``reset_params``) before each
    target. Returns ``(total_hist, avg_fl, learned_epochs, total_time)`` in
    the same layout as ``train_pytorch``.
    """
    total_hist = []
    final_loss = []
    learned_epochs = []
    total_time = []
    for target in y_train:
        model.reset_params()
        epoch_log = []
        first_learned = []
        step_times = np.zeros(epochs)
        for epoch in range(epochs):
            t0 = time.time()
            pred, signals = model.forward(x_train)
            loss = np.mean(loss_fn(pred, target))
            sp.GD_MSE_SP_step(target, x_train, model, learning_rate)
            step_times[epoch] = time.time() - t0
            epoch_log.append([epoch, loss])
            # remember only the first epoch at which the loss threshold is hit
            if not first_learned and loss <= .001:
                first_learned.append(epoch)
        learned_epochs.append(first_learned)
        final_loss.append(loss)
        total_hist.append(epoch_log)
        total_time.append(step_times)
    avg_fl = sum(final_loss) / len(final_loss)
    return total_hist, avg_fl, learned_epochs, total_time
#Train loops for second set of experiments (check exp2.py)
def train_mh_pytorch(x_train,y_train,model,PATH,epochs,optimizer,loss_fn):
    """Train a multi-head model on all target functions simultaneously.

    ``y_train`` is transposed so each column becomes one head's target, and
    the model is first reset from the state dict stored at ``PATH``.

    Returns ``(total_hist, final_loss, learned_epoch, time_backward)``: a
    single-entry list holding the ``[epoch, loss]`` history, the final loss
    as a numpy value, the first epoch where loss <= 1e-3 (empty list if
    never reached) and a per-epoch array of wall-clock step times.
    """
    targets = torch.transpose(y_train, 0, 1)
    model.load_state_dict(torch.load(PATH))
    history_train = []
    learned_epoch = []
    time_backward = np.zeros(epochs)
    for epoch in range(epochs):
        tic = time.time()
        loss = loss_fn(model(x_train), targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        time_backward[epoch] = time.time() - tic
        history_train.append([epoch, loss.detach().numpy()])
        if not learned_epoch and loss <= .001:
            learned_epoch.append(epoch)
    final_loss = loss.detach().numpy()
    total_hist = [history_train]
    return total_hist, final_loss, learned_epoch, time_backward
def train_mh_numpy(x_train,y_train,model,epochs,learning_rate,loss_fn):
    """Gradient-descent training of a multi-head numpy signal perceptron.

    Returns ``(total_hist, final_loss, learned_epoch, time_backward)`` —
    ``total_hist`` and ``final_loss`` are single-element lists, matching
    the layout produced by ``train_mh_pytorch``.
    """
    history_train = []
    learned_epoch = []
    time_backward = np.zeros(epochs)
    for epoch in range(epochs):
        tic = time.time()
        pred, signals = model.forward(x_train)
        loss = np.mean(loss_fn(pred, y_train))
        sp.GD_MSE_SP_step(y_train, x_train, model, learning_rate)
        time_backward[epoch] = time.time() - tic
        history_train.append([epoch, loss])
        # remember only the first epoch at which the loss threshold is hit
        if not learned_epoch and loss <= .001:
            learned_epoch.append(epoch)
    final_loss = [loss]
    total_hist = [history_train]
    return total_hist, final_loss, learned_epoch, time_backward
"""MNIST TRAINING LOOPS"""
def train_mnist(dataloader, model, loss_fn, optimizer,device):
    """Run one training epoch of ``model`` over ``dataloader``.

    Args:
        dataloader: torch DataLoader yielding ``(X, y)`` batches; the length
            of its ``.dataset`` is only used for progress printing.
        model: module to train.
        loss_fn: criterion ``loss_fn(pred, y)``.
        optimizer: optimizer bound to ``model``'s parameters.
        device: device the batches are moved to.

    Returns:
        tuple ``(loss, tb)``: the final batch's loss as a plain float
        (``None`` if the loader is empty) and a numpy array holding the
        wall-clock time of each forward/backward/step.
    """
    size = len(dataloader.dataset)
    time_backward = []
    loss_value = None  # stays None for an empty dataloader
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)
        # Compute prediction error
        start = time.time()
        pred = model(X)
        loss = loss_fn(pred, y)
        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        time_backward.append(time.time() - start)
        # .item() detaches from the graph and gives a consistent float
        # return type (previously the function returned a live tensor
        # unless the last batch index happened to satisfy batch % 100 == 0).
        loss_value = loss.item()
        if batch % 100 == 0:
            current = batch * len(X)
            print(f"loss: {loss_value:>7f} [{current:>5d}/{size:>5d}]")
    tb = np.asarray(time_backward)
    return loss_value, tb
def test_mnist(dataloader, model, loss_fn,device):
    """Evaluate ``model`` on ``dataloader``; print accuracy and mean loss.

    Returns ``(accuracy_percent, avg_loss)``.
    """
    n_samples = len(dataloader.dataset)
    n_batches = len(dataloader)
    model.eval()
    total_loss = 0
    n_correct = 0
    # no gradients needed for evaluation
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            total_loss += loss_fn(pred, y).item()
            n_correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    avg_loss = total_loss / n_batches
    accuracy = n_correct / n_samples
    print(f"Test Error: \n Accuracy: {(100*accuracy):>0.1f}%, Avg loss: {avg_loss:>8f} \n")
    return (100*accuracy) ,avg_loss
#Train loops for third set of experiments (check exp3.py)
def train_linear_numpy(y_train,sp_matrix):
    """Solve for signal-perceptron coefficients in closed form.

    Args:
        y_train: iterable of target vectors.
        sp_matrix: square design matrix of the signal basis.

    Returns:
        list of coefficient vectors, one per target:
        ``alphas[k] = sp_matrix^{-1} @ y_train[k]``.
    """
    # Invert once: the matrix does not depend on the target, so inverting
    # inside the loop (as before) repeated an O(n^3) computation per target.
    sp_inv = np.linalg.inv(sp_matrix)
    alphas = [sp_inv.dot(i) for i in y_train]
    return alphas
def test_linear_numpy(x_test,y_test,model,alphas,loss_fn):
    """Evaluate the closed-form solutions: load each alpha vector into
    ``model`` and average ``loss_fn`` over the test points.

    Returns a list with one mean loss per target in ``y_test``.
    """
    total_loss = []
    for idx in range(len(y_test)):
        model.load_params(alphas[idx])
        target = y_test[idx]
        # NOTE(review): the model is evaluated on the sample *index* j, not
        # on x_test[j]; presumably forward() maps an index to an input —
        # confirm against the signal-perceptron model implementation.
        losses = [loss_fn(model.forward(j), target[j]) for j in range(len(x_test))]
        total_loss.append(sum(losses) / len(x_test))
    return total_loss
| [
"torch.load",
"numpy.asarray",
"numpy.zeros",
"time.time",
"numpy.mean",
"numpy.linalg.inv",
"signal_perceptron.GD_MSE_SP_step",
"torch.no_grad",
"torch.transpose"
] | [((2540, 2570), 'torch.transpose', 'torch.transpose', (['y_train', '(0)', '(1)'], {}), '(y_train, 0, 1)\n', (2555, 2570), False, 'import torch\n'), ((2711, 2727), 'numpy.zeros', 'np.zeros', (['epochs'], {}), '(epochs)\n', (2719, 2727), True, 'import numpy as np\n'), ((3453, 3469), 'numpy.zeros', 'np.zeros', (['epochs'], {}), '(epochs)\n', (3461, 3469), True, 'import numpy as np\n'), ((518, 534), 'numpy.zeros', 'np.zeros', (['epochs'], {}), '(epochs)\n', (526, 534), True, 'import numpy as np\n'), ((1589, 1605), 'numpy.zeros', 'np.zeros', (['epochs'], {}), '(epochs)\n', (1597, 1605), True, 'import numpy as np\n'), ((2654, 2670), 'torch.load', 'torch.load', (['PATH'], {}), '(PATH)\n', (2664, 2670), False, 'import torch\n'), ((2772, 2783), 'time.time', 'time.time', ([], {}), '()\n', (2781, 2783), False, 'import time\n'), ((3514, 3525), 'time.time', 'time.time', ([], {}), '()\n', (3523, 3525), False, 'import time\n'), ((3623, 3636), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (3630, 3636), True, 'import numpy as np\n'), ((3645, 3702), 'signal_perceptron.GD_MSE_SP_step', 'sp.GD_MSE_SP_step', (['y_train', 'x_train', 'model', 'learning_rate'], {}), '(y_train, x_train, model, learning_rate)\n', (3662, 3702), True, 'import signal_perceptron as sp\n'), ((4321, 4332), 'time.time', 'time.time', ([], {}), '()\n', (4330, 4332), False, 'import time\n'), ((4723, 4748), 'numpy.asarray', 'np.asarray', (['time_backward'], {}), '(time_backward)\n', (4733, 4748), True, 'import numpy as np\n'), ((4946, 4961), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4959, 4961), False, 'import torch\n'), ((401, 417), 'torch.load', 'torch.load', (['PATH'], {}), '(PATH)\n', (411, 417), False, 'import torch\n'), ((587, 598), 'time.time', 'time.time', ([], {}), '()\n', (596, 598), False, 'import time\n'), ((1658, 1669), 'time.time', 'time.time', ([], {}), '()\n', (1667, 1669), False, 'import time\n'), ((1773, 1786), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (1780, 1786), 
True, 'import numpy as np\n'), ((1799, 1850), 'signal_perceptron.GD_MSE_SP_step', 'sp.GD_MSE_SP_step', (['i', 'x_train', 'model', 'learning_rate'], {}), '(i, x_train, model, learning_rate)\n', (1816, 1850), True, 'import signal_perceptron as sp\n'), ((2942, 2953), 'time.time', 'time.time', ([], {}), '()\n', (2951, 2953), False, 'import time\n'), ((3716, 3727), 'time.time', 'time.time', ([], {}), '()\n', (3725, 3727), False, 'import time\n'), ((4508, 4519), 'time.time', 'time.time', ([], {}), '()\n', (4517, 4519), False, 'import time\n'), ((775, 786), 'time.time', 'time.time', ([], {}), '()\n', (784, 786), False, 'import time\n'), ((1868, 1879), 'time.time', 'time.time', ([], {}), '()\n', (1877, 1879), False, 'import time\n'), ((5529, 5553), 'numpy.linalg.inv', 'np.linalg.inv', (['sp_matrix'], {}), '(sp_matrix)\n', (5542, 5553), True, 'import numpy as np\n')] |
import numpy
from rdkit.ML.Cluster import Murtagh
print('1')
# NOTE: `numpy.float` was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin `float` is the documented replacement (same dtype: float64).
d = numpy.array([[10.0, 5.0], [20.0, 20.0], [30.0, 10.0], [30.0, 15.0], [5.0, 10.0]], float)
print('2')
# clusters = Murtagh.ClusterData(d,len(d),Murtagh.WARDS)
# for i in range(len(clusters)):
#  clusters[i].Print()
# print('3')
# Build the condensed lower-triangular list of squared Euclidean distances
# expected by Murtagh.ClusterData(..., isDistData=1).
dists = []
for i in range(len(d)):
  for j in range(i):
    dist = sum((d[i] - d[j])**2)
    dists.append(dist)
dists = numpy.array(dists)
# Run each hierarchical-clustering criterion and print the resulting tree.
print('Wards:')
clusters = Murtagh.ClusterData(dists, len(d), Murtagh.WARDS, isDistData=1)
clusters[0].Print()
print('SLINK:')
clusters = Murtagh.ClusterData(dists, len(d), Murtagh.SLINK, isDistData=1)
clusters[0].Print()
print('CLINK:')
clusters = Murtagh.ClusterData(dists, len(d), Murtagh.CLINK, isDistData=1)
clusters[0].Print()
print('UPGMA:')
clusters = Murtagh.ClusterData(dists, len(d), Murtagh.UPGMA, isDistData=1)
clusters[0].Print()
| [
"numpy.array"
] | [((69, 168), 'numpy.array', 'numpy.array', (['[[10.0, 5.0], [20.0, 20.0], [30.0, 10.0], [30.0, 15.0], [5.0, 10.0]]', 'numpy.float'], {}), '([[10.0, 5.0], [20.0, 20.0], [30.0, 10.0], [30.0, 15.0], [5.0, \n 10.0]], numpy.float)\n', (80, 168), False, 'import numpy\n'), ((423, 441), 'numpy.array', 'numpy.array', (['dists'], {}), '(dists)\n', (434, 441), False, 'import numpy\n')] |
from pytest import fixture, mark
import qcodes
import numpy as np
from ADCProcessor import (
Unpacker, DigitalDownconversion, Filter, Synchronizer, TvMode
)
def adc(shape, if_freq, signal, noise, markers=False, segment_offset=None):
    '''
    Generate adc input that simulates a noisy readout signal generated by
    a Rabi type experiment.

    shape: `tuple`
        (averages, segments, samples, channel)
    if_freq: `float`
        Carrier frequency of the input signal, 1=Nyquist frequency
    signal: `float`
        Signal amplitude
    noise: `float`
        Noise amplitude
    markers: `bool`
        If True, encode a sequence-start marker and a fixed-position marker
        in the two most significant bits of channel 0 (samples are then
        scaled to 14 bits instead of 16).
    segment_offset: `int`, optional
        Index of the first segment. Defaults to a random number.
    '''
    averages, segments, samples, channels = shape
    # for each waveform, determine the state of the qubit (True or False)
    excited_exp = (1-np.cos(3*np.pi*(np.arange(segments))/segments))/2
    excited_shot = excited_exp[:,None] > np.random.rand(averages, segments, channels)
    # map the state to iq points
    ground = -(1+1j)/np.sqrt(2)
    excited = (1+1j)/np.sqrt(2)
    iq_shot = np.where(excited_shot, signal*excited, signal*ground)
    # modulate carrier with iq points
    carrier = np.exp(1j*np.pi*if_freq*np.arange(samples))
    waveforms = np.real(iq_shot[:,:,None,:]*carrier[:,None])
    # determine sequence start offset once; honour the caller's explicit
    # choice (previously the argument was ignored and always randomized)
    if segment_offset is None:
        segment_offset = np.random.randint(0, segments)
    # produce infinitely
    while True:
        # add noise
        # NOTE(review): noise is added to the same `waveforms` array on every
        # iteration, so it accumulates across yields — confirm intended.
        if noise:
            waveforms += noise*(-1+2*np.random.rand(averages, segments, samples, channels))
        # convert to short
        scale = (2**13) if markers else (2**15)
        data = np.clip(scale*waveforms, -scale, scale-1).astype(np.int16)
        if markers:
            # marker 1 is the sequence start marker
            # marker 2 is at a fixed position
            m1_start = min(10, samples)
            m1_stop = min(m1_start+10, samples)
            data &= 0x3fff
            data[:, 0, m1_start:m1_stop, 0] |= 0x8000
            data[:, :, m1_start:m1_stop, 0] |= 0x4000
        # convert to 2d and add sequence start offset
        data.shape = (data.shape[0]*data.shape[1],) + data.shape[2:]
        data = np.roll(data, segment_offset, 0)
        yield data
@fixture(scope='module')
def instrument():
    # Shared qcodes instrument; module scope avoids re-creating it per test.
    return qcodes.Instrument('ins')
class TestUnpacker(object):
    # Tests for Unpacker: sign extension of (16 - markers)-bit samples and
    # extraction of marker bits stored in the top bits of raw int16 data.
    @fixture
    def unpacker(self, instrument, markers):
        # Unpacker under test, configured for the current marker count.
        unpacker = Unpacker(instrument, 'unpacker')
        unpacker.markers.set(markers)
        return unpacker
    @fixture(params=[0,1,2])
    def markers(self, request):
        # Number of marker bits stored in the sample MSBs (0, 1 or 2).
        return request.param
    # sign extension
    @fixture
    def sign_extension(self, markers):
        # Function that sign-extends (16 - markers)-bit values to int16.
        return Unpacker.sign_extension_factory(16-markers)
    def test_sign_extension(self, sign_extension, markers):
        # Masking away the marker bits and sign-extending must reproduce the
        # original values over the full representable range.
        mask = (1<<(16-markers))-1
        data = np.random.randint(-(1<<(15-markers)), (1<<(15-markers))-1, (8,8),
                                 dtype=np.int16)
        assert np.all(data == sign_extension(data & mask))
    # marker extraction
    @mark.parametrize('raw,digital_ref,markers', [
        (np.array([0, 0, 0, 0]), None, 0),
        (np.array([[0, 0, 0, 0]]), None, 0),
        (np.array([0x8000, 0x4000, 0x2000, 0xc000]), np.array([1,0,0,1]), 1),
        (np.array([0x8000, 0x4000, 0x2000, 0xc000]), np.array([2,1,0,3]), 2),
        (np.array([[0x8000, 0x4000, 0x2000, 0xc000]]*2), np.array([0b1001]*2), 1),
        (np.array([[0x8000, 0x4000, 0x2000, 0xc000]]*2), np.array([0b10010011]*2), 2),
    ])
    def test_pack_markers(self, unpacker, raw, digital_ref):
        # Marker bits in the MSBs must be packed into the expected value:
        # per-sample for 1d input, per-waveform bitfield for 2d input.
        raw = raw.astype(np.int16)
        digital = unpacker.pack_markers(raw)
        assert np.all(digital == digital_ref)
    # float conversion
    def test_call(self, unpacker):
        # TODO: not implemented yet.
        pass
class TestDigitalDownconversion(object):
    # TODO: tests for DigitalDownconversion not implemented yet.
    pass
class TestFilter(object):
    # TODO: tests for Filter not implemented yet.
    pass
class TestSynchronizer(object):
    # TODO: tests for Synchronizer not implemented yet.
    pass
class TestTvMode(object):
    # End-to-end smoke test of the TvMode processing chain.
    @fixture
    def tvmode(self):
        return TvMode('tvmode')
    def test(self, tvmode):
        # Configure the chain: 2 marker bits, two-marker synchronisation,
        # DDC frequency matching the simulated input carrier below.
        tvmode.unpacker.markers.set(2)
        tvmode.sync.method.set('two')
        tvmode.ddc.intermediate_frequency.set(0.5)
        shape = (64, 64, 64, 1)
        if_freq = 0.5
        signal = 0.5
        noise = 0.1
        # Simulated Rabi readout with markers; just check that one block can
        # be pulled through the pipeline without raising.
        source = adc(shape, if_freq, signal, noise, markers=True)
        block_iter = tvmode(source)
        block_iter.__next__()
pass | [
"ADCProcessor.TvMode",
"numpy.roll",
"qcodes.Instrument",
"pytest.fixture",
"ADCProcessor.Unpacker",
"numpy.clip",
"ADCProcessor.Unpacker.sign_extension_factory",
"numpy.random.randint",
"numpy.where",
"numpy.arange",
"numpy.real",
"numpy.array",
"numpy.random.rand",
"numpy.all",
"numpy.... | [((2318, 2341), 'pytest.fixture', 'fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2325, 2341), False, 'from pytest import fixture, mark\n'), ((1154, 1211), 'numpy.where', 'np.where', (['excited_shot', '(signal * excited)', '(signal * ground)'], {}), '(excited_shot, signal * excited, signal * ground)\n', (1162, 1211), True, 'import numpy as np\n'), ((1320, 1370), 'numpy.real', 'np.real', (['(iq_shot[:, :, None, :] * carrier[:, None])'], {}), '(iq_shot[:, :, None, :] * carrier[:, None])\n', (1327, 1370), True, 'import numpy as np\n'), ((1429, 1459), 'numpy.random.randint', 'np.random.randint', (['(0)', 'segments'], {}), '(0, segments)\n', (1446, 1459), True, 'import numpy as np\n'), ((2371, 2395), 'qcodes.Instrument', 'qcodes.Instrument', (['"""ins"""'], {}), "('ins')\n", (2388, 2395), False, 'import qcodes\n'), ((2603, 2628), 'pytest.fixture', 'fixture', ([], {'params': '[0, 1, 2]'}), '(params=[0, 1, 2])\n', (2610, 2628), False, 'from pytest import fixture, mark\n'), ((998, 1042), 'numpy.random.rand', 'np.random.rand', (['averages', 'segments', 'channels'], {}), '(averages, segments, channels)\n', (1012, 1042), True, 'import numpy as np\n'), ((1097, 1107), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1104, 1107), True, 'import numpy as np\n'), ((1129, 1139), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1136, 1139), True, 'import numpy as np\n'), ((2263, 2295), 'numpy.roll', 'np.roll', (['data', 'segment_offset', '(0)'], {}), '(data, segment_offset, 0)\n', (2270, 2295), True, 'import numpy as np\n'), ((2502, 2534), 'ADCProcessor.Unpacker', 'Unpacker', (['instrument', '"""unpacker"""'], {}), "(instrument, 'unpacker')\n", (2510, 2534), False, 'from ADCProcessor import Unpacker, DigitalDownconversion, Filter, Synchronizer, TvMode\n'), ((2777, 2822), 'ADCProcessor.Unpacker.sign_extension_factory', 'Unpacker.sign_extension_factory', (['(16 - markers)'], {}), '(16 - markers)\n', (2808, 2822), False, 'from ADCProcessor import 
Unpacker, DigitalDownconversion, Filter, Synchronizer, TvMode\n'), ((2932, 3024), 'numpy.random.randint', 'np.random.randint', (['(-(1 << 15 - markers))', '((1 << 15 - markers) - 1)', '(8, 8)'], {'dtype': 'np.int16'}), '(-(1 << 15 - markers), (1 << 15 - markers) - 1, (8, 8),\n dtype=np.int16)\n', (2949, 3024), True, 'import numpy as np\n'), ((3760, 3790), 'numpy.all', 'np.all', (['(digital == digital_ref)'], {}), '(digital == digital_ref)\n', (3766, 3790), True, 'import numpy as np\n'), ((4073, 4089), 'ADCProcessor.TvMode', 'TvMode', (['"""tvmode"""'], {}), "('tvmode')\n", (4079, 4089), False, 'from ADCProcessor import Unpacker, DigitalDownconversion, Filter, Synchronizer, TvMode\n'), ((1284, 1302), 'numpy.arange', 'np.arange', (['samples'], {}), '(samples)\n', (1293, 1302), True, 'import numpy as np\n'), ((1725, 1770), 'numpy.clip', 'np.clip', (['(scale * waveforms)', '(-scale)', '(scale - 1)'], {}), '(scale * waveforms, -scale, scale - 1)\n', (1732, 1770), True, 'import numpy as np\n'), ((3192, 3214), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (3200, 3214), True, 'import numpy as np\n'), ((3235, 3259), 'numpy.array', 'np.array', (['[[0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0]])\n', (3243, 3259), True, 'import numpy as np\n'), ((3280, 3317), 'numpy.array', 'np.array', (['[32768, 16384, 8192, 49152]'], {}), '([32768, 16384, 8192, 49152])\n', (3288, 3317), True, 'import numpy as np\n'), ((3324, 3346), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (3332, 3346), True, 'import numpy as np\n'), ((3358, 3395), 'numpy.array', 'np.array', (['[32768, 16384, 8192, 49152]'], {}), '([32768, 16384, 8192, 49152])\n', (3366, 3395), True, 'import numpy as np\n'), ((3402, 3424), 'numpy.array', 'np.array', (['[2, 1, 0, 3]'], {}), '([2, 1, 0, 3])\n', (3410, 3424), True, 'import numpy as np\n'), ((3436, 3479), 'numpy.array', 'np.array', (['([[32768, 16384, 8192, 49152]] * 2)'], {}), '([[32768, 16384, 8192, 49152]] * 2)\n', (3444, 3479), 
True, 'import numpy as np\n'), ((3484, 3501), 'numpy.array', 'np.array', (['([9] * 2)'], {}), '([9] * 2)\n', (3492, 3501), True, 'import numpy as np\n'), ((3519, 3562), 'numpy.array', 'np.array', (['([[32768, 16384, 8192, 49152]] * 2)'], {}), '([[32768, 16384, 8192, 49152]] * 2)\n', (3527, 3562), True, 'import numpy as np\n'), ((3567, 3586), 'numpy.array', 'np.array', (['([147] * 2)'], {}), '([147] * 2)\n', (3575, 3586), True, 'import numpy as np\n'), ((923, 942), 'numpy.arange', 'np.arange', (['segments'], {}), '(segments)\n', (932, 942), True, 'import numpy as np\n'), ((1576, 1629), 'numpy.random.rand', 'np.random.rand', (['averages', 'segments', 'samples', 'channels'], {}), '(averages, segments, samples, channels)\n', (1590, 1629), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import sqlalchemy, os
from matplotlib import pyplot as plt
import datetime as dt
import seaborn as sns
# Connect to the flight-performance database; fall back to a local
# PostgreSQL instance when DATABASE_URL is not set.
try:
    sqlURL = os.environ['DATABASE_URL']
except:
    sqlURL = "postgresql://localhost/AVPerformance"
engine = sqlalchemy.create_engine(sqlURL)
conn = engine.connect()
flights = pd.read_sql('analysed_flights', conn)
# Manual correction of one record's date — presumably a known-bad row;
# TODO confirm row index 42 is stable across database reloads.
flights.loc[42,'flightDate'] = dt.datetime(2019, 11,22)
flights.flightDate = pd.to_datetime(flights.flightDate, errors='coerce', format="%Y-%m-%d")
flights.sort_values(by=['flightDate'], inplace=True)
# Subset of flights with continuous sensor data.
ContinuousFlights = flights[flights.continuousData]
flights.reset_index(inplace=True)
flights.describe()
flights.loc[0]
# plt.style.available
plt.style.use('ggplot')
def is_outlier(points, thresh=2.5):
    """Flag outliers via the modified z-score (median absolute deviation).

    points : array of shape (n,) or (n, d); a 1-d array is treated as a
        single feature column.
    thresh : points whose modified z-score exceeds this are flagged.

    Returns a boolean array of length n, True where the point is an outlier.
    """
    if points.ndim == 1:
        points = points.reshape(-1, 1)
    center = np.median(points, axis=0)
    distances = np.sqrt(((points - center) ** 2).sum(axis=-1))
    mad = np.median(distances)
    return 0.6745 * distances / mad > thresh
def scatterPlot(flights, x, y, removeOutliers=True):
    """Scatter-plot column ``y`` against column ``x`` of ``flights``.

    Rows where either column is NaN are dropped; when ``removeOutliers`` is
    True, joint outliers (per ``is_outlier``) are removed as well and the
    remaining row count is printed.
    """
    # .copy() so the outlier flag is written to a private frame instead of a
    # view of the caller's DataFrame (avoids SettingWithCopyWarning and
    # accidental mutation of `flights`).
    validData = flights[flights[x].notna() & flights[y].notna()].copy()
    if removeOutliers:
        validData['isoutlier'] = is_outlier(validData[[x, y]].to_numpy())
        filtered = validData[~validData.isoutlier]
        print(len(filtered))
    else:
        filtered = validData
    plt.scatter(filtered[x], filtered[y])
    plt.xlabel(x)
    plt.ylabel(y)
    plt.show()
def heatMap(flights, x, y, z, removeOutliers=True):
    """Plot columns ``x``/``y``/``z`` of ``flights`` as a pseudo heat map.

    Rows with NaN in any of the three columns are dropped; when
    ``removeOutliers`` is True, joint outliers (per ``is_outlier``) are
    removed as well. The remaining rows are sorted by (x, y) and shown with
    ``imshow`` scaled to the x/y data extent.
    """
    # .copy() so flagging / sorting happens on a private frame rather than a
    # view of the caller's DataFrame (avoids SettingWithCopyWarning and
    # accidental mutation of `flights`).
    validData = flights[flights[x].notna() & flights[y].notna() & flights[z].notna()].copy()
    if removeOutliers:
        validData['isoutlier'] = is_outlier(validData[[x, y, z]].to_numpy())
        filtered = validData[~validData.isoutlier]
    else:
        filtered = validData
    filtered.sort_values(axis=0, by=[x, y], inplace=True)
    filtered.reset_index(inplace=True)
    plt.imshow(filtered[[x, y, z]].to_numpy(dtype='float'), cmap='plasma', interpolation='none', origin='lower', extent=(filtered[x].min(), filtered[x].max(), filtered[y].min(), filtered[y].max()), aspect='auto', vmin=filtered[z].min())
    plt.colorbar()
    plt.xlabel(x)
    plt.ylabel(y)
    plt.show()
# Exploratory scatter plots of climb/takeoff performance relationships.
scatterPlot(flights, 'Climb Average temp vs ISA Actual','Climb Average Vertical Speed Actual', removeOutliers=True )
scatterPlot(flights, 'Takeoff IAS Variance','Takeoff Roll Variance', removeOutliers=True )
scatterPlot(flights, 'Climb Time Actual','Climb Average Vertical Speed Variance', removeOutliers=True )
scatterPlot(flights, 'Take off Fuel Flow Actual','Climb Max CHT Actual', removeOutliers=True )
scatterPlot(flights, 'Climb Average temp vs ISA Actual','Climb Highest Average CHT Actual', removeOutliers=True )
scatterPlot(flights, 'Take off Fuel Flow Actual','Climb Highest Average CHT Actual', removeOutliers=True )
heatMap(flights, 'Climb Average temp vs ISA Actual', 'Take off Fuel Flow Actual', 'Climb Highest Average CHT Actual', removeOutliers=True )
#looking at stability
# Collect all columns whose name ends in 'Stability' and summarise them.
flightColumns = flights.columns.values
stabilityColumns = []
for column in flightColumns:
    if column[-9:] == 'Stability':
        stabilityColumns.append(column)
flights[stabilityColumns].describe()
# Cruise / climb relationships (rows from index 27 onward for the first two —
# presumably earlier flights lack intercooler data; TODO confirm).
plt.scatter(flights.loc[27:,'Cruise Max Altitude Actual'], flights.loc[27:,'Cruise Intercooler Efficiency Actual'])
plt.scatter(flights.loc[27:,'Cruise Average Temp vs ISA Actual'], flights.loc[27:,'Cruise Intercooler Efficiency Actual'])
plt.scatter(flights.loc[:,'Climb Average temp vs ISA Actual'], flights.loc[:,'Climb Max CHT Actual'])
plt.scatter(flights.loc[:,'Climb Average temp vs ISA Actual'], flights.loc[:,'Climb Max TIT Actual'])
plt.scatter(flights.loc[:,'Climb Average temp vs ISA Actual'], flights.loc[:,'Climb Average Vertical Speed Actual'])
flights.loc[flights['Cruise Intercooler Efficiency Actual'].idxmax(), 'file'] | [
"numpy.sum",
"matplotlib.pyplot.show",
"numpy.median",
"matplotlib.pyplot.scatter",
"datetime.datetime",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.style.use",
"pandas.to_datetime",
"pandas.read_sql",
"sqlalchemy.create_engine",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"... | [((258, 290), 'sqlalchemy.create_engine', 'sqlalchemy.create_engine', (['sqlURL'], {}), '(sqlURL)\n', (282, 290), False, 'import sqlalchemy, os\n'), ((326, 363), 'pandas.read_sql', 'pd.read_sql', (['"""analysed_flights"""', 'conn'], {}), "('analysed_flights', conn)\n", (337, 363), True, 'import pandas as pd\n'), ((395, 420), 'datetime.datetime', 'dt.datetime', (['(2019)', '(11)', '(22)'], {}), '(2019, 11, 22)\n', (406, 420), True, 'import datetime as dt\n'), ((441, 511), 'pandas.to_datetime', 'pd.to_datetime', (['flights.flightDate'], {'errors': '"""coerce"""', 'format': '"""%Y-%m-%d"""'}), "(flights.flightDate, errors='coerce', format='%Y-%m-%d')\n", (455, 511), True, 'import pandas as pd\n'), ((710, 733), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (723, 733), True, 'from matplotlib import pyplot as plt\n'), ((3233, 3354), 'matplotlib.pyplot.scatter', 'plt.scatter', (["flights.loc[27:, 'Cruise Max Altitude Actual']", "flights.loc[27:, 'Cruise Intercooler Efficiency Actual']"], {}), "(flights.loc[27:, 'Cruise Max Altitude Actual'], flights.loc[27:,\n 'Cruise Intercooler Efficiency Actual'])\n", (3244, 3354), True, 'from matplotlib import pyplot as plt\n'), ((3349, 3478), 'matplotlib.pyplot.scatter', 'plt.scatter', (["flights.loc[27:, 'Cruise Average Temp vs ISA Actual']", "flights.loc[27:, 'Cruise Intercooler Efficiency Actual']"], {}), "(flights.loc[27:, 'Cruise Average Temp vs ISA Actual'], flights.\n loc[27:, 'Cruise Intercooler Efficiency Actual'])\n", (3360, 3478), True, 'from matplotlib import pyplot as plt\n'), ((3472, 3580), 'matplotlib.pyplot.scatter', 'plt.scatter', (["flights.loc[:, 'Climb Average temp vs ISA Actual']", "flights.loc[:, 'Climb Max CHT Actual']"], {}), "(flights.loc[:, 'Climb Average temp vs ISA Actual'], flights.loc\n [:, 'Climb Max CHT Actual'])\n", (3483, 3580), True, 'from matplotlib import pyplot as plt\n'), ((3574, 3682), 'matplotlib.pyplot.scatter', 'plt.scatter', (["flights.loc[:, 
'Climb Average temp vs ISA Actual']", "flights.loc[:, 'Climb Max TIT Actual']"], {}), "(flights.loc[:, 'Climb Average temp vs ISA Actual'], flights.loc\n [:, 'Climb Max TIT Actual'])\n", (3585, 3682), True, 'from matplotlib import pyplot as plt\n'), ((3676, 3799), 'matplotlib.pyplot.scatter', 'plt.scatter', (["flights.loc[:, 'Climb Average temp vs ISA Actual']", "flights.loc[:, 'Climb Average Vertical Speed Actual']"], {}), "(flights.loc[:, 'Climb Average temp vs ISA Actual'], flights.loc\n [:, 'Climb Average Vertical Speed Actual'])\n", (3687, 3799), True, 'from matplotlib import pyplot as plt\n'), ((846, 871), 'numpy.median', 'np.median', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (855, 871), True, 'import numpy as np\n'), ((883, 922), 'numpy.sum', 'np.sum', (['((points - median) ** 2)'], {'axis': '(-1)'}), '((points - median) ** 2, axis=-1)\n', (889, 922), True, 'import numpy as np\n'), ((932, 945), 'numpy.sqrt', 'np.sqrt', (['diff'], {}), '(diff)\n', (939, 945), True, 'import numpy as np\n'), ((970, 985), 'numpy.median', 'np.median', (['diff'], {}), '(diff)\n', (979, 985), True, 'import numpy as np\n'), ((1418, 1455), 'matplotlib.pyplot.scatter', 'plt.scatter', (['filtered[x]', 'filtered[y]'], {}), '(filtered[x], filtered[y])\n', (1429, 1455), True, 'from matplotlib import pyplot as plt\n'), ((1459, 1472), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x'], {}), '(x)\n', (1469, 1472), True, 'from matplotlib import pyplot as plt\n'), ((1477, 1490), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y'], {}), '(y)\n', (1487, 1490), True, 'from matplotlib import pyplot as plt\n'), ((1495, 1505), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1503, 1505), True, 'from matplotlib import pyplot as plt\n'), ((2168, 2182), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2180, 2182), True, 'from matplotlib import pyplot as plt\n'), ((2187, 2200), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x'], {}), '(x)\n', (2197, 2200), True, 'from 
matplotlib import pyplot as plt\n'), ((2205, 2218), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y'], {}), '(y)\n', (2215, 2218), True, 'from matplotlib import pyplot as plt\n'), ((2223, 2233), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2231, 2233), True, 'from matplotlib import pyplot as plt\n')] |
import os
import cv2
import numpy as np
import torch.utils.data as td
import pandas as pd
import config as cfg
from datasets import ds_utils
from utils import face_processing as fp
from constants import *
from utils.nn import to_numpy, Batch
from landmarks.lmutils import create_landmark_heatmaps
from utils.face_extractor import FaceExtractor
import utils.geometry
from torchvision import transforms as tf
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
# AffectNet expression labels, indexed by the annotation 'expression' column.
CLASS_NAMES = ['Neutral', 'Happy', 'Sad', 'Surprise', 'Fear', 'Disgust', 'Anger', 'Contempt']
# Default per-class cap used by AffectNet.rebalance_classes (effectively no cap).
MAX_IMAGES_PER_EXPRESSION = 1000000
class AffectNet(td.Dataset):
    """Torch dataset wrapper for the AffectNet facial-expression database."""
    classes = CLASS_NAMES
    # Fixed plotting styles: one color and one marker per expression class.
    colors = ['tab:gray', 'tab:orange', 'tab:brown', 'tab:pink', 'tab:cyan', 'tab:olive', 'tab:red', 'tab:blue']
    markers = ['s', 'o', '>', '<', '^', 'v', 'P', 'd']
    def __init__(self, root_dir=cfg.AFFECTNET_ROOT, train=True,
                 transform=None, crop_type='tight', color=True, start=None, max_samples=None,
                 outlier_threshold=None, deterministic=None, use_cache=True,
                 detect_face=False, align_face_orientation=False, min_conf=cfg.MIN_OPENFACE_CONFIDENCE, daug=0,
                 return_landmark_heatmaps=False, landmark_sigma=9, landmark_ids=range(68),
                 return_modified_images=False, crop_source='lm_openface', **kwargs):
        """Load (or build and cache) the AffectNet annotation table.

        On first use the CSV annotation file is read, non-face rows are
        dropped, OpenFace pose/confidence/landmark features are attached,
        and the result is pickled next to the CSV; later runs load the
        pickle directly. Training data is shuffled deterministically and
        rebalanced per expression class.

        Args:
            root_dir: dataset root containing images, features and annotations.
            train: select the 'training' or 'validation' annotation file.
            transform: unused here — overwritten below by the transform built
                via ds_utils.build_transform.
            crop_type: one of 'fullsize', 'tight', 'loose'.
            color: load color (True) or grayscale images.
            start, max_samples: optional window into the balanced sample list.
            outlier_threshold: threshold stored for outlier removal.
            deterministic: deterministic augmentation; defaults to True for
                validation and False for training.
            use_cache: use cached cropped images.
            detect_face, align_face_orientation: face-extraction options.
            min_conf: minimum OpenFace confidence (currently unused below —
                the filtering line is commented out).
            daug: data-augmentation level passed to build_transform.
            return_landmark_heatmaps, landmark_sigma, landmark_ids: landmark
                heatmap generation options.
            return_modified_images: also return augmented image variants.
            crop_source: which detection source crops are taken from.
        """
        assert(crop_type in ['fullsize', 'tight', 'loose'])
        assert(crop_source in ['bb_ground_truth', 'lm_ground_truth', 'lm_cnn', 'lm_openface'])
        self.face_extractor = FaceExtractor()
        self.mode = TRAIN if train else VAL
        self.crop_source = crop_source
        self.use_cache = use_cache
        self.detect_face = detect_face
        self.align_face_orientation = align_face_orientation
        self.return_landmark_heatmaps = return_landmark_heatmaps
        self.return_modified_images = return_modified_images
        self.landmark_sigma = landmark_sigma
        self.landmark_ids = landmark_ids
        self.start = start
        self.max_samples = max_samples
        self.root_dir = root_dir
        self.crop_type = crop_type
        self.color = color
        self.outlier_threshold = outlier_threshold
        self.transform = transform
        # Directory layout: full images, cached crops per source, OpenFace features.
        self.fullsize_img_dir = os.path.join(self.root_dir, 'cropped_Annotated')
        self.cropped_img_dir = os.path.join(self.root_dir, 'crops', crop_source)
        self.feature_dir = os.path.join(self.root_dir, 'features')
        annotation_filename = 'training' if train else 'validation'
        # Cached (modified) annotation table; built from the CSV when missing.
        path_annotations_mod = os.path.join(root_dir, annotation_filename + '.mod.pkl')
        if os.path.isfile(path_annotations_mod):
            print('Reading pickle file...')
            self._annotations = pd.read_pickle(path_annotations_mod)
        else:
            print('Reading CSV file...')
            self._annotations = pd.read_csv(os.path.join(root_dir, annotation_filename+'.csv'))
            print('done.')
            # drop non-faces
            self._annotations = self._annotations[self._annotations.expression < 8]
            # Samples in annotation file are somewhat clustered by expression.
            # Shuffle to create a more even distribution.
            # NOTE: deterministic, always creates the same order
            if train:
                from sklearn.utils import shuffle
                self._annotations = shuffle(self._annotations, random_state=2)
            # remove samples with inconsistent expression<->valence/arousal values
            self._remove_outliers()
            # Attach per-image OpenFace head pose, detection confidence and
            # landmarks, then cache everything as a pickle.
            poses = []
            confs = []
            landmarks = []
            for cnt, filename in enumerate(self._annotations.subDirectory_filePath):
                if cnt % 1000 == 0:
                    print(cnt)
                filename_noext = os.path.splitext(filename)[0]
                conf, lms, pose = ds_utils.read_openface_detection(os.path.join(self.feature_dir, filename_noext))
                poses.append(pose)
                confs.append(conf)
                landmarks.append(lms)
            self._annotations['pose'] = poses
            self._annotations['conf'] = confs
            self._annotations['landmarks_of'] = landmarks
            # self.annotations.to_csv(path_annotations_mod, index=False)
            self._annotations.to_pickle(path_annotations_mod)
        poses = np.abs(np.stack(self._annotations.pose.values))
        only_good_image_for_training = True
        if train and only_good_image_for_training:
            print(len(self._annotations))
            min_rot_deg = 30
            max_rot_deg = 90
            # print('Limiting rotation to +-[{}-{}] degrees...'.format(min_rot_deg, max_rot_deg))
            # self._annotations = self._annotations[(poses[:, 0] < np.deg2rad(max_rot_deg)) &
            #                                       (poses[:, 1] < np.deg2rad(max_rot_deg)) &
            #                                       (poses[:, 2] < np.deg2rad(max_rot_deg))]
            # self._annotations = self._annotations[(np.deg2rad(min_rot_deg) < poses[:, 0]) |
            #                                       (np.deg2rad(min_rot_deg) < poses[:, 1])]
            # self._annotations = self._annotations[np.deg2rad(min_rot_deg) < poses[:, 1] ]
            print(len(self._annotations))
            # print('Removing OpenFace confs <={:.2f}...'.format(min_conf))
            # self._annotations = self._annotations[self._annotations.conf > cfg.MIN_OPENFACE_CONFIDENCE]
            # print(len(self._annotations))
        # select by Valence/Arousal
        # min_arousal = 0.0
        # print('Removing arousal <={:.2f}...'.format(min_arousal))
        # self._annotations = self._annotations[self._annotations.arousal > min_arousal]
        # print(len(self._annotations))
        # There is (at least) one missing image in the dataset. Remove by checking face width:
        self._annotations = self._annotations[self._annotations.face_width > 0]
        # self._annotations_balanced = self._annotations
        # self.filter_labels(label_dict_exclude={'expression': 0})
        # self.filter_labels(label_dict_exclude={'expression': 1})
        # self._annotations = self._annotations[self._annotations.arousal > 0.2]
        self.rebalance_classes()
        if deterministic is None:
            deterministic = self.mode != TRAIN
        self.transform = ds_utils.build_transform(deterministic, self.color, daug)
        # Crop pipeline applied when converting cached crops to tensors;
        # mean values are the VGGFace(2) channel means.
        transforms = [fp.CenterCrop(cfg.INPUT_SIZE)]
        transforms += [fp.ToTensor() ]
        transforms += [fp.Normalize([0.518, 0.418, 0.361], [1, 1, 1])]  # VGGFace(2)
        self.crop_to_tensor = tf.Compose(transforms)
def filter_labels(self, label_dict=None, label_dict_exclude=None):
    """Filter the annotation table in place.

    :param label_dict: dict column->value; keep only rows where each
        column equals the given value.
    :param label_dict_exclude: dict column->value; drop rows where the
        column equals the given value.
    """
    if label_dict is not None:
        print("Applying include filter to labels: {}".format(label_dict))
        for column, wanted in label_dict.items():
            keep = self.annotations[column] == wanted
            self.annotations = self.annotations[keep]
    if label_dict_exclude is not None:
        print("Applying exclude filter to labels: {}".format(label_dict_exclude))
        for column, unwanted in label_dict_exclude.items():
            keep = self.annotations[column] != unwanted
            self.annotations = self.annotations[keep]
    print(" Number of images: {}".format(len(self.annotations)))
def rebalance_classes(self, max_images_per_class=MAX_IMAGES_PER_EXPRESSION):
    """Cap the number of training images per expression class, then
    restrict the balanced table to the configured sample window
    [start, start + max_samples).

    Outside of training mode the full annotation table is used as-is.
    """
    if self.mode == TRAIN:
        print('Limiting number of images to {} per class...'.format(max_images_per_class))
        from sklearn.utils import shuffle
        # running per-class index; after shuffling, rows whose index
        # exceeds the cap are dropped
        self._annotations['cls_idx'] = self._annotations.groupby('expression').cumcount()
        self._annotations = shuffle(self._annotations)
        keep = self._annotations.cls_idx < max_images_per_class
        self._annotations_balanced = self._annotations[keep]
        print(len(self._annotations_balanced))
    else:
        self._annotations_balanced = self._annotations
    # optional [start, start + max_samples) window
    lo = self.start if self.start is not None else 0
    hi = lo + self.max_samples if self.max_samples is not None else None
    self._annotations_balanced = self._annotations_balanced[lo:hi]
@property
def labels(self):
    """Expression class id of every row in the balanced table."""
    table = self.annotations
    return table['expression'].values
@property
def heights(self):
    """Face bounding-box height (pixels) of every row."""
    table = self.annotations
    return table['face_height'].values
@property
def widths(self):
    """Face bounding-box width (pixels) of every row."""
    table = self.annotations
    return table['face_width'].values
@property
def annotations(self):
    """The class-balanced annotation table currently in use."""
    return self._annotations_balanced

@annotations.setter
def annotations(self, table):
    # replacing the table replaces the balanced view directly
    self._annotations_balanced = table
def print_stats(self):
    """Print a human-readable summary of the class distribution."""
    summary = self._stats_repr()
    print(summary)
def _stats_repr(self):
    """Build a multi-line report of per-class sample counts,
    percentages and class names, plus the overall total."""
    labels = self.annotations.expression
    total = self.__len__()
    parts = [" Class sizes:\n"]
    for class_id in np.unique(labels):
        count = len(np.where(labels == class_id)[0])
        parts.append(" {:<6} ({:.2f}%)\t({})\n".format(count, 100.0 * count / total, self.classes[class_id]))
    parts.append(" --------------------------------\n")
    parts.append(" {:<6}\n".format(len(labels)))
    return "".join(parts)
def __repr__(self):
    """Dataset summary: name, size, split and class statistics."""
    header = 'Dataset ' + self.__class__.__name__ + '\n'
    size_line = '    Number of datapoints: {}\n'.format(self.__len__())
    split_line = '    Split: {}\n'.format(self.mode)
    return header + size_line + split_line + self._stats_repr()
def __len__(self):
    """Number of rows in the balanced annotation table."""
    table = self.annotations
    return len(table)
def get_class_sizes(self):
    """Return the per-expression sample counts, ordered by class id."""
    per_class = self.annotations.groupby(by='expression').size()
    return per_class.values
def _remove_outliers(self):
    """Drop rows whose (valence, arousal) point lies farther than
    `self.outlier_threshold` (Mahalanobis distance) from its annotated
    expression-class distribution.

    No-op when no threshold is configured. Adds 'dist' and
    'class_pred' columns to the annotation table as a side effect.
    """
    if self.outlier_threshold is None:
        return
    from utils.exprec import calc_mahalanobis_covs, get_expression_dists
    covs = calc_mahalanobis_covs(self.annotations)
    # BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0 -- use
    # column selection + to_numpy() instead.
    VA = self._annotations[['valence', 'arousal']].to_numpy()
    true_class = self._annotations['expression'].values
    dists = get_expression_dists(VA, covs)
    class_preds = np.argmin(dists, axis=1)
    # distance of each sample to its *annotated* class distribution
    true_class_dist = dists[range(len(dists)), tuple(true_class)]
    self._annotations['dist'] = true_class_dist
    self._annotations['class_pred'] = class_preds
    count_before = len(self._annotations)
    self._annotations = self._annotations.loc[self._annotations['dist'] < self.outlier_threshold]
    print("Removed {} outliers from dataset (th={}).".format(count_before - len(self._annotations), self.outlier_threshold))
def parse_landmarks(self, landmarks):
    """Parse a ';'-separated string of alternating x/y coordinates into
    an (N, 2) float32 array of landmark points.

    :raises ValueError: on malformed input, with the original parse
        error attached as the cause.
    """
    try:
        vals = [float(s) for s in landmarks.split(';')]
        return np.array([(x, y) for x, y in zip(vals[::2], vals[1::2])], dtype=np.float32)
    except (ValueError, TypeError, AttributeError) as exc:
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; catch only parse failures and
        # keep the original cause chained for debugging.
        raise ValueError("Invalid landmarks {}".format(landmarks)) from exc
def get_bounding_box(self, sample):
    """Build an enlarged, square-ish face bounding box from the
    ground-truth face rectangle of `sample`, then scale it by the
    crop/input size ratio."""
    left = sample.face_x
    top = sample.face_y
    width = sample.face_width
    height = sample.face_height
    right, bottom = left + width, top + height
    # normalize an upside-down rectangle
    if top > bottom:
        top, bottom = bottom, top
    height = bottom - top
    assert height >= 0
    # shift the box downward: 5% at the top edge, 25% at the bottom
    top_new = int(top + 0.05 * height)
    bottom_new = int(bottom + 0.25 * height)
    # make the box as wide as it is tall, centered horizontally
    side = bottom_new - top_new
    center_x = (right + left) / 2
    left_new = center_x - side / 2
    right_new = center_x + side / 2
    # in case right eye is actually left of right eye...
    if left_new > right_new:
        left_new, right_new = right_new, left_new
    box = np.array([left_new, top_new, right_new, bottom_new], dtype=np.float32)
    # extend area by crop border margins
    scale_factor = cfg.CROP_SIZE / cfg.INPUT_SIZE
    return utils.geometry.scaleBB(box, scale_factor, scale_factor, typeBB=2)
def __getitem__(self, idx):
    """Load, crop and transform the sample at row `idx`.

    Returns a dict with keys 'image', 'landmarks', 'pose', 'id',
    'fnames', 'expression' and, depending on configuration, also
    'image_mod' (randomly occluded crop) and 'lm_heatmaps'.
    """
    sample = self.annotations.iloc[idx]
    filename = sample.subDirectory_filePath
    pose = sample.pose
    bb = None
    landmarks_for_crop = None
    # landmarks handed back to the caller are always the AffectNet ground truth
    landmarks_to_return = self.parse_landmarks(sample.facial_landmarks)
    # choose what the cropper aligns on, depending on crop_source
    if self.crop_source == 'bb_ground_truth':
        bb = self.get_bounding_box(sample)
    elif self.crop_source == 'lm_ground_truth':
        landmarks_for_crop = landmarks_to_return
    elif self.crop_source == 'lm_openface':
        # NOTE(review): of_conf is unused below; sample.conf is read directly
        of_conf, landmarks_for_crop = sample.conf, sample.landmarks_of
        # if OpenFace didn't detect a face, fall back to AffectNet landmarks
        if sample.conf <= 0.1:
            try:
                landmarks_for_crop = self.parse_landmarks(sample.facial_landmarks)
            except ValueError:
                pass
    try:
        crop, landmarks, pose, cropper = self.face_extractor.get_face(filename, self.fullsize_img_dir,
                                                                      self.cropped_img_dir, landmarks=landmarks_for_crop,
                                                                      bb=bb, pose=pose, use_cache=self.use_cache,
                                                                      detect_face=False, crop_type=self.crop_type,
                                                                      aligned=self.align_face_orientation)
    except AssertionError:
        # log the offending file before propagating the extractor failure
        print(filename)
        raise
    # map the ground-truth landmarks into the crop's coordinate frame
    landmarks, _ = cropper.apply_to_landmarks(landmarks_to_return)
    # vis.show_landmarks(crop, landmarks, title='lms affectnet', wait=0, color=(0,0,255))
    cropped_sample = {'image': crop, 'landmarks': landmarks, 'pose': pose}
    item = self.transform(cropped_sample)
    em_val_ar = np.array([[sample.expression, sample.valence, sample.arousal]], dtype=np.float32)
    result = self.crop_to_tensor(item)
    result.update({
        'id': 0,
        'fnames': filename,
        'expression': em_val_ar
    })
    if self.return_modified_images:
        # additionally return a randomly occluded copy of the crop
        mod_transforms = tf.Compose([fp.RandomOcclusion()])
        crop_occ = mod_transforms(item['image'])
        crop_occ = self.crop_to_tensor(crop_occ)
        result['image_mod'] = crop_occ
    if self.return_landmark_heatmaps:
        result['lm_heatmaps'] = create_landmark_heatmaps(result['landmarks'], self.landmark_sigma, self.landmark_ids)
    return result
def get_face(self, filename, size=(cfg.CROP_SIZE, cfg.CROP_SIZE), use_cache=True):
    """Return (crop, landmarks, pose) for the annotation row whose
    subDirectory_filePath equals `filename`.

    Prefers OpenFace landmarks; falls back to the AffectNet ground
    truth when the OpenFace confidence is low (<= 0.9).

    :param filename : relative image path used as the lookup key;
    :param size     : output crop size;
    :param use_cache: whether the extractor may reuse cached crops.
    """
    sample = self._annotations.loc[self._annotations.subDirectory_filePath == filename].iloc[0]
    landmarks = sample.landmarks_of.astype(np.float32)
    pose = sample.pose
    # if OpenFace didn't detect a face, fall back to AffectNet landmarks
    if sample.conf <= 0.9:
        landmarks = self.parse_landmarks(sample.facial_landmarks)
    # BUGFIX: the `use_cache` parameter was ignored (use_cache=True was
    # hard-coded in the call); forward it so callers can bypass the cache.
    crop, landmarks, pose, _ = self.face_extractor.get_face(filename, self.fullsize_img_dir, self.cropped_img_dir,
                                                            crop_type='tight', landmarks=landmarks, pose=pose,
                                                            use_cache=use_cache, detect_face=False, size=size)
    return crop, landmarks, pose
def show_landmarks(self, img, landmarks):
    """Draw landmark points on `img` and display it until a key is pressed."""
    for point in landmarks:
        center = (int(point[0]), int(point[1]))
        cv2.circle(img, center, 3, (0, 0, 255), -1)
    cv2.imshow('landmarks', cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    cv2.waitKey(0)
def extract_features(st=None, nd=None):
    """Extract facial features (landmarks, pose, ...) by running
    OpenFace over the dataset image folders in the slice [st:nd]."""
    import glob
    from utils import visionLogging as log
    pattern = os.path.join(cfg.AFFECTNET_ROOT, 'cropped_Annotated', '*')
    img_dirs = sorted(glob.glob(pattern))[st:nd]
    total = len(img_dirs)
    for cnt, img_dir in enumerate(img_dirs):
        folder_name = os.path.split(img_dir)[1]
        out_dir = os.path.join(cfg.AFFECTNET_ROOT, 'features', folder_name)
        log.info("{}/{}".format(cnt, total))
        face_processing.run_open_face(img_dir, out_dir, is_sequence=False)
if __name__ == '__main__':
    import argparse
    from utils import vis, face_processing

    # --extract runs OpenFace feature extraction over the dataset
    # folders [--st, --nd); otherwise run a quick DataLoader smoke test.
    parser = argparse.ArgumentParser()
    parser.add_argument('--extract', default=False, type=bool)
    parser.add_argument('--st', default=None, type=int)
    parser.add_argument('--nd', default=None, type=int)
    args = parser.parse_args()

    if args.extract:
        extract_features(st=args.st, nd=args.nd)
    else:
        import torch
        torch.manual_seed(0)
        torch.cuda.manual_seed_all(0)
        ds = AffectNet(train=True, start=0, align_face_orientation=False, use_cache=False, crop_source='bb_ground_truth')
        dl = td.DataLoader(ds, batch_size=10, shuffle=False, num_workers=0)
        for data in dl:
            batch = Batch(data, gpu=False)
            gt = to_numpy(batch.landmarks)
            # inter-ocular distances from the inner (39/42) and outer
            # (36/45) eye-corner landmark pairs
            inner = np.sqrt(np.sum((gt[:, 42] - gt[:, 39]) ** 2, axis=1))
            outer = np.sqrt(np.sum((gt[:, 45] - gt[:, 36]) ** 2, axis=1))
            ocular_dists = np.vstack((inner, outer)).mean(axis=0)
            print(ocular_dists)
            inputs = batch.images.clone()
            ds_utils.denormalize(inputs)
            imgs = vis.add_landmarks_to_images(inputs.numpy(), batch.landmarks.numpy())
            vis.vis_square(imgs, nCols=10, fx=1.0, fy=1.0, normalize=False)
"landmarks.lmutils.create_landmark_heatmaps",
"argparse.ArgumentParser",
"numpy.sum",
"utils.face_extractor.FaceExtractor",
"numpy.argmin",
"utils.vis.vis_square",
"os.path.isfile",
"os.path.join",
"numpy.unique",
"torch.utils.data.DataLoader",
"utils.exprec.get_expression_dists",
"cv2.cvtColo... | [((445, 478), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (468, 478), False, 'import warnings\n'), ((16551, 16576), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (16574, 16576), False, 'import argparse\n'), ((1546, 1561), 'utils.face_extractor.FaceExtractor', 'FaceExtractor', ([], {}), '()\n', (1559, 1561), False, 'from utils.face_extractor import FaceExtractor\n'), ((2275, 2323), 'os.path.join', 'os.path.join', (['self.root_dir', '"""cropped_Annotated"""'], {}), "(self.root_dir, 'cropped_Annotated')\n", (2287, 2323), False, 'import os\n'), ((2355, 2404), 'os.path.join', 'os.path.join', (['self.root_dir', '"""crops"""', 'crop_source'], {}), "(self.root_dir, 'crops', crop_source)\n", (2367, 2404), False, 'import os\n'), ((2432, 2471), 'os.path.join', 'os.path.join', (['self.root_dir', '"""features"""'], {}), "(self.root_dir, 'features')\n", (2444, 2471), False, 'import os\n'), ((2572, 2628), 'os.path.join', 'os.path.join', (['root_dir', "(annotation_filename + '.mod.pkl')"], {}), "(root_dir, annotation_filename + '.mod.pkl')\n", (2584, 2628), False, 'import os\n'), ((2640, 2676), 'os.path.isfile', 'os.path.isfile', (['path_annotations_mod'], {}), '(path_annotations_mod)\n', (2654, 2676), False, 'import os\n'), ((6424, 6481), 'datasets.ds_utils.build_transform', 'ds_utils.build_transform', (['deterministic', 'self.color', 'daug'], {}), '(deterministic, self.color, daug)\n', (6448, 6481), False, 'from datasets import ds_utils\n'), ((6690, 6712), 'torchvision.transforms.Compose', 'tf.Compose', (['transforms'], {}), '(transforms)\n', (6700, 6712), True, 'from torchvision import transforms as tf\n'), ((9050, 9067), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (9059, 9067), True, 'import numpy as np\n'), ((10247, 10286), 'utils.exprec.calc_mahalanobis_covs', 'calc_mahalanobis_covs', (['self.annotations'], {}), '(self.annotations)\n', (10268, 10286), False, 'from 
utils.exprec import calc_mahalanobis_covs, get_expression_dists\n'), ((10437, 10467), 'utils.exprec.get_expression_dists', 'get_expression_dists', (['VA', 'covs'], {}), '(VA, covs)\n', (10457, 10467), False, 'from utils.exprec import calc_mahalanobis_covs, get_expression_dists\n'), ((10490, 10514), 'numpy.argmin', 'np.argmin', (['dists'], {'axis': '(1)'}), '(dists, axis=1)\n', (10499, 10514), True, 'import numpy as np\n'), ((11987, 12043), 'numpy.array', 'np.array', (['[l_new, t_new, r_new, b_new]'], {'dtype': 'np.float32'}), '([l_new, t_new, r_new, b_new], dtype=np.float32)\n', (11995, 12043), True, 'import numpy as np\n'), ((14069, 14155), 'numpy.array', 'np.array', (['[[sample.expression, sample.valence, sample.arousal]]'], {'dtype': 'np.float32'}), '([[sample.expression, sample.valence, sample.arousal]], dtype=np.\n float32)\n', (14077, 14155), True, 'import numpy as np\n'), ((15862, 15876), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (15873, 15876), False, 'import cv2\n'), ((16259, 16316), 'os.path.join', 'os.path.join', (['cfg.AFFECTNET_ROOT', '"""features"""', 'folder_name'], {}), "(cfg.AFFECTNET_ROOT, 'features', folder_name)\n", (16271, 16316), False, 'import os\n'), ((16378, 16444), 'utils.face_processing.run_open_face', 'face_processing.run_open_face', (['img_dir', 'out_dir'], {'is_sequence': '(False)'}), '(img_dir, out_dir, is_sequence=False)\n', (16407, 16444), False, 'from utils import vis, face_processing\n'), ((16893, 16913), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (16910, 16913), False, 'import torch\n'), ((16922, 16951), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(0)'], {}), '(0)\n', (16948, 16951), False, 'import torch\n'), ((17088, 17150), 'torch.utils.data.DataLoader', 'td.DataLoader', (['ds'], {'batch_size': '(10)', 'shuffle': '(False)', 'num_workers': '(0)'}), '(ds, batch_size=10, shuffle=False, num_workers=0)\n', (17101, 17150), True, 'import torch.utils.data as td\n'), ((2754, 2790), 
'pandas.read_pickle', 'pd.read_pickle', (['path_annotations_mod'], {}), '(path_annotations_mod)\n', (2768, 2790), True, 'import pandas as pd\n'), ((4386, 4425), 'numpy.stack', 'np.stack', (['self._annotations.pose.values'], {}), '(self._annotations.pose.values)\n', (4394, 4425), True, 'import numpy as np\n'), ((6505, 6534), 'utils.face_processing.CenterCrop', 'fp.CenterCrop', (['cfg.INPUT_SIZE'], {}), '(cfg.INPUT_SIZE)\n', (6518, 6534), True, 'from utils import face_processing as fp\n'), ((6559, 6572), 'utils.face_processing.ToTensor', 'fp.ToTensor', ([], {}), '()\n', (6570, 6572), True, 'from utils import face_processing as fp\n'), ((6598, 6644), 'utils.face_processing.Normalize', 'fp.Normalize', (['[0.518, 0.418, 0.361]', '[1, 1, 1]'], {}), '([0.518, 0.418, 0.361], [1, 1, 1])\n', (6610, 6644), True, 'from utils import face_processing as fp\n'), ((7864, 7890), 'sklearn.utils.shuffle', 'shuffle', (['self._annotations'], {}), '(self._annotations)\n', (7871, 7890), False, 'from sklearn.utils import shuffle\n'), ((14653, 14743), 'landmarks.lmutils.create_landmark_heatmaps', 'create_landmark_heatmaps', (["result['landmarks']", 'self.landmark_sigma', 'self.landmark_ids'], {}), "(result['landmarks'], self.landmark_sigma, self.\n landmark_ids)\n", (14677, 14743), False, 'from landmarks.lmutils import create_landmark_heatmaps\n'), ((15816, 15852), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (15828, 15852), False, 'import cv2\n'), ((16215, 16237), 'os.path.split', 'os.path.split', (['img_dir'], {}), '(img_dir)\n', (16228, 16237), False, 'import os\n'), ((17252, 17274), 'utils.nn.Batch', 'Batch', (['data'], {'gpu': '(False)'}), '(data, gpu=False)\n', (17257, 17274), False, 'from utils.nn import to_numpy, Batch\n'), ((17293, 17318), 'utils.nn.to_numpy', 'to_numpy', (['batch.landmarks'], {}), '(batch.landmarks)\n', (17301, 17318), False, 'from utils.nn import to_numpy, Batch\n'), ((17672, 17700), 
'datasets.ds_utils.denormalize', 'ds_utils.denormalize', (['inputs'], {}), '(inputs)\n', (17692, 17700), False, 'from datasets import ds_utils\n'), ((17950, 18013), 'utils.vis.vis_square', 'vis.vis_square', (['imgs'], {'nCols': '(10)', 'fx': '(1.0)', 'fy': '(1.0)', 'normalize': '(False)'}), '(imgs, nCols=10, fx=1.0, fy=1.0, normalize=False)\n', (17964, 18013), False, 'from utils import vis, face_processing\n'), ((2890, 2942), 'os.path.join', 'os.path.join', (['root_dir', "(annotation_filename + '.csv')"], {}), "(root_dir, annotation_filename + '.csv')\n", (2902, 2942), False, 'import os\n'), ((3394, 3436), 'sklearn.utils.shuffle', 'shuffle', (['self._annotations'], {'random_state': '(2)'}), '(self._annotations, random_state=2)\n', (3401, 3436), False, 'from sklearn.utils import shuffle\n'), ((16080, 16138), 'os.path.join', 'os.path.join', (['cfg.AFFECTNET_ROOT', '"""cropped_Annotated"""', '"""*"""'], {}), "(cfg.AFFECTNET_ROOT, 'cropped_Annotated', '*')\n", (16092, 16138), False, 'import os\n'), ((17360, 17404), 'numpy.sum', 'np.sum', (['((gt[:, 42] - gt[:, 39]) ** 2)'], {'axis': '(1)'}), '((gt[:, 42] - gt[:, 39]) ** 2, axis=1)\n', (17366, 17404), True, 'import numpy as np\n'), ((17447, 17491), 'numpy.sum', 'np.sum', (['((gt[:, 45] - gt[:, 36]) ** 2)'], {'axis': '(1)'}), '((gt[:, 45] - gt[:, 36]) ** 2, axis=1)\n', (17453, 17491), True, 'import numpy as np\n'), ((3824, 3850), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (3840, 3850), False, 'import os\n'), ((3921, 3967), 'os.path.join', 'os.path.join', (['self.feature_dir', 'filename_noext'], {}), '(self.feature_dir, filename_noext)\n', (3933, 3967), False, 'import os\n'), ((9093, 9115), 'numpy.where', 'np.where', (['(labels == id)'], {}), '(labels == id)\n', (9101, 9115), True, 'import numpy as np\n'), ((14402, 14422), 'utils.face_processing.RandomOcclusion', 'fp.RandomOcclusion', ([], {}), '()\n', (14420, 14422), True, 'from utils import face_processing as fp\n'), ((17520, 17571), 
'numpy.vstack', 'np.vstack', (['(ocular_dists_inner, ocular_dists_outer)'], {}), '((ocular_dists_inner, ocular_dists_outer))\n', (17529, 17571), True, 'import numpy as np\n')] |
# Copyright (c) 2021, <NAME>, FUNLab, Xiamen University
# All rights reserved.
import os
import torch
import numpy as np
import random
from copy import deepcopy
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from common import make_env, npz_save, run_preparation
def _save(dir, prefix, pname, seed, idx, arr):
    """Serialize `arr` to '<dir>/<prefix>_<pname>_s<seed>_<idx>.npz'
    and log the destination and sample count."""
    fpath = os.path.join(dir, f"{prefix}_{pname}_s{seed}_{idx}.npz")
    npz_save(arr, fpath)
    print(f"[pretrain | collect] saved to '{fpath}' (size: {len(arr)})")
def collect(args, env_type, RENDER="non-display"):
    """
    Collect (P_GU, P_ABS, P_CGU) samples by interacting with a
    site-specific environment, then save them as npz files.
    For each episode the environment is sampled once with random ABS
    placement and once with a k-means placement; the 'half'/'third'
    strategies additionally store 1 resp. 2 augmented variants of each.
    :param args    : (namespace), specs;
    :param env_type: environment type forwarded to make_env;
    :param RENDER  : (str), either 'human' or 'non-display';
    """

    """Preparation"""
    # useful params
    run_dir = args.run_dir
    K = args.K
    n_ABS = args.n_ABS
    splits = args.splits
    file_episode_limit = args.file_episode_limit
    collect_strategy = args.collect_strategy
    num_episodes_per_trial = args.num_episodes_per_trial
    seed = args.seed

    # replay saving dir
    replay_dir = os.path.join(run_dir, "emulator_replays")
    if not os.path.isdir(replay_dir):
        os.makedirs(replay_dir)

    # env
    env = make_env(args, TYPE=env_type)

    """Collect data"""
    # one (episode budget, file prefix) pair per dataset split
    prefixs = ["test", "val", "train"]
    for _split, _prefix in zip(splits, prefixs):
        idx = 0
        cur_episodes = _split
        # n = samples stored per episode, m = augmented variants per pass
        if _prefix in prefixs[:2]:
            n = 2  # (1 random + 1 kmeans)
        else:
            # NOTE(review): an unrecognized collect_strategy leaves n (and m)
            # undefined here and fails with NameError below -- confirm that
            # the strategy is validated upstream.
            if collect_strategy == "default":
                n = 2  # (1 random + 1 kmeans)
            elif collect_strategy == "half":
                n = 4  # (1 random + 1 random subset + 1 kmeans + 1 kmeans subset)
                m = 1  # m random subsets
            elif collect_strategy == "third":
                n = 6  # (1 random + 2 random subset + 1 kmeans + 2 kmeans subset)
                m = 2
        # write at most file_episode_limit episodes per npz file
        while cur_episodes > 0:
            episodes = min(cur_episodes, file_episode_limit)
            P_GUs = np.zeros((episodes * n, K, K), dtype=np.float32)
            P_ABSs = np.zeros((episodes * n, K, K), dtype=np.float32)
            P_CGUs = np.zeros((episodes * n, K, K), dtype=np.float32)

            for _episode in tqdm(range(episodes)):
                if _prefix in prefixs[:2] or \
                   _prefix == prefixs[-1] and collect_strategy == "default":
                    # totally random
                    if _episode % num_episodes_per_trial == 0:
                        env.reset()
                    else:
                        env.walk()
                    env.render(RENDER)
                    P_GU, P_ABS, P_CGU = env.get_all_Ps()
                    P_GUs[n * _episode] = P_GU
                    P_ABSs[n * _episode] = P_ABS
                    P_CGUs[n * _episode] = P_CGU

                    # kmeans
                    kmeans_P_ABS = env.find_KMEANS_P_ABS()
                    env.step(kmeans_P_ABS)
                    env.render(RENDER)
                    P_GU, P_ABS, P_CGU = env.get_all_Ps()
                    P_GUs[n * _episode + 1] = P_GU
                    P_ABSs[n * _episode + 1] = P_ABS
                    P_CGUs[n * _episode + 1] = P_CGU
                elif _prefix == prefixs[-1] and collect_strategy in ["half", "third"]:
                    # totally random (slot 0 of this episode's n slots)
                    if _episode % num_episodes_per_trial == 0:
                        env.reset()
                    else:
                        env.walk()
                    env.render(RENDER)
                    P_GU, P_ABS, P_CGU = env.get_all_Ps()
                    P_GUs[n * _episode] = P_GU
                    P_ABSs[n * _episode] = P_ABS
                    P_CGUs[n * _episode] = P_CGU

                    # sample unique abs ids (slots 1..m: augmented random variants)
                    sampled = random.sample(range(n_ABS), m)
                    for j, _abs_id in enumerate(sampled):
                        abs_ids = [_abs_id]
                        P_GU_aug, P_ABS_aug, P_CGU_aug = env.get_all_Ps_with_augmentation(abs_ids)
                        P_GUs[n * _episode + j + 1] = P_GU_aug
                        P_ABSs[n * _episode + j + 1] = P_ABS_aug
                        P_CGUs[n * _episode + j + 1] = P_CGU_aug

                    # kmeans (slot m+1)
                    kmeans_P_ABS = env.find_KMEANS_P_ABS()
                    env.step(kmeans_P_ABS)
                    env.render(RENDER)
                    P_GU, P_ABS, P_CGU = env.get_all_Ps()
                    P_GUs[n * _episode + m + 1] = P_GU
                    P_ABSs[n * _episode + m + 1] = P_ABS
                    P_CGUs[n * _episode + m + 1] = P_CGU

                    # sample unique abs ids (slots m+2..2m+1: augmented kmeans variants)
                    sampled = random.sample(range(n_ABS), m)
                    for j, _abs_id in enumerate(sampled):
                        abs_ids = [_abs_id]
                        P_GU_aug, P_ABS_aug, P_CGU_aug = env.get_all_Ps_with_augmentation(abs_ids)
                        P_GUs[n * _episode + m + 1 + j + 1] = P_GU_aug
                        P_ABSs[n * _episode + m + 1 + j + 1] = P_ABS_aug
                        P_CGUs[n * _episode + m + 1 + j + 1] = P_CGU_aug

            # flush this file's worth of episodes to disk
            for pname, p in zip(["GUs", "ABSs", "CGUs"], [P_GUs, P_ABSs, P_CGUs]):
                _save(replay_dir, _prefix, pname, seed, idx, p)
            del P_GUs, P_ABSs, P_CGUs

            # update counters
            cur_episodes -= episodes
            idx += 1

    env.close()
def collect_3_adaptive_to_variable_entities(args, env_type, RENDER="non-display"):
    """
    Collect samples over a range of configurations (n_ABS, n_GU) with
    site-specific environments, then save them as npz files.
    Creates 6 environments whose ABS count (or GU count) decreases per
    environment, and stores per episode one random and one k-means
    sample from each.
    :param args    : (namespace), specs;
    :param env_type: environment type forwarded to make_env;
    :param RENDER  : (str), either 'human' or 'non-display';
    """

    """Preparation"""
    # useful params
    run_dir = args.run_dir
    K = args.K
    n_ABS = args.n_ABS
    splits = args.splits
    file_episode_limit = args.file_episode_limit
    num_episodes_per_trial = args.num_episodes_per_trial
    seed = args.seed
    variable_n_ABS = args.variable_n_ABS
    variable_n_GU = args.variable_n_GU

    # replay saving dir
    replay_dir = os.path.join(run_dir, "emulator_replays")
    if not os.path.isdir(replay_dir):
        os.makedirs(replay_dir)

    # number of environment variants
    n = 6

    # envs: variant i removes i ABSs (or i*25 GUs) from the base config
    envs = []
    for i in range(n):
        copyed_args = deepcopy(args)
        if variable_n_ABS:
            copyed_args.n_ABS = args.n_ABS - i
        if variable_n_GU:
            copyed_args.n_GU = args.n_GU - i * 25
        envs.append(make_env(copyed_args, TYPE=env_type))

    """Collect data"""
    prefixs = ["test", "val", "train"]
    for _split, _prefix in zip(splits, prefixs):
        idx = 0
        cur_episodes = _split
        # write at most file_episode_limit episodes per npz file;
        # each episode stores 2 samples (random + kmeans) per env
        while cur_episodes > 0:
            episodes = min(cur_episodes, file_episode_limit)
            P_GUs = np.zeros((episodes * n * 2, K, K), dtype=np.float32)
            P_ABSs = np.zeros((episodes * n * 2, K, K), dtype=np.float32)
            P_CGUs = np.zeros((episodes * n * 2, K, K), dtype=np.float32)

            for _episode in tqdm(range(episodes)):
                # totally random
                for i in range(n):
                    if _episode % num_episodes_per_trial == 0:
                        envs[i].reset()
                    else:
                        envs[i].walk()
                    envs[i].render(RENDER)
                    P_GU, P_ABS, P_CGU = envs[i].get_all_Ps()
                    # random sample slot for env i
                    j = 2 * n * _episode + i
                    P_GUs[j] = P_GU
                    P_ABSs[j] = P_ABS
                    P_CGUs[j] = P_CGU

                    kmeans_P_ABS = envs[i].find_KMEANS_P_ABS()
                    envs[i].step(kmeans_P_ABS)
                    envs[i].render(RENDER)
                    P_GU, P_ABS, P_CGU = envs[i].get_all_Ps()
                    # kmeans sample slot for env i (second half of the episode's 2n slots)
                    j = 2 * n * _episode + i + n
                    P_GUs[j] = P_GU
                    P_ABSs[j] = P_ABS
                    P_CGUs[j] = P_CGU

            # flush this file's worth of episodes to disk
            for pname, p in zip(["GUs", "ABSs", "CGUs"], [P_GUs, P_ABSs, P_CGUs]):
                _save(replay_dir, _prefix, pname, seed, idx, p)
            del P_GUs, P_ABSs, P_CGUs

            # update counters
            cur_episodes -= episodes
            idx += 1

    for i in range(n):
        envs[i].close()
if __name__ == "__main__":
    # get specs
    args, run_dir = run_preparation()
    print(f"[pretrain | collect] running dir is {str(run_dir)}")

    # cuda: limit to one CPU thread; pick device (device itself is unused
    # below -- collection runs on the environment, not the GPU)
    torch.set_num_threads(1)
    if args.cuda and torch.cuda.is_available():
        print("choose to use gpu...")
        device = torch.device("cuda:0")
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
    else:
        print("chosse to use cpu...")
        device = torch.device("cpu")

    # seed every RNG for reproducible collection
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # tensorboard
    # NOTE(review): writer is created (which also creates the log dir)
    # but never written to or closed here.
    writer = SummaryWriter(log_dir=os.path.join(run_dir, "pretrain_tb"))

    if args.collect_strategy == "variable":
        # exactly one of the two 'variable' flags must be set
        assert not (args.variable_n_GU & args.variable_n_ABS)
        assert args.variable_n_GU | args.variable_n_ABS
        collect_3_adaptive_to_variable_entities(args, "train", args.render)
    else:
        collect(args, "train", args.render)
    print(f"[pretrain | collect] seed={args.seed} done!")
"copy.deepcopy",
"numpy.random.seed",
"os.makedirs",
"os.path.isdir",
"torch.manual_seed",
"numpy.zeros",
"common.run_preparation",
"torch.cuda.manual_seed_all",
"torch.set_num_threads",
"random.seed",
"torch.cuda.is_available",
"torch.device",
"common.make_env",
"os.path.join",
"common.... | [((351, 407), 'os.path.join', 'os.path.join', (['dir', 'f"""{prefix}_{pname}_s{seed}_{idx}.npz"""'], {}), "(dir, f'{prefix}_{pname}_s{seed}_{idx}.npz')\n", (363, 407), False, 'import os\n'), ((412, 432), 'common.npz_save', 'npz_save', (['arr', 'fpath'], {}), '(arr, fpath)\n', (420, 432), False, 'from common import make_env, npz_save, run_preparation\n'), ((1121, 1162), 'os.path.join', 'os.path.join', (['run_dir', '"""emulator_replays"""'], {}), "(run_dir, 'emulator_replays')\n", (1133, 1162), False, 'import os\n'), ((1254, 1283), 'common.make_env', 'make_env', (['args'], {'TYPE': 'env_type'}), '(args, TYPE=env_type)\n', (1262, 1283), False, 'from common import make_env, npz_save, run_preparation\n'), ((6239, 6280), 'os.path.join', 'os.path.join', (['run_dir', '"""emulator_replays"""'], {}), "(run_dir, 'emulator_replays')\n", (6251, 6280), False, 'import os\n'), ((8447, 8464), 'common.run_preparation', 'run_preparation', ([], {}), '()\n', (8462, 8464), False, 'from common import make_env, npz_save, run_preparation\n'), ((8546, 8570), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (8567, 8570), False, 'import torch\n'), ((8895, 8923), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (8912, 8923), False, 'import torch\n'), ((8928, 8965), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (8954, 8965), False, 'import torch\n'), ((8970, 8995), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (8984, 8995), True, 'import numpy as np\n'), ((9000, 9022), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (9011, 9022), False, 'import random\n'), ((1174, 1199), 'os.path.isdir', 'os.path.isdir', (['replay_dir'], {}), '(replay_dir)\n', (1187, 1199), False, 'import os\n'), ((1209, 1232), 'os.makedirs', 'os.makedirs', (['replay_dir'], {}), '(replay_dir)\n', (1220, 1232), False, 'import os\n'), ((6292, 6317), 
'os.path.isdir', 'os.path.isdir', (['replay_dir'], {}), '(replay_dir)\n', (6305, 6317), False, 'import os\n'), ((6327, 6350), 'os.makedirs', 'os.makedirs', (['replay_dir'], {}), '(replay_dir)\n', (6338, 6350), False, 'import os\n'), ((6433, 6447), 'copy.deepcopy', 'deepcopy', (['args'], {}), '(args)\n', (6441, 6447), False, 'from copy import deepcopy\n'), ((8592, 8617), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8615, 8617), False, 'import torch\n'), ((8674, 8696), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (8686, 8696), False, 'import torch\n'), ((8859, 8878), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (8871, 8878), False, 'import torch\n'), ((2093, 2141), 'numpy.zeros', 'np.zeros', (['(episodes * n, K, K)'], {'dtype': 'np.float32'}), '((episodes * n, K, K), dtype=np.float32)\n', (2101, 2141), True, 'import numpy as np\n'), ((2163, 2211), 'numpy.zeros', 'np.zeros', (['(episodes * n, K, K)'], {'dtype': 'np.float32'}), '((episodes * n, K, K), dtype=np.float32)\n', (2171, 2211), True, 'import numpy as np\n'), ((2233, 2281), 'numpy.zeros', 'np.zeros', (['(episodes * n, K, K)'], {'dtype': 'np.float32'}), '((episodes * n, K, K), dtype=np.float32)\n', (2241, 2281), True, 'import numpy as np\n'), ((6618, 6654), 'common.make_env', 'make_env', (['copyed_args'], {'TYPE': 'env_type'}), '(copyed_args, TYPE=env_type)\n', (6626, 6654), False, 'from common import make_env, npz_save, run_preparation\n'), ((6929, 6981), 'numpy.zeros', 'np.zeros', (['(episodes * n * 2, K, K)'], {'dtype': 'np.float32'}), '((episodes * n * 2, K, K), dtype=np.float32)\n', (6937, 6981), True, 'import numpy as np\n'), ((7003, 7055), 'numpy.zeros', 'np.zeros', (['(episodes * n * 2, K, K)'], {'dtype': 'np.float32'}), '((episodes * n * 2, K, K), dtype=np.float32)\n', (7011, 7055), True, 'import numpy as np\n'), ((7077, 7129), 'numpy.zeros', 'np.zeros', (['(episodes * n * 2, K, K)'], {'dtype': 'np.float32'}), '((episodes * n * 
2, K, K), dtype=np.float32)\n', (7085, 7129), True, 'import numpy as np\n'), ((9077, 9113), 'os.path.join', 'os.path.join', (['run_dir', '"""pretrain_tb"""'], {}), "(run_dir, 'pretrain_tb')\n", (9089, 9113), False, 'import os\n')] |
import db
import cv2
import face
import frame
import yaml
import typedef
import utils
import numpy as np
import copy
import pickle
def main(config):
    """Run the face detection/tracking/recognition demo loop.

    Reads frames from the configured source, detects and tracks faces,
    recognizes them against a persistent database, draws the results,
    and saves the database to 'storage.pkl' on exit. Press 'q' to quit.

    :param config: dict (parsed from config.yml) with source, detector,
        validator, filter, buffer, encoder, recognizer and database
        settings.
    """
    ss = config['source_scale']
    frame_drawer = frame.drawer.Drawer()
    capturer = cv2.VideoCapture(config['source'])
    face_detector = face.detectors.get(**config['face_detector'])
    face_validators = face.validators.get_list(config['face_validators'])
    frame_filters = frame.filters.get_list(config['frame_filters'])
    face_buffer = face.buffer.FaceBuffer(config['face_buffer_size'])
    face_encoder = face.encoders.get(**config['face_encoder'])
    face_recognizer = face.recognizers.get(face_encoder, **config['face_recognizer'])
    storage = db.initialize(**config['database'])
    face_trackers = []

    # Best-effort: restore a previously persisted face database.
    try:
        with open('storage.pkl', 'rb') as db_file:
            storage = pickle.load(db_file)
        print("Database was loaded from file.")
        print(f"Number of persons in DB: {len(storage.get_face_ids())}")
    except Exception:
        # BUGFIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; keep the best-effort behavior but
        # only for ordinary errors. (Also fixed the "databese" typo.)
        print("No database to load.")

    while True:
        read_ok, image = capturer.read()
        if not read_ok:
            # no frame available: show a placeholder and keep polling
            cv2.imshow("Frame", typedef.NO_VIDEO_FRAME)
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                capturer.release()
                break
            continue

        # detection runs on a scaled, filtered copy; drawing on the original
        image_copy = image.copy()
        image = cv2.resize(image, None, fx=ss, fy=ss, interpolation=cv2.INTER_CUBIC)
        image = frame.filters.apply(frame_filters, image)

        face_boxes = face_detector(image)
        face_boxes = face.validators.apply(face_validators, image, face_boxes)
        face_ids, face_boxes = face.trackers.apply(face_trackers, image, face_boxes)
        face_trackers = face.trackers.drop_wasted(face_trackers)

        # re-validate the tracked boxes and keep only the ids whose box survived
        _face_boxes = copy.deepcopy(face_boxes)
        face_boxes = face.validators.apply(face_validators, image, face_boxes)
        _face_ids = []
        for face_id, _face_box in zip(face_ids, _face_boxes):
            for face_box in face_boxes:
                if _face_box == face_box:
                    _face_ids.append(face_id)

        for face_id, face_box in zip(_face_ids, face_boxes):
            face_image = utils.crop(image, *face_box)
            face_image = cv2.resize(face_image, tuple(config['face_shape']),
                                    interpolation=cv2.INTER_AREA)
            if face_id == typedef.UNKNOWN_FACE_ID:
                # start tracking the new face under a temporary id
                face_id = utils.generate_tmp_face_id()
                tracker = face.trackers.get(**config['face_tracker'])
                tracker.init(image, face_box, face_id)
                face_trackers.append(tracker)
            if utils.is_tmp_id(face_id):
                # accumulate crops; once the buffer is full, recognize the
                # mean face or enroll it as a new person
                face_buffer.update(face_id, face_image)
                if face_buffer.is_full(face_id):
                    mean_face = face_buffer.get_mean_face(face_id)
                    recognized_ok, rec_face_id = face_recognizer(mean_face, storage)
                    tracked_face_id = face_id
                    if not recognized_ok:
                        encoded_mean_face = face_encoder(mean_face)
                        face_id = storage.generate_face_id()
                        storage.add(face_id, encoded_mean_face)
                    else:
                        face_id = rec_face_id
                    face.trackers.update_face_ids(face_trackers, [tracked_face_id], [face_id])
            # map the box back to the original (unscaled) frame for drawing
            face_box = tuple(np.int64(np.array(face_box) * (1 / ss)))
            frame_drawer.draw_box(image_copy, face_box)
            frame_drawer.draw_face_id(image_copy, face_box, face_id)

        cv2.imshow("Frame", image_copy)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            capturer.release()
            break

    cv2.destroyAllWindows()
    # persist the (possibly extended) face database for the next run
    with open('storage.pkl', 'wb') as db_file:
        pickle.dump(storage, db_file)
if __name__ == '__main__':
    # load the YAML configuration and hand it to the demo loop
    with open('config.yml', 'r') as cfg_file:
        main(yaml.safe_load(cfg_file))
| [
"pickle.dump",
"face.validators.apply",
"frame.drawer.Drawer",
"pickle.load",
"yaml.safe_load",
"cv2.imshow",
"utils.is_tmp_id",
"face.buffer.FaceBuffer",
"face.encoders.get",
"utils.crop",
"db.initialize",
"cv2.destroyAllWindows",
"cv2.resize",
"copy.deepcopy",
"face.trackers.update_fac... | [((202, 223), 'frame.drawer.Drawer', 'frame.drawer.Drawer', ([], {}), '()\n', (221, 223), False, 'import frame\n'), ((239, 273), 'cv2.VideoCapture', 'cv2.VideoCapture', (["config['source']"], {}), "(config['source'])\n", (255, 273), False, 'import cv2\n'), ((294, 339), 'face.detectors.get', 'face.detectors.get', ([], {}), "(**config['face_detector'])\n", (312, 339), False, 'import face\n'), ((362, 413), 'face.validators.get_list', 'face.validators.get_list', (["config['face_validators']"], {}), "(config['face_validators'])\n", (386, 413), False, 'import face\n'), ((434, 481), 'frame.filters.get_list', 'frame.filters.get_list', (["config['frame_filters']"], {}), "(config['frame_filters'])\n", (456, 481), False, 'import frame\n'), ((500, 550), 'face.buffer.FaceBuffer', 'face.buffer.FaceBuffer', (["config['face_buffer_size']"], {}), "(config['face_buffer_size'])\n", (522, 550), False, 'import face\n'), ((570, 613), 'face.encoders.get', 'face.encoders.get', ([], {}), "(**config['face_encoder'])\n", (587, 613), False, 'import face\n'), ((636, 699), 'face.recognizers.get', 'face.recognizers.get', (['face_encoder'], {}), "(face_encoder, **config['face_recognizer'])\n", (656, 699), False, 'import face\n'), ((714, 749), 'db.initialize', 'db.initialize', ([], {}), "(**config['database'])\n", (727, 749), False, 'import db\n'), ((3774, 3797), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3795, 3797), False, 'import cv2\n'), ((1395, 1463), 'cv2.resize', 'cv2.resize', (['image', 'None'], {'fx': 'ss', 'fy': 'ss', 'interpolation': 'cv2.INTER_CUBIC'}), '(image, None, fx=ss, fy=ss, interpolation=cv2.INTER_CUBIC)\n', (1405, 1463), False, 'import cv2\n'), ((1480, 1521), 'frame.filters.apply', 'frame.filters.apply', (['frame_filters', 'image'], {}), '(frame_filters, image)\n', (1499, 1521), False, 'import frame\n'), ((1585, 1642), 'face.validators.apply', 'face.validators.apply', (['face_validators', 'image', 'face_boxes'], {}), 
'(face_validators, image, face_boxes)\n', (1606, 1642), False, 'import face\n'), ((1675, 1728), 'face.trackers.apply', 'face.trackers.apply', (['face_trackers', 'image', 'face_boxes'], {}), '(face_trackers, image, face_boxes)\n', (1694, 1728), False, 'import face\n'), ((1753, 1793), 'face.trackers.drop_wasted', 'face.trackers.drop_wasted', (['face_trackers'], {}), '(face_trackers)\n', (1778, 1793), False, 'import face\n'), ((1816, 1841), 'copy.deepcopy', 'copy.deepcopy', (['face_boxes'], {}), '(face_boxes)\n', (1829, 1841), False, 'import copy\n'), ((1863, 1920), 'face.validators.apply', 'face.validators.apply', (['face_validators', 'image', 'face_boxes'], {}), '(face_validators, image, face_boxes)\n', (1884, 1920), False, 'import face\n'), ((3625, 3656), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'image_copy'], {}), "('Frame', image_copy)\n", (3635, 3656), False, 'import cv2\n'), ((3853, 3882), 'pickle.dump', 'pickle.dump', (['storage', 'db_file'], {}), '(storage, db_file)\n', (3864, 3882), False, 'import pickle\n'), ((856, 876), 'pickle.load', 'pickle.load', (['db_file'], {}), '(db_file)\n', (867, 876), False, 'import pickle\n'), ((1150, 1193), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'typedef.NO_VIDEO_FRAME'], {}), "('Frame', typedef.NO_VIDEO_FRAME)\n", (1160, 1193), False, 'import cv2\n'), ((2222, 2250), 'utils.crop', 'utils.crop', (['image', '*face_box'], {}), '(image, *face_box)\n', (2232, 2250), False, 'import utils\n'), ((2688, 2712), 'utils.is_tmp_id', 'utils.is_tmp_id', (['face_id'], {}), '(face_id)\n', (2703, 2712), False, 'import utils\n'), ((3671, 3685), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3682, 3685), False, 'import cv2\n'), ((3967, 3987), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), '(file)\n', (3981, 3987), False, 'import yaml\n'), ((1212, 1226), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1223, 1226), False, 'import cv2\n'), ((2472, 2500), 'utils.generate_tmp_face_id', 'utils.generate_tmp_face_id', ([], 
{}), '()\n', (2498, 2500), False, 'import utils\n'), ((2527, 2570), 'face.trackers.get', 'face.trackers.get', ([], {}), "(**config['face_tracker'])\n", (2544, 2570), False, 'import face\n'), ((3345, 3419), 'face.trackers.update_face_ids', 'face.trackers.update_face_ids', (['face_trackers', '[tracked_face_id]', '[face_id]'], {}), '(face_trackers, [tracked_face_id], [face_id])\n', (3374, 3419), False, 'import face\n'), ((3459, 3477), 'numpy.array', 'np.array', (['face_box'], {}), '(face_box)\n', (3467, 3477), True, 'import numpy as np\n')] |
import glob
import librosa
import IPython.display as ipd
import numpy as np
from scipy import signal
# Framing parameters (seconds): 25 ms analysis window, 5 ms hop.
win_length = 0.025
hop_length = 0.005
# --- Source corpus: TIMIT (wav + .PHN phoneme annotation files) ---
timit_train_wav_data_path = 'timit/data/TRAIN/*/*/*.WAV.wav'
timit_train_wav = glob.glob(timit_train_wav_data_path)
timit_train_wav.sort()
print(f'Total source train wav files: {len(timit_train_wav)}')
timit_train_phns_data_path = 'timit/data/TRAIN/*/*/*.PHN'
timit_train_phns = glob.glob(timit_train_phns_data_path)
timit_train_phns.sort()
timit_test_wav_data_path = 'timit/data/TEST/*/*/*.WAV.wav'
timit_test_wav = glob.glob(timit_test_wav_data_path)
timit_test_wav.sort()
print(f'Total source test wav files: {len(timit_test_wav)}')
timit_test_phns_data_path = 'timit/data/TEST/*/*/*.PHN'
timit_test_phns = glob.glob(timit_test_phns_data_path)
timit_test_phns.sort()
# --- Target corpus: CMU Arctic slt voice (wav + .lab annotation files) ---
arctic_wav_data_path2 = 'cmu_us_slt_arctic/wav/arctic_*.wav'
arctic_wav2 = glob.glob(arctic_wav_data_path2)
arctic_wav2.sort()
print(f'Total target wav files: {len(arctic_wav2)}')
# 80/20 train/test split over the sorted file list (wavs and labels are
# split with the same ratio so they stay index-aligned).
num_arctic_train2 = int(0.8*len(arctic_wav2))
num_arctic_test2 = len(arctic_wav2) - num_arctic_train2
arctic_train_wav2 = arctic_wav2[:num_arctic_train2]
print(f'Total target train wav files: {len(arctic_train_wav2)}')
arctic_test_wav2 = arctic_wav2[num_arctic_train2:len(arctic_wav2)]
print(f'Total target test wav files: {len(arctic_test_wav2)}')
arctic_phns_data_path2 = 'cmu_us_slt_arctic/lab/*.lab'
arctic_phns2 = glob.glob(arctic_phns_data_path2)
arctic_phns2.sort()
num_arctic_train_phns2 = int(0.8*len(arctic_phns2))
num_arctic_test_phns2 = len(arctic_phns2) - num_arctic_train_phns2
arctic_train_phns2 = arctic_phns2[:num_arctic_train_phns2]
arctic_test_phns2 = arctic_phns2[num_arctic_train_phns2:len(arctic_phns2)]
# The 61-symbol TIMIT phoneme inventory; list position defines the label index.
phns = ['h#', 'aa', 'ae', 'ah', 'ao', 'aw', 'ax', 'ax-h', 'axr', 'ay', 'b', 'bcl',
                'ch', 'd', 'dcl', 'dh', 'dx', 'eh', 'el', 'em', 'en', 'eng', 'epi',
                'er', 'ey', 'f', 'g', 'gcl', 'hh', 'hv', 'ih', 'ix', 'iy', 'jh',
                'k', 'kcl', 'l', 'm', 'n', 'ng', 'nx', 'ow', 'oy', 'p', 'pau', 'pcl',
                'q', 'r', 's', 'sh', 't', 'tcl', 'th', 'uh', 'uw', 'ux', 'v', 'w', 'y', 'z', 'zh']
print(f'Total no of phones: {len(phns)}')
def load_vocab():
    """Build the phoneme <-> index lookup tables from the global `phns` list.

    Returns
    -------
    tuple[dict, dict]
        (phn2idx, idx2phn): symbol-to-index and index-to-symbol mappings.
    """
    pairs = list(enumerate(phns))
    idx2phn = dict(pairs)
    phn2idx = {symbol: index for index, symbol in pairs}
    return phn2idx, idx2phn
phn2idx, idx2phn = load_vocab()
print(idx2phn)
def string_to_matrix_dict(string):
    """Parse a multi-line annotation string into rows of space-separated fields.

    Each input line becomes one list of its tokens.  The final row is always
    dropped: the annotation files end with a trailing newline, so splitting on
    "\n" produces a spurious last entry that must not be kept.

    Parameters
    ----------
    string : str
        Full text of a TIMIT .PHN or Arctic .lab file.

    Returns
    -------
    list[list[str]]
        One token list per line, excluding the trailing entry.
    """
    # Comprehension replaces the original append-in-a-loop; behavior is identical.
    return [line.split(" ") for line in string.split("\n")][:-1]
def get_all_feature_phoneme(timit_train_wav, timit_train_phns):
    """Extract frame-level MFCCs and one-hot phoneme labels for every TIMIT file.

    Each utterance is truncated/zero-padded to `max_duration` seconds, then
    framed with the module-level `win_length`/`hop_length` (seconds).  For
    every hop a 13-dim MFCC vector is stored together with the one-hot label
    of the phoneme whose sample interval (from the .PHN file) contains the
    hop's start sample.

    Returns
    -------
    (train1_mfccs, train1_phns) : np.ndarray, np.ndarray
        Per-file sequences of MFCC frames and one-hot labels
        (presumably (n_files, 800, 13) and (n_files, 800, 61) at 16 kHz
        — TODO confirm against the model's input_shape).
    """
    from tqdm import tqdm
    train1_mfccs = []
    train1_phns = []
    max_duration=4  # seconds; every utterance is forced to this length
    for i in tqdm(range(len(timit_train_wav))):
        time_step1_mfccs=[]
        time_step1_phns=[]
        y, sr = librosa.load(timit_train_wav[i], sr=None)  # keep native sample rate
        phoneme = open(timit_train_phns[i], "r").read()
        phoneme = string_to_matrix_dict(phoneme)  # rows: [start_sample, end_sample, phone]
        # Truncate or zero-pad the waveform to exactly max_duration seconds.
        if(len(y) > sr*max_duration):
            y=y[:sr*max_duration]
        else:
            y=np.pad(y, (0, sr*max_duration-len(y)), 'constant')
        win = int(win_length*sr)  # window length in samples
        hop = int(hop_length*sr)  # hop length in samples
        y_mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13, win_length=win, hop_length=hop)
        count = 0
        for j in range(0,len(y),hop):  # j = start sample of each frame
            count = count+1
            index = int(j/hop)
            time_step1_mfccs.append(y_mfcc[:,index])
            x=0  # flag: did any annotated phoneme interval cover this frame?
            for k in range(1,len(phoneme)-1):
                start_index = int(phoneme[k][0])
                end_index = int(phoneme[k][1])
                if(j>=start_index and j<=end_index):
                    phn_str = phoneme[k][2]
                    phn_label = phn2idx[phn_str]
                    if(phn_label==44):
                        phn_label=0  # fold 'pau' (index 44) into 'h#' silence (index 0)
                    phn_one_hot = np.eye(len(phns))[phn_label]
                    time_step1_phns.append(phn_one_hot)
                    # time_step1_phns.append(phn_label)
                    x=x+1
                    break
            if(x==0):
                # No interval matched (e.g. zero-padded tail): label as silence.
                phn_label = 0
                phn_one_hot = np.eye(len(phns))[phn_label]
                time_step1_phns.append(phn_one_hot)
                # time_step1_phns.append(phn_label)
        train1_mfccs.append(np.array(time_step1_mfccs))
        train1_phns.append(np.array(time_step1_phns))
    train1_mfccs=np.array(train1_mfccs)
    train1_phns=np.array(train1_phns)
    return train1_mfccs, train1_phns
def get_one_feature_phoneme(timit_train_wav, timit_train_phns, sample_no):
    """Extract MFCC frames and one-hot phoneme labels for a single TIMIT file.

    Identical logic to `get_all_feature_phoneme`, restricted to the file at
    index `sample_no` (the loop runs over the one-element range
    [sample_no, sample_no+1)).  NOTE(review): this duplicates the body of
    `get_all_feature_phoneme`; the two should ideally share one helper.
    """
    from tqdm import tqdm
    train1_mfccs = []
    train1_phns = []
    max_duration=4  # seconds; the utterance is forced to this length
    for i in tqdm(range(sample_no, sample_no+1)):
        time_step1_mfccs=[]
        time_step1_phns=[]
        y, sr = librosa.load(timit_train_wav[i], sr=None)  # keep native sample rate
        phoneme = open(timit_train_phns[i], "r").read()
        phoneme = string_to_matrix_dict(phoneme)  # rows: [start_sample, end_sample, phone]
        # Truncate or zero-pad the waveform to exactly max_duration seconds.
        if(len(y) > sr*max_duration):
            y=y[:sr*max_duration]
        else:
            y=np.pad(y, (0, sr*max_duration-len(y)), 'constant')
        win = int(win_length*sr)  # window length in samples
        hop = int(hop_length*sr)  # hop length in samples
        y_mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13, win_length=win, hop_length=hop)
        count = 0
        for j in range(0,len(y),hop):  # j = start sample of each frame
            count = count+1
            index = int(j/hop)
            time_step1_mfccs.append(y_mfcc[:,index])
            x=0  # flag: did any annotated phoneme interval cover this frame?
            for k in range(1,len(phoneme)-1):
                start_index = int(phoneme[k][0])
                end_index = int(phoneme[k][1])
                if(j>=start_index and j<=end_index):
                    phn_str = phoneme[k][2]
                    phn_label = phn2idx[phn_str]
                    if(phn_label==44):
                        phn_label=0  # fold 'pau' (index 44) into 'h#' silence (index 0)
                    phn_one_hot = np.eye(len(phns))[phn_label]
                    time_step1_phns.append(phn_one_hot)
                    # time_step1_phns.append(phn_label)
                    x=x+1
                    break
            if(x==0):
                # No interval matched (e.g. zero-padded tail): label as silence.
                phn_label = 0
                phn_one_hot = np.eye(len(phns))[phn_label]
                time_step1_phns.append(phn_one_hot)
                # time_step1_phns.append(phn_label)
        train1_mfccs.append(np.array(time_step1_mfccs))
        train1_phns.append(np.array(time_step1_phns))
    train1_mfccs=np.array(train1_mfccs)
    train1_phns=np.array(train1_phns)
    return train1_mfccs, train1_phns
# ---- Stage 1: frame-level phoneme classifier (MFCC -> phoneme posterior) ----
train1_mfccs, train1_phns = get_all_feature_phoneme(timit_train_wav, timit_train_phns)
# print(train1_mfccs.shape)
# print(train1_phns.shape)
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Bidirectional LSTM sequence tagger: 800 frames x 13 MFCCs in,
# a 61-way softmax (one per phoneme) per frame out.
model = keras.Sequential()
model.add(layers.Bidirectional(layers.LSTM(64, return_sequences=True), input_shape=(800,13)))
model.add(layers.Dropout(0.1))
model.add(layers.Bidirectional(layers.LSTM(64, return_sequences=True), input_shape=(800,64)))
model.add(layers.Dropout(0.1))
model.add(layers.TimeDistributed(layers.Dense(64, activation="relu")))
model.add(layers.Dropout(0.1))
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dropout(0.1))
model.add(layers.Dense(61, activation="softmax"))
model.compile(optimizer="adam", loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
BATCH_SIZE=32
EPOCHS=2
history = model.fit(np.array(train1_mfccs), np.array(train1_phns), batch_size=BATCH_SIZE, epochs=EPOCHS, verbose=1)
# Evaluate frame-level accuracy over the whole TIMIT test set.
test1_mfccs, test1_phns = get_all_feature_phoneme(timit_test_wav, timit_test_phns)
pred_cat = model.predict(np.array(test1_mfccs))
pred = np.argmax(pred_cat, axis=-1)  # predicted phoneme index per frame
# print(pred)
y_te_true = np.argmax(np.array(test1_phns), -1)  # ground-truth index per frame
# print(pred.shape)
# # print(y_te_true)
# print(np.array(y_te_true).shape)
# Count matching (utterance, frame) cells to get overall frame accuracy.
height,width=pred.shape
acc_count=0
for i in range(height):
    for j in range(width):
        if(pred[i,j] == y_te_true[i,j]):
            acc_count = acc_count+1
accuracy=acc_count/(height*width)
print(f"Accuracy is on full test data is: {accuracy}")
# ---- Evaluate the phoneme classifier on one source (TIMIT) test utterance ----
# Take a random sample from test data
sample_no = 220
print(f'Test sample no from source: {timit_test_wav[sample_no]}')
# BUG FIX: the original loaded timit_test_wav[i] — `i` was a stale loop
# variable left over from the accuracy loop above, not this sample's index.
y_test_timit, sr = librosa.load(timit_test_wav[sample_no], sr=None)
# print(len(y_test_timit))
test_feature, test_phns = get_one_feature_phoneme(timit_test_wav, timit_test_phns, sample_no)
pred_cat = model.predict(np.array(test_feature))
pred_phn_for_net2 = pred_cat[0,:,:]  # per-frame posteriors, kept for the synthesis stage
pred = np.argmax(pred_cat, axis=-1)  # predicted phoneme index per frame
# print(pred)
y_te_true = np.argmax(np.array(test_phns), -1)  # ground-truth phoneme index per frame
# print(pred.shape)
# # print(y_te_true)
# print(np.array(y_te_true).shape)
# Frame-level accuracy for this single utterance.
height,width=pred.shape
acc_count=0
for i in range(height):
    for j in range(width):
        if(pred[i,j] == y_te_true[i,j]):
            acc_count = acc_count+1
accuracy=acc_count/(height*width)
# BUG FIX: the original printed arctic_test_wav[sample_no] — an undefined
# name (only arctic_test_wav2 exists), and this sample comes from TIMIT.
print(f"Accuracy for Test sample no from source: {timit_test_wav[sample_no]} is: {accuracy}")
print("Predicted Phones are:")
print(pred)
print("True Phones are:")
print(y_te_true)
def get_all_mel_phoneme(arctic_train_wav2, arctic_train_phns2):
    """Collect mel-spectrogram frames and one-hot phoneme labels for every
    Arctic file, flattened across files.

    Unlike the TIMIT extractors, Arctic .lab rows hold start times in
    seconds, so boundaries are converted to samples via `sr`, and a frame is
    labeled with the phone of the interval [phoneme[k], phoneme[k+1]) that
    contains its start sample.  Results are flat lists (one entry per frame,
    all files concatenated), not per-file arrays.

    Returns
    -------
    (train2_mel, train2_phns) : list, list
        Mel frame vectors and matching one-hot label vectors.
    """
    from tqdm import tqdm
    train2_mel = []
    train2_phns = []
    for i in tqdm(range(len(arctic_train_wav2))):
        y, sr = librosa.load(arctic_train_wav2[i], sr=None)  # keep native sample rate
        phoneme = open(arctic_train_phns2[i], "r").read()
        phoneme = string_to_matrix_dict(phoneme)  # rows contain [start_seconds, ..., phone]
        win = int(win_length*sr)  # window length in samples
        hop = int(hop_length*sr)  # hop length in samples
        y_mel = librosa.feature.melspectrogram(y=y, sr=sr, win_length=win, hop_length=hop)
        count = 0
        for j in range(0,len(y),hop):  # j = start sample of each frame
            count = count+1
            index = int(j/hop)
            train2_mel.append(y_mel[:,index])
            x=0  # flag: did any annotated interval cover this frame?
            for k in range(1,len(phoneme)-1):
                start_index = int(sr*(float(phoneme[k][0])))
                next_index = int(sr*(float(phoneme[k+1][0])))
                if(j>=start_index and j<=next_index):
                    phn_str = phoneme[k+1][2]
                    phn_label = phn2idx[phn_str]
                    if(phn_label==44):
                        phn_label=0  # fold 'pau' (index 44) into 'h#' silence (index 0)
                    phn_one_hot = np.eye(len(phns))[phn_label]
                    train2_phns.append(phn_one_hot)
                    # train2_phns.append(phn_label)
                    x=x+1
                    break
            if(x==0):
                # No interval matched: label the frame as silence.
                phn_label = 0
                phn_one_hot = np.eye(len(phns))[phn_label]
                train2_phns.append(phn_one_hot)
                # train2_phns.append(phn_label)
    return train2_mel, train2_phns
def get_one_mel_phoneme(arctic_train_wav2, arctic_train_phns2, sample_no):
    """Collect mel frames and one-hot phoneme labels for one Arctic file.

    Identical logic to `get_all_mel_phoneme`, restricted to index
    `sample_no`.  NOTE(review): duplicates that function's body; the two
    should ideally share one helper.
    """
    from tqdm import tqdm
    train2_mel = []
    train2_phns = []
    for i in tqdm(range(sample_no, sample_no+1)):
        y, sr = librosa.load(arctic_train_wav2[i], sr=None)  # keep native sample rate
        phoneme = open(arctic_train_phns2[i], "r").read()
        phoneme = string_to_matrix_dict(phoneme)  # rows contain [start_seconds, ..., phone]
        win = int(win_length*sr)  # window length in samples
        hop = int(hop_length*sr)  # hop length in samples
        y_mel = librosa.feature.melspectrogram(y=y, sr=sr, win_length=win, hop_length=hop)
        count = 0
        for j in range(0,len(y),hop):  # j = start sample of each frame
            count = count+1
            index = int(j/hop)
            train2_mel.append(y_mel[:,index])
            x=0  # flag: did any annotated interval cover this frame?
            for k in range(1,len(phoneme)-1):
                start_index = int(sr*(float(phoneme[k][0])))
                next_index = int(sr*(float(phoneme[k+1][0])))
                if(j>=start_index and j<=next_index):
                    phn_str = phoneme[k+1][2]
                    phn_label = phn2idx[phn_str]
                    if(phn_label==44):
                        phn_label=0  # fold 'pau' (index 44) into 'h#' silence (index 0)
                    phn_one_hot = np.eye(len(phns))[phn_label]
                    train2_phns.append(phn_one_hot)
                    # train2_phns.append(phn_label)
                    x=x+1
                    break
            if(x==0):
                # No interval matched: label the frame as silence.
                phn_label = 0
                phn_one_hot = np.eye(len(phns))[phn_label]
                train2_phns.append(phn_one_hot)
                # train2_phns.append(phn_label)
    return train2_mel, train2_phns
# ---- Stage 2: phoneme -> mel-frame regressor on the target (Arctic) voice ----
train2_mel, train2_phns=get_all_mel_phoneme(arctic_train_wav2, arctic_train_phns2)
# print(len(train2_phns))
# print(len(train2_mel))
# print(np.array(train2_mel).shape)
# print(np.array(train2_phns).shape)
# Simple MLP: one-hot phoneme (len(phns) dims) in, 128-dim mel frame out.
model = keras.Sequential()
model.add(layers.Dense(128, input_dim=len(phns), activation='relu'))
model.add(layers.Dropout(0.1))
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dropout(0.1))
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dropout(0.1))
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dropout(0.1))
model.add(layers.Dense(128, activation='linear'))
model.compile(loss='mean_absolute_error', optimizer='adam', metrics=['mean_absolute_error'])
model.summary()
BATCH_SIZE=32
EPOCHS=5
# NOTE(review): rebinding `model` discards the stage-1 classifier handle.
history=model.fit(np.array(train2_phns),np.array(train2_mel),batch_size=BATCH_SIZE,epochs=EPOCHS,validation_split=0.1,verbose=1)
# ---- Predict mel frames for one Arctic test file and resynthesize audio ----
sample_no=4
print(arctic_test_wav2[sample_no])
test2_mel, test2_phns=get_one_mel_phoneme(arctic_test_wav2, arctic_test_phns2, sample_no)
# Eval
pred_mel = model.predict(np.array(test2_phns))
pred_mel=pred_mel.T  # librosa expects mel bins on axis 0: (n_mels, n_frames)
print(np.array(test2_phns).shape)
print(pred_mel.shape)
# # print(np.array(test2_mel).shape)
# S_inv = librosa.feature.inverse.mel_to_stft(pred_mel, sr=sr)
# y_inv = librosa.griffinlim(S_inv)
# # ipd.Audio(y, rate=sr, autoplay=True) # load a local WAV file
# Invert the predicted mel spectrogram back to a waveform.
# win/hop of 400/80 samples match the 25 ms / 5 ms framing at sr=16000.
sr=16000
y_inv=librosa.feature.inverse.mel_to_audio(pred_mel, sr=sr, win_length=400, hop_length=80)
print(len(y_inv))
print(len(y_inv)/sr)
import soundfile as sf
sf.write('output1.wav',y_inv, sr)
# One bucket per phoneme label; mel frames are accumulated here below.
phns_mel = [[] for i in range(61)]
# phns_mel[0].append([1,2])
# phns_mel[0].append([2,2])
# phns_mel[1].append([2,2])
print(len(phns_mel))
print(phns_mel)
print(phns_mel[0])
# ---- Bucket every Arctic training mel frame under its phoneme label ----
from tqdm import tqdm
train2_mel = []
train2_phns = []
for i in tqdm(range(len(arctic_train_wav2))):
# for i in tqdm(range(1)):
    y, sr = librosa.load(arctic_train_wav2[i], sr=None)  # keep native sample rate
    phoneme = open(arctic_train_phns2[i], "r").read()
    phoneme = string_to_matrix_dict(phoneme)  # rows contain [start_seconds, ..., phone]
    # phoneme=phoneme[2:len(phoneme)-1]
    # print(phoneme)
    # end_y = int(phoneme[len(phoneme)-1][1])
    win = int(win_length*sr)  # window length in samples
    hop = int(hop_length*sr)  # hop length in samples
    # y=y[:end_y]
    y_mel = librosa.feature.melspectrogram(y=y, sr=sr, win_length=win, hop_length=hop)
    # print(y_mel)
    # print(y_mel.shape)
    count = 0
    # # print((phoneme[0][1]))
    for j in range(0,len(y),hop):  # j = start sample of each frame
        count = count+1
        index = int(j/hop)
        # print(index)
        # train2_mel.append(y_mel[:,index])
        x=0  # flag: did any annotated interval cover this frame?
        for k in range(1,len(phoneme)-1):
            start_index = int(sr*(float(phoneme[k][0])))
            next_index = int(sr*(float(phoneme[k+1][0])))
            # print(start_index)
            # print(j, start_index)
            if(j>=start_index and j<=next_index):
                phn_str = phoneme[k+1][2]
                phn_label = phn2idx[phn_str]
                if(phn_label==44):
                    phn_label=0  # fold 'pau' (index 44) into 'h#' silence (index 0)
                phn_one_hot = np.eye(len(phns))[phn_label]
                phns_mel[phn_label].append(y_mel[:,index])
                # train2_phns.append(phn_one_hot)
                # train2_phns.append(phn_label)
                x=x+1
                break
        if(x==0):
            # No interval matched: file the frame under silence (label 0).
            phn_label = 0
            phn_one_hot = np.eye(len(phns))[phn_label]
            phns_mel[phn_label].append(y_mel[:,index])
            # train2_phns.append(phn_one_hot)
            # train2_phns.append(phn_label)
    # print(count)
# print(len(train2_mel))
# print(train2_phns)
#print(train2_mf2l)
# print(len(train2_phns))
# print(len(y))
# print(phoneme)
# length=0
# for i in range(len(phns_mel)):
#     length = length+ len(phns_mel[i])
# print(length)
# print(len(phns_mel[4]))
print(len(phns_mel))
print(len(phns_mel[0]))
print(np.array(phns_mel[0]).shape)
# ---- Reduce each phoneme's bucket of mel frames to one template frame ----
phns_final_mel = np.zeros((61,128))
# First pass: average frame per phoneme (overwritten by the pass below
# for any non-empty bucket, so it only survives for empty buckets as zeros).
for i in range(len(phns_mel)):
#     print(len(phns_mel[i]))
    if(len(phns_mel[i]) != 0):
        phns_final_mel[i]=sum(phns_mel[i])/len(phns_mel[i])
# Second pass: replace the average with the single highest-energy
# (max L2 norm) frame from each phoneme's bucket.
for i in tqdm(range(len(phns_mel))):
#     print(len(phns_mel[i]))
    if(len(phns_mel[i]) != 0):
        h,w = np.array(phns_mel[i]).shape
        norm_vector = np.zeros((h,1))
        for j in range(h):
            norm_vector[j] = np.linalg.norm(np.array(phns_mel[i])[j,:])
        index=np.argmax(norm_vector)
        phns_final_mel[i]=np.array(phns_mel[i])[index,:]
print(phns_final_mel.shape)
print(phns_final_mel[0].shape)
def get_output_mel(arctic_test_wav2, arctic_test_phns2, sample_no):
    """Build a synthetic mel sequence for one Arctic file by substituting
    each frame's phoneme with its template frame from `phns_final_mel`.

    For every analysis frame of file `sample_no`, the matching phoneme label
    is found exactly as in `get_one_mel_phoneme`, and the global
    `phns_final_mel[label]` template is emitted instead of the true frame.

    Returns
    -------
    list
        One 128-dim template mel vector per frame.
    """
    from tqdm import tqdm
    test2_mel = []
    test2_phns = []
    output_mel=[]
    # for i in tqdm(range(len(arctic_test_wav2))):
    for i in tqdm(range(sample_no, sample_no+1)):
        y, sr = librosa.load(arctic_test_wav2[i], sr=None)  # keep native sample rate
        phoneme = open(arctic_test_phns2[i], "r").read()
        phoneme = string_to_matrix_dict(phoneme)  # rows contain [start_seconds, ..., phone]
        # phoneme=phoneme[2:len(phoneme)-1]
        # print(phoneme)
        # end_y = int(phoneme[len(phoneme)-1][1])
        win = int(win_length*sr)  # window length in samples
        hop = int(hop_length*sr)  # hop length in samples
        # y=y[:end_y]
        y_mel = librosa.feature.melspectrogram(y=y, sr=sr, win_length=win, hop_length=hop)
        # print(y_mel)
        # print(y_mel.shape)
        count = 0
        # # print((phoneme[0][1]))
        for j in range(0,len(y),hop):  # j = start sample of each frame
            count = count+1
            index = int(j/hop)
            # print(index)
            test2_mel.append(y_mel[:,index])
            x=0  # flag: did any annotated interval cover this frame?
            for k in range(1,len(phoneme)-1):
                start_index = int(sr*(float(phoneme[k][0])))
                next_index = int(sr*(float(phoneme[k+1][0])))
                # print(start_index)
                # print(j, start_index)
                if(j>=start_index and j<=next_index):
                    phn_str = phoneme[k+1][2]
                    phn_label = phn2idx[phn_str]
                    if(phn_label==44):
                        phn_label=0  # fold 'pau' (index 44) into 'h#' silence (index 0)
                    phn_one_hot = np.eye(len(phns))[phn_label]
                    output_mel.append(phns_final_mel[phn_label])
                    test2_phns.append(phn_one_hot)
                    # test2_phns.append(phn_label)
                    x=x+1
                    break
            if(x==0):
                # No interval matched: emit the silence template.
                phn_label = 0
                phn_one_hot = np.eye(len(phns))[phn_label]
                output_mel.append(phns_final_mel[phn_label])
                test2_phns.append(phn_one_hot)
                # test2_phns.append(phn_label)
        # print(count)
    # print(len(test2_mel))
    # print(train2_phns)
    #print(train2_mf2l)
    # print(len(test2_phns))
    # print(len(y))
    # print(phoneme)
    return output_mel
# ---- Synthesize audio from the per-phoneme template mel frames ----
sample_no=4
print(arctic_test_wav2[sample_no])
output_mel=get_output_mel(arctic_test_wav2, arctic_test_phns2, sample_no)
output_mel=np.array(output_mel).T  # librosa expects (n_mels, n_frames)
print((output_mel).shape)
# print((output_mel))
# win/hop of 400/80 samples match the 25 ms / 5 ms framing at sr=16000.
y_inv=librosa.feature.inverse.mel_to_audio(output_mel, sr=sr, win_length=400, hop_length=80)
print(len(y_inv))
print(len(y_inv)/sr)
sf.write('output2.wav',y_inv, sr)
| [
"librosa.feature.mfcc",
"tensorflow.keras.layers.Dropout",
"librosa.feature.inverse.mel_to_audio",
"numpy.argmax",
"tensorflow.keras.layers.Dense",
"numpy.zeros",
"soundfile.write",
"librosa.load",
"numpy.array",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.Sequential",
"glob.glob",
"li... | [((220, 256), 'glob.glob', 'glob.glob', (['timit_train_wav_data_path'], {}), '(timit_train_wav_data_path)\n', (229, 256), False, 'import glob\n'), ((421, 458), 'glob.glob', 'glob.glob', (['timit_train_phns_data_path'], {}), '(timit_train_phns_data_path)\n', (430, 458), False, 'import glob\n'), ((560, 595), 'glob.glob', 'glob.glob', (['timit_test_wav_data_path'], {}), '(timit_test_wav_data_path)\n', (569, 595), False, 'import glob\n'), ((754, 790), 'glob.glob', 'glob.glob', (['timit_test_phns_data_path'], {}), '(timit_test_phns_data_path)\n', (763, 790), False, 'import glob\n'), ((890, 922), 'glob.glob', 'glob.glob', (['arctic_wav_data_path2'], {}), '(arctic_wav_data_path2)\n', (899, 922), False, 'import glob\n'), ((1416, 1449), 'glob.glob', 'glob.glob', (['arctic_phns_data_path2'], {}), '(arctic_phns_data_path2)\n', (1425, 1449), False, 'import glob\n'), ((6821, 6839), 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (6837, 6839), False, 'from tensorflow import keras\n'), ((7703, 7731), 'numpy.argmax', 'np.argmax', (['pred_cat'], {'axis': '(-1)'}), '(pred_cat, axis=-1)\n', (7712, 7731), True, 'import numpy as np\n'), ((8267, 8307), 'librosa.load', 'librosa.load', (['timit_test_wav[i]'], {'sr': 'None'}), '(timit_test_wav[i], sr=None)\n', (8279, 8307), False, 'import librosa\n'), ((8520, 8548), 'numpy.argmax', 'np.argmax', (['pred_cat'], {'axis': '(-1)'}), '(pred_cat, axis=-1)\n', (8529, 8548), True, 'import numpy as np\n'), ((12427, 12445), 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (12443, 12445), False, 'from tensorflow import keras\n'), ((13581, 13669), 'librosa.feature.inverse.mel_to_audio', 'librosa.feature.inverse.mel_to_audio', (['pred_mel'], {'sr': 'sr', 'win_length': '(400)', 'hop_length': '(80)'}), '(pred_mel, sr=sr, win_length=400,\n hop_length=80)\n', (13617, 13669), False, 'import librosa\n'), ((13729, 13763), 'soundfile.write', 'sf.write', (['"""output1.wav"""', 'y_inv', 'sr'], {}), 
"('output1.wav', y_inv, sr)\n", (13737, 13763), True, 'import soundfile as sf\n'), ((16069, 16088), 'numpy.zeros', 'np.zeros', (['(61, 128)'], {}), '((61, 128))\n', (16077, 16088), True, 'import numpy as np\n'), ((19121, 19211), 'librosa.feature.inverse.mel_to_audio', 'librosa.feature.inverse.mel_to_audio', (['output_mel'], {'sr': 'sr', 'win_length': '(400)', 'hop_length': '(80)'}), '(output_mel, sr=sr, win_length=400,\n hop_length=80)\n', (19157, 19211), False, 'import librosa\n'), ((19248, 19282), 'soundfile.write', 'sf.write', (['"""output2.wav"""', 'y_inv', 'sr'], {}), "('output2.wav', y_inv, sr)\n", (19256, 19282), True, 'import soundfile as sf\n'), ((4510, 4532), 'numpy.array', 'np.array', (['train1_mfccs'], {}), '(train1_mfccs)\n', (4518, 4532), True, 'import numpy as np\n'), ((4549, 4570), 'numpy.array', 'np.array', (['train1_phns'], {}), '(train1_phns)\n', (4557, 4570), True, 'import numpy as np\n'), ((6479, 6501), 'numpy.array', 'np.array', (['train1_mfccs'], {}), '(train1_mfccs)\n', (6487, 6501), True, 'import numpy as np\n'), ((6518, 6539), 'numpy.array', 'np.array', (['train1_phns'], {}), '(train1_phns)\n', (6526, 6539), True, 'import numpy as np\n'), ((6944, 6963), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (6958, 6963), False, 'from tensorflow.keras import layers\n'), ((7069, 7088), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (7083, 7088), False, 'from tensorflow.keras import layers\n'), ((7171, 7190), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (7185, 7190), False, 'from tensorflow.keras import layers\n'), ((7202, 7237), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (7214, 7237), False, 'from tensorflow.keras import layers\n'), ((7249, 7268), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (7263, 7268), False, 'from tensorflow.keras 
import layers\n'), ((7280, 7318), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(61)'], {'activation': '"""softmax"""'}), "(61, activation='softmax')\n", (7292, 7318), False, 'from tensorflow.keras import layers\n'), ((7468, 7490), 'numpy.array', 'np.array', (['train1_mfccs'], {}), '(train1_mfccs)\n', (7476, 7490), True, 'import numpy as np\n'), ((7492, 7513), 'numpy.array', 'np.array', (['train1_phns'], {}), '(train1_phns)\n', (7500, 7513), True, 'import numpy as np\n'), ((7673, 7694), 'numpy.array', 'np.array', (['test1_mfccs'], {}), '(test1_mfccs)\n', (7681, 7694), True, 'import numpy as np\n'), ((7769, 7789), 'numpy.array', 'np.array', (['test1_phns'], {}), '(test1_phns)\n', (7777, 7789), True, 'import numpy as np\n'), ((8453, 8475), 'numpy.array', 'np.array', (['test_feature'], {}), '(test_feature)\n', (8461, 8475), True, 'import numpy as np\n'), ((8586, 8605), 'numpy.array', 'np.array', (['test_phns'], {}), '(test_phns)\n', (8594, 8605), True, 'import numpy as np\n'), ((12525, 12544), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (12539, 12544), False, 'from tensorflow.keras import layers\n'), ((12556, 12592), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (12568, 12592), False, 'from tensorflow.keras import layers\n'), ((12604, 12623), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (12618, 12623), False, 'from tensorflow.keras import layers\n'), ((12635, 12671), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (12647, 12671), False, 'from tensorflow.keras import layers\n'), ((12683, 12702), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (12697, 12702), False, 'from tensorflow.keras import layers\n'), ((12714, 12750), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(128)'], {'activation': 
'"""relu"""'}), "(128, activation='relu')\n", (12726, 12750), False, 'from tensorflow.keras import layers\n'), ((12762, 12781), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (12776, 12781), False, 'from tensorflow.keras import layers\n'), ((12793, 12831), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(128)'], {'activation': '"""linear"""'}), "(128, activation='linear')\n", (12805, 12831), False, 'from tensorflow.keras import layers\n'), ((12985, 13006), 'numpy.array', 'np.array', (['train2_phns'], {}), '(train2_phns)\n', (12993, 13006), True, 'import numpy as np\n'), ((13007, 13027), 'numpy.array', 'np.array', (['train2_mel'], {}), '(train2_mel)\n', (13015, 13027), True, 'import numpy as np\n'), ((13267, 13287), 'numpy.array', 'np.array', (['test2_phns'], {}), '(test2_phns)\n', (13275, 13287), True, 'import numpy as np\n'), ((14081, 14124), 'librosa.load', 'librosa.load', (['arctic_train_wav2[i]'], {'sr': 'None'}), '(arctic_train_wav2[i], sr=None)\n', (14093, 14124), False, 'import librosa\n'), ((14420, 14494), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', ([], {'y': 'y', 'sr': 'sr', 'win_length': 'win', 'hop_length': 'hop'}), '(y=y, sr=sr, win_length=win, hop_length=hop)\n', (14450, 14494), False, 'import librosa\n'), ((19043, 19063), 'numpy.array', 'np.array', (['output_mel'], {}), '(output_mel)\n', (19051, 19063), True, 'import numpy as np\n'), ((2928, 2969), 'librosa.load', 'librosa.load', (['timit_train_wav[i]'], {'sr': 'None'}), '(timit_train_wav[i], sr=None)\n', (2940, 2969), False, 'import librosa\n'), ((3313, 3388), 'librosa.feature.mfcc', 'librosa.feature.mfcc', ([], {'y': 'y', 'sr': 'sr', 'n_mfcc': '(13)', 'win_length': 'win', 'hop_length': 'hop'}), '(y=y, sr=sr, n_mfcc=13, win_length=win, hop_length=hop)\n', (3333, 3388), False, 'import librosa\n'), ((4897, 4938), 'librosa.load', 'librosa.load', (['timit_train_wav[i]'], {'sr': 'None'}), '(timit_train_wav[i], sr=None)\n', (4909, 4938), False, 
'import librosa\n'), ((5282, 5357), 'librosa.feature.mfcc', 'librosa.feature.mfcc', ([], {'y': 'y', 'sr': 'sr', 'n_mfcc': '(13)', 'win_length': 'win', 'hop_length': 'hop'}), '(y=y, sr=sr, n_mfcc=13, win_length=win, hop_length=hop)\n', (5302, 5357), False, 'import librosa\n'), ((6871, 6909), 'tensorflow.keras.layers.LSTM', 'layers.LSTM', (['(64)'], {'return_sequences': '(True)'}), '(64, return_sequences=True)\n', (6882, 6909), False, 'from tensorflow.keras import layers\n'), ((6996, 7034), 'tensorflow.keras.layers.LSTM', 'layers.LSTM', (['(64)'], {'return_sequences': '(True)'}), '(64, return_sequences=True)\n', (7007, 7034), False, 'from tensorflow.keras import layers\n'), ((7123, 7158), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (7135, 7158), False, 'from tensorflow.keras import layers\n'), ((9306, 9349), 'librosa.load', 'librosa.load', (['arctic_train_wav2[i]'], {'sr': 'None'}), '(arctic_train_wav2[i], sr=None)\n', (9318, 9349), False, 'import librosa\n'), ((9541, 9615), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', ([], {'y': 'y', 'sr': 'sr', 'win_length': 'win', 'hop_length': 'hop'}), '(y=y, sr=sr, win_length=win, hop_length=hop)\n', (9571, 9615), False, 'import librosa\n'), ((10861, 10904), 'librosa.load', 'librosa.load', (['arctic_train_wav2[i]'], {'sr': 'None'}), '(arctic_train_wav2[i], sr=None)\n', (10873, 10904), False, 'import librosa\n'), ((11097, 11171), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', ([], {'y': 'y', 'sr': 'sr', 'win_length': 'win', 'hop_length': 'hop'}), '(y=y, sr=sr, win_length=win, hop_length=hop)\n', (11127, 11171), False, 'import librosa\n'), ((13315, 13335), 'numpy.array', 'np.array', (['test2_phns'], {}), '(test2_phns)\n', (13323, 13335), True, 'import numpy as np\n'), ((16023, 16044), 'numpy.array', 'np.array', (['phns_mel[0]'], {}), '(phns_mel[0])\n', (16031, 16044), True, 'import numpy as np\n'), ((16404, 
16420), 'numpy.zeros', 'np.zeros', (['(h, 1)'], {}), '((h, 1))\n', (16412, 16420), True, 'import numpy as np\n'), ((16533, 16555), 'numpy.argmax', 'np.argmax', (['norm_vector'], {}), '(norm_vector)\n', (16542, 16555), True, 'import numpy as np\n'), ((16944, 16986), 'librosa.load', 'librosa.load', (['arctic_test_wav2[i]'], {'sr': 'None'}), '(arctic_test_wav2[i], sr=None)\n', (16956, 16986), False, 'import librosa\n'), ((17317, 17391), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', ([], {'y': 'y', 'sr': 'sr', 'win_length': 'win', 'hop_length': 'hop'}), '(y=y, sr=sr, win_length=win, hop_length=hop)\n', (17347, 17391), False, 'import librosa\n'), ((4411, 4437), 'numpy.array', 'np.array', (['time_step1_mfccs'], {}), '(time_step1_mfccs)\n', (4419, 4437), True, 'import numpy as np\n'), ((4466, 4491), 'numpy.array', 'np.array', (['time_step1_phns'], {}), '(time_step1_phns)\n', (4474, 4491), True, 'import numpy as np\n'), ((6380, 6406), 'numpy.array', 'np.array', (['time_step1_mfccs'], {}), '(time_step1_mfccs)\n', (6388, 6406), True, 'import numpy as np\n'), ((6435, 6460), 'numpy.array', 'np.array', (['time_step1_phns'], {}), '(time_step1_phns)\n', (6443, 6460), True, 'import numpy as np\n'), ((16354, 16375), 'numpy.array', 'np.array', (['phns_mel[i]'], {}), '(phns_mel[i])\n', (16362, 16375), True, 'import numpy as np\n'), ((16582, 16603), 'numpy.array', 'np.array', (['phns_mel[i]'], {}), '(phns_mel[i])\n', (16590, 16603), True, 'import numpy as np\n'), ((16491, 16512), 'numpy.array', 'np.array', (['phns_mel[i]'], {}), '(phns_mel[i])\n', (16499, 16512), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.metrics import mean_absolute_error
from matplotlib import rc
# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
# Global font configuration: serif (Times New Roman) for all figure text.
rc('font',**{'family':'serif','serif':['Times New Roman']})
# Render text with LaTeX — NOTE: requires a working TeX installation.
rc('text', usetex=True)
def graph_one2one():
    """Draw three side-by-side parity (predicted vs. true) scatter plots.

    Reads held-out test predictions for formation energy, experimental band
    gap, and thermoelectric zT from CSV files in the working directory, and
    colours each point by a standardised transform of its absolute error.

    Returns:
        The ``matplotlib.pyplot`` module (call ``.show()``/``.savefig()``).
    """
    dfef = pd.read_csv("comptexnet_e_form_test.csv")
    e_form_true = dfef["e_form (eV/atom)"]
    e_form_pred = dfef["predicted e_form (eV/atom)"]
    dfbg = pd.read_csv("comptexnet_expt_gaps_test.csv")
    bg_true = dfbg["bandgap (eV)"]
    bg_pred = dfbg["predicted bandgap (eV)"]
    dfzt = pd.read_csv("comptexnet_zT_test.csv")
    zt_true = dfzt["zT"]
    zt_pred = dfzt["predicted zT"]
    # One configuration dict per subplot; marker size differs per data set
    # (presumably larger markers for the smaller data sets — not verified).
    data = [
        {"name": "e_form", "title": r'DFT-PBE $E_f$', "y_label": r'$E_f$ (eV/atom)', "x_label": r'$E_f$ predicted (eV/atom)', "true": e_form_true, "pred": e_form_pred, "markersize": 3},
        {"name":"expt_gaps", "title": r'Experimental $E_g$', "y_label": r'$E_g$ (eV)', "x_label": r'$E_g$ predicted (eV)', "true": bg_true, "pred": bg_pred, "markersize": 8},
        {"name": "zT", "title": "Experimental Thermoelectric zT", "y_label": r'zT', "x_label": r'zT predicted', "true": zt_true, "pred": zt_pred, "markersize": 20}
    ]
    f, axes = plt.subplots(1, 3)
    for i, ax in enumerate(axes):
        d = data[i]
        true = d["true"].tolist()
        pred = d["pred"].tolist()
        maxtrue = max(true)
        mintrue = min([min(true), 0])
        # Axis limits padded 10% beyond the extremes (clamped to include 0).
        maxtot = max(true + pred) * 1.1
        mintot = min([min(true + pred), 0]) * 0.9
        # Color by deviation: sqrt of absolute error, then z-scored so the
        # palette spread is comparable across the three panels.
        hues = np.power(np.abs(np.asarray(true) - np.asarray(pred)), 0.5)
        hues_mean = np.mean(hues)
        hues_std = np.std(hues)
        hues = (hues - hues_mean)/hues_std
        # pal5 = sns.blend_palette(["blue", "green", "red"], as_cmap=True)
        pal5 = "magma_r"
        sns.scatterplot(y=true, x=pred, ax=ax, s=d["markersize"], hue=hues, palette=pal5, legend=False, linewidth=0, alpha=1.0)
        # Identity line y = x for perfect predictions.
        ax.plot([mintrue, maxtrue], [mintrue, maxtrue], color="black")
        ax.set_ylim((mintot, maxtot))
        ax.set_xlim((mintot, maxtot))
        fontsize = "x-large"
        ax.set_xlabel(d["x_label"], fontsize=fontsize)
        ax.set_ylabel(d["y_label"], fontsize=fontsize)
        ax.set_aspect("equal", "box")
        ax.set_title(d["title"], fontsize=fontsize)
        ax.tick_params(axis='both', which='major', labelsize=14)
    gs1 = gridspec.GridSpec(1, 3)
    gs1.update(hspace=0.3)  # set the spacing between axes.
    # plt.tight_layout(wpad=-2)
    return plt
def graph_learning_rate():
    """Plot test MAE against training-set size for two model families.

    Both axes are log-scaled.  Returns the ``matplotlib.pyplot`` module so
    the caller can ``show()`` or save the figure.
    """
    training_sizes = [100, 500, 1000, 5000, 10000, 51008]
    # Measured MAEs at each training-set size, keyed by model family.
    curves = {
        "densenet": [0.75828, 0.49260501, 0.44516, 0.287502, 0.2099714, 0.120847],
        "attention": [1.545, 0.662, 0.4463, 0.2353, 0.1562, 0.0859],
    }
    fig, ax = plt.subplots()
    for family, maes in curves.items():
        if family == "attention":
            label = "1-layer Attention"
        else:
            label = "2-layer DenseNet"
        ax.plot(training_sizes, maes, marker='o', linestyle='--',
                linewidth=2, markersize=6, label=label)
    big = "x-large"
    ax.legend(fontsize=big)
    ax.set_yscale("log")
    ax.set_xscale("log")
    ax.set_xlabel(r'Log training set size', fontsize=big)
    ax.set_ylabel(r'Log MAE', fontsize=big)
    ax.set_title(r'Effect of training set size on error rate for $E_f$ prediction', fontsize=big)
    ax.tick_params(axis='both', which='major', labelsize=18)
    return plt
if __name__ == "__main__":
    # plt = graph_one2one()
    # NOTE: this rebinds the module-level name ``plt`` (matplotlib.pyplot);
    # both graph_* functions return that same module.
    plt = graph_learning_rate()
    plt.show()
| [
"matplotlib.rc",
"matplotlib.pyplot.show",
"seaborn.scatterplot",
"pandas.read_csv",
"numpy.std",
"numpy.asarray",
"numpy.mean",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.subplots"
] | [((317, 380), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'family': 'serif', 'serif': ['Times New Roman']})\n", (319, 380), False, 'from matplotlib import rc\n'), ((377, 400), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (379, 400), False, 'from matplotlib import rc\n'), ((435, 476), 'pandas.read_csv', 'pd.read_csv', (['"""comptexnet_e_form_test.csv"""'], {}), "('comptexnet_e_form_test.csv')\n", (446, 476), True, 'import pandas as pd\n'), ((585, 629), 'pandas.read_csv', 'pd.read_csv', (['"""comptexnet_expt_gaps_test.csv"""'], {}), "('comptexnet_expt_gaps_test.csv')\n", (596, 629), True, 'import pandas as pd\n'), ((722, 759), 'pandas.read_csv', 'pd.read_csv', (['"""comptexnet_zT_test.csv"""'], {}), "('comptexnet_zT_test.csv')\n", (733, 759), True, 'import pandas as pd\n'), ((1380, 1398), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {}), '(1, 3)\n', (1392, 1398), True, 'import matplotlib.pyplot as plt\n'), ((2577, 2600), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(3)'], {}), '(1, 3)\n', (2594, 2600), True, 'import matplotlib.gridspec as gridspec\n'), ((3038, 3052), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3050, 3052), True, 'import matplotlib.pyplot as plt\n'), ((3769, 3779), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3777, 3779), True, 'import matplotlib.pyplot as plt\n'), ((1805, 1818), 'numpy.mean', 'np.mean', (['hues'], {}), '(hues)\n', (1812, 1818), True, 'import numpy as np\n'), ((1838, 1850), 'numpy.std', 'np.std', (['hues'], {}), '(hues)\n', (1844, 1850), True, 'import numpy as np\n'), ((2004, 2128), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'y': 'true', 'x': 'pred', 'ax': 'ax', 's': "d['markersize']", 'hue': 'hues', 'palette': 'pal5', 'legend': '(False)', 'linewidth': '(0)', 'alpha': '(1.0)'}), "(y=true, x=pred, ax=ax, s=d['markersize'], hue=hues, palette\n =pal5, legend=False, linewidth=0, alpha=1.0)\n", (2019, 
2128), True, 'import seaborn as sns\n'), ((1742, 1758), 'numpy.asarray', 'np.asarray', (['true'], {}), '(true)\n', (1752, 1758), True, 'import numpy as np\n'), ((1761, 1777), 'numpy.asarray', 'np.asarray', (['pred'], {}), '(pred)\n', (1771, 1777), True, 'import numpy as np\n')] |
import numpy

# Read nine whitespace-separated integers from stdin and print them as a
# 3x3 integer matrix.  (The original wrapped ``map`` in a redundant
# generator-inside-tuple before converting to an array.)
values = [int(token) for token in input().split()]
arr = numpy.reshape(values, (3, 3))
print(arr)
| [
"numpy.reshape"
] | [((83, 109), 'numpy.reshape', 'numpy.reshape', (['arr', '(3, 3)'], {}), '(arr, (3, 3))\n', (96, 109), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
#/usr/bin/python2
'''
By <NAME>. <EMAIL>.
https://www.github.com/kyubyong/cross_vc
'''
from __future__ import print_function
from hparams import Hyperparams as hp
import numpy as np
import tensorflow as tf
from utils import *
import codecs
import re
import os, glob
import tqdm
def load_vocab():
    """Build phoneme->index and index->phoneme lookup tables from hp.vocab."""
    phn2idx = {}
    idx2phn = {}
    for idx, phn in enumerate(hp.vocab):
        phn2idx[phn] = idx
        idx2phn[idx] = phn
    return phn2idx, idx2phn
def load_data(mode="train1"):
    """Return file paths or feature arrays depending on the pipeline stage.

    mode:
        "train1": returns (wav paths, phoneme-label paths) for TIMIT,
                  excluding the first ``hp.batch_size`` files (held out).
        "eval1":  returns padded (mfccs, phns) arrays for that held-out batch.
        "train2": returns wav paths matching ``hp.arctic``.
        other:    treated as conversion; returns MFCC arrays (max 800 frames)
                  for the files matching ``hp.test_data``.
    """
    if mode in ("train1", "eval1"):
        wav_fpaths = glob.glob(hp.timit)
        # wav_fpaths = [w for w in wav_fpaths if 'TEST/DR1/FAKS' not in w]
        # Derive the TIMIT phoneme-label path from each wav path.
        phn_fpaths = [f.replace("WAV.wav", "PHN").replace("wav", 'PHN')
                      for f in wav_fpaths]
        if mode=="train1":
            return wav_fpaths[hp.batch_size:], phn_fpaths[hp.batch_size:]
        else:
            wav_fpaths, phn_fpaths = wav_fpaths[:hp.batch_size], phn_fpaths[:hp.batch_size]
            # Pre-allocate up to 1500 frames per utterance, then trim to the
            # longest actually seen.
            mfccs = np.zeros((hp.batch_size, 1500, hp.n_mfccs), np.float32)
            phns = np.zeros((hp.batch_size, 1500), np.int32)
            max_length = 0
            for i, (w, p) in enumerate(zip(wav_fpaths, phn_fpaths)):
                mfcc, phone = load_mfccs_and_phones(w, p)
                max_length = max(max_length, len(mfcc))
                mfccs[i, :len(mfcc), :] = mfcc
                phns[i, :len(phone)] = phone
            mfccs = mfccs[:, :max_length, :]
            phns = phns[:, :max_length]
            return mfccs, phns
    elif mode=="train2":
        wav_fpaths = glob.glob(hp.arctic)
        return wav_fpaths
    else: # convert
        files = glob.glob(hp.test_data)
        # Conversion inputs are cropped/padded to exactly 800 frames.
        mfccs = np.zeros((len(files), 800, hp.n_mfccs), np.float32)
        for i, f in enumerate(files):
            mfcc, _ = get_mfcc_and_mag(f, trim=True)
            mfcc = mfcc[:800]
            mfccs[i, :len(mfcc), :] = mfcc
        return mfccs
def load_mfccs_and_phones(wav_fpath, phn_fpath):
    """Load MFCC frames for a wav file plus one aligned phoneme id per frame.

    Args:
        wav_fpath: path to the wav file.
        phn_fpath: path to the TIMIT-style label file, one
            "<start_sample> <end_sample> <phoneme>" line per segment.

    Returns:
        (mfccs, phns): mfccs from ``get_mfcc_and_mag`` and an int32 vector of
        the same length assigning a vocabulary index to every frame.
    """
    phn2idx, idx2phn = load_vocab()
    mfccs, _ = get_mfcc_and_mag(wav_fpath, trim=False)
    phns = np.zeros(shape=(mfccs.shape[0],), dtype=np.int32)
    # Read and parse the label file once.  (The original opened it twice and
    # never closed either handle.)  Blank lines are skipped defensively.
    with open(phn_fpath, 'r') as f:
        segments = [line.strip().split() for line in f if line.strip()]
    phones = [seg[-1] for seg in segments]
    # Triphone context (previous, current, next), padded with "0" at the ends.
    triphones = list(zip(["0"] + phones[:-1], phones, phones[1:] + ["0"]))
    for i, seg in enumerate(segments):
        start_point, _, phn = seg
        # Frame index where this phoneme segment begins.
        bnd = int(start_point) // int(hp.sr * hp.frame_shift)
        triphone = triphones[i]
        # Prefer a triphone entry in the vocabulary, fall back to the
        # monophone; anything else is unexpected and reported.
        if triphone in phn2idx:
            phn = phn2idx[triphone]
        elif phn in phn2idx:
            phn = phn2idx[phn]
        else: # error
            print(phn)
        # Label every frame from this boundary onward; later segments
        # overwrite their own tails.
        phns[bnd:] = phn
    return mfccs, phns
def get_batch(mode="train1"):
    '''Loads data and put them in mini batch queues.
    mode: A string. Either `train1` or `train2`.
    Uses the (TF1) queue-runner input pipeline: slice_input_producer feeds
    file paths, py_func decodes them, and train.batch pads/batches.
    '''
    # with tf.device('/cpu:0'):
    # Load data
    if mode=='train1':
        wav_fpaths, phn_fpaths = load_data(mode=mode)
        # calc total batch count
        num_batch = len(wav_fpaths) // hp.batch_size
        # Create Queues (shuffled each epoch)
        wav_fpath, phn_fpath = tf.train.slice_input_producer([wav_fpaths, phn_fpaths], shuffle=True)
        # Decoding: run the Python loader inside the graph
        mfccs, phones = tf.py_func(load_mfccs_and_phones, [wav_fpath, phn_fpath], [tf.float32, tf.int32]) # (T, n_mfccs)
        # Create batch queues; dynamic_pad pads variable-length T per batch
        mfccs, phones, wav_fpaths = tf.train.batch([mfccs, phones, wav_fpath],
                                                   batch_size=hp.batch_size,
                                                   num_threads=32,
                                                   shapes=[(None, hp.n_mfccs), (None,), ()],
                                                   dynamic_pad=True)
        return mfccs, phones, num_batch, wav_fpaths
    elif mode=='train2':
        wav_fpaths = load_data(mode=mode)
        # maxlen, minlen = max(lengths), min(lengths)
        # calc total batch count
        num_batch = len(wav_fpaths) // hp.batch_size
        # Create Queues (unshuffled)
        wav_fpath, = tf.train.slice_input_producer([wav_fpaths])
        # Decoding
        mfcc, mag = tf.py_func(get_mfcc_and_mag, [wav_fpath], [tf.float32, tf.float32]) # (T, n_mfccs)
        # Cropping to at most hp.maxlen frames
        mfcc = mfcc[:hp.maxlen]
        mag = mag[:hp.maxlen]
        # Create batch queues
        mfccs, mags = tf.train.batch([mfcc, mag],
                                     batch_size=hp.batch_size,
                                     num_threads=32,
                                     shapes=[(None, hp.n_mfccs), (None, 1+hp.n_fft//2)],
                                     dynamic_pad=True)
return mfccs, mags, num_batch | [
"tensorflow.py_func",
"numpy.zeros",
"glob.glob",
"tensorflow.train.batch",
"tensorflow.train.slice_input_producer"
] | [((2097, 2146), 'numpy.zeros', 'np.zeros', ([], {'shape': '(mfccs.shape[0],)', 'dtype': 'np.int32'}), '(shape=(mfccs.shape[0],), dtype=np.int32)\n', (2105, 2146), True, 'import numpy as np\n'), ((563, 582), 'glob.glob', 'glob.glob', (['hp.timit'], {}), '(hp.timit)\n', (572, 582), False, 'import os, glob\n'), ((3225, 3294), 'tensorflow.train.slice_input_producer', 'tf.train.slice_input_producer', (['[wav_fpaths, phn_fpaths]'], {'shuffle': '(True)'}), '([wav_fpaths, phn_fpaths], shuffle=True)\n', (3254, 3294), True, 'import tensorflow as tf\n'), ((3339, 3425), 'tensorflow.py_func', 'tf.py_func', (['load_mfccs_and_phones', '[wav_fpath, phn_fpath]', '[tf.float32, tf.int32]'], {}), '(load_mfccs_and_phones, [wav_fpath, phn_fpath], [tf.float32, tf.\n int32])\n', (3349, 3425), True, 'import tensorflow as tf\n'), ((3504, 3652), 'tensorflow.train.batch', 'tf.train.batch', (['[mfccs, phones, wav_fpath]'], {'batch_size': 'hp.batch_size', 'num_threads': '(32)', 'shapes': '[(None, hp.n_mfccs), (None,), ()]', 'dynamic_pad': '(True)'}), '([mfccs, phones, wav_fpath], batch_size=hp.batch_size,\n num_threads=32, shapes=[(None, hp.n_mfccs), (None,), ()], dynamic_pad=True)\n', (3518, 3652), True, 'import tensorflow as tf\n'), ((1001, 1056), 'numpy.zeros', 'np.zeros', (['(hp.batch_size, 1500, hp.n_mfccs)', 'np.float32'], {}), '((hp.batch_size, 1500, hp.n_mfccs), np.float32)\n', (1009, 1056), True, 'import numpy as np\n'), ((1076, 1117), 'numpy.zeros', 'np.zeros', (['(hp.batch_size, 1500)', 'np.int32'], {}), '((hp.batch_size, 1500), np.int32)\n', (1084, 1117), True, 'import numpy as np\n'), ((1584, 1604), 'glob.glob', 'glob.glob', (['hp.arctic'], {}), '(hp.arctic)\n', (1593, 1604), False, 'import os, glob\n'), ((1668, 1691), 'glob.glob', 'glob.glob', (['hp.test_data'], {}), '(hp.test_data)\n', (1677, 1691), False, 'import os, glob\n'), ((4113, 4156), 'tensorflow.train.slice_input_producer', 'tf.train.slice_input_producer', (['[wav_fpaths]'], {}), '([wav_fpaths])\n', (4142, 4156), 
True, 'import tensorflow as tf\n'), ((4197, 4264), 'tensorflow.py_func', 'tf.py_func', (['get_mfcc_and_mag', '[wav_fpath]', '[tf.float32, tf.float32]'], {}), '(get_mfcc_and_mag, [wav_fpath], [tf.float32, tf.float32])\n', (4207, 4264), True, 'import tensorflow as tf\n'), ((4416, 4563), 'tensorflow.train.batch', 'tf.train.batch', (['[mfcc, mag]'], {'batch_size': 'hp.batch_size', 'num_threads': '(32)', 'shapes': '[(None, hp.n_mfccs), (None, 1 + hp.n_fft // 2)]', 'dynamic_pad': '(True)'}), '([mfcc, mag], batch_size=hp.batch_size, num_threads=32,\n shapes=[(None, hp.n_mfccs), (None, 1 + hp.n_fft // 2)], dynamic_pad=True)\n', (4430, 4563), True, 'import tensorflow as tf\n')] |
from PySide2 import QtWidgets, QtCore, QtGui
# from PySide2.QtWidgets import QApplication, QLabel, QPushButton, QVBoxLayout, QWidget, QGridLayout
# from PySide2.QtCore import Slot, Qt, QSize
# from PySide2.QtGui import QSizePolicy
import sys
from board import Board, BoardSpec
class BoardWindow(QtWidgets.QWidget):
    """Widget that renders a Board as a clickable grid of square buttons."""

    def __init__(self, board):
        QtWidgets.QWidget.__init__(self)
        self.board = board
        self.board_grid = self.make_board()
        # NOTE: ``self.layout`` shadows QWidget.layout(); kept for interface
        # compatibility with existing callers.
        self.layout = QtWidgets.QVBoxLayout()
        self.layout.addLayout(self.board_grid)
        self.setLayout(self.layout)

    def make_board(self):
        """Build a QGridLayout holding one SquareButton per board cell."""
        rows, cols = self.board.current.shape
        # Create the grid WITHOUT a parent: passing ``self`` to the
        # QGridLayout constructor would install it as this widget's layout,
        # making the later setLayout() call emit a "widget already has a
        # layout" warning and be ignored.
        grid = QtWidgets.QGridLayout()
        for row in range(rows):
            for col in range(cols):
                text = str(self.board.current[row, col])
                if text == '0':
                    text = ''  # empty cells show no number
                button = SquareButton(text)
                button.clicked.connect(self.tile_clicked(row, col))
                grid.addWidget(button, row, col)
        return grid

    def tile_clicked(self, row, col):
        """Return a click handler bound to the given cell position."""
        def handler():
            # NOTE(review): the mark value is hard-coded to 1 — presumably a
            # placeholder; confirm the intended value source.
            val = 1
            self.board.mark(val, row, col)
            self.board_grid.itemAtPosition(row, col).widget().setText(str(val))
            QtWidgets.QApplication.processEvents()
        return handler
class SquareButton(QtWidgets.QPushButton):
    """Push button that expands with its layout while staying square."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        expanding = QtWidgets.QSizePolicy.Expanding
        self.setSizePolicy(expanding, expanding)

    def resizeEvent(self, event):
        # Scale a unit square up to fit the offered size, preserving the
        # 1:1 aspect ratio, and adopt that as our actual size.
        square = QtCore.QSize(1, 1)
        square.scale(event.size(), QtCore.Qt.KeepAspectRatio)
        self.resize(square)
if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    # Demo setup: a 9x9 board with 1..9 placed along the main diagonal.
    board_spec = BoardSpec(9)
    board = Board(board_spec)
    import numpy as np
    board_init = np.zeros((9, 9), dtype=int)
    for c in range(9):
        board_init[c, c] = c + 1
    board.current = board_init
    board.calculate_possible()
    # Show the window and hand control to the Qt event loop.
    widget = BoardWindow(board)
    widget.resize(800, 600)
    widget.show()
    sys.exit(app.exec_())
| [
"PySide2.QtCore.QSize",
"PySide2.QtWidgets.QGridLayout",
"PySide2.QtWidgets.QApplication",
"PySide2.QtWidgets.QWidget.__init__",
"numpy.zeros",
"board.BoardSpec",
"PySide2.QtWidgets.QVBoxLayout",
"board.Board",
"PySide2.QtWidgets.QApplication.processEvents"
] | [((1758, 1790), 'PySide2.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (1780, 1790), False, 'from PySide2 import QtWidgets, QtCore, QtGui\n'), ((1809, 1821), 'board.BoardSpec', 'BoardSpec', (['(9)'], {}), '(9)\n', (1818, 1821), False, 'from board import Board, BoardSpec\n'), ((1834, 1851), 'board.Board', 'Board', (['board_spec'], {}), '(board_spec)\n', (1839, 1851), False, 'from board import Board, BoardSpec\n'), ((1893, 1920), 'numpy.zeros', 'np.zeros', (['(9, 9)'], {'dtype': 'int'}), '((9, 9), dtype=int)\n', (1901, 1920), True, 'import numpy as np\n'), ((356, 388), 'PySide2.QtWidgets.QWidget.__init__', 'QtWidgets.QWidget.__init__', (['self'], {}), '(self)\n', (382, 388), False, 'from PySide2 import QtWidgets, QtCore, QtGui\n'), ((483, 506), 'PySide2.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (504, 506), False, 'from PySide2 import QtWidgets, QtCore, QtGui\n'), ((678, 705), 'PySide2.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self'], {}), '(self)\n', (699, 705), False, 'from PySide2 import QtWidgets, QtCore, QtGui\n'), ((1607, 1625), 'PySide2.QtCore.QSize', 'QtCore.QSize', (['(1)', '(1)'], {}), '(1, 1)\n', (1619, 1625), False, 'from PySide2 import QtWidgets, QtCore, QtGui\n'), ((1271, 1309), 'PySide2.QtWidgets.QApplication.processEvents', 'QtWidgets.QApplication.processEvents', ([], {}), '()\n', (1307, 1309), False, 'from PySide2 import QtWidgets, QtCore, QtGui\n')] |
"""Module containing peak-detection algorithm and related functionality."""
from collections import defaultdict
import numpy as np
from matplotlib import pyplot
from cycler import cycler
# Six colours paired with six dash patterns.  Adding two equal-length cyclers
# zips them element-wise into six distinct (color, linestyle) combinations.
line_colors = ["C1", "C2", "C3", "C4", "C5", "C6"]
line_styles = ["-", "--", ":", "-.", (0, (1, 10)), (0, (5, 10))]
cyl = cycler(color=line_colors) + cycler(linestyle=line_styles)
# Calling a Cycler yields an endless iterator over its entries; STYLE hands
# out the next style dict the first time a key is seen and remembers it.
loop_cy_iter = cyl()
STYLE = defaultdict(lambda: next(loop_cy_iter))
class Peak:
    """One peak found during the sweep: birth index, extent, and death."""

    def __init__(self, startidx):
        self.born = startidx
        self.left = startidx
        self.right = startidx
        self.died = None

    def get_persistence(self, seq):
        """Persistence = birth height minus death height (inf while alive)."""
        if self.died is None:
            return float("inf")
        return seq[self.born] - seq[self.died]


def get_persistent_homology(seq):
    """Compute the 0-dimensional persistent homology of a 1-D sequence.

    Sweeps the samples from highest to lowest value, growing peaks sideways
    and, when two peaks meet at a saddle, letting the younger one die and be
    absorbed by the older.  Returns the peaks sorted by descending
    persistence (the global maximum first, with infinite persistence).
    """
    peaks = []
    owner = [None] * len(seq)  # sample index -> position of its peak in ``peaks``

    # Visit samples from the highest value down (stable sort keeps ties in order).
    for idx in sorted(range(len(seq)), key=lambda i: seq[i], reverse=True):
        has_left = idx > 0 and owner[idx - 1] is not None
        has_right = idx < len(seq) - 1 and owner[idx + 1] is not None
        il = owner[idx - 1] if has_left else None
        ir = owner[idx + 1] if has_right else None

        if not has_left and not has_right:
            # Local maximum: a new peak is born here.
            peaks.append(Peak(idx))
            owner[idx] = len(peaks) - 1
        elif has_left and not has_right:
            # Extend the peak on the left over this sample.
            peaks[il].right += 1
            owner[idx] = il
        elif has_right and not has_left:
            # Extend the peak on the right over this sample.
            peaks[ir].left -= 1
            owner[idx] = ir
        else:
            # Saddle: merge, the peak with the lower birth value dies here.
            if seq[peaks[il].born] > seq[peaks[ir].born]:
                peaks[ir].died = idx
                peaks[il].right = peaks[ir].right
                owner[peaks[il].right] = owner[idx] = il
            else:
                peaks[il].died = idx
                peaks[ir].left = peaks[il].left
                owner[peaks[ir].left] = owner[idx] = ir

    # This is optional convenience
    return sorted(peaks, key=lambda p: p.get_persistence(seq), reverse=True)
def integrated_charge(spectrum_file, from_energy, to_energy):
    """Integrate the charge histogram between the two energy limits.

    Loads ``edges`` (bin edges) and ``counts`` (dQ/dE) from the given
    ``.npz`` file and sums charge over every bin whose right edge lies in
    ``[from_energy, to_energy]`` (MeV).

    Returns:
        The integrated charge in pC.
    """
    spectrum = np.load(spectrum_file)
    edges = spectrum["edges"]
    counts = spectrum["counts"]
    widths = np.diff(edges)
    bin_energy = edges[1:]  # right edge of each bin
    selected = (bin_energy >= from_energy) & (bin_energy <= to_energy)  # MeV
    return np.sum(widths[selected] * counts[selected])  # integrated charge, pC
def peak_position(spectrum_file, from_energy, to_energy):
    """Return the energy (MeV) of maximum charge within the given interval.

    Loads the histogram from the ``.npz`` file, restricts it to bins whose
    right edge lies in ``[from_energy, to_energy]``, and reports the energy
    of the bin with the largest charge.
    """
    spectrum = np.load(spectrum_file)
    bin_energy = spectrum["edges"][1:]
    counts = spectrum["counts"]
    selected = (bin_energy >= from_energy) & (bin_energy <= to_energy)  # MeV
    bin_energy = bin_energy[selected]
    counts = counts[selected]
    peak_idx = np.argmax(counts)
    return bin_energy[peak_idx]  # MeV
def plot_electron_energy_spectrum(spectrum_file, fig_file, ax_title=None):
    """Plot the electron spectrum from file.

    Loads ``edges``/``counts`` from the ``.npz`` file, restricts the plot to
    0-350 MeV with charge clipped to [0, 60], annotates up to six peaks found
    by persistent homology, and saves the figure to ``fig_file``.
    """
    npzfile = np.load(spectrum_file)
    energy = npzfile["edges"]
    charge = npzfile["counts"]
    delta_energy = np.diff(energy)
    energy = energy[1:]
    mask = (energy > 0) & (energy < 350)  # MeV
    energy = energy[mask]
    charge = np.clip(charge, 0, 60)[mask]
    # Peaks of the (masked, clipped) charge curve, strongest first.
    h = get_persistent_homology(charge)
    # plot it
    fig, ax = pyplot.subplots(figsize=(10, 6))
    ax.set_title(ax_title)
    ax.step(
        energy,
        charge,
    )
    ax.set_xlabel("E (MeV)")
    ax.set_ylabel("dQ/dE (pC/MeV)")
    for peak_number, peak in enumerate(
        h[:6]
    ):  # go through first peaks, in order of importance
        peak_index = peak.born
        energy_position = energy[peak_index]
        charge_value = charge[peak_index]
        persistence = peak.get_persistence(charge)
        # Shade from the peak top down by its persistence (to the floor for
        # the immortal global maximum).
        ymin = charge_value - persistence
        if np.isinf(persistence):
            ymin = 0
        # NOTE(review): ``delta_energy`` is indexed with peak.left/right,
        # which refer to the *masked* charge array while delta_energy is
        # unmasked — verify the mask never drops leading bins.
        Q = np.sum(
            delta_energy[peak.left : peak.right] * charge[peak.left : peak.right]
        )  # integrated charge
        # STYLE is a defaultdict: the first access per peak_index draws the
        # next (color, linestyle) combination and keeps it for reuse below.
        ax.annotate(
            text=f"#{peak_number}, {Q:.0f} pC, {energy_position:.1f} MeV",
            xy=(energy_position + 5, charge_value + 0.02),
            xycoords="data",
            color=STYLE[str(peak_index)]["color"],
        )
        ax.axvline(
            x=energy_position,
            linestyle=STYLE[str(peak_index)]["linestyle"],
            color=STYLE[str(peak_index)]["color"],
            linewidth=2,
        )
        ax.fill_between(
            energy,
            charge,
            ymin,
            where=(energy > energy[peak.left]) & (energy <= energy[peak.right]),
            color=STYLE[str(peak_index)]["color"],
            alpha=0.9,
        )
    fig.savefig(fig_file)
    pyplot.close(fig)
def main():
    """Demo driver: pick one signac job at random and analyse its spectrum."""
    import random
    import signac
    random.seed(42)  # fixed seed so the "random" job choice is reproducible
    proj = signac.get_project(search=False)
    ids = [job.id for job in proj]
    job = proj.open_job(id=random.choice(ids))
    plot_electron_energy_spectrum(job.fn("final_histogram.npz"), "final_histogram.png")
    # Report charge and peak position in a fixed 100-300 MeV window.
    energy_low = 100
    energy_high = 300
    Q = integrated_charge(
        job.fn("final_histogram.npz"), from_energy=energy_low, to_energy=energy_high
    )
    pos = peak_position(
        job.fn("final_histogram.npz"), from_energy=energy_low, to_energy=energy_high
    )
    print(
        f"{Q:.1f} pc between {energy_low} and {energy_high} MeV, peak at {pos:.1f} MeV"
    )
if __name__ == "__main__":
    main()
| [
"cycler.cycler",
"numpy.load",
"numpy.sum",
"numpy.argmax",
"matplotlib.pyplot.close",
"numpy.isinf",
"numpy.clip",
"random.choice",
"numpy.diff",
"random.seed",
"signac.get_project",
"matplotlib.pyplot.subplots"
] | [((311, 336), 'cycler.cycler', 'cycler', ([], {'color': 'line_colors'}), '(color=line_colors)\n', (317, 336), False, 'from cycler import cycler\n'), ((339, 368), 'cycler.cycler', 'cycler', ([], {'linestyle': 'line_styles'}), '(linestyle=line_styles)\n', (345, 368), False, 'from cycler import cycler\n'), ((2451, 2473), 'numpy.load', 'np.load', (['spectrum_file'], {}), '(spectrum_file)\n', (2458, 2473), True, 'import numpy as np\n'), ((2555, 2570), 'numpy.diff', 'np.diff', (['energy'], {}), '(energy)\n', (2562, 2570), True, 'import numpy as np\n'), ((2671, 2712), 'numpy.sum', 'np.sum', (['(delta_energy[mask] * charge[mask])'], {}), '(delta_energy[mask] * charge[mask])\n', (2677, 2712), True, 'import numpy as np\n'), ((2884, 2906), 'numpy.load', 'np.load', (['spectrum_file'], {}), '(spectrum_file)\n', (2891, 2906), True, 'import numpy as np\n'), ((3276, 3298), 'numpy.load', 'np.load', (['spectrum_file'], {}), '(spectrum_file)\n', (3283, 3298), True, 'import numpy as np\n'), ((3380, 3395), 'numpy.diff', 'np.diff', (['energy'], {}), '(energy)\n', (3387, 3395), True, 'import numpy as np\n'), ((3607, 3639), 'matplotlib.pyplot.subplots', 'pyplot.subplots', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (3622, 3639), False, 'from matplotlib import pyplot\n'), ((5017, 5034), 'matplotlib.pyplot.close', 'pyplot.close', (['fig'], {}), '(fig)\n', (5029, 5034), False, 'from matplotlib import pyplot\n'), ((5090, 5105), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (5101, 5105), False, 'import random\n'), ((5118, 5150), 'signac.get_project', 'signac.get_project', ([], {'search': '(False)'}), '(search=False)\n', (5136, 5150), False, 'import signac\n'), ((3110, 3127), 'numpy.argmax', 'np.argmax', (['charge'], {}), '(charge)\n', (3119, 3127), True, 'import numpy as np\n'), ((3508, 3530), 'numpy.clip', 'np.clip', (['charge', '(0)', '(60)'], {}), '(charge, 0, 60)\n', (3515, 3530), True, 'import numpy as np\n'), ((4120, 4141), 'numpy.isinf', 'np.isinf', 
(['persistence'], {}), '(persistence)\n', (4128, 4141), True, 'import numpy as np\n'), ((4177, 4250), 'numpy.sum', 'np.sum', (['(delta_energy[peak.left:peak.right] * charge[peak.left:peak.right])'], {}), '(delta_energy[peak.left:peak.right] * charge[peak.left:peak.right])\n', (4183, 4250), True, 'import numpy as np\n'), ((5213, 5231), 'random.choice', 'random.choice', (['ids'], {}), '(ids)\n', (5226, 5231), False, 'import random\n')] |
# License: BSD 3 clause
import io, unittest
import numpy as np
import pickle
from scipy.sparse import csr_matrix
from tick.base_model.tests.generalized_linear_model import TestGLM
from tick.prox import ProxL1
from tick.linear_model import ModelLinReg, SimuLinReg
from tick.linear_model import ModelLogReg, SimuLogReg
from tick.linear_model import ModelPoisReg, SimuPoisReg
from tick.linear_model import ModelHinge, ModelQuadraticHinge, ModelSmoothedHinge
from tick.robust import ModelAbsoluteRegression, ModelEpsilonInsensitive, ModelHuber, \
ModelLinRegWithIntercepts, ModelModifiedHuber
from tick.simulation import weights_sparse_gauss
class Test(TestGLM):
    def test_robust_model_serialization(self):
        """...Test serialization of robust models

        For each robust model, fit on simulated data, round-trip the fitted
        model through pickle, and check that the underlying C++ model and
        the loss values are preserved.  NOTE: the result depends on the
        exact RNG call order (seed set before each simulation).
        """
        # Maps each robust model class to the simulator producing its data.
        model_map = {
            ModelAbsoluteRegression: SimuLinReg,
            ModelEpsilonInsensitive: SimuLinReg,
            ModelHuber: SimuLinReg,
            ModelLinRegWithIntercepts: SimuLinReg,
            ModelModifiedHuber: SimuLogReg
        }
        for mod in model_map:
            np.random.seed(12)
            n_samples, n_features = 100, 5
            w0 = np.random.randn(n_features)
            # Sparse per-sample intercepts, only used by the model below.
            intercept0 = 50 * weights_sparse_gauss(n_weights=n_samples, nnz=30)
            c0 = None
            X, y = SimuLinReg(w0, c0, n_samples=n_samples, verbose=False,
                              seed=2038).simulate()
            if mod == ModelLinRegWithIntercepts:
                y += intercept0
            model = mod(fit_intercept=False).fit(X, y)
            # Round-trip through pickle and compare the native model state.
            pickled = pickle.loads(pickle.dumps(model))
            self.assertTrue(model._model.compare(pickled._model))
            if mod == ModelLinRegWithIntercepts:
                # This model's loss expects weights plus per-sample intercepts.
                test_vector = np.hstack((X[0], np.ones(n_samples)))
                self.assertEqual(
                    model.loss(test_vector), pickled.loss(test_vector))
            else:
                self.assertEqual(model.loss(X[0]), pickled.loss(X[0]))
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"tick.linear_model.SimuLinReg",
"numpy.random.seed",
"tick.simulation.weights_sparse_gauss",
"numpy.random.randn",
"numpy.ones",
"pickle.dumps"
] | [((2046, 2061), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2059, 2061), False, 'import io, unittest\n'), ((1103, 1121), 'numpy.random.seed', 'np.random.seed', (['(12)'], {}), '(12)\n', (1117, 1121), True, 'import numpy as np\n'), ((1182, 1209), 'numpy.random.randn', 'np.random.randn', (['n_features'], {}), '(n_features)\n', (1197, 1209), True, 'import numpy as np\n'), ((1240, 1289), 'tick.simulation.weights_sparse_gauss', 'weights_sparse_gauss', ([], {'n_weights': 'n_samples', 'nnz': '(30)'}), '(n_weights=n_samples, nnz=30)\n', (1260, 1289), False, 'from tick.simulation import weights_sparse_gauss\n'), ((1612, 1631), 'pickle.dumps', 'pickle.dumps', (['model'], {}), '(model)\n', (1624, 1631), False, 'import pickle\n'), ((1331, 1396), 'tick.linear_model.SimuLinReg', 'SimuLinReg', (['w0', 'c0'], {'n_samples': 'n_samples', 'verbose': '(False)', 'seed': '(2038)'}), '(w0, c0, n_samples=n_samples, verbose=False, seed=2038)\n', (1341, 1396), False, 'from tick.linear_model import ModelLinReg, SimuLinReg\n'), ((1797, 1815), 'numpy.ones', 'np.ones', (['n_samples'], {}), '(n_samples)\n', (1804, 1815), True, 'import numpy as np\n')] |
import numpy as np
import scipy.stats as st
def model_weights(deviances, b_samples=1000):
    """
    Calculate weights of the models that can be used to compare them.
    Pseudo-Bayesian Model averaging using Akaike-type
    weighting. The weights are stabilized using the Bayesian bootstrap.
    The code is adapted from the `compare` function of stats.py file
    of arviz library (https://github.com/arviz-devs/arviz), version 0.6.1,
    distributed under Apache License Version 2.0.
    Parameters
    ----------
    deviances: 2D array
        An array containing WAIC or PSIS values for each model and observations
        Rows are models and columns are observations.
    b_samples: int
        Number of samples taken by the Bayesian bootstrap estimation.
    Returns
    -------
    list:
        Weight of each of the model. Higher value means the model
        is more compatible with the data.
    """
    # Work on an (observations x models) matrix, scaled by the number of
    # observations as in the original arviz implementation.
    ic_i_val = np.array(deviances).transpose()
    n_obs, n_models = ic_i_val.shape
    ic_i_val = ic_i_val * n_obs
    # Bayesian bootstrap: each row is a draw of observation weights from a
    # flat Dirichlet (fixed random_state keeps the result deterministic).
    b_weighting = st.dirichlet.rvs(alpha=[1] * n_obs, size=b_samples,
                                  random_state=1)
    # Weighted total deviance of every model under each bootstrap draw,
    # vectorized over draws.  (The original looped per draw and also kept an
    # unused ``z_bs`` accumulator, dropped here.)
    z_b = b_weighting.dot(ic_i_val)
    # Akaike-type weights: exp(-deviance / 2), shifted per draw for numerical
    # stability, then normalised so each draw's weights sum to one.
    u_weights = np.exp((z_b - z_b.min(axis=1, keepdims=True)) / -2)
    weights = u_weights / u_weights.sum(axis=1, keepdims=True)
    return weights.mean(axis=0).tolist()
| [
"numpy.zeros_like",
"numpy.sum",
"numpy.zeros",
"scipy.stats.dirichlet.rvs",
"numpy.min",
"numpy.array",
"numpy.dot"
] | [((1071, 1137), 'scipy.stats.dirichlet.rvs', 'st.dirichlet.rvs', ([], {'alpha': '([1] * rows)', 'size': 'b_samples', 'random_state': '(1)'}), '(alpha=[1] * rows, size=b_samples, random_state=1)\n', (1087, 1137), True, 'import scipy.stats as st\n'), ((1188, 1215), 'numpy.zeros', 'np.zeros', (['(b_samples, cols)'], {}), '((b_samples, cols))\n', (1196, 1215), True, 'import numpy as np\n'), ((1227, 1249), 'numpy.zeros_like', 'np.zeros_like', (['weights'], {}), '(weights)\n', (1240, 1249), True, 'import numpy as np\n'), ((1296, 1328), 'numpy.dot', 'np.dot', (['b_weighting[i]', 'ic_i_val'], {}), '(b_weighting[i], ic_i_val)\n', (1302, 1328), True, 'import numpy as np\n'), ((931, 950), 'numpy.array', 'np.array', (['deviances'], {}), '(deviances)\n', (939, 950), True, 'import numpy as np\n'), ((1437, 1454), 'numpy.sum', 'np.sum', (['u_weights'], {}), '(u_weights)\n', (1443, 1454), True, 'import numpy as np\n'), ((1363, 1374), 'numpy.min', 'np.min', (['z_b'], {}), '(z_b)\n', (1369, 1374), True, 'import numpy as np\n')] |
import pandas as pd
import geopandas as gpd
import shapely as sp
import numpy as np
from scipy.spatial import Voronoi
def convert_point_csv_to_data_frame(path, lat_key='lat', lon_key='lon',
                                    encoding='utf-8', separator='\t',
                                    decimal='.', index_col=None, epsg=4326):
    """
    Read a CSV file containing latitude/longitude columns and return it as a
    GeoPandas GeoDataFrame with one Point geometry per row.
    Args:
        path (string): file path for the CSV file
        lat_key (string): name of the latitude column
        lon_key (string): name of the longitude column
        encoding (string): encoding of CSV file
        separator (string): separator value for the CSV
        decimal (string): character used for decimal place in the CSV
        index_col (string): name of the index column
        epsg (int): spatial reference system code for geospatial data
    Returns:
        GeoPandas.GeoDataFrame: the CSV contents with Point geometries
    """
    frame = pd.read_csv(path, encoding=encoding, sep=separator,
                        index_col=index_col, decimal=decimal)
    # Build a shapely Point per row; note the (lon, lat) = (x, y) ordering.
    geometry = [sp.geometry.Point(lon, lat)
                for lon, lat in zip(frame[lon_key], frame[lat_key])]
    crs = {'init': 'epsg:' + str(epsg)}
    result = gpd.GeoDataFrame(frame, crs=crs, geometry=geometry)
    return result.to_crs(epsg=epsg)
def convert_point_data_to_data_frame(data, lat_key='lat', lon_key='lon',
                                     epsg=4326):
    """
    Convert a DataFrame with latitude/longitude columns into a GeoPandas
    GeoDataFrame with one Point geometry per row.
    Args:
        data (Pandas.DataFrame): the lat/lon data to convert
        lat_key (string): name of the latitude column
        lon_key (string): name of the longitude column
        epsg (int): spatial reference system code for geospatial data
    Returns:
        GeoPandas.GeoDataFrame: the data with Point geometries attached
    """
    # Build a shapely Point per row; note the (lon, lat) = (x, y) ordering.
    geometry = [sp.geometry.Point(lon, lat)
                for lon, lat in zip(data[lon_key], data[lat_key])]
    crs = {'init': 'epsg:' + str(epsg)}
    result = gpd.GeoDataFrame(data, crs=crs, geometry=geometry)
    return result.to_crs(epsg=epsg)
def get_points_inside_shape(points, shape):
    """
    Filter a point geometry frame down to the points lying inside a shape.
    Args:
        points (Geopandas.GeoDataFrame): Point geometry
        shape (Geopandas.GeoDataFrame): Polygon geometry shape file object
    Returns:
        Geopandas.GeoDataFrame: only the points within the shape outline
    """
    # Dissolve the shape into a single (multi)polygon before testing.
    outline = shape.unary_union
    inside = points.within(outline)
    return points[inside]
def create_voronoi(points, epsg=4326):
    """
    Build a Voronoi-diagram polygon geometry from centroid points.
    Args:
        points (Geopandas.GeoDataFrame): The centroid points for the voronoi
        epsg (int): spatial reference system code for geospatial data
    Returns:
        Geopandas.GeoDataFrame: polygon geometry of the Voronoi cells
    """
    centroids = [np.array([pt.x, pt.y]) for pt in np.array(points.geometry)]
    diagram = Voronoi(centroids)
    # Keep only finite ridges (-1 marks a vertex at infinity), then stitch
    # the ridge lines into closed polygons.
    ridge_lines = [
        sp.geometry.LineString(diagram.vertices[ridge])
        for ridge in diagram.ridge_vertices
        if -1 not in ridge
    ]
    cells = list(sp.ops.polygonize(ridge_lines))
    crs = {'init': 'epsg:' + str(epsg)}
    return gpd.GeoDataFrame(crs=crs, geometry=cells).to_crs(epsg=epsg)
def make_voronoi_in_shp(points, shape, epsg=4326):
    """
    Build a Voronoi diagram geometry from point centroids and embed it inside
    a shape, joining each contained cell back to its centroid's attributes.
    Args:
        points (Geopandas.GeoDataFrame): The centroid points for the voronoi
        shape (Geopandas.GeoDataFrame): The shapefile to contain the voronoi
                                        inside
        epsg (int): spatial reference system code for geospatial data
    Returns:
        Geopandas.GeoDataFrame: The polygon geometry for the voronoi diagram
                                embedded inside the shape
    """
    voronoi_cells = create_voronoi(points, epsg=epsg)
    cells_in_shape = get_points_inside_shape(voronoi_cells, shape)
    # Attach each source point's attributes to the cell containing it.
    joined = gpd.sjoin(points, cells_in_shape, how="inner", op='within')
    del joined['geometry']
    return cells_in_shape.merge(joined, left_index=True,
                                right_on='index_right')
| [
"shapely.geometry.Point",
"pandas.read_csv",
"shapely.ops.polygonize",
"geopandas.sjoin",
"scipy.spatial.Voronoi",
"shapely.geometry.LineString",
"geopandas.GeoDataFrame",
"numpy.array"
] | [((1145, 1238), 'pandas.read_csv', 'pd.read_csv', (['path'], {'encoding': 'encoding', 'sep': 'separator', 'index_col': 'index_col', 'decimal': 'decimal'}), '(path, encoding=encoding, sep=separator, index_col=index_col,\n decimal=decimal)\n', (1156, 1238), True, 'import pandas as pd\n'), ((3390, 3408), 'scipy.spatial.Voronoi', 'Voronoi', (['np_points'], {}), '(np_points)\n', (3397, 3408), False, 'from scipy.spatial import Voronoi\n'), ((3566, 3590), 'shapely.ops.polygonize', 'sp.ops.polygonize', (['lines'], {}), '(lines)\n', (3583, 3590), True, 'import shapely as sp\n'), ((4416, 4481), 'geopandas.sjoin', 'gpd.sjoin', (['points', 'voronoi_geo_in_shape'], {'how': '"""inner"""', 'op': '"""within"""'}), "(points, voronoi_geo_in_shape, how='inner', op='within')\n", (4425, 4481), True, 'import geopandas as gpd\n'), ((1347, 1368), 'shapely.geometry.Point', 'sp.geometry.Point', (['xy'], {}), '(xy)\n', (1364, 1368), True, 'import shapely as sp\n'), ((2245, 2266), 'shapely.geometry.Point', 'sp.geometry.Point', (['xy'], {}), '(xy)\n', (2262, 2266), True, 'import shapely as sp\n'), ((3320, 3342), 'numpy.array', 'np.array', (['[pt.x, pt.y]'], {}), '([pt.x, pt.y])\n', (3328, 3342), True, 'import numpy as np\n'), ((3432, 3474), 'shapely.geometry.LineString', 'sp.geometry.LineString', (['vor.vertices[line]'], {}), '(vor.vertices[line])\n', (3454, 3474), True, 'import shapely as sp\n'), ((1444, 1502), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['csv_points'], {'crs': 'crs', 'geometry': 'geo_points'}), '(csv_points, crs=crs, geometry=geo_points)\n', (1460, 1502), True, 'import geopandas as gpd\n'), ((2342, 2394), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['data'], {'crs': 'crs', 'geometry': 'geo_points'}), '(data, crs=crs, geometry=geo_points)\n', (2358, 2394), True, 'import geopandas as gpd\n'), ((3353, 3378), 'numpy.array', 'np.array', (['points.geometry'], {}), '(points.geometry)\n', (3361, 3378), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 7 09:24:26 2018
@author: virati
Do a simple synthetic regression to make sure our approaches are working:
generate data from a known linear model, fit a regression on noisy
observations, then compare recovered coefficients against the ground truth.
"""
import numpy as np
import scipy.signal as sig
import matplotlib.pyplot as plt
# Ground-truth coefficient vector of the synthetic linear model (4 features).
M_known = np.array([20,2,3.4,-1]).reshape(-1,1)
# 500 training observations from an isotropic Gaussian (cov = 5*I).
X_base = np.random.multivariate_normal([5,3,2,1],5 * np.identity(4),500)
Y_known = np.dot(X_base,M_known) # noiseless responses from the true model
# Observed responses: true responses corrupted with N(0, 10) noise.
Y_meas = Y_known + np.random.normal(0,10,(500,1))
#%%
from sklearn.linear_model import LinearRegression, RANSACRegressor
# NOTE(review): the `normalize=` argument was removed from LinearRegression
# in scikit-learn 1.2; as written this script requires an older sklearn.
regmodel = LinearRegression(copy_X=True,normalize=True,fit_intercept=True)
regmodel.fit(X_base,Y_meas)
#%%
#TESTING PHASE
test_obs = 1500
xnoiz = 1
# Test inputs use a fully-coupled covariance (5 * ones(4,4), which is rank-1,
# i.e. degenerate/perfectly-correlated features) unlike the isotropic training
# covariance above — presumably intentional to stress the fit; verify.
X_test = np.random.multivariate_normal([5,3,2,1],5 * np.ones((4,4)),test_obs)
X_noise = X_test # + np.random.normal(0,xnoiz,(test_obs,1))
ynoiz = 10
# Ground-truth test responses with observation noise N(0, ynoiz).
Y_true = (np.dot(X_noise,M_known) + np.random.normal(0,ynoiz,(test_obs,1)))
Y_test = regmodel.predict(X_noise)
#%%
# Top panel: predicted vs. true responses; bottom: coefficient comparison.
plt.figure()
plt.subplot(211)
plt.scatter(Y_true,Y_test)
plt.subplot(212)
featvect = np.arange(4)
mcoefs = regmodel.coef_.T
#plt.bar(featvect,mcoefs,0.4)
plt.plot(featvect,mcoefs,label='Model Coefficients')
plt.plot(featvect,M_known,label='True Coefficients')
plt.legend()
#plt.figure()
#plt.scatter(np.ones_like(Y_known),Y_known,alpha=0.1)
#plt.title('')
plt.show() | [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.identity",
"numpy.ones",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.array",
"numpy.random.normal",
... | [((395, 418), 'numpy.dot', 'np.dot', (['X_base', 'M_known'], {}), '(X_base, M_known)\n', (401, 418), True, 'import numpy as np\n'), ((555, 620), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'copy_X': '(True)', 'normalize': '(True)', 'fit_intercept': '(True)'}), '(copy_X=True, normalize=True, fit_intercept=True)\n', (571, 620), False, 'from sklearn.linear_model import LinearRegression, RANSACRegressor\n'), ((960, 972), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (970, 972), True, 'import matplotlib.pyplot as plt\n'), ((973, 989), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (984, 989), True, 'import matplotlib.pyplot as plt\n'), ((990, 1017), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Y_true', 'Y_test'], {}), '(Y_true, Y_test)\n', (1001, 1017), True, 'import matplotlib.pyplot as plt\n'), ((1017, 1033), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (1028, 1033), True, 'import matplotlib.pyplot as plt\n'), ((1046, 1058), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (1055, 1058), True, 'import numpy as np\n'), ((1115, 1169), 'matplotlib.pyplot.plot', 'plt.plot', (['featvect', 'mcoefs'], {'label': '"""Model Coefficients"""'}), "(featvect, mcoefs, label='Model Coefficients')\n", (1123, 1169), True, 'import matplotlib.pyplot as plt\n'), ((1168, 1222), 'matplotlib.pyplot.plot', 'plt.plot', (['featvect', 'M_known'], {'label': '"""True Coefficients"""'}), "(featvect, M_known, label='True Coefficients')\n", (1176, 1222), True, 'import matplotlib.pyplot as plt\n'), ((1221, 1233), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1231, 1233), True, 'import matplotlib.pyplot as plt\n'), ((1319, 1329), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1327, 1329), True, 'import matplotlib.pyplot as plt\n'), ((439, 472), 'numpy.random.normal', 'np.random.normal', (['(0)', '(10)', '(500, 1)'], {}), '(0, 10, (500, 1))\n', (455, 472), True, 'import numpy as 
np\n'), ((854, 878), 'numpy.dot', 'np.dot', (['X_noise', 'M_known'], {}), '(X_noise, M_known)\n', (860, 878), True, 'import numpy as np\n'), ((880, 921), 'numpy.random.normal', 'np.random.normal', (['(0)', 'ynoiz', '(test_obs, 1)'], {}), '(0, ynoiz, (test_obs, 1))\n', (896, 921), True, 'import numpy as np\n'), ((273, 299), 'numpy.array', 'np.array', (['[20, 2, 3.4, -1]'], {}), '([20, 2, 3.4, -1])\n', (281, 299), True, 'import numpy as np\n'), ((364, 378), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (375, 378), True, 'import numpy as np\n'), ((747, 762), 'numpy.ones', 'np.ones', (['(4, 4)'], {}), '((4, 4))\n', (754, 762), True, 'import numpy as np\n')] |
import os
import sys
import pickle
from tqdm import tqdm
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from lib.metric import get_metrics, train_lr
def compute_fss(model, n_layers, img_size, inp_channel):
    """Compute per-layer feature-space centers of the model's response to noise.

    Feeds a batch of random uint8 noise images through the model and, for each
    intermediate layer, records the mean feature vector over the batch.

    Args:
        model: network exposing ``intermediate_forward(x, layer_index)``,
            possibly wrapped (e.g. ``nn.DataParallel`` -> ``model.module``).
        n_layers (int): number of intermediate layers to probe.
        img_size (int): spatial height/width of the generated noise images.
        inp_channel (int): number of image channels.

    Returns:
        list[torch.Tensor]: one ``(1, feat_dim)`` mean feature tensor per layer.
    """
    model.eval()
    model = model.cuda()
    n = 100
    # Random uint8 noise, normalized the same way as real inputs.
    noise_imgs = np.random.randint(0, 255, (n, inp_channel, img_size, img_size), 'uint8')
    noise_imgs = torch.Tensor(noise_imgs).cuda()
    noise_imgs = (noise_imgs - 127.5) / 255
    fss = []
    for layer_index in range(n_layers):
        with torch.no_grad():
            # Fall back to .module only when the direct call is missing
            # (wrapped model); a bare except here would hide real errors.
            try:
                noise_feat = model.intermediate_forward(noise_imgs, layer_index)
            except AttributeError:
                noise_feat = model.module.intermediate_forward(noise_imgs, layer_index)
            noise_feat = noise_feat.view(n, -1)
            noise_feat = noise_feat.mean(dim=0, keepdim=True)
            fss.append(noise_feat)
    return fss
def compute_val_fss(model, n_layers, dataloader):
    """Compute per-layer mean feature vectors over a validation dataloader.

    Args:
        model: network exposing ``intermediate_forward(x, layer_index)``,
            possibly wrapped (e.g. ``nn.DataParallel`` -> ``model.module``).
        n_layers (int): number of intermediate layers to probe.
        dataloader: yields either ``(imgs, labels)`` pairs or raw image tensors.

    Returns:
        list[torch.Tensor]: one ``(1, feat_dim)`` mean feature tensor per layer.
    """
    model.eval()
    model = model.cuda()
    fss = []
    for layer_index in range(n_layers):
        # Collect per-batch features and concatenate once at the end —
        # repeated torch.cat in the loop is quadratic in total size.
        feats = []
        for data in tqdm(dataloader, desc="get_val_fss"):
            if type(data) in [tuple, list] and len(data) == 2:
                imgs, _ = data
            elif isinstance(data, torch.Tensor):
                imgs = data
            else:
                print(type(data))
                raise NotImplementedError
            imgs = imgs.cuda()
            with torch.no_grad():
                # Fall back to .module only when the direct call is missing.
                try:
                    feat = model.intermediate_forward(imgs, layer_index)
                except AttributeError:
                    feat = model.module.intermediate_forward(imgs, layer_index)
            feat = feat.view(feat.size(0), -1)
            feats.append(feat)
        val_feat = torch.cat(feats).mean(dim=0, keepdim=True)
        fss.append(val_feat)
    return fss
def get_FSS_score_process(model, dataloader, fss, layer_index, magnitude, std):
    """Score samples by feature-space distance to the layer's noise center.

    For each batch, an FGSM-style input perturbation is applied in the
    direction that moves features toward ``fss[layer_index]``; the score is
    the L2 distance between the perturbed features and that center.

    Args:
        model: network exposing ``intermediate_forward(x, layer_index)``.
        dataloader: yields ``(imgs, labels)`` pairs or raw image tensors.
        fss (list[torch.Tensor]): per-layer feature centers (see compute_fss).
        layer_index (int): layer to score against.
        magnitude (float): input perturbation step size.
        std (sequence[float]): per-channel normalization stds used to rescale
            the gradient (the original handled lengths 1 and 3; this loop
            generalizes to any channel count).

    Returns:
        list[float]: one score per sample in the dataloader.
    """
    model.eval()
    model = model.cuda()
    scores = []
    for data in tqdm(dataloader, desc="get_FSS_score"):
        if type(data) in [tuple, list] and len(data) == 2:
            imgs, _ = data
        elif isinstance(data, torch.Tensor):
            imgs = data
        else:
            print(type(data))
            raise NotImplementedError
        imgs = imgs.type(torch.FloatTensor).cuda()
        imgs.requires_grad = True
        imgs.grad = None
        model.zero_grad()
        # Fall back to .module only when the direct call is missing.
        try:
            feat = model.intermediate_forward(imgs, layer_index)
        except AttributeError:
            feat = model.module.intermediate_forward(imgs, layer_index)
        feat = feat.view(feat.size(0), -1)
        # Negative distance: backprop gives the direction toward the center.
        loss = -(feat - fss[layer_index]).norm(p=2, dim=1).mean()
        loss.backward()
        gradient = imgs.grad.data
        gradient = (gradient.float() - gradient.mean()) / gradient.std()
        # Undo the per-channel input normalization on the gradient.
        for c in range(len(std)):
            idx = torch.LongTensor([c]).cuda()
            gradient.index_copy_(1, idx, gradient.index_select(1, idx) / std[c])
        # torch.add(x, scalar, y) with a positional scale was deprecated and
        # later removed; the alpha keyword is the supported form.
        tempInputs = torch.add(imgs.data, gradient, alpha=magnitude)
        with torch.no_grad():
            try:
                feat = model.intermediate_forward(tempInputs, layer_index)
            except AttributeError:
                feat = model.module.intermediate_forward(tempInputs, layer_index)
        feat = feat.view(feat.size(0), -1)
        _scores = np.linalg.norm((feat - fss[layer_index]).cpu().detach().numpy(), axis=1)
        scores.extend(_scores)
    return scores
def get_FSS_score_ensem_process(model, dataloader, fss, layer_indexs, magnitude, std):
    """Stack per-layer FSS scores into an (n_samples, n_layers) matrix."""
    columns = [
        np.array(get_FSS_score_process(model, dataloader, fss, idx, magnitude, std)).reshape(-1, 1)
        for idx in layer_indexs
    ]
    return np.concatenate(columns, axis=1)
def search_FSS_hyperparams(model,
                           fss,
                           layer_indexs,
                           ind_dataloader_val_for_train,
                           ood_dataloader_val_for_train,
                           ind_dataloader_val_for_test,
                           ood_dataloader_val_for_test,
                           std):
    """Grid-search the input-perturbation magnitude for the FSS detector.

    For each candidate magnitude: extract ensemble FSS features for the
    in/out-of-distribution validation splits, fit a logistic regression on
    the training split, evaluate on the test split, and keep the magnitude
    with the highest TNR@tpr=0.95. Returns None if no candidate beats 0.
    """
    candidates = [0.2, 0.18, 0.16, 0.14, 0.12, 0.1, 0.08, 0.06, 0.04, 0.02,
                  0.01, 0.008, 0.006, 0.004, 0.002, 0.001, 0.0008, 0.0006,
                  0.0004, 0.0002, 0.0001, 0.0]
    best_magnitude, best_tnr = None, 0
    for magnitude in tqdm(candidates, desc="magnitude"):
        loaders = (ind_dataloader_val_for_train, ood_dataloader_val_for_train,
                   ind_dataloader_val_for_test, ood_dataloader_val_for_test)
        ind_train, ood_train, ind_test, ood_test = [
            get_FSS_score_ensem_process(model, loader, fss, layer_indexs, magnitude, std)
            for loader in loaders
        ]
        lr = train_lr(ind_train, ood_train)
        metrics = get_metrics(lr, ind_test, ood_test, acc_type="best")
        print("m:{}, metrics:{}".format(magnitude, metrics))
        if metrics['TNR@tpr=0.95'] > best_tnr:
            best_tnr = metrics['TNR@tpr=0.95']
            best_magnitude = magnitude
    return best_magnitude
| [
"tqdm.tqdm",
"torch.LongTensor",
"torch.add",
"torch.cat",
"numpy.random.randint",
"torch.Tensor",
"numpy.array",
"lib.metric.get_metrics",
"torch.no_grad",
"numpy.concatenate",
"lib.metric.train_lr"
] | [((318, 390), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(n, inp_channel, img_size, img_size)', '"""uint8"""'], {}), "(0, 255, (n, inp_channel, img_size, img_size), 'uint8')\n", (335, 390), True, 'import numpy as np\n'), ((2287, 2325), 'tqdm.tqdm', 'tqdm', (['dataloader'], {'desc': '"""get_FSS_score"""'}), "(dataloader, desc='get_FSS_score')\n", (2291, 2325), False, 'from tqdm import tqdm\n'), ((4492, 4527), 'numpy.concatenate', 'np.concatenate', (['scores_list'], {'axis': '(1)'}), '(scores_list, axis=1)\n', (4506, 4527), True, 'import numpy as np\n'), ((5202, 5240), 'tqdm.tqdm', 'tqdm', (['magnitude_list'], {'desc': '"""magnitude"""'}), "(magnitude_list, desc='magnitude')\n", (5206, 5240), False, 'from tqdm import tqdm\n'), ((1253, 1289), 'tqdm.tqdm', 'tqdm', (['dataloader'], {'desc': '"""get_val_fss"""'}), "(dataloader, desc='get_val_fss')\n", (1257, 1289), False, 'from tqdm import tqdm\n'), ((3708, 3749), 'torch.add', 'torch.add', (['imgs.data', 'magnitude', 'gradient'], {}), '(imgs.data, magnitude, gradient)\n', (3717, 3749), False, 'import torch\n'), ((5777, 5841), 'lib.metric.train_lr', 'train_lr', (['ind_features_val_for_train', 'ood_features_val_for_train'], {}), '(ind_features_val_for_train, ood_features_val_for_train)\n', (5785, 5841), False, 'from lib.metric import get_metrics, train_lr\n'), ((5860, 5950), 'lib.metric.get_metrics', 'get_metrics', (['lr', 'ind_features_val_for_test', 'ood_features_val_for_test'], {'acc_type': '"""best"""'}), "(lr, ind_features_val_for_test, ood_features_val_for_test,\n acc_type='best')\n", (5871, 5950), False, 'from lib.metric import get_metrics, train_lr\n'), ((405, 429), 'torch.Tensor', 'torch.Tensor', (['noise_imgs'], {}), '(noise_imgs)\n', (417, 429), False, 'import torch\n'), ((692, 707), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (705, 707), False, 'import torch\n'), ((3763, 3778), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3776, 3778), False, 'import torch\n'), ((1605, 
1620), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1618, 1620), False, 'import torch\n'), ((2003, 2031), 'torch.cat', 'torch.cat', (['(val_feats, feat)'], {}), '((val_feats, feat))\n', (2012, 2031), False, 'import torch\n'), ((4415, 4431), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (4423, 4431), True, 'import numpy as np\n'), ((3170, 3191), 'torch.LongTensor', 'torch.LongTensor', (['[0]'], {}), '([0])\n', (3186, 3191), False, 'import torch\n'), ((3301, 3322), 'torch.LongTensor', 'torch.LongTensor', (['[1]'], {}), '([1])\n', (3317, 3322), False, 'import torch\n'), ((3432, 3453), 'torch.LongTensor', 'torch.LongTensor', (['[2]'], {}), '([2])\n', (3448, 3453), False, 'import torch\n'), ((3591, 3612), 'torch.LongTensor', 'torch.LongTensor', (['[0]'], {}), '([0])\n', (3607, 3612), False, 'import torch\n'), ((3225, 3246), 'torch.LongTensor', 'torch.LongTensor', (['[0]'], {}), '([0])\n', (3241, 3246), False, 'import torch\n'), ((3356, 3377), 'torch.LongTensor', 'torch.LongTensor', (['[1]'], {}), '([1])\n', (3372, 3377), False, 'import torch\n'), ((3487, 3508), 'torch.LongTensor', 'torch.LongTensor', (['[2]'], {}), '([2])\n', (3503, 3508), False, 'import torch\n'), ((3646, 3667), 'torch.LongTensor', 'torch.LongTensor', (['[0]'], {}), '([0])\n', (3662, 3667), False, 'import torch\n')] |
"""
Somes utilities function
"""
import numpy as np
def filter_kalman(mutm, Sigtm, Xt, mutilde_tm, expAh, SST, dim_x, dim_h):
    """
    Forward (filtering) step of the Kalman filter: predict, then update.

    Parameters
    ----------
    mutm, Sigtm: mean and covariance of the forward distribution at t-1
    Xt, mutilde_tm: values of the trajectories at t and t-1
    expAh, SST: coefficients parameters["expA"][:, dim_x:] (dim_x+dim_h, dim_h) and SS^T (dim_x+dim_h, dim_x+dim_h)
    dim_x, dim_h: dimensions of the visible and hidden variables

    Returns
    -------
    marg_mu, marg_sig: filtered mean and covariance of the hidden state at t
    mu_pair, Sig_pair: mean and covariance of the joint (Z_t, Z_{t-1})
    """
    # Predict: push the Gaussian through the linear dynamics (marginalization).
    mu_pred = mutilde_tm + np.matmul(expAh, mutm)
    sig_pred = SST + np.matmul(expAh, np.matmul(Sigtm, expAh.T))
    # Update: condition the hidden block on the observed visible block.
    sig_hy = sig_pred[dim_x:, :dim_x]
    inv_yy = np.linalg.inv(sig_pred[:dim_x, :dim_x])
    innovation = Xt - mu_pred[:dim_x]
    marg_mu = mu_pred[dim_x:] + np.matmul(sig_hy, np.matmul(inv_yy, innovation))
    marg_sig = sig_pred[dim_x:, dim_x:] - np.matmul(sig_hy, np.matmul(inv_yy, sig_hy.T))
    marg_sig = 0.5 * marg_sig + 0.5 * marg_sig.T  # enforce symmetry
    gain_dyn = expAh[dim_x:, :] - np.matmul(sig_hy, np.matmul(inv_yy, expAh[:dim_x, :]))
    # Joint distribution over (Z_t, Z_{t-1}).
    mu_pair = np.hstack((marg_mu, mutm))
    Sig_pair = np.zeros((2 * dim_h, 2 * dim_h))
    Sig_pair[:dim_h, :dim_h] = marg_sig
    Sig_pair[dim_h:, :dim_h] = np.matmul(gain_dyn, Sigtm)
    Sig_pair[:dim_h, dim_h:] = Sig_pair[dim_h:, :dim_h].T
    Sig_pair[dim_h:, dim_h:] = Sigtm
    return marg_mu, marg_sig, mu_pair, Sig_pair
def smoothing_rauch(muft, Sigft, muStp, SigStp, Xtplus, mutilde_t, expAh, SST, dim_x, dim_h):
    """
    Backward step of the Rauch-Tung-Striebel Kalman smoother.

    Combines the filtered estimate at t (muft, Sigft) with the smoothed
    estimate at t+1 (muStp, SigStp) and the observation Xtplus, returning the
    smoothed moments at t and the joint pair distribution (Z_{t+1}, Z_t).
    """
    # Cross-covariance Sigft @ expAh^T is reused below — compute it once.
    cross_cov = np.matmul(Sigft, expAh.T)
    inv_innov = np.linalg.inv(SST + np.matmul(expAh, cross_cov))
    gain = np.matmul(cross_cov, inv_innov)
    mu_rev = muft + np.matmul(gain[:, :dim_x], Xtplus) - np.matmul(gain, np.matmul(expAh, muft) + mutilde_t)
    sig_rev = Sigft - np.matmul(np.matmul(gain, expAh), Sigft)
    marg_mu = mu_rev + np.matmul(gain[:, dim_x:], muStp)
    marg_sig = np.matmul(gain[:, dim_x:], np.matmul(SigStp, gain[:, dim_x:].T)) + sig_rev
    # Joint distribution over (Z_{t+1}, Z_t).
    mu_pair = np.hstack((muStp, marg_mu))
    Sig_pair = np.zeros((2 * dim_h, 2 * dim_h))
    Sig_pair[:dim_h, :dim_h] = SigStp
    Sig_pair[dim_h:, :dim_h] = np.matmul(gain[:, dim_x:], SigStp)
    Sig_pair[:dim_h, dim_h:] = Sig_pair[dim_h:, :dim_h].T
    Sig_pair[dim_h:, dim_h:] = marg_sig
    return marg_mu, marg_sig, mu_pair, Sig_pair
def filtersmoother(Xtplus, mutilde, R, diffusion_coeffs, mu0, sig0):
    """
    Run the Kalman filter forward pass followed by the Rauch-Tung-Striebel
    smoother backward pass. Pure-numpy fallback for the fortran implementation.

    Returns
    -------
    muh: (lenTraj, 2*dim_h) pair-distribution means
    Sigh: (lenTraj, 2*dim_h, 2*dim_h) pair-distribution covariances
    """
    n_steps = Xtplus.shape[0]
    dim_x = Xtplus.shape[1]
    dim_h = mu0.shape[0]
    # Forward (filtered) and backward (smoothed) moments.
    muf = np.zeros((n_steps, dim_h))
    Sigf = np.zeros((n_steps, dim_h, dim_h))
    mus = np.zeros((n_steps, dim_h))
    Sigs = np.zeros((n_steps, dim_h, dim_h))
    # Pair probability distribution moments (the returned quantities).
    muh = np.zeros((n_steps, 2 * dim_h))
    Sigh = np.zeros((n_steps, 2 * dim_h, 2 * dim_h))
    # Initial condition for the forward recursion.
    muf[0] = mu0
    Sigf[0] = sig0
    # Forward recursion (filtering).
    for t in range(1, n_steps):
        muf[t], Sigf[t], muh[t - 1], Sigh[t - 1] = filter_kalman(
            muf[t - 1], Sigf[t - 1], Xtplus[t - 1], mutilde[t - 1],
            R, diffusion_coeffs, dim_x, dim_h)
    # The last smoothed estimate comes only from the forward recursion.
    Sigs[-1] = Sigf[-1]
    mus[-1] = muf[-1]
    # Backward recursion (smoothing), from T-1 down to 0.
    for t in range(n_steps - 2, -1, -1):
        mus[t], Sigs[t], muh[t], Sigh[t] = smoothing_rauch(
            muf[t], Sigf[t], mus[t + 1], Sigs[t + 1], Xtplus[t], mutilde[t],
            R, diffusion_coeffs, dim_x, dim_h)
    return muh, Sigh
| [
"numpy.zeros",
"numpy.linalg.inv",
"numpy.matmul",
"numpy.hstack"
] | [((766, 804), 'numpy.linalg.inv', 'np.linalg.inv', (['Sigtemp[:dim_x, :dim_x]'], {}), '(Sigtemp[:dim_x, :dim_x])\n', (779, 804), True, 'import numpy as np\n'), ((1262, 1288), 'numpy.hstack', 'np.hstack', (['(marg_mu, mutm)'], {}), '((marg_mu, mutm))\n', (1271, 1288), True, 'import numpy as np\n'), ((1304, 1336), 'numpy.zeros', 'np.zeros', (['(2 * dim_h, 2 * dim_h)'], {}), '((2 * dim_h, 2 * dim_h))\n', (1312, 1336), True, 'import numpy as np\n'), ((1408, 1427), 'numpy.matmul', 'np.matmul', (['R', 'Sigtm'], {}), '(R, Sigtm)\n', (1417, 1427), True, 'import numpy as np\n'), ((2253, 2280), 'numpy.hstack', 'np.hstack', (['(muStp, marg_mu)'], {}), '((muStp, marg_mu))\n', (2262, 2280), True, 'import numpy as np\n'), ((2296, 2328), 'numpy.zeros', 'np.zeros', (['(2 * dim_h, 2 * dim_h)'], {}), '((2 * dim_h, 2 * dim_h))\n', (2304, 2328), True, 'import numpy as np\n'), ((2398, 2429), 'numpy.matmul', 'np.matmul', (['R[:, dim_x:]', 'SigStp'], {}), '(R[:, dim_x:], SigStp)\n', (2407, 2429), True, 'import numpy as np\n'), ((2996, 3022), 'numpy.zeros', 'np.zeros', (['(lenTraj, dim_h)'], {}), '((lenTraj, dim_h))\n', (3004, 3022), True, 'import numpy as np\n'), ((3034, 3067), 'numpy.zeros', 'np.zeros', (['(lenTraj, dim_h, dim_h)'], {}), '((lenTraj, dim_h, dim_h))\n', (3042, 3067), True, 'import numpy as np\n'), ((3078, 3104), 'numpy.zeros', 'np.zeros', (['(lenTraj, dim_h)'], {}), '((lenTraj, dim_h))\n', (3086, 3104), True, 'import numpy as np\n'), ((3116, 3149), 'numpy.zeros', 'np.zeros', (['(lenTraj, dim_h, dim_h)'], {}), '((lenTraj, dim_h, dim_h))\n', (3124, 3149), True, 'import numpy as np\n'), ((3208, 3238), 'numpy.zeros', 'np.zeros', (['(lenTraj, 2 * dim_h)'], {}), '((lenTraj, 2 * dim_h))\n', (3216, 3238), True, 'import numpy as np\n'), ((3250, 3291), 'numpy.zeros', 'np.zeros', (['(lenTraj, 2 * dim_h, 2 * dim_h)'], {}), '((lenTraj, 2 * dim_h, 2 * dim_h))\n', (3258, 3291), True, 'import numpy as np\n'), ((619, 641), 'numpy.matmul', 'np.matmul', (['expAh', 'mutm'], {}), '(expAh, 
mutm)\n', (628, 641), True, 'import numpy as np\n'), ((1834, 1859), 'numpy.matmul', 'np.matmul', (['Sigft', 'expAh.T'], {}), '(Sigft, expAh.T)\n', (1843, 1859), True, 'import numpy as np\n'), ((2070, 2100), 'numpy.matmul', 'np.matmul', (['R[:, dim_x:]', 'muStp'], {}), '(R[:, dim_x:], muStp)\n', (2079, 2100), True, 'import numpy as np\n'), ((679, 704), 'numpy.matmul', 'np.matmul', (['Sigtm', 'expAh.T'], {}), '(Sigtm, expAh.T)\n', (688, 704), True, 'import numpy as np\n'), ((871, 909), 'numpy.matmul', 'np.matmul', (['invSYY', '(Xt - mutemp[:dim_x])'], {}), '(invSYY, Xt - mutemp[:dim_x])\n', (880, 909), True, 'import numpy as np\n'), ((987, 1031), 'numpy.matmul', 'np.matmul', (['invSYY', 'Sigtemp[dim_x:, :dim_x].T'], {}), '(invSYY, Sigtemp[dim_x:, :dim_x].T)\n', (996, 1031), True, 'import numpy as np\n'), ((1164, 1199), 'numpy.matmul', 'np.matmul', (['invSYY', 'expAh[:dim_x, :]'], {}), '(invSYY, expAh[:dim_x, :])\n', (1173, 1199), True, 'import numpy as np\n'), ((1895, 1926), 'numpy.matmul', 'np.matmul', (['R[:, :dim_x]', 'Xtplus'], {}), '(R[:, :dim_x], Xtplus)\n', (1904, 1926), True, 'import numpy as np\n'), ((2014, 2033), 'numpy.matmul', 'np.matmul', (['R', 'expAh'], {}), '(R, expAh)\n', (2023, 2033), True, 'import numpy as np\n'), ((2140, 2173), 'numpy.matmul', 'np.matmul', (['SigStp', 'R[:, dim_x:].T'], {}), '(SigStp, R[:, dim_x:].T)\n', (2149, 2173), True, 'import numpy as np\n'), ((1788, 1813), 'numpy.matmul', 'np.matmul', (['Sigft', 'expAh.T'], {}), '(Sigft, expAh.T)\n', (1797, 1813), True, 'import numpy as np\n'), ((1942, 1964), 'numpy.matmul', 'np.matmul', (['expAh', 'muft'], {}), '(expAh, muft)\n', (1951, 1964), True, 'import numpy as np\n')] |
import numpy as np
def hist_equalization(origin_img):
    """Histogram-equalize an 8-bit image.

    Parameters
    ----------
    origin_img : np.ndarray
        image to histogram equalize (integer-valued, max 255)

    Returns
    -------
    np.ndarray
        histogram-equalized image (uint8)
    """
    # Per-intensity pixel counts, then the normalized cumulative distribution.
    flat = origin_img.ravel()
    counts = np.bincount(flat)
    total = np.sum(counts)
    cdf = np.cumsum(counts / total)
    # Map each intensity through the CDF scaled to the 0..255 range.
    lut = np.floor(255 * cdf).astype(np.uint8)
    return lut[origin_img]
"numpy.floor",
"numpy.sum"
] | [((424, 440), 'numpy.sum', 'np.sum', (['hist_arr'], {}), '(hist_arr)\n', (430, 440), True, 'import numpy as np\n'), ((506, 534), 'numpy.floor', 'np.floor', (['(255 * cum_hist_arr)'], {}), '(255 * cum_hist_arr)\n', (514, 534), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.