code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# -*- coding: utf-8 -*-
"""
Functions to transfer coordinates under different coordinate system
"""
# Author: <NAME> <<EMAIL>>
# License: MIT
import numpy as np
def geo_to_transform(lat, lon, alt, lat_0, lon_0, alt_0):
"""
    Convert WGS84 to ENU. The origin of the ENU should pass the geo reference.
    Note: this function was written by reversing the official API transform_to_geo.
:param lat: current latitude
:param lon: current longitude
:param alt: current altitude
:param lat_0: geo_ref latitude
:param lon_0: geo_ref longitude
:param alt_0: geo_ref altitude
:return:
"""
EARTH_RADIUS_EQUA = 6378137.0
scale = np.cos(np.deg2rad(lat_0))
mx = lon * np.pi * EARTH_RADIUS_EQUA * scale / 180
mx_0 = scale * np.deg2rad(lon_0) * EARTH_RADIUS_EQUA
x = mx - mx_0
my = np.log(np.tan((lat + 90) * np.pi / 360)) * EARTH_RADIUS_EQUA * scale
my_0 = scale * EARTH_RADIUS_EQUA * np.log(np.tan((90 + lat_0) * np.pi / 360))
y = -(my - my_0)
z = alt - alt_0
return x, y, z | [
"numpy.tan",
"numpy.deg2rad"
] | [((670, 687), 'numpy.deg2rad', 'np.deg2rad', (['lat_0'], {}), '(lat_0)\n', (680, 687), True, 'import numpy as np\n'), ((764, 781), 'numpy.deg2rad', 'np.deg2rad', (['lon_0'], {}), '(lon_0)\n', (774, 781), True, 'import numpy as np\n'), ((945, 979), 'numpy.tan', 'np.tan', (['((90 + lat_0) * np.pi / 360)'], {}), '((90 + lat_0) * np.pi / 360)\n', (951, 979), True, 'import numpy as np\n'), ((837, 869), 'numpy.tan', 'np.tan', (['((lat + 90) * np.pi / 360)'], {}), '((lat + 90) * np.pi / 360)\n', (843, 869), True, 'import numpy as np\n')] |
import numpy as np
import pytest
import os
import itertools as it
from sympde.topology import Domain, ScalarFunctionSpace
from psydac.api.discretization import discretize
from psydac.utilities.utils import refine_array_1d
from psydac.fem.basic import FemField
from psydac.mapping.discrete import NurbsMapping
from psydac.core.kernels import (eval_fields_2d_no_weights, eval_fields_3d_no_weights,
eval_fields_2d_weighted, eval_fields_3d_weighted,
eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights,
eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights,
eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d,
eval_jac_det_2d_weights, eval_jac_det_3d_weights,
eval_jac_det_2d, eval_jac_det_3d,
pushforward_2d_l2, pushforward_3d_l2,
pushforward_2d_hdiv, pushforward_3d_hdiv,
pushforward_2d_hcurl, pushforward_3d_hcurl)
# Locate the directory holding the test meshes: honor the PSYDAC_MESH_DIR
# environment variable when it is defined, otherwise fall back to the
# 'mesh' folder three levels above this file (the repository root).
_env_mesh_dir = os.environ.get('PSYDAC_MESH_DIR')
if _env_mesh_dir is not None:
    mesh_dir = _env_mesh_dir
else:
    base_dir = os.path.dirname(os.path.realpath(__file__))
    base_dir = os.path.join(base_dir, '..', '..', '..')
    mesh_dir = os.path.join(base_dir, 'mesh')
@pytest.mark.parametrize('geometry', ('identity_2d.h5', 'identity_3d.h5', 'bent_pipe.h5',
                                      'collela_2d.h5', 'collela_3d.h5'))
@pytest.mark.parametrize('refine', (1, 2))
@pytest.mark.parametrize('kind', ('hcurl', 'hdiv', 'l2', 'h1'))
def test_kernels(geometry, refine, kind):
    """Cross-check the accelerated evaluation kernels against the direct API.

    Builds a scalar (or, for 'hcurl'/'hdiv', vector-valued product) space on
    the given geometry, fills it with random coefficients, then compares:
    field evaluation (plain and weighted/NURBS), Jacobians, inverse
    Jacobians and Jacobian determinants computed by the low-level kernels
    against direct per-point evaluation.

    Parameters
    ----------
    geometry : str
        Name of the geometry (HDF5) file inside the mesh directory.
    refine : int
        Refinement level for the evaluation grid (refine + 1 points/cell).
    kind : str
        Space kind: 'hcurl', 'hdiv', 'l2' or 'h1'.
    """
    filename = os.path.join(mesh_dir, geometry)
    # SymPDE
    domain = Domain.from_file(filename)
    # NOTE(review): the space *name* is hard-coded to 'hcurl' even though its
    # kind varies — the name is only a label here, but looks unintentional.
    space = ScalarFunctionSpace('hcurl', domain, kind=kind)
    # Discretization
    domainh = discretize(domain, filename=filename)
    mapping = list(domainh.mappings.values())[0]
    ldim = mapping.ldim
    spaceh = discretize(space, domainh, degree=[2]*ldim)
    field = FemField(spaceh)
    weight = FemField(spaceh)
    # Make sure ghost regions are consistent before touching raw _data.
    if not field.coeffs.ghost_regions_in_sync:
        field.coeffs.update_ghost_regions()
    if not weight.coeffs.ghost_regions_in_sync:
        weight.coeffs.update_ghost_regions()
    # Giving random values
    if kind in ['hcurl', 'hdiv']:
        # Product space: one coefficient array per component.
        for i in range(ldim):
            field.fields[i].coeffs._data[:] = np.random.random(field.fields[i].coeffs._data.shape)
            weight.fields[i].coeffs._data[:] = np.random.random(weight.fields[i].coeffs._data.shape)
    else:
        field.coeffs._data[:] = np.random.random(field.coeffs._data.shape)
        weight.coeffs._data[:] = np.random.random(weight.coeffs._data.shape)
    # Preprocessing
    is_nurbs = isinstance(mapping, NurbsMapping)
    grid = []
    ncells = []
    for i in range(ldim):
        grid_i_initial = mapping.space.breaks[i]
        ncells.append(len(grid_i_initial) - 1)
        # Keep duplicates so each cell contributes exactly refine + 1 points.
        grid.append(np.asarray(refine_array_1d(grid_i_initial, refine, remove_duplicates=False)))
    n_eval_points = [refine + 1] * ldim
    shape_grid = tuple(grid[i].size for i in range(ldim))
    # Reshape the flat grid per direction to (cell, point-in-cell).
    tensor_grid = [np.reshape(grid[i], (ncells[i], n_eval_points[i])) for i in range(ldim)]
    pads_m, \
    degree_m, \
    global_basis_m, \
    global_spans_m = mapping.space.preprocess_regular_tensor_grid(tensor_grid, der=1)
    if kind not in ['hcurl', 'hdiv']:
        # Scalar space: preprocess once; the product-space case preprocesses
        # per component inside the evaluation loop below.
        pads_s, \
        degree_s, \
        global_basis_s, \
        global_spans_s = spaceh.preprocess_regular_tensor_grid(tensor_grid, der=0)
    # Direct API
    try:
        if ldim == 2:
            jacobian_matrix_direct = np.array([[mapping.jac_mat(e1, e2) for e2 in grid[1]] for e1 in grid[0]])
        if ldim == 3:
            jacobian_matrix_direct = np.array([[[mapping.jac_mat(e1, e2, e3)
                                                     for e3 in grid[2]]
                                                    for e2 in grid[1]]
                                                   for e1 in grid[0]])
    except NotImplementedError:
        # Some mappings do not implement jac_mat; the kernel results are then
        # only checked for internal consistency further below.
        pass
    if kind in ['hdiv', 'hcurl']:
        if ldim == 2:
            # No weights
            f_direct = np.array([[[spaceh.spaces[i].eval_fields([e1, e2], field.fields[i]) for i in range(ldim)]
                                   for e2 in grid[1]]
                                  for e1 in grid[0]])
            # Weighted
            f_direct_w = np.array([[[np.array(spaceh.spaces[i].eval_fields([e1, e2],
                                                                           field.fields[i],
                                                                           weights=weight.fields[i]))
                                         / np.array(spaceh.spaces[i].eval_fields([e1, e2], weight.fields[i]))
                                         for i in range(ldim)]
                                     for e2 in grid[1]]
                                    for e1 in grid[0]])
        if ldim == 3:
            # No weights
            f_direct = np.array([[[spaceh.eval_fields([e1, e2, e3], field)
                                     for e3 in grid[2]]
                                    for e2 in grid[1]]
                                   for e1 in grid[0]])
            # Weighted
            f_direct_w = np.array([[[np.array(spaceh.eval_fields([e1, e2, e3], field, weights=weight))
                                        / np.array(spaceh.eval_fields([e1, e2, e3], weight))
                                        for e3 in grid[2]]
                                       for e2 in grid[1]]
                                      for e1 in grid[0]])
    else:
        if ldim == 2:
            # No weights
            f_direct = np.array([[spaceh.eval_fields([e1, e2], field) for e2 in grid[1]] for e1 in grid[0]])
            # Weighted
            f_direct_w = np.array([[np.array(spaceh.eval_fields([e1, e2], field, weights=weight))
                                     / np.array(spaceh.eval_fields([e1, e2], weight))
                                     for e2 in grid[1]]
                                    for e1 in grid[0]])
        if ldim == 3:
            # No weights
            f_direct = np.array([[[spaceh.eval_fields([e1, e2, e3], field)
                                    for e3 in grid[2]]
                                   for e2 in grid[1]]
                                  for e1 in grid[0]])
            # Weighted
            f_direct_w = np.array([[[np.array(spaceh.eval_fields([e1, e2, e3], field, weights=weight))
                                        / np.array(spaceh.eval_fields([e1, e2, e3], weight))
                                        for e3 in grid[2]]
                                       for e2 in grid[1]]
                                      for e1 in grid[0]])
    # Mapping related quantities through kernel functions
    jac_mats = np.zeros(shape_grid + (ldim, ldim))
    inv_jac_mats = np.zeros(shape_grid + (ldim, ldim))
    jac_dets = np.zeros(shape_grid)
    if is_nurbs:
        # NURBS mappings carry per-control-point weights.
        global_arr_weights = mapping._weights_field.coeffs._data
        if ldim == 2:
            global_arr_x = mapping._fields[0].coeffs._data
            global_arr_y = mapping._fields[1].coeffs._data
            # Compute the jacobians
            eval_jacobians_2d_weights(*ncells, *pads_m, *degree_m, *n_eval_points, *global_basis_m, *global_spans_m,
                                      global_arr_x, global_arr_y, global_arr_weights, jac_mats)
            # Compute the inverses of the jacobians
            eval_jacobians_inv_2d_weights(*ncells, *pads_m, *degree_m, *n_eval_points, *global_basis_m, *global_spans_m,
                                          global_arr_x, global_arr_y,
                                          global_arr_weights, inv_jac_mats)
            # Compute the determinant of the jacobians
            eval_jac_det_2d_weights(*ncells, *pads_m, *degree_m, *n_eval_points, *global_basis_m, *global_spans_m,
                                    global_arr_x, global_arr_y,
                                    global_arr_weights, jac_dets)
        if ldim == 3:
            global_arr_x = mapping._fields[0].coeffs._data
            global_arr_y = mapping._fields[1].coeffs._data
            global_arr_z = mapping._fields[2].coeffs._data
            # Compute the jacobians
            eval_jacobians_3d_weights(*ncells, *pads_m, *degree_m, *n_eval_points, *global_basis_m, *global_spans_m,
                                      global_arr_x, global_arr_y, global_arr_z,
                                      global_arr_weights, jac_mats)
            # Compute the inverses of the jacobians
            eval_jacobians_inv_3d_weights(*ncells, *pads_m, *degree_m, *n_eval_points, *global_basis_m, *global_spans_m,
                                          global_arr_x, global_arr_y, global_arr_z,
                                          global_arr_weights, inv_jac_mats)
            # Compute the determinant of the jacobians
            eval_jac_det_3d_weights(*ncells, *pads_m, *degree_m, *n_eval_points, *global_basis_m, *global_spans_m,
                                    global_arr_x, global_arr_y, global_arr_z,
                                    global_arr_weights, jac_dets)
    else:
        if mapping.ldim == 2:
            global_arr_x = mapping._fields[0].coeffs._data
            global_arr_y = mapping._fields[1].coeffs._data
            # Compute the jacobians
            eval_jacobians_2d(*ncells, *pads_m, *degree_m, *n_eval_points, *global_basis_m, *global_spans_m,
                              global_arr_x, global_arr_y, jac_mats)
            # Compute the inverses of the jacobians
            eval_jacobians_inv_2d(*ncells, *pads_m, *degree_m, *n_eval_points, *global_basis_m, *global_spans_m,
                                  global_arr_x, global_arr_y, inv_jac_mats)
            # Compute the determinant of the jacobians
            eval_jac_det_2d(*ncells, *pads_m, *degree_m, *n_eval_points, *global_basis_m, *global_spans_m,
                            global_arr_x, global_arr_y,
                            jac_dets)
        if ldim == 3:
            global_arr_x = mapping._fields[0].coeffs._data
            global_arr_y = mapping._fields[1].coeffs._data
            global_arr_z = mapping._fields[2].coeffs._data
            # Compute the jacobians
            eval_jacobians_3d(*ncells, *pads_m, *degree_m, *n_eval_points, *global_basis_m, *global_spans_m,
                              global_arr_x, global_arr_y, global_arr_z, jac_mats)
            # Compute the inverses of the jacobians
            eval_jacobians_inv_3d(*ncells, *pads_m, *degree_m, *n_eval_points, *global_basis_m, *global_spans_m,
                                  global_arr_x, global_arr_y, global_arr_z, inv_jac_mats)
            # Compute the determinant of the jacobians
            eval_jac_det_3d(*ncells, *pads_m, *degree_m, *n_eval_points, *global_basis_m, *global_spans_m,
                            global_arr_x, global_arr_y, global_arr_z,
                            jac_dets)
    # Field related quantities through kernel functions
    if kind in ['hcurl', 'hdiv']:  # Product FemSpace
        out_field = np.zeros((ldim,) + shape_grid)
        out_field_w = np.zeros((ldim,) + shape_grid)
        global_arr_field = [field.fields[i].coeffs._data[:] for i in range(ldim)]
        global_arr_w = [weight.fields[i].coeffs._data[:] for i in range(ldim)]
        if ldim == 2:
            for i in range(2):
                pads_s, \
                degree_s, \
                global_basis_s, \
                global_spans_s = spaceh.spaces[i].preprocess_regular_tensor_grid(tensor_grid, der=0)
                eval_fields_2d_no_weights(*ncells, *pads_s, *degree_s, *n_eval_points, *global_basis_s, *global_spans_s,
                                          global_arr_field[i][:, :, None], out_field[i][:, :, None])
                eval_fields_2d_weighted(*ncells, *pads_s, *degree_s, *n_eval_points, *global_basis_s, *global_spans_s,
                                        global_arr_field[i][:, :, None], global_arr_w[i],
                                        out_field_w[i][:, :, None])
        if ldim == 3:
            for i in range(3):
                pads_s, \
                degree_s, \
                global_basis_s, \
                global_spans_s = spaceh.spaces[i].preprocess_regular_tensor_grid(tensor_grid, der=0)
                eval_fields_3d_no_weights(*ncells, *pads_s, *degree_s, *n_eval_points, *global_basis_s, *global_spans_s,
                                          global_arr_field[i][:, :, :, None], out_field[i][:, :, :, None])
                eval_fields_3d_weighted(*ncells, *pads_s, *degree_s, *n_eval_points, *global_basis_s, *global_spans_s,
                                        global_arr_field[i][:, :, :, None], global_arr_w[i],
                                        out_field_w[i][:, :, :, None])
    else:
        out_field = np.zeros(shape_grid + (1,))
        out_field_w = np.zeros(shape_grid + (1,))
        # Kernels expect a trailing "number of fields" axis.
        global_arr_field = field.coeffs._data.reshape(field.coeffs._data.shape + (1,))
        global_arr_w = weight.coeffs._data
        if ldim == 2:
            eval_fields_2d_no_weights(*ncells, *pads_s, *degree_s, *n_eval_points, *global_basis_s, *global_spans_s,
                                      global_arr_field, out_field)
            eval_fields_2d_weighted(*ncells, *pads_s, *degree_s, *n_eval_points, *global_basis_s, *global_spans_s,
                                    global_arr_field, global_arr_w, out_field_w)
        if ldim == 3:
            eval_fields_3d_no_weights(*ncells, *pads_s, *degree_s, *n_eval_points, *global_basis_s, *global_spans_s,
                                      global_arr_field, out_field)
            eval_fields_3d_weighted(*ncells, *pads_s, *degree_s, *n_eval_points, *global_basis_s, *global_spans_s,
                                    global_arr_field, global_arr_w, out_field_w)
    # First round of checks
    # Jacobian related arrays
    try:
        assert np.allclose(jacobian_matrix_direct, jac_mats)
    except NameError:
        # jacobian_matrix_direct was never assigned (jac_mat not implemented).
        pass
    if ldim == 2:
        for i, j in it.product(range(jac_mats.shape[0]), range(jac_mats.shape[1])):
            # Assert that the computed inverse is the inverse.
            assert np.allclose(np.dot(jac_mats[i, j], inv_jac_mats[i, j]), np.eye(ldim))
            # Assert that the computed Jacobian determinant is the Jacobian determinant
            assert np.allclose(np.linalg.det(jac_mats[i, j]), jac_dets[i, j])
    if ldim == 3:
        for i, j, k in it.product(range(jac_mats.shape[0]), range(jac_mats.shape[1]), range(jac_mats.shape[2])):
            # Assert that the computed inverse is the inverse.
            assert np.allclose(np.dot(jac_mats[i, j, k], inv_jac_mats[i, j, k]), np.eye(ldim))
            # Assert that the computed Jacobian determinant is the Jacobian determinant
            assert np.allclose(np.linalg.det(jac_mats[i, j, k]), jac_dets[i, j, k])
    # Field related arrays
    if kind in ['hdiv', 'hcurl']:
        # Direct API puts the component axis last; the kernels put it first.
        assert np.allclose(f_direct[:, :, :, 0], np.moveaxis(out_field, 0, -1))
        assert np.allclose(f_direct_w[:, :, :, 0], np.moveaxis(out_field_w, 0, -1))
    else:
        assert np.allclose(f_direct, out_field)
        assert np.allclose(f_direct_w, out_field_w)
@pytest.mark.parametrize('jac_det, ldim, field_to_push', [(np.ones((5, 5)), 2, np.ones((5, 5, 1))),
                                                          (np.ones((5, 5, 5)), 3, np.ones((5, 5, 5, 1))),
                                                          (np.random.rand(5, 5), 2, np.random.rand(5, 5, 1)),
                                                          (np.random.rand(5, 5, 5), 3, np.random.rand(5, 5, 5, 1))])
def test_pushforwards_l2(ldim, jac_det, field_to_push):
    """The L2 pushforward divides field values by the Jacobian determinant;
    compare the kernel output against the direct numpy expression."""
    reference = field_to_push[..., 0] / jac_det
    pushed = np.zeros_like(field_to_push)
    kernel = pushforward_2d_l2 if ldim == 2 else pushforward_3d_l2
    kernel(field_to_push, jac_det, pushed)
    assert np.allclose(reference, pushed[..., 0])
@pytest.mark.parametrize('ldim', (2, 3))
def test_pushforwards_hdiv(ldim):
    """With identity Jacobians the Hdiv (contravariant) pushforward only
    relocates the component axis, so it must match a plain moveaxis."""
    grid_shape = (5,) * ldim
    identity_jacs = np.full(grid_shape + (ldim, ldim), np.eye(ldim))
    components = np.random.rand(ldim, *grid_shape, 1)
    reference = np.moveaxis(components, 0, -2)
    pushed = np.zeros(reference.shape)
    kernel = pushforward_2d_hdiv if ldim == 2 else pushforward_3d_hdiv
    kernel(components, identity_jacs, pushed)
    assert np.allclose(reference, pushed)
@pytest.mark.parametrize('ldim', (2, 3))
def test_pushforwards_hcurl(ldim):
    """With identity inverse Jacobians the Hcurl (covariant) pushforward only
    relocates the component axis, so it must match a plain moveaxis."""
    grid_shape = (5,) * ldim
    identity_inv_jacs = np.full(grid_shape + (ldim, ldim), np.eye(ldim))
    components = np.random.rand(ldim, *grid_shape, 1)
    reference = np.moveaxis(components, 0, -2)
    pushed = np.zeros(reference.shape)
    kernel = pushforward_2d_hcurl if ldim == 2 else pushforward_3d_hcurl
    kernel(components, identity_inv_jacs, pushed)
    assert np.allclose(reference, pushed)
| [
"numpy.moveaxis",
"psydac.core.kernels.eval_jacobians_3d",
"numpy.allclose",
"numpy.ones",
"pytest.mark.parametrize",
"psydac.api.discretization.discretize",
"os.path.join",
"psydac.core.kernels.eval_fields_3d_weighted",
"psydac.utilities.utils.refine_array_1d",
"numpy.zeros_like",
"psydac.core.... | [((1406, 1533), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geometry"""', "('identity_2d.h5', 'identity_3d.h5', 'bent_pipe.h5', 'collela_2d.h5',\n 'collela_3d.h5')"], {}), "('geometry', ('identity_2d.h5', 'identity_3d.h5',\n 'bent_pipe.h5', 'collela_2d.h5', 'collela_3d.h5'))\n", (1429, 1533), False, 'import pytest\n'), ((1569, 1610), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""refine"""', '(1, 2)'], {}), "('refine', (1, 2))\n", (1592, 1610), False, 'import pytest\n'), ((1612, 1674), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kind"""', "('hcurl', 'hdiv', 'l2', 'h1')"], {}), "('kind', ('hcurl', 'hdiv', 'l2', 'h1'))\n", (1635, 1674), False, 'import pytest\n'), ((16175, 16214), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ldim"""', '(2, 3)'], {}), "('ldim', (2, 3))\n", (16198, 16214), False, 'import pytest\n'), ((16661, 16700), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ldim"""', '(2, 3)'], {}), "('ldim', (2, 3))\n", (16684, 16700), False, 'import pytest\n'), ((1733, 1765), 'os.path.join', 'os.path.join', (['mesh_dir', 'geometry'], {}), '(mesh_dir, geometry)\n', (1745, 1765), False, 'import os\n'), ((1793, 1819), 'sympde.topology.Domain.from_file', 'Domain.from_file', (['filename'], {}), '(filename)\n', (1809, 1819), False, 'from sympde.topology import Domain, ScalarFunctionSpace\n'), ((1832, 1879), 'sympde.topology.ScalarFunctionSpace', 'ScalarFunctionSpace', (['"""hcurl"""', 'domain'], {'kind': 'kind'}), "('hcurl', domain, kind=kind)\n", (1851, 1879), False, 'from sympde.topology import Domain, ScalarFunctionSpace\n'), ((1916, 1953), 'psydac.api.discretization.discretize', 'discretize', (['domain'], {'filename': 'filename'}), '(domain, filename=filename)\n', (1926, 1953), False, 'from psydac.api.discretization import discretize\n'), ((2041, 2086), 'psydac.api.discretization.discretize', 'discretize', (['space', 'domainh'], {'degree': '([2] * ldim)'}), '(space, domainh, 
degree=[2] * ldim)\n', (2051, 2086), False, 'from psydac.api.discretization import discretize\n'), ((2097, 2113), 'psydac.fem.basic.FemField', 'FemField', (['spaceh'], {}), '(spaceh)\n', (2105, 2113), False, 'from psydac.fem.basic import FemField\n'), ((2127, 2143), 'psydac.fem.basic.FemField', 'FemField', (['spaceh'], {}), '(spaceh)\n', (2135, 2143), False, 'from psydac.fem.basic import FemField\n'), ((6896, 6931), 'numpy.zeros', 'np.zeros', (['(shape_grid + (ldim, ldim))'], {}), '(shape_grid + (ldim, ldim))\n', (6904, 6931), True, 'import numpy as np\n'), ((6951, 6986), 'numpy.zeros', 'np.zeros', (['(shape_grid + (ldim, ldim))'], {}), '(shape_grid + (ldim, ldim))\n', (6959, 6986), True, 'import numpy as np\n'), ((7002, 7022), 'numpy.zeros', 'np.zeros', (['shape_grid'], {}), '(shape_grid)\n', (7010, 7022), True, 'import numpy as np\n'), ((15950, 15978), 'numpy.zeros_like', 'np.zeros_like', (['field_to_push'], {}), '(field_to_push)\n', (15963, 15978), True, 'import numpy as np\n'), ((16137, 16171), 'numpy.allclose', 'np.allclose', (['expected', 'out[..., 0]'], {}), '(expected, out[..., 0])\n', (16148, 16171), True, 'import numpy as np\n'), ((16335, 16374), 'numpy.random.rand', 'np.random.rand', (['ldim', '*((5,) * ldim)', '(1)'], {}), '(ldim, *((5,) * ldim), 1)\n', (16349, 16374), True, 'import numpy as np\n'), ((16391, 16424), 'numpy.moveaxis', 'np.moveaxis', (['field_to_push', '(0)', '(-2)'], {}), '(field_to_push, 0, -2)\n', (16402, 16424), True, 'import numpy as np\n'), ((16435, 16459), 'numpy.zeros', 'np.zeros', (['expected.shape'], {}), '(expected.shape)\n', (16443, 16459), True, 'import numpy as np\n'), ((16631, 16657), 'numpy.allclose', 'np.allclose', (['expected', 'out'], {}), '(expected, out)\n', (16642, 16657), True, 'import numpy as np\n'), ((16826, 16865), 'numpy.random.rand', 'np.random.rand', (['ldim', '*((5,) * ldim)', '(1)'], {}), '(ldim, *((5,) * ldim), 1)\n', (16840, 16865), True, 'import numpy as np\n'), ((16882, 16915), 'numpy.moveaxis', 
'np.moveaxis', (['field_to_push', '(0)', '(-2)'], {}), '(field_to_push, 0, -2)\n', (16893, 16915), True, 'import numpy as np\n'), ((16926, 16950), 'numpy.zeros', 'np.zeros', (['expected.shape'], {}), '(expected.shape)\n', (16934, 16950), True, 'import numpy as np\n'), ((17128, 17154), 'numpy.allclose', 'np.allclose', (['expected', 'out'], {}), '(expected, out)\n', (17139, 17154), True, 'import numpy as np\n'), ((1316, 1356), 'os.path.join', 'os.path.join', (['base_dir', '""".."""', '""".."""', '""".."""'], {}), "(base_dir, '..', '..', '..')\n", (1328, 1356), False, 'import os\n'), ((1372, 1402), 'os.path.join', 'os.path.join', (['base_dir', '"""mesh"""'], {}), "(base_dir, 'mesh')\n", (1384, 1402), False, 'import os\n'), ((2664, 2706), 'numpy.random.random', 'np.random.random', (['field.coeffs._data.shape'], {}), '(field.coeffs._data.shape)\n', (2680, 2706), True, 'import numpy as np\n'), ((2740, 2783), 'numpy.random.random', 'np.random.random', (['weight.coeffs._data.shape'], {}), '(weight.coeffs._data.shape)\n', (2756, 2783), True, 'import numpy as np\n'), ((3222, 3272), 'numpy.reshape', 'np.reshape', (['grid[i]', '(ncells[i], n_eval_points[i])'], {}), '(grid[i], (ncells[i], n_eval_points[i]))\n', (3232, 3272), True, 'import numpy as np\n'), ((11226, 11256), 'numpy.zeros', 'np.zeros', (['((ldim,) + shape_grid)'], {}), '((ldim,) + shape_grid)\n', (11234, 11256), True, 'import numpy as np\n'), ((11279, 11309), 'numpy.zeros', 'np.zeros', (['((ldim,) + shape_grid)'], {}), '((ldim,) + shape_grid)\n', (11287, 11309), True, 'import numpy as np\n'), ((13002, 13029), 'numpy.zeros', 'np.zeros', (['(shape_grid + (1,))'], {}), '(shape_grid + (1,))\n', (13010, 13029), True, 'import numpy as np\n'), ((13052, 13079), 'numpy.zeros', 'np.zeros', (['(shape_grid + (1,))'], {}), '(shape_grid + (1,))\n', (13060, 13079), True, 'import numpy as np\n'), ((14102, 14147), 'numpy.allclose', 'np.allclose', (['jacobian_matrix_direct', 'jac_mats'], {}), '(jacobian_matrix_direct, jac_mats)\n', 
(14113, 14147), True, 'import numpy as np\n'), ((15317, 15349), 'numpy.allclose', 'np.allclose', (['f_direct', 'out_field'], {}), '(f_direct, out_field)\n', (15328, 15349), True, 'import numpy as np\n'), ((15365, 15401), 'numpy.allclose', 'np.allclose', (['f_direct_w', 'out_field_w'], {}), '(f_direct_w, out_field_w)\n', (15376, 15401), True, 'import numpy as np\n'), ((16005, 16051), 'psydac.core.kernels.pushforward_2d_l2', 'pushforward_2d_l2', (['field_to_push', 'jac_det', 'out'], {}), '(field_to_push, jac_det, out)\n', (16022, 16051), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((16078, 16124), 'psydac.core.kernels.pushforward_3d_l2', 'pushforward_3d_l2', (['field_to_push', 'jac_det', 'out'], {}), '(field_to_push, jac_det, out)\n', (16095, 16124), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((16301, 16313), 'numpy.eye', 'np.eye', (['ldim'], {}), '(ldim)\n', (16307, 16313), True, 'import numpy as np\n'), ((16491, 16541), 
'psydac.core.kernels.pushforward_2d_hdiv', 'pushforward_2d_hdiv', (['field_to_push', 'jacobians', 'out'], {}), '(field_to_push, jacobians, out)\n', (16510, 16541), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((16568, 16618), 'psydac.core.kernels.pushforward_3d_hdiv', 'pushforward_3d_hdiv', (['field_to_push', 'jacobians', 'out'], {}), '(field_to_push, jacobians, out)\n', (16587, 16618), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((16792, 16804), 'numpy.eye', 'np.eye', (['ldim'], {}), '(ldim)\n', (16798, 16804), True, 'import numpy as np\n'), ((16978, 17033), 'psydac.core.kernels.pushforward_2d_hcurl', 'pushforward_2d_hcurl', (['field_to_push', 'inv_jacobians', 'out'], {}), '(field_to_push, inv_jacobians, out)\n', (16998, 17033), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, 
eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((17060, 17115), 'psydac.core.kernels.pushforward_3d_hcurl', 'pushforward_3d_hcurl', (['field_to_push', 'inv_jacobians', 'out'], {}), '(field_to_push, inv_jacobians, out)\n', (17080, 17115), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((1273, 1299), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1289, 1299), False, 'import os\n'), ((2468, 2520), 'numpy.random.random', 'np.random.random', (['field.fields[i].coeffs._data.shape'], {}), '(field.fields[i].coeffs._data.shape)\n', (2484, 2520), True, 'import numpy as np\n'), ((2568, 2621), 'numpy.random.random', 'np.random.random', (['weight.fields[i].coeffs._data.shape'], {}), '(weight.fields[i].coeffs._data.shape)\n', (2584, 2621), True, 'import numpy as np\n'), ((7296, 7467), 'psydac.core.kernels.eval_jacobians_2d_weights', 'eval_jacobians_2d_weights', (['*ncells', '*pads_m', '*degree_m', '*n_eval_points', '*global_basis_m', '*global_spans_m', 'global_arr_x', 'global_arr_y', 'global_arr_weights', 'jac_mats'], {}), '(*ncells, *pads_m, *degree_m, *n_eval_points, *\n global_basis_m, *global_spans_m, global_arr_x, global_arr_y,\n global_arr_weights, jac_mats)\n', (7321, 7467), 
False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((7562, 7740), 'psydac.core.kernels.eval_jacobians_inv_2d_weights', 'eval_jacobians_inv_2d_weights', (['*ncells', '*pads_m', '*degree_m', '*n_eval_points', '*global_basis_m', '*global_spans_m', 'global_arr_x', 'global_arr_y', 'global_arr_weights', 'inv_jac_mats'], {}), '(*ncells, *pads_m, *degree_m, *n_eval_points,\n *global_basis_m, *global_spans_m, global_arr_x, global_arr_y,\n global_arr_weights, inv_jac_mats)\n', (7591, 7740), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((7885, 8054), 'psydac.core.kernels.eval_jac_det_2d_weights', 'eval_jac_det_2d_weights', (['*ncells', '*pads_m', '*degree_m', '*n_eval_points', '*global_basis_m', '*global_spans_m', 'global_arr_x', 'global_arr_y', 'global_arr_weights', 'jac_dets'], {}), '(*ncells, *pads_m, *degree_m, *n_eval_points, *\n global_basis_m, *global_spans_m, global_arr_x, global_arr_y,\n global_arr_weights, jac_dets)\n', (7908, 8054), False, 'from psydac.core.kernels 
import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((8367, 8552), 'psydac.core.kernels.eval_jacobians_3d_weights', 'eval_jacobians_3d_weights', (['*ncells', '*pads_m', '*degree_m', '*n_eval_points', '*global_basis_m', '*global_spans_m', 'global_arr_x', 'global_arr_y', 'global_arr_z', 'global_arr_weights', 'jac_mats'], {}), '(*ncells, *pads_m, *degree_m, *n_eval_points, *\n global_basis_m, *global_spans_m, global_arr_x, global_arr_y,\n global_arr_z, global_arr_weights, jac_mats)\n', (8392, 8552), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((8685, 8877), 'psydac.core.kernels.eval_jacobians_inv_3d_weights', 'eval_jacobians_inv_3d_weights', (['*ncells', '*pads_m', '*degree_m', '*n_eval_points', '*global_basis_m', '*global_spans_m', 'global_arr_x', 'global_arr_y', 'global_arr_z', 'global_arr_weights', 'inv_jac_mats'], {}), '(*ncells, *pads_m, *degree_m, *n_eval_points,\n *global_basis_m, *global_spans_m, global_arr_x, global_arr_y,\n global_arr_z, global_arr_weights, inv_jac_mats)\n', (8714, 8877), 
False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((9022, 9205), 'psydac.core.kernels.eval_jac_det_3d_weights', 'eval_jac_det_3d_weights', (['*ncells', '*pads_m', '*degree_m', '*n_eval_points', '*global_basis_m', '*global_spans_m', 'global_arr_x', 'global_arr_y', 'global_arr_z', 'global_arr_weights', 'jac_dets'], {}), '(*ncells, *pads_m, *degree_m, *n_eval_points, *\n global_basis_m, *global_spans_m, global_arr_x, global_arr_y,\n global_arr_z, global_arr_weights, jac_dets)\n', (9045, 9205), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((9476, 9615), 'psydac.core.kernels.eval_jacobians_2d', 'eval_jacobians_2d', (['*ncells', '*pads_m', '*degree_m', '*n_eval_points', '*global_basis_m', '*global_spans_m', 'global_arr_x', 'global_arr_y', 'jac_mats'], {}), '(*ncells, *pads_m, *degree_m, *n_eval_points, *\n global_basis_m, *global_spans_m, global_arr_x, global_arr_y, jac_mats)\n', (9493, 9615), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, 
eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((9706, 9853), 'psydac.core.kernels.eval_jacobians_inv_2d', 'eval_jacobians_inv_2d', (['*ncells', '*pads_m', '*degree_m', '*n_eval_points', '*global_basis_m', '*global_spans_m', 'global_arr_x', 'global_arr_y', 'inv_jac_mats'], {}), '(*ncells, *pads_m, *degree_m, *n_eval_points, *\n global_basis_m, *global_spans_m, global_arr_x, global_arr_y, inv_jac_mats)\n', (9727, 9853), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((9951, 10088), 'psydac.core.kernels.eval_jac_det_2d', 'eval_jac_det_2d', (['*ncells', '*pads_m', '*degree_m', '*n_eval_points', '*global_basis_m', '*global_spans_m', 'global_arr_x', 'global_arr_y', 'jac_dets'], {}), '(*ncells, *pads_m, *degree_m, *n_eval_points, *\n global_basis_m, *global_spans_m, global_arr_x, global_arr_y, jac_dets)\n', (9966, 10088), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, 
eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((10389, 10546), 'psydac.core.kernels.eval_jacobians_3d', 'eval_jacobians_3d', (['*ncells', '*pads_m', '*degree_m', '*n_eval_points', '*global_basis_m', '*global_spans_m', 'global_arr_x', 'global_arr_y', 'global_arr_z', 'jac_mats'], {}), '(*ncells, *pads_m, *degree_m, *n_eval_points, *\n global_basis_m, *global_spans_m, global_arr_x, global_arr_y,\n global_arr_z, jac_mats)\n', (10406, 10546), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((10633, 10798), 'psydac.core.kernels.eval_jacobians_inv_3d', 'eval_jacobians_inv_3d', (['*ncells', '*pads_m', '*degree_m', '*n_eval_points', '*global_basis_m', '*global_spans_m', 'global_arr_x', 'global_arr_y', 'global_arr_z', 'inv_jac_mats'], {}), '(*ncells, *pads_m, *degree_m, *n_eval_points, *\n global_basis_m, *global_spans_m, global_arr_x, global_arr_y,\n global_arr_z, inv_jac_mats)\n', (10654, 10798), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, 
eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((10892, 11047), 'psydac.core.kernels.eval_jac_det_3d', 'eval_jac_det_3d', (['*ncells', '*pads_m', '*degree_m', '*n_eval_points', '*global_basis_m', '*global_spans_m', 'global_arr_x', 'global_arr_y', 'global_arr_z', 'jac_dets'], {}), '(*ncells, *pads_m, *degree_m, *n_eval_points, *\n global_basis_m, *global_spans_m, global_arr_x, global_arr_y,\n global_arr_z, jac_dets)\n', (10907, 11047), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((13246, 13384), 'psydac.core.kernels.eval_fields_2d_no_weights', 'eval_fields_2d_no_weights', (['*ncells', '*pads_s', '*degree_s', '*n_eval_points', '*global_basis_s', '*global_spans_s', 'global_arr_field', 'out_field'], {}), '(*ncells, *pads_s, *degree_s, *n_eval_points, *\n global_basis_s, *global_spans_s, global_arr_field, out_field)\n', (13271, 13384), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, 
eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((13431, 13587), 'psydac.core.kernels.eval_fields_2d_weighted', 'eval_fields_2d_weighted', (['*ncells', '*pads_s', '*degree_s', '*n_eval_points', '*global_basis_s', '*global_spans_s', 'global_arr_field', 'global_arr_w', 'out_field_w'], {}), '(*ncells, *pads_s, *degree_s, *n_eval_points, *\n global_basis_s, *global_spans_s, global_arr_field, global_arr_w,\n out_field_w)\n', (13454, 13587), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((13650, 13788), 'psydac.core.kernels.eval_fields_3d_no_weights', 'eval_fields_3d_no_weights', (['*ncells', '*pads_s', '*degree_s', '*n_eval_points', '*global_basis_s', '*global_spans_s', 'global_arr_field', 'out_field'], {}), '(*ncells, *pads_s, *degree_s, *n_eval_points, *\n global_basis_s, *global_spans_s, global_arr_field, out_field)\n', (13675, 13788), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, 
pushforward_3d_hcurl\n'), ((13835, 13991), 'psydac.core.kernels.eval_fields_3d_weighted', 'eval_fields_3d_weighted', (['*ncells', '*pads_s', '*degree_s', '*n_eval_points', '*global_basis_s', '*global_spans_s', 'global_arr_field', 'global_arr_w', 'out_field_w'], {}), '(*ncells, *pads_s, *degree_s, *n_eval_points, *\n global_basis_s, *global_spans_s, global_arr_field, global_arr_w,\n out_field_w)\n', (13858, 13991), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((15177, 15206), 'numpy.moveaxis', 'np.moveaxis', (['out_field', '(0)', '(-1)'], {}), '(out_field, 0, -1)\n', (15188, 15206), True, 'import numpy as np\n'), ((15259, 15290), 'numpy.moveaxis', 'np.moveaxis', (['out_field_w', '(0)', '(-1)'], {}), '(out_field_w, 0, -1)\n', (15270, 15290), True, 'import numpy as np\n'), ((15463, 15478), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (15470, 15478), True, 'import numpy as np\n'), ((15483, 15501), 'numpy.ones', 'np.ones', (['(5, 5, 1)'], {}), '((5, 5, 1))\n', (15490, 15501), True, 'import numpy as np\n'), ((15563, 15581), 'numpy.ones', 'np.ones', (['(5, 5, 5)'], {}), '((5, 5, 5))\n', (15570, 15581), True, 'import numpy as np\n'), ((15586, 15607), 'numpy.ones', 'np.ones', (['(5, 5, 5, 1)'], {}), '((5, 5, 5, 1))\n', (15593, 15607), True, 'import numpy as np\n'), ((15669, 15689), 'numpy.random.rand', 'np.random.rand', (['(5)', '(5)'], {}), '(5, 5)\n', (15683, 15689), True, 'import numpy as np\n'), ((15694, 15717), 'numpy.random.rand', 'np.random.rand', 
(['(5)', '(5)', '(1)'], {}), '(5, 5, 1)\n', (15708, 15717), True, 'import numpy as np\n'), ((15779, 15802), 'numpy.random.rand', 'np.random.rand', (['(5)', '(5)', '(5)'], {}), '(5, 5, 5)\n', (15793, 15802), True, 'import numpy as np\n'), ((15807, 15833), 'numpy.random.rand', 'np.random.rand', (['(5)', '(5)', '(5)', '(1)'], {}), '(5, 5, 5, 1)\n', (15821, 15833), True, 'import numpy as np\n'), ((3037, 3101), 'psydac.utilities.utils.refine_array_1d', 'refine_array_1d', (['grid_i_initial', 'refine'], {'remove_duplicates': '(False)'}), '(grid_i_initial, refine, remove_duplicates=False)\n', (3052, 3101), False, 'from psydac.utilities.utils import refine_array_1d\n'), ((11732, 11904), 'psydac.core.kernels.eval_fields_2d_no_weights', 'eval_fields_2d_no_weights', (['*ncells', '*pads_s', '*degree_s', '*n_eval_points', '*global_basis_s', '*global_spans_s', 'global_arr_field[i][:, :, None]', 'out_field[i][:, :, None]'], {}), '(*ncells, *pads_s, *degree_s, *n_eval_points, *\n global_basis_s, *global_spans_s, global_arr_field[i][:, :, None],\n out_field[i][:, :, None])\n', (11757, 11904), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((11955, 12144), 'psydac.core.kernels.eval_fields_2d_weighted', 'eval_fields_2d_weighted', (['*ncells', '*pads_s', '*degree_s', '*n_eval_points', '*global_basis_s', '*global_spans_s', 'global_arr_field[i][:, :, None]', 'global_arr_w[i]', 'out_field_w[i][:, :, None]'], {}), '(*ncells, *pads_s, *degree_s, *n_eval_points, *\n global_basis_s, 
*global_spans_s, global_arr_field[i][:, :, None],\n global_arr_w[i], out_field_w[i][:, :, None])\n', (11978, 12144), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((12476, 12654), 'psydac.core.kernels.eval_fields_3d_no_weights', 'eval_fields_3d_no_weights', (['*ncells', '*pads_s', '*degree_s', '*n_eval_points', '*global_basis_s', '*global_spans_s', 'global_arr_field[i][:, :, :, None]', 'out_field[i][:, :, :, None]'], {}), '(*ncells, *pads_s, *degree_s, *n_eval_points, *\n global_basis_s, *global_spans_s, global_arr_field[i][:, :, :, None],\n out_field[i][:, :, :, None])\n', (12501, 12654), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((12705, 12900), 'psydac.core.kernels.eval_fields_3d_weighted', 'eval_fields_3d_weighted', (['*ncells', '*pads_s', '*degree_s', '*n_eval_points', '*global_basis_s', '*global_spans_s', 'global_arr_field[i][:, :, :, None]', 'global_arr_w[i]', 'out_field_w[i][:, :, :, None]'], {}), '(*ncells, *pads_s, *degree_s, *n_eval_points, *\n 
global_basis_s, *global_spans_s, global_arr_field[i][:, :, :, None],\n global_arr_w[i], out_field_w[i][:, :, :, None])\n', (12728, 12900), False, 'from psydac.core.kernels import eval_fields_2d_no_weights, eval_fields_3d_no_weights, eval_fields_2d_weighted, eval_fields_3d_weighted, eval_jacobians_2d, eval_jacobians_3d, eval_jacobians_2d_weights, eval_jacobians_3d_weights, eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights, eval_jacobians_inv_2d, eval_jacobians_inv_3d, eval_jac_det_2d_weights, eval_jac_det_3d_weights, eval_jac_det_2d, eval_jac_det_3d, pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl\n'), ((14380, 14422), 'numpy.dot', 'np.dot', (['jac_mats[i, j]', 'inv_jac_mats[i, j]'], {}), '(jac_mats[i, j], inv_jac_mats[i, j])\n', (14386, 14422), True, 'import numpy as np\n'), ((14424, 14436), 'numpy.eye', 'np.eye', (['ldim'], {}), '(ldim)\n', (14430, 14436), True, 'import numpy as np\n'), ((14557, 14586), 'numpy.linalg.det', 'np.linalg.det', (['jac_mats[i, j]'], {}), '(jac_mats[i, j])\n', (14570, 14586), True, 'import numpy as np\n'), ((14830, 14878), 'numpy.dot', 'np.dot', (['jac_mats[i, j, k]', 'inv_jac_mats[i, j, k]'], {}), '(jac_mats[i, j, k], inv_jac_mats[i, j, k])\n', (14836, 14878), True, 'import numpy as np\n'), ((14880, 14892), 'numpy.eye', 'np.eye', (['ldim'], {}), '(ldim)\n', (14886, 14892), True, 'import numpy as np\n'), ((15013, 15045), 'numpy.linalg.det', 'np.linalg.det', (['jac_mats[i, j, k]'], {}), '(jac_mats[i, j, k])\n', (15026, 15045), True, 'import numpy as np\n')] |
import csv
import gensim
import numpy as np
if __name__ == '__main__':
csvfile = open("../data/data.csv", 'r')
spamreader = csv.reader(csvfile, delimiter=';', quotechar='|')
next(spamreader, None)
data_set = list()
documents_context_str = list()
documents_context_emb = list()
embedding_size = 32
# learn embedding model
index = 0
for row in spamreader:
data_set.append(row)
taggedDocument = gensim.models.doc2vec.TaggedDocument(
gensim.utils.to_unicode(str.encode(' '.join(row[3:len(row)]))).split(), [index])
index = index + 1
documents_context_str.append(taggedDocument)
# train model
model = gensim.models.Doc2Vec(documents_context_str, dm=0, vector_size=embedding_size, window=5, min_count=1,
alpha=0.025, min_alpha=0.025, worker=12)
nrEpochs = 1
for epoch in range(nrEpochs):
if epoch % 2 == 0:
print('Now training epoch %s' % epoch)
model.train(documents_context_str, total_examples=len(documents_context_str), epochs=nrEpochs)
model.alpha -= 0.002
model.min_alpha = model.alpha
# save and get model
model.save('checkpoints/' + str(0) + '_context_attributes_doc2vec_2d' + str(embedding_size) + '.model',
sep_limit=2000000000)
model = gensim.models.Doc2Vec.load(
'checkpoints/' + str(0) + '_context_attributes_doc2vec_2d' + str(embedding_size) + '.model')
# apply embedding model and save data set
for document_context_str in documents_context_str:
try:
documents_context_emb.append(model.infer_vector(document_context_str.words))
except:
documents_context_emb.append([0] * embedding_size)
print(document_context_str.words, 'not found')
# concate
data_set_new = np.zeros((len(data_set), 3 + embedding_size), dtype=np.dtype('U20'))
# fill data
for index in range(0, len(data_set)):
# process
for sub_index_process in range(0, 3):
data_set_new[index, sub_index_process] = data_set[index][sub_index_process]
# context
for sub_index_context in range(0, embedding_size):
data_set_new[index, sub_index_context + 3] = documents_context_emb[index][sub_index_context]
# write dataset
with open("Data.csv", 'w', newline='') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=';', quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(["case", "event", "time"])
for row in data_set_new:
try:
spamwriter.writerow(['{:f}'.format(cell) for cell in (row)])
except:
spamwriter.writerow(['{:s}'.format(cell) for cell in (row)])
| [
"gensim.models.Doc2Vec",
"numpy.dtype",
"csv.reader",
"csv.writer"
] | [((134, 183), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""";"""', 'quotechar': '"""|"""'}), "(csvfile, delimiter=';', quotechar='|')\n", (144, 183), False, 'import csv\n'), ((693, 844), 'gensim.models.Doc2Vec', 'gensim.models.Doc2Vec', (['documents_context_str'], {'dm': '(0)', 'vector_size': 'embedding_size', 'window': '(5)', 'min_count': '(1)', 'alpha': '(0.025)', 'min_alpha': '(0.025)', 'worker': '(12)'}), '(documents_context_str, dm=0, vector_size=\n embedding_size, window=5, min_count=1, alpha=0.025, min_alpha=0.025,\n worker=12)\n', (714, 844), False, 'import gensim\n'), ((2416, 2492), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""";"""', 'quotechar': '"""|"""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(csvfile, delimiter=';', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n", (2426, 2492), False, 'import csv\n'), ((1909, 1924), 'numpy.dtype', 'np.dtype', (['"""U20"""'], {}), "('U20')\n", (1917, 1924), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Define class game"""
import numpy as np
import tkinter as tk
from connectFour import find_row, drop_piece, winner, valid_move
from strategyAI import smart_AI, random_AI
ROW_COUNT = 6
COLUMN_COUNT = 7
WIDTH = 50
LENGTH = 40
listMessage=['Click on Game to start a new game',
'Player 1 wins. Click on Game to start a new game',
'Player 2 wins. Click on Game to start a new game']
class Game:
def __init__(self, can, lab_message, window):
self.onePlayer = True
self.difficulty_AI = 1
self.can = can
self.window = window
self.lab_message = lab_message
self.create_board()
self.turn = 0
self.game_over = False
self.draw_board()
def create_board(self) -> None:
"""
Create a board.
Returns
-------
board : np.array
Array that stores the board of the connect Four game.
"""
self.board = np.zeros((ROW_COUNT, COLUMN_COUNT))
def draw_board(self):
for i in range(ROW_COUNT):
for j in range(COLUMN_COUNT):
self.can.create_oval(10+WIDTH*j,
10+WIDTH*i,
10+LENGTH+WIDTH*j,
10+LENGTH+WIDTH*i,
outline='white')
self.lab_message.configure(text='player 1')
def reinit(self):
self.can.delete(tk.ALL)
self.create_board()
self.turn = 0
self.game_over = False
self.draw_board()
def play(self, col):
if not self.game_over:
if self.turn %2 == 0:
color = 'red'
player = 1
self.lab_message.configure(text='player 2')
else:
color = 'yellow'
player = 2
self.lab_message.configure(text='player 1')
if valid_move(self.board, col):
row = find_row(self.board, col)
drop_piece(self.board, row, col, player)
self.can.create_oval(10+col*WIDTH,
10+(ROW_COUNT-1)*WIDTH-row*WIDTH,
10+LENGTH+col*WIDTH,
10+(ROW_COUNT-1)*WIDTH+LENGTH-row*WIDTH,
fill=color,
outline='white')
if winner(self.board, player):
self.game_over = True
self.lab_message.configure(text=listMessage[player])
self.window.after(500, self.nextPlayer)
def mode_1HumanPlayer(self) -> None:
"""
Lauch game for one human player versus AI.
Returns
-------
None.
"""
self.onePlayer = True
self.reinit()
def difficulty_easy(self) -> None:
"""
Set the AI-difficulty level to easy.
Returns
-------
None.
"""
self.difficulty_AI = 0
def difficulty_intermediate(self):
"""
Set the AI-difficulty level to intermediate.
Returns
-------
None.
"""
self.difficulty_AI = 1
def mode_2HumanPlayers(self) -> None:
"""
Lauch game for two human players.
Returns
-------
None.
"""
self.onePlayer = False
self.reinit()
def nextPlayer(self) -> None:
"""
Increment the turn to switch players.
Returns
-------
None.
"""
self.turn += 1
# In versus AI mode only
if self.onePlayer and self.turn%2 !=0 and not self.game_over:
if self.difficulty_AI == 0:
idCol = random_AI(self.board)
elif self.difficulty_AI == 1:
idCol = smart_AI(self.board)
self.play(idCol) | [
"connectFour.valid_move",
"numpy.zeros",
"connectFour.winner",
"connectFour.drop_piece",
"strategyAI.random_AI",
"strategyAI.smart_AI",
"connectFour.find_row"
] | [((1021, 1056), 'numpy.zeros', 'np.zeros', (['(ROW_COUNT, COLUMN_COUNT)'], {}), '((ROW_COUNT, COLUMN_COUNT))\n', (1029, 1056), True, 'import numpy as np\n'), ((2008, 2035), 'connectFour.valid_move', 'valid_move', (['self.board', 'col'], {}), '(self.board, col)\n', (2018, 2035), False, 'from connectFour import find_row, drop_piece, winner, valid_move\n'), ((2059, 2084), 'connectFour.find_row', 'find_row', (['self.board', 'col'], {}), '(self.board, col)\n', (2067, 2084), False, 'from connectFour import find_row, drop_piece, winner, valid_move\n'), ((2101, 2141), 'connectFour.drop_piece', 'drop_piece', (['self.board', 'row', 'col', 'player'], {}), '(self.board, row, col, player)\n', (2111, 2141), False, 'from connectFour import find_row, drop_piece, winner, valid_move\n'), ((2523, 2549), 'connectFour.winner', 'winner', (['self.board', 'player'], {}), '(self.board, player)\n', (2529, 2549), False, 'from connectFour import find_row, drop_piece, winner, valid_move\n'), ((3923, 3944), 'strategyAI.random_AI', 'random_AI', (['self.board'], {}), '(self.board)\n', (3932, 3944), False, 'from strategyAI import smart_AI, random_AI\n'), ((4011, 4031), 'strategyAI.smart_AI', 'smart_AI', (['self.board'], {}), '(self.board)\n', (4019, 4031), False, 'from strategyAI import smart_AI, random_AI\n')] |
#!/usr/bin/env python
#
# Copyright 2012 atlas
#
# This file was adapted from a part of Project Ubertooth written by <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import bytes
from builtins import range
from past.utils import old_div
import sys
import time
import numpy
import threading
import rflib
from .bits import correctbytes
# import cPickle in Python 2 instead of pickle in Python 3
if sys.version_info < (3,):
import cPickle as pickle
else:
import pickle as pickle
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtCore import Qt, QPointF, QLineF
def ensureQapp():
    """Lazily create the module-wide QApplication singleton.

    Qt requires exactly one QApplication per process; this keeps it in a
    module-level global so repeated calls are harmless.
    """
    global _qt_app
    existing = globals().get("_qt_app")
    if not existing:
        _qt_app = QtWidgets.QApplication([])
# Application and queue identifiers passed to the dongle's recv() when
# pulling spectrum-analysis frames (see SpecanThread.run).  Presumably these
# must match the rfcat firmware's spec-an app/queue numbering -- confirm
# against the firmware source before changing.
APP_SPECAN = 0x43
SPECAN_QUEUE = 1
class SpecanThread(threading.Thread):
    """Background worker that feeds spectrum-analyzer frames to a callback.

    ``data`` is either a live rflib dongle (frames are pulled with its
    ``recv()`` method) or a list of pre-recorded ``(rssi_values, timestamp)``
    tuples for playback.  Each frame is decoded into dBm readings and handed
    to ``new_frame_callback(frequency_axis, rssi_values)`` as numpy arrays.
    """

    def __init__(self, data, low_frequency, high_frequency, freq_step, delay, new_frame_callback):
        threading.Thread.__init__(self)
        self.daemon = True
        self._data = data
        self._delay = delay
        self._low_frequency = low_frequency
        self._high_frequency = high_frequency
        self._freq_step = freq_step
        self._new_frame_callback = new_frame_callback
        # NOTE: this flag must NOT be named "_stop" -- that would shadow
        # threading.Thread._stop() and make join()/is_alive() raise
        # TypeError on Python 3 once the thread finishes.
        self._stop_requested = False
        self._stopped = False

    def run(self):
        if isinstance(self._data, list):
            # Playback mode: iterate over pre-recorded frames.
            for rssi_values, timestamp in self._data:
                # Skip the first 4 bytes (presumably a frame header in the
                # recorded format -- confirm against the capture tool), then
                # decode each byte as (raw ^ 0x80) // 2 - 88 dBm.
                rssi_values = [((ord(x) ^ 0x80) // 2) - 88 for x in rssi_values[4:]]
                # Not blocking on hardware, so pace the playback manually.
                time.sleep(self._delay)
                frequency_axis = numpy.linspace(self._low_frequency, self._high_frequency, num=len(rssi_values), endpoint=True)
                self._new_frame_callback(numpy.copy(frequency_axis), numpy.copy(rssi_values))
                if self._stop_requested:
                    break
        else:
            # Live mode: block on the dongle for each frame.
            while not self._stop_requested:
                try:
                    rssi_values, timestamp = self._data.recv(APP_SPECAN, SPECAN_QUEUE, 10000)
                    rssi_values = [((ord(x) ^ 0x80) // 2) - 88 for x in rssi_values]
                    frequency_axis = numpy.linspace(self._low_frequency, self._high_frequency, num=len(rssi_values), endpoint=True)
                    self._new_frame_callback(numpy.copy(frequency_axis), numpy.copy(rssi_values))
                except Exception:
                    # Report (e.g. recv timeouts) but keep the thread alive.
                    sys.excepthook(*sys.exc_info())
            # Only a real dongle can stop spectrum analysis; calling this on
            # a playback list used to raise AttributeError.
            self._data._stopSpecAn()

    def stop(self):
        """Ask the worker to finish and wait (up to 3 s) for it to exit."""
        self._stop_requested = True
        self.join(3.0)
        self._stopped = True
class RenderArea(QtWidgets.QWidget):
def __init__(self, data, low_freq=2.400e9, high_freq=2.483e9, freq_step=1e6, delay=0, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self._graph = None
self._reticle = None
self._data = data
self._delay = delay
self._frame = None
self._persisted_frames = None
self._persisted_frames_depth = 350
self._path_max = None
self._low_frequency = low_freq #2.400e9
self._high_frequency = high_freq #2.483e9
self._frequency_step = freq_step #1e6
self._high_dbm = 0.0
self._low_dbm = -100.0
self._hide_markers = False
self._mouse_x = None
self._mouse_y = None
self._mouse_x2 = None
self._mouse_y2 = None
self._thread = SpecanThread(self._data,
self._low_frequency,
self._high_frequency,
self._frequency_step,
self._delay,
self._new_frame)
self._thread.start()
    def stop_thread(self):
        """Stop the background acquisition thread; call before closing the widget."""
        self._thread.stop()
def _new_graph(self):
self._graph = QtGui.QPixmap(self.width(), self.height())
self._graph.fill(Qt.black)
def _new_reticle(self):
self._reticle = QtGui.QPixmap(self.width(), self.height())
self._reticle.fill(Qt.transparent)
def _new_persisted_frames(self, frequency_bins):
self._persisted_frames = numpy.empty((self._persisted_frames_depth, frequency_bins))
self._persisted_frames.fill(-128 + -54)
self._persisted_frames_next_index = 0
def minimumSizeHint(self):
x_points = round(old_div((self._high_frequency - self._low_frequency), self._frequency_step))
y_points = round(self._high_dbm - self._low_dbm)
return QtCore.QSize(x_points * 4, y_points * 1)
def _new_frame(self, frequency_axis, rssi_values):
#print repr(frequency_axis)
#print repr(rssi_values)
self._frame = (frequency_axis, rssi_values)
if self._persisted_frames is None:
self._new_persisted_frames(len(frequency_axis))
self._persisted_frames[self._persisted_frames_next_index] = rssi_values
self._persisted_frames_next_index = (self._persisted_frames_next_index + 1) % self._persisted_frames.shape[0]
self.update()
def _draw_graph(self):
if self._graph is None:
self._new_graph()
elif self._graph.size() != self.size():
self._new_graph()
painter = QtGui.QPainter(self._graph)
try:
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.fillRect(0, 0, self._graph.width(), self._graph.height(), QtGui.QColor(0, 0, 0, 10))
if self._frame:
frequency_axis, rssi_values = self._frame
path_now = QtGui.QPainterPath()
path_max = QtGui.QPainterPath()
bins = list(range(len(frequency_axis)))
x_axis = self._hz_to_x(frequency_axis)
y_now = self._dbm_to_y(rssi_values)
y_max = self._dbm_to_y(numpy.amax(self._persisted_frames, axis=0))
# TODO: Wrapped Numpy types with float() to support old (<1.0) PySide API in Ubuntu 10.10
path_now.moveTo(float(x_axis[0]), float(y_now[0]))
for i in bins:
path_now.lineTo(float(x_axis[i]), float(y_now[i]))
# TODO: Wrapped Numpy types with float() to support old (<1.0) PySide API in Ubuntu 10.10
path_max.moveTo(float(x_axis[0]), float(y_max[0]))
db_tmp = self._low_dbm
max_max = None
for i in bins:
path_max.lineTo(float(x_axis[i]), float(y_max[i]))
if self._y_to_dbm(y_max[i]) > db_tmp:
db_tmp = self._y_to_dbm(y_max[i])
max_max = i
pen = QtGui.QPen()
pen.setBrush(Qt.white)
painter.setPen(pen)
painter.drawPath(path_now)
self._path_max = path_max
if not max_max == None and not self._hide_markers:
pen.setBrush(Qt.red)
pen.setStyle(Qt.DotLine)
painter.setPen(pen)
painter.drawText(QPointF(x_axis[max_max] + 4, 30), '%.06f' % (old_div(self._x_to_hz(x_axis[max_max]), 1e6)))
painter.drawText(QPointF(30, y_max[max_max] - 4), '%d' % (self._y_to_dbm(y_max[max_max])))
painter.drawLine(QPointF(x_axis[max_max], 0), QPointF(x_axis[max_max], self.height()))
painter.drawLine(QPointF(0, y_max[max_max]), QPointF(self.width(), y_max[max_max]))
if self._mouse_x:
painter.drawText(QPointF(self._hz_to_x(self._mouse_x) + 4, 58), '(%.06f)' % ((old_div(self._x_to_hz(x_axis[max_max]), 1e6)) - (old_div(self._mouse_x, 1e6))))
pen.setBrush(Qt.yellow)
painter.setPen(pen)
painter.drawText(QPointF(self._hz_to_x(self._mouse_x) + 4, 44), '%.06f' % (old_div(self._mouse_x, 1e6)))
painter.drawText(QPointF(54, self._dbm_to_y(self._mouse_y) - 4), '%d' % (self._mouse_y))
painter.drawLine(QPointF(self._hz_to_x(self._mouse_x), 0), QPointF(self._hz_to_x(self._mouse_x), self.height()))
painter.drawLine(QPointF(0, self._dbm_to_y(self._mouse_y)), QPointF(self.width(), self._dbm_to_y(self._mouse_y)))
if self._mouse_x2:
painter.drawText(QPointF(self._hz_to_x(self._mouse_x2) + 4, 118), '(%.06f)' % ((old_div(self._mouse_x, 1e6)) - (old_div(self._mouse_x2, 1e6))))
if self._mouse_x2:
pen.setBrush(Qt.red)
painter.setPen(pen)
painter.drawText(QPointF(self._hz_to_x(self._mouse_x2) + 4, 102), '(%.06f)' % ((old_div(self._x_to_hz(x_axis[max_max]), 1e6)) - (old_div(self._mouse_x2, 1e6))))
pen.setBrush(Qt.magenta)
painter.setPen(pen)
painter.drawText(QPointF(self._hz_to_x(self._mouse_x2) + 4, 88), '%.06f' % (old_div(self._mouse_x2, 1e6)))
painter.drawText(QPointF(78, self._dbm_to_y(self._mouse_y2) - 4), '%d' % (self._mouse_y2))
painter.drawLine(QPointF(self._hz_to_x(self._mouse_x2), 0), QPointF(self._hz_to_x(self._mouse_x2), self.height()))
painter.drawLine(QPointF(0, self._dbm_to_y(self._mouse_y2)), QPointF(self.width(), self._dbm_to_y(self._mouse_y2)))
if self._mouse_x:
painter.drawText(QPointF(self._hz_to_x(self._mouse_x) + 4, 74), '(%.06f)' % ((old_div(self._mouse_x2, 1e6)) - (old_div(self._mouse_x, 1e6))))
finally:
painter.end()
def _draw_reticle(self):
if self._reticle is None or (self._reticle.size() != self.size()):
self._new_reticle()
dbm_lines = [QLineF(self._hz_to_x(self._low_frequency), self._dbm_to_y(dbm),
self._hz_to_x(self._high_frequency), self._dbm_to_y(dbm))
for dbm in numpy.arange(self._low_dbm, self._high_dbm, 20.0)]
dbm_labels = [(dbm, QPointF(self._hz_to_x(self._low_frequency) + 2, self._dbm_to_y(dbm) - 2))
for dbm in numpy.arange(self._low_dbm, self._high_dbm, 20.0)]
frequency_lines = [QLineF(self._hz_to_x(frequency), self._dbm_to_y(self._high_dbm),
self._hz_to_x(frequency), self._dbm_to_y(self._low_dbm))
for frequency in numpy.arange(self._low_frequency, self._high_frequency, self._frequency_step * 20.0)]
frequency_labels = [(frequency, QPointF(self._hz_to_x(frequency) + 2, self._dbm_to_y(self._high_dbm) + 10))
for frequency in numpy.arange(self._low_frequency, self._high_frequency, self._frequency_step * 10.0)]
painter = QtGui.QPainter(self._reticle)
try:
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.setPen(Qt.blue)
# TODO: Removed to support old (<1.0) PySide API in Ubuntu 10.10
#painter.drawLines(dbm_lines)
for dbm_line in dbm_lines: painter.drawLine(dbm_line)
# TODO: Removed to support old (<1.0) PySide API in Ubuntu 10.10
#painter.drawLines(frequency_lines)
for frequency_line in frequency_lines: painter.drawLine(frequency_line)
painter.setPen(Qt.white)
for dbm, point in dbm_labels:
painter.drawText(point, '%+.0f' % dbm)
for frequency, point in frequency_labels:
painter.drawText(point, '%.02f' % (old_div(frequency, 1e6)))
finally:
painter.end()
def paintEvent(self, event):
self._draw_graph()
self._draw_reticle()
painter = QtGui.QPainter(self)
try:
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.setPen(QtGui.QPen())
painter.setBrush(QtGui.QBrush())
if self._graph:
painter.drawPixmap(0, 0, self._graph)
if self._path_max:
painter.setPen(Qt.green)
painter.drawPath(self._path_max)
painter.setOpacity(0.5)
if self._reticle:
painter.drawPixmap(0, 0, self._reticle)
finally:
painter.end()
def _hz_to_x(self, frequency_hz):
delta = frequency_hz - self._low_frequency
range = self._high_frequency - self._low_frequency
normalized = old_div(delta, range)
#print "freq: %s \nlow: %s \nhigh: %s \ndelta: %s \nrange: %s \nnormalized: %s" % (frequency_hz, self._low_frequency, self._high_frequency, delta, range, normalized)
return normalized * self.width()
def _x_to_hz(self, x):
range = self._high_frequency - self._low_frequency
tmp = old_div(x, self.width())
delta = tmp * range
return delta + self._low_frequency
def _dbm_to_y(self, dbm):
delta = self._high_dbm - dbm
range = self._high_dbm - self._low_dbm
normalized = old_div(delta, range)
return normalized * self.height()
def _y_to_dbm(self, y):
range = self._high_dbm - self._low_dbm
tmp = old_div(y, self.height())
delta = tmp * range
return self._high_dbm - delta
class Window(QtWidgets.QWidget):
    """Main application window hosting a RenderArea spectrum display.

    Translates keyboard/mouse input into marker placement and sweep
    adjustments.

    :param data: '-' to open a live RfCat dongle, a path to a pickled
        capture file, or an already-open data source object.
    :param low_freq: sweep start frequency in Hz.
    :param high_freq: sweep stop frequency in Hz.
    :param spacing: channel spacing (frequency step) in Hz.
    :param delay: delay between frames handed to the render thread.
    """
    def __init__(self, data, low_freq, high_freq, spacing, delay=.01, parent=None):
        QtWidgets.QWidget.__init__(self, parent)
        self._low_freq = low_freq
        self._high_freq = high_freq
        self._spacing = spacing
        self._delay = delay
        self._data = self._open_data(data)
        self.render_area = RenderArea(self._data, low_freq, high_freq, spacing, delay)
        main_layout = QtWidgets.QGridLayout()
        main_layout.setContentsMargins(0, 0, 0, 0)
        main_layout.addWidget(self.render_area, 0, 0)
        self.setLayout(main_layout)
        self.setWindowTitle("RfCat Spectrum Analyzer (thanks Ubertooth!)")

    def sizeHint(self):
        """Preferred initial window size."""
        return QtCore.QSize(480, 160)

    def _open_data(self, data):
        """Resolve *data* into a live data source.

        A string '-' opens a real RfCat dongle and starts spectrum analysis
        on it; any other string is treated as a path to a pickled capture.
        Non-string inputs are assumed to already be a data source.

        :raises Exception: if no usable data source could be produced.
        """
        if type(data) == str:
            if data == '-':
                data = rflib.RfCat()
                data._debug = 1
                freq = int(self._low_freq)
                spc = int(self._spacing)
                numChans = int(old_div((self._high_freq - self._low_freq), self._spacing))
                data._doSpecAn(freq, spc, numChans)
            else:
                # BUGFIX: the Python 2 builtin file() no longer exists - the
                # old code called file(data, 'rb') and raised NameError on
                # Python 3.  Use open() in a with-block so the handle is
                # also closed deterministically.
                # SECURITY: pickle.load() executes arbitrary code; only load
                # capture files from trusted sources.
                with open(data, 'rb') as capture:
                    data = pickle.load(capture)
        if data is None:
            raise Exception('Data not found')
        return data

    def closeEvent(self, event):
        """Stop the sweep thread before the window closes."""
        self.render_area.stop_thread()
        event.accept()

    # handle mouse button clicks
    def mousePressEvent(self, event):
        """Left click places marker 1, right click places marker 2, middle
        button clears both markers and toggles marker visibility."""
        if event.button() == Qt.LeftButton:
            self.render_area._mouse_x = self.render_area._x_to_hz(float(event.x()))
            self.render_area._mouse_y = self.render_area._y_to_dbm(float(event.y()))
            self.render_area._hide_markers = False
        if event.button() == Qt.RightButton:
            self.render_area._mouse_x2 = self.render_area._x_to_hz(float(event.x()))
            self.render_area._mouse_y2 = self.render_area._y_to_dbm(float(event.y()))
            self.render_area._hide_markers = False
        if event.button() == Qt.MidButton:
            self.render_area._mouse_x = None
            self.render_area._mouse_y = None
            self.render_area._mouse_x2 = None
            self.render_area._mouse_y2 = None
            self.render_area._hide_markers = not self.render_area._hide_markers
        event.accept()
        return

    # handle key presses
    def keyPressEvent(self, event):
        """Arrow keys shift/scale the sweep; alphanumeric keys trigger the
        actions listed by the 'H' help text."""
        # test for non-alphanumeric keys first
        # arrow key
        if event.key() >= Qt.Key_Left and event.key() <= Qt.Key_Down:
            # left: shift sweep down by one step
            if event.key() == Qt.Key_Left:
                self._low_freq -= self._spacing
                self._high_freq -= self._spacing
            # up: widen frequency step 10%
            if event.key() == Qt.Key_Up:
                self._spacing = int(self._spacing * 1.1)
            # right: shift sweep up by one step
            if event.key() == Qt.Key_Right:
                self._low_freq += self._spacing
                self._high_freq += self._spacing
            # down: narrow frequency step 10%
            if event.key() == Qt.Key_Down:
                self._spacing = int(self._spacing / 1.1)
            # this will redraw window with the correct labels etc., but we also need to re-start
            # specan on the dongle, and I'm not sure how best to do that!
            self.layout().removeWidget(self.render_area)
            self.render_area = RenderArea(self._data, self._low_freq, self._high_freq, self._spacing, self._delay)
            self.layout().addWidget(self.render_area, 0, 0)
            event.accept()
            return
        # anything else is alphanumeric
        try:
            key = correctbytes(event.key()).upper()
            event.accept()
        except Exception:  # narrowed from bare except: unprintable key codes
            print('Unknown key pressed: 0x%x' % event.key())
            event.ignore()
            return
        if key == 'H':
            print('Key Action')
            print()
            print('    <LEFT ARROW>    Reduce base frequency by one step')
            print('    <RIGHT ARROW>   Increase base frequency by one step')
            print('    <DOWN ARROW>    Reduce frequency step 10%')
            print('    <UP ARROW>      Increase frequency step 10%')
            print('    <LEFT MOUSE>    Mark LEFT frequency / signal strength at pointer')
            print('    <RIGHT MOUSE>   Mark RIGHT frequency / signal strength at pointer')
            print('    <MIDDLE MOUSE>  Toggle visibility of frequency / signal strength markers')
            print('    H               Print this HELP text')
            print('    M               Simulate MIDDLE MOUSE click (for those with trackpads)')
            print('    Q               Quit')
            return
        if key == 'M':
            self.render_area._mouse_x = None
            self.render_area._mouse_y = None
            self.render_area._mouse_x2 = None
            self.render_area._mouse_y2 = None
            self.render_area._hide_markers = not self.render_area._hide_markers
            return
        if key == 'Q':
            print('Quit!')
            self.close()
            return
        print('Unsupported key pressed:', key)
if __name__ == '__main__':
    # Usage: <script> <data-file-or-'-'> <low_freq> <high_freq> <freq_step> [delay]
    app = QtWidgets.QApplication(sys.argv)
    f = sys.argv[1]
    # SECURITY NOTE(review): eval() on command-line arguments executes
    # arbitrary Python; float() would be safer - left unchanged to preserve
    # behaviour (expressions like "902e6*1.1" are currently accepted).
    fbase = eval(sys.argv[2])
    fhigh = eval(sys.argv[3])
    fdelta = eval(sys.argv[4])
    if len(sys.argv) > 5:
        delay = eval(sys.argv[5])
    else:
        delay = .01  # default inter-frame delay in seconds
    window = Window(f, fbase, fhigh, fdelta, delay)
    #window = Window('../data.again', 902.0, 928.0, 3e-1)
    window.show()
    sys.exit(app.exec_())
| [
"PySide2.QtGui.QColor",
"past.utils.old_div",
"PySide2.QtGui.QPainterPath",
"future.standard_library.install_aliases",
"numpy.empty",
"numpy.arange",
"sys.exc_info",
"PySide2.QtGui.QPen",
"threading.Thread.__init__",
"numpy.copy",
"PySide2.QtWidgets.QWidget.__init__",
"rflib.RfCat",
"PySide2... | [((936, 970), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (968, 970), False, 'from future import standard_library\n'), ((20081, 20113), 'PySide2.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (20103, 20113), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((1489, 1515), 'PySide2.QtWidgets.QApplication', 'QtWidgets.QApplication', (['[]'], {}), '([])\n', (1511, 1515), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((1701, 1732), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (1726, 1732), False, 'import threading\n'), ((3775, 3815), 'PySide2.QtWidgets.QWidget.__init__', 'QtWidgets.QWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (3801, 3815), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((5243, 5302), 'numpy.empty', 'numpy.empty', (['(self._persisted_frames_depth, frequency_bins)'], {}), '((self._persisted_frames_depth, frequency_bins))\n', (5254, 5302), False, 'import numpy\n'), ((5607, 5647), 'PySide2.QtCore.QSize', 'QtCore.QSize', (['(x_points * 4)', '(y_points * 1)'], {}), '(x_points * 4, y_points * 1)\n', (5619, 5647), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((6351, 6378), 'PySide2.QtGui.QPainter', 'QtGui.QPainter', (['self._graph'], {}), '(self._graph)\n', (6365, 6378), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((13279, 13299), 'PySide2.QtGui.QPainter', 'QtGui.QPainter', (['self'], {}), '(self)\n', (13293, 13299), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((14015, 14036), 'past.utils.old_div', 'old_div', (['delta', 'range'], {}), '(delta, range)\n', (14022, 14036), False, 'from past.utils import old_div\n'), ((14614, 14635), 'past.utils.old_div', 'old_div', (['delta', 'range'], {}), '(delta, range)\n', (14621, 14635), False, 'from past.utils import old_div\n'), ((14986, 15026), 'PySide2.QtWidgets.QWidget.__init__', 
'QtWidgets.QWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (15012, 15026), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((15320, 15343), 'PySide2.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', ([], {}), '()\n', (15341, 15343), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((15609, 15631), 'PySide2.QtCore.QSize', 'QtCore.QSize', (['(480)', '(160)'], {}), '(480, 160)\n', (15621, 15631), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((2253, 2321), 'past.utils.old_div', 'old_div', (['(self._high_frequency - self._low_frequency)', 'self._freq_step'], {}), '(self._high_frequency - self._low_frequency, self._freq_step)\n', (2260, 2321), False, 'from past.utils import old_div\n'), ((5458, 5531), 'past.utils.old_div', 'old_div', (['(self._high_frequency - self._low_frequency)', 'self._frequency_step'], {}), '(self._high_frequency - self._low_frequency, self._frequency_step)\n', (5465, 5531), False, 'from past.utils import old_div\n'), ((12178, 12207), 'PySide2.QtGui.QPainter', 'QtGui.QPainter', (['self._reticle'], {}), '(self._reticle)\n', (12192, 12207), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((2614, 2637), 'time.sleep', 'time.sleep', (['self._delay'], {}), '(self._delay)\n', (2624, 2637), False, 'import time\n'), ((6534, 6559), 'PySide2.QtGui.QColor', 'QtGui.QColor', (['(0)', '(0)', '(0)', '(10)'], {}), '(0, 0, 0, 10)\n', (6546, 6559), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((6704, 6724), 'PySide2.QtGui.QPainterPath', 'QtGui.QPainterPath', ([], {}), '()\n', (6722, 6724), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((6752, 6772), 'PySide2.QtGui.QPainterPath', 'QtGui.QPainterPath', ([], {}), '()\n', (6770, 6772), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((7881, 7893), 'PySide2.QtGui.QPen', 'QtGui.QPen', ([], {}), '()\n', (7891, 7893), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((13403, 13415), 'PySide2.QtGui.QPen', 'QtGui.QPen', ([], 
{}), '()\n', (13413, 13415), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((13446, 13460), 'PySide2.QtGui.QBrush', 'QtGui.QBrush', ([], {}), '()\n', (13458, 13460), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((15750, 15763), 'rflib.RfCat', 'rflib.RfCat', ([], {}), '()\n', (15761, 15763), False, 'import rflib\n'), ((2808, 2834), 'numpy.copy', 'numpy.copy', (['frequency_axis'], {}), '(frequency_axis)\n', (2818, 2834), False, 'import numpy\n'), ((2836, 2859), 'numpy.copy', 'numpy.copy', (['rssi_values'], {}), '(rssi_values)\n', (2846, 2859), False, 'import numpy\n'), ((6992, 7034), 'numpy.amax', 'numpy.amax', (['self._persisted_frames'], {'axis': '(0)'}), '(self._persisted_frames, axis=0)\n', (7002, 7034), False, 'import numpy\n'), ((11305, 11354), 'numpy.arange', 'numpy.arange', (['self._low_dbm', 'self._high_dbm', '(20.0)'], {}), '(self._low_dbm, self._high_dbm, 20.0)\n', (11317, 11354), False, 'import numpy\n'), ((11499, 11548), 'numpy.arange', 'numpy.arange', (['self._low_dbm', 'self._high_dbm', '(20.0)'], {}), '(self._low_dbm, self._high_dbm, 20.0)\n', (11511, 11548), False, 'import numpy\n'), ((11802, 11891), 'numpy.arange', 'numpy.arange', (['self._low_frequency', 'self._high_frequency', '(self._frequency_step * 20.0)'], {}), '(self._low_frequency, self._high_frequency, self.\n _frequency_step * 20.0)\n', (11814, 11891), False, 'import numpy\n'), ((12057, 12146), 'numpy.arange', 'numpy.arange', (['self._low_frequency', 'self._high_frequency', '(self._frequency_step * 10.0)'], {}), '(self._low_frequency, self._high_frequency, self.\n _frequency_step * 10.0)\n', (12069, 12146), False, 'import numpy\n'), ((15911, 15967), 'past.utils.old_div', 'old_div', (['(self._high_freq - self._low_freq)', 'self._spacing'], {}), '(self._high_freq - self._low_freq, self._spacing)\n', (15918, 15967), False, 'from past.utils import old_div\n'), ((3348, 3374), 'numpy.copy', 'numpy.copy', (['frequency_axis'], {}), '(frequency_axis)\n', (3358, 3374), 
False, 'import numpy\n'), ((3376, 3399), 'numpy.copy', 'numpy.copy', (['rssi_values'], {}), '(rssi_values)\n', (3386, 3399), False, 'import numpy\n'), ((8284, 8316), 'PySide2.QtCore.QPointF', 'QPointF', (['(x_axis[max_max] + 4)', '(30)'], {}), '(x_axis[max_max] + 4, 30)\n', (8291, 8316), False, 'from PySide2.QtCore import Qt, QPointF, QLineF\n'), ((8413, 8444), 'PySide2.QtCore.QPointF', 'QPointF', (['(30)', '(y_max[max_max] - 4)'], {}), '(30, y_max[max_max] - 4)\n', (8420, 8444), False, 'from PySide2.QtCore import Qt, QPointF, QLineF\n'), ((8524, 8551), 'PySide2.QtCore.QPointF', 'QPointF', (['x_axis[max_max]', '(0)'], {}), '(x_axis[max_max], 0)\n', (8531, 8551), False, 'from PySide2.QtCore import Qt, QPointF, QLineF\n'), ((8631, 8657), 'PySide2.QtCore.QPointF', 'QPointF', (['(0)', 'y_max[max_max]'], {}), '(0, y_max[max_max])\n', (8638, 8657), False, 'from PySide2.QtCore import Qt, QPointF, QLineF\n'), ((13060, 13089), 'past.utils.old_div', 'old_div', (['frequency', '(1000000.0)'], {}), '(frequency, 1000000.0)\n', (13067, 13089), False, 'from past.utils import old_div\n'), ((3461, 3475), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3473, 3475), False, 'import sys\n'), ((9109, 9142), 'past.utils.old_div', 'old_div', (['self._mouse_x', '(1000000.0)'], {}), '(self._mouse_x, 1000000.0)\n', (9116, 9142), False, 'from past.utils import old_div\n'), ((10248, 10282), 'past.utils.old_div', 'old_div', (['self._mouse_x2', '(1000000.0)'], {}), '(self._mouse_x2, 1000000.0)\n', (10255, 10282), False, 'from past.utils import old_div\n'), ((8887, 8920), 'past.utils.old_div', 'old_div', (['self._mouse_x', '(1000000.0)'], {}), '(self._mouse_x, 1000000.0)\n', (8894, 8920), False, 'from past.utils import old_div\n'), ((10023, 10057), 'past.utils.old_div', 'old_div', (['self._mouse_x2', '(1000000.0)'], {}), '(self._mouse_x2, 1000000.0)\n', (10030, 10057), False, 'from past.utils import old_div\n'), ((9678, 9711), 'past.utils.old_div', 'old_div', (['self._mouse_x', '(1000000.0)'], 
{}), '(self._mouse_x, 1000000.0)\n', (9685, 9711), False, 'from past.utils import old_div\n'), ((9710, 9744), 'past.utils.old_div', 'old_div', (['self._mouse_x2', '(1000000.0)'], {}), '(self._mouse_x2, 1000000.0)\n', (9717, 9744), False, 'from past.utils import old_div\n'), ((10821, 10855), 'past.utils.old_div', 'old_div', (['self._mouse_x2', '(1000000.0)'], {}), '(self._mouse_x2, 1000000.0)\n', (10828, 10855), False, 'from past.utils import old_div\n'), ((10854, 10887), 'past.utils.old_div', 'old_div', (['self._mouse_x', '(1000000.0)'], {}), '(self._mouse_x, 1000000.0)\n', (10861, 10887), False, 'from past.utils import old_div\n')] |
"""A `dowel.logger.LogOutput` for Numpy npz files."""
import warnings
from pathlib import Path
import zipfile
import tempfile
from dowel import TabularInput
from dowel.logger import LogOutput
from dowel.simple_outputs import FileOutput
from dowel.utils import colorize
import numpy as np
try:
import torch
except ImportError:
torch = None
try:
import tensorflow as tf
except ImportError:
tf = None
"""TODO
File handling is not done properly; the system can quickly throw an error that too many files are open.
Implement system underlying numpy savez system and store data incrementally:
- Write each numpy array (incrementally?) to a `.npy` tempfile.
- During dumping, current `.npy` files are written to the desired `.npz` file.
This means that for N numpy arrays there will be N+1 file objects.
Incrementally writing might be done using numpy.nditer. May need to wrap current appended val in an extra dimension,
such that final array is [val, val, val]. See:
https://github.com/numpy/numpy/blob/91118b3363b636f932f7ff6748d8259e9eb2c23a/numpy/lib/format.py#L677
"""
class NpzOutput(LogOutput):
    """Numpy ``.npz`` file output for dowel's logger.

    Each key of the logged `TabularInput` becomes an array in the output
    file, with one entry appended per `record()` call.  Arrays are written
    uncompressed by default; ``np.savez_compressed`` can be used instead to
    save disk space at higher CPU cost.

    :param file_name: The file this output should log to (requires .npz
        suffix, automatically appended when omitted).
    :param compressed: Use ``np.savez_compressed`` instead of ``np.savez``
        (default).
    """

    def __init__(self, file_name, compressed=False):
        file_path = Path(file_name)
        if file_path.suffix == "":
            # Append the expected suffix when the caller omitted it.
            file_path = file_path.with_suffix(".npz")
        assert file_path.suffix == ".npz"
        file_path.parent.mkdir(parents=True, exist_ok=True)
        self._file_path = file_path
        # self._tmpdir = tempfile.TemporaryDirectory()
        # self._tmpdir_path = Path(self._tmpdir.name)
        # self._tmpfiles = {}
        self._compressed = compressed
        self._fieldnames = None  # key set fixed by the first recorded TabularInput
        self._data = {}          # key -> list of values recorded so far

    @property
    def types_accepted(self):
        """Only `TabularInput` records are accepted by this output."""
        return TabularInput

    def record(self, data, prefix=""):
        """Log tabular data to npz.

        Values are buffered in memory; nothing is written to disk until
        `dump()` is called.

        :raises ValueError: if the key set differs from the first recorded
            input, if an ndarray value changes shape between records, or if
            ``data`` is not a `TabularInput`.
        :raises NotImplementedError: for TensorFlow tensor values.
        """
        if isinstance(data, TabularInput):
            to_dict = data.as_dict
            if not to_dict.keys():
                # Empty TabularInput: nothing to buffer.
                return
            if not self._fieldnames:
                # First record: fix the key set and create empty buffers.
                self._fieldnames = set(to_dict.keys())
                for key, val in sorted(to_dict.items()):
                    self._data[key] = []
            if set(to_dict.keys()) != self._fieldnames:
                raise ValueError(
                    "Inconsistent TabularInput keys detected. "
                    "NpzOutput keys: {}. "
                    "TabularInput keys: {}. "
                    "Did you change key sets after your first "
                    "logger.log(TabularInput)?".format(
                        set(self._fieldnames), set(to_dict.keys())
                    )
                )
            for key in self._fieldnames:
                val = to_dict[key]
                # Torch tensors are converted to numpy arrays for saving.
                if torch and torch.is_tensor(val):
                    val = val.detach().numpy()
                # TensorFlow tensors are not supported.
                if tf and tf.is_tensor(val):
                    self.close()
                    raise NotImplementedError()
                # NOTE(review): shape check compares against the first
                # buffered value; assumes it is also an ndarray when val is.
                if isinstance(val, np.ndarray) and len(self._data[key]) > 0 and self._data[key][0].shape != val.shape:
                    raise ValueError(
                        "Wrong shape for key: '{}'. Got {}, but should be {}".format(
                            key, val.shape, self._data[key][0].shape
                        )
                    )
                self._data[key].append(val)
            # Mark every key as consumed so dowel does not warn about them.
            for k in to_dict.keys():
                data.mark(k)
        else:
            raise ValueError("Unacceptable type.")

    def dump(self, step=None):
        """Write all buffered data to the target ``.npz`` file.

        :param step: The current run step (unused; part of the LogOutput
            interface).
        """
        if self._compressed:
            np.savez_compressed(self._file_path, **self._data)
        else:
            np.savez(self._file_path, **self._data)
| [
"tensorflow.is_tensor",
"pathlib.Path",
"numpy.savez_compressed",
"numpy.savez",
"torch.is_tensor"
] | [((1610, 1625), 'pathlib.Path', 'Path', (['file_name'], {}), '(file_name)\n', (1614, 1625), False, 'from pathlib import Path\n'), ((4100, 4150), 'numpy.savez_compressed', 'np.savez_compressed', (['self._file_path'], {}), '(self._file_path, **self._data)\n', (4119, 4150), True, 'import numpy as np\n'), ((4177, 4216), 'numpy.savez', 'np.savez', (['self._file_path'], {}), '(self._file_path, **self._data)\n', (4185, 4216), True, 'import numpy as np\n'), ((3164, 3184), 'torch.is_tensor', 'torch.is_tensor', (['val'], {}), '(val)\n', (3179, 3184), False, 'import torch\n'), ((3280, 3297), 'tensorflow.is_tensor', 'tf.is_tensor', (['val'], {}), '(val)\n', (3292, 3297), True, 'import tensorflow as tf\n')] |
import csv
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import os
import numpy as np
import transforms3d.euler as t3d
import helper
import tensorflow as tf
###################### Print Operations #########################
def print_(text="Test", color='w', style='no', bg_color=''):
    """Print *text* wrapped in ANSI escape codes for color and style.

    :param text: string to print.
    :param color: foreground color key ('b','r','g','y','bl','p','c','w').
    :param style: text style key ('no','bold','underline','neg1','neg2').
    :param bg_color: optional background color key; '' means no background.
    """
    color_dict = {'b': 30, 'r': 31, 'g': 32, 'y': 33, 'bl': 34, 'p': 35, 'c': 36, 'w': 37}
    style_dict = {'no': 0, 'bold': 1, 'underline': 2, 'neg1': 3, 'neg2': 5}
    bg_color_dict = {'b': 40, 'r': 41, 'g': 42, 'y': 43, 'bl': 44, 'p': 45, 'c': 46, 'w': 47}
    # BUGFIX: the original used `bg_color is not ''`, an identity comparison
    # with a string literal (implementation-defined; SyntaxWarning on
    # CPython >= 3.8).  Compare by value instead.
    if bg_color != '':
        print("\033[" + str(style_dict[style]) + ";" + str(color_dict[color]) + ";" + str(bg_color_dict[bg_color]) + "m" + text + "\033[00m")
    else:
        print("\033[" + str(style_dict[style]) + ";" + str(color_dict[color]) + "m" + text + "\033[00m")
###################### Data Downloading Operations #########################
def download_data(file):
    """Fetch and unzip a dataset archive from Google Drive into ./data.

    The download is skipped when data/<file> already exists; otherwise the
    zip is downloaded, extracted and then removed.

    :param file: dataset name, 'train_data' or 'car_data'.
    :return: True (always).
    """
    print_('################### Downloading Data ###################', color='g', style='bold')
    from google_drive_downloader import GoogleDriveDownloader as gdd
    if file == 'train_data':
        file_id = '16YU-tdayVNBwM3XlPDgFrrzlPjhQN3PB'
    elif file == 'car_data':
        file_id = '1k9W75uhUFTfA_iK7YePGn5t9f4JhtgSe'
    target_dir = os.path.join(os.getcwd(), 'data', file)
    if not os.path.exists(target_dir):
        archive_path = os.path.join(os.getcwd(), 'data', file + '.zip')
        gdd.download_file_from_google_drive(file_id=file_id,
                                            dest_path=archive_path,
                                            showsize=True,
                                            unzip=True)
        os.remove(archive_path)
    return True
###################### Data Handling Operations #########################
# Read the templates from a given file.
def read_templates(file_name, templates_dict):
    """Read one template CSV file from data/<templates_dict>/.

    :param file_name: CSV file name inside the dictionary folder.
    :param templates_dict: dictionary folder name under ./data.
    :return: list of rows, each a list of floats.
    """
    csv_path = os.path.join('data', templates_dict, file_name)
    rows = []
    with open(csv_path, 'r') as csvfile:
        for record in csv.reader(csvfile):
            rows.append([float(value) for value in record])
    return rows
# Read the file names having templates.
def template_files(templates_dict):
    """Return the template file names listed in template_filenames.txt.

    Also prints the list (kept from the original for CLI feedback).

    :param templates_dict: dictionary folder name under ./data.
    :return: list of file-name strings.
    """
    listing_path = os.path.join('data', templates_dict, 'template_filenames.txt')
    with open(listing_path, 'r') as handle:
        names = [line.strip() for line in handle.readlines()]
    print(names)
    return names
# Read the templates from each file.
def templates_data(templates_dict):
    """Read and concatenate the rows of every template file in the dictionary.

    :param templates_dict: dictionary folder name under ./data.
    :return: ndarray of all rows stacked, shape (n1*n2*2048, 3).
    """
    data = []
    for fname in template_files(templates_dict):   # available file names
        data.extend(read_templates(fname, templates_dict))
    return np.asarray(data)
# Preprocess the templates and rearrange them.
def process_templates(templates_dict):
    """Read all templates and rearrange them into one (n, 2048, 3) array.

    :param templates_dict: dictionary folder name under ./data.
    :return: ndarray of templates, shape (n, 2048, 3).
    """
    data = templates_data(templates_dict)  # stacked points, (n*2048, 3)
    # BUGFIX: '/' yields a float in Python 3 and range(float) raises
    # TypeError; use integer (floor) division for the template count.
    num_templates = data.shape[0] // 2048
    print(num_templates)
    templates = []
    for i in range(num_templates):
        start_idx = i * 2048
        end_idx = (i + 1) * 2048
        templates.append(data[start_idx:end_idx, :])
    return np.asarray(templates)  # all templates, (n x 2048 x 3)
# Read poses from given file.
def read_poses(templates_dict, filename):
    """Read all poses stored in a CSV file.

    :param templates_dict: dictionary folder name under ./data.
    :param filename: CSV file name containing one pose per row.
    :return: ndarray of poses, shape (n, 6).
    """
    pose_path = os.path.join('data', templates_dict, filename)
    with open(pose_path, 'r') as csvfile:
        poses = [[float(value) for value in record]
                 for record in csv.reader(csvfile)]
    return np.asarray(poses)
# Read names of files in given data_dictionary.
def read_files(data_dict):
    """Return the first file name listed in data/<data_dict>/files.txt.

    :param data_dict: data dictionary folder name under ./data.
    :return: first whitespace-delimited token of the first line.
    """
    with open(os.path.join('data', data_dict, 'files.txt')) as handle:
        names = [line.split()[0] for line in handle.readlines()]
    return names[0]
# Read data from h5 file and return as templates.
def read_h5(file_name):
    """Load the 'templates' dataset from an HDF5 file.

    :param file_name: path to the .h5 file.
    :return: ndarray copy of the 'templates' dataset.
    """
    import h5py
    with h5py.File(file_name, 'r') as handle:  # closes the file on exit
        templates = np.array(handle.get('templates'))
    return templates
def read_noise_data(data_dict):
    """Load 'templates' and 'sources' arrays from data/<data_dict>/noise_data.h5.

    :param data_dict: data dictionary folder name under ./data.
    :return: tuple (templates, sources) of ndarrays.
    """
    import h5py
    noise_path = os.path.join('data', data_dict, 'noise_data.h5')
    with h5py.File(noise_path, 'r') as handle:  # closes the file on exit
        templates = np.array(handle.get('templates'))
        sources = np.array(handle.get('sources'))
    return templates, sources
def read_pairs(data_dict, file_name):
    """Read integer pairs from a CSV file in data/<data_dict>/.

    :param data_dict: data dictionary folder name under ./data.
    :param file_name: CSV file with one integer row per line.
    :return: ndarray of ints, one row per CSV record.
    """
    with open(os.path.join('data', data_dict, file_name), 'r') as csvfile:
        pair_rows = [[int(token) for token in record]
                     for record in csv.reader(csvfile)]
    return np.asarray(pair_rows)
# Main function to load data and return as templates array.
def loadData(data_dict):
    """Load the templates array for a data dictionary.

    :param data_dict: data dictionary folder name under ./data.
    :return: templates ndarray read from the listed h5 file.
    """
    file_name = read_files(data_dict)  # first listed file name
    print(file_name)
    return read_h5(file_name)
###################### Transformation Operations #########################
def rotate_point_cloud_by_angle_y(batch_data, rotation_angle):
    """Rotate every point cloud in the batch about the Y axis.

    :param batch_data: BxNx3 array of point clouds (a single Nx3 cloud also
        works; each row is then treated as a one-point cloud).
    :param rotation_angle: rotation angle in radians.
    :return: float32 array with the same shape as ``batch_data``.
    """
    cos_a = np.cos(rotation_angle)
    sin_a = np.sin(rotation_angle)
    # Pre-multiplication rotation matrix (column-vector convention).
    rot_y = np.array([[cos_a, 0, sin_a],
                      [0, 1, 0],
                      [-sin_a, 0, cos_a]])
    rotated = np.zeros(batch_data.shape, dtype=np.float32)
    for idx in range(batch_data.shape[0]):
        cloud = batch_data[idx, ...].reshape((-1, 3))
        rotated[idx, ...] = rot_y.dot(cloud.T).T
    return rotated
def rotate_point_cloud_by_angle_x(batch_data, rotation_angle):
    """Rotate every point cloud in the batch about the X axis.

    :param batch_data: BxNx3 array of point clouds (a single Nx3 cloud also
        works; each row is then treated as a one-point cloud).
    :param rotation_angle: rotation angle in radians.
    :return: float32 array with the same shape as ``batch_data``.
    """
    cos_a = np.cos(rotation_angle)
    sin_a = np.sin(rotation_angle)
    # Pre-multiplication rotation matrix (column-vector convention).
    rot_x = np.array([[1, 0, 0],
                      [0, cos_a, -sin_a],
                      [0, sin_a, cos_a]])
    rotated = np.zeros(batch_data.shape, dtype=np.float32)
    for idx in range(batch_data.shape[0]):
        cloud = batch_data[idx, ...].reshape((-1, 3))
        rotated[idx, ...] = rot_x.dot(cloud.T).T
    return rotated
def rotate_point_cloud_by_angle_z(batch_data, rotation_angle):
    """Rotate every point cloud in the batch about the Z axis.

    :param batch_data: BxNx3 array of point clouds (a single Nx3 cloud also
        works; each row is then treated as a one-point cloud).
    :param rotation_angle: rotation angle in radians.
    :return: float32 array with the same shape as ``batch_data``.
    """
    cos_a = np.cos(rotation_angle)
    sin_a = np.sin(rotation_angle)
    # Pre-multiplication rotation matrix (column-vector convention).
    rot_z = np.array([[cos_a, -sin_a, 0],
                      [sin_a, cos_a, 0],
                      [0, 0, 1]])
    rotated = np.zeros(batch_data.shape, dtype=np.float32)
    for idx in range(batch_data.shape[0]):
        cloud = batch_data[idx, ...].reshape((-1, 3))
        rotated[idx, ...] = rot_z.dot(cloud.T).T
    return rotated
# Translate the data as per given translation vector.
def translate(data, shift):
    """Translate a point cloud by a constant offset.

    BUGFIX: the original wrapped ``np.asarray`` in a bare ``try/except:
    pass``, which silently swallowed every error (including
    KeyboardInterrupt) and could then fall through to list concatenation
    instead of element-wise addition.  The conversion is now unconditional.

    :param data: point cloud, array-like of shape (num_points, 3).
    :param shift: translation vector, length-3 array-like (broadcast over rows).
    :return: ndarray ``data + shift``.
    """
    return np.asarray(data) + shift
# Apply the given transformation to given point cloud data.
def apply_transformation(datas, poses):
    """Apply 6D poses (translation + XYZ Euler angles) to a batch of clouds.

    :param datas: point clouds, (batch_size, num_points, 3).
    :param poses: per-cloud pose [tx, ty, tz, rx, ry, rz], (batch_size, 6).
    :return: transformed point clouds, (batch_size, num_points, 3).
    """
    result = np.copy(datas)
    for idx in range(result.shape[0]):
        # Rotation order: Z, then Y, then X, followed by the translation.
        result[idx, :, :] = rotate_point_cloud_by_angle_z(result[idx, :, :], poses[idx, 5])
        result[idx, :, :] = rotate_point_cloud_by_angle_y(result[idx, :, :], poses[idx, 4])
        result[idx, :, :] = rotate_point_cloud_by_angle_x(result[idx, :, :], poses[idx, 3])
        result[idx, :, :] = translate(result[idx, :, :], [poses[idx, 0], poses[idx, 1], poses[idx, 2]])
    return result
# Convert poses from 6D to 7D. # For loss function ( 8a )
def poses_euler2quat(poses):
    """Convert 6D poses (translation + Euler) to 7D (translation + quaternion).

    :param poses: (batch_size, 6) array [tx, ty, tz, rx, ry, rz].
    :return: (batch_size, 7) array [tx, ty, tz, q0, q1, q2, q3].
    """
    quat_poses = []
    for idx in range(poses.shape[0]):
        q = t3d.euler2quat(poses[idx, 3], poses[idx, 4], poses[idx, 5])  # (4,)
        quat_poses.append([poses[idx, 0], poses[idx, 1], poses[idx, 2],
                           q[0], q[1], q[2], q[3]])
    return np.asarray(quat_poses)
# Geenerate random poses equal to batch_size.
def generate_poses(batch_size):
	# Generate `batch_size` random 6D poses: translations drawn from [-1, 1)
	# rounded to 2 decimals, euler angles from [-pi/2, pi/2) radians rounded
	# to 3 decimals.
	# Arguments:
	# 	batch_size: No of 6D poses required.
	# Output:
	# 	poses: Array of poses with translation and rotation (batch_size x 6)
	all_poses = []
	for _ in range(batch_size):
		trans = [np.round(2*np.random.random_sample()-1, 2) for _ in range(3)]			# random x, y, z translations
		rots = [np.round(np.pi*np.random.random_sample()-(np.pi/2), 3) for _ in range(3)]	# random x, y, z rotations
		all_poses.append(trans + rots)
	return np.array(all_poses).reshape((batch_size, 6))
# Convert 6D poses to transformation matrix.	# (for 4b)
def transformation(poses):
	# Build a 4x4 homogeneous transformation matrix from every 6D pose.
	# Arguments:
	# 	poses: 6D (x,y,z,euler_x,euler_y,euler_z) (in radians)
	# Output
	# 	transformation_matrix: batch_size x 4 x 4
	batch_size = poses.shape[0]
	mats = np.zeros((batch_size, 4, 4))
	mats[:, 3, 3] = 1
	for idx in range(batch_size):
		# transforms3d takes the angles in z, y, x order for the 'szyx' convention.
		mats[idx, 0:3, 0:3] = t3d.euler2mat(poses[idx, 5], poses[idx, 4], poses[idx, 3], 'szyx')
		mats[idx, 0:3, 3] = poses[idx, 0:3]		# Translation goes in the last column.
	return mats
# Convert poses (quaternions) to transformation matrix and apply on point cloud.
def transformation_quat2mat(poses,TRANSFORMATIONS,templates_data):	# (for 4b)
	# Arguments:
	# 	poses: 7D (x,y,z,quat_q0,quat_q1,quat_q2,quat_q3) (batch_size x 7)
	# 	TRANSFORMATIONS: Overall tranformation matrix, updated in place.
	# 	templates_data: Point Cloud (batch_size x num_points x 3), updated in place.
	# Output
	# 	TRANSFORMATIONS: Batch_size x 4 x 4
	# 	templates_data: Transformed template data (batch_size x num_points x 3)
	poses = np.array(poses)
	poses = poses.reshape(poses.shape[-2], poses.shape[-1])	# Drop any leading singleton dimension.
	for idx in range(poses.shape[0]):
		step = np.zeros((4, 4))
		step[3, 3] = 1
		# Rotation matrix from the quaternion (q0, q1, q2, q3) via transforms3d.
		rot = t3d.quat2mat([poses[idx, 3], poses[idx, 4], poses[idx, 5], poses[idx, 6]])
		step[0:3, 0:3] = rot
		step[0:3, 3] = poses[idx, 0:3]
		# Pre-multiply to accumulate this step into the overall transform. (4b)
		TRANSFORMATIONS[idx, :, :] = np.dot(step, TRANSFORMATIONS[idx, :, :])
		# Rotate then translate the template points in place.
		templates_data[idx, :, :] = np.dot(rot, templates_data[idx, :, :].T).T + poses[idx, 0:3]
	return TRANSFORMATIONS, templates_data
# Convert the Final Transformation Matrix to Translation + Orientation (Euler Angles in Degrees)
# NOTE(review): t3d.mat2euler returns radians, so "Degrees" above looks wrong — confirm against callers.
def find_final_pose(TRANSFORMATIONS):
	# Arguments:
	# 	TRANSFORMATIONS: transformation matrix (batch_size x 4 x 4)
	# Output:
	# 	final_pose: final pose predicted by network (batch_size x 6)
	# 	            layout: [x, y, z, euler_x, euler_y, euler_z]
	final_pose = np.zeros((TRANSFORMATIONS.shape[0],6))			# Array to store the poses.
	for i in range(TRANSFORMATIONS.shape[0]):
		rot = TRANSFORMATIONS[i,0:3,0:3]				# Extract rotation matrix.
		euler = t3d.mat2euler(rot,'szyx')				# Convert rotation matrix to euler angles. (Pre-multiplication)
		final_pose[i,3:6]=[euler[2],euler[1],euler[0]]	# Store the euler angles (x, y, z order).
		final_pose[i,0:3]=TRANSFORMATIONS[i,0:3,3].T	# Store the translation.
	return final_pose
# Convert the Final Transformation Matrix to Translation + Orientation (Euler Angles in Degrees)
# Same decomposition as find_final_pose, but applied to the INVERSE of each matrix.
def find_final_pose_inv(TRANSFORMATIONS_ip):
	# Arguments:
	# 	TRANSFORMATIONS_ip: transformation matrix (batch_size x 4 x 4)
	# Output:
	# 	final_pose: final pose predicted by network (batch_size x 6)
	# 	            layout: [x, y, z, euler_x, euler_y, euler_z]
	TRANSFORMATIONS = np.copy(TRANSFORMATIONS_ip)	# Copy so the caller's matrices are not inverted in place.
	final_pose = np.zeros((TRANSFORMATIONS.shape[0],6))			# Array to store the poses.
	for i in range(TRANSFORMATIONS.shape[0]):
		TRANSFORMATIONS[i] = np.linalg.inv(TRANSFORMATIONS[i])	# Invert the homogeneous transform.
		rot = TRANSFORMATIONS[i,0:3,0:3]				# Extract rotation matrix.
		euler = t3d.mat2euler(rot,'szyx')				# Convert rotation matrix to euler angles. (Pre-multiplication)
		final_pose[i,3:6]=[euler[2],euler[1],euler[0]]	# Store the euler angles (x, y, z order).
		final_pose[i,0:3]=TRANSFORMATIONS[i,0:3,3].T	# Store the translation.
	return final_pose
# Subtract the centroids from source and template (Like ICP) and then find the pose.
def centroid_subtraction(source_data, template_data):
	# Center every source/template cloud at its centroid (in place) and
	# record the relative centroid shift as a 7D pose with an identity
	# quaternion, to be re-applied after the final iteration.
	# Arguments:
	# 	source_data: Source Point Clouds (batch_size x num_points x 3)
	# 	template_data: Template Point Clouds (batch_size x num_points x 3)
	# Output:
	# 	source_data, template_data: centroid-subtracted clouds
	# 	centroid_translation_pose: (batch_size x 7)
	batch_size = source_data.shape[0]
	centroid_translation_pose = np.zeros((batch_size, 7))
	for idx in range(batch_size):
		src_center = np.mean(source_data[idx], axis=0)
		tmpl_center = np.mean(template_data[idx], axis=0)
		source_data[idx] -= src_center
		template_data[idx] -= tmpl_center
		# Relative centroid shift + identity quaternion (1, 0, 0, 0).
		centroid_translation_pose[idx, 0:3] = src_center - tmpl_center
		centroid_translation_pose[idx, 3] = 1
	return source_data, template_data, centroid_translation_pose
def inverse_pose(pose):
	# Invert a 6D pose [x, y, z, euler_x, euler_y, euler_z]: build its 4x4
	# homogeneous matrix, invert it, and decompose back to 6D.
	T = np.zeros((4, 4))
	T[3, 3] = 1
	T[0:3, 0:3] = t3d.euler2mat(pose[5], pose[4], pose[3], 'szyx')
	T[0:3, 3] = [pose[0], pose[1], pose[2]]
	T_inv = np.linalg.inv(T)
	inv_orient = t3d.mat2euler(T_inv[0:3, 0:3], 'szyx')
	# Pack translation then euler angles (x, y, z order) into a flat 6-vector.
	pose_inv = np.zeros((1, 6))[0]
	pose_inv[0:3] = T_inv[0:3, 3]
	pose_inv[3:6] = [inv_orient[2], inv_orient[1], inv_orient[0]]
	return pose_inv
###################### Shuffling Operations #########################
# Randomly shuffle given array of templates for training procedure.
def shuffle_templates(templates):
	# Return the templates in a random order along the batch axis.
	# Arguments:
	# 	templates: Input array of templates to get randomly shuffled (batch_size x num_points x 3)
	# Output:
	# 	shuffled_templates: Randomly ordered templates (batch_size x num_points x 3)
	# Fancy indexing with a random permutation replaces the manual
	# index-shuffle-and-copy loop; astype(np.float64) keeps the float64
	# output dtype that the original np.zeros buffer produced.
	order = np.random.permutation(templates.shape[0])
	return templates[order].astype(np.float64)
# Randomly shuffle given array of poses for training procedure.
def shuffle_poses(poses):
	# Return the poses in a random order along the batch axis.
	# Arguments:
	# 	poses: Input array of poses to get randomly shuffled (batch_size x n)
	# Output:
	# 	shuffled_poses: Randomly ordered poses (batch_size x n)
	# Fancy indexing with a random permutation replaces the manual
	# index-shuffle-and-copy loop; astype(np.float64) keeps the float64
	# output dtype that the original np.zeros buffer produced.
	order = np.random.permutation(poses.shape[0])
	return poses[order].astype(np.float64)
# Generate random transformation/pose for data augmentation.
def random_trans():
	# Output:
	# 	6D pose: first 3 translation values in [-0.2, 0.2) and last 3
	# 	euler angles in [-pi/18, pi/18) radians about x, y, z-axes. (1x6)
	translations = [0.4*np.random.uniform()-0.2 for _ in range(3)]
	rotations = [(np.pi/9)*np.random.uniform()-(np.pi/18) for _ in range(3)]
	return translations + rotations
# Generate random poses for each batch to train the network.
def generate_random_poses(batch_size):
	# Arguments:
	# 	batch_size: No of poses in the output
	# Output:
	# 	poses: Randomly generated poses (batch_size x 6), each value rounded
	# 	to 4 decimals; translations in [-1, 1), euler angles in [-pi/2, pi/2).
	pose_list = []
	for _ in range(batch_size):
		trans = [np.round(2*np.random.uniform()-1, 4) for _ in range(3)]			# random translations
		rots = [np.round(np.pi*np.random.uniform()-(np.pi/2), 4) for _ in range(3)]	# random orientations
		pose_list.append(trans + rots)
	return np.array(pose_list)
def select_random_points(source_data, num_point):
	# Randomly pick `num_point` points from each cloud. The same random point
	# ordering is used for every cloud in the batch.
	# Arguments:
	# 	source_data: Point clouds (batch_size x num_points x 3)
	# 	num_point: Number of points to keep per cloud.
	shuffled = np.copy(source_data)
	point_order = np.arange(shuffled.shape[1])
	np.random.shuffle(point_order)
	shuffled = shuffled[:, point_order, :]
	return shuffled[:, 0:num_point, :]
def add_noise(source_data):
	# Add zero-mean gaussian noise to every point of every cloud, with a
	# per-point standard deviation drawn uniformly from [0, 0.04).
	# Arguments:
	# 	source_data: Point clouds (batch_size x num_points x 3); modified in place.
	# Output:
	# 	source_data: The same array with noise added.
	# Vectorized replacement for the original per-point Python double loop:
	# one sigma per point, shared by that point's x/y/z coordinates, which is
	# distributionally identical to the old np.random.normal(0, sigma, (3,)).
	batch_size, num_points = source_data.shape[0], source_data.shape[1]
	sigma = 0.04 * np.random.random_sample((batch_size, num_points, 1))
	source_data[...] = source_data + sigma * np.random.normal(0.0, 1.0, source_data.shape)
	return source_data
###################### Tensor Operations #########################
def rotate_point_cloud_by_angle_y_tensor(data, rotation_angle):
	""" Rotate the point cloud about the y-axis by `rotation_angle` (radians).
		Input:
		  Nx3 tensor, original batch of point clouds
		Return:
		  Nx3 tensor, rotated batch of point clouds
	"""
	cos_t = tf.cos(rotation_angle)
	sin_t = tf.sin(rotation_angle)
	rot = tf.reshape([[cos_t, 0, sin_t], [0, 1, 0], [-sin_t, 0, cos_t]], [3, 3])
	points = tf.reshape(data, [-1, 3])
	# Pre-multiply: R . points^T, then transpose back to Nx3.
	return tf.transpose(tf.tensordot(rot, tf.transpose(points), [1, 0]))
def rotate_point_cloud_by_angle_x_tensor(data, rotation_angle):
	""" Rotate the point cloud about the x-axis by `rotation_angle` (radians).
		Input:
		  Nx3 tensor, original batch of point clouds
		Return:
		  Nx3 tensor, rotated batch of point clouds
	"""
	cos_t = tf.cos(rotation_angle)
	sin_t = tf.sin(rotation_angle)
	rot = tf.reshape([[1, 0, 0], [0, cos_t, -sin_t], [0, sin_t, cos_t]], [3, 3])
	points = tf.reshape(data, [-1, 3])
	# Pre-multiply: R . points^T, then transpose back to Nx3.
	return tf.transpose(tf.tensordot(rot, tf.transpose(points), [1, 0]))
def rotate_point_cloud_by_angle_z_tensor(data, rotation_angle):
	""" Rotate the point cloud about the z-axis by `rotation_angle` (radians).
		Input:
		  Nx3 tensor, original batch of point clouds
		Return:
		  Nx3 tensor, rotated batch of point clouds
	"""
	cos_t = tf.cos(rotation_angle)
	sin_t = tf.sin(rotation_angle)
	rot = tf.reshape([[cos_t, -sin_t, 0], [sin_t, cos_t, 0], [0, 0, 1]], [3, 3])
	points = tf.reshape(data, [-1, 3])
	# Pre-multiply: R . points^T, then transpose back to Nx3.
	return tf.transpose(tf.tensordot(rot, tf.transpose(points), [1, 0]))
def translate_tensor(data,shift):
	# Add the translation vector to the given tensor of points. (num_point x 3)
	# `shift` broadcasts across the points, so a length-3 vector translates all of them.
	return tf.add(data,shift)
# Tranform the data as per given poses with orientation as euler in degrees.
def transformation_tensor(datas,poses):
	# Arguments:
	# 	datas: Tensor of Point Cloud (batch_size x num_points x 3)
	# 	poses: Tensor of Poses (translation + euler angles) (batch_size x 6)
	# Ouput:
	# 	transformed_data: Tensor of transformed point cloud (batch_size x num_points x 3)
	# NOTE(review): rotation order here is x-y-z, the reverse of
	# apply_transformation's z-y-x — presumably intentional (inverse transform); confirm.
	# The zero tensor below is only a seed for tf.concat; it is sliced off with [1:] at the end.
	transformed_data = tf.zeros([datas.shape[1], datas.shape[2]])		# Tensor to store the transformed point clouds as tensor.
	for i in range(datas.shape[0]):
		transformed_data_t = rotate_point_cloud_by_angle_x_tensor(datas[i,...],poses[i,3])			# Rotate about x-axis
		transformed_data_t = rotate_point_cloud_by_angle_y_tensor(transformed_data_t,poses[i,4])		# Rotate about y-axis
		transformed_data_t = rotate_point_cloud_by_angle_z_tensor(transformed_data_t,poses[i,5])		# Rotate about z-axis
		transformed_data_t = translate_tensor(transformed_data_t,[poses[i,0],poses[i,1],poses[i,2]])	# Translate by given vector.
		transformed_data = tf.concat([transformed_data, transformed_data_t], 0)		# Append the transformed tensor point cloud.
	transformed_data = tf.reshape(transformed_data, [-1, datas.shape[1], datas.shape[2]])[1:]	# Reshape the transformed tensor and remove the seed. (batch_size x num_point x 3)
	return transformed_data
# Tranform the data as per given poses with orientation as quaternion.
def transformation_quat_tensor(data,quat,translation):
	# Arguments:
	# 	data: Tensor of Point Cloud. (batch_size x num_point x 3)
	# 	quat: Quaternion tensor to generate rotation matrix. (batch_size x 4)
	# 	translation: Translation tensor to translate the point cloud. (batch_size x 3)
	# Outputs:
	# 	transformed_data: Tensor of Rotated and Translated Point Cloud Data. (batch_size x num_points x 3)
	# The zero tensor below is only a seed for tf.concat; it is sliced off with [1:] at the end.
	transformed_data = tf.zeros([data.shape[1],3])			# Tensor to store transformed data.
	for i in range(quat.shape[0]):
		# Seperate each quaternion value (q0 scalar part, q1..q3 vector part).
		q0 = tf.slice(quat,[i,0],[1,1])
		q1 = tf.slice(quat,[i,1],[1,1])
		q2 = tf.slice(quat,[i,2],[1,1])
		q3 = tf.slice(quat,[i,3],[1,1])
		# Convert quaternion to rotation matrix.
		# Ref: http://www-evasion.inrialpes.fr/people/Franck.Hetroy/Teaching/ProjetsImage/2007/Bib/besl_mckay-pami1992.pdf
		# A method for Registration of 3D shapes paper by <NAME> and <NAME>.
		R = [[q0*q0+q1*q1-q2*q2-q3*q3, 2*(q1*q2-q0*q3), 2*(q1*q3+q0*q2)],
			[2*(q1*q2+q0*q3), q0*q0+q2*q2-q1*q1-q3*q3, 2*(q2*q3-q0*q1)],
			[2*(q1*q3-q0*q2), 2*(q2*q3+q0*q1), q0*q0+q3*q3-q1*q1-q2*q2]]
		R = tf.reshape(R,[3,3])		# Convert R into a single tensor of shape 3x3.
		# tf.tensordot: Arg: tensor1, tensor2, axes
		# axes defined for tensor1 & tensor2 should be of same size.
		# axis 1 of R is of size 3 and axis 0 of data (3xnum_points) is of size 3.
		temp_rotated_data = tf.transpose(tf.tensordot(R, tf.transpose(data[i,...]), [1,0]))	# Rotate the data. (num_points x 3)
		temp_rotated_data = tf.add(temp_rotated_data,translation[i,...])	# Add the translation (num_points x 3)
		transformed_data = tf.concat([transformed_data, temp_rotated_data],0)	# Append data (batch_size x num_points x 3)
	transformed_data = tf.reshape(transformed_data, [-1,data.shape[1],3])[1:]	# Reshape data and remove the seed cloud. (batch_size x num_point x 3)
	return transformed_data
###################### Display Operations #########################
# Display data inside ModelNet files.
def display_clouds(filename,model_no):
	# Read a templates CSV from data/templates and scatter-plot one model.
	# Arguments:
	# 	filename: Name of file to read the data from. (string)
	# 	model_no: Number to choose the model inside that file. (int)
	rows = []
	with open(os.path.join('data','templates',filename),'r') as csvfile:
		for row in csv.reader(csvfile):
			rows.append([float(v) for v in row])
	points = np.asarray(rows)
	# Each model occupies a contiguous block of 2048 points.
	points = points[model_no*2048:(model_no+1)*2048, :]
	fig = plt.figure()
	ax = fig.add_subplot(111,projection='3d')
	ax.scatter(points[:, 0], points[:, 1], points[:, 2])
	plt.show()
# Display given Point Cloud Data in blue color (default).
def display_clouds_data(data):
	# Arguments:
	# 	data: array of point clouds (num_points x 3); list or ndarray.
	points = np.asarray(data)
	fig = plt.figure()
	ax = fig.add_subplot(111,projection='3d')
	ax.scatter(points[:, 0], points[:, 1], points[:, 2])
	plt.show()
# Display given template, source and predicted point cloud data.
def display_three_clouds(data1,data2,data3,title):
	# Arguments:
	# 	data1	Template Data (num_points x 3) (Red)
	# 	data2	Source Data (num_points x 3) (Green)
	# 	data3	Predicted Data (num_points x 3) (Blue)
	fig = plt.figure()
	ax = fig.add_subplot(111,projection='3d')
	def scatter_cloud(cloud, rgba):
		# Scatter one cloud in the given RGBA colour and return the artist.
		pts = np.asarray(cloud)
		return ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], c=rgba)
	l1 = scatter_cloud(data1, [1, 0, 0, 1])		# template (red)
	l2 = scatter_cloud(data2, [0, 1, 0, 0.5])	# source (green)
	l3 = scatter_cloud(data3, [0, 0, 1, 0.5])	# predicted (blue)
	# Add details to Plot.
	plt.legend((l1,l2,l3),('Template Data','Source Data','Predicted Data'),prop={'size':15},markerscale=4)
	ax.tick_params(labelsize=10)
	ax.set_xlabel('X-axis',fontsize=15)
	ax.set_ylabel('Y-axis',fontsize=15)
	ax.set_zlabel('Z-axis',fontsize=15)
	# ax.set_xlim(-1,1.25)
	# ax.set_ylim(-1,1)
	# ax.set_zlim(-0.5,1.25)
	plt.title(title,fontdict={'fontsize':25})
	ax.xaxis.set_tick_params(labelsize=15)
	ax.yaxis.set_tick_params(labelsize=15)
	ax.zaxis.set_tick_params(labelsize=15)
	plt.show()
# Display template, source, predicted point cloud data with results after each iteration.
def display_itr_clouds(data1,data2,data3,ITR,title):
	# Arguments:
	# 	data1	Template Data (num_points x 3) (Red)
	# 	data2	Source Data (num_points x 3) (Green)
	# 	data3	Predicted Data (num_points x 3) (Blue)
	# 	ITR		Point Clouds obtained after each iteration (iterations x batch_size x num of points x 3) (Yellow)
	fig = plt.figure()
	ax = fig.add_subplot(111,projection='3d')
	print(ITR.shape)		# Display Number of Point Clouds in ITR.
	def scatter_cloud(cloud, rgba):
		# Scatter one cloud in the given RGBA colour and return the artist.
		pts = np.asarray(cloud)
		return ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], c=rgba)
	l1 = scatter_cloud(data1, [1, 0, 0, 1])	# template (red)
	l2 = scatter_cloud(data2, [0, 1, 0, 1])	# source (green)
	l3 = scatter_cloud(data3, [0, 0, 1, 1])	# predicted (blue)
	# Add point clouds after each iteration in Plot.
	for itr_data in ITR:
		scatter_cloud(itr_data[0], [1, 1, 0, 0.5])	# per-iteration cloud (yellow)
	# Add details to Plot.
	plt.legend((l1,l2,l3),('Template Data','Source Data','Predicted Data'),prop={'size':15},markerscale=4)
	ax.tick_params(labelsize=10)
	ax.set_xlabel('X-axis',fontsize=15)
	ax.set_ylabel('Y-axis',fontsize=15)
	ax.set_zlabel('Z-axis',fontsize=15)
	plt.title(title,fontdict={'fontsize':25})
	ax.xaxis.set_tick_params(labelsize=15)
	ax.yaxis.set_tick_params(labelsize=15)
	ax.zaxis.set_tick_params(labelsize=15)
	plt.show()
# Log test results to a folder
def _compute_accuracy_stats(idxs, num_batches, rot_err, trans_err, itr, times):
	# Helper for log_test_results: summary statistics for one success-criterion
	# index set. Returns (accuracy, mean rot err, var rot err, mean trans err,
	# var trans err, mean itr, var itr, mean time, var time); all zeros when
	# no run qualified, matching the original fallback behaviour.
	if len(idxs) == 0:
		return 0, 0, 0, 0, 0, 0, 0, 0, 0
	count = len(idxs)
	def _mean_var(values):
		selected = np.array(values)[idxs]
		return np.sum(selected)/count, np.var(selected)
	mean_rot, var_rot = _mean_var(rot_err)
	mean_trans, var_trans = _mean_var(trans_err)
	mean_itr, var_itr = _mean_var(itr)
	mean_time, var_time = _mean_var(times)
	return (count/(num_batches*1.0), mean_rot, var_rot, mean_trans, var_trans,
			mean_itr, var_itr, mean_time, var_time)

def _write_stats_section(out_file, header, stats):
	# Helper for log_test_results: write one "###### ... ######" block of stats.
	# (The "Accuray" spelling is kept as-is because downstream parsers may rely on it.)
	accuracy, mean_rot, var_rot, mean_trans, var_trans, mean_itr, var_itr, mean_time, var_time = stats
	out_file.write(header)
	out_file.write("Accuray: {}%\n".format(accuracy*100))
	out_file.write("Mean rotational error: {}\n".format(mean_rot))
	out_file.write("Mean translation error: {}\n".format(mean_trans))
	out_file.write("Mean time: {}\n".format(mean_time))
	out_file.write("Var time: {}\n".format(var_time))
	out_file.write("Var translation error: {}\n".format(var_trans))
	out_file.write("Var rotational error: {}\n".format(var_rot))
	out_file.write("Mean Iterations: {}\n".format(mean_itr))
	out_file.write("Var Iterations: {}\n".format(var_itr))

def log_test_results(LOG_DIR, filename, log):
	# Log per-run results as <filename>.csv, per-threshold summaries as
	# <filename>.txt, and error histograms as JPEGs, all inside LOG_DIR.
	# CSV columns: Sr. No., time taken, number of iterations, translation error, rotation error.
	# The triplicated per-threshold statistics and report sections of the
	# original are factored into the two helpers above.
	if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
	# Find params from the dictionary.
	ITR, TIME = log['ITR'], log['TIME']
	Trans_Err, Rot_Err = log['Trans_Err'], log['Rot_Err']
	idxs_5_5, idxs_10_1, idxs_20_2 = log['idxs_5_5'], log['idxs_10_1'], log['idxs_20_2']
	num_batches = log['num_batches']
	TIME_mean = sum(TIME)/len(TIME)
	# Log all the data in a csv file.
	import csv
	with open(os.path.join(LOG_DIR, filename+'.csv'),'w') as csvfile:
		csvwriter = csv.writer(csvfile)
		for i in range(len(TIME)):
			csvwriter.writerow([i, TIME[i], ITR[i], Trans_Err[i], Rot_Err[i]])
	# Summary statistics for each success criterion.
	stats_5_5 = _compute_accuracy_stats(idxs_5_5, num_batches, Rot_Err, Trans_Err, ITR, TIME)
	stats_10_1 = _compute_accuracy_stats(idxs_10_1, num_batches, Rot_Err, Trans_Err, ITR, TIME)
	stats_20_2 = _compute_accuracy_stats(idxs_20_2, num_batches, Rot_Err, Trans_Err, ITR, TIME)
	with open(os.path.join(LOG_DIR, filename+'.txt'),'w') as out_file:
		out_file.write("Mean of Time: {}\n".format(TIME_mean))
		out_file.write("\n")
		_write_stats_section(out_file, "###### 5 Degree & 0.05 Units ######\n", stats_5_5)
		out_file.write("\n")
		_write_stats_section(out_file, "###### 10 Degree & 0.1 Units ######\n", stats_10_1)
		out_file.write("\n")
		_write_stats_section(out_file, "###### 20 Degree & 0.2 Units ######\n", stats_20_2)
	# Histograms of the rotation and translation errors.
	plt.hist(Rot_Err,np.arange(0,185,5))
	plt.xlim(0,180)
	plt.savefig(os.path.join(LOG_DIR,'rot_err_hist.jpeg'),dpi=500,quality=100)
	plt.figure()
	plt.hist(Trans_Err,np.arange(0,1.01,0.01))
	plt.xlim(0,1)
	plt.savefig(os.path.join(LOG_DIR,'trans_err_hist.jpeg'),dpi=500,quality=100)
if __name__=='__main__':
	# Ad-hoc visual check: load the multi-model templates and scatter-plot one
	# of them. (Relies on the project-local `helper` module — presumably
	# imported at the top of this file; verify.)
	# a = np.array([[0,0,0,0,0,0],[0,0,0,90,0,0]])
	# print a.shape
	# a = poses_euler2quat(a)
	# print(a[1,3]*a[1,3]+a[1,4]*a[1,4]+a[1,5]*a[1,5]+a[1,6]*a[1,6])
	# print(a[0,3]*a[0,3]+a[0,4]*a[0,4]+a[0,5]*a[0,5]+a[0,6]*a[0,6])
	# print a.shape
	# display_clouds('airplane_templates.csv',0)
	templates = helper.process_templates('multi_model_templates')
	# templates = helper.process_templates('templates')
	# airplane = templates[0,:,:]
	idx = 199		# Index of the template to visualize.
	fig = plt.figure()
	ax = fig.add_subplot(111,projection='3d')
	# start = idx*2048
	# end = (idx+1)*2048
	ax.scatter(templates[idx,:,0],templates[idx,:,1],templates[idx,:,2])
	plt.show()
	print(templates.shape)
| [
"matplotlib.pyplot.title",
"os.mkdir",
"csv.reader",
"numpy.random.random_sample",
"tensorflow.reshape",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"numpy.mean",
"numpy.random.normal",
"os.path.join",
"numpy.round",
"transforms3d.euler.quat2mat",
"tensorflow.sin",
"numpy.co... | [((2496, 2512), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (2506, 2512), True, 'import numpy as np\n'), ((2894, 2915), 'numpy.asarray', 'np.asarray', (['templates'], {}), '(templates)\n', (2904, 2915), True, 'import numpy as np\n'), ((3384, 3401), 'numpy.asarray', 'np.asarray', (['poses'], {}), '(poses)\n', (3394, 3401), True, 'import numpy as np\n'), ((3719, 3744), 'h5py.File', 'h5py.File', (['file_name', '"""r"""'], {}), "(file_name, 'r')\n", (3728, 3744), False, 'import h5py\n'), ((4290, 4307), 'numpy.asarray', 'np.asarray', (['pairs'], {}), '(pairs)\n', (4300, 4307), True, 'import numpy as np\n'), ((4900, 4944), 'numpy.zeros', 'np.zeros', (['batch_data.shape'], {'dtype': 'np.float32'}), '(batch_data.shape, dtype=np.float32)\n', (4908, 4944), True, 'import numpy as np\n'), ((5721, 5765), 'numpy.zeros', 'np.zeros', (['batch_data.shape'], {'dtype': 'np.float32'}), '(batch_data.shape, dtype=np.float32)\n', (5729, 5765), True, 'import numpy as np\n'), ((6542, 6586), 'numpy.zeros', 'np.zeros', (['batch_data.shape'], {'dtype': 'np.float32'}), '(batch_data.shape, dtype=np.float32)\n', (6550, 6586), True, 'import numpy as np\n'), ((7747, 7761), 'numpy.copy', 'np.copy', (['datas'], {}), '(datas)\n', (7754, 7761), True, 'import numpy as np\n'), ((8785, 8806), 'numpy.asarray', 'np.asarray', (['new_poses'], {}), '(new_poses)\n', (8795, 8806), True, 'import numpy as np\n'), ((9877, 9909), 'numpy.zeros', 'np.zeros', (['(poses.shape[0], 4, 4)'], {}), '((poses.shape[0], 4, 4))\n', (9885, 9909), True, 'import numpy as np\n'), ((10835, 10850), 'numpy.array', 'np.array', (['poses'], {}), '(poses)\n', (10843, 10850), True, 'import numpy as np\n'), ((12057, 12096), 'numpy.zeros', 'np.zeros', (['(TRANSFORMATIONS.shape[0], 6)'], {}), '((TRANSFORMATIONS.shape[0], 6))\n', (12065, 12096), True, 'import numpy as np\n'), ((12833, 12860), 'numpy.copy', 'np.copy', (['TRANSFORMATIONS_ip'], {}), '(TRANSFORMATIONS_ip)\n', (12840, 12860), True, 'import numpy as 
np\n'), ((12875, 12914), 'numpy.zeros', 'np.zeros', (['(TRANSFORMATIONS.shape[0], 6)'], {}), '((TRANSFORMATIONS.shape[0], 6))\n', (12883, 12914), True, 'import numpy as np\n'), ((14001, 14036), 'numpy.zeros', 'np.zeros', (['(source_data.shape[0], 7)'], {}), '((source_data.shape[0], 7))\n', (14009, 14036), True, 'import numpy as np\n'), ((14587, 14603), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (14595, 14603), True, 'import numpy as np\n'), ((14663, 14711), 'transforms3d.euler.euler2mat', 't3d.euler2mat', (['pose[5]', 'pose[4]', 'pose[3]', '"""szyx"""'], {}), "(pose[5], pose[4], pose[3], 'szyx')\n", (14676, 14711), True, 'import transforms3d.euler as t3d\n'), ((14843, 14877), 'numpy.linalg.inv', 'np.linalg.inv', (['transformation_pose'], {}), '(transformation_pose)\n', (14856, 14877), True, 'import numpy as np\n'), ((15043, 15095), 'transforms3d.euler.mat2euler', 't3d.mat2euler', (['transformation_pose[0:3, 0:3]', '"""szyx"""'], {}), "(transformation_pose[0:3, 0:3], 'szyx')\n", (15056, 15095), True, 'import transforms3d.euler as t3d\n'), ((15592, 15617), 'numpy.zeros', 'np.zeros', (['templates.shape'], {}), '(templates.shape)\n', (15600, 15617), True, 'import numpy as np\n'), ((15678, 15710), 'numpy.arange', 'np.arange', (['(0)', 'templates.shape[0]'], {}), '(0, templates.shape[0])\n', (15687, 15710), True, 'import numpy as np\n'), ((15711, 15744), 'numpy.random.shuffle', 'np.random.shuffle', (['templates_idxs'], {}), '(templates_idxs)\n', (15728, 15744), True, 'import numpy as np\n'), ((16229, 16250), 'numpy.zeros', 'np.zeros', (['poses.shape'], {}), '(poses.shape)\n', (16237, 16250), True, 'import numpy as np\n'), ((16301, 16329), 'numpy.arange', 'np.arange', (['(0)', 'poses.shape[0]'], {}), '(0, poses.shape[0])\n', (16310, 16329), True, 'import numpy as np\n'), ((16333, 16362), 'numpy.random.shuffle', 'np.random.shuffle', (['poses_idxs'], {}), '(poses_idxs)\n', (16350, 16362), True, 'import numpy as np\n'), ((17880, 17895), 'numpy.array', 
'np.array', (['poses'], {}), '(poses)\n', (17888, 17895), True, 'import numpy as np\n'), ((17969, 17989), 'numpy.copy', 'np.copy', (['source_data'], {}), '(source_data)\n', (17976, 17989), True, 'import numpy as np\n'), ((17997, 18035), 'numpy.arange', 'np.arange', (['random_source_data.shape[1]'], {}), '(random_source_data.shape[1])\n', (18006, 18035), True, 'import numpy as np\n'), ((18070, 18092), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (18087, 18092), True, 'import numpy as np\n'), ((18931, 18953), 'tensorflow.cos', 'tf.cos', (['rotation_angle'], {}), '(rotation_angle)\n', (18937, 18953), True, 'import tensorflow as tf\n'), ((18964, 18986), 'tensorflow.sin', 'tf.sin', (['rotation_angle'], {}), '(rotation_angle)\n', (18970, 18986), True, 'import tensorflow as tf\n'), ((19006, 19080), 'tensorflow.reshape', 'tf.reshape', (['[[cosval, 0, sinval], [0, 1, 0], [-sinval, 0, cosval]]', '[3, 3]'], {}), '([[cosval, 0, sinval], [0, 1, 0], [-sinval, 0, cosval]], [3, 3])\n', (19016, 19080), True, 'import tensorflow as tf\n'), ((19086, 19111), 'tensorflow.reshape', 'tf.reshape', (['data', '[-1, 3]'], {}), '(data, [-1, 3])\n', (19096, 19111), True, 'import tensorflow as tf\n'), ((19477, 19499), 'tensorflow.cos', 'tf.cos', (['rotation_angle'], {}), '(rotation_angle)\n', (19483, 19499), True, 'import tensorflow as tf\n'), ((19510, 19532), 'tensorflow.sin', 'tf.sin', (['rotation_angle'], {}), '(rotation_angle)\n', (19516, 19532), True, 'import tensorflow as tf\n'), ((19552, 19626), 'tensorflow.reshape', 'tf.reshape', (['[[1, 0, 0], [0, cosval, -sinval], [0, sinval, cosval]]', '[3, 3]'], {}), '([[1, 0, 0], [0, cosval, -sinval], [0, sinval, cosval]], [3, 3])\n', (19562, 19626), True, 'import tensorflow as tf\n'), ((19632, 19657), 'tensorflow.reshape', 'tf.reshape', (['data', '[-1, 3]'], {}), '(data, [-1, 3])\n', (19642, 19657), True, 'import tensorflow as tf\n'), ((20023, 20045), 'tensorflow.cos', 'tf.cos', (['rotation_angle'], {}), 
'(rotation_angle)\n', (20029, 20045), True, 'import tensorflow as tf\n'), ((20056, 20078), 'tensorflow.sin', 'tf.sin', (['rotation_angle'], {}), '(rotation_angle)\n', (20062, 20078), True, 'import tensorflow as tf\n'), ((20098, 20172), 'tensorflow.reshape', 'tf.reshape', (['[[cosval, -sinval, 0], [sinval, cosval, 0], [0, 0, 1]]', '[3, 3]'], {}), '([[cosval, -sinval, 0], [sinval, cosval, 0], [0, 0, 1]], [3, 3])\n', (20108, 20172), True, 'import tensorflow as tf\n'), ((20178, 20203), 'tensorflow.reshape', 'tf.reshape', (['data', '[-1, 3]'], {}), '(data, [-1, 3])\n', (20188, 20203), True, 'import tensorflow as tf\n'), ((20418, 20437), 'tensorflow.add', 'tf.add', (['data', 'shift'], {}), '(data, shift)\n', (20424, 20437), True, 'import tensorflow as tf\n'), ((20856, 20898), 'tensorflow.zeros', 'tf.zeros', (['[datas.shape[1], datas.shape[2]]'], {}), '([datas.shape[1], datas.shape[2]])\n', (20864, 20898), True, 'import tensorflow as tf\n'), ((22287, 22315), 'tensorflow.zeros', 'tf.zeros', (['[data.shape[1], 3]'], {}), '([data.shape[1], 3])\n', (22295, 22315), True, 'import tensorflow as tf\n'), ((24315, 24327), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (24325, 24327), True, 'import matplotlib.pyplot as plt\n'), ((24379, 24395), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (24389, 24395), True, 'import numpy as np\n'), ((24660, 24670), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24668, 24670), True, 'import matplotlib.pyplot as plt\n'), ((24834, 24846), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (24844, 24846), True, 'import matplotlib.pyplot as plt\n'), ((25048, 25058), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25056, 25058), True, 'import matplotlib.pyplot as plt\n'), ((25347, 25359), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25357, 25359), True, 'import matplotlib.pyplot as plt\n'), ((26006, 26120), 'matplotlib.pyplot.legend', 'plt.legend', (['(l1, l2, l3)', "('Template 
Data', 'Source Data', 'Predicted Data')"], {'prop': "{'size': 15}", 'markerscale': '(4)'}), "((l1, l2, l3), ('Template Data', 'Source Data', 'Predicted Data'),\n prop={'size': 15}, markerscale=4)\n", (26016, 26120), True, 'import matplotlib.pyplot as plt\n'), ((26322, 26365), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontdict': "{'fontsize': 25}"}), "(title, fontdict={'fontsize': 25})\n", (26331, 26365), True, 'import matplotlib.pyplot as plt\n'), ((26485, 26495), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26493, 26495), True, 'import matplotlib.pyplot as plt\n'), ((26920, 26932), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (26930, 26932), True, 'import matplotlib.pyplot as plt\n'), ((27846, 27960), 'matplotlib.pyplot.legend', 'plt.legend', (['(l1, l2, l3)', "('Template Data', 'Source Data', 'Predicted Data')"], {'prop': "{'size': 15}", 'markerscale': '(4)'}), "((l1, l2, l3), ('Template Data', 'Source Data', 'Predicted Data'),\n prop={'size': 15}, markerscale=4)\n", (27856, 27960), True, 'import matplotlib.pyplot as plt\n'), ((28091, 28134), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontdict': "{'fontsize': 25}"}), "(title, fontdict={'fontsize': 25})\n", (28100, 28134), True, 'import matplotlib.pyplot as plt\n'), ((28254, 28264), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (28262, 28264), True, 'import matplotlib.pyplot as plt\n'), ((33767, 33783), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(180)'], {}), '(0, 180)\n', (33775, 33783), True, 'import matplotlib.pyplot as plt\n'), ((33860, 33872), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (33870, 33872), True, 'import matplotlib.pyplot as plt\n'), ((33918, 33932), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (33926, 33932), True, 'import matplotlib.pyplot as plt\n'), ((34340, 34389), 'helper.process_templates', 'helper.process_templates', (['"""multi_model_templates"""'], {}), 
"('multi_model_templates')\n", (34364, 34389), False, 'import helper\n'), ((34492, 34504), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (34502, 34504), True, 'import matplotlib.pyplot as plt\n'), ((34661, 34671), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (34669, 34671), True, 'import matplotlib.pyplot as plt\n'), ((1790, 1809), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (1800, 1809), False, 'import csv\n'), ((3265, 3284), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (3275, 3284), False, 'import csv\n'), ((3877, 3925), 'os.path.join', 'os.path.join', (['"""data"""', 'data_dict', '"""noise_data.h5"""'], {}), "('data', data_dict, 'noise_data.h5')\n", (3889, 3925), False, 'import os\n'), ((4173, 4192), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (4183, 4192), False, 'import csv\n'), ((5046, 5068), 'numpy.cos', 'np.cos', (['rotation_angle'], {}), '(rotation_angle)\n', (5052, 5068), True, 'import numpy as np\n'), ((5080, 5102), 'numpy.sin', 'np.sin', (['rotation_angle'], {}), '(rotation_angle)\n', (5086, 5102), True, 'import numpy as np\n'), ((5123, 5187), 'numpy.array', 'np.array', (['[[cosval, 0, sinval], [0, 1, 0], [-sinval, 0, cosval]]'], {}), '([[cosval, 0, sinval], [0, 1, 0], [-sinval, 0, cosval]])\n', (5131, 5187), True, 'import numpy as np\n'), ((5867, 5889), 'numpy.cos', 'np.cos', (['rotation_angle'], {}), '(rotation_angle)\n', (5873, 5889), True, 'import numpy as np\n'), ((5901, 5923), 'numpy.sin', 'np.sin', (['rotation_angle'], {}), '(rotation_angle)\n', (5907, 5923), True, 'import numpy as np\n'), ((5944, 6008), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, cosval, -sinval], [0, sinval, cosval]]'], {}), '([[1, 0, 0], [0, cosval, -sinval], [0, sinval, cosval]])\n', (5952, 6008), True, 'import numpy as np\n'), ((6688, 6710), 'numpy.cos', 'np.cos', (['rotation_angle'], {}), '(rotation_angle)\n', (6694, 6710), True, 'import numpy as np\n'), ((6722, 6744), 'numpy.sin', 'np.sin', 
(['rotation_angle'], {}), '(rotation_angle)\n', (6728, 6744), True, 'import numpy as np\n'), ((6765, 6829), 'numpy.array', 'np.array', (['[[cosval, -sinval, 0], [sinval, cosval, 0], [0, 0, 1]]'], {}), '([[cosval, -sinval, 0], [sinval, cosval, 0], [0, 0, 1]])\n', (6773, 6829), True, 'import numpy as np\n'), ((7302, 7318), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (7312, 7318), True, 'import numpy as np\n'), ((8528, 8581), 'transforms3d.euler.euler2quat', 't3d.euler2quat', (['poses[i, 3]', 'poses[i, 4]', 'poses[i, 5]'], {}), '(poses[i, 3], poses[i, 4], poses[i, 5])\n', (8542, 8581), True, 'import transforms3d.euler as t3d\n'), ((9985, 10045), 'transforms3d.euler.euler2mat', 't3d.euler2mat', (['poses[i, 5]', 'poses[i, 4]', 'poses[i, 3]', '"""szyx"""'], {}), "(poses[i, 5], poses[i, 4], poses[i, 3], 'szyx')\n", (9998, 10045), True, 'import transforms3d.euler as t3d\n'), ((11003, 11019), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (11011, 11019), True, 'import numpy as np\n'), ((11060, 11126), 'transforms3d.euler.quat2mat', 't3d.quat2mat', (['[poses[i, 3], poses[i, 4], poses[i, 5], poses[i, 6]]'], {}), '([poses[i, 3], poses[i, 4], poses[i, 5], poses[i, 6]])\n', (11072, 11126), True, 'import transforms3d.euler as t3d\n'), ((11387, 11442), 'numpy.dot', 'np.dot', (['transformation_matrix', 'TRANSFORMATIONS[i, :, :]'], {}), '(transformation_matrix, TRANSFORMATIONS[i, :, :])\n', (11393, 11442), True, 'import numpy as np\n'), ((12248, 12274), 'transforms3d.euler.mat2euler', 't3d.mat2euler', (['rot', '"""szyx"""'], {}), "(rot, 'szyx')\n", (12261, 12274), True, 'import transforms3d.euler as t3d\n'), ((13009, 13042), 'numpy.linalg.inv', 'np.linalg.inv', (['TRANSFORMATIONS[i]'], {}), '(TRANSFORMATIONS[i])\n', (13022, 13042), True, 'import numpy as np\n'), ((13123, 13149), 'transforms3d.euler.mat2euler', 't3d.mat2euler', (['rot', '"""szyx"""'], {}), "(rot, 'szyx')\n", (13136, 13149), True, 'import transforms3d.euler as t3d\n'), ((14095, 14126), 
'numpy.mean', 'np.mean', (['source_data[i]'], {'axis': '(0)'}), '(source_data[i], axis=0)\n', (14102, 14126), True, 'import numpy as np\n'), ((14148, 14181), 'numpy.mean', 'np.mean', (['template_data[i]'], {'axis': '(0)'}), '(template_data[i], axis=0)\n', (14155, 14181), True, 'import numpy as np\n'), ((14385, 14486), 'numpy.array', 'np.array', (['[centroid_translation[0], centroid_translation[1], centroid_translation[2],\n 1, 0, 0, 0]'], {}), '([centroid_translation[0], centroid_translation[1],\n centroid_translation[2], 1, 0, 0, 0])\n', (14393, 14486), True, 'import numpy as np\n'), ((14890, 14906), 'numpy.zeros', 'np.zeros', (['(1, 6)'], {}), '((1, 6))\n', (14898, 14906), True, 'import numpy as np\n'), ((21474, 21526), 'tensorflow.concat', 'tf.concat', (['[transformed_data, transformed_data_t]', '(0)'], {}), '([transformed_data, transformed_data_t], 0)\n', (21483, 21526), True, 'import tensorflow as tf\n'), ((21598, 21664), 'tensorflow.reshape', 'tf.reshape', (['transformed_data', '[-1, datas.shape[1], datas.shape[2]]'], {}), '(transformed_data, [-1, datas.shape[1], datas.shape[2]])\n', (21608, 21664), True, 'import tensorflow as tf\n'), ((22427, 22457), 'tensorflow.slice', 'tf.slice', (['quat', '[i, 0]', '[1, 1]'], {}), '(quat, [i, 0], [1, 1])\n', (22435, 22457), True, 'import tensorflow as tf\n'), ((22461, 22491), 'tensorflow.slice', 'tf.slice', (['quat', '[i, 1]', '[1, 1]'], {}), '(quat, [i, 1], [1, 1])\n', (22469, 22491), True, 'import tensorflow as tf\n'), ((22495, 22525), 'tensorflow.slice', 'tf.slice', (['quat', '[i, 2]', '[1, 1]'], {}), '(quat, [i, 2], [1, 1])\n', (22503, 22525), True, 'import tensorflow as tf\n'), ((22529, 22559), 'tensorflow.slice', 'tf.slice', (['quat', '[i, 3]', '[1, 1]'], {}), '(quat, [i, 3], [1, 1])\n', (22537, 22559), True, 'import tensorflow as tf\n'), ((22996, 23017), 'tensorflow.reshape', 'tf.reshape', (['R', '[3, 3]'], {}), '(R, [3, 3])\n', (23006, 23017), True, 'import tensorflow as tf\n'), ((23398, 23444), 'tensorflow.add', 
'tf.add', (['temp_rotated_data', 'translation[i, ...]'], {}), '(temp_rotated_data, translation[i, ...])\n', (23404, 23444), True, 'import tensorflow as tf\n'), ((23508, 23559), 'tensorflow.concat', 'tf.concat', (['[transformed_data, temp_rotated_data]', '(0)'], {}), '([transformed_data, temp_rotated_data], 0)\n', (23517, 23559), True, 'import tensorflow as tf\n'), ((23627, 23679), 'tensorflow.reshape', 'tf.reshape', (['transformed_data', '[-1, data.shape[1], 3]'], {}), '(transformed_data, [-1, data.shape[1], 3])\n', (23637, 23679), True, 'import tensorflow as tf\n'), ((24211, 24230), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (24221, 24230), False, 'import csv\n'), ((28536, 28559), 'os.path.exists', 'os.path.exists', (['LOG_DIR'], {}), '(LOG_DIR)\n', (28550, 28559), False, 'import os\n'), ((28561, 28578), 'os.mkdir', 'os.mkdir', (['LOG_DIR'], {}), '(LOG_DIR)\n', (28569, 28578), False, 'import os\n'), ((29018, 29037), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (29028, 29037), False, 'import csv\n'), ((33746, 33766), 'numpy.arange', 'np.arange', (['(0)', '(185)', '(5)'], {}), '(0, 185, 5)\n', (33755, 33766), True, 'import numpy as np\n'), ((33796, 33838), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""rot_err_hist.jpeg"""'], {}), "(LOG_DIR, 'rot_err_hist.jpeg')\n", (33808, 33838), False, 'import os\n'), ((33893, 33917), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(0.01)'], {}), '(0, 1.01, 0.01)\n', (33902, 33917), True, 'import numpy as np\n'), ((33945, 33989), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""trans_err_hist.jpeg"""'], {}), "(LOG_DIR, 'trans_err_hist.jpeg')\n", (33957, 33989), False, 'import os\n'), ((1713, 1760), 'os.path.join', 'os.path.join', (['"""data"""', 'templates_dict', 'file_name'], {}), "('data', templates_dict, file_name)\n", (1725, 1760), False, 'import os\n'), ((2026, 2088), 'os.path.join', 'os.path.join', (['"""data"""', 'templates_dict', '"""template_filenames.txt"""'], {}), "('data', 
templates_dict, 'template_filenames.txt')\n", (2038, 2088), False, 'import os\n'), ((3189, 3235), 'os.path.join', 'os.path.join', (['"""data"""', 'templates_dict', 'filename'], {}), "('data', templates_dict, filename)\n", (3201, 3235), False, 'import os\n'), ((3489, 3533), 'os.path.join', 'os.path.join', (['"""data"""', 'data_dict', '"""files.txt"""'], {}), "('data', data_dict, 'files.txt')\n", (3501, 3533), False, 'import os\n'), ((4098, 4140), 'os.path.join', 'os.path.join', (['"""data"""', 'data_dict', 'file_name'], {}), "('data', data_dict, file_name)\n", (4110, 4140), False, 'import os\n'), ((9590, 9605), 'numpy.array', 'np.array', (['poses'], {}), '(poses)\n', (9598, 9605), True, 'import numpy as np\n'), ((11535, 11573), 'numpy.dot', 'np.dot', (['rot', 'templates_data[i, :, :].T'], {}), '(rot, templates_data[i, :, :].T)\n', (11541, 11573), True, 'import numpy as np\n'), ((19171, 19189), 'tensorflow.transpose', 'tf.transpose', (['data'], {}), '(data)\n', (19183, 19189), True, 'import tensorflow as tf\n'), ((19717, 19735), 'tensorflow.transpose', 'tf.transpose', (['data'], {}), '(data)\n', (19729, 19735), True, 'import tensorflow as tf\n'), ((20263, 20281), 'tensorflow.transpose', 'tf.transpose', (['data'], {}), '(data)\n', (20275, 20281), True, 'import tensorflow as tf\n'), ((24138, 24181), 'os.path.join', 'os.path.join', (['"""data"""', '"""templates"""', 'filename'], {}), "('data', 'templates', filename)\n", (24150, 24181), False, 'import os\n'), ((28948, 28988), 'os.path.join', 'os.path.join', (['LOG_DIR', "(filename + '.csv')"], {}), "(LOG_DIR, filename + '.csv')\n", (28960, 28988), False, 'import os\n'), ((31713, 31753), 'os.path.join', 'os.path.join', (['LOG_DIR', "(filename + '.txt')"], {}), "(LOG_DIR, filename + '.txt')\n", (31725, 31753), False, 'import os\n'), ((1272, 1283), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1281, 1283), False, 'import os\n'), ((1493, 1504), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1502, 1504), False, 'import os\n'), 
((16806, 16825), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (16823, 16825), True, 'import numpy as np\n'), ((16835, 16854), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (16852, 16854), True, 'import numpy as np\n'), ((16864, 16883), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (16881, 16883), True, 'import numpy as np\n'), ((16958, 16977), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (16975, 16977), True, 'import numpy as np\n'), ((17000, 17019), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (17017, 17019), True, 'import numpy as np\n'), ((17042, 17061), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (17059, 17061), True, 'import numpy as np\n'), ((17720, 17740), 'numpy.round', 'np.round', (['x_trans', '(4)'], {}), '(x_trans, 4)\n', (17728, 17740), True, 'import numpy as np\n'), ((17741, 17761), 'numpy.round', 'np.round', (['y_trans', '(4)'], {}), '(y_trans, 4)\n', (17749, 17761), True, 'import numpy as np\n'), ((17762, 17782), 'numpy.round', 'np.round', (['z_trans', '(4)'], {}), '(z_trans, 4)\n', (17770, 17782), True, 'import numpy as np\n'), ((17783, 17801), 'numpy.round', 'np.round', (['x_rot', '(4)'], {}), '(x_rot, 4)\n', (17791, 17801), True, 'import numpy as np\n'), ((17802, 17820), 'numpy.round', 'np.round', (['y_rot', '(4)'], {}), '(y_rot, 4)\n', (17810, 17820), True, 'import numpy as np\n'), ((17821, 17839), 'numpy.round', 'np.round', (['z_rot', '(4)'], {}), '(z_rot, 4)\n', (17829, 17839), True, 'import numpy as np\n'), ((18390, 18415), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (18413, 18415), True, 'import numpy as np\n'), ((18510, 18564), 'numpy.random.normal', 'np.random.normal', (['mean', 'sigma', 'source_data[i, j].shape'], {}), '(mean, sigma, source_data[i, j].shape)\n', (18526, 18564), True, 'import numpy as np\n'), ((23304, 23330), 'tensorflow.transpose', 'tf.transpose', (['data[i, ...]'], {}), '(data[i, 
...])\n', (23316, 23330), True, 'import tensorflow as tf\n'), ((29308, 29325), 'numpy.array', 'np.array', (['Rot_Err'], {}), '(Rot_Err)\n', (29316, 29325), True, 'import numpy as np\n'), ((29441, 29460), 'numpy.array', 'np.array', (['Trans_Err'], {}), '(Trans_Err)\n', (29449, 29460), True, 'import numpy as np\n'), ((29558, 29571), 'numpy.array', 'np.array', (['ITR'], {}), '(ITR)\n', (29566, 29571), True, 'import numpy as np\n'), ((29672, 29686), 'numpy.array', 'np.array', (['TIME'], {}), '(TIME)\n', (29680, 29686), True, 'import numpy as np\n'), ((30055, 30072), 'numpy.array', 'np.array', (['Rot_Err'], {}), '(Rot_Err)\n', (30063, 30072), True, 'import numpy as np\n'), ((30193, 30212), 'numpy.array', 'np.array', (['Trans_Err'], {}), '(Trans_Err)\n', (30201, 30212), True, 'import numpy as np\n'), ((30315, 30328), 'numpy.array', 'np.array', (['ITR'], {}), '(ITR)\n', (30323, 30328), True, 'import numpy as np\n'), ((30434, 30448), 'numpy.array', 'np.array', (['TIME'], {}), '(TIME)\n', (30442, 30448), True, 'import numpy as np\n'), ((30919, 30936), 'numpy.array', 'np.array', (['Rot_Err'], {}), '(Rot_Err)\n', (30927, 30936), True, 'import numpy as np\n'), ((31130, 31149), 'numpy.array', 'np.array', (['Trans_Err'], {}), '(Trans_Err)\n', (31138, 31149), True, 'import numpy as np\n'), ((31311, 31324), 'numpy.array', 'np.array', (['ITR'], {}), '(ITR)\n', (31319, 31324), True, 'import numpy as np\n'), ((31486, 31500), 'numpy.array', 'np.array', (['TIME'], {}), '(TIME)\n', (31494, 31500), True, 'import numpy as np\n'), ((1387, 1398), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1396, 1398), False, 'import os\n'), ((9190, 9215), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (9213, 9215), True, 'import numpy as np\n'), ((9238, 9263), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (9261, 9263), True, 'import numpy as np\n'), ((9286, 9311), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (9309, 
9311), True, 'import numpy as np\n'), ((9373, 9398), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (9396, 9398), True, 'import numpy as np\n'), ((9437, 9462), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (9460, 9462), True, 'import numpy as np\n'), ((9501, 9526), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (9524, 9526), True, 'import numpy as np\n'), ((17423, 17442), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (17440, 17442), True, 'import numpy as np\n'), ((17448, 17467), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (17465, 17467), True, 'import numpy as np\n'), ((17473, 17492), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (17490, 17492), True, 'import numpy as np\n'), ((17566, 17585), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (17583, 17585), True, 'import numpy as np\n'), ((17605, 17624), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (17622, 17624), True, 'import numpy as np\n'), ((17644, 17663), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (17661, 17663), True, 'import numpy as np\n'), ((29238, 29255), 'numpy.array', 'np.array', (['Rot_Err'], {}), '(Rot_Err)\n', (29246, 29255), True, 'import numpy as np\n'), ((29367, 29386), 'numpy.array', 'np.array', (['Trans_Err'], {}), '(Trans_Err)\n', (29375, 29386), True, 'import numpy as np\n'), ((29496, 29509), 'numpy.array', 'np.array', (['ITR'], {}), '(ITR)\n', (29504, 29509), True, 'import numpy as np\n'), ((29608, 29622), 'numpy.array', 'np.array', (['TIME'], {}), '(TIME)\n', (29616, 29622), True, 'import numpy as np\n'), ((29982, 29999), 'numpy.array', 'np.array', (['Rot_Err'], {}), '(Rot_Err)\n', (29990, 29999), True, 'import numpy as np\n'), ((30116, 30135), 'numpy.array', 'np.array', (['Trans_Err'], {}), '(Trans_Err)\n', (30124, 30135), True, 'import numpy as np\n'), ((30250, 30263), 'numpy.array', 'np.array', 
(['ITR'], {}), '(ITR)\n', (30258, 30263), True, 'import numpy as np\n'), ((30367, 30381), 'numpy.array', 'np.array', (['TIME'], {}), '(TIME)\n', (30375, 30381), True, 'import numpy as np\n'), ((30809, 30826), 'numpy.array', 'np.array', (['Rot_Err'], {}), '(Rot_Err)\n', (30817, 30826), True, 'import numpy as np\n'), ((31013, 31032), 'numpy.array', 'np.array', (['Trans_Err'], {}), '(Trans_Err)\n', (31021, 31032), True, 'import numpy as np\n'), ((31213, 31226), 'numpy.array', 'np.array', (['ITR'], {}), '(ITR)\n', (31221, 31226), True, 'import numpy as np\n'), ((31392, 31406), 'numpy.array', 'np.array', (['TIME'], {}), '(TIME)\n', (31400, 31406), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
def visualize_gmm(gmm, max_K_to_display=16, fontsize=25):
''' Create single image visualization of all GMM parameters
Post Condition
--------------
New matplotlib figure created with visual of means and stddevs for all K clusters.
'''
K = gmm.K
D = gmm.D
P = int(np.sqrt(D))
comp_ids_bigtosmall_K = np.argsort(gmm.log_pi_K)[::-1][:max_K_to_display]
ncols = max_K_to_display + 1
fig, ax_grid = plt.subplots(
nrows=2, ncols=ncols,
figsize=(3 * ncols, 3 * 2))
ax_grid[0,0].set_ylabel("mean", fontsize=fontsize)
ax_grid[1,0].set_ylabel("stddev", fontsize=fontsize)
last_col_id = comp_ids_bigtosmall_K.size - 1
for col_id, kk in enumerate(comp_ids_bigtosmall_K):
# Plot learned means
cur_ax = ax_grid[0, col_id]
mu_img_PP = gmm.mu_KD[kk].reshape((P, P))
img_h = cur_ax.imshow(mu_img_PP, interpolation='nearest',
vmin=-1.0, vmax=1.0, cmap='gray')
cur_ax.set_title("pi[%d] %.3f" % (kk, np.exp(gmm.log_pi_K[kk])), fontsize=fontsize)
cur_ax.set_xticks([])
cur_ax.set_yticks([])
if col_id == last_col_id:
cbar = fig.colorbar(img_h, ax=ax_grid[0, col_id + 1], ticks=[-1, 0, 1])
cbar.ax.tick_params(labelsize=fontsize)
# Plot learned stddev
cur_ax = ax_grid[1, col_id]
stddev_img_PP = gmm.stddev_KD[kk].reshape((P, P))
img_h = cur_ax.imshow(stddev_img_PP, interpolation='nearest',
vmin=0.0, vmax=1.5, cmap='gray')
cur_ax.set_xticks([])
cur_ax.set_yticks([])
if col_id == last_col_id:
cbar = fig.colorbar(img_h, ax=ax_grid[1, col_id + 1], ticks=[0.0, 0.5, 1.0])
cbar.ax.tick_params(labelsize=fontsize)
for empty_kk in range(K, ncols):
empty_ax=ax_grid[0, empty_kk]
empty_ax.set_visible(False)
empty_ax=ax_grid[1, empty_kk]
empty_ax.set_visible(False)
plt.tight_layout() | [
"matplotlib.pyplot.subplots",
"numpy.argsort",
"numpy.exp",
"matplotlib.pyplot.tight_layout",
"numpy.sqrt"
] | [((493, 555), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': 'ncols', 'figsize': '(3 * ncols, 3 * 2)'}), '(nrows=2, ncols=ncols, figsize=(3 * ncols, 3 * 2))\n', (505, 555), True, 'import matplotlib.pyplot as plt\n'), ((2006, 2024), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2022, 2024), True, 'import matplotlib.pyplot as plt\n'), ((349, 359), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (356, 359), True, 'import numpy as np\n'), ((390, 414), 'numpy.argsort', 'np.argsort', (['gmm.log_pi_K'], {}), '(gmm.log_pi_K)\n', (400, 414), True, 'import numpy as np\n'), ((1064, 1088), 'numpy.exp', 'np.exp', (['gmm.log_pi_K[kk]'], {}), '(gmm.log_pi_K[kk])\n', (1070, 1088), True, 'import numpy as np\n')] |
import cv2
import time
import numpy as np
from statistics import mean
from unified_detector import Fingertips
images = np.load('../dataset/test/images.npy')
test_x = np.load('../dataset/test/test_x.npy')
test_y_prob = np.load('../dataset/test/test_y_prob.npy')
test_y_keys = np.load('../dataset/test/test_y_keys.npy')
crop_info = np.load('../dataset/test/crop_info.npy')
model = Fingertips(weights='../weights/fingertip.h5')
# classification
ground_truth_class = np.array([0, 0, 0, 0, 0, 0, 0, 0])
prediction_class = np.array([0, 0, 0, 0, 0, 0, 0, 0])
# regression
fingertip_err = np.array([0, 0, 0, 0, 0, 0, 0, 0])
avg_time = 0
iteration = 0
conf_mat = np.zeros(shape=(8, 8))
for n_image, (info, image, cropped_image, gt_prob, gt_pos) in enumerate(zip(crop_info, images, test_x,
test_y_prob, test_y_keys), 1):
print('Images: ', n_image)
tl = [info[0], info[1]]
height, width = info[2], info[3]
""" Predictions """
tic = time.time()
prob, pos = model.classify(image=cropped_image)
pos = np.mean(pos, 0)
""" Post processing """
threshold = 0.5
prob = np.asarray([(p >= threshold) * 1.0 for p in prob])
for i in range(0, len(gt_pos), 2):
gt_pos[i] = gt_pos[i] * width / 128. + tl[0]
gt_pos[i + 1] = gt_pos[i + 1] * height / 128. + tl[1]
for i in range(0, len(pos), 2):
pos[i] = pos[i] * width + tl[0]
pos[i + 1] = pos[i + 1] * height + tl[1]
toc = time.time()
avg_time = avg_time + (toc - tic)
iteration = iteration + 1
""" Calculations """
# Classification
gt_cls = model.class_finder(prob=gt_prob)
pred_cls = model.class_finder(prob=prob)
ground_truth_class[gt_cls] = ground_truth_class[gt_cls] + 1
if gt_cls == pred_cls:
prediction_class[pred_cls] = prediction_class[pred_cls] + 1
# Regression
squared_diff = np.square(gt_pos - pos)
error = 0
for i in range(0, 5):
if prob[i] == 1:
error = error + np.sqrt(squared_diff[2 * i] + squared_diff[2 * i + 1])
error = error / sum(prob)
fingertip_err[pred_cls] = fingertip_err[pred_cls] + error
conf_mat[gt_cls, pred_cls] = conf_mat[gt_cls, pred_cls] + 1
""" Drawing finger tips """
index = 0
color = [(15, 15, 240), (15, 240, 155), (240, 155, 15), (240, 15, 155), (240, 15, 240)]
for c, p in enumerate(prob):
if p == 1:
image = cv2.circle(image, (int(pos[index]), int(pos[index + 1])), radius=12,
color=color[c], thickness=-2)
index = index + 2
cv2.imshow('', image)
cv2.waitKey(0)
# cv2.imwrite('output_perform/' + image_name, image)
accuracy = prediction_class / ground_truth_class
accuracy = accuracy * 100
accuracy = np.round(accuracy, 2)
avg_time = avg_time / iteration
fingertip_err = fingertip_err / prediction_class
fingertip_err = np.round(fingertip_err, 4)
np.save('../data/conf_mat.npy', conf_mat)
print(prediction_class)
print(ground_truth_class)
print('Accuracy: ', accuracy, '%')
print('Fingertip detection error: ', fingertip_err, ' pixels')
print('Mean Error: ', mean(fingertip_err), ' pixels')
print('Total Iteration: ', iteration)
print('Mean Execution Time: ', round(avg_time, 4))
| [
"numpy.load",
"numpy.save",
"cv2.waitKey",
"numpy.asarray",
"numpy.square",
"numpy.zeros",
"time.time",
"numpy.mean",
"numpy.array",
"statistics.mean",
"unified_detector.Fingertips",
"cv2.imshow",
"numpy.round",
"numpy.sqrt"
] | [((120, 157), 'numpy.load', 'np.load', (['"""../dataset/test/images.npy"""'], {}), "('../dataset/test/images.npy')\n", (127, 157), True, 'import numpy as np\n'), ((167, 204), 'numpy.load', 'np.load', (['"""../dataset/test/test_x.npy"""'], {}), "('../dataset/test/test_x.npy')\n", (174, 204), True, 'import numpy as np\n'), ((219, 261), 'numpy.load', 'np.load', (['"""../dataset/test/test_y_prob.npy"""'], {}), "('../dataset/test/test_y_prob.npy')\n", (226, 261), True, 'import numpy as np\n'), ((276, 318), 'numpy.load', 'np.load', (['"""../dataset/test/test_y_keys.npy"""'], {}), "('../dataset/test/test_y_keys.npy')\n", (283, 318), True, 'import numpy as np\n'), ((331, 371), 'numpy.load', 'np.load', (['"""../dataset/test/crop_info.npy"""'], {}), "('../dataset/test/crop_info.npy')\n", (338, 371), True, 'import numpy as np\n'), ((381, 426), 'unified_detector.Fingertips', 'Fingertips', ([], {'weights': '"""../weights/fingertip.h5"""'}), "(weights='../weights/fingertip.h5')\n", (391, 426), False, 'from unified_detector import Fingertips\n'), ((466, 500), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0])\n', (474, 500), True, 'import numpy as np\n'), ((520, 554), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0])\n', (528, 554), True, 'import numpy as np\n'), ((585, 619), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0])\n', (593, 619), True, 'import numpy as np\n'), ((658, 680), 'numpy.zeros', 'np.zeros', ([], {'shape': '(8, 8)'}), '(shape=(8, 8))\n', (666, 680), True, 'import numpy as np\n'), ((2850, 2871), 'numpy.round', 'np.round', (['accuracy', '(2)'], {}), '(accuracy, 2)\n', (2858, 2871), True, 'import numpy as np\n'), ((2969, 2995), 'numpy.round', 'np.round', (['fingertip_err', '(4)'], {}), '(fingertip_err, 4)\n', (2977, 2995), True, 'import numpy as np\n'), ((2996, 3037), 'numpy.save', 'np.save', (['"""../data/conf_mat.npy"""', 'conf_mat'], 
{}), "('../data/conf_mat.npy', conf_mat)\n", (3003, 3037), True, 'import numpy as np\n'), ((1024, 1035), 'time.time', 'time.time', ([], {}), '()\n', (1033, 1035), False, 'import time\n'), ((1098, 1113), 'numpy.mean', 'np.mean', (['pos', '(0)'], {}), '(pos, 0)\n', (1105, 1113), True, 'import numpy as np\n'), ((1174, 1226), 'numpy.asarray', 'np.asarray', (['[((p >= threshold) * 1.0) for p in prob]'], {}), '([((p >= threshold) * 1.0) for p in prob])\n', (1184, 1226), True, 'import numpy as np\n'), ((1517, 1528), 'time.time', 'time.time', ([], {}), '()\n', (1526, 1528), False, 'import time\n'), ((2665, 2686), 'cv2.imshow', 'cv2.imshow', (['""""""', 'image'], {}), "('', image)\n", (2675, 2686), False, 'import cv2\n'), ((2691, 2705), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2702, 2705), False, 'import cv2\n'), ((3210, 3229), 'statistics.mean', 'mean', (['fingertip_err'], {}), '(fingertip_err)\n', (3214, 3229), False, 'from statistics import mean\n'), ((1940, 1963), 'numpy.square', 'np.square', (['(gt_pos - pos)'], {}), '(gt_pos - pos)\n', (1949, 1963), True, 'import numpy as np\n'), ((2073, 2127), 'numpy.sqrt', 'np.sqrt', (['(squared_diff[2 * i] + squared_diff[2 * i + 1])'], {}), '(squared_diff[2 * i] + squared_diff[2 * i + 1])\n', (2080, 2127), True, 'import numpy as np\n')] |
from image_display import display_image
import numpy as np, cv2, os
from torchvision.transforms import Grayscale
from torchvision.transforms import Compose
import torch
from torch import nn
from torchvision.transforms import ToTensor
model = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=30, kernel_size=3, padding = 1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2,stride=2),
nn.Conv2d(in_channels=30, out_channels=30, kernel_size=7, padding = 2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2,stride=2),
nn.Conv2d(in_channels=30, out_channels=30, kernel_size=11, padding = 3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2,stride=2),
nn.Dropout(.5),
nn.Flatten(),
nn.Linear(in_features=270, out_features=256),
nn.ReLU(),
nn.Dropout(.5),
nn.Linear(in_features=256, out_features=128),
nn.ReLU(),
nn.Dropout(.5),
nn.Linear(in_features=128, out_features=7)
)
model.load_state_dict(torch.load("./Finalmodel40"))
transform = Compose([
ToTensor(),
Grayscale()
])
def detect_face():
if not os.path.isdir('Screenshot'):
os.makedirs('Screenshot')
#cv2_base_directory = os.path.dirname(os.path.abspath(cv2.__file__))
#classifier_path = cv2_base_directory+'\\data\\haarcascade_frontalface_default.xml'
classifier_path = 'haarcascades/haarcascade_frontalface_default.xml'
face_classifier = cv2.CascadeClassifier(classifier_path)
# To capture video from webcam.
cap = cv2.VideoCapture(0)
while True:
# Read the frame
_, img = cap.read()
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect the faces
faces = face_classifier.detectMultiScale(gray, 1.1, 4)
# Draw the rectangle around each face
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 255), 1)
# Display
cv2.imshow('img', img)
cv2.imwrite('Screenshot/face_screenshot.jpg',img)
# Stop if escape|enter key is pressed
k = cv2.waitKey(30) & 0xff
if (cv2.waitKey(1) == 13) | (k == 27): #13 for return (enter) key
break
# Release the VideoCapture object
cap.release()
cv2.destroyAllWindows()
def main():
detect_face()
path = "./Screenshot/face_screenshot.jpg"
img = cv2.imread(path)
#Format for the Mul:0 Tensor
img= cv2.resize(img,dsize=(48,48), interpolation = cv2.INTER_CUBIC)
#Numpy array
np_image_data = np.asarray(img)
img = transform(np_image_data)
img = img.unsqueeze(1)
output = model(img)
_, prediction = torch.max(output.data, 1)
prediction = prediction[0].int()
emotion = ""
if prediction == 0:
emotion = "angry"
if prediction == 1:
emotion = "disgust"
if prediction == 2:
emotion = "fear"
if prediction == 3:
emotion = "happy"
if prediction == 4:
emotion = "neutral"
if prediction == 5:
emotion = "sad"
if prediction == 6:
emotion = "suprise"
display_image('./Screenshot/face_screenshot.jpg', emotion, 1)
if __name__ == "__main__":
main() | [
"torch.nn.Dropout",
"image_display.display_image",
"cv2.rectangle",
"cv2.imshow",
"torch.nn.Flatten",
"cv2.cvtColor",
"cv2.imwrite",
"torch.load",
"torch.nn.Linear",
"cv2.destroyAllWindows",
"cv2.resize",
"cv2.waitKey",
"numpy.asarray",
"torch.nn.Conv2d",
"torch.max",
"torchvision.tran... | [((264, 331), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(30)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=1, out_channels=30, kernel_size=3, padding=1)\n', (273, 331), False, 'from torch import nn\n'), ((339, 348), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (346, 348), False, 'from torch import nn\n'), ((354, 391), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (366, 391), False, 'from torch import nn\n'), ((396, 464), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(30)', 'out_channels': '(30)', 'kernel_size': '(7)', 'padding': '(2)'}), '(in_channels=30, out_channels=30, kernel_size=7, padding=2)\n', (405, 464), False, 'from torch import nn\n'), ((472, 481), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (479, 481), False, 'from torch import nn\n'), ((487, 524), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (499, 524), False, 'from torch import nn\n'), ((529, 598), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(30)', 'out_channels': '(30)', 'kernel_size': '(11)', 'padding': '(3)'}), '(in_channels=30, out_channels=30, kernel_size=11, padding=3)\n', (538, 598), False, 'from torch import nn\n'), ((606, 615), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (613, 615), False, 'from torch import nn\n'), ((621, 658), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (633, 658), False, 'from torch import nn\n'), ((663, 678), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (673, 678), False, 'from torch import nn\n'), ((683, 695), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (693, 695), False, 'from torch import nn\n'), ((701, 745), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(270)', 'out_features': '(256)'}), '(in_features=270, out_features=256)\n', (710, 
745), False, 'from torch import nn\n'), ((751, 760), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (758, 760), False, 'from torch import nn\n'), ((766, 781), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (776, 781), False, 'from torch import nn\n'), ((786, 830), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(256)', 'out_features': '(128)'}), '(in_features=256, out_features=128)\n', (795, 830), False, 'from torch import nn\n'), ((836, 845), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (843, 845), False, 'from torch import nn\n'), ((851, 866), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (861, 866), False, 'from torch import nn\n'), ((871, 913), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(128)', 'out_features': '(7)'}), '(in_features=128, out_features=7)\n', (880, 913), False, 'from torch import nn\n'), ((943, 971), 'torch.load', 'torch.load', (['"""./Finalmodel40"""'], {}), "('./Finalmodel40')\n", (953, 971), False, 'import torch\n'), ((1386, 1424), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['classifier_path'], {}), '(classifier_path)\n', (1407, 1424), False, 'import numpy as np, cv2, os\n'), ((1472, 1491), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1488, 1491), False, 'import numpy as np, cv2, os\n'), ((2226, 2249), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2247, 2249), False, 'import numpy as np, cv2, os\n'), ((2337, 2353), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (2347, 2353), False, 'import numpy as np, cv2, os\n'), ((2396, 2458), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(48, 48)', 'interpolation': 'cv2.INTER_CUBIC'}), '(img, dsize=(48, 48), interpolation=cv2.INTER_CUBIC)\n', (2406, 2458), False, 'import numpy as np, cv2, os\n'), ((2496, 2511), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (2506, 2511), True, 'import numpy as np, cv2, os\n'), ((2619, 2644), 'torch.max', 'torch.max', (['output.data', '(1)'], {}), 
'(output.data, 1)\n', (2628, 2644), False, 'import torch\n'), ((3061, 3122), 'image_display.display_image', 'display_image', (['"""./Screenshot/face_screenshot.jpg"""', 'emotion', '(1)'], {}), "('./Screenshot/face_screenshot.jpg', emotion, 1)\n", (3074, 3122), False, 'from image_display import display_image\n'), ((1000, 1010), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (1008, 1010), False, 'from torchvision.transforms import ToTensor\n'), ((1016, 1027), 'torchvision.transforms.Grayscale', 'Grayscale', ([], {}), '()\n', (1025, 1027), False, 'from torchvision.transforms import Grayscale\n'), ((1066, 1093), 'os.path.isdir', 'os.path.isdir', (['"""Screenshot"""'], {}), "('Screenshot')\n", (1079, 1093), False, 'import numpy as np, cv2, os\n'), ((1103, 1128), 'os.makedirs', 'os.makedirs', (['"""Screenshot"""'], {}), "('Screenshot')\n", (1114, 1128), False, 'import numpy as np, cv2, os\n'), ((1607, 1644), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1619, 1644), False, 'import numpy as np, cv2, os\n'), ((1911, 1933), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (1921, 1933), False, 'import numpy as np, cv2, os\n'), ((1942, 1992), 'cv2.imwrite', 'cv2.imwrite', (['"""Screenshot/face_screenshot.jpg"""', 'img'], {}), "('Screenshot/face_screenshot.jpg', img)\n", (1953, 1992), False, 'import numpy as np, cv2, os\n'), ((1828, 1888), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(0, 255, 255)', '(1)'], {}), '(img, (x, y), (x + w, y + h), (0, 255, 255), 1)\n', (1841, 1888), False, 'import numpy as np, cv2, os\n'), ((2050, 2065), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (2061, 2065), False, 'import numpy as np, cv2, os\n'), ((2085, 2099), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2096, 2099), False, 'import numpy as np, cv2, os\n')] |
from typing import Optional
import numpy as np
import torch
from sklearn.model_selection import KFold, cross_val_score
from sklearn.neural_network import MLPClassifier
def c2st(
    X: torch.Tensor,
    Y: torch.Tensor,
    seed: int = 1,
    n_folds: int = 5,
    scoring: str = "accuracy",
    z_score: bool = True,
    noise_scale: Optional[float] = None,
) -> torch.Tensor:
    """Classifier-based 2-sample test (C2ST).

    Trains scikit-learn MLP classifiers with N-fold cross-validation [1]
    (2 hidden layers of 10x dim each, where dim is the dimensionality of the
    samples) to distinguish X from Y, and returns the mean fold score.
    A score near 0.5 means the classifier cannot tell the samples apart.

    Args:
        X: Sample 1, shape (n_samples_x, dim)
        Y: Sample 2, shape (n_samples_y, dim)
        seed: Seed for the classifier and the fold shuffling
        n_folds: Number of folds
        scoring: sklearn scoring string, e.g. "accuracy" or "roc_auc"
        z_score: Z-scoring using X's mean and std (applied to both samples)
        noise_scale: If passed, will add Gaussian noise with std noise_scale to samples

    Returns:
        1-element float32 tensor holding the mean cross-validation score.

    References:
        [1]: https://scikit-learn.org/stable/modules/cross_validation.html
    """
    if z_score:
        X_mean = torch.mean(X, dim=0)
        X_std = torch.std(X, dim=0)
        X = (X - X_mean) / X_std
        Y = (Y - X_mean) / X_std

    if noise_scale is not None:
        # Out-of-place addition: the previous in-place `+=` silently mutated
        # the caller's tensors whenever z_score was False.
        X = X + noise_scale * torch.randn(X.shape)
        Y = Y + noise_scale * torch.randn(Y.shape)

    X = X.cpu().numpy()
    Y = Y.cpu().numpy()

    ndim = X.shape[1]
    clf = MLPClassifier(
        activation="relu",
        hidden_layer_sizes=(10 * ndim, 10 * ndim),
        max_iter=10000,
        solver="adam",
        random_state=seed,
    )

    data = np.concatenate((X, Y))
    # Label X-samples 0 and Y-samples 1.
    target = np.concatenate(
        (
            np.zeros((X.shape[0],)),
            np.ones((Y.shape[0],)),
        )
    )

    shuffle = KFold(n_splits=n_folds, shuffle=True, random_state=seed)
    scores = cross_val_score(clf, data, target, cv=shuffle, scoring=scoring)

    scores = np.asarray(np.mean(scores)).astype(np.float32)
    return torch.from_numpy(np.atleast_1d(scores))
def c2st_auc(
    X: torch.Tensor,
    Y: torch.Tensor,
    seed: int = 1,
    n_folds: int = 5,
    z_score: bool = True,
    noise_scale: Optional[float] = None,
) -> torch.Tensor:
    """Classifier-based 2-sample test returning the ROC AUC.

    Thin wrapper around ``c2st`` that fixes ``scoring="roc_auc"``.

    Args:
        X: Sample 1
        Y: Sample 2
        seed: Seed for sklearn
        n_folds: Number of folds
        z_score: Z-scoring using X
        noise_scale: If passed, will add Gaussian noise with std noise_scale to samples

    Returns:
        Metric
    """
    c2st_kwargs = dict(
        seed=seed,
        n_folds=n_folds,
        scoring="roc_auc",
        z_score=z_score,
        noise_scale=noise_scale,
    )
    return c2st(X, Y, **c2st_kwargs)
| [
"torch.mean",
"sklearn.model_selection.cross_val_score",
"numpy.zeros",
"torch.randn",
"sklearn.model_selection.KFold",
"numpy.ones",
"torch.std",
"numpy.mean",
"sklearn.neural_network.MLPClassifier",
"numpy.atleast_1d",
"numpy.concatenate"
] | [((1343, 1472), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'activation': '"""relu"""', 'hidden_layer_sizes': '(10 * ndim, 10 * ndim)', 'max_iter': '(10000)', 'solver': '"""adam"""', 'random_state': 'seed'}), "(activation='relu', hidden_layer_sizes=(10 * ndim, 10 * ndim),\n max_iter=10000, solver='adam', random_state=seed)\n", (1356, 1472), False, 'from sklearn.neural_network import MLPClassifier\n'), ((1528, 1550), 'numpy.concatenate', 'np.concatenate', (['(X, Y)'], {}), '((X, Y))\n', (1542, 1550), True, 'import numpy as np\n'), ((1694, 1750), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_folds', 'shuffle': '(True)', 'random_state': 'seed'}), '(n_splits=n_folds, shuffle=True, random_state=seed)\n', (1699, 1750), False, 'from sklearn.model_selection import KFold, cross_val_score\n'), ((1764, 1827), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['clf', 'data', 'target'], {'cv': 'shuffle', 'scoring': 'scoring'}), '(clf, data, target, cv=shuffle, scoring=scoring)\n', (1779, 1827), False, 'from sklearn.model_selection import KFold, cross_val_score\n'), ((1006, 1027), 'torch.mean', 'torch.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1016, 1027), False, 'import torch\n'), ((1044, 1064), 'torch.std', 'torch.std', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1053, 1064), False, 'import torch\n'), ((1917, 1938), 'numpy.atleast_1d', 'np.atleast_1d', (['scores'], {}), '(scores)\n', (1930, 1938), True, 'import numpy as np\n'), ((1191, 1211), 'torch.randn', 'torch.randn', (['X.shape'], {}), '(X.shape)\n', (1202, 1211), False, 'import torch\n'), ((1239, 1259), 'torch.randn', 'torch.randn', (['Y.shape'], {}), '(Y.shape)\n', (1250, 1259), False, 'import torch\n'), ((1602, 1625), 'numpy.zeros', 'np.zeros', (['(X.shape[0],)'], {}), '((X.shape[0],))\n', (1610, 1625), True, 'import numpy as np\n'), ((1639, 1661), 'numpy.ones', 'np.ones', (['(Y.shape[0],)'], {}), '((Y.shape[0],))\n', (1646, 1661), True, 'import numpy as 
np\n'), ((1853, 1868), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (1860, 1868), True, 'import numpy as np\n')] |
################################################################################
#                                                                              #
# SOD SHOCKTUBE                                                                #
#                                                                              #
################################################################################
from __future__ import print_function, division
import os
import sys; sys.dont_write_bytecode = True
sys.path.insert(0, '../script/')
sys.path.insert(0, '../script/analysis')
from subprocess import call
import glob
import numpy as np
import hdf5_to_dict as io
import util
from bhlight import bcall
from scipy.optimize import root
TMP_DIR = 'TMP'
util.safe_remove(TMP_DIR)
PROBLEM = 'sod'
# Command-line switches controlling build flavor and output mode.
AUTO = '-auto' in sys.argv
TABLE = '-table' in sys.argv
FORCE = '-force' in sys.argv
RELTABLE = '-reltable' in sys.argv
# Time/velocity rescaling applied when reading the dump; reset to 1 for the
# relativistic-table build below.
tscale = 1.e-2
gam = 1.4
if AUTO and not RELTABLE:
  import pickle
else:
  # Headless backend so the figure can be written without a display.
  import matplotlib
  matplotlib.use('Agg')
  import matplotlib.pyplot as plt
os.chdir('../prob/' + PROBLEM)
# COMPILE CODE
args = [sys.executable, 'build.py', '-dir', TMP_DIR]
if TABLE:
  args.append('-table')
if FORCE:
  args.append('-force')
if RELTABLE:
  args.append('-reltable')
  tscale = 1.0
call(args)
if TABLE:
  # The build emits the tabulated EOS next to the sources; move it into TMP.
  call(['mv', 'sc_eos_gamma_{}.h5'.format(str(gam).replace('.','p')), TMP_DIR])
os.chdir('../../test/')
call(['mv', '../prob/' + PROBLEM + '/' + TMP_DIR, './'])
# RUN EXECUTABLE
os.chdir(TMP_DIR)
bcall(['./bhlight', '-p', 'param_template.dat'])
os.chdir('../')
# READ SIMULATION OUTPUT (last dump file = final state of the run)
dfiles = np.sort(glob.glob(os.path.join(TMP_DIR,'')+'/dumps/dump*.h5'))
hdr = io.load_hdr(dfiles[0])
geom = io.load_geom(hdr)
dump = io.load_dump(dfiles[-1], geom)
# 1-D slices along x; pressure/velocity/energy are undone from the code's
# tscale rescaling so they compare directly with the analytic solution.
x_code = geom['x'][:,0,0]
rho_code = dump['RHO'][:,0,0]
P_code = dump['PRESS'][:,0,0]/(tscale*tscale)
u_code = dump['U1'][:,0,0]/(tscale)
eps_code = (dump['UU'][:,0,0]/dump['RHO'][:,0,0])/(tscale*tscale)
if RELTABLE:
  ye_code = dump['Ye'][:,0,0]
if RELTABLE:
  # Relativistic-table mode: only plot the raw code output, no analytic
  # comparison, then exit early.
  import matplotlib as mpl; mpl.use('Agg')
  import matplotlib.pyplot as plt
  fig = plt.figure(figsize=(16.18,10))
  ax = fig.add_subplot(2,2,1)
  ax.plot(x_code,rho_code,label='density')
  ax.plot(x_code,ye_code,'r--',label=r'$Y_e$')
  plt.legend()
  plt.xlabel('x'); plt.ylabel('Density')
  ax = fig.add_subplot(2,2,2)
  ax.plot(x_code,P_code)
  plt.xlabel('x'); plt.ylabel('Pressure')
  ax = fig.add_subplot(2,2,3)
  ax.plot(x_code,u_code)
  plt.xlabel('x'); plt.ylabel('Velocity')
  ax = fig.add_subplot(2,2,4)
  ax.plot(x_code,eps_code)
  plt.xlabel('x'); plt.ylabel('Specific Internal Energy')
  plt.subplots_adjust(wspace=0.15)
  plt.savefig('sod_rel.png', bbox_inches='tight')
  util.safe_remove(TMP_DIR)
  sys.exit()
# GET ANALYTIC SOLUTION (ADAPTED FROM BRUCE FRYXELL'S exact_riemann.f)
# Standard Sod initial data: region 1 (left) / region 5 (right), evaluated
# at time t with the diaphragm at x0.
x0 = 0.5
t = 0.25
rho1 = 1.; P1 = 1.; u1 = 0.
rho5 = 0.125; P5 = 0.1; u5 = 0.
cs1 = np.sqrt(gam*P1/rho1)
cs5 = np.sqrt(gam*P5/rho5)
Gam = (gam-1.)/(gam+1.)
beta = (gam-1.)/(2.*gam)
def func(x):
  """Residual whose root is the post-shock pressure P3.

  Equates the gas speed behind the shock (from the jump conditions across
  regions 4/5) with the gas speed at the tail of the rarefaction fan
  (regions 1/3); the two must match across the contact discontinuity.

  Args:
    x: length-1 sequence holding the trial pressure.
  """
  trial_P3 = x[0]
  # Speed of the gas behind the shock.
  shock_side = (trial_P3 - P5)*np.sqrt((1. - Gam)/(rho5*(trial_P3 + Gam*P5)))
  # Speed at the tail of the rarefaction fan.
  fan_side = (P1**beta - trial_P3**beta)*np.sqrt((1. - Gam**2.)*P1**(1./gam)/(Gam**2*rho1))
  return fan_side - shock_side
# Solve for the post-shock pressure, then reconstruct the remaining states.
P3 = root(func, [(P1+P5)/2.]).x[0]
P4 = P3
rho3 = rho1*(P3/P1)**(1./gam)
rho4 = rho5*(P4 + Gam*P5)/(P5 + Gam*P4)
u4 = cs5*(P4/P5-1)/(gam*np.sqrt(1. + (gam+1.)/(2.*gam)*(P4/P5-1.)))
ushock = cs5*np.sqrt(1. + (gam+1.)/(2.*gam)*(P4/P5-1.))
u3 = u4
cs3 = np.sqrt(gam*P3/rho3)
# Wave positions at time t: shock, contact discontinuity, fan tail, fan head.
xsh = x0 + ushock*t
xcd = x0 + u3*t
xft = 0.5 + (u3-cs3)*t
xhd = 0.5 - cs1*t
# Sample the piecewise analytic solution on a uniform grid.
N = 1024
x = np.linspace(0, 1, 1024)
rho = np.zeros(N)
P = np.zeros(N)
u = np.zeros(N)
for n in range(N):
  if x[n] < xhd:
    # Undisturbed left state.
    rho[n] = rho1
    P[n] = P1
    u[n] = u1
  elif x[n] < xft:
    # Inside the rarefaction fan: self-similar solution.
    u[n] = 2./(gam+1.)*(cs1 + (x[n] - x0)/t)
    fac = 1. - 0.5*(gam-1.)*u[n]/cs1
    rho[n] = rho1*fac**(2./(gam-1.))
    P[n] = P1*fac**(2.*gam/(gam-1.))
  elif x[n] < xcd:
    # Between fan tail and contact discontinuity.
    rho[n] = rho3
    P[n] = P3
    u[n] = u3
  elif x[n] < xsh:
    # Between contact discontinuity and shock.
    rho[n] = rho4
    P[n] = P4
    u[n] = u4
  else:
    # Undisturbed right state.
    rho[n] = rho5
    P[n] = P5
    u[n] = u5
if AUTO:
  # Automated-test mode: dump solution + code output for external comparison.
  data = {}
  data['SOL'] = [x, rho]
  data['CODE'] = [x_code, rho_code]
  pickle.dump(data, open('data.p', 'wb'))
  # CLEAN UP
  util.safe_remove(TMP_DIR)
  sys.exit()
# MAKE FIGURE (code output as red dots, analytic solution as black lines)
code_col = 'r'; code_ls = ''; code_mrk = '.'
sol_col = 'k'; sol_ls = '-'; sol_mrk = ''
fig = plt.figure(figsize=(16.18,10))
ax = fig.add_subplot(2,2,1)
ax.plot(x_code, rho_code, color=code_col, linestyle=code_ls, marker=code_mrk)
ax.plot(x, rho, color=sol_col, linestyle=sol_ls, marker=sol_mrk)
plt.xlabel('x'); plt.ylabel('Density')
plt.ylim([0, 1.1])
ax = fig.add_subplot(2,2,2)
ax.plot(x_code, P_code, color=code_col, linestyle=code_ls, marker=code_mrk)
ax.plot(x, P, color=sol_col, linestyle=sol_ls, marker=sol_mrk)
plt.xlabel('x'); plt.ylabel('Pressure')
plt.ylim([0, 1.1])
ax = fig.add_subplot(2,2,3)
ax.plot(x_code, u_code, color=code_col, linestyle=code_ls, marker=code_mrk)
ax.plot(x, u, color=sol_col, linestyle=sol_ls, marker=sol_mrk)
plt.xlabel('x'); plt.ylabel('Velocity')
plt.ylim([0, 1.1])
ax = fig.add_subplot(2,2,4)
ax.plot(x_code, P_code/rho_code, color=code_col, linestyle=code_ls, marker=code_mrk)
ax.plot(x, P/rho, color=sol_col, linestyle=sol_ls, marker=sol_mrk)
plt.xlabel('x'); plt.ylabel('Temperature')
plt.ylim([0.7, 1.2])
plt.subplots_adjust(wspace=0.15)
plt.savefig('sod.png', bbox_inches='tight')
# CLEAN UP
util.safe_remove(TMP_DIR)
| [
"matplotlib.pyplot.figure",
"os.path.join",
"os.chdir",
"bhlight.bcall",
"hdf5_to_dict.load_hdr",
"numpy.linspace",
"hdf5_to_dict.load_geom",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.use",
"subprocess.call",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.yla... | [((507, 539), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../script/"""'], {}), "(0, '../script/')\n", (522, 539), False, 'import sys\n'), ((540, 580), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../script/analysis"""'], {}), "(0, '../script/analysis')\n", (555, 580), False, 'import sys\n'), ((753, 778), 'util.safe_remove', 'util.safe_remove', (['TMP_DIR'], {}), '(TMP_DIR)\n', (769, 778), False, 'import util\n'), ((1069, 1099), 'os.chdir', 'os.chdir', (["('../prob/' + PROBLEM)"], {}), "('../prob/' + PROBLEM)\n", (1077, 1099), False, 'import os\n'), ((1292, 1302), 'subprocess.call', 'call', (['args'], {}), '(args)\n', (1296, 1302), False, 'from subprocess import call\n'), ((1393, 1416), 'os.chdir', 'os.chdir', (['"""../../test/"""'], {}), "('../../test/')\n", (1401, 1416), False, 'import os\n'), ((1417, 1473), 'subprocess.call', 'call', (["['mv', '../prob/' + PROBLEM + '/' + TMP_DIR, './']"], {}), "(['mv', '../prob/' + PROBLEM + '/' + TMP_DIR, './'])\n", (1421, 1473), False, 'from subprocess import call\n'), ((1492, 1509), 'os.chdir', 'os.chdir', (['TMP_DIR'], {}), '(TMP_DIR)\n', (1500, 1509), False, 'import os\n'), ((1510, 1558), 'bhlight.bcall', 'bcall', (["['./bhlight', '-p', 'param_template.dat']"], {}), "(['./bhlight', '-p', 'param_template.dat'])\n", (1515, 1558), False, 'from bhlight import bcall\n'), ((1559, 1574), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (1567, 1574), False, 'import os\n'), ((1679, 1701), 'hdf5_to_dict.load_hdr', 'io.load_hdr', (['dfiles[0]'], {}), '(dfiles[0])\n', (1690, 1701), True, 'import hdf5_to_dict as io\n'), ((1709, 1726), 'hdf5_to_dict.load_geom', 'io.load_geom', (['hdr'], {}), '(hdr)\n', (1721, 1726), True, 'import hdf5_to_dict as io\n'), ((1734, 1764), 'hdf5_to_dict.load_dump', 'io.load_dump', (['dfiles[-1]', 'geom'], {}), '(dfiles[-1], geom)\n', (1746, 1764), True, 'import hdf5_to_dict as io\n'), ((2921, 2945), 'numpy.sqrt', 'np.sqrt', (['(gam * P1 / rho1)'], {}), '(gam 
* P1 / rho1)\n', (2928, 2945), True, 'import numpy as np\n'), ((2948, 2972), 'numpy.sqrt', 'np.sqrt', (['(gam * P5 / rho5)'], {}), '(gam * P5 / rho5)\n', (2955, 2972), True, 'import numpy as np\n'), ((3441, 3465), 'numpy.sqrt', 'np.sqrt', (['(gam * P3 / rho3)'], {}), '(gam * P3 / rho3)\n', (3448, 3465), True, 'import numpy as np\n'), ((3552, 3575), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1024)'], {}), '(0, 1, 1024)\n', (3563, 3575), True, 'import numpy as np\n'), ((3582, 3593), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (3590, 3593), True, 'import numpy as np\n'), ((3600, 3611), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (3608, 3611), True, 'import numpy as np\n'), ((3618, 3629), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (3626, 3629), True, 'import numpy as np\n'), ((4382, 4413), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16.18, 10)'}), '(figsize=(16.18, 10))\n', (4392, 4413), True, 'import matplotlib.pyplot as plt\n'), ((4585, 4600), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (4595, 4600), True, 'import matplotlib.pyplot as plt\n'), ((4602, 4623), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density"""'], {}), "('Density')\n", (4612, 4623), True, 'import matplotlib.pyplot as plt\n'), ((4624, 4642), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1.1]'], {}), '([0, 1.1])\n', (4632, 4642), True, 'import matplotlib.pyplot as plt\n'), ((4811, 4826), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (4821, 4826), True, 'import matplotlib.pyplot as plt\n'), ((4828, 4850), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pressure"""'], {}), "('Pressure')\n", (4838, 4850), True, 'import matplotlib.pyplot as plt\n'), ((4851, 4869), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1.1]'], {}), '([0, 1.1])\n', (4859, 4869), True, 'import matplotlib.pyplot as plt\n'), ((5038, 5053), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (5048, 5053), True, 'import 
matplotlib.pyplot as plt\n'), ((5055, 5077), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Velocity"""'], {}), "('Velocity')\n", (5065, 5077), True, 'import matplotlib.pyplot as plt\n'), ((5078, 5096), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1.1]'], {}), '([0, 1.1])\n', (5086, 5096), True, 'import matplotlib.pyplot as plt\n'), ((5278, 5293), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (5288, 5293), True, 'import matplotlib.pyplot as plt\n'), ((5295, 5320), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Temperature"""'], {}), "('Temperature')\n", (5305, 5320), True, 'import matplotlib.pyplot as plt\n'), ((5321, 5341), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.7, 1.2]'], {}), '([0.7, 1.2])\n', (5329, 5341), True, 'import matplotlib.pyplot as plt\n'), ((5343, 5375), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.15)'}), '(wspace=0.15)\n', (5362, 5375), True, 'import matplotlib.pyplot as plt\n'), ((5377, 5420), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""sod.png"""'], {'bbox_inches': '"""tight"""'}), "('sod.png', bbox_inches='tight')\n", (5388, 5420), True, 'import matplotlib.pyplot as plt\n'), ((5433, 5458), 'util.safe_remove', 'util.safe_remove', (['TMP_DIR'], {}), '(TMP_DIR)\n', (5449, 5458), False, 'import util\n'), ((1012, 1033), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (1026, 1033), False, 'import matplotlib\n'), ((2061, 2075), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (2068, 2075), True, 'import matplotlib as mpl\n'), ((2118, 2149), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16.18, 10)'}), '(figsize=(16.18, 10))\n', (2128, 2149), True, 'import matplotlib.pyplot as plt\n'), ((2272, 2284), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2282, 2284), True, 'import matplotlib.pyplot as plt\n'), ((2287, 2302), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2297, 2302), True, 'import 
matplotlib.pyplot as plt\n'), ((2304, 2325), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density"""'], {}), "('Density')\n", (2314, 2325), True, 'import matplotlib.pyplot as plt\n'), ((2384, 2399), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2394, 2399), True, 'import matplotlib.pyplot as plt\n'), ((2401, 2423), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pressure"""'], {}), "('Pressure')\n", (2411, 2423), True, 'import matplotlib.pyplot as plt\n'), ((2482, 2497), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2492, 2497), True, 'import matplotlib.pyplot as plt\n'), ((2499, 2521), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Velocity"""'], {}), "('Velocity')\n", (2509, 2521), True, 'import matplotlib.pyplot as plt\n'), ((2582, 2597), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2592, 2597), True, 'import matplotlib.pyplot as plt\n'), ((2599, 2637), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Specific Internal Energy"""'], {}), "('Specific Internal Energy')\n", (2609, 2637), True, 'import matplotlib.pyplot as plt\n'), ((2641, 2673), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.15)'}), '(wspace=0.15)\n', (2660, 2673), True, 'import matplotlib.pyplot as plt\n'), ((2676, 2723), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""sod_rel.png"""'], {'bbox_inches': '"""tight"""'}), "('sod_rel.png', bbox_inches='tight')\n", (2687, 2723), True, 'import matplotlib.pyplot as plt\n'), ((2726, 2751), 'util.safe_remove', 'util.safe_remove', (['TMP_DIR'], {}), '(TMP_DIR)\n', (2742, 2751), False, 'import util\n'), ((2754, 2764), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2762, 2764), False, 'import sys\n'), ((3384, 3442), 'numpy.sqrt', 'np.sqrt', (['(1.0 + (gam + 1.0) / (2.0 * gam) * (P4 / P5 - 1.0))'], {}), '(1.0 + (gam + 1.0) / (2.0 * gam) * (P4 / P5 - 1.0))\n', (3391, 3442), True, 'import numpy as np\n'), ((4234, 4259), 'util.safe_remove', 
'util.safe_remove', (['TMP_DIR'], {}), '(TMP_DIR)\n', (4250, 4259), False, 'import util\n'), ((4262, 4272), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4270, 4272), False, 'import sys\n'), ((3058, 3105), 'numpy.sqrt', 'np.sqrt', (['((1.0 - Gam) / (rho5 * (P3 + Gam * P5)))'], {}), '((1.0 - Gam) / (rho5 * (P3 + Gam * P5)))\n', (3065, 3105), True, 'import numpy as np\n'), ((3126, 3193), 'numpy.sqrt', 'np.sqrt', (['((1.0 - Gam ** 2.0) * P1 ** (1.0 / gam) / (Gam ** 2 * rho1))'], {}), '((1.0 - Gam ** 2.0) * P1 ** (1.0 / gam) / (Gam ** 2 * rho1))\n', (3133, 3193), True, 'import numpy as np\n'), ((3195, 3224), 'scipy.optimize.root', 'root', (['func', '[(P1 + P5) / 2.0]'], {}), '(func, [(P1 + P5) / 2.0])\n', (3199, 3224), False, 'from scipy.optimize import root\n'), ((3327, 3385), 'numpy.sqrt', 'np.sqrt', (['(1.0 + (gam + 1.0) / (2.0 * gam) * (P4 / P5 - 1.0))'], {}), '(1.0 + (gam + 1.0) / (2.0 * gam) * (P4 / P5 - 1.0))\n', (3334, 3385), True, 'import numpy as np\n'), ((1628, 1653), 'os.path.join', 'os.path.join', (['TMP_DIR', '""""""'], {}), "(TMP_DIR, '')\n", (1640, 1653), False, 'import os\n')] |
'''
open3d visualizer
'''
import carla_utils as cu
from carla_utils import carla
import numpy as np
import open3d
import open3d
import os
import glob
from ..system import Clock, parse_yaml_file_unsafe
from ..basic import HomogeneousMatrix
from ..augment import ActorVertices
from ..world_map import connect_to_server
from .tools import default_argparser
def calculate_vis_bounding_box(vehicle : carla.Vehicle):
    """Build an open3d LineSet arrow outlining *vehicle*, tinted with its color.

    Args:
        vehicle: carla vehicle actor; its 'color' attribute is expected to be
            an 'R,G,B' string of 0-255 components.

    Returns:
        open3d.geometry.LineSet of the 2-D arrow lifted into the z = 0 plane.
    """
    color_str = vehicle.attributes.get('color', '190,190,190')
    # Parse the 'R,G,B' attribute explicitly instead of eval()-ing an
    # actor-supplied string; normalize to [0, 1] floats for open3d.
    color = np.array([float(c) for c in color_str.split(',')], dtype=np.float64) / 255
    line_set = open3d.geometry.LineSet()
    vertices, lines = ActorVertices.d2arrow(vehicle)
    # Lift 2-D vertices to 3-D by appending a zero z coordinate.
    vertices = np.hstack((vertices, np.zeros((vertices.shape[0],1))))
    colors = np.expand_dims(color, axis=0).repeat(len(lines), axis=0)
    line_set.points = open3d.utility.Vector3dVector(vertices)
    line_set.lines = open3d.utility.Vector2iVector(lines)
    line_set.colors = open3d.utility.Vector3dVector(colors)
    return line_set
def get_fixed_boundary(color_open3d : np.ndarray):
    """Rectangular frame of the fixed 160 x 90 m viewing area as a LineSet.

    Args:
        color_open3d: RGB color in [0, 1] applied to all four edges.
    """
    half_x, half_y, z = 80, 45, 0
    # Corners in order, closed back to the start by the (3, 0) edge.
    corners = np.array([
        [ half_x,  half_y, z],
        [-half_x,  half_y, z],
        [-half_x, -half_y, z],
        [ half_x, -half_y, z],
    ]).astype(np.float64)
    edges = np.array([[i, (i + 1) % 4] for i in range(4)])
    edge_colors = np.tile(color_open3d, (len(edges), 1))
    boundary = open3d.geometry.LineSet()
    boundary.points = open3d.utility.Vector3dVector(corners)
    boundary.lines = open3d.utility.Vector2iVector(edges)
    boundary.colors = open3d.utility.Vector3dVector(edge_colors)
    return boundary
def calculate_perception_range(vehicle, line_set, perception_range):
    """Fill *line_set* with a circle of radius *perception_range* around *vehicle*.

    The circle is sampled every ~2 degrees at the vehicle's current location
    and drawn in the vehicle's color. The passed LineSet is mutated in place
    and also returned.

    Args:
        vehicle: carla vehicle actor (provides transform and 'color' attribute).
        line_set: open3d.geometry.LineSet to overwrite.
        perception_range: circle radius in meters.

    Returns:
        The updated *line_set*.
    """
    current_transform = vehicle.get_transform()
    x, y, z = current_transform.location.x, current_transform.location.y, current_transform.location.z
    resolution = np.deg2rad(2)
    rads = np.linspace(-np.pi, np.pi, int(np.pi / resolution))
    points, lines = [], []
    for i, rad in enumerate(rads):
        point = [x + perception_range*np.cos(rad), y + perception_range*np.sin(rad), z]
        points.append(point)
        # Connect consecutive samples; the modulo closes the circle.
        lines.append([i, (i+1)%len(rads)])
    points = np.array(points, dtype=np.float64)
    lines = np.array(lines)
    color_str = vehicle.attributes.get('color', '190,190,190')
    # Parse the 'R,G,B' attribute explicitly instead of eval()-ing an
    # actor-supplied string; normalize to [0, 1] floats for open3d.
    color = np.array([float(c) for c in color_str.split(',')], dtype=np.float64) / 255
    colors = np.expand_dims(color, axis=0).repeat(len(lines), axis=0)
    line_set.points = open3d.utility.Vector3dVector(points)
    line_set.lines = open3d.utility.Vector2iVector(lines)
    line_set.colors = open3d.utility.Vector3dVector(colors)
    return line_set
def get_road_geometry_center(global_paths):
    """One grey LineSet per path, tracing each path's centerline at z = 0."""
    grey = np.array([100, 100, 100], np.float64) / 255
    line_sets = []
    for path in global_paths:
        n = len(path)
        # 2-D centerline points lifted into 3-D (z = 0).
        centers_2d = np.stack([path.x, path.y]).T
        pts = np.hstack((centers_2d, np.zeros((n, 1))))
        # Consecutive-point segments: (0,1), (1,2), ..., (n-2, n-1).
        segs = np.vstack([np.arange(0, n - 1), np.arange(1, n)]).T
        seg_colors = np.expand_dims(grey, axis=0).repeat(len(segs), axis=0)
        ls = open3d.geometry.LineSet()
        ls.points = open3d.utility.Vector3dVector(pts)
        ls.lines = open3d.utility.Vector2iVector(segs)
        ls.colors = open3d.utility.Vector3dVector(seg_colors)
        line_sets.append(ls)
    return line_sets
def get_road_geometry_side(town_map):
    """Grey LineSets of the left and right lane boundaries of every stored path.

    Loads all global paths shipped under utils/global_path and, for each,
    offsets the centerline by half the lane width to either side.
    """
    grey = np.array([150, 150, 150], np.float64) / 255
    files = glob.glob(os.path.split(os.path.abspath(__file__))[0] + '/../utils/global_path/*')
    global_paths = [cu.GlobalPath.read_from_disk(town_map, f) for f in files]
    line_sets = []
    for path in global_paths:
        n = len(path)
        lane_widths = np.array([w.lane_width for w in path.carla_waypoints]).reshape(n, 1)
        thetas = np.array(path.theta).reshape(n, 1)
        # Unit normals pointing 90 degrees left of the driving direction.
        normals = np.hstack([np.cos(thetas + np.pi/2), np.sin(thetas + np.pi/2)])
        centers = np.stack([path.x, path.y]).T
        segs = np.vstack([np.arange(0, n - 1), np.arange(1, n)]).T
        seg_colors = np.expand_dims(grey, axis=0).repeat(len(segs), axis=0)
        # Left boundary first, then right (matching the original ordering).
        for sign in (1.0, -1.0):
            boundary = centers + sign * lane_widths/2 * normals
            ls = open3d.geometry.LineSet()
            ls.points = open3d.utility.Vector3dVector(np.hstack((boundary, np.zeros((n, 1)))))
            ls.lines = open3d.utility.Vector2iVector(segs)
            ls.colors = open3d.utility.Vector3dVector(seg_colors)
            line_sets.append(ls)
    return line_sets
class VehiclesVisualizer(object):
    """Live top-down open3d view of every vehicle in a CARLA world.

    A fixed pool of ``max_vehicles`` LineSet geometries is pre-registered
    with the visualizer; each frame the pool entries are rewritten in place
    (or cleared) rather than adding/removing geometries.
    """

    def __init__(self, config, view_pose=None):
        """
        Args:
            config: object providing host, port, timeout, map_name and
                max_vehicles attributes.
            view_pose: optional [x, y, z, roll, pitch, yaw] camera pose;
                defaults to a bird's-eye view 60 m above the origin.
        """
        self.config = config
        host, port, timeout, map_name = config.host, config.port, config.timeout, config.map_name
        self.client, self.world, self.town_map = connect_to_server(host, port, timeout, map_name)
        self.clock = Clock(10)
        self.max_vehicles = config.max_vehicles  # size of the geometry pool
        self.window_name = 'Vehicles Visualisation Example' + ' ' + host + ':' + str(port)

        self.vis = open3d.visualization.Visualizer()
        self.vis.create_window(window_name=self.window_name, width=1000, height=1000, left=0, top=0)
        self.view_pose = [0, 0, 60, 0, 0, -np.pi/2] if view_pose is None else view_pose

        render_option = self.vis.get_render_option()
        self.background_color = np.array([0.1529, 0.1569, 0.1333], np.float32)
        render_option.background_color = self.background_color
        render_option.point_color_option = open3d.visualization.PointColorOption.ZCoordinate

        view_control = self.vis.get_view_control()
        params = view_control.convert_to_pinhole_camera_parameters()
        params.extrinsic = HomogeneousMatrix.xyzrpy(self.view_pose)
        view_control.convert_from_pinhole_camera_parameters(params)

        # Static boundary frame plus the fixed pool of per-vehicle boxes.
        self.vis.add_geometry(get_fixed_boundary(self.background_color))
        self.bounding_boxs = [open3d.geometry.LineSet() for _ in range(self.max_vehicles)]
        for bounding_box in self.bounding_boxs:
            self.vis.add_geometry(bounding_box)

    def run_step(self, vehicles):
        """Rewrite the pooled LineSets from *vehicles*; clear unused entries.

        Only the first ``max_vehicles`` vehicles are drawn; any extras are
        ignored.
        """
        num_drawn = min(len(vehicles), self.max_vehicles)
        for i in range(num_drawn):
            vehicle, bounding_box = vehicles[i], self.bounding_boxs[i]
            new_bounding_box = calculate_vis_bounding_box(vehicle)
            bounding_box.points = new_bounding_box.points
            bounding_box.lines = new_bounding_box.lines
            bounding_box.colors = new_bounding_box.colors
        # Clear only the unused pool entries. The previous code iterated up to
        # max(len(vehicles), max_vehicles) and raised IndexError whenever more
        # vehicles than pooled geometries were present.
        for i in range(num_drawn, self.max_vehicles):
            self.bounding_boxs[i].clear()
        return

    def update_vis(self):
        """Push geometry changes to the window and process GUI events."""
        # NOTE(review): legacy open3d API; newer releases require
        # update_geometry(geometry) with an explicit argument — confirm the
        # pinned open3d version before upgrading.
        self.vis.update_geometry()
        self.vis.poll_events()
        self.vis.update_renderer()

    def run(self):
        """Blocking loop: fetch vehicles from the server and redraw at ~10 Hz."""
        while True:
            self.clock.tick_begin()
            actors = self.world.get_actors()
            vehicles = actors.filter('*vehicle*')
            self.run_step(list(vehicles))
            self.update_vis()
            self.clock.tick_end()
if __name__ == "__main__":
    print(__doc__)
    import os
    from os.path import join
    # Load the user config; fall back to the packaged default next to this file.
    try:
        config = parse_yaml_file_unsafe('./config/carla.yaml')
    except FileNotFoundError:
        print('[vehicle_visualizer] use default config.')
        file_dir = os.path.dirname(__file__)
        config = parse_yaml_file_unsafe(join(file_dir, './default_carla.yaml'))
    # Command-line arguments override values from the YAML config.
    args = default_argparser().parse_args()
    config.update(args)

    vehicles_visualizer = VehiclesVisualizer(config, )
    # Run until interrupted from the keyboard.
    try:
        vehicles_visualizer.run()
    except KeyboardInterrupt:
        print('canceled by user')
| [
"numpy.stack",
"os.path.abspath",
"open3d.visualization.Visualizer",
"carla_utils.GlobalPath.read_from_disk",
"numpy.deg2rad",
"os.path.join",
"os.path.dirname",
"open3d.geometry.LineSet",
"numpy.zeros",
"numpy.expand_dims",
"open3d.utility.Vector2iVector",
"numpy.array",
"numpy.sin",
"num... | [((556, 581), 'open3d.geometry.LineSet', 'open3d.geometry.LineSet', ([], {}), '()\n', (579, 581), False, 'import open3d\n'), ((799, 838), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['vertices'], {}), '(vertices)\n', (828, 838), False, 'import open3d\n'), ((860, 896), 'open3d.utility.Vector2iVector', 'open3d.utility.Vector2iVector', (['lines'], {}), '(lines)\n', (889, 896), False, 'import open3d\n'), ((919, 956), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (948, 956), False, 'import open3d\n'), ((1081, 1106), 'open3d.geometry.LineSet', 'open3d.geometry.LineSet', ([], {}), '()\n', (1104, 1106), False, 'import open3d\n'), ((1236, 1278), 'numpy.array', 'np.array', (['[[0, 1], [1, 2], [2, 3], [3, 0]]'], {}), '([[0, 1], [1, 2], [2, 3], [3, 0]])\n', (1244, 1278), True, 'import numpy as np\n'), ((1379, 1416), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (1408, 1416), False, 'import open3d\n'), ((1438, 1474), 'open3d.utility.Vector2iVector', 'open3d.utility.Vector2iVector', (['lines'], {}), '(lines)\n', (1467, 1474), False, 'import open3d\n'), ((1497, 1534), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (1526, 1534), False, 'import open3d\n'), ((1796, 1809), 'numpy.deg2rad', 'np.deg2rad', (['(2)'], {}), '(2)\n', (1806, 1809), True, 'import numpy as np\n'), ((2113, 2147), 'numpy.array', 'np.array', (['points'], {'dtype': 'np.float64'}), '(points, dtype=np.float64)\n', (2121, 2147), True, 'import numpy as np\n'), ((2160, 2175), 'numpy.array', 'np.array', (['lines'], {}), '(lines)\n', (2168, 2175), True, 'import numpy as np\n'), ((2388, 2425), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (2417, 2425), False, 'import open3d\n'), ((2447, 2483), 'open3d.utility.Vector2iVector', 'open3d.utility.Vector2iVector', (['lines'], {}), 
'(lines)\n', (2476, 2483), False, 'import open3d\n'), ((2506, 2543), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (2535, 2543), False, 'import open3d\n'), ((2622, 2659), 'numpy.array', 'np.array', (['[100, 100, 100]', 'np.float64'], {}), '([100, 100, 100], np.float64)\n', (2630, 2659), True, 'import numpy as np\n'), ((2974, 2999), 'open3d.geometry.LineSet', 'open3d.geometry.LineSet', ([], {}), '()\n', (2997, 2999), False, 'import open3d\n'), ((3135, 3171), 'open3d.utility.Vector2iVector', 'open3d.utility.Vector2iVector', (['lines'], {}), '(lines)\n', (3164, 3171), False, 'import open3d\n'), ((3198, 3235), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (3227, 3235), False, 'import open3d\n'), ((3343, 3380), 'numpy.array', 'np.array', (['[150, 150, 150]', 'np.float64'], {}), '([150, 150, 150], np.float64)\n', (3351, 3380), True, 'import numpy as np\n'), ((3503, 3544), 'carla_utils.GlobalPath.read_from_disk', 'cu.GlobalPath.read_from_disk', (['town_map', 'i'], {}), '(town_map, i)\n', (3531, 3544), True, 'import carla_utils as cu\n'), ((4157, 4182), 'open3d.geometry.LineSet', 'open3d.geometry.LineSet', ([], {}), '()\n', (4180, 4182), False, 'import open3d\n'), ((4376, 4412), 'open3d.utility.Vector2iVector', 'open3d.utility.Vector2iVector', (['lines'], {}), '(lines)\n', (4405, 4412), False, 'import open3d\n'), ((4439, 4476), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (4468, 4476), False, 'import open3d\n'), ((4550, 4575), 'open3d.geometry.LineSet', 'open3d.geometry.LineSet', ([], {}), '()\n', (4573, 4575), False, 'import open3d\n'), ((4771, 4807), 'open3d.utility.Vector2iVector', 'open3d.utility.Vector2iVector', (['lines'], {}), '(lines)\n', (4800, 4807), False, 'import open3d\n'), ((4834, 4871), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (4863, 4871), False, 
'import open3d\n'), ((5469, 5502), 'open3d.visualization.Visualizer', 'open3d.visualization.Visualizer', ([], {}), '()\n', (5500, 5502), False, 'import open3d\n'), ((5778, 5824), 'numpy.array', 'np.array', (['[0.1529, 0.1569, 0.1333]', 'np.float32'], {}), '([0.1529, 0.1569, 0.1333], np.float32)\n', (5786, 5824), True, 'import numpy as np\n'), ((672, 704), 'numpy.zeros', 'np.zeros', (['(vertices.shape[0], 1)'], {}), '((vertices.shape[0], 1))\n', (680, 704), True, 'import numpy as np\n'), ((719, 748), 'numpy.expand_dims', 'np.expand_dims', (['color'], {'axis': '(0)'}), '(color, axis=0)\n', (733, 748), True, 'import numpy as np\n'), ((1120, 1215), 'numpy.array', 'np.array', (['[[max_x, max_y, z], [-max_x, max_y, z], [-max_x, -max_y, z], [max_x, -max_y, z]\n ]'], {}), '([[max_x, max_y, z], [-max_x, max_y, z], [-max_x, -max_y, z], [\n max_x, -max_y, z]])\n', (1128, 1215), True, 'import numpy as np\n'), ((1292, 1328), 'numpy.expand_dims', 'np.expand_dims', (['color_open3d'], {'axis': '(0)'}), '(color_open3d, axis=0)\n', (1306, 1328), True, 'import numpy as np\n'), ((2308, 2337), 'numpy.expand_dims', 'np.expand_dims', (['color'], {'axis': '(0)'}), '(color, axis=0)\n', (2322, 2337), True, 'import numpy as np\n'), ((2741, 2781), 'numpy.stack', 'np.stack', (['[global_path.x, global_path.y]'], {}), '([global_path.x, global_path.y])\n', (2749, 2781), True, 'import numpy as np\n'), ((3907, 3947), 'numpy.stack', 'np.stack', (['[global_path.x, global_path.y]'], {}), '([global_path.x, global_path.y])\n', (3915, 3947), True, 'import numpy as np\n'), ((6503, 6528), 'open3d.geometry.LineSet', 'open3d.geometry.LineSet', ([], {}), '()\n', (6526, 6528), False, 'import open3d\n'), ((7943, 7968), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7958, 7968), False, 'import os\n'), ((2897, 2926), 'numpy.expand_dims', 'np.expand_dims', (['color'], {'axis': '(0)'}), '(color, axis=0)\n', (2911, 2926), True, 'import numpy as np\n'), ((3640, 3701), 'numpy.array', 
'np.array', (['[w.lane_width for w in global_path.carla_waypoints]'], {}), '([w.lane_width for w in global_path.carla_waypoints])\n', (3648, 3701), True, 'import numpy as np\n'), ((3747, 3774), 'numpy.array', 'np.array', (['global_path.theta'], {}), '(global_path.theta)\n', (3755, 3774), True, 'import numpy as np\n'), ((3836, 3862), 'numpy.cos', 'np.cos', (['(thetas + np.pi / 2)'], {}), '(thetas + np.pi / 2)\n', (3842, 3862), True, 'import numpy as np\n'), ((3862, 3888), 'numpy.sin', 'np.sin', (['(thetas + np.pi / 2)'], {}), '(thetas + np.pi / 2)\n', (3868, 3888), True, 'import numpy as np\n'), ((4063, 4092), 'numpy.expand_dims', 'np.expand_dims', (['color'], {'axis': '(0)'}), '(color, axis=0)\n', (4077, 4092), True, 'import numpy as np\n'), ((8009, 8047), 'os.path.join', 'join', (['file_dir', '"""./default_carla.yaml"""'], {}), "(file_dir, './default_carla.yaml')\n", (8013, 8047), False, 'from os.path import join\n'), ((1973, 1984), 'numpy.cos', 'np.cos', (['rad'], {}), '(rad)\n', (1979, 1984), True, 'import numpy as np\n'), ((2007, 2018), 'numpy.sin', 'np.sin', (['rad'], {}), '(rad)\n', (2013, 2018), True, 'import numpy as np\n'), ((3424, 3449), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (3439, 3449), False, 'import os\n')] |
import unittest
from unittest import mock
import numpy
from PIL import Image
import image_generator
from materials import *
def mock_get_global_material_list(material):
    """Stub for image_generator.get_global_material_list.

    Returns exactly two fake variants of the requested material family:
    ``<material>_1`` (white) and ``<material>_2`` (black).
    """
    variants = []
    for index, colour in ((1, 'white'), (2, 'black')):
        variants.append(('%s_%d' % (material, index), colour))
    return variants
class Test_Image_Generator(unittest.TestCase):
    """Unit tests for the material-expansion and scene-configuration helpers
    in the image_generator module.

    Where needed, image_generator.get_global_material_list is patched with
    mock_get_global_material_list, so each material family expands to exactly
    two fake variants: <family>_1 (white) and <family>_2 (black).
    """

    @mock.patch('image_generator.get_global_material_list', side_effect=mock_get_global_material_list)
    def test_generate_materials_lists_single_list_single_material(self, mock_function):
        """One option list with one family expands to one list per variant."""
        materials_options = [
            ['plastic']
        ]
        actual = image_generator.generate_materials_lists(materials_options)
        expected = [['plastic_1'], ['plastic_2']]
        self.assertEqual(actual, expected)

    @mock.patch('image_generator.get_global_material_list', side_effect=mock_get_global_material_list)
    def test_generate_materials_lists_single_list_multiple_material(self, mock_function):
        """Two families in one list expand to the cartesian product of variants."""
        materials_options = [
            ['plastic', 'metal']
        ]
        actual = image_generator.generate_materials_lists(materials_options)
        expected = [
            ['plastic_1', 'metal_1'],
            ['plastic_1', 'metal_2'],
            ['plastic_2', 'metal_1'],
            ['plastic_2', 'metal_2']
        ]
        self.assertEqual(actual, expected)

    @mock.patch('image_generator.get_global_material_list', side_effect=mock_get_global_material_list)
    def test_generate_materials_lists_multiple_list_single_material(self, mock_function):
        """Separate option lists expand independently, in order."""
        materials_options = [
            ['metal'],
            ['plastic']
        ]
        actual = image_generator.generate_materials_lists(materials_options)
        expected = [
            ['metal_1'],
            ['metal_2'],
            ['plastic_1'],
            ['plastic_2']
        ]
        self.assertEqual(actual, expected)

    @mock.patch('image_generator.get_global_material_list', side_effect=mock_get_global_material_list)
    def test_generate_materials_lists_multiple_list_multiple_material(self, mock_function):
        """Each list yields its own cartesian product; duplicate families are
        expanded independently (so wood/wood gives all four pairings)."""
        materials_options = [
            ['plastic', 'metal'],
            ['wood', 'wood']
        ]
        actual = image_generator.generate_materials_lists(materials_options)
        expected = [
            ['plastic_1', 'metal_1'],
            ['plastic_1', 'metal_2'],
            ['plastic_2', 'metal_1'],
            ['plastic_2', 'metal_2'],
            ['wood_1', 'wood_1'],
            ['wood_1', 'wood_2'],
            ['wood_2', 'wood_1'],
            ['wood_2', 'wood_2']
        ]
        self.assertEqual(actual, expected)

    def test_generate_output_file_name(self):
        """File names join type and materials with underscores, lower-cased."""
        self.assertEqual(image_generator.generate_output_file_name('test_type', []), 'test_type')
        self.assertEqual(image_generator.generate_output_file_name('test_type', ['a']), 'test_type_a')
        self.assertEqual(image_generator.generate_output_file_name('test_type', ['a', 'b']), 'test_type_a_b')
        self.assertEqual(image_generator.generate_output_file_name('test_type', ['A']), 'test_type_a')

    def test_generate_scene_configuration(self):
        """Without materials, the scene config carries no 'materials' key."""
        object_definition = {
            'type': 'sphere',
            'position': {
                'x': 0,
                'y': 0.5,
                'z': 2
            },
            'scale': {
                'x': 1,
                'y': 1,
                'z': 1
            }
        }
        actual = image_generator.generate_scene_configuration(object_definition, None)
        expected = {
            'screenshot': True,
            'objects': [{
                'id': 'test_sphere',
                'type': 'sphere',
                'kinematic': True,
                'shows': [{
                    'stepBegin': 0,
                    'position': {
                        'x': 0,
                        'y': 0.5,
                        'z': 2
                    },
                    'scale': {
                        'x': 1,
                        'y': 1,
                        'z': 1
                    }
                }]
            }]
        }
        self.assertEqual(actual, expected)

    def test_generate_scene_configuration_with_material_list(self):
        """A material list is copied into the object's 'materials' key."""
        object_definition = {
            'type': 'sphere',
            'position': {
                'x': 0,
                'y': 0.5,
                'z': 2
            },
            'scale': {
                'x': 1,
                'y': 1,
                'z': 1
            }
        }
        actual = image_generator.generate_scene_configuration(object_definition, ['test_material'])
        expected = {
            'screenshot': True,
            'objects': [{
                'id': 'test_sphere',
                'type': 'sphere',
                'kinematic': True,
                'materials': ['test_material'],
                'shows': [{
                    'stepBegin': 0,
                    'position': {
                        'x': 0,
                        'y': 0.5,
                        'z': 2
                    },
                    'scale': {
                        'x': 1,
                        'y': 1,
                        'z': 1
                    }
                }]
            }]
        }
        self.assertEqual(actual, expected)

    @mock.patch('image_generator.get_global_material_list', side_effect=mock_get_global_material_list)
    def test_generate_scene_configuration_list(self, mock_function):
        """One scene per material combination per object; objects without
        materials_options produce a single scene with no 'materials' key."""
        self.maxDiff = None
        object_list = [{
            'type': 'sphere',
            'materials_options': [
                ['plastic'],
                ['metal']
            ],
            'position': {
                'x': 0,
                'y': 0.5,
                'z': 2
            },
            'scale': {
                'x': 1,
                'y': 1,
                'z': 1
            }
        }, {
            'type': 'sofa',
            'position': {
                'x': 0,
                'y': 0.5,
                'z': 3
            },
            'scale': {
                'x': 1,
                'y': 1,
                'z': 1
            }
        }]
        actual = image_generator.generate_scene_configuration_list(object_list)
        expected = [{
            'screenshot': True,
            'objects': [{
                'id': 'test_sphere',
                'type': 'sphere',
                'kinematic': True,
                'materials': ['plastic_1'],
                'shows': [{
                    'stepBegin': 0,
                    'position': {
                        'x': 0,
                        'y': 0.5,
                        'z': 2
                    },
                    'scale': {
                        'x': 1,
                        'y': 1,
                        'z': 1
                    }
                }]
            }]
        }, {
            'screenshot': True,
            'objects': [{
                'id': 'test_sphere',
                'type': 'sphere',
                'kinematic': True,
                'materials': ['plastic_2'],
                'shows': [{
                    'stepBegin': 0,
                    'position': {
                        'x': 0,
                        'y': 0.5,
                        'z': 2
                    },
                    'scale': {
                        'x': 1,
                        'y': 1,
                        'z': 1
                    }
                }]
            }]
        }, {
            'screenshot': True,
            'objects': [{
                'id': 'test_sphere',
                'type': 'sphere',
                'kinematic': True,
                'materials': ['metal_1'],
                'shows': [{
                    'stepBegin': 0,
                    'position': {
                        'x': 0,
                        'y': 0.5,
                        'z': 2
                    },
                    'scale': {
                        'x': 1,
                        'y': 1,
                        'z': 1
                    }
                }]
            }]
        }, {
            'screenshot': True,
            'objects': [{
                'id': 'test_sphere',
                'type': 'sphere',
                'kinematic': True,
                'materials': ['metal_2'],
                'shows': [{
                    'stepBegin': 0,
                    'position': {
                        'x': 0,
                        'y': 0.5,
                        'z': 2
                    },
                    'scale': {
                        'x': 1,
                        'y': 1,
                        'z': 1
                    }
                }]
            }]
        }, {
            'screenshot': True,
            'objects': [{
                'id': 'test_sofa',
                'type': 'sofa',
                'kinematic': True,
                'shows': [{
                    'stepBegin': 0,
                    'position': {
                        'x': 0,
                        'y': 0.5,
                        'z': 3
                    },
                    'scale': {
                        'x': 1,
                        'y': 1,
                        'z': 1
                    }
                }]
            }]
        }]
        self.assertEqual(actual, expected)

    def test_get_global_material_list(self):
        """The real (unpatched) lookup returns the constants from materials."""
        self.assertEqual(METAL_MATERIALS, image_generator.get_global_material_list('metal'))
        self.assertEqual(PLASTIC_MATERIALS, image_generator.get_global_material_list('plastic'))

    def test_retrieve_image_pixel_list(self):
        """Pixels come back as nested lists of (R, G, B) tuples, row-major."""
        object_screenshot = Image.fromarray(numpy.array([[(1, 2, 3), (4, 5, 6)], [(7, 8, 9), (10, 11, 12)]], \
            dtype=numpy.uint8))
        actual = image_generator.retrieve_image_pixel_list(object_screenshot)
        expected = [[(1, 2, 3), (4, 5, 6)], [(7, 8, 9), (10, 11, 12)]]
        self.assertEqual(actual, expected)
| [
"image_generator.generate_scene_configuration",
"image_generator.generate_output_file_name",
"image_generator.generate_scene_configuration_list",
"image_generator.generate_materials_lists",
"unittest.mock.patch",
"image_generator.retrieve_image_pixel_list",
"numpy.array",
"image_generator.get_global_m... | [((306, 408), 'unittest.mock.patch', 'mock.patch', (['"""image_generator.get_global_material_list"""'], {'side_effect': 'mock_get_global_material_list'}), "('image_generator.get_global_material_list', side_effect=\n mock_get_global_material_list)\n", (316, 408), False, 'from unittest import mock\n'), ((741, 843), 'unittest.mock.patch', 'mock.patch', (['"""image_generator.get_global_material_list"""'], {'side_effect': 'mock_get_global_material_list'}), "('image_generator.get_global_material_list', side_effect=\n mock_get_global_material_list)\n", (751, 843), False, 'from unittest import mock\n'), ((1324, 1426), 'unittest.mock.patch', 'mock.patch', (['"""image_generator.get_global_material_list"""'], {'side_effect': 'mock_get_global_material_list'}), "('image_generator.get_global_material_list', side_effect=\n mock_get_global_material_list)\n", (1334, 1426), False, 'from unittest import mock\n'), ((1874, 1976), 'unittest.mock.patch', 'mock.patch', (['"""image_generator.get_global_material_list"""'], {'side_effect': 'mock_get_global_material_list'}), "('image_generator.get_global_material_list', side_effect=\n mock_get_global_material_list)\n", (1884, 1976), False, 'from unittest import mock\n'), ((5388, 5490), 'unittest.mock.patch', 'mock.patch', (['"""image_generator.get_global_material_list"""'], {'side_effect': 'mock_get_global_material_list'}), "('image_generator.get_global_material_list', side_effect=\n mock_get_global_material_list)\n", (5398, 5490), False, 'from unittest import mock\n'), ((578, 637), 'image_generator.generate_materials_lists', 'image_generator.generate_materials_lists', (['materials_options'], {}), '(materials_options)\n', (618, 637), False, 'import image_generator\n'), ((1024, 1083), 'image_generator.generate_materials_lists', 'image_generator.generate_materials_lists', (['materials_options'], {}), '(materials_options)\n', (1064, 1083), False, 'import image_generator\n'), ((1622, 1681), 
'image_generator.generate_materials_lists', 'image_generator.generate_materials_lists', (['materials_options'], {}), '(materials_options)\n', (1662, 1681), False, 'import image_generator\n'), ((2190, 2249), 'image_generator.generate_materials_lists', 'image_generator.generate_materials_lists', (['materials_options'], {}), '(materials_options)\n', (2230, 2249), False, 'import image_generator\n'), ((3464, 3533), 'image_generator.generate_scene_configuration', 'image_generator.generate_scene_configuration', (['object_definition', 'None'], {}), '(object_definition, None)\n', (3508, 3533), False, 'import image_generator\n'), ((4588, 4675), 'image_generator.generate_scene_configuration', 'image_generator.generate_scene_configuration', (['object_definition', "['test_material']"], {}), "(object_definition, [\n 'test_material'])\n", (4632, 4675), False, 'import image_generator\n'), ((6288, 6350), 'image_generator.generate_scene_configuration_list', 'image_generator.generate_scene_configuration_list', (['object_list'], {}), '(object_list)\n', (6337, 6350), False, 'import image_generator\n'), ((10025, 10085), 'image_generator.retrieve_image_pixel_list', 'image_generator.retrieve_image_pixel_list', (['object_screenshot'], {}), '(object_screenshot)\n', (10066, 10085), False, 'import image_generator\n'), ((2697, 2755), 'image_generator.generate_output_file_name', 'image_generator.generate_output_file_name', (['"""test_type"""', '[]'], {}), "('test_type', [])\n", (2738, 2755), False, 'import image_generator\n'), ((2796, 2857), 'image_generator.generate_output_file_name', 'image_generator.generate_output_file_name', (['"""test_type"""', "['a']"], {}), "('test_type', ['a'])\n", (2837, 2857), False, 'import image_generator\n'), ((2900, 2966), 'image_generator.generate_output_file_name', 'image_generator.generate_output_file_name', (['"""test_type"""', "['a', 'b']"], {}), "('test_type', ['a', 'b'])\n", (2941, 2966), False, 'import image_generator\n'), ((3011, 3072), 
'image_generator.generate_output_file_name', 'image_generator.generate_output_file_name', (['"""test_type"""', "['A']"], {}), "('test_type', ['A'])\n", (3052, 3072), False, 'import image_generator\n'), ((9664, 9713), 'image_generator.get_global_material_list', 'image_generator.get_global_material_list', (['"""metal"""'], {}), "('metal')\n", (9704, 9713), False, 'import image_generator\n'), ((9760, 9811), 'image_generator.get_global_material_list', 'image_generator.get_global_material_list', (['"""plastic"""'], {}), "('plastic')\n", (9800, 9811), False, 'import image_generator\n'), ((9907, 9995), 'numpy.array', 'numpy.array', (['[[(1, 2, 3), (4, 5, 6)], [(7, 8, 9), (10, 11, 12)]]'], {'dtype': 'numpy.uint8'}), '([[(1, 2, 3), (4, 5, 6)], [(7, 8, 9), (10, 11, 12)]], dtype=\n numpy.uint8)\n', (9918, 9995), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
#
# This file is part of the FFEA simulation package
#
# Copyright (c) by the Theory and Development FFEA teams,
# as they appear in the README.md file.
#
# FFEA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FFEA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FFEA. If not, see <http://www.gnu.org/licenses/>.
#
# To help us fund FFEA development, we humbly ask that you cite
# the research papers on the package.
#
"""
Created on Wed Dec 2 16:37:06 2015
@author: py12rw
"""
import argparse
import slender_analysis_lib
import json
import numpy as np
def convert_string_list(string_list):
    """
    Convert a list of numeric strings (e.g. ["3", "5", "6"]) into the
    corresponding list of integers.

    In: list of strings
    Out: list of integers
    """
    return [int(item) for item in string_list]
def convert_array_in_dict_to_list(dict):
    """
    Replace every numpy-array value in a dictionary with an equivalent plain
    list, so the dictionary can be serialised by the json library.

    Note: the parameter name shadows the built-in ``dict``; it is kept
    unchanged for backward compatibility with existing callers.

    In: a dictionary possibly containing numpy arrays as values
    Out: the same dictionary (mutated in place), arrays converted to lists
    """
    # BUG FIX: dict.iteritems() is Python-2-only and raises AttributeError on
    # Python 3; use items(). Snapshot with list() so values can be reassigned
    # safely while iterating.
    for key, value in list(dict.items()):
        # isinstance is the idiomatic, subclass-aware type check (the original
        # compared against type(np.zeros([1]))).
        if isinstance(value, np.ndarray):
            dict[key] = value.tolist()
    return dict
# Command-line interface for the Myosin-7 slender-body trajectory analysis.
parser = argparse.ArgumentParser(description="CLI for analysing Myosin-7 trajectory files")
parser.add_argument("FFEA_filename", action="store", help="Path to input .ffea file")
parser.add_argument("-head_pin_file", action="store", help="Path to pinfile containing head")
parser.add_argument("-tail_pin_file", action="store", help="Path to pinfile containing tail")
parser.add_argument("-head_point_index", action="store", help="Index of a node in the center of the head")
parser.add_argument("-middle_point_index", action="store", help="Index of a node in the center of the molecule (ideally in the bendiest possible part)")
parser.add_argument("-tail_point_index", action="store", help="Index of a node in the center of the tail")
parser.add_argument("-twist", action="store_true", help="Whether to perform a twist test")
parser.add_argument("-head_line", action="store", nargs="+", help="Indices of points forming a line through the molecule\'s head, orthogonal to the principal axis (e.g. 3 34 51 57).")
parser.add_argument("-tail_line", action="store", nargs="+", help="Indices of points forming a line through the molecule\'s tail, orthogonal to the principal axis.")
parser.add_argument("-dist", action="store_true", help="Perform an end-to-end distance test (note: if this is disabled, the twist test will give results in terms of rads instead of rads/m)")
parser.add_argument("-angle", action="store_true", help="Perform an angular distribution test")
parser.add_argument("-plot", action="store_true", help="Save the results of any tests that have been performed to PDFs in the current working directory")
parser.add_argument("-name", action="store", help="Name of the molecule being tested (used in plots)")
args = parser.parse_args()
# nargs="+" arguments arrive as lists of strings; convert to integer node indices.
args.head_line = convert_string_list(args.head_line)
args.tail_line = convert_string_list(args.tail_line)
# Run the requested battery of analyses (twist / end-to-end distance / angle).
results = slender_analysis_lib.run_sequential_tests(args.FFEA_filename,
                         args.head_pin_file,
                         args.tail_pin_file,
                         int(args.head_point_index),
                         int(args.tail_point_index),
                         int(args.middle_point_index),
                         args.head_line,
                         args.tail_line,
                         args.twist,
                         args.dist,
                         args.angle,
                         args.plot,
                         args.name)
# json cannot serialise numpy arrays, so flatten them to plain lists first.
results = convert_array_in_dict_to_list(results)
# Write results next to the input, e.g. molecule.ffea -> molecule_slender_analysis.json.
out_filename = args.FFEA_filename.split('.')[0]+"_slender_analysis.json"
with open(out_filename, 'w') as outfile:
    json.dump(results, outfile)
"json.dump",
"numpy.zeros",
"argparse.ArgumentParser"
] | [((1775, 1862), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""CLI for analysing Myosin-7 trajectory files"""'}), "(description=\n 'CLI for analysing Myosin-7 trajectory files')\n", (1798, 1862), False, 'import argparse\n'), ((4725, 4752), 'json.dump', 'json.dump', (['results', 'outfile'], {}), '(results, outfile)\n', (4734, 4752), False, 'import json\n'), ((1694, 1707), 'numpy.zeros', 'np.zeros', (['[1]'], {}), '([1])\n', (1702, 1707), True, 'import numpy as np\n')] |
"""
Testing a point ('Null') Hypothesis (not using pseudopriors)
"""
from __future__ import division
import numpy as np
import pymc3 as pm
from scipy.stats import binom
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')

# THE DATA.
# For each subject, specify the condition s/he was in,
# the number of trials s/he experienced, and the number correct.
# (Randomly generated fictitious data.)
npg = 20  # number of subjects per group
ntrl = 20  # number of trials per subject
cond_of_subj = np.repeat([0, 1, 2, 3], npg)
n_trl_of_subj = np.repeat([ntrl], 4*npg)
np.random.seed(47401)  # fixed seed: reproducible fictitious data
n_corr_of_subj = np.concatenate((binom.rvs(n=ntrl, p=.61, size=npg),
                                binom.rvs(n=ntrl, p=.50, size=npg),
                                binom.rvs(n=ntrl, p=.49, size=npg),
                                binom.rvs(n=ntrl, p=.51, size=npg)))
n_subj = len(cond_of_subj)
n_cond = len(set(cond_of_subj))

# THE MODEL
# model_index == 0: per-condition means (mu1); model_index == 1: shared mean (mu0).
with pm.Model() as model:
    # Hyperprior on model index:
    model_index = pm.DiscreteUniform('model_index', lower=0, upper=1)
    # Constants for hyperprior:
    shape_Gamma = 1.0
    rate_Gamma = 0.1
    # Hyperprior on mu and kappa:
    kappa = pm.Gamma('kappa', shape_Gamma, rate_Gamma, shape=n_cond)
    mu0 = pm.Beta('mu0', 1, 1)
    a_Beta0 = mu0 * kappa[cond_of_subj]
    b_Beta0 = (1 - mu0) * kappa[cond_of_subj]
    mu1 = pm.Beta('mu1', 1, 1, shape=n_cond)
    a_Beta1 = mu1[cond_of_subj] * kappa[cond_of_subj]
    b_Beta1 = (1 - mu1[cond_of_subj]) * kappa[cond_of_subj]
    # Prior on theta
    theta0 = pm.Beta('theta0', a_Beta0, b_Beta0, shape=n_subj)
    theta1 = pm.Beta('theta1', a_Beta1, b_Beta1, shape=n_subj)
    # if model_index == 0 then sample from theta1 else sample from theta0
    theta = pm.math.switch(pm.math.eq(model_index, 0), theta1, theta0)
    # Likelihood:
    y = pm.Binomial('y', p=theta, n=n_trl_of_subj, observed=n_corr_of_subj)
    # Sampling
    # NOTE(review): pm.ElemwiseCategorical was removed in later PyMC3 releases;
    # pm.CategoricalGibbsMetropolis is the modern replacement — confirm against
    # the pinned pymc3 version before upgrading.
    step = pm.ElemwiseCategorical(vars=[model_index], values=[0, 1])
    trace = pm.sample(10000, step)

# EXAMINE THE RESULTS.
# Posterior probability of each model, estimated from the model-index chain.
model_idx_sample = trace['model_index']
pM1 = sum(model_idx_sample == 0) / len(model_idx_sample)
pM2 = 1 - pM1

plt.figure(figsize=(15, 15))
plt.subplot2grid((3, 3), (0, 0), colspan=3)
# BUG FIX: the original label mixed %-style and str.format placeholders
# ('%.3f ; {:.3f}'.format(pM1, pM2)), so it rendered a literal '%.3f' and
# displayed pM1 in the pM2 slot. Both values now use str.format.
plt.plot(model_idx_sample,
         label='p(DiffMu|D) = {:.3f} ; p(SameMu|D) = {:.3f}'.format(pM1, pM2))
plt.xlabel('Step in Markov Chain')
plt.legend(loc='upper right', framealpha=0.75)

# Pairwise differences of condition means under the "different mu" model
# (samples where model_index == 0), plotted on a 3x3 grid below the chain.
count = 0
position = [(1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)]
for i in range(0, 4):
    mui_sample = trace['mu1'][:, i][model_idx_sample == 0]
    for j in range(i+1, 4):
        muj_sample = trace['mu1'][:, j][model_idx_sample == 0]
        ax = plt.subplot2grid((3, 3), position[count])
        pm.plot_posterior(mui_sample - muj_sample,
                          ref_val=0, ax=ax)
        plt.title(r'$\mu_{} - \mu_{}$'.format(i+1, j+1))
        plt.xlim(-0.3, 0.3)
        count += 1

plt.tight_layout()
plt.savefig('Figure_12.5.png')
plt.show()
| [
"pymc3.sample",
"numpy.random.seed",
"matplotlib.pyplot.subplot2grid",
"pymc3.DiscreteUniform",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"pymc3.Binomial",
"matplotlib.pyplot.tight_layout",
"pymc3.math.eq",
"numpy.repeat",
"pymc3.Model",
"matplotlib.pyplot.show",
"matplotlib... | [((201, 234), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-darkgrid"""'], {}), "('seaborn-darkgrid')\n", (214, 234), True, 'import matplotlib.pyplot as plt\n'), ((505, 533), 'numpy.repeat', 'np.repeat', (['[0, 1, 2, 3]', 'npg'], {}), '([0, 1, 2, 3], npg)\n', (514, 533), True, 'import numpy as np\n'), ((550, 576), 'numpy.repeat', 'np.repeat', (['[ntrl]', '(4 * npg)'], {}), '([ntrl], 4 * npg)\n', (559, 576), True, 'import numpy as np\n'), ((575, 596), 'numpy.random.seed', 'np.random.seed', (['(47401)'], {}), '(47401)\n', (589, 596), True, 'import numpy as np\n'), ((2335, 2363), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (2345, 2363), True, 'import matplotlib.pyplot as plt\n'), ((2364, 2407), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 3)', '(0, 0)'], {'colspan': '(3)'}), '((3, 3), (0, 0), colspan=3)\n', (2380, 2407), True, 'import matplotlib.pyplot as plt\n'), ((2502, 2536), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Step in Markov Chain"""'], {}), "('Step in Markov Chain')\n", (2512, 2536), True, 'import matplotlib.pyplot as plt\n'), ((2537, 2583), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'framealpha': '(0.75)'}), "(loc='upper right', framealpha=0.75)\n", (2547, 2583), True, 'import matplotlib.pyplot as plt\n'), ((3072, 3090), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3088, 3090), True, 'import matplotlib.pyplot as plt\n'), ((3091, 3121), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Figure_12.5.png"""'], {}), "('Figure_12.5.png')\n", (3102, 3121), True, 'import matplotlib.pyplot as plt\n'), ((3122, 3132), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3130, 3132), True, 'import matplotlib.pyplot as plt\n'), ((904, 914), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (912, 914), True, 'import pymc3 as pm\n'), ((976, 1027), 'pymc3.DiscreteUniform', 'pm.DiscreteUniform', 
(['"""model_index"""'], {'lower': '(0)', 'upper': '(1)'}), "('model_index', lower=0, upper=1)\n", (994, 1027), True, 'import pymc3 as pm\n'), ((1149, 1205), 'pymc3.Gamma', 'pm.Gamma', (['"""kappa"""', 'shape_Gamma', 'rate_Gamma'], {'shape': 'n_cond'}), "('kappa', shape_Gamma, rate_Gamma, shape=n_cond)\n", (1157, 1205), True, 'import pymc3 as pm\n'), ((1217, 1237), 'pymc3.Beta', 'pm.Beta', (['"""mu0"""', '(1)', '(1)'], {}), "('mu0', 1, 1)\n", (1224, 1237), True, 'import pymc3 as pm\n'), ((1335, 1369), 'pymc3.Beta', 'pm.Beta', (['"""mu1"""', '(1)', '(1)'], {'shape': 'n_cond'}), "('mu1', 1, 1, shape=n_cond)\n", (1342, 1369), True, 'import pymc3 as pm\n'), ((1518, 1567), 'pymc3.Beta', 'pm.Beta', (['"""theta0"""', 'a_Beta0', 'b_Beta0'], {'shape': 'n_subj'}), "('theta0', a_Beta0, b_Beta0, shape=n_subj)\n", (1525, 1567), True, 'import pymc3 as pm\n'), ((1581, 1630), 'pymc3.Beta', 'pm.Beta', (['"""theta1"""', 'a_Beta1', 'b_Beta1'], {'shape': 'n_subj'}), "('theta1', a_Beta1, b_Beta1, shape=n_subj)\n", (1588, 1630), True, 'import pymc3 as pm\n'), ((1803, 1870), 'pymc3.Binomial', 'pm.Binomial', (['"""y"""'], {'p': 'theta', 'n': 'n_trl_of_subj', 'observed': 'n_corr_of_subj'}), "('y', p=theta, n=n_trl_of_subj, observed=n_corr_of_subj)\n", (1814, 1870), True, 'import pymc3 as pm\n'), ((1898, 1955), 'pymc3.ElemwiseCategorical', 'pm.ElemwiseCategorical', ([], {'vars': '[model_index]', 'values': '[0, 1]'}), '(vars=[model_index], values=[0, 1])\n', (1920, 1955), True, 'import pymc3 as pm\n'), ((1966, 1988), 'pymc3.sample', 'pm.sample', (['(10000)', 'step'], {}), '(10000, step)\n', (1975, 1988), True, 'import pymc3 as pm\n'), ((631, 666), 'scipy.stats.binom.rvs', 'binom.rvs', ([], {'n': 'ntrl', 'p': '(0.61)', 'size': 'npg'}), '(n=ntrl, p=0.61, size=npg)\n', (640, 666), False, 'from scipy.stats import binom\n'), ((684, 718), 'scipy.stats.binom.rvs', 'binom.rvs', ([], {'n': 'ntrl', 'p': '(0.5)', 'size': 'npg'}), '(n=ntrl, p=0.5, size=npg)\n', (693, 718), False, 'from scipy.stats import 
binom\n'), ((736, 771), 'scipy.stats.binom.rvs', 'binom.rvs', ([], {'n': 'ntrl', 'p': '(0.49)', 'size': 'npg'}), '(n=ntrl, p=0.49, size=npg)\n', (745, 771), False, 'from scipy.stats import binom\n'), ((788, 823), 'scipy.stats.binom.rvs', 'binom.rvs', ([], {'n': 'ntrl', 'p': '(0.51)', 'size': 'npg'}), '(n=ntrl, p=0.51, size=npg)\n', (797, 823), False, 'from scipy.stats import binom\n'), ((1732, 1758), 'pymc3.math.eq', 'pm.math.eq', (['model_index', '(0)'], {}), '(model_index, 0)\n', (1742, 1758), True, 'import pymc3 as pm\n'), ((2832, 2873), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 3)', 'position[count]'], {}), '((3, 3), position[count])\n', (2848, 2873), True, 'import matplotlib.pyplot as plt\n'), ((2881, 2941), 'pymc3.plot_posterior', 'pm.plot_posterior', (['(mui_sample - muj_sample)'], {'ref_val': '(0)', 'ax': 'ax'}), '(mui_sample - muj_sample, ref_val=0, ax=ax)\n', (2898, 2941), True, 'import pymc3 as pm\n'), ((3031, 3050), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.3)', '(0.3)'], {}), '(-0.3, 0.3)\n', (3039, 3050), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Email: <EMAIL>
# @Date: 2017-07-31 15:20:54
# @Last Modified by: <NAME>
# @Last Modified time: 2020-03-31 18:15:43
from functools import partialmethod
import numpy as np
from ..core import PointNeuron, addSonicFeatures
from ..constants import FARADAY, Rg, Z_Na, Z_Ca
@addSonicFeatures
class LeechTouch(PointNeuron):
''' Leech touch sensory neuron
Reference:
*<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. (2005).
Computational model of touch sensory cells (T Cells) of the leech: role of the
afterhyperpolarization (AHP) in activity-dependent conduction failure.
J Comput Neurosci 18, 5–24.*
'''
# Neuron name
name = 'LeechT'
# ------------------------------ Biophysical parameters ------------------------------
# Resting parameters
Cm0 = 1e-2 # Membrane capacitance (F/m2)
Vm0 = -53.58 # Membrane potential (mV)
# Reversal potentials (mV)
ENa = 45.0 # Sodium
EK = -62.0 # Potassium
ECa = 60.0 # Calcium
ELeak = -48.0 # Non-specific leakage
EPumpNa = -300.0 # Sodium pump
# Maximal channel conductances (S/m2)
gNabar = 3500.0 # Sodium
gKdbar = 900.0 # Delayed-rectifier Potassium
gCabar = 20.0 # Calcium
gKCabar = 236.0 # Calcium-dependent Potassium
gLeak = 1.0 # Non-specific leakage
gPumpNa = 20.0 # Sodium pump
# Activation time constants (s)
taum = 0.1e-3 # Sodium
taus = 0.6e-3 # Calcium
# Original conversion constants from inward ionic current (nA) to build-up of
# intracellular ion concentration (arb.)
K_Na_original = 0.016 # iNa to intracellular [Na+]
K_Ca_original = 0.1 # iCa to intracellular [Ca2+]
# Constants needed to convert K from original model (soma compartment)
# to current model (point-neuron)
surface = 6434.0e-12 # surface of cell assumed as a single soma (m2)
curr_factor = 1e6 # mA to nA
# Time constants for the removal of ions from intracellular pools (s)
taur_Na = 16.0 # Sodium
taur_Ca = 1.25 # Calcium
# Time constants for the PumpNa and KCa currents activation
# from specific intracellular ions (s)
taua_PumpNa = 0.1 # PumpNa current activation from intracellular Na+
taua_KCa = 0.01 # KCa current activation from intracellular Ca2+
# ------------------------------ States names & descriptions ------------------------------
states = {
'm': 'iNa activation gate',
'h': 'iNa inactivation gate',
'n': 'iKd gate',
's': 'iCa gate',
'Nai': 'submembrane Na+ concentration (arbitrary unit)',
'ANa': 'Na+ dependent iPumpNa gate',
'Cai': 'submembrane Ca2+ concentration (arbitrary unit)',
'ACa': 'Ca2+ dependent iKCa gate'
}
def __new__(cls):
cls.K_Na = cls.K_Na_original * cls.surface * cls.curr_factor
cls.K_Ca = cls.K_Ca_original * cls.surface * cls.curr_factor
return super(LeechTouch, cls).__new__(cls)
# ------------------------------ Gating states kinetics ------------------------------
@staticmethod
def _xinf(Vm, halfmax, slope, power):
''' Generic function computing the steady-state open-probability of a
particular ion channel gate at a given voltage.
:param Vm: membrane potential (mV)
:param halfmax: half-activation voltage (mV)
:param slope: slope parameter of activation function (mV)
:param power: power exponent multiplying the exponential expression (integer)
:return: steady-state open-probability (-)
'''
return 1 / (1 + np.exp((Vm - halfmax) / slope))**power
@staticmethod
def _taux(Vm, halfmax, slope, tauMax, tauMin):
''' Generic function computing the voltage-dependent, adaptation time constant
of a particular ion channel gate at a given voltage.
:param Vm: membrane potential (mV)
:param halfmax: voltage at which adaptation time constant is half-maximal (mV)
:param slope: slope parameter of adaptation time constant function (mV)
:return: adptation time constant (s)
'''
return (tauMax - tauMin) / (1 + np.exp((Vm - halfmax) / slope)) + tauMin
@staticmethod
def _derCion(Cion, Iion, Kion, tau):
''' Generic function computing the time derivative of the concentration
of a specific ion in its intracellular pool.
:param Cion: ion concentration in the pool (arbitrary unit)
:param Iion: ionic current (mA/m2)
:param Kion: scaling factor for current contribution to pool (arb. unit / nA???)
:param tau: time constant for removal of ions from the pool (s)
:return: variation of ionic concentration in the pool (arbitrary unit /s)
'''
return (Kion * (-Iion) - Cion) / tau
@staticmethod
def _derAion(Aion, Cion, tau):
''' Generic function computing the time derivative of the concentration and time
dependent activation function, for a specific pool-dependent ionic current.
:param Aion: concentration and time dependent activation function (arbitrary unit)
:param Cion: ion concentration in the pool (arbitrary unit)
:param tau: time constant for activation function variation (s)
:return: variation of activation function (arbitrary unit / s)
'''
return (Cion - Aion) / tau
minf = partialmethod(_xinf, halfmax=-35.0, slope=-5.0, power=1)
hinf = partialmethod(_xinf, halfmax=-50.0, slope=9.0, power=2)
tauh = partialmethod(_taux, halfmax=-36.0, slope=3.5, tauMax=14.0e-3, tauMin=0.2e-3)
ninf = partialmethod(_xinf, halfmax=-22.0, slope=-9.0, power=1)
taun = partialmethod(_taux, halfmax=-10.0, slope=10.0, tauMax=6.0e-3, tauMin=1.0e-3)
sinf = partialmethod(_xinf, halfmax=-10.0, slope=-2.8, power=1)
    # ------------------------------ States derivatives ------------------------------

    @classmethod
    def derNai(cls, Nai, m, h, Vm):
        ''' Evolution of submembrane Sodium concentration:
            fed by iNa, removed with time constant taur_Na. '''
        return cls._derCion(Nai, cls.iNa(m, h, Vm), cls.K_Na, cls.taur_Na)  # M/s
    @classmethod
    def derCai(cls, Cai, s, Vm):
        ''' Evolution of submembrane Calcium concentration:
            fed by iCa, removed with time constant taur_Ca. '''
        return cls._derCion(Cai, cls.iCa(s, Vm), cls.K_Ca, cls.taur_Ca)  # M/s
    @classmethod
    def derANa(cls, ANa, Nai):
        ''' Evolution of Na+ dependent iPumpNa gate:
            relaxes towards the submembrane Na+ pool with time constant taua_PumpNa. '''
        return cls._derAion(ANa, Nai, cls.taua_PumpNa)
    @classmethod
    def derACa(cls, ACa, Cai):
        ''' Evolution of Ca2+ dependent iKCa gate:
            relaxes towards the submembrane Ca2+ pool with time constant taua_KCa. '''
        return cls._derAion(ACa, Cai, cls.taua_KCa)
    @classmethod
    def derStates(cls):
        ''' Dictionary of state-derivative lambdas, keyed by state name.
            Each lambda maps (Vm, states dict) -> time derivative (per second). '''
        return {
            'm': lambda Vm, x: (cls.minf(Vm) - x['m']) / cls.taum,
            'h': lambda Vm, x: (cls.hinf(Vm) - x['h']) / cls.tauh(Vm),
            'n': lambda Vm, x: (cls.ninf(Vm) - x['n']) / cls.taun(Vm),
            's': lambda Vm, x: (cls.sinf(Vm) - x['s']) / cls.taus,
            'Nai': lambda Vm, x: cls.derNai(x['Nai'], x['m'], x['h'], Vm),
            'ANa': lambda Vm, x: cls.derANa(x['ANa'], x['Nai']),
            'Cai': lambda Vm, x: cls.derCai(x['Cai'], x['s'], Vm),
            'ACa': lambda Vm, x: cls.derACa(x['ACa'], x['Cai'])
        }
    # ------------------------------ Steady states ------------------------------

    @classmethod
    def steadyStates(cls):
        ''' Dictionary of steady-state lambdas, keyed by state name. '''
        lambda_dict = {
            'm': lambda Vm: cls.minf(Vm),
            'h': lambda Vm: cls.hinf(Vm),
            'n': lambda Vm: cls.ninf(Vm),
            's': lambda Vm: cls.sinf(Vm)
        }
        # Pool steady states follow from derCion = 0  =>  Cion = -Kion * Iion
        lambda_dict['Nai'] = lambda Vm: -cls.K_Na * cls.iNa(
            lambda_dict['m'](Vm), lambda_dict['h'](Vm), Vm)
        lambda_dict['Cai'] = lambda Vm: -cls.K_Ca * cls.iCa(lambda_dict['s'](Vm), Vm)
        # Activation gate steady states follow from derAion = 0  =>  Aion = Cion
        lambda_dict['ANa'] = lambda Vm: lambda_dict['Nai'](Vm)
        lambda_dict['ACa'] = lambda Vm: lambda_dict['Cai'](Vm)
        return lambda_dict
# ------------------------------ Membrane currents ------------------------------
@classmethod
def iNa(cls, m, h, Vm):
''' Sodium current '''
return cls.gNabar * m**3 * h * (Vm - cls.ENa) # mA/m2
@classmethod
def iKd(cls, n, Vm):
''' Delayed-rectifier Potassium current '''
return cls.gKdbar * n**2 * (Vm - cls.EK) # mA/m2
@classmethod
def iCa(cls, s, Vm):
''' Calcium current '''
return cls.gCabar * s * (Vm - cls.ECa) # mA/m2
@classmethod
def iKCa(cls, ACa, Vm):
''' Calcium-activated Potassium current '''
return cls.gKCabar * ACa * (Vm - cls.EK) # mA/m2
@classmethod
def iPumpNa(cls, ANa, Vm):
''' NaK-ATPase pump current '''
return cls.gPumpNa * ANa * (Vm - cls.EPumpNa) # mA/m2
@classmethod
def iLeak(cls, Vm):
''' Non-specific leakage current '''
return cls.gLeak * (Vm - cls.ELeak) # mA/m2
    @classmethod
    def currents(cls):
        ''' Dictionary of membrane-current lambdas, keyed by current name.
            Each lambda maps (Vm, states dict) -> current density (mA/m2). '''
        return {
            'iNa': lambda Vm, x: cls.iNa(x['m'], x['h'], Vm),
            'iKd': lambda Vm, x: cls.iKd(x['n'], Vm),
            'iCa': lambda Vm, x: cls.iCa(x['s'], Vm),
            'iPumpNa': lambda Vm, x: cls.iPumpNa(x['ANa'], Vm),
            'iKCa': lambda Vm, x: cls.iKCa(x['ACa'], Vm),
            'iLeak': lambda Vm, _: cls.iLeak(Vm)
        }
class LeechMech(PointNeuron):
    ''' Generic leech neuron

        Reference:
        *<NAME>. (1998). Synaptic facilitation by reflected action potentials: enhancement
        of transmission when nerve impulses reverse direction at axon branch points. Proc. Natl.
        Acad. Sci. U.S.A. 95, 8345–8350.*
    '''

    # ------------------------------ Biophysical parameters ------------------------------
    alphaC_sf = 1e-5  # Calcium activation rate constant scaling factor (M)
    betaC = 0.1e3  # beta rate for the open-probability of iKCa channels (s-1)
    T = 293.15  # Room temperature (K)

    # ------------------------------ Gating states kinetics ------------------------------
    # Voltage-dependent rate constants; the trailing 1e3 factor converts ms-1 to s-1.

    @staticmethod
    def alpham(Vm):
        return -0.03 * (Vm + 28) / (np.exp(- (Vm + 28) / 15) - 1) * 1e3  # s-1

    @staticmethod
    def betam(Vm):
        return 2.7 * np.exp(-(Vm + 53) / 18) * 1e3  # s-1

    @staticmethod
    def alphah(Vm):
        return 0.045 * np.exp(-(Vm + 58) / 18) * 1e3  # s-1

    @staticmethod
    def betah(Vm):
        ''' .. warning:: the original paper contains an error (multiplication) in the
            expression of this rate constant, corrected in the mod file on ModelDB (division).
        '''
        return 0.72 / (np.exp(-(Vm + 23) / 14) + 1) * 1e3  # s-1

    @staticmethod
    def alphan(Vm):
        return -0.024 * (Vm - 17) / (np.exp(-(Vm - 17) / 8) - 1) * 1e3  # s-1

    @staticmethod
    def betan(Vm):
        return 0.2 * np.exp(-(Vm + 48) / 35) * 1e3  # s-1

    @staticmethod
    def alphas(Vm):
        return -1.5 * (Vm - 20) / (np.exp(-(Vm - 20) / 5) - 1) * 1e3  # s-1

    @staticmethod
    def betas(Vm):
        return 1.5 * np.exp(-(Vm + 25) / 10) * 1e3  # s-1

    @classmethod
    def alphaC(cls, Cai):
        # Calcium-dependent activation rate of the iKCa c-gate
        return 0.1 * Cai / cls.alphaC_sf * 1e3  # s-1

    # ------------------------------ States derivatives ------------------------------

    @classmethod
    def derC(cls, c, Cai):
        ''' Evolution of the c-gate open-probability '''
        return cls.alphaC(Cai) * (1 - c) - cls.betaC * c  # s-1

    @classmethod
    def derStates(cls):
        # First-order alpha/beta kinetics for the voltage-gated m/h/n/s gates;
        # Calcium-dependent kinetics for the c gate.
        return {
            'm': lambda Vm, x: cls.alpham(Vm) * (1 - x['m']) - cls.betam(Vm) * x['m'],
            'h': lambda Vm, x: cls.alphah(Vm) * (1 - x['h']) - cls.betah(Vm) * x['h'],
            'n': lambda Vm, x: cls.alphan(Vm) * (1 - x['n']) - cls.betan(Vm) * x['n'],
            's': lambda Vm, x: cls.alphas(Vm) * (1 - x['s']) - cls.betas(Vm) * x['s'],
            'c': lambda Vm, x: cls.derC(x['c'], x['Cai'])
        }

    # ------------------------------ Steady states ------------------------------

    @classmethod
    def steadyStates(cls):
        # x_inf = alpha / (alpha + beta) for each voltage-gated state
        return {
            'm': lambda Vm: cls.alpham(Vm) / (cls.alpham(Vm) + cls.betam(Vm)),
            'h': lambda Vm: cls.alphah(Vm) / (cls.alphah(Vm) + cls.betah(Vm)),
            'n': lambda Vm: cls.alphan(Vm) / (cls.alphan(Vm) + cls.betan(Vm)),
            's': lambda Vm: cls.alphas(Vm) / (cls.alphas(Vm) + cls.betas(Vm)),
        }

    # ------------------------------ Membrane currents ------------------------------

    @classmethod
    def iNa(cls, m, h, Vm, Nai):
        ''' Sodium current, with reversal computed from the submembrane [Na+].
            NOTE: m exponent is 4 here (vs. 3 in other models of this file). '''
        ENa = cls.nernst(Z_Na, Nai, cls.Nao, cls.T)  # mV
        return cls.gNabar * m**4 * h * (Vm - ENa)  # mA/m2

    @classmethod
    def iKd(cls, n, Vm):
        ''' Delayed-rectifier Potassium current '''
        return cls.gKdbar * n**2 * (Vm - cls.EK)  # mA/m2

    @classmethod
    def iCa(cls, s, Vm, Cai):
        ''' Calcium current, with reversal computed from the submembrane [Ca2+] '''
        ECa = cls.nernst(Z_Ca, Cai, cls.Cao, cls.T)  # mV
        return cls.gCabar * s * (Vm - ECa)  # mA/m2

    @classmethod
    def iKCa(cls, c, Vm):
        ''' Calcium-activated Potassium current '''
        return cls.gKCabar * c * (Vm - cls.EK)  # mA/m2

    @classmethod
    def iLeak(cls, Vm):
        ''' Non-specific leakage current '''
        return cls.gLeak * (Vm - cls.ELeak)  # mA/m2

    @classmethod
    def currents(cls):
        ''' Dictionary of membrane-current lambdas, keyed by current name. '''
        return {
            'iNa': lambda Vm, x: cls.iNa(x['m'], x['h'], Vm, x['Nai']),
            'iKd': lambda Vm, x: cls.iKd(x['n'], Vm),
            'iCa': lambda Vm, x: cls.iCa(x['s'], Vm, x['Cai']),
            'iKCa': lambda Vm, x: cls.iKCa(x['c'], Vm),
            'iLeak': lambda Vm, _: cls.iLeak(Vm)
        }
@addSonicFeatures
class LeechPressure(LeechMech):
    ''' Leech pressure sensory neuron

        Reference:
        *<NAME>. (1998). Synaptic facilitation by reflected action potentials: enhancement
        of transmission when nerve impulses reverse direction at axon branch points. Proc. Natl.
        Acad. Sci. U.S.A. 95, 8345–8350.*
    '''

    # Neuron name
    name = 'LeechP'

    # ------------------------------ Biophysical parameters ------------------------------

    # Resting parameters
    Cm0 = 1e-2  # Membrane capacitance (F/m2)
    Vm0 = -48.865  # Membrane potential (mV)
    Nai0 = 0.01  # Intracellular Sodium concentration (M)
    Cai0 = 1e-7  # Intracellular Calcium concentration (M)

    # Reversal potentials (mV)
    # ENa = 60      # Sodium (from MOD file on ModelDB)
    # ECa = 125     # Calcium (from MOD file on ModelDB)
    # (ENa / ECa are instead computed from Nernst equations in LeechMech)
    EK = -68.0  # Potassium
    ELeak = -49.0  # Non-specific leakage

    # Maximal channel conductances (S/m2)
    gNabar = 3500.0  # Sodium
    gKdbar = 60.0  # Delayed-rectifier Potassium
    gCabar = 0.02  # Calcium
    gKCabar = 8.0  # Calcium-dependent Potassium
    gLeak = 5.0  # Non-specific leakage

    # Ionic concentrations (M)
    Nao = 0.11  # Extracellular Sodium
    Cao = 1.8e-3  # Extracellular Calcium

    # Additional parameters
    INaPmax = 70.0  # Maximum pump rate of the NaK-ATPase (mA/m2)
    khalf_Na = 0.012  # Sodium concentration at which NaK-ATPase is at half its maximum rate (M)
    ksteep_Na = 1e-3  # Sensitivity of NaK-ATPase to varying Sodium concentrations (M)
    iCaS = 0.1  # Calcium pump current parameter (mA/m2)
    diam = 50e-6  # Cell soma diameter (m)

    # ------------------------------ States names & descriptions ------------------------------
    states = {
        'm': 'iNa activation gate',
        'h': 'iNa inactivation gate',
        'n': 'iKd gate',
        's': 'iCa gate',
        'c': 'iKCa gate',
        'Nai': 'submembrane Na+ concentration (M)',
        'Cai': 'submembrane Ca2+ concentration (M)'
    }

    def __new__(cls):
        ''' Derive the current-to-concentration conversion constants before instantiation. '''
        # Surface to volume ratio of the (spherical) cell soma (m-1)
        SV_ratio = 6 / cls.diam

        # Conversion constants from membrane ionic currents into
        # change rate of intracellular ionic concentrations (M/s)
        # (1e-6 factor: unit conversion from the mA/m2 current densities — TODO confirm)
        cls.K_Na = SV_ratio / (Z_Na * FARADAY) * 1e-6  # Sodium
        cls.K_Ca = SV_ratio / (Z_Ca * FARADAY) * 1e-6  # Calcium

        return super(LeechPressure, cls).__new__(cls)

    # ------------------------------ States derivatives ------------------------------

    @classmethod
    def derStates(cls):
        # Extend parent gate kinetics with intracellular Na+/Ca2+ mass balance:
        # net inward flux (channel + pump currents) scaled by the K conversion factors
        return {**super().derStates(), **{
            'Nai': lambda Vm, x: -(cls.iNa(x['m'], x['h'], Vm, x['Nai']) +
                                   cls.iPumpNa(x['Nai'])) * cls.K_Na,
            'Cai': lambda Vm, x: -(cls.iCa(x['s'], Vm, x['Cai']) +
                                   cls.iPumpCa(x['Cai'])) * cls.K_Ca
        }}

    # ------------------------------ Steady states ------------------------------

    @classmethod
    def cinf(cls, Cai):
        ''' Steady-state open-probability of the c gate at a given [Ca2+]. '''
        return cls.alphaC(Cai) / (cls.alphaC(Cai) + cls.betaC)

    @classmethod
    def steadyStates(cls):
        lambda_dict = {**super().steadyStates(), **{
            'Nai': lambda _: cls.Nai0,
            'Cai': lambda _: cls.Cai0,
        }}
        # c gate equilibrates with the resting Calcium concentration
        lambda_dict['c'] = lambda _: cls.cinf(lambda_dict['Cai'](_))
        return lambda_dict

    # ------------------------------ Membrane currents ------------------------------

    @classmethod
    def iPumpNa(cls, Nai):
        ''' NaK-ATPase pump current: sigmoid saturation with submembrane [Na+] '''
        return cls.INaPmax / (1 + np.exp((cls.khalf_Na - Nai) / cls.ksteep_Na))  # mA/m2

    @classmethod
    def iPumpCa(cls, Cai):
        ''' Calcium pump current: linear in the deviation from resting [Ca2+] '''
        return cls.iCaS * (Cai - cls.Cai0) / 1.5  # mA/m2

    @classmethod
    def currents(cls):
        # NOTE(review): the pump's contribution to Vm is iPumpNa / 3 while the full
        # iPumpNa drives Na+ extrusion in derStates — presumably the 3 Na+ / cycle
        # stoichiometry of the original model; confirm against the paper.
        return {**super().currents(), **{
            'iPumpNa': lambda Vm, x: cls.iPumpNa(x['Nai']) / 3.,
            'iPumpCa': lambda Vm, x: cls.iPumpCa(x['Cai'])
        }}
# @addSonicFeatures
class LeechRetzius(LeechMech):
    ''' Leech Retzius neuron

        References:
        *<NAME>., <NAME>., <NAME>., and <NAME>. (2009). Summation of excitatory
        postsynaptic potentials in electrically-coupled neurones. Neuroscience 163, 202–212.*

        *ModelDB link: https://senselab.med.yale.edu/modeldb/ShowModel.cshtml?model=120910*

        iA current reference:
        *<NAME>., <NAME>., and <NAME>. (1992). Properties of two voltage-activated
        potassium currents in acutely isolated juvenile rat dentate gyrus granule cells.
        J. Neurophysiol. 68, 2086–2099.*

        NOTE(review): this class is currently disabled (decorator commented out).
        The inherited c-gate kinetics reference a 'Cai' state that this class does
        not declare (it only defines a fixed Cai attribute), and the inherited
        steadyStates() provides no 'c' entry — both need resolving before enabling.
    '''

    # Neuron name
    name = 'LeechR'

    # ------------------------------ Biophysical parameters ------------------------------

    # Resting parameters
    Cm0 = 5e-2  # Membrane capacitance (F/m2)
    Vm0 = -44.45  # Membrane resting potential (mV)

    # Reversal potentials (mV)
    ENa = 50.0  # Sodium (from retztemp.ses file on ModelDB)
    EK = -79.0  # Potassium (from retztemp.ses file on ModelDB)
    ECa = 125.0  # Calcium (from cachdend.mod file on ModelDB)
    ELeak = -30.0  # Non-specific leakage (from leakdend.mod file on ModelDB)

    # Maximal channel conductances (S/m2)
    gNabar = 1250.0  # Sodium current
    gKdbar = 10.0  # Delayed-rectifier Potassium
    GAMax = 100.0  # Transient Potassium
    gCabar = 4.0  # Calcium current
    gKCabar = 130.0  # Calcium-dependent Potassium
    gLeak = 1.25  # Non-specific leakage

    # Ionic concentrations (M)
    Cai = 5e-8  # Intracellular Calcium (from retztemp.ses file)

    # Additional parameters
    Vhalf = -73.1  # half-activation voltage (mV)

    # ------------------------------ States names & descriptions ------------------------------
    states = {
        'm': 'iNa activation gate',
        'h': 'iNa inactivation gate',
        'n': 'iKd gate',
        's': 'iCa gate',
        'c': 'iKCa gate',
        'a': 'iA activation gate',
        'b': 'iA inactivation gate',
    }

    # ------------------------------ Gating states kinetics ------------------------------

    @staticmethod
    def ainf(Vm):
        ''' Steady-state open-probability of the iA activation gate. '''
        Vth = -55.0  # mV
        return 0 if Vm <= Vth else min(1, 2 * (Vm - Vth)**3 / ((11 - Vth)**3 + (Vm - Vth)**3))

    @classmethod
    def taua(cls, Vm):
        ''' Voltage-dependent time constant of the iA activation gate (s). '''
        x = -1.5 * (Vm - cls.Vhalf) * 1e-3 * FARADAY / (Rg * cls.T)  # [-]
        alpha = np.exp(x)  # ms-1
        beta = np.exp(0.7 * x)  # ms-1
        return max(0.5, beta / (0.3 * (1 + alpha))) * 1e-3  # s

    @classmethod
    def binf(cls, Vm):
        ''' Steady-state open-probability of the iA inactivation gate. '''
        return 1. / (1 + np.exp((cls.Vhalf - Vm) / -6.3))

    @classmethod
    def taub(cls, Vm):
        ''' Voltage-dependent time constant of the iA inactivation gate (s). '''
        x = 2 * (Vm - cls.Vhalf) * 1e-3 * FARADAY / (Rg * cls.T)  # [-]
        alpha = np.exp(x)  # ms-1
        beta = np.exp(0.65 * x)  # ms-1
        return max(7.5, beta / (0.02 * (1 + alpha))) * 1e-3  # s

    # ------------------------------ States derivatives ------------------------------

    @classmethod
    def derStates(cls):
        # FIX: the previous signature derStates(cls, Vm, states) broke the family
        # convention — every other derStates() in this file takes no arguments and
        # returns a dict of (Vm, x) lambdas, so super().derStates(Vm, states)
        # raised a TypeError. Aligned with the zero-argument protocol.
        return {**super().derStates(), **{
            'a': lambda Vm, x: (cls.ainf(Vm) - x['a']) / cls.taua(Vm),
            'b': lambda Vm, x: (cls.binf(Vm) - x['b']) / cls.taub(Vm)
        }}

    # ------------------------------ Steady states ------------------------------

    @classmethod
    def steadyStates(cls):
        return {**super().steadyStates(), **{
            'a': lambda Vm: cls.ainf(Vm),
            'b': lambda Vm: cls.binf(Vm)
        }}

    # ------------------------------ Membrane currents ------------------------------

    @classmethod
    def iA(cls, a, b, Vm):
        ''' Transient Potassium current '''
        return cls.GAMax * a * b * (Vm - cls.EK)  # mA/m2

    @classmethod
    def currents(cls):
        return {**super().currents(), **{
            'iA': lambda Vm, x: cls.iA(x['a'], x['b'], Vm)
        }}
| [
"functools.partialmethod",
"numpy.exp"
] | [((5582, 5638), 'functools.partialmethod', 'partialmethod', (['_xinf'], {'halfmax': '(-35.0)', 'slope': '(-5.0)', 'power': '(1)'}), '(_xinf, halfmax=-35.0, slope=-5.0, power=1)\n', (5595, 5638), False, 'from functools import partialmethod\n'), ((5650, 5705), 'functools.partialmethod', 'partialmethod', (['_xinf'], {'halfmax': '(-50.0)', 'slope': '(9.0)', 'power': '(2)'}), '(_xinf, halfmax=-50.0, slope=9.0, power=2)\n', (5663, 5705), False, 'from functools import partialmethod\n'), ((5717, 5792), 'functools.partialmethod', 'partialmethod', (['_taux'], {'halfmax': '(-36.0)', 'slope': '(3.5)', 'tauMax': '(0.014)', 'tauMin': '(0.0002)'}), '(_taux, halfmax=-36.0, slope=3.5, tauMax=0.014, tauMin=0.0002)\n', (5730, 5792), False, 'from functools import partialmethod\n'), ((5806, 5862), 'functools.partialmethod', 'partialmethod', (['_xinf'], {'halfmax': '(-22.0)', 'slope': '(-9.0)', 'power': '(1)'}), '(_xinf, halfmax=-22.0, slope=-9.0, power=1)\n', (5819, 5862), False, 'from functools import partialmethod\n'), ((5874, 5949), 'functools.partialmethod', 'partialmethod', (['_taux'], {'halfmax': '(-10.0)', 'slope': '(10.0)', 'tauMax': '(0.006)', 'tauMin': '(0.001)'}), '(_taux, halfmax=-10.0, slope=10.0, tauMax=0.006, tauMin=0.001)\n', (5887, 5949), False, 'from functools import partialmethod\n'), ((5963, 6019), 'functools.partialmethod', 'partialmethod', (['_xinf'], {'halfmax': '(-10.0)', 'slope': '(-2.8)', 'power': '(1)'}), '(_xinf, halfmax=-10.0, slope=-2.8, power=1)\n', (5976, 6019), False, 'from functools import partialmethod\n'), ((20262, 20271), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (20268, 20271), True, 'import numpy as np\n'), ((20295, 20310), 'numpy.exp', 'np.exp', (['(0.7 * x)'], {}), '(0.7 * x)\n', (20301, 20310), True, 'import numpy as np\n'), ((20611, 20620), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (20617, 20620), True, 'import numpy as np\n'), ((20644, 20660), 'numpy.exp', 'np.exp', (['(0.65 * x)'], {}), '(0.65 * x)\n', (20650, 20660), True, 'import 
numpy as np\n'), ((10371, 10394), 'numpy.exp', 'np.exp', (['(-(Vm + 53) / 18)'], {}), '(-(Vm + 53) / 18)\n', (10377, 10394), True, 'import numpy as np\n'), ((10470, 10493), 'numpy.exp', 'np.exp', (['(-(Vm + 58) / 18)'], {}), '(-(Vm + 58) / 18)\n', (10476, 10493), True, 'import numpy as np\n'), ((10979, 11002), 'numpy.exp', 'np.exp', (['(-(Vm + 48) / 35)'], {}), '(-(Vm + 48) / 35)\n', (10985, 11002), True, 'import numpy as np\n'), ((11190, 11213), 'numpy.exp', 'np.exp', (['(-(Vm + 25) / 10)'], {}), '(-(Vm + 25) / 10)\n', (11196, 11213), True, 'import numpy as np\n'), ((17457, 17501), 'numpy.exp', 'np.exp', (['((cls.khalf_Na - Nai) / cls.ksteep_Na)'], {}), '((cls.khalf_Na - Nai) / cls.ksteep_Na)\n', (17463, 17501), True, 'import numpy as np\n'), ((20449, 20480), 'numpy.exp', 'np.exp', (['((cls.Vhalf - Vm) / -6.3)'], {}), '((cls.Vhalf - Vm) / -6.3)\n', (20455, 20480), True, 'import numpy as np\n'), ((3718, 3748), 'numpy.exp', 'np.exp', (['((Vm - halfmax) / slope)'], {}), '((Vm - halfmax) / slope)\n', (3724, 3748), True, 'import numpy as np\n'), ((4303, 4333), 'numpy.exp', 'np.exp', (['((Vm - halfmax) / slope)'], {}), '((Vm - halfmax) / slope)\n', (4309, 4333), True, 'import numpy as np\n'), ((10269, 10292), 'numpy.exp', 'np.exp', (['(-(Vm + 28) / 15)'], {}), '(-(Vm + 28) / 15)\n', (10275, 10292), True, 'import numpy as np\n'), ((10761, 10784), 'numpy.exp', 'np.exp', (['(-(Vm + 23) / 14)'], {}), '(-(Vm + 23) / 14)\n', (10767, 10784), True, 'import numpy as np\n'), ((10879, 10901), 'numpy.exp', 'np.exp', (['(-(Vm - 17) / 8)'], {}), '(-(Vm - 17) / 8)\n', (10885, 10901), True, 'import numpy as np\n'), ((11090, 11112), 'numpy.exp', 'np.exp', (['(-(Vm - 20) / 5)'], {}), '(-(Vm - 20) / 5)\n', (11096, 11112), True, 'import numpy as np\n')] |
import mne
import numpy as np
from config import fname, subject_id, n_jobs
# Open the cumulative analysis report for this subject
report = mne.open_report(fname.report)

# Read longer epochs, keeping MEG channels only
epochs = mne.read_epochs(fname.epochs_long).pick_types(meg=True)

# Compute Cross-Spectral Density matrices over the beta band
# (9 log-spaced frequencies between 12 and 30 Hz)
freqs = np.logspace(np.log10(12), np.log10(30), 9)
csd = mne.time_frequency.csd_morlet(epochs, freqs, tmin=-1, tmax=1.5, decim=5, n_jobs=n_jobs)
csd_baseline = mne.time_frequency.csd_morlet(epochs, freqs, tmin=-1, tmax=0, decim=5, n_jobs=n_jobs)
# ERS activity starts at 0.5 seconds after stimulus onset
csd_ers = mne.time_frequency.csd_morlet(epochs, freqs, tmin=0.5, tmax=1.5, decim=5, n_jobs=n_jobs)

# Collapse each CSD across the computed frequencies
csd = csd.mean()
csd_baseline = csd_baseline.mean()
csd_ers = csd_ers.mean()

# Compute DICS beamformer to localize ERS
# (filter built on whole-window CSD, baseline CSD used as noise reference)
fwd = mne.read_forward_solution(fname.fwd)
inv = mne.beamformer.make_dics(epochs.info, fwd, csd, noise_csd=csd_baseline)

# Compute source power in the baseline and ERS windows
stc_baseline, _ = mne.beamformer.apply_dics_csd(csd_baseline, inv)
stc_ers, _ = mne.beamformer.apply_dics_csd(csd_ers, inv)
stc_baseline.subject = subject_id
stc_ers.subject = subject_id

# Normalize with baseline power and take the log of the power ratio
stc_ers /= stc_baseline
stc_ers.data = np.log(stc_ers.data)

# Save source estimate (native and NIfTI volume formats)
stc_ers.save(fname.stc_dics)
stc_ers.save_as_volume(fname.nii_dics, src=fwd['src'])

# Add a thresholded source plot to the report and write it out
fig = stc_ers.plot(subject=subject_id, subjects_dir=fname.subjects_dir, src=fwd['src'],
                   clim=dict(kind='percent', lims=[99, 99.5, 100]))
report.add_figs_to_section(fig, 'DICS Source estimate', 'Source level', replace=True)
report.save(fname.report_html, overwrite=True, open_browser=False)
| [
"numpy.log",
"mne.open_report",
"numpy.log10",
"mne.time_frequency.csd_morlet",
"mne.read_forward_solution",
"mne.beamformer.apply_dics_csd",
"mne.beamformer.make_dics",
"mne.read_epochs"
] | [((86, 115), 'mne.open_report', 'mne.open_report', (['fname.report'], {}), '(fname.report)\n', (101, 115), False, 'import mne\n'), ((303, 394), 'mne.time_frequency.csd_morlet', 'mne.time_frequency.csd_morlet', (['epochs', 'freqs'], {'tmin': '(-1)', 'tmax': '(1.5)', 'decim': '(5)', 'n_jobs': 'n_jobs'}), '(epochs, freqs, tmin=-1, tmax=1.5, decim=5,\n n_jobs=n_jobs)\n', (332, 394), False, 'import mne\n'), ((406, 495), 'mne.time_frequency.csd_morlet', 'mne.time_frequency.csd_morlet', (['epochs', 'freqs'], {'tmin': '(-1)', 'tmax': '(0)', 'decim': '(5)', 'n_jobs': 'n_jobs'}), '(epochs, freqs, tmin=-1, tmax=0, decim=5,\n n_jobs=n_jobs)\n', (435, 495), False, 'import mne\n'), ((560, 652), 'mne.time_frequency.csd_morlet', 'mne.time_frequency.csd_morlet', (['epochs', 'freqs'], {'tmin': '(0.5)', 'tmax': '(1.5)', 'decim': '(5)', 'n_jobs': 'n_jobs'}), '(epochs, freqs, tmin=0.5, tmax=1.5, decim=5,\n n_jobs=n_jobs)\n', (589, 652), False, 'import mne\n'), ((776, 812), 'mne.read_forward_solution', 'mne.read_forward_solution', (['fname.fwd'], {}), '(fname.fwd)\n', (801, 812), False, 'import mne\n'), ((819, 890), 'mne.beamformer.make_dics', 'mne.beamformer.make_dics', (['epochs.info', 'fwd', 'csd'], {'noise_csd': 'csd_baseline'}), '(epochs.info, fwd, csd, noise_csd=csd_baseline)\n', (843, 890), False, 'import mne\n'), ((933, 981), 'mne.beamformer.apply_dics_csd', 'mne.beamformer.apply_dics_csd', (['csd_baseline', 'inv'], {}), '(csd_baseline, inv)\n', (962, 981), False, 'import mne\n'), ((995, 1038), 'mne.beamformer.apply_dics_csd', 'mne.beamformer.apply_dics_csd', (['csd_ers', 'inv'], {}), '(csd_ers, inv)\n', (1024, 1038), False, 'import mne\n'), ((1175, 1195), 'numpy.log', 'np.log', (['stc_ers.data'], {}), '(stc_ers.data)\n', (1181, 1195), True, 'import numpy as np\n'), ((266, 278), 'numpy.log10', 'np.log10', (['(12)'], {}), '(12)\n', (274, 278), True, 'import numpy as np\n'), ((280, 292), 'numpy.log10', 'np.log10', (['(30)'], {}), '(30)\n', (288, 292), True, 'import numpy as 
np\n'), ((147, 181), 'mne.read_epochs', 'mne.read_epochs', (['fname.epochs_long'], {}), '(fname.epochs_long)\n', (162, 181), False, 'import mne\n')] |
# -*- coding: utf-8 -*-
import logging
import os
import cfg
import numpy as np
import torch
import torch.nn as nn
from methods import DigitsDA
from models import Classifier1, SVHN_generator, USPS_generator, weights_init
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from tqdm import tqdm
from utils import BalancedBatchSampler
# Share tensors between DataLoader workers via the filesystem to avoid
# "too many open files" errors with file-descriptor-based sharing
torch.multiprocessing.set_sharing_strategy("file_system")
def get_dataset_size28(dataset="mnist", data_dir="./data"):
    """Return (train, test) grayscale datasets resized to 28x28 and normalized to [-1, 1].

    :param dataset: one of "mnist" or "usps"
    :param data_dir: root directory for dataset downloads
    :raises ValueError: for any other dataset name (previously this fell through
        silently and crashed later with an UnboundLocalError)
    """
    trans = transforms.Compose(
        # Normalize takes mean/std per channel; single grayscale channel here
        [transforms.Resize((28, 28)), transforms.ToTensor(), transforms.Normalize(0.5, 0.5)]
    )
    if dataset == "mnist":
        train_ds = datasets.MNIST(data_dir, train=True, download=True, transform=trans)
        test_ds = datasets.MNIST(data_dir, train=False, download=True, transform=trans)
    elif dataset == "usps":
        train_ds = datasets.USPS(data_dir, train=True, download=True, transform=trans)
        test_ds = datasets.USPS(data_dir, train=False, download=True, transform=trans)
    else:
        raise ValueError(f"unsupported dataset: {dataset!r}")
    return train_ds, test_ds
def get_dataset_size32(dataset="mnist", data_dir="./data"):
    """Return (train, test) 3-channel datasets resized to 32x32 and normalized to [-1, 1].

    MNIST images are converted to RGB so they match SVHN's channel count.

    :param dataset: one of "mnist" or "svhn"
    :param data_dir: root directory for dataset downloads
    :raises ValueError: for any other dataset name (previously this fell through
        silently and crashed later with an UnboundLocalError)
    """
    if dataset == "mnist":
        trans = transforms.Compose(
            [
                transforms.Resize(32),
                transforms.Lambda(lambda x: x.convert("RGB")),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            ]
        )
        train_ds = datasets.MNIST(data_dir, train=True, download=True, transform=trans)
        test_ds = datasets.MNIST(data_dir, train=False, download=True, transform=trans)
    elif dataset == "svhn":
        trans = transforms.Compose(
            [transforms.Resize(32), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
        )
        train_ds = datasets.SVHN(data_dir, split="train", download=True, transform=trans)
        test_ds = datasets.SVHN(data_dir, split="test", download=True, transform=trans)
    else:
        raise ValueError(f"unsupported dataset: {dataset!r}")
    return train_ds, test_ds
def get_dataloader(source, target, data_dir, batch_size, num_workers=0):
    """Build train/test dataloaders for a source->target domain-adaptation pair.

    The image size follows the pair: any pair involving SVHN is loaded at
    32x32 RGB, any pair involving USPS at 28x28 grayscale. The source train
    loader draws class-balanced batches.

    :return: (source_train_dl, target_train_dl, source_test_dl, target_test_dl)
    :raises ValueError: if the pair involves neither SVHN nor USPS (previously
        this fell through silently and crashed with an UnboundLocalError)
    """
    if source == "svhn" or target == "svhn":
        source_train_ds, source_test_ds = get_dataset_size32(source, data_dir)
        target_train_ds, target_test_ds = get_dataset_size32(target, data_dir)
    elif source == "usps" or target == "usps":
        source_train_ds, source_test_ds = get_dataset_size28(source, data_dir)
        target_train_ds, target_test_ds = get_dataset_size28(target, data_dir)
    else:
        raise ValueError(f"unsupported domain pair: {source!r} -> {target!r}")

    # Collect source labels once to build the class-balanced batch sampler
    source_labels = torch.zeros(len(source_train_ds))
    for i, data in tqdm(enumerate(source_train_ds)):
        source_labels[i] = data[1]
    source_train_sampler = BalancedBatchSampler(source_labels, batch_size=batch_size)

    source_train_dl = DataLoader(source_train_ds, batch_sampler=source_train_sampler, num_workers=num_workers)
    target_train_dl = DataLoader(target_train_ds, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    source_test_dl = DataLoader(source_test_ds, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    target_test_dl = DataLoader(target_test_ds, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return source_train_dl, target_train_dl, source_test_dl, target_test_dl
def main():
    """Entry point: parse CLI arguments, build dataloaders, train the
    domain-adaptation model and log final source/target accuracies."""
    args = cfg.parse_args()

    # Logging config: run directory name encodes the method and hyperparameters
    # ("b" prefix marks batch-of-mini-batches (BoMb) mode)
    if args.use_bomb:
        prefix = "b"
    else:
        prefix = ""
    description = f"{prefix}{args.method}_{args.source_ds}_to_{args.target_ds}"
    description += f"_k{args.k}_m{args.mbsize}_lr{args.lr}_epsilon{args.epsilon}_be{args.batch_epsilon}_mass{args.mass}_tau{args.tau}"
    base_dir = "snapshot/"
    out_dir = os.path.join(base_dir, description)
    os.makedirs(out_dir, exist_ok=True)
    log_file = os.path.join(out_dir, "log.txt")
    if os.path.exists(log_file):
        # Start each run with a fresh log
        os.remove(log_file)
    logging.basicConfig(
        filename=log_file,
        filemode="a",
        format="%(asctime)s %(message)s",
        datefmt="%m/%d/%Y %I:%M:%S %p",
        level=logging.INFO,
    )
    logger = logging.getLogger()
    logger.info(args)

    # Set up parameters: each batch holds k samples from each of mbsize classes
    batch_size = args.k * args.mbsize
    n_epoch = args.n_epochs
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    gpus = args.gpu_id.split(",")

    # Set random seed for reproducibility
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    # Get dataloaders
    source_train_dl, target_train_dl, source_test_dl, target_test_dl = get_dataloader(
        args.source_ds, args.target_ds, args.data_dir, batch_size, args.num_workers
    )

    # Train: pick the generator architecture matching the domain pair's image size
    if args.source_ds == "svhn" or args.target_ds == "svhn":
        model_g = SVHN_generator().cuda().apply(weights_init)
    elif args.source_ds == "usps" or args.target_ds == "usps":
        model_g = USPS_generator().cuda().apply(weights_init)
    model_f = Classifier1(nclass=args.nclass).cuda().apply(weights_init)
    if len(gpus) > 1:
        model_g = nn.DataParallel(model_g, device_ids=[int(i) for i in gpus])
        model_f = nn.DataParallel(model_f, device_ids=[int(i) for i in gpus])
    model_g.train()
    model_f.train()
    model_da = DigitsDA(
        model_g,
        model_f,
        n_class=args.nclass,
        logger=logger,
        out_dir=out_dir,
        eta1=args.eta1,
        eta2=args.eta2,
        epsilon=args.epsilon,
        batch_epsilon=args.batch_epsilon,
        mass=args.mass,
        tau=args.tau,
        test_interval=args.test_interval,
    )
    model_da.source_only(source_train_dl, lr=args.lr)  # Train on source domain only
    if args.use_bomb:
        # Use the stable version because the training loss is small
        model_da.fit_bomb2(
            source_train_dl,
            target_train_dl,
            target_test_dl,
            n_epochs=n_epoch,
            lr=args.lr,
            k=args.k,
            batch_size=batch_size,
            method=args.method,
        )
    else:
        model_da.fit(
            source_train_dl,
            target_train_dl,
            target_test_dl,
            n_epochs=n_epoch,
            lr=args.lr,
            k=args.k,
            batch_size=batch_size,
            method=args.method,
        )

    # Evaluate on both held-out sets
    source_acc = model_da.evaluate(source_test_dl)
    target_acc = model_da.evaluate(target_test_dl)
    logger.info("source_acc={}, target_acc={}".format(source_acc, target_acc))
# Run only when executed as a script, not on import
if __name__ == "__main__":
    main()
| [
"os.remove",
"numpy.random.seed",
"torchvision.transforms.Normalize",
"torchvision.datasets.SVHN",
"os.path.join",
"models.USPS_generator",
"torch.utils.data.DataLoader",
"torch.multiprocessing.set_sharing_strategy",
"os.path.exists",
"torch.manual_seed",
"cfg.parse_args",
"torchvision.dataset... | [((369, 426), 'torch.multiprocessing.set_sharing_strategy', 'torch.multiprocessing.set_sharing_strategy', (['"""file_system"""'], {}), "('file_system')\n", (411, 426), False, 'import torch\n'), ((2666, 2724), 'utils.BalancedBatchSampler', 'BalancedBatchSampler', (['source_labels'], {'batch_size': 'batch_size'}), '(source_labels, batch_size=batch_size)\n', (2686, 2724), False, 'from utils import BalancedBatchSampler\n'), ((2747, 2840), 'torch.utils.data.DataLoader', 'DataLoader', (['source_train_ds'], {'batch_sampler': 'source_train_sampler', 'num_workers': 'num_workers'}), '(source_train_ds, batch_sampler=source_train_sampler, num_workers\n =num_workers)\n', (2757, 2840), False, 'from torch.utils.data import DataLoader\n'), ((2858, 2951), 'torch.utils.data.DataLoader', 'DataLoader', (['target_train_ds'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': 'num_workers'}), '(target_train_ds, batch_size=batch_size, shuffle=True,\n num_workers=num_workers)\n', (2868, 2951), False, 'from torch.utils.data import DataLoader\n'), ((2969, 3062), 'torch.utils.data.DataLoader', 'DataLoader', (['source_test_ds'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers'}), '(source_test_ds, batch_size=batch_size, shuffle=False,\n num_workers=num_workers)\n', (2979, 3062), False, 'from torch.utils.data import DataLoader\n'), ((3080, 3173), 'torch.utils.data.DataLoader', 'DataLoader', (['target_test_ds'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers'}), '(target_test_ds, batch_size=batch_size, shuffle=False,\n num_workers=num_workers)\n', (3090, 3173), False, 'from torch.utils.data import DataLoader\n'), ((3272, 3288), 'cfg.parse_args', 'cfg.parse_args', ([], {}), '()\n', (3286, 3288), False, 'import cfg\n'), ((3640, 3675), 'os.path.join', 'os.path.join', (['base_dir', 'description'], {}), '(base_dir, description)\n', (3652, 3675), False, 'import os\n'), ((3680, 3715), 'os.makedirs', 
'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (3691, 3715), False, 'import os\n'), ((3731, 3763), 'os.path.join', 'os.path.join', (['out_dir', '"""log.txt"""'], {}), "(out_dir, 'log.txt')\n", (3743, 3763), False, 'import os\n'), ((3771, 3795), 'os.path.exists', 'os.path.exists', (['log_file'], {}), '(log_file)\n', (3785, 3795), False, 'import os\n'), ((3829, 3977), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'log_file', 'filemode': '"""a"""', 'format': '"""%(asctime)s %(message)s"""', 'datefmt': '"""%m/%d/%Y %I:%M:%S %p"""', 'level': 'logging.INFO'}), "(filename=log_file, filemode='a', format=\n '%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=\n logging.INFO)\n", (3848, 3977), False, 'import logging\n'), ((4028, 4047), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (4045, 4047), False, 'import logging\n'), ((4275, 4300), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (4289, 4300), True, 'import numpy as np\n'), ((4305, 4333), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (4322, 4333), False, 'import torch\n'), ((5101, 5345), 'methods.DigitsDA', 'DigitsDA', (['model_g', 'model_f'], {'n_class': 'args.nclass', 'logger': 'logger', 'out_dir': 'out_dir', 'eta1': 'args.eta1', 'eta2': 'args.eta2', 'epsilon': 'args.epsilon', 'batch_epsilon': 'args.batch_epsilon', 'mass': 'args.mass', 'tau': 'args.tau', 'test_interval': 'args.test_interval'}), '(model_g, model_f, n_class=args.nclass, logger=logger, out_dir=\n out_dir, eta1=args.eta1, eta2=args.eta2, epsilon=args.epsilon,\n batch_epsilon=args.batch_epsilon, mass=args.mass, tau=args.tau,\n test_interval=args.test_interval)\n', (5109, 5345), False, 'from methods import DigitsDA\n'), ((671, 739), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['data_dir'], {'train': '(True)', 'download': '(True)', 'transform': 'trans'}), '(data_dir, train=True, download=True, 
transform=trans)\n', (685, 739), False, 'from torchvision import datasets, transforms\n'), ((758, 827), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['data_dir'], {'train': '(False)', 'download': '(True)', 'transform': 'trans'}), '(data_dir, train=False, download=True, transform=trans)\n', (772, 827), False, 'from torchvision import datasets, transforms\n'), ((1455, 1523), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['data_dir'], {'train': '(True)', 'download': '(True)', 'transform': 'trans'}), '(data_dir, train=True, download=True, transform=trans)\n', (1469, 1523), False, 'from torchvision import datasets, transforms\n'), ((1542, 1611), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['data_dir'], {'train': '(False)', 'download': '(True)', 'transform': 'trans'}), '(data_dir, train=False, download=True, transform=trans)\n', (1556, 1611), False, 'from torchvision import datasets, transforms\n'), ((3805, 3824), 'os.remove', 'os.remove', (['log_file'], {}), '(log_file)\n', (3814, 3824), False, 'import os\n'), ((530, 557), 'torchvision.transforms.Resize', 'transforms.Resize', (['(28, 28)'], {}), '((28, 28))\n', (547, 557), False, 'from torchvision import datasets, transforms\n'), ((559, 580), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (578, 580), False, 'from torchvision import datasets, transforms\n'), ((582, 612), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (602, 612), False, 'from torchvision import datasets, transforms\n'), ((875, 942), 'torchvision.datasets.USPS', 'datasets.USPS', (['data_dir'], {'train': '(True)', 'download': '(True)', 'transform': 'trans'}), '(data_dir, train=True, download=True, transform=trans)\n', (888, 942), False, 'from torchvision import datasets, transforms\n'), ((961, 1029), 'torchvision.datasets.USPS', 'datasets.USPS', (['data_dir'], {'train': '(False)', 'download': '(True)', 'transform': 'trans'}), '(data_dir, train=False, 
download=True, transform=trans)\n', (974, 1029), False, 'from torchvision import datasets, transforms\n'), ((1820, 1890), 'torchvision.datasets.SVHN', 'datasets.SVHN', (['data_dir'], {'split': '"""train"""', 'download': '(True)', 'transform': 'trans'}), "(data_dir, split='train', download=True, transform=trans)\n", (1833, 1890), False, 'from torchvision import datasets, transforms\n'), ((1909, 1978), 'torchvision.datasets.SVHN', 'datasets.SVHN', (['data_dir'], {'split': '"""test"""', 'download': '(True)', 'transform': 'trans'}), "(data_dir, split='test', download=True, transform=trans)\n", (1922, 1978), False, 'from torchvision import datasets, transforms\n'), ((1215, 1236), 'torchvision.transforms.Resize', 'transforms.Resize', (['(32)'], {}), '(32)\n', (1232, 1236), False, 'from torchvision import datasets, transforms\n'), ((1317, 1338), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1336, 1338), False, 'from torchvision import datasets, transforms\n'), ((1356, 1410), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (1376, 1410), False, 'from torchvision import datasets, transforms\n'), ((1689, 1710), 'torchvision.transforms.Resize', 'transforms.Resize', (['(32)'], {}), '(32)\n', (1706, 1710), False, 'from torchvision import datasets, transforms\n'), ((1712, 1733), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1731, 1733), False, 'from torchvision import datasets, transforms\n'), ((1735, 1789), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (1755, 1789), False, 'from torchvision import datasets, transforms\n'), ((4809, 4840), 'models.Classifier1', 'Classifier1', ([], {'nclass': 'args.nclass'}), '(nclass=args.nclass)\n', (4820, 4840), False, 'from models import Classifier1, SVHN_generator, USPS_generator, 
weights_init\n'), ((4626, 4642), 'models.SVHN_generator', 'SVHN_generator', ([], {}), '()\n', (4640, 4642), False, 'from models import Classifier1, SVHN_generator, USPS_generator, weights_init\n'), ((4751, 4767), 'models.USPS_generator', 'USPS_generator', ([], {}), '()\n', (4765, 4767), False, 'from models import Classifier1, SVHN_generator, USPS_generator, weights_init\n')] |
'''
Reference : https://github.com/graykode/nlp-tutorial
'''
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import os
def get_sinusoid_encoding_table(n_position, d_model):
    """Build the fixed sinusoidal position-encoding table.

    PE(pos, 2i)   = sin(pos / 10000^(2i/d_model))
    PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model))

    Returns a FloatTensor of shape [n_position, d_model].
    """
    positions = np.arange(n_position)[:, None]          # [n_position, 1]
    dims = np.arange(d_model)[None, :]                # [1, d_model]
    # angle table, broadcast to [n_position, d_model]
    angles = positions / np.power(10000, 2 * (dims // 2) / d_model)
    table = np.zeros((n_position, d_model))
    table[:, 0::2] = np.sin(angles[:, 0::2])            # even dims -> sin
    table[:, 1::2] = np.cos(angles[:, 1::2])            # odd dims  -> cos
    return torch.FloatTensor(table)
def get_attn_pad_mask(seq_q, seq_k):
    """Return a boolean padding mask of shape [batch, len_q, len_k].

    Positions where seq_k equals 0 (the PAD token id) are True, meaning
    they must be masked out of the attention scores.
    """
    batch, len_q = seq_q.size()
    _, len_k = seq_k.size()
    # [batch, 1, len_k]: True wherever the key token is PAD (id 0)
    key_is_pad = seq_k.data.eq(0).unsqueeze(1)
    # broadcast the same key mask over every query position
    return key_is_pad.expand(batch, len_q, len_k)
class ScaledDotProductAttention(nn.Module):
    """Compute softmax(Q.K^T / sqrt(d_k)).V with a padding mask.

    Returns both the attended context and the attention weights (the
    latter are useful for visualisation).
    """
    def __init__(self):
        super(ScaledDotProductAttention, self).__init__()

    def forward(self, Q, K, V, attn_mask):
        # similarity of every query with every key, scaled by sqrt(d_k)
        # Q [b, h, len_q, d_k] x K^T [b, h, d_k, len_k] -> [b, h, len_q, len_k]
        scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(d_k)
        # padding positions get a large negative score so their softmax is ~0
        # (also keeps the backward pass well-behaved)
        scores.masked_fill_(attn_mask, -1e9)
        # normalise over the key axis
        attn = scores.softmax(dim=-1)
        # weighted sum of the values: [b, h, len_q, d_v]
        context = torch.matmul(attn, V)
        return context, attn
class MultiHeadAttention(nn.Module):
    """Multi-head attention: project Q/K/V, run scaled dot-product attention
    per head, merge the heads, project back, then apply a residual connection
    followed by layer normalisation."""
    def __init__(self):
        super(MultiHeadAttention, self).__init__()
        ## Q, K, V projections: with d_k = d_v = 64 and n_heads = 8 the
        ## projected width is 8 * 64 = 512
        self.W_Q = nn.Linear(d_model, d_k * n_heads)
        self.W_K = nn.Linear(d_model, d_k * n_heads)
        self.W_V = nn.Linear(d_model, d_v * n_heads)
        self.linear = nn.Linear(n_heads * d_v, d_model)
        ## LayerNorm normalises the inputs of the layer to mean 0, variance 1
        self.layer_norm = nn.LayerNorm(d_model)
    def forward(self, Q, K, V, attn_mask):
        ## Q, K, V are all the same enc_inputs, shape [batch_size x seq_len x embed_dim];
        ## d_model is embed_dim
        residual, batch_size = Q, Q.size(0)
        # (B, S, D) -proj-> (B, S, D) -split-> (B, S, H, W) -trans-> (B, H, S, W)
        ## Apply the W_Q, W_K, W_V projections.  E.g. an input of [1,5,512]
        ## stays [1,5,512] after W_Q; to form the heads (n_heads=8) view()
        ## reshapes it to [1,5,8,64], then transpose gives [1,8,5,64].
        # q_s: [batch_size x n_heads x len_q x d_k]
        q_s = self.W_Q(Q).view(batch_size, -1, n_heads, d_k).transpose(1,2)
        # k_s: [batch_size x n_heads x len_k x d_k]
        k_s = self.W_K(K).view(batch_size, -1, n_heads, d_k).transpose(1,2)
        # v_s: [batch_size x n_heads x len_k x d_v]
        v_s = self.W_V(V).view(batch_size, -1, n_heads, d_v).transpose(1,2)
        ## unsqueeze(1) inserts a head dimension, e.g. [1,5,5] -> [1,1,5,5];
        ## repeat() then copies the mask n_heads times along that dimension.
        # attn_mask : [batch_size x n_heads x len_q x len_k]
        attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1)
        # context: [batch_size x n_heads x len_q x d_v], attn: [batch_size x n_heads x len_q(=len_k) x len_k(=len_q)]
        ## run scaled dot-product attention; attn is kept for visualisation
        context, attn = ScaledDotProductAttention()(q_s, k_s, v_s, attn_mask)
        ## Transpose back to [1,5,8,64], then merge the heads into [1,5,512].
        ## view() operates on a contiguous buffer, hence the contiguous() call.
        # context: [batch_size x len_q x n_heads * d_v]
        context = context.transpose(1, 2).contiguous().view(batch_size, -1, n_heads * d_v)
        output = self.linear(context)
        return self.layer_norm(output + residual)# output: [batch_size x len_q x d_model]
class PoswiseFeedForwardNet(nn.Module):
    """Position-wise feed-forward layer: two kernel-size-1 convolutions
    (i.e. per-token linear maps d_model -> d_ff -> d_model) with a ReLU in
    between, plus a residual connection and layer normalisation."""
    def __init__(self):
        super(PoswiseFeedForwardNet, self).__init__()
        # a conv1d with kernel_size=1, stride 1 and no padding acts as a
        # per-position fully connected layer
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
        # LayerNorm normalises the layer inputs to mean 0, variance 1
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, inputs):
        # inputs : [batch_size, len_q, d_model]; conv1d expects channels second
        skip = inputs
        hidden = nn.ReLU()(self.conv1(inputs.transpose(1, 2)))
        projected = self.conv2(hidden).transpose(1, 2)
        return self.layer_norm(projected + skip)
class EncoderLayer(nn.Module):
    """One encoder block: multi-head self-attention followed by the
    position-wise feed-forward network."""
    def __init__(self):
        super(EncoderLayer, self).__init__()
        self.enc_self_attn = MultiHeadAttention()
        self.pos_ffn = PoswiseFeedForwardNet()

    def forward(self, enc_inputs, enc_self_attn_mask):
        # self-attention: the same tensor serves as Q, K and V
        attended = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs,
                                      enc_self_attn_mask)
        # enc_outputs: [batch_size x len_q x d_model]
        return self.pos_ffn(attended)
class Encoder(nn.Module):
    """Token embedding + frozen sinusoidal embedding + n_layers encoder blocks."""
    def __init__(self, vocab_size):
        super(Encoder, self).__init__()
        # learnable token embedding lookup table: (vocab size, embedding dim)
        self.src_emb = nn.Embedding(vocab_size, d_model)
        # fixed (frozen) sinusoidal table, one extra row beyond the vocab
        self.pos_emb = nn.Embedding.from_pretrained(
            get_sinusoid_encoding_table(vocab_size + 1, d_model), freeze=True)
        self.layers = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])

    def forward(self, enc_inputs):
        # enc_inputs : [batch_size x source_len]
        # NOTE: positions are looked up by token id (pos_emb(enc_inputs)),
        # not by sequence position — this mirrors the original implementation.
        enc_outputs = self.src_emb(enc_inputs) + self.pos_emb(enc_inputs)
        # record PAD positions so the attention replaces them with a large
        # negative score (avoids zero gradients through softmax)
        pad_mask = get_attn_pad_mask(enc_inputs, enc_inputs)
        for block in self.layers:
            enc_outputs = block(enc_outputs, pad_mask)
        return enc_outputs
class TransformerEncoder(nn.Module):
    """Encoder-only transformer classifier: flattens the encoder output of
    the whole sequence and projects it to ``target_size`` logits."""
    def __init__(self, vocab_size, target_size, src_len):
        super(TransformerEncoder, self).__init__()
        self.encoder = Encoder(vocab_size)
        self.projection = nn.Linear(src_len * d_model, target_size, bias=False)

    def forward(self, enc_inputs):
        # enc_inputs: [batch_size, seq_len] token ids, e.g. [1, 5]
        enc_outputs = self.encoder(enc_inputs)   # [batch, seq_len, d_model]
        # flatten [batch, seq_len, d_model] -> [batch, seq_len * d_model]
        flat = enc_outputs.contiguous().view(enc_outputs.size(0), -1)
        # linear map to the desired number of output classes
        return self.projection(flat)
EmbeddingSize = 512
d_model = EmbeddingSize # Embedding Size (model/hidden dimension)
d_ff = 2048 # FeedForward hidden dimension
d_k = d_v = 64  # per-head dimension of K(=Q) and V
n_layers = 6  # number of stacked encoder layers
n_heads = 8  # number of heads in Multi-Head Attention
if __name__ == '__main__':
    sentences = ['ich mochte ein bier P', 'S i want a beer', 'i want a beer E']
    labels = ['ge','en','en']
    ## Featurize the text into token ids; labels are mapped straight to ids.
    ## nn.CrossEntropyLoss() expects a 2-D logits tensor and a 1-D target tensor.
    def make_batch(sentences,labels):
        assert(len(sentences)==len(labels))
        input_batch = [[word2id[t] for t in n.split()] for n in sentences]
        output_batch = [label2id[i] for i in labels]
        return torch.LongTensor(input_batch),torch.LongTensor(output_batch)
    ## Global things (filled in by train())
    word2id = None
    label2id = None
    ## parameters
    LR=0.001
    EPOCHS=20
    MAX_LEN = 5
    src_len = MAX_LEN # length of source
    tgt_len = 5 # length of target
    # device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    device='cpu'
    model = None
    def train(sentences, labels):
        # Builds the vocabulary/label maps, constructs the model and trains it.
        global word2id
        vocab_size = None
        global label2id
        target_size = None
        global model
        vocab = set((' '.join(sentences)).split())
        vocab = sorted(vocab)
        word2id = {w:i for i,w in enumerate(vocab)}
        vocab_size = len(word2id)
        label2id = {v:i for i,v in enumerate(sorted(set(labels)))}
        target_size = len(label2id)
        model = TransformerEncoder(vocab_size, target_size, src_len)
        model.to(device)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=LR)
        enc_inputs,output_batch = make_batch(sentences,labels)
        enc_inputs=enc_inputs.to(device)
        output_batch=output_batch.to(device)
        print(f'inputs:{enc_inputs.shape}',f'outputs:{output_batch.shape}')
        print('device:',enc_inputs.device)
        model.train()
        for epoch in range(EPOCHS):
            optimizer.zero_grad()
            outputs = model(enc_inputs)
            loss = criterion(outputs, output_batch)
            print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
            loss.backward()
            optimizer.step()
            # early stop once the training loss is small enough
            if loss<0.1:
                print('loss<0.1,break')
                break
    def predict(data_inputs):
        ## Always switch to model.eval() at test time:
        ## model.train() enables BatchNormalization and Dropout,
        ## model.eval() disables BatchNormalization and Dropout.
        input_batch = [[word2id[t] for t in n.split()] for n in data_inputs]
        id2label = {v:k for k,v in label2id.items()}
        if not os.path.exists('./model/torch_model.pkl'):
            print('model not exists')
        ## Test
        model.eval()
        predict = model(torch.LongTensor(input_batch))
        confidence = predict.softmax(1)
        # print(confidence)
        predict = predict.data.max(1, keepdim=True)[1]
        if len(data_inputs) == 1:
            # print(data_inputs,'->',id2label[predict.squeeze().item()])
            return {'label':id2label[predict.squeeze().item()],'confidence':confidence.max().item()}
        else:
            # print(data_inputs, '->', [id2label[i.item()] for i in predict.squeeze()])
            return [{'label':id2label[v.item()],'confidence':confidence[i].max().item()} for i,v in enumerate(predict.squeeze())]
    train(sentences,labels)
    r=predict(['ich mochte ein bier P'])
    # r=predict(sentences)
    print(r)
| [
"torch.nn.ReLU",
"torch.LongTensor",
"numpy.power",
"torch.nn.Embedding",
"torch.nn.Conv1d",
"torch.FloatTensor",
"torch.nn.CrossEntropyLoss",
"os.path.exists",
"torch.nn.LayerNorm",
"numpy.sin",
"torch.nn.Softmax",
"numpy.cos",
"torch.nn.Linear",
"torch.matmul",
"numpy.sqrt"
] | [((678, 709), 'numpy.sin', 'np.sin', (['sinusoid_table[:, 0::2]'], {}), '(sinusoid_table[:, 0::2])\n', (684, 709), True, 'import numpy as np\n'), ((750, 781), 'numpy.cos', 'np.cos', (['sinusoid_table[:, 1::2]'], {}), '(sinusoid_table[:, 1::2])\n', (756, 781), True, 'import numpy as np\n'), ((805, 838), 'torch.FloatTensor', 'torch.FloatTensor', (['sinusoid_table'], {}), '(sinusoid_table)\n', (822, 838), False, 'import torch\n'), ((1902, 1923), 'torch.matmul', 'torch.matmul', (['attn', 'V'], {}), '(attn, V)\n', (1914, 1923), False, 'import torch\n'), ((2136, 2169), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(d_k * n_heads)'], {}), '(d_model, d_k * n_heads)\n', (2145, 2169), True, 'import torch.nn as nn\n'), ((2189, 2222), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(d_k * n_heads)'], {}), '(d_model, d_k * n_heads)\n', (2198, 2222), True, 'import torch.nn as nn\n'), ((2242, 2275), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(d_v * n_heads)'], {}), '(d_model, d_v * n_heads)\n', (2251, 2275), True, 'import torch.nn as nn\n'), ((2298, 2331), 'torch.nn.Linear', 'nn.Linear', (['(n_heads * d_v)', 'd_model'], {}), '(n_heads * d_v, d_model)\n', (2307, 2331), True, 'import torch.nn as nn\n'), ((2398, 2419), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {}), '(d_model)\n', (2410, 2419), True, 'import torch.nn as nn\n'), ((4365, 4429), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'd_model', 'out_channels': 'd_ff', 'kernel_size': '(1)'}), '(in_channels=d_model, out_channels=d_ff, kernel_size=1)\n', (4374, 4429), True, 'import torch.nn as nn\n'), ((4451, 4515), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'd_ff', 'out_channels': 'd_model', 'kernel_size': '(1)'}), '(in_channels=d_ff, out_channels=d_model, kernel_size=1)\n', (4460, 4515), True, 'import torch.nn as nn\n'), ((4582, 4603), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {}), '(d_model)\n', (4594, 4603), True, 'import torch.nn as nn\n'), ((5570, 5603), 'torch.nn.Embedding', 
'nn.Embedding', (['vocab_size', 'd_model'], {}), '(vocab_size, d_model)\n', (5582, 5603), True, 'import torch.nn as nn\n'), ((6510, 6563), 'torch.nn.Linear', 'nn.Linear', (['(src_len * d_model)', 'target_size'], {'bias': '(False)'}), '(src_len * d_model, target_size, bias=False)\n', (6519, 6563), True, 'import torch.nn as nn\n'), ((8603, 8624), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (8622, 8624), True, 'import torch.nn as nn\n'), ((369, 414), 'numpy.power', 'np.power', (['(10000)', '(2 * (hid_idx // 2) / d_model)'], {}), '(10000, 2 * (hid_idx // 2) / d_model)\n', (377, 414), True, 'import numpy as np\n'), ((1544, 1556), 'numpy.sqrt', 'np.sqrt', (['d_k'], {}), '(d_k)\n', (1551, 1556), True, 'import numpy as np\n'), ((1795, 1813), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (1805, 1813), True, 'import torch.nn as nn\n'), ((4719, 4728), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4726, 4728), True, 'import torch.nn as nn\n'), ((7693, 7722), 'torch.LongTensor', 'torch.LongTensor', (['input_batch'], {}), '(input_batch)\n', (7709, 7722), False, 'import torch\n'), ((7723, 7753), 'torch.LongTensor', 'torch.LongTensor', (['output_batch'], {}), '(output_batch)\n', (7739, 7753), False, 'import torch\n'), ((9682, 9723), 'os.path.exists', 'os.path.exists', (['"""./model/torch_model.pkl"""'], {}), "('./model/torch_model.pkl')\n", (9696, 9723), False, 'import os\n'), ((9825, 9854), 'torch.LongTensor', 'torch.LongTensor', (['input_batch'], {}), '(input_batch)\n', (9841, 9854), False, 'import torch\n')] |
# readers.py provides functions to read spectrum files for data and
# metadata.
import pandas as pd
import numpy as np
from os.path import abspath, expanduser, splitext, basename, join, split
import glob
from collections import OrderedDict
import json
PICO_GPS_KEYS = "gps","GPS start","GPS"
class PiccoloFileError(Exception):
"""Error type for piccolo-related issues"""
pass
def _find_pico_dark(pico_light_path):
"""
Recent piccolo versions store dark and light spectra in different locations
Default naming conventions are a bit tricky (timestamp changes between
light and dark), so we need to check every dark file in the directory
"""
#easy case - there's just one dark and one light file, so they have
#the same time stamp
first_end = "0000.pico.light"
if pico_light_path.endswith(first_end):
return pico_light_path[:-len(first_end)]+"0000.pico.dark"
#harder case - there's multiple light files per dark, so find the dark
#with the closest timestamp before this one
#assume there's not that many dark files
dark_files = glob.glob(join(split(pico_light_path)[0],"*.dark"))
#insert our light file in here, its dark file will come immediately before
#it when sorted
dark_files.append(pico_light_path)
dark_files.sort()
dark_idx = dark_files.index(pico_light_path)-1
if dark_idx == -1:
raise PiccoloFileError("Unable to find .pico.dark file for {}"
.format(pico_light_path))
#TODO: It's still possible there's not a matching dark file
#(eg we've chosen the wrong dark file)
return dark_files[dark_idx]
def read_pico(filepath, read_data=True, read_metadata=True, verbose=False):
    """
    Read pico file for data and metadata.

    Parameters
    ----------
    filepath : str
        Path to a '.pico' (or '.pico.light') file.  For '.pico.light' files
        the matching '.pico.dark' file is located and its spectra merged in.
    read_data : bool
        Build the spectrum DataFrame when True.
    read_metadata : bool
        Build the metadata OrderedDict when True.
    verbose : bool
        Print the file name being read.

    Return
    ------
    2-tuple of (pd.DataFrame, OrderedDict) for data, metadata
    (either element is None when the corresponding flag is False).

    Raises
    ------
    PiccoloFileError
        When any of the four required spectra (up/down-welling x light/dark)
        is missing and read_data is True.
    """
    data = None
    metadata = None
    raw_metadata = {}
    with open(abspath(expanduser(filepath)), 'r') as f:
        if verbose:
            print('reading {}'.format(filepath))
        raw_metadata = json.load(f)
        # dark spectra are stored in a different file for some piccolo formats
        if filepath.endswith('.pico.light'):
            # BUGFIX: previously the dark file was opened "as f", shadowing
            # the outer handle, so metadata['file'] reported the dark file.
            with open(_find_pico_dark(filepath), 'r') as dark_f:
                dark_metadata = json.load(dark_f)
            raw_metadata['Spectra'] += dark_metadata['Spectra']
        # TODO: How to handle multiple spectrometers per file?
        # For now, just use the first one
        spectrometer = raw_metadata["Spectra"][0]["Metadata"]["name"]
        # the 4 spectra we need to get a complete measurement
        downwelling_light = None
        downwelling_dark = None
        upwelling_light = None
        upwelling_dark = None
        # figure out which of the 4 spectra we need
        for spectrum in raw_metadata["Spectra"]:
            meta = spectrum["Metadata"]
            if meta["name"] != spectrometer:
                continue
            if meta["Dark"] and meta["Direction"] == "Upwelling":
                upwelling_dark = spectrum
            elif meta["Dark"] and meta["Direction"] == "Downwelling":
                downwelling_dark = spectrum
            elif meta["Direction"] == "Upwelling":
                upwelling_light = spectrum
            elif meta["Direction"] == "Downwelling":
                downwelling_light = spectrum
        if read_data:
            if (downwelling_light is None or downwelling_dark is None or
                    upwelling_light is None or upwelling_dark is None):
                raise PiccoloFileError("Piccolo File missing necessary spectrum")
            # Pico always in raw counts; wavelengths come from the polynomial
            # calibration coefficients (stored lowest order first).
            wavelength_coeffs = downwelling_light["Metadata"][
                "WavelengthCalibrationCoefficients"]
            wavelength_idxs = range(len(downwelling_light["Pixels"]))
            wavelengths = np.poly1d(wavelength_coeffs[::-1])(wavelength_idxs)
            # TODO: How to get ref data for pico?
            columns = ("wavelength", "tgt_count", "ref_count",
                       "tgt_count_dark", "ref_count_dark")
            data = pd.DataFrame(
                columns=columns,
                data=np.array((wavelengths,
                                upwelling_light["Pixels"],
                                downwelling_light["Pixels"],
                                upwelling_dark["Pixels"],
                                downwelling_dark["Pixels"],
                                )).T
            )
        if read_metadata:
            metadata = OrderedDict()
            metadata['file'] = f.name
            metadata['instrument_type'] = spectrometer
            metadata['integration_time'] = downwelling_light["Metadata"]["IntegrationTime"]
            for gps_key in PICO_GPS_KEYS:
                if gps_key in downwelling_light:
                    # BUGFIX: previously always read the literal "gps" key and
                    # self-assigned 'gps_time_tgt' (a guaranteed KeyError);
                    # read the matched key from each spectrum instead.
                    metadata['gps_time_ref'] = downwelling_light.get(gps_key, {}).get("time", None)
                    if upwelling_light is not None:
                        metadata['gps_time_tgt'] = upwelling_light.get(gps_key, {}).get("time", None)
                    else:
                        metadata['gps_time_tgt'] = None
            metadata['wavelength_range'] = None
            if read_data:
                # BUGFIX: use the wavelength column, not the integer RangeIndex
                metadata['wavelength_range'] = (data["wavelength"].min(),
                                                data["wavelength"].max())
    return data, metadata
| [
"numpy.poly1d",
"json.load",
"numpy.array",
"collections.OrderedDict",
"os.path.split",
"os.path.expanduser"
] | [((2071, 2083), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2080, 2083), False, 'import json\n'), ((4346, 4359), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4357, 4359), False, 'from collections import OrderedDict\n'), ((2291, 2303), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2300, 2303), False, 'import json\n'), ((3744, 3778), 'numpy.poly1d', 'np.poly1d', (['wavelength_coeffs[::-1]'], {}), '(wavelength_coeffs[::-1])\n', (3753, 3778), True, 'import numpy as np\n'), ((1115, 1137), 'os.path.split', 'split', (['pico_light_path'], {}), '(pico_light_path)\n', (1120, 1137), False, 'from os.path import abspath, expanduser, splitext, basename, join, split\n'), ((1945, 1965), 'os.path.expanduser', 'expanduser', (['filepath'], {}), '(filepath)\n', (1955, 1965), False, 'from os.path import abspath, expanduser, splitext, basename, join, split\n'), ((4036, 4174), 'numpy.array', 'np.array', (["(wavelengths, upwelling_light['Pixels'], downwelling_light['Pixels'],\n upwelling_dark['Pixels'], downwelling_dark['Pixels'])"], {}), "((wavelengths, upwelling_light['Pixels'], downwelling_light[\n 'Pixels'], upwelling_dark['Pixels'], downwelling_dark['Pixels']))\n", (4044, 4174), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 13 12:21:09 2019
@author: reonid
mailto: <EMAIL>
Reads data from ASTRA res-file, result of modelling of code ASTRA*
*Automated System for TRansport Analysis (c) G.V.Pereverzev, P.N.Yushmanov
Res-file has the following general structure:
Signature
Text (Model + Log)
Header
Frames[]
Versions of Astra:
6.2.1 - OK
7.0 - OK ??? only a few files hes been tested
Example of using
res = ResFile("GG2")
# time signal
tt, yy = res.find_signal('<ne>')
plt.plot(tt, yy)
# profile one-by-one
rr, t, yy = res.find_profile('Te', time=0.00198729)
plt.plot(rr, yy)
"""
import os.path # os.path.getsize(path)
import numpy as np
import struct
from textwrap import wrap
#import matplotlib.pyplot as plt
# Fixed buffer/array sizes used by the ASTRA res-file format
ASTRA_NRD = 501
ASTRA_NRW = 128
ASTRA_NCONST = 256
ASTRA_NARRX = 67
ASTRA_NSBMX = 20
ASTRA_NRDX = 200
ASTRA_NTVAR = 25000
ASTRA_NTARR = 25000
ASTRA_NEQNS = 19
# 32-character marker that opens a res-file and separates its text sections
ASTRA_RES_SIGNATURE = '^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'
# 'whence' arguments for file.seek()
RELATIVE_POS = 1
ABSOLUTE_POS = 0
class AstraResError(Exception):
    """Malformed or unexpected structure in an ASTRA res-file."""
    pass
class NotAString(Exception):
    """The packet just read is not a length-prefixed string (control flow)."""
    pass
class EndOfFile(Exception):
    """The underlying file has been exhausted (control flow)."""
    pass
class ProfileNotFound(Exception):
    """The next packet starts a new frame instead of a profile (control flow)."""
    pass
class ProfileOutOfIndex(Exception):
    """Requested profile index is out of range."""
    pass
#%% common read utils
def little_endian(type_):
    """Return *type_* as a numpy dtype with explicit little-endian byte order."""
    dtype = np.dtype(type_)
    return dtype.newbyteorder('<')
def read_long(file):
    """Read a little-endian signed 32-bit integer; raise EndOfFile at EOF."""
    raw = file.read(4)  # read() returns b'' once the file is exhausted
    if not raw:
        raise EndOfFile
    (value,) = struct.unpack("<l", raw)
    return value
def read_byte(file):
    """Read one signed byte from *file*."""
    (value,) = struct.unpack("<b", file.read(1))
    return value
def read_double(file):
    """Read a little-endian 64-bit float from *file*."""
    (value,) = struct.unpack("<d", file.read(8))
    return value
def dump(filename, src_file=None, size=None, data_bytes=None):
    """Write a chunk of binary data to *filename* (debugging aid).

    Either copies *size* bytes from the current position of *src_file*
    (restoring its position afterwards), or writes *data_bytes* directly.
    """
    with open(filename, 'wb') as out:
        if src_file is None:
            out.write(data_bytes)
        else:
            saved_pos = src_file.tell()
            out.write(src_file.read(size))
            # restore the source position so the caller's parsing continues
            src_file.seek(saved_pos, ABSOLUTE_POS)
#%% specific read utils
def read_packet_size(file, previous=None, max_size=None):
    """Read a 4-byte packet-size marker and validate it.

    *previous* -- expected value (each packet is framed by a leading and a
    trailing size marker that must agree); *max_size* -- sanity limit.
    Raises AstraResError on mismatch or oversize.
    """
    size = read_long(file)
    if previous is not None and size != previous:
        raise AstraResError('Wrong packet format (%d != %d)' % (previous, size))
    if max_size is not None and size > max_size:
        raise AstraResError('Too big packet')
    return size
def _read_packet(file):
    """Read one size-framed packet and return its payload bytes."""
    size = read_packet_size(file)
    payload = file.read(size)
    if not payload:
        raise EndOfFile
    read_packet_size(file, size)  # trailing size marker must match
    return payload
def read_str_packet(file):
    """Read a packet containing a length-prefixed cp1252 string.

    If the packet does not look like a string (packet size != length
    byte + 1), the file position is rewound and NotAString is raised.
    """
    size = read_packet_size(file)   # 4-byte packet size
    length = read_byte(file)        # 1-byte string length
    if size != length + 1:
        file.seek(-5, RELATIVE_POS)  # undo both reads (4 + 1 bytes)
        raise NotAString
    text = file.read(length)
    read_packet_size(file, size)    # trailing size marker
    return text.decode('cp1252')
def read_signature_packet(file):
    """Read and verify the res-file signature packet ('^' times 32)."""
    # cap the size at 32 to fail fast on files of a different format
    size = read_packet_size(file, None, 32)
    raw = file.read(size)
    read_packet_size(file, size)
    signature = raw.decode('cp1252')
    if signature != ASTRA_RES_SIGNATURE:
        raise AstraResError('Signature not found in the beginning of the file')
    return signature
def read_packet(file, dtype):
    """Read one size-framed packet and decode it according to *dtype*.

    Supported dtypes:
      'str'      -- length-prefixed cp1252 string (may raise NotAString)
      '^^^'      -- the res-file signature packet
      '?'        -- raw payload bytes
      'double'   -- one little-endian float64
      'long'     -- one little-endian int32
      'double[]' -- array of little-endian float64
    Any other dtype returns None (original behaviour preserved).
    """
    if dtype == 'str':
        return read_str_packet(file)
    elif dtype == '^^^':
        return read_signature_packet(file)
    elif dtype == '?':
        return _read_packet(file)
    #---------------------
    b = _read_packet(file)
    if dtype == 'double':
        return struct.unpack("<d", b)[0]
    elif dtype == 'long':
        try:
            return struct.unpack("<l", b)[0]
        except struct.error:
            # BUGFIX: was a bare 'except:' that also swallowed
            # KeyboardInterrupt/SystemExit; only a payload-size mismatch
            # is expected here.
            print('read_packet(file, "long"): len(b) = %d (should be 4)' % len(b))
            raise EndOfFile
    elif dtype == 'double[]':
        arr_len = len(b) // 8
        return np.frombuffer(b, little_endian(np.float64), arr_len)
def read_bin(file, dtype, length):
    """Read *length* items of *dtype* directly (not packet-framed) from *file*.

    'str'       -- cp1252 string of *length* characters
    'char[4][]' -- *length* chunks of 4 cp1252 characters
    'char[6][]' -- *length* chunks of 6 cp1252 characters
    'long[]', 'short[]', 'double[]', 'float[]' -- little-endian numpy arrays
    """
    if dtype == 'str':
        return file.read(length).decode('cp1252')
    if dtype == 'char[4][]':
        # wrap() splits the decoded string into fixed-width chunks
        return wrap(file.read(length * 4).decode('cp1252'), 4)
    if dtype == 'char[6][]':
        return wrap(file.read(length * 6).decode('cp1252'), 6)
    numeric = {'long[]': (np.int32, 4), 'short[]': (np.int16, 2),
               'double[]': (np.float64, 8), 'float[]': (np.float32, 4)}
    if dtype in numeric:
        np_type, item_size = numeric[dtype]
        raw = file.read(length * item_size)
        return np.frombuffer(raw, little_endian(np_type), length)
#%% Res file structure objects
def _convert(x, down, scale):
return down + (32768 + x)*scale/65535.0
class ResProfile:
    """One radial profile stored as scaled 16-bit integers.

    Packet layout: scale (double), offset (double), then N int16 samples.
    Physical values are reconstructed via _convert().  If the next packet
    has size 4 it is the start of a new frame, not a profile, so the read
    position is rewound and ProfileNotFound is raised.
    """
    def __init__(self, file):
        packet_size = read_packet_size(file)
        if packet_size == 4: # start of new Frame !!!
            file.seek(-4, RELATIVE_POS) # moves the file position back
            raise ProfileNotFound
        self.scale = read_double(file)
        self.down = read_double(file)
        n = (packet_size - 2*8) // 2 # 8 = sizeof(double)
        self.raw_array = read_bin(file, 'short[]', n)
        # NOTE(review): this allocation is immediately overwritten by the
        # _convert() result on the next line; it looks redundant.
        self.array = np.empty(len(self.raw_array), dtype=little_endian(np.float64))
        self.array = _convert(self.raw_array, self.down, self.scale)
        _ = read_packet_size(file, packet_size)
#------------------------------------------------------------------------------
class ResFrame:
    """One frame of the res-file: optional time-signal slices, a profile
    time stamp, constant values, and a run of radial profiles (read until
    the next frame marker or EOF)."""
    def __init__(self, file): #def __init__(self, file, nprof):
        # ------ 0, 1 or several slices of time signals -----
        # each with its own time instant
        nslices = read_packet(file, 'long')
        if nslices > 0:
            # all slices come in one flat array; split it into equal parts
            merged_slices = read_packet(file, 'double[]')
            N = len(merged_slices) // nslices
            self.time_slices = [np.array(merged_slices[i*N:i*N+N]) for i in range(nslices)]
        else:
            self.time_slices = []
        self.prof_time_stamp = read_packet(file, 'double')
        self.const_values = read_packet(file, 'double[]')
        self.unknown_packet = read_packet(file, '?') # usually filled with zero except the first byte
        # ------ profiles -----
        self.profiles = []
        #for _ in range(nprof):
        while True:
            try:
                prof = ResProfile(file)
                self.profiles.append(prof)
            except ProfileNotFound: # in version 7 can be 6 or 7 unnamed profiles ???
                break # New Frame starts
            except EndOfFile:
                break
#------------------------------------------------------------------------------
class ResOutputInfo:
    """Names and scale factors of one output group (radial or time signals).

    *unmentioned_names* is a comma-separated list of implicit columns that
    precede the named ones in the data; they are assigned a scale of 1.0.
    """
    def __init__(self, file, unmentioned_names): #, vers_1st_dig=None):
        unmentioned_names = unmentioned_names.split(',')
        _nout = read_long(file)
        _names = read_bin(file, 'char[4][]', _nout)
        _scales = read_bin(file, 'double[]', _nout)
        k = len(unmentioned_names)
        self.names = [name.strip() for name in unmentioned_names + _names]
        self.scales = k*[1.0] + list(_scales)
        #if (vers_1st_dig == '6')or(vers_1st_dig is None):
        #    k = len(unmentioned_names)
        #    self.names = [name.strip() for name in unmentioned_names + _names]
        #    self.scales = k*[1.0] + list(_scales)
        #elif vers_1st_dig == '7': # ??? I'm not sure that one of unnamned shoul be added th the end... It's just a guess
        #    k = len(unmentioned_names)
        #    self.names = [name.strip() for name in unmentioned_names + _names]
        #    self.names.append('#last')
        #    self.scales = k*[1.0] + list(_scales) + [1.0]
#------------------------------------------------------------------------------
class ResHeader:
    """Header of an ASTRA res-file: model/equilibrium names, version,
    date/time, output-column descriptions and grid bookkeeping.

    The header occupies two size-framed packets; the tail of the second
    packet is deliberately skipped (see the commented-out code below),
    so the file position is rewound and the two packets are re-read raw
    into _packet0/_packet1.
    """
    def __init__(self, file):
        file_pos = file.tell()
        # first packet of the header -------------------
        packet_size1 = read_packet_size(file)
        self.rd_name = read_bin(file, 'str', 40).strip()
        self.eq_name = read_bin(file, 'str', 40).strip()
        self.version = read_bin(file, 'str', 32).strip(' \0') # ???
        self.xline1 = read_bin(file, 'str', 132).strip()
        #vers_1st_dig = self.version.split()[1][0]
        self.year = read_long(file)
        self.month = read_long(file)
        self.day = read_long(file)
        self.hour = read_long(file)
        self.minute = read_long(file)
        self.n_cf_nam = read_long(file)
        self.n_pr_nam = read_long(file)
        # radial output columns are preceded by 7 implicit grid columns
        self.rad_out_info = ResOutputInfo(file, "#radius, #x1, #x2, #x3, #x4, #x5, #x6")
        self.time_out_info = ResOutputInfo(file, "#time")
        b = file.read(8 + 4*4)
        self.hro, self.nb1, self.nsbr, self.ngr, self.nxout = struct.unpack("<dllll", b)
        self.leq = read_bin(file, 'long[]', 7) # Note Change the cycle in NEQNS,(LEQ(j),j=1,NEQNS)
        _ = read_packet_size(file, packet_size1) # end of the first packet
        # second packet of the header -------------------
        _ = read_packet_size(file) # packet_size2
        if self.nxout > 0:
            self.kto = read_bin(file, 'long[]', self.ngr)
            self.ngridx = read_bin(file, 'long[]', self.ngr)
            self.ntypex = read_bin(file, 'long[]', self.ngr)
            self.timex = read_bin(file, 'double[]', self.ngr)
            self.gdex = read_bin(file, 'long[]', self.ngr)
            self.gdey = read_bin(file, 'long[]', self.ngr)
            self.datarr = read_bin(file, 'float[]', self.gdey[self.ngr-1] + self.ngridx[self.ngr-1]-1)
            self.namex = read_bin(file, 'char[6][]', ASTRA_NARRX)
            self.nwindx = read_bin(file, 'long[]', ASTRA_NARRX)
            self.kogda = read_bin(file, 'long[]', ASTRA_NARRX)
        else:
            self.kto, self.ngridx, self.ntypex, self.timex = None, None, None, None
            self.gdex, self.gdey = None, None
            self.datarr, self.namex, self.nwindx, self.kogda = None, None, None, None
        # ??? we do not reach the end of the second packet yet
        # but the rest of the second packet is ignored
        #ntout = len(self.time_out_info.names)-1
        #nrout = len(self.rad_out_info.names)-7
        # ???
        #self.jtout = read_long(file)
        #if self.jtout != 0:
        #    ltouto = ???
        #    ltout = ltouto + self.jtout - 1  # ???
        #
        #    for i in range(ntout):
        #        ttout = read_double(file)
        #        tout = read_bin(file, 'double[]', ltout - ltouto) # J=LTOUTO, LTOUT-1
        #
        # re-read both header packets raw, leaving the position after them
        file.seek(file_pos, ABSOLUTE_POS)
        self._packet0 = read_packet(file, '?')
        file_pos = file.tell()
        self._packet1 = read_packet(file, '?')
        if len(self._packet1) == 4:
            # a 4-byte packet belongs to the first frame, not the header
            file.seek(file_pos, ABSOLUTE_POS)
    def display(self):
        # Print a human-readable summary of the header.
        print('------- ASTRA res-file header -------')
        print('  ', self.rd_name)
        print('  ', self.eq_name)
        print('  ', self.version)
        print('  ', self.xline1)
def get_const_names(str_list):
    """Extract constant names from the 'constants:' section of a log.

    Scans *str_list* for a line containing 'constants:' (case-insensitive);
    every following line of the form ``NAME=value`` contributes NAME, and
    the first non-assignment line closes the section.
    """
    names = []
    section = 0
    for line in str_list:
        if 'constants:' in line.lower():
            # Section marker: the first one opens the constants block.
            section += 1
        elif section == 1:
            if '=' in line:
                names.append(line.split('=')[0].strip())
            else:
                # A line without '=' terminates the constants section.
                section += 1
    return names
#%% Res file main object
class ResFile:
    """Parser for an ASTRA binary res-file.

    On construction the file is read completely: the model text, the run
    log, the header and all data frames.  Radial profiles and time-trace
    signals can then be queried with :meth:`find_profile` and
    :meth:`find_signal`.
    """
    def __init__(self, filename):
        self.filename = filename
        self.filesize = os.path.getsize(filename)
        self.log = []     # run-log lines (text section after the signature separator)
        self.model = []   # model description lines (text section before the separator)
        self.frames = []  # ResFrame objects, one per radial output time
        with open(filename, "rb") as file:
            # read signature ------------------------------
            _ = read_signature_packet(file)
            # read model and log --------------------------
            section = 0
            while True:
                try:
                    s = read_packet(file, 'str')
                    if s == ASTRA_RES_SIGNATURE: # separator between model section and log section
                        section += 1
                    elif section == 0:
                        self.model.append(s)
                    elif section == 1:
                        self.log.append(s)
                    else:
                        continue
                except NotAString:
                    # The first non-string packet marks the end of the text part.
                    break
            self.const_names = get_const_names(self.log)
            # read header (2 packets) --------------------
            self.header = ResHeader(file)
            # read frames --------------------------------
            while True:
                try:
                    # frame = ResFrame(file, len(self.rad_names)) # ??? I don't know the exact number of the unnamed profiles
                    frame = ResFrame(file)
                    self.frames.append(frame)
                except EndOfFile:
                    break # normal termination: every frame has been consumed
                except BaseException as e:
                    # Best-effort: keep the frames read so far and warn.
                    print(type(e).__name__, ": '", e, "'")
                    print('WARNING! Not all the frames have been readed!')
                    break
            self._last_file_pos = file.tell()
            if self._last_file_pos != self.filesize:
                print('WARNING! End of the file not reached!')
        # ---------------------------------------------
        self._actualize_profile_name_list()
        self.rad_times = self.extract_time_array('rad')
        self.time_times = self.extract_time_array('time')
        self.rad_names = self.header.rad_out_info.names
        self.time_names = self.header.time_out_info.names
    def _actualize_profile_name_list(self):
        """Reconcile the header's declared profile names with the actual frame content."""
        # correction of the rad names -----------------
        n = self.get_profile_count()
        n_ = len(self.header.rad_out_info.names)
        if n_ > n:
            print('WARNING! Actual profile number is less the expected number')
            self.header.rad_out_info.names = self.header.rad_out_info.names[0:n] # remove "#last" if needed
            self.header.rad_out_info.scales = self.header.rad_out_info.scales[0:n]
        elif n_ < n:
            # Just for the case: pad with placeholder names and unit scales.
            print('WARNING! Actual profile number exceeds the expected number')
            self.header.rad_out_info.names.extend(['#last', '#last2', '#last3', '#last4', '#last5'][0:n-n_])
            self.header.rad_out_info.scales.extend( [1.0]*(n-n_) )
    def extract_time_array(self, kind):
        """Collect time stamps as a numpy array.

        kind='time' -> one entry per time slice; kind='rad' -> one entry
        per radial frame.  Any other kind yields an empty array.
        """
        tt = []
        if kind == 'time':
            for fr in self.frames:
                for time_slice in fr.time_slices:
                    tt.append(time_slice[0])
        elif kind == 'rad':
            for fr in self.frames:
                tt.append(fr.prof_time_stamp)
        return np.array(tt)
    def get_frame_count(self):
        # Number of radial frames read from the file.
        return len(self.frames) # len(self.rad_times)
    def get_signal_count(self):
        # NOTE(review): the header stores names under time_out_info.names;
        # self.header.time_names may not exist — possibly self.time_names
        # was intended here.  Confirm against ResHeader.
        return len(self.header.time_names)
    def get_profile_count(self):
        # return len(self.header.rad_names)
        if len(self.frames) == 0:
            return 0
        else:
            return len(self.frames[0].profiles)
    def find_profile(self, name, index=None, time=None):
        """Return (radii, frame_time, values) for a radial profile.

        *name* is a profile name or an integer index; the frame is chosen
        by *index* or, when index is None, by the frame nearest to *time*.
        Raises ProfileOutOfIndex when the frame index is out of range.
        """
        name_idx = name if isinstance(name, int) else self.rad_names.index(name)
        # NOTE(review): list.index raises ValueError instead of returning -1,
        # so this guard only triggers for an explicit integer name of -1.
        if name_idx == -1:
            raise AstraResError('no radial rpofile ' + str(name) )
        if index is None:
            index = np.searchsorted(self.rad_times, time)
            if index >= len(self.rad_times):
                index = len(self.rad_times) - 1
        if index >= self.get_frame_count():
            raise ProfileOutOfIndex
        t = self.rad_times[index]
        rr = self.frames[index].profiles[0].array  # profile 0 holds the radius grid
        yy = self.frames[index].profiles[name_idx].array
        return rr, t, yy
    def find_signal(self, name):
        """Return (times, values) for the time-trace signal *name* (name or index)."""
        idx = name if isinstance(name, int) else self.time_names.index(name)
        result = []
        for fr in self.frames:
            for time_slice in fr.time_slices:
                result.append(time_slice[idx])
        return self.time_times, np.array(result)
#%%
# This module is intended to be imported; no standalone behaviour yet.
if __name__ == '__main__':
    pass
| [
"textwrap.wrap",
"struct.unpack",
"numpy.dtype",
"numpy.searchsorted",
"numpy.array"
] | [((1776, 1798), 'struct.unpack', 'struct.unpack', (['"""<d"""', 'b'], {}), "('<d', b)\n", (1789, 1798), False, 'import struct\n'), ((1562, 1584), 'struct.unpack', 'struct.unpack', (['"""<l"""', 'b'], {}), "('<l', b)\n", (1575, 1584), False, 'import struct\n'), ((1693, 1715), 'struct.unpack', 'struct.unpack', (['"""<b"""', 'b'], {}), "('<b', b)\n", (1706, 1715), False, 'import struct\n'), ((9320, 9346), 'struct.unpack', 'struct.unpack', (['"""<dllll"""', 'b'], {}), "('<dllll', b)\n", (9333, 9346), False, 'import struct\n'), ((15615, 15627), 'numpy.array', 'np.array', (['tt'], {}), '(tt)\n', (15623, 15627), True, 'import numpy as np\n'), ((1326, 1341), 'numpy.dtype', 'np.dtype', (['type_'], {}), '(type_)\n', (1334, 1341), True, 'import numpy as np\n'), ((3705, 3727), 'struct.unpack', 'struct.unpack', (['"""<d"""', 'b'], {}), "('<d', b)\n", (3718, 3727), False, 'import struct\n'), ((4340, 4350), 'textwrap.wrap', 'wrap', (['s', '(4)'], {}), '(s, 4)\n', (4344, 4350), False, 'from textwrap import wrap\n'), ((16291, 16328), 'numpy.searchsorted', 'np.searchsorted', (['self.rad_times', 'time'], {}), '(self.rad_times, time)\n', (16306, 16328), True, 'import numpy as np\n'), ((16978, 16994), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (16986, 16994), True, 'import numpy as np\n'), ((4514, 4524), 'textwrap.wrap', 'wrap', (['s', '(6)'], {}), '(s, 6)\n', (4518, 4524), False, 'from textwrap import wrap\n'), ((6299, 6339), 'numpy.array', 'np.array', (['merged_slices[i * N:i * N + N]'], {}), '(merged_slices[i * N:i * N + N])\n', (6307, 6339), True, 'import numpy as np\n'), ((3792, 3814), 'struct.unpack', 'struct.unpack', (['"""<l"""', 'b'], {}), "('<l', b)\n", (3805, 3814), False, 'import struct\n')] |
"""
File: plot_ball.py
Copyright (c) 2016 <NAME>
License: MIT
Course: PHYS227
Assignment: cw-2-classwork-team
Date: Feb 11, 2016
Email: <EMAIL>
Name: <NAME>
Description: Plot a formula
"""
import numpy as np
import matplotlib.pyplot as plt
g = 9.81  # gravitational acceleration [m/s^2]
def f(t, v):
    """Return the height at time ``t`` of a ball launched upward at speed ``v``."""
    return v * t - g * t ** 2 / 2
def test_f():
    """Spot-check f at launch, mid-flight and landing for v = 10 m/s."""
    #test the initial condition at time = 0.
    assert f(0, 10) == 0.0
    #test at time = 2, rounding the result to 2 decimal places.
    assert round(f(2, 10), 2) == 0.38
    #test for a decimal value of t, rounding the result to 2 decimal places.
    assert round(f(1.5, 10), 2) == 3.96
    #test the end point for t (full flight time 2*v/g), rounding to 3 decimal places.
    assert round(f(20 / g, 10), 3) == 0.0
def plot_here():
    """Plot the ball's trajectory over one full flight for v = 20 m/s."""
    launch_speed = 20
    times = np.linspace(0, 2 * launch_speed / 9.8, 100)
    heights = f(times, 20)
    plt.plot(times, heights)
    plt.title('Trajectory of a Ball')
    plt.xlabel('time (s)')
    plt.ylabel('height (m)')
    plt.show()
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((801, 833), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * v / 9.8)', '(100)'], {}), '(0, 2 * v / 9.8, 100)\n', (812, 833), True, 'import numpy as np\n'), ((858, 875), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'func'], {}), '(t, func)\n', (866, 875), True, 'import matplotlib.pyplot as plt\n'), ((880, 913), 'matplotlib.pyplot.title', 'plt.title', (['"""Trajectory of a Ball"""'], {}), "('Trajectory of a Ball')\n", (889, 913), True, 'import matplotlib.pyplot as plt\n'), ((918, 940), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (s)"""'], {}), "('time (s)')\n", (928, 940), True, 'import matplotlib.pyplot as plt\n'), ((945, 969), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""height (m)"""'], {}), "('height (m)')\n", (955, 969), True, 'import matplotlib.pyplot as plt\n'), ((974, 984), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (982, 984), True, 'import matplotlib.pyplot as plt\n')] |
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : <EMAIL>
@site : https://github.com/zhuyuanxiang/tensorflow_cookbook
---------------------------
@Software : PyCharm
@Project : TensorFlow_Machine_Learning_Cookbook
@File : C0107_activation_functions.py
@Version : v0.1
@Time : 2019-10-29 10:26
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : 《TensorFlow机器学习实战指南,Nick McClure》, Sec0107,P12
@Desc : TensorFlow 基础, Implementing Activation Functions
"""
# common imports
import os
import sys
import matplotlib.pyplot as plt
import numpy as np # pip install numpy<1.17,小于1.17就不会报错
import sklearn
import tensorflow as tf
import winsound
from tensorflow.python.framework import ops
from tools import show_values
# Numpy print options: 8 digits of precision, no scientific notation,
# print arrays in full (threshold = inf), 200-column lines.
np.set_printoptions(precision = 8, suppress = True, threshold = np.inf, linewidth = 200)
# Seed the RNG so every run produces the same "random" data.
np.random.seed(42)
# Reset the default TensorFlow computation graph.
ops.reset_default_graph()
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
assert sklearn.__version__ >= "0.20"
# Silence the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Open graph session
sess = tf.Session()
test_data = [-10., -3., -1., 0., 1., 3., 10.]
# Piecewise-linear non-linear activation functions
# 1. Rectified Linear Unit (ReLU), max(0, x): continuous but not smooth.
show_values(tf.nn.relu(test_data), "tf.nn.relu({})".format(test_data))
# 2. ReLU6, min(max(0, x), 6): fast to compute, mitigates vanishing gradients.
show_values(tf.nn.relu6(test_data), "tf.nn.relu6({})".format(test_data))
# 6. softplus, the smooth version of ReLU: log(exp(x) + 1).
show_values(tf.nn.softplus(test_data), "tf.nn.softplus({})".format(test_data))
# 7. Exponential Linear Unit (ELU):
# similar to softplus, except it approaches -1 (not 0) as the input goes to -infinity.
show_values(tf.nn.elu(test_data), "tf.nn.elu({})".format(test_data))
# Sigmoid-shaped (logistic-like) activation functions
# 3. sigmoid / logistic function, 1/(1+exp(-x)): the most common smooth, continuous activation.
# Its output range is 0 to 1.
show_values(tf.nn.sigmoid(test_data), "tf.nn.sigmoid({})".format(test_data))
# 4. Hyperbolic tangent (tanh), (exp(x)-exp(-x))/(exp(x)+exp(-x)).
# Its output range is -1 to 1.
show_values(tf.nn.tanh(test_data), "tf.nn.tanh({})".format(test_data))
# 5. softsign, x/(abs(x)+1): a continuous approximation of the sign function.
show_values(tf.nn.softsign(test_data), "tf.nn.softsign({})".format(test_data))
# X range
x_vals = np.linspace(start = -10., stop = 10., num = 100)
y_relu = sess.run(tf.nn.relu(x_vals))
y_relu6 = sess.run(tf.nn.relu6(x_vals))
y_sigmoid = sess.run(tf.nn.sigmoid(x_vals))
y_tanh = sess.run(tf.nn.tanh(x_vals))
y_softsign = sess.run(tf.nn.softsign(x_vals))
y_softplus = sess.run(tf.nn.softplus(x_vals))
y_elu = sess.run(tf.nn.elu(x_vals))
# Plot the different functions
plt.figure()
plt.plot(x_vals, y_relu, 'k-', label = 'ReLU', linewidth = 4)
plt.plot(x_vals, y_elu, 'b--', label = 'ExpLU', linewidth = 3)
plt.plot(x_vals, y_relu6, 'g-.', label = 'ReLU6', linewidth = 2)
plt.plot(x_vals, y_softplus, 'r:', label = 'Softplus', linewidth = 1)
plt.ylim([-1.5, 7])
plt.legend(loc = 'upper left')
plt.title("图1-3:ReLU、ReLU6、softplus 和 ELU 激励函数")
plt.figure()
plt.plot(x_vals, y_sigmoid, 'r--', label = 'Sigmoid', linewidth = 2)
plt.plot(x_vals, y_tanh, 'b:', label = 'Tanh', linewidth = 2)
plt.plot(x_vals, y_softsign, 'g-.', label = 'Softsign', linewidth = 2)
plt.ylim([-2, 2])
plt.legend(loc = 'upper left')
plt.title("图1-3:sigmoid、softsign 和 tanh 激励函数")
# -----------------------------------------------------------------
# Beep to signal that the run has finished, then show any open figures.
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
    plt.show()
    pass
| [
"matplotlib.pyplot.title",
"numpy.random.seed",
"tensorflow.nn.tanh",
"matplotlib.pyplot.figure",
"tensorflow.python.framework.ops.reset_default_graph",
"tensorflow.nn.relu6",
"numpy.set_printoptions",
"tensorflow.nn.relu",
"tensorflow.nn.elu",
"numpy.linspace",
"matplotlib.pyplot.get_fignums",
... | [((814, 899), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(8)', 'suppress': '(True)', 'threshold': 'np.inf', 'linewidth': '(200)'}), '(precision=8, suppress=True, threshold=np.inf, linewidth=200\n )\n', (833, 899), True, 'import numpy as np\n'), ((938, 956), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (952, 956), True, 'import numpy as np\n'), ((970, 995), 'tensorflow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', ([], {}), '()\n', (993, 995), False, 'from tensorflow.python.framework import ops\n'), ((1295, 1307), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1305, 1307), True, 'import tensorflow as tf\n'), ((2377, 2421), 'numpy.linspace', 'np.linspace', ([], {'start': '(-10.0)', 'stop': '(10.0)', 'num': '(100)'}), '(start=-10.0, stop=10.0, num=100)\n', (2388, 2421), True, 'import numpy as np\n'), ((2746, 2758), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2756, 2758), True, 'import matplotlib.pyplot as plt\n'), ((2759, 2816), 'matplotlib.pyplot.plot', 'plt.plot', (['x_vals', 'y_relu', '"""k-"""'], {'label': '"""ReLU"""', 'linewidth': '(4)'}), "(x_vals, y_relu, 'k-', label='ReLU', linewidth=4)\n", (2767, 2816), True, 'import matplotlib.pyplot as plt\n'), ((2821, 2879), 'matplotlib.pyplot.plot', 'plt.plot', (['x_vals', 'y_elu', '"""b--"""'], {'label': '"""ExpLU"""', 'linewidth': '(3)'}), "(x_vals, y_elu, 'b--', label='ExpLU', linewidth=3)\n", (2829, 2879), True, 'import matplotlib.pyplot as plt\n'), ((2884, 2944), 'matplotlib.pyplot.plot', 'plt.plot', (['x_vals', 'y_relu6', '"""g-."""'], {'label': '"""ReLU6"""', 'linewidth': '(2)'}), "(x_vals, y_relu6, 'g-.', label='ReLU6', linewidth=2)\n", (2892, 2944), True, 'import matplotlib.pyplot as plt\n'), ((2949, 3014), 'matplotlib.pyplot.plot', 'plt.plot', (['x_vals', 'y_softplus', '"""r:"""'], {'label': '"""Softplus"""', 'linewidth': '(1)'}), "(x_vals, y_softplus, 'r:', label='Softplus', linewidth=1)\n", (2957, 3014), True, 
'import matplotlib.pyplot as plt\n'), ((3019, 3038), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-1.5, 7]'], {}), '([-1.5, 7])\n', (3027, 3038), True, 'import matplotlib.pyplot as plt\n'), ((3039, 3067), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (3049, 3067), True, 'import matplotlib.pyplot as plt\n'), ((3070, 3118), 'matplotlib.pyplot.title', 'plt.title', (['"""图1-3:ReLU、ReLU6、softplus 和 ELU 激励函数"""'], {}), "('图1-3:ReLU、ReLU6、softplus 和 ELU 激励函数')\n", (3079, 3118), True, 'import matplotlib.pyplot as plt\n'), ((3120, 3132), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3130, 3132), True, 'import matplotlib.pyplot as plt\n'), ((3133, 3197), 'matplotlib.pyplot.plot', 'plt.plot', (['x_vals', 'y_sigmoid', '"""r--"""'], {'label': '"""Sigmoid"""', 'linewidth': '(2)'}), "(x_vals, y_sigmoid, 'r--', label='Sigmoid', linewidth=2)\n", (3141, 3197), True, 'import matplotlib.pyplot as plt\n'), ((3202, 3259), 'matplotlib.pyplot.plot', 'plt.plot', (['x_vals', 'y_tanh', '"""b:"""'], {'label': '"""Tanh"""', 'linewidth': '(2)'}), "(x_vals, y_tanh, 'b:', label='Tanh', linewidth=2)\n", (3210, 3259), True, 'import matplotlib.pyplot as plt\n'), ((3264, 3330), 'matplotlib.pyplot.plot', 'plt.plot', (['x_vals', 'y_softsign', '"""g-."""'], {'label': '"""Softsign"""', 'linewidth': '(2)'}), "(x_vals, y_softsign, 'g-.', label='Softsign', linewidth=2)\n", (3272, 3330), True, 'import matplotlib.pyplot as plt\n'), ((3335, 3352), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-2, 2]'], {}), '([-2, 2])\n', (3343, 3352), True, 'import matplotlib.pyplot as plt\n'), ((3353, 3381), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (3363, 3381), True, 'import matplotlib.pyplot as plt\n'), ((3384, 3430), 'matplotlib.pyplot.title', 'plt.title', (['"""图1-3:sigmoid、softsign 和 tanh 激励函数"""'], {}), "('图1-3:sigmoid、softsign 和 tanh 激励函数')\n", (3393, 3430), True, 'import matplotlib.pyplot as 
plt\n'), ((3510, 3533), 'winsound.Beep', 'winsound.Beep', (['(600)', '(500)'], {}), '(600, 500)\n', (3523, 3533), False, 'import winsound\n'), ((1443, 1464), 'tensorflow.nn.relu', 'tf.nn.relu', (['test_data'], {}), '(test_data)\n', (1453, 1464), True, 'import tensorflow as tf\n'), ((1560, 1582), 'tensorflow.nn.relu6', 'tf.nn.relu6', (['test_data'], {}), '(test_data)\n', (1571, 1582), True, 'import tensorflow as tf\n'), ((1674, 1699), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['test_data'], {}), '(test_data)\n', (1688, 1699), True, 'import tensorflow as tf\n'), ((1844, 1864), 'tensorflow.nn.elu', 'tf.nn.elu', (['test_data'], {}), '(test_data)\n', (1853, 1864), True, 'import tensorflow as tf\n'), ((2017, 2041), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['test_data'], {}), '(test_data)\n', (2030, 2041), True, 'import tensorflow as tf\n'), ((2180, 2201), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['test_data'], {}), '(test_data)\n', (2190, 2201), True, 'import tensorflow as tf\n'), ((2290, 2315), 'tensorflow.nn.softsign', 'tf.nn.softsign', (['test_data'], {}), '(test_data)\n', (2304, 2315), True, 'import tensorflow as tf\n'), ((2444, 2462), 'tensorflow.nn.relu', 'tf.nn.relu', (['x_vals'], {}), '(x_vals)\n', (2454, 2462), True, 'import tensorflow as tf\n'), ((2483, 2502), 'tensorflow.nn.relu6', 'tf.nn.relu6', (['x_vals'], {}), '(x_vals)\n', (2494, 2502), True, 'import tensorflow as tf\n'), ((2525, 2546), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['x_vals'], {}), '(x_vals)\n', (2538, 2546), True, 'import tensorflow as tf\n'), ((2566, 2584), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['x_vals'], {}), '(x_vals)\n', (2576, 2584), True, 'import tensorflow as tf\n'), ((2608, 2630), 'tensorflow.nn.softsign', 'tf.nn.softsign', (['x_vals'], {}), '(x_vals)\n', (2622, 2630), True, 'import tensorflow as tf\n'), ((2654, 2676), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['x_vals'], {}), '(x_vals)\n', (2668, 2676), True, 'import tensorflow as tf\n'), ((2695, 2712), 'tensorflow.nn.elu', 
'tf.nn.elu', (['x_vals'], {}), '(x_vals)\n', (2704, 2712), True, 'import tensorflow as tf\n'), ((3570, 3580), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3578, 3580), True, 'import matplotlib.pyplot as plt\n'), ((3541, 3558), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (3556, 3558), True, 'import matplotlib.pyplot as plt\n')] |
# %%
import numpy as np
import sys
sys.path.append('../..')
from utils.tester import Tester
import pickle
import os
import matplotlib
import matplotlib.pyplot as plt
import math
import tikzplotlib
# Saved optimization runs to compare; each entry pairs a save-file name
# with a human-readable description of its lockdown granularity.
city_name = 'Phoenix'
data = []
# data.append({'save_file_name': '2021-03-20_03-48-37', 'description': 'Fully Targeted Lockdown'})
# data.append({'save_file_name': '2021-03-20_03-20-17', 'description': 'Lockdown fixed for entities'})
# data.append({'save_file_name': '2021-03-20_03-20-13', 'description': 'Lockdown fixed for cities'})
# data.append({'save_file_name': '2021-03-20_03-23-06', 'description': 'Lockdown fixed for all'})
# data.append({'save_file_name': '2021-04-09_21-26-07', 'description': 'Fully Targeted Lockdown'})
# data.append({'save_file_name': '2021-04-09_21-27-44', 'description': 'Lockdown fixed for entities'})
# data.append({'save_file_name': '2021-04-09_21-18-40', 'description': 'Lockdown fixed for cities'})
# data.append({'save_file_name': '2021-04-09_21-19-03', 'description': 'Lockdown fixed for all'})
data.append({'save_file_name': '2021-04-23_14-02-29', 'description': 'Fully Targeted Lockdown'})
data.append({'save_file_name': '2021-04-25_13-24-31', 'description': 'Lockdown fixed for cities'})
data.append({'save_file_name': '2021-04-25_13-32-06', 'description': 'Lockdown fixed for entities'})
data.append({'save_file_name': '2021-04-25_13-13-27', 'description': 'Lockdown fixed for all'})
# city_name = 'Seattle'
# save_file_name = '2021-03-21_23-18-05'
# Derive the repository's src/ directory from the current working directory.
base_directory = os.getcwd()
base_directory = base_directory[0:base_directory.find('src')+3]
# NOTE(review): data_folder_name stays undefined for any other city_name —
# the os.path.join below would then raise NameError.
if city_name == 'Phoenix':
    data_folder_name = 'Phoenix'
if city_name == 'Seattle':
    data_folder_name = 'IntercityFlow_Seattle'
# Load city data
city_data_file_path = os.path.join(base_directory, '..', 'data', data_folder_name, 'data_processing_outputs', 'city_data.p')
with open(city_data_file_path,'rb') as f:
    city_data = pickle.load(f)
city_list = list(city_data.keys())
# %%
# Total population across all cities, used to normalize per-100k metrics.
total_population = 0
for city in city_data.keys():
    total_population = total_population + city_data[city]['population']
total_population = total_population[0]
tester_list = []
peak_infections_list = []
num_deaths_list = []
average_lockdown_list = []
# Load every saved run and derive peak infections, cumulative deaths
# (both per 100,000 of population) and the average lockdown level.
for ind in range(len(data)):
    file_path = os.path.join(base_directory, 'optimization', 'save', data[ind]['save_file_name'])
    with open(file_path,'rb') as f:
        tester = pickle.load(f)
    data[ind]['tester'] = tester
    data[ind]['scale_frac'] = tester.params['scale_frac']
    # Sum over the per-region axis to get aggregate time series.
    data[ind]['I'] = np.sum(tester.results['I_best'] * data[ind]['scale_frac'], axis=1)
    data[ind]['D'] = np.sum(tester.results['D_best'] * data[ind]['scale_frac'], axis=1)
    data[ind]['peak_infections'] = np.max(data[ind]['I']) * 100000 / total_population
    data[ind]['num_deaths'] = data[ind]['D'][-1] * 100000 / total_population
    # data[ind]['peak_infections'] = 100 * np.max(data[ind]['I']) / total_population
    # data[ind]['num_deaths'] = 100 * data[ind]['D'][-1] / total_population
    peak_infections_list.append(data[ind]['peak_infections'])
    num_deaths_list.append(data[ind]['num_deaths'])
    # Average lockdown over the first 98 steps; L_best stores openness,
    # so 1 - average gives the lockdown level.
    average_lockdown_list.append(1 - np.average(data[ind]['tester'].results['L_best'][0:98]))
# %%
# Plot the results: bars are colored by the run's death count.
x = np.arange(len(data))
cmap = matplotlib.cm.get_cmap('Oranges')
norm = matplotlib.colors.Normalize(vmin=np.min(num_deaths_list), vmax=np.max(num_deaths_list))
color_list = []
for i in range(len(data)):
    color_list.append(cmap(norm(data[i]['num_deaths'])))
width = 0.5
# %%
fig = plt.figure()
### PLOT PEAK INFECTIONS COMPARISON
ax1 = fig.add_subplot(111)
cmap = matplotlib.cm.get_cmap('Oranges')
norm = matplotlib.colors.Normalize(vmin=np.min(num_deaths_list), vmax=np.max(num_deaths_list))
labels = []
for i in range(len(data)):
    val = data[i]['peak_infections']
    ax1.bar(i, val, width, edgecolor='black', facecolor=color_list[i], label=data[i]['description'])
labels = [
    'Heterogeneous \n Lockdown \n Strategies',
    'Activity Site \n Lockdown \n Strategies',
    'Regional \n Lockdown \n Strategies',
    'MSA \n Lockdown \n Strategies'
]
# ax1.set_title('Peak Infections', fontsize=fontsize)
ax1.set_ylabel('Peak Infections per 100,000 of Population')
ax1.set_xticks(x)
ax1.set_xticklabels(labels)
ax1.tick_params(axis='both')
# Export the figure as a TikZ picture for LaTeX inclusion.
save_location = os.path.join(base_directory, 'plotting', 'tikz_plotting', city_name)
filename = os.path.join(save_location, 'scale_cost_by_pop_homogenous_lockdown_infections_comparison.tex')
tikzplotlib.save(filename)
# %%
fig = plt.figure()
### PLOT DEATHS COMPARISON
ax2 = fig.add_subplot(111)
cmap = matplotlib.cm.get_cmap('Oranges')
norm = matplotlib.colors.Normalize(vmin=np.min(num_deaths_list), vmax=np.max(num_deaths_list))
labels = []
for i in range(len(data)):
    val = data[i]['num_deaths']
    ax2.bar(i, val, width, edgecolor='black', facecolor=color_list[i], label=data[i]['description'])
labels = [
    'Heterogeneous \n Lockdown \n Strategies',
    'Activity Site \n Lockdown \n Strategies',
    'Regional \n Lockdown \n Strategies',
    'MSA \n Lockdown \n Strategies'
]
# ax1.set_title('Peak Infections', fontsize=fontsize)
ax2.set_ylabel('Deaths per 100,000 People')
ax2.set_xticks(x)
ax2.set_xticklabels(labels)
ax2.tick_params(axis='both')
save_location = os.path.join(base_directory, 'plotting', 'tikz_plotting', city_name)
filename = os.path.join(save_location, 'scale_cost_by_pop_homogenous_lockdown_deaths_comparison.tex')
tikzplotlib.save(filename)
# %%
fig = plt.figure()
### PLOT lockdown COMPARISON
ax3 = fig.add_subplot(111)
# The lockdown plot uses its own colormap keyed on the lockdown level.
cmap = matplotlib.cm.get_cmap('Blues')
norm = matplotlib.colors.Normalize(vmin=np.min(average_lockdown_list), vmax=np.max(average_lockdown_list))
color_list = []
for i in range(len(data)):
    color_list.append(cmap(norm(average_lockdown_list[i])))
labels = []
for i in range(len(data)):
    val = average_lockdown_list[i]
    ax3.bar(i, val, width, edgecolor='black', facecolor=color_list[i], label=data[i]['description'])
labels = [
    'Heterogeneous \n Lockdown \n Strategies',
    'Activity Site \n Lockdown \n Strategies',
    'Regional \n Lockdown \n Strategies',
    'MSA \n Lockdown \n Strategies'
]
ax3.set_ylabel('Average Lockdown')
ax3.set_xticks(x)
ax3.set_xticklabels(labels)
ax3.tick_params(axis='both')
save_location = os.path.join(base_directory, 'plotting', 'tikz_plotting', city_name)
filename = os.path.join(save_location, 'scale_cost_by_pop_homogenous_lockdown_avg_lockdown_comparison.tex')
tikzplotlib.save(filename)
# %%
# %%
| [
"sys.path.append",
"numpy.sum",
"numpy.average",
"matplotlib.cm.get_cmap",
"os.getcwd",
"matplotlib.pyplot.figure",
"tikzplotlib.save",
"pickle.load",
"numpy.min",
"numpy.max",
"os.path.join"
] | [((35, 59), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (50, 59), False, 'import sys\n'), ((1514, 1525), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1523, 1525), False, 'import os\n'), ((1765, 1871), 'os.path.join', 'os.path.join', (['base_directory', '""".."""', '"""data"""', 'data_folder_name', '"""data_processing_outputs"""', '"""city_data.p"""'], {}), "(base_directory, '..', 'data', data_folder_name,\n 'data_processing_outputs', 'city_data.p')\n", (1777, 1871), False, 'import os\n'), ((3294, 3327), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['"""Oranges"""'], {}), "('Oranges')\n", (3316, 3327), False, 'import matplotlib\n'), ((3549, 3561), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3559, 3561), True, 'import matplotlib.pyplot as plt\n'), ((3633, 3666), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['"""Oranges"""'], {}), "('Oranges')\n", (3655, 3666), False, 'import matplotlib\n'), ((4340, 4408), 'os.path.join', 'os.path.join', (['base_directory', '"""plotting"""', '"""tikz_plotting"""', 'city_name'], {}), "(base_directory, 'plotting', 'tikz_plotting', city_name)\n", (4352, 4408), False, 'import os\n'), ((4420, 4518), 'os.path.join', 'os.path.join', (['save_location', '"""scale_cost_by_pop_homogenous_lockdown_infections_comparison.tex"""'], {}), "(save_location,\n 'scale_cost_by_pop_homogenous_lockdown_infections_comparison.tex')\n", (4432, 4518), False, 'import os\n'), ((4515, 4541), 'tikzplotlib.save', 'tikzplotlib.save', (['filename'], {}), '(filename)\n', (4531, 4541), False, 'import tikzplotlib\n'), ((4555, 4567), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4565, 4567), True, 'import matplotlib.pyplot as plt\n'), ((4630, 4663), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['"""Oranges"""'], {}), "('Oranges')\n", (4652, 4663), False, 'import matplotlib\n'), ((5317, 5385), 'os.path.join', 'os.path.join', (['base_directory', '"""plotting"""', 
'"""tikz_plotting"""', 'city_name'], {}), "(base_directory, 'plotting', 'tikz_plotting', city_name)\n", (5329, 5385), False, 'import os\n'), ((5397, 5491), 'os.path.join', 'os.path.join', (['save_location', '"""scale_cost_by_pop_homogenous_lockdown_deaths_comparison.tex"""'], {}), "(save_location,\n 'scale_cost_by_pop_homogenous_lockdown_deaths_comparison.tex')\n", (5409, 5491), False, 'import os\n'), ((5488, 5514), 'tikzplotlib.save', 'tikzplotlib.save', (['filename'], {}), '(filename)\n', (5504, 5514), False, 'import tikzplotlib\n'), ((5528, 5540), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5538, 5540), True, 'import matplotlib.pyplot as plt\n'), ((5605, 5636), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['"""Blues"""'], {}), "('Blues')\n", (5627, 5636), False, 'import matplotlib\n'), ((6345, 6413), 'os.path.join', 'os.path.join', (['base_directory', '"""plotting"""', '"""tikz_plotting"""', 'city_name'], {}), "(base_directory, 'plotting', 'tikz_plotting', city_name)\n", (6357, 6413), False, 'import os\n'), ((6425, 6525), 'os.path.join', 'os.path.join', (['save_location', '"""scale_cost_by_pop_homogenous_lockdown_avg_lockdown_comparison.tex"""'], {}), "(save_location,\n 'scale_cost_by_pop_homogenous_lockdown_avg_lockdown_comparison.tex')\n", (6437, 6525), False, 'import os\n'), ((6523, 6549), 'tikzplotlib.save', 'tikzplotlib.save', (['filename'], {}), '(filename)\n', (6539, 6549), False, 'import tikzplotlib\n'), ((1926, 1940), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1937, 1940), False, 'import pickle\n'), ((2282, 2368), 'os.path.join', 'os.path.join', (['base_directory', '"""optimization"""', '"""save"""', "data[ind]['save_file_name']"], {}), "(base_directory, 'optimization', 'save', data[ind][\n 'save_file_name'])\n", (2294, 2368), False, 'import os\n'), ((2548, 2614), 'numpy.sum', 'np.sum', (["(tester.results['I_best'] * data[ind]['scale_frac'])"], {'axis': '(1)'}), "(tester.results['I_best'] * data[ind]['scale_frac'], 
axis=1)\n", (2554, 2614), True, 'import numpy as np\n'), ((2636, 2702), 'numpy.sum', 'np.sum', (["(tester.results['D_best'] * data[ind]['scale_frac'])"], {'axis': '(1)'}), "(tester.results['D_best'] * data[ind]['scale_frac'], axis=1)\n", (2642, 2702), True, 'import numpy as np\n'), ((2417, 2431), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2428, 2431), False, 'import pickle\n'), ((3368, 3391), 'numpy.min', 'np.min', (['num_deaths_list'], {}), '(num_deaths_list)\n', (3374, 3391), True, 'import numpy as np\n'), ((3398, 3421), 'numpy.max', 'np.max', (['num_deaths_list'], {}), '(num_deaths_list)\n', (3404, 3421), True, 'import numpy as np\n'), ((3707, 3730), 'numpy.min', 'np.min', (['num_deaths_list'], {}), '(num_deaths_list)\n', (3713, 3730), True, 'import numpy as np\n'), ((3737, 3760), 'numpy.max', 'np.max', (['num_deaths_list'], {}), '(num_deaths_list)\n', (3743, 3760), True, 'import numpy as np\n'), ((4704, 4727), 'numpy.min', 'np.min', (['num_deaths_list'], {}), '(num_deaths_list)\n', (4710, 4727), True, 'import numpy as np\n'), ((4734, 4757), 'numpy.max', 'np.max', (['num_deaths_list'], {}), '(num_deaths_list)\n', (4740, 4757), True, 'import numpy as np\n'), ((5677, 5706), 'numpy.min', 'np.min', (['average_lockdown_list'], {}), '(average_lockdown_list)\n', (5683, 5706), True, 'import numpy as np\n'), ((5713, 5742), 'numpy.max', 'np.max', (['average_lockdown_list'], {}), '(average_lockdown_list)\n', (5719, 5742), True, 'import numpy as np\n'), ((2738, 2760), 'numpy.max', 'np.max', (["data[ind]['I']"], {}), "(data[ind]['I'])\n", (2744, 2760), True, 'import numpy as np\n'), ((3178, 3233), 'numpy.average', 'np.average', (["data[ind]['tester'].results['L_best'][0:98]"], {}), "(data[ind]['tester'].results['L_best'][0:98])\n", (3188, 3233), True, 'import numpy as np\n')] |
from pynput.keyboard import Key
import numpy as np
import threading
import time
from utils.player import Player
from utils.keyboard import Keyboard
from utils.reader import Reader
# module-level state
orbDist = None  # last observed orb distance; shadowed by a local in main()
# screen reader: captures game state from monitor 2
reader = Reader()
reader.showWindow = False
reader.printDebug = False
reader.selectMonitor(2)
reader.calibrate()
# background thread that runs the screen reader continuously
readingThread = threading.Thread(target=reader.start)
# trained player loaded from a saved network configuration
player = Player('./out/nnconf_24:08:2021:04:13:18_ep1000.json')
# keyboard controller: maps the player's action indices to key presses;
# action 4 (space) is used by main() to start/restart a play
controller = Keyboard({
    0: 'w',
    1: 's',
    2: 'a',
    3: 'd',
    4: Key.space,
})
def main():
    """Run the trained agent against the live game for 20 plays.

    Starts the screen-reader thread, counts down so the user can focus
    the game window, then feeds each observed state change to the player
    and applies its action.  Prints the average score at the end.
    """
    # Fix: the original declared `global agent, orbPos, readingThread`,
    # but `agent` and `orbPos` are never defined anywhere and no global
    # is assigned in this function (readingThread is only read, and
    # state/orbDist are plain locals), so the statement was removed.
    readingThread.start()
    # Give the user time to focus the game window, then count down.
    print('move your cursor to window')
    time.sleep(1)
    print('game starts in\n3')
    time.sleep(1)
    print('2')
    time.sleep(1)
    print('1')
    # Press space (action 4) to start the first play.
    controller.apply(4)
    state, orbDist, newOrb, gameover = reader.getState()
    plays = 0
    tScore = 0
    while plays < 20:
        tempState, newOrbDist, newOrb, gameover = reader.getState()
        if gameover:
            # Count the finished play, then restart with space and re-read.
            plays += 1
            time.sleep(0.5)
            controller.apply(4)
            time.sleep(0.1)
            state, orbDist, newOrb, gameover = reader.getState()
        elif not np.array_equal(state, tempState) or orbDist != newOrbDist:
            # Observation changed: update state, score collected orbs, act.
            state = tempState
            orbDist = newOrbDist
            if newOrb:
                tScore += 1
            action = player.getAction(state)
            controller.apply(action)
    print('avg score in 20 plays: {}'.format(tScore / 20))
if __name__ == "__main__":
main()
| [
"threading.Thread",
"utils.keyboard.Keyboard",
"time.sleep",
"utils.player.Player",
"numpy.array_equal",
"utils.reader.Reader"
] | [((235, 243), 'utils.reader.Reader', 'Reader', ([], {}), '()\n', (241, 243), False, 'from utils.reader import Reader\n'), ((357, 394), 'threading.Thread', 'threading.Thread', ([], {'target': 'reader.start'}), '(target=reader.start)\n', (373, 394), False, 'import threading\n'), ((414, 468), 'utils.player.Player', 'Player', (['"""./out/nnconf_24:08:2021:04:13:18_ep1000.json"""'], {}), "('./out/nnconf_24:08:2021:04:13:18_ep1000.json')\n", (420, 468), False, 'from utils.player import Player\n'), ((505, 571), 'utils.keyboard.Keyboard', 'Keyboard', (["{(0): 'w', (1): 's', (2): 'a', (3): 'd', (4): Key.space}"], {}), "({(0): 'w', (1): 's', (2): 'a', (3): 'd', (4): Key.space})\n", (513, 571), False, 'from utils.keyboard import Keyboard\n'), ((762, 775), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (772, 775), False, 'import time\n'), ((811, 824), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (821, 824), False, 'import time\n'), ((844, 857), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (854, 857), False, 'import time\n'), ((1184, 1199), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1194, 1199), False, 'import time\n'), ((1244, 1259), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1254, 1259), False, 'import time\n'), ((1369, 1401), 'numpy.array_equal', 'np.array_equal', (['state', 'tempState'], {}), '(state, tempState)\n', (1383, 1401), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from Align.affine_ransac import Ransac
from Align.affine_transform import Affine
# Lowe's ratio test threshold: a best match is kept only when
#
#     distance of best match
#     -------------------------------  <=  RATIO
#     distance of second best match
#
RATIO = 0.8
class Align():
    """Align a source image onto a target image.

    The alignment pipeline is: SIFT key-point extraction on both images,
    descriptor matching with Lowe's ratio test, RANSAC-based affine
    estimation on the matched points, and finally warping the source
    image with the estimated affine matrix.
    """

    def __init__(self, source_path, target_path,
                 K=3, threshold=1):
        """Initialize the aligner.

        Arguments:
        - source_path : path of the source image that will be warped
        - target_path : path of the target (reference) image
        - K           : number of corresponding points used per RANSAC
                        hypothesis (default 3, the minimum for an affine)
        - threshold   : residual threshold above which a point pair is
                        treated as an outlier during RANSAC (default 1)
        """
        self.source_path = source_path
        self.target_path = target_path
        self.K = K
        self.threshold = threshold

    def read_image(self, path, mode=1):
        """Load an image from disk.

        Arguments:
        - path : file path of the image
        - mode : 1 for color, 0 for grayscale (default 1)

        Returns the loaded image (or None if OpenCV cannot read it).
        """
        return cv2.imread(path, mode)

    def extract_SIFT(self, img):
        """Extract SIFT key points and descriptors from an image.

        Arguments:
        - img : BGR image to process

        Returns:
        - positions   : 2 x n array of key-point coordinates
        - descriptors : n x 128 array of SIFT descriptors
        """
        # SIFT operates on intensity only
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        detector = cv2.xfeatures2d.SIFT_create()
        key_points, descriptors = detector.detectAndCompute(gray, None)
        # Keep only the (x, y) positions, transposed to 2 x n
        positions = np.array([point.pt for point in key_points]).T
        return positions, descriptors

    def match_SIFT(self, desc_s, desc_t):
        """Match SIFT descriptors between source and target images.

        Arguments:
        - desc_s : descriptors of the source image
        - desc_t : descriptors of the target image

        Returns an m x 2 array of (source index, target index) pairs for
        the matches that pass Lowe's ratio test.
        """
        matcher = cv2.BFMatcher()
        candidates = matcher.knnMatch(desc_s, desc_t, k=2)

        fit_pos = np.array([], dtype=np.int32).reshape((0, 2))
        for pair in candidates:
            # Lowe's ratio test: keep a match only when it is clearly
            # better than the runner-up
            if pair[0].distance <= RATIO * pair[1].distance:
                indices = np.array([pair[0].queryIdx, pair[0].trainIdx])
                fit_pos = np.vstack((fit_pos, indices))
        return fit_pos

    def affine_matrix(self, kp_s, kp_t, fit_pos):
        """Estimate the affine transform from matched key points.

        Arguments:
        - kp_s    : 2 x n key-point positions of the source image
        - kp_t    : 2 x n key-point positions of the target image
        - fit_pos : index pairs of corresponding points

        Returns the 2 x 3 affine transformation matrix M = [A | t].
        """
        # Keep only the matched key points
        kp_s = kp_s[:, fit_pos[:, 0]]
        kp_t = kp_t[:, fit_pos[:, 1]]

        # RANSAC: find the largest consensus set of point pairs
        _, _, inliers = Ransac(self.K, self.threshold).ransac_fit(kp_s, kp_t)

        # Restrict to the inliers and refit on all of them
        kp_s = kp_s[:, inliers[0]]
        kp_t = kp_t[:, inliers[0]]
        A, t = Affine().estimate_affine(kp_s, kp_t)
        return np.hstack((A, t))

    def warp_image(self, source, target, M):
        """Warp the source image onto the target with matrix M.

        Displays the warped image, then writes it to disk.

        Arguments:
        - source : the source image to warp
        - target : the target image (defines the output size)
        - M      : the 2 x 3 affine transformation matrix
        """
        rows, cols, _ = target.shape
        warp = cv2.warpAffine(source, M, (cols, rows))
        # 50/50 blend of target and warped source (kept for debugging)
        merge = np.uint8(target * 0.5 + warp * 0.5)
        cv2.imshow('img', warp)
        # cv2.imshow('img', merge)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        cv2.imwrite('Images/warped_l.jpg', warp)
        return

    def align_image(self):
        """Run the full alignment pipeline on the two configured images."""
        source = self.read_image(self.source_path)
        target = self.read_image(self.target_path)

        # Key points and descriptors for both images
        kp_s, desc_s = self.extract_SIFT(source)
        kp_t, desc_t = self.extract_SIFT(target)

        # Correspondences -> affine matrix -> warp
        fit_pos = self.match_SIFT(desc_s, desc_t)
        M = self.affine_matrix(kp_s, kp_t, fit_pos)
        self.warp_image(source, target, M)
        return
| [
"numpy.uint8",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.BFMatcher",
"cv2.imwrite",
"numpy.hstack",
"cv2.imread",
"cv2.warpAffine",
"Align.affine_ransac.Ransac",
"numpy.array",
"cv2.xfeatures2d.SIFT_create",
"cv2.imshow",
"Align.affine_transform.Affine",
"numpy.vstack"... | [((1397, 1419), 'cv2.imread', 'cv2.imread', (['path', 'mode'], {}), '(path, mode)\n', (1407, 1419), False, 'import cv2\n'), ((1921, 1958), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1933, 1958), False, 'import cv2\n'), ((2025, 2054), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', ([], {}), '()\n', (2052, 2054), False, 'import cv2\n'), ((2760, 2775), 'cv2.BFMatcher', 'cv2.BFMatcher', ([], {}), '()\n', (2773, 2775), False, 'import cv2\n'), ((4346, 4363), 'numpy.hstack', 'np.hstack', (['(A, t)'], {}), '((A, t))\n', (4355, 4363), True, 'import numpy as np\n'), ((4865, 4904), 'cv2.warpAffine', 'cv2.warpAffine', (['source', 'M', '(cols, rows)'], {}), '(source, M, (cols, rows))\n', (4879, 4904), False, 'import cv2\n'), ((4980, 5015), 'numpy.uint8', 'np.uint8', (['(target * 0.5 + warp * 0.5)'], {}), '(target * 0.5 + warp * 0.5)\n', (4988, 5015), True, 'import numpy as np\n'), ((5051, 5074), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'warp'], {}), "('img', warp)\n", (5061, 5074), False, 'import cv2\n'), ((5118, 5132), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5129, 5132), False, 'import cv2\n'), ((5141, 5164), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5162, 5164), False, 'import cv2\n'), ((5173, 5213), 'cv2.imwrite', 'cv2.imwrite', (['"""Images/warped_l.jpg"""', 'warp'], {}), "('Images/warped_l.jpg', warp)\n", (5184, 5213), False, 'import cv2\n'), ((2168, 2196), 'numpy.array', 'np.array', (['[p.pt for p in kp]'], {}), '([p.pt for p in kp])\n', (2176, 2196), True, 'import numpy as np\n'), ((2883, 2911), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (2891, 2911), True, 'import numpy as np\n'), ((3167, 3225), 'numpy.array', 'np.array', (['[matches[i][0].queryIdx, matches[i][0].trainIdx]'], {}), '([matches[i][0].queryIdx, matches[i][0].trainIdx])\n', (3175, 3225), True, 'import numpy as np\n'), ((3334, 
3360), 'numpy.vstack', 'np.vstack', (['(fit_pos, temp)'], {}), '((fit_pos, temp))\n', (3343, 3360), True, 'import numpy as np\n'), ((4051, 4081), 'Align.affine_ransac.Ransac', 'Ransac', (['self.K', 'self.threshold'], {}), '(self.K, self.threshold)\n', (4057, 4081), False, 'from Align.affine_ransac import Ransac\n'), ((4297, 4305), 'Align.affine_transform.Affine', 'Affine', ([], {}), '()\n', (4303, 4305), False, 'from Align.affine_transform import Affine\n')] |
import numpy as np
class AllPlayers(object):
    """Common base for every player type; records which mark the player uses."""

    def __init__(self, mark):
        """Store the player's mark (e.g. "X" or "O")."""
        self.mark = mark
class Hooman(AllPlayers):
    """Human-controlled player; behaves exactly like the base class."""
class Computer(AllPlayers):
    """Common base for the machine players (random bot and Q-learning bot)."""
class StartBot(Computer):
    """Baseline opponent that plays uniformly at random among the free boxes.

    Also used by the Q-learning Bot as its exploration policy.
    """

    @staticmethod
    def getMove(board):
        """Return a random empty box of *board*, or None when none is left."""
        free = board.emptyBoxes()
        if not free:
            return None
        return free[np.random.choice(len(free))]
class Bot(Computer):
    """Q-learning player.

    Picks moves from a Q-table (state -> {move: value}); with probability
    ``epsilon`` it explores by delegating to the random StartBot instead.
    """

    def __init__(self, mark, Q=None, epsilon=0.2):
        """Build a Q-learning bot.

        Arguments:
        - mark    : the player's mark ("X" or "O")
        - Q       : Q-table mapping encoded board states to {move: value}
                    dicts; defaults to a fresh empty table
        - epsilon : exploration rate in [0, 1] (default 0.2)
        """
        super(Bot, self).__init__(mark=mark)
        # Bug fix: the previous mutable default argument ``Q={}`` silently
        # shared one Q-table across every Bot created without an explicit Q.
        self.Q = {} if Q is None else Q
        self.epsilon = epsilon

    def getMove(self, board):
        """Return a move: random with probability epsilon, greedy otherwise."""
        if np.random.uniform() < self.epsilon:
            # Explore: random legal move
            return StartBot.getMove(board)
        encodedState = Bot.encodeBoardState(board, self.mark, self.Q)
        QValue = self.Q[encodedState]
        # Convention: "X" maximizes the Q-value, "O" minimizes it
        if self.mark == "X":
            return Bot.random_minmax(QValue, max)
        elif self.mark == "O":
            return Bot.random_minmax(QValue, min)

    @staticmethod
    def random_minmax(QValue, min_or_max):
        """Return the move with the extreme (min or max) Q-value.

        Ties between equally good moves are broken uniformly at random.
        """
        values = list(QValue.values())
        extreme = min_or_max(values)
        if values.count(extreme) > 1:
            # Several moves share the best value: pick one at random
            best_moves = [move for move in QValue if QValue[move] == extreme]
            return best_moves[np.random.choice(len(best_moves))]
        return min_or_max(QValue, key=QValue.get)

    @staticmethod
    def encodeBoardState(board, mark, Q):
        """Encode the board state, lazily adding unseen states to the Q-table.

        New states get an optimistic initial value for every empty box so
        that unexplored moves look attractive during learning.
        """
        initialQ = 1.0  # optimistic init encourages exploring unseen moves
        encodedState = board.encodeState(mark)
        if Q.get(encodedState) is None:
            Q[encodedState] = {move: initialQ for move in board.emptyBoxes()}
        return encodedState
| [
"numpy.random.uniform"
] | [((1216, 1235), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1233, 1235), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import time
import logging
from sklearn.metrics import roc_auc_score, f1_score
from src.utils.utils import print_progessbar, get_best_threshold
class DROCC_trainer:
    """
    Trainer for the DROCC following the paper of Goyal et al. (2020).
    Supports the plain DROCC objective as well as the DROCC-LF variant
    (LFOC=True), which projects adversarial samples with a Mahalanobis-like
    metric derived from the input gradient.
    """
    def __init__(self, r, gamma=0.5, mu=0.5, lr=1e-4, lr_adv=1e-2, lr_milestone=(),
                 weight_decay=1e-6, n_epoch=100, n_epoch_init=15, n_epoch_adv=15,
                 batch_size=16, device='cuda', n_jobs_dataloader=0, LFOC=False,
                 print_batch_progress=False):
        """
        Build a DROCC trainer.
        ----------
        INPUT
            |---- r (float) the radius to use.
            |---- gamma (float) the fraction of the radius defining the lower
            |           bound of the close adversarial samples layer.
            |---- mu (float) the adversarial loss weight in the total loss.
            |---- lr (float) the learning rate.
            |---- lr_adv (float) the learning rate for the adversarial search.
            |---- lr_milestone (tuple) the lr update steps.
            |---- weight_decay (float) the weight_decay for the Adam optimizer.
            |---- n_epoch (int) the number of epoch.
            |---- n_epoch_init (int) the number of epoch without adversarial search.
            |---- n_epoch_adv (int) the number of epoch for the gradient ascent.
            |---- batch_size (int) the batch_size to use.
            |---- device (str) the device to work on ('cpu' or 'cuda').
            |---- n_jobs_dataloader (int) number of workers for the dataloader.
            |---- LFOC (bool) whether to use the DROCC-LF implementation.
            |---- print_batch_progress (bool) whether to dispay the batch
            |           progress bar.
        OUTPUT
            |---- None
        """
        self.LFOC = LFOC  # whether to use the DROCC-LF implementation
        self.r = r
        self.gamma = gamma
        self.mu = mu
        self.lr = lr
        self.lr_adv = lr_adv
        self.lr_milestone = lr_milestone
        self.weight_decay = weight_decay
        self.n_epoch = n_epoch
        self.n_epoch_init = n_epoch_init
        self.n_epoch_adv = n_epoch_adv
        self.batch_size = batch_size
        self.device = device
        self.n_jobs_dataloader = n_jobs_dataloader
        self.print_batch_progress = print_batch_progress

        # Results
        self.train_time = None
        self.train_loss = None

        self.valid_auc = None
        self.valid_f1 = None
        self.valid_time = None
        self.valid_scores = None

        self.test_auc = None
        self.test_f1 = None
        self.test_time = None
        self.test_scores = None

        # threshold on the anomaly score separating normal from abnormal;
        # set by validate() and used by test()
        self.scores_threshold = None

    def train(self, dataset, net):
        """
        Train the DROCC network on the provided dataset.
        ----------
        INPUT
            |---- dataset (torch.utils.data.Dataset) the dataset on which the
            |           network is trained. It must return an image, a mask and
            |           semi-supervised labels.
            |---- net (nn.Module) The DROCC to train. The network should return
            |           the logit of the passed sample.
        OUTPUT
            |---- net (nn.Module) The trained DROCC.
        """
        logger = logging.getLogger()
        # make dataloader
        train_loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, \
                                    shuffle=True, num_workers=self.n_jobs_dataloader)
        # put net to device
        net = net.to(self.device)
        # loss function
        criterion = nn.BCEWithLogitsLoss()
        # define optimizer
        optimizer = optim.Adam(net.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        # define scheduler
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestone, gamma=0.1)

        # Start training
        logger.info('>>> Start Training the DROCC.')
        start_time = time.time()
        epoch_loss_list = []
        n_batch_tot = train_loader.__len__()
        # set network in train mode
        net.train()
        for epoch in range(self.n_epoch):
            epoch_loss = 0.0
            n_batch = 0
            epoch_start_time = time.time()

            for b, data in enumerate(train_loader):
                input, _, mask, semi_label, _ = data
                input = input.to(self.device).float()
                mask = mask.to(self.device)
                semi_label = semi_label.to(self.device)
                # binary target: 0 = normal (semi_label != -1), 1 = abnormal
                # (cheaper than the former per-batch torch.where with two
                # freshly allocated tensors, same values)
                semi_label = (semi_label == -1).float()
                # mask the input
                input = input * mask

                if epoch < self.n_epoch_init:
                    # initial steps without adversarial samples
                    input.requires_grad_(True)
                    logit = net(input).squeeze(dim=1)
                    loss = criterion(logit, semi_label)
                else:
                    # get adversarial samples around normal inputs only
                    normal_input = input[semi_label == 0]
                    adv_input = self.adversarial_search(normal_input, net)
                    # forward on both normal and adversarial samples
                    input.requires_grad_(True)
                    logit = net(input).squeeze(dim=1)
                    logit_adv = net(adv_input).squeeze(dim=1)
                    # loss of samples
                    loss_sample = criterion(logit, semi_label)
                    # adversarial samples are all labelled abnormal (1)
                    loss_adv = criterion(logit_adv, torch.ones(adv_input.shape[0], device=self.device))
                    # weighted sum of normal and aversarial loss
                    loss = loss_sample + self.mu * loss_adv

                # Gradient step
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                epoch_loss += loss.item()
                n_batch += 1

                if self.print_batch_progress:
                    print_progessbar(b, n_batch_tot, Name='\t\tBatch', Size=20)

            # print epoch statistic
            epoch_train_time = time.time() - epoch_start_time
            logger.info(f'| Epoch: {epoch + 1:03}/{self.n_epoch:03} | '
                        f'Train Time: {epoch_train_time:.3f} [s] '
                        f'| Train Loss: {epoch_loss / n_batch:.6f} |')
            # append the epoch loss to results list
            epoch_loss_list.append([epoch+1, epoch_loss/n_batch])

            # update the learning rate if the milestone is reached
            scheduler.step()
            if epoch + 1 in self.lr_milestone:
                logger.info(f'>>> LR Scheduler : new learning rate {scheduler.get_lr()[0]:g}')

        # End training
        self.train_loss = epoch_loss_list
        self.train_time = time.time() - start_time
        logger.info(f'>>> Training Time of DROCC: {self.train_time:.3f} [s]')
        logger.info('>>> Finished DROCC Training.\n')

        return net

    def adversarial_search(self, x, net):
        """
        Perform an adversarial sample search by gradient ascent on the batch input.
        ----------
        INPUT
            |---- x (torch.Tensor) a batch of normal samples (B, (C,) H, W).
            |---- net (nn.Module) the network to fool.
        OUTPUT
            |---- x + h (torch.Tensor) the batch of adversarial samples.
        """
        # random initial perturbation around the normal samples
        x = x.detach()
        h = torch.normal(0, 1, x.shape, device=self.device).requires_grad_(True)
        # set optimizer for the perturbation h
        optimizer_adv = optim.Adam([h], lr=self.lr_adv)
        criterion = nn.BCEWithLogitsLoss()

        # get the sigmas for the LFOC manifold projection
        # target = 0 (normal samples)
        sigma = self.get_sigma(x, torch.zeros(x.shape[0], device=self.device), net) if self.LFOC else None

        # gradient ascent
        for i in range(self.n_epoch_adv):
            # Update h to increase loss: all adversarial samples are abnormal
            # but the network should be fooled
            optimizer_adv.zero_grad()
            logit = net(x + h).squeeze(dim=1)
            loss_h = criterion(logit, torch.ones(h.shape[0], device=self.device))
            (-loss_h).backward()
            optimizer_adv.step()
            # Project h onto Ni(r). Bug fix: the projection must be applied
            # in place — rebinding the name `h` would detach it from
            # `optimizer_adv` (which keeps updating the stale tensor) and
            # yield a non-grad tensor, silently freezing the ascent after
            # the first projection.
            with torch.no_grad():
                h.data = self.project_on_manifold(h.data, sigma)

        return (x + h).detach()

    def project_on_manifold(self, h, sigma):
        """
        Project the adversarial samples on the manifold.
        ----------
        INPUT
            |---- h (torch.Tensor) the difference between the normal and the
            |           adversarially generated samples (B, (C), H, W).
            |---- sigma (torch.Tensor) the gradient of the loss with respect to
            |           the input for LFOC. It has shape (B, (C), H, W).
            |           Ignored (may be None) when LFOC is disabled.
        OUTPUT
            |---- h (torch.Tensor) the projected h.
        """
        if self.LFOC:
            # solve the 1D optimization problem described in Goyal et al. (2020).
            # Bug fix: ProjectionSolver's signature is (sigma, h, radius, ...);
            # the arguments were previously passed swapped as (h, sigma, ...).
            solver = ProjectionSolver(sigma, h, self.r, self.device)
            alpha = solver.solve()
            h = alpha * h
        else:
            # squared norm of h per sample
            norm_h = torch.sum(h**2, dim=tuple(range(1, h.dim())))
            # clamp into the [gamma*r, r] layer per sample
            alpha = torch.clamp(norm_h, self.gamma * self.r, self.r).to(self.device)
            # make use of broadcast to rescale h
            proj = (alpha / norm_h).view(-1, *[1]*(h.dim()-1))
            h = proj * h

        return h

    def get_sigma(self, data, target, net):
        """
        Compute the the gradient of the loss with respect to the input.
        ----------
        INPUT
            |---- data (torch.Tensor) a batch of data input.
            |---- target (torch.Tensor) the target labels associated with the inputs.
            |---- net (nn.Modules) the network to use.
        OUTPUT
            |---- sigma (torch.Tensor) the gradient of the network with respect
            |           to the input in absolute value divided by the norm.
        """
        criterion = nn.BCEWithLogitsLoss()
        grad_data, target = data.float().detach().requires_grad_(), target.float()
        # evaluate the logit with the data and compute the loss
        logit = net(grad_data).squeeze(dim=1)
        loss = criterion(logit, target)
        # get the derivative of loss compared to input data
        grad = torch.autograd.grad(loss, grad_data)[0]
        # normalize absolute value gradient by batch
        grad_norm = torch.sum(torch.abs(grad), dim=tuple(range(1, grad.dim())))
        sigma = torch.abs(grad) / grad_norm.view(-1, *[1]*(grad.dim()-1))

        return sigma

    def validate(self, dataset, net):
        """
        Validate the DROCC network on the provided dataset and find the best
        threshold on the score to maximize the f1-score.
        ----------
        INPUT
            |---- dataset (torch.utils.data.Dataset) the dataset on which the
            |           network is validated. It must return an image and
            |           semi-supervized labels.
            |---- net (nn.Module) The DROCC to validate. The network should return
            |           the logit of the passed sample.
        OUTPUT
            |---- None
        """
        logger = logging.getLogger()
        # make validation dataloader using image and mask
        valid_loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, \
                                    shuffle=True, num_workers=self.n_jobs_dataloader)
        # put net to device
        net = net.to(self.device)
        # loss function
        criterion = nn.BCEWithLogitsLoss()

        # Validation
        logger.info('>>> Start Validating of the DROCC.')
        epoch_loss = 0.0
        n_batch = 0
        n_batch_tot = valid_loader.__len__()
        start_time = time.time()
        idx_label_score = []

        net.eval()
        with torch.no_grad():
            for b, data in enumerate(valid_loader):
                input, label, mask, _, idx = data
                # put data to device
                input, label = input.to(self.device).float(), label.to(self.device).float()
                idx, mask = idx.to(self.device), mask.to(self.device)
                # mask input
                input = input * mask

                logit = net(input).squeeze(dim=1)
                loss = criterion(logit, label)
                # sigmoid of logit: high for abnormal (1), low for normal (0)
                ad_score = torch.sigmoid(logit)

                idx_label_score += list(zip(idx.cpu().data.numpy().tolist(),
                                            label.cpu().data.numpy().tolist(),
                                            ad_score.cpu().data.numpy().tolist()))
                epoch_loss += loss.item()
                n_batch += 1

                if self.print_batch_progress:
                    print_progessbar(b, n_batch_tot, Name='\t\tBatch', Size=20)

        self.valid_time = time.time() - start_time
        self.valid_scores = idx_label_score
        _, label, ad_score = zip(*idx_label_score)
        label, ad_score = np.array(label), np.array(ad_score)
        self.valid_auc = roc_auc_score(label, ad_score)
        # pick the score threshold maximizing the F1-score; reused by test()
        self.scores_threshold, self.valid_f1 = get_best_threshold(ad_score, label, metric=f1_score)

        # add info to logger
        logger.info(f'>>> Validation Time: {self.valid_time:.3f} [s]')
        logger.info(f'>>> Validation Loss: {epoch_loss / n_batch:.6f}')
        logger.info(f'>>> Validation AUC: {self.valid_auc:.3%}')
        logger.info(f'>>> Best Threshold for the score maximizing F1-score: {self.scores_threshold:.3f}')
        logger.info(f'>>> Best F1-score: {self.valid_f1:.3%}')
        logger.info('>>> Finished validating the DROCC.\n')

    def test(self, dataset, net):
        """
        Test the DROCC network on the provided dataset.
        ----------
        INPUT
            |---- dataset (torch.utils.data.Dataset) the dataset on which the
            |           network is tested. It must return an image and
            |           semi-supervised labels.
            |---- net (nn.Module) The DROCC to test. The network should return
            |           the logit of the passed sample.
        OUTPUT
            |---- None
        """
        logger = logging.getLogger()
        # make test dataloader using image and mask
        test_loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, \
                                    shuffle=True, num_workers=self.n_jobs_dataloader)
        # put net to device
        net = net.to(self.device)
        # loss function
        criterion = nn.BCEWithLogitsLoss()

        # Testing
        logger.info('>>> Start Testing of the DROCC.')
        epoch_loss = 0.0
        n_batch = 0
        n_batch_tot = test_loader.__len__()
        start_time = time.time()
        idx_label_score = []

        net.eval()
        with torch.no_grad():
            for b, data in enumerate(test_loader):
                input, label, mask, _, idx = data
                # put data to device
                input, label = input.to(self.device).float(), label.to(self.device).float()
                idx, mask = idx.to(self.device), mask.to(self.device)
                # mask input
                input = input * mask

                logit = net(input).squeeze(dim=1)
                loss = criterion(logit, label)
                # sigmoid of logit: high for abnormal (1), low for normal (0)
                ad_score = torch.sigmoid(logit)

                idx_label_score += list(zip(idx.cpu().data.numpy().tolist(),
                                            label.cpu().data.numpy().tolist(),
                                            ad_score.cpu().data.numpy().tolist()))
                epoch_loss += loss.item()
                n_batch += 1

                if self.print_batch_progress:
                    print_progessbar(b, n_batch_tot, Name='\t\tBatch', Size=20)

        self.test_time = time.time() - start_time
        self.test_scores = idx_label_score
        _, label, ad_score = zip(*idx_label_score)
        label, ad_score = np.array(label), np.array(ad_score)
        self.test_auc = roc_auc_score(label, ad_score)
        # fall back to the sigmoid midpoint when validate() was not called
        # (previously this crashed comparing against a None threshold)
        threshold = self.scores_threshold if self.scores_threshold is not None else 0.5
        self.test_f1 = f1_score(label, np.where(ad_score > threshold, 1, 0))

        # add info to logger
        logger.info(f'>>> Testing Time: {self.test_time:.3f} [s]')
        logger.info(f'>>> Test Loss: {epoch_loss / n_batch:.6f}')
        logger.info(f'>>> Test AUC: {self.test_auc:.3%}')
        logger.info(f'>>> Test F1-score: {self.test_f1:.3%}')
        logger.info('>>> Finished testing the DROCC.\n')
class ProjectionSolver:
    """
    Numerical solver for the one-dimensional, non-convex projection problem
    of DROCC-LF: find per-sample scalings of the adversarial offset that
    bring it back onto the Mahalanobis sphere of radius r (Goyal et al. 2020).
    """
    def __init__(self, sigma, h, radius, device='cuda', n_search=10):
        """
        Build a projection solver.
        ----------
        INPUT
            |---- sigma (torch.Tensor) gradient of the loss with respect to
            |           the input, one entry per sample (B, (C), H, W).
            |---- h (torch.Tensor) difference between the normal sample and
            |           its adversarially generated counterpart (B, (C), H, W).
            |---- radius (float) the radius around normal points.
            |---- device (str) the device to use ('cpu' or 'cuda').
            |---- n_search (int) number of random search iterations.
        OUTPUT
            |---- None
        """
        self.sigma = sigma
        self.h = h
        self.radius = radius
        self.device = device
        self.n_search = n_search
        # samples whose offset already lies outside the sphere skip the search
        self.cond_init = self.check_cond_init()
        # smallest tau value the random search may draw
        self.lower_tau = self.get_lower_tau()

    def check_cond_init(self):
        """
        Flag, per sample, whether the Mahalanobis distance of the offset
        already reaches r^2 (no search needed for those samples).
        ----------
        INPUT
            |---- None
        OUTPUT
            |---- condition (torch.Tensor) boolean tensor, one flag per sample.
        """
        # Mahalanobis distance summed over all non-batch dimensions
        mahalanobis = torch.flatten(self.sigma * self.h ** 2, start_dim=1).sum(dim=1)
        return mahalanobis >= self.radius ** 2

    def get_lower_tau(self):
        """
        Lower bound for tau, given by -1 / max(sigma) per sample.
        ----------
        INPUT
            |---- None
        OUTPUT
            |---- low_tau (torch.Tensor) lower bound, one value per sample.
        """
        # per-sample maximum of sigma; small epsilon guards the division
        per_sample_max = torch.flatten(self.sigma, start_dim=1).max(dim=1).values
        return -1 / (per_sample_max + 1e-10)

    def eval_search_condition(self, tau, sigma_i, h_i):
        """
        Feasibility test for a candidate tau on a single sample: the
        projected point must lie on or outside the sphere of radius r.
        ----------
        INPUT
            |---- tau (float) the candidate value of tau.
            |---- sigma_i (torch.Tensor) sigma of the given sample ((C), H, W).
            |---- h_i (torch.Tensor) offset of the given sample ((C), H, W).
        OUTPUT
            |---- condition (bool) whether the candidate is feasible.
        """
        numerator = tau ** 2 * h_i ** 2 * sigma_i ** 3
        denominator = (1 + tau * sigma_i) ** 2 + 1e-10
        return torch.sum(numerator / denominator) >= self.radius ** 2

    def eval_minimum(self, tau, sigma_i, h_i):
        """
        Objective value to minimize for a single sample and candidate tau.
        ----------
        INPUT
            |---- tau (float) the candidate value of tau.
            |---- sigma_i (torch.Tensor) sigma of the given sample ((C), H, W).
            |---- h_i (torch.Tensor) offset of the given sample ((C), H, W).
        OUTPUT
            |---- score (torch.Tensor) scalar objective value.
        """
        numerator = tau ** 2 * h_i ** 2 * sigma_i ** 2
        denominator = (1 + tau * sigma_i) ** 2 + 1e-10
        return torch.sum(numerator / denominator)

    def solve(self):
        """
        Random search for the best tau of every sample, then convert the
        taus into scaling coefficients alpha with x_adv = x + alpha * h.
        ----------
        INPUT
            |---- None
        OUTPUT
            |---- alpha (torch.Tensor) per-element projection coefficients.
        """
        best_tau = torch.zeros(self.h.shape[0], device=self.device)
        # treat each sample of the batch independently
        for idx in range(self.h.shape[0]):
            if self.cond_init[idx]:
                # offset already outside the sphere: keep it unchanged
                best_tau[idx] = 0
                continue
            lowest = float('inf')
            for _ in range(self.n_search):
                # draw a candidate tau uniformly in [lower_tau, 0]
                tau = torch.FloatTensor(1).uniform_(self.lower_tau[idx], 0).to(self.device)
                # score the candidate when feasible, otherwise discard it
                if self.eval_search_condition(tau, self.sigma[idx], self.h[idx]):
                    candidate = self.eval_minimum(tau, self.sigma[idx], self.h[idx])
                else:
                    candidate = torch.tensor(float('inf'))
                if candidate < lowest:
                    lowest = candidate
                    best_tau[idx] = tau

        return 1 / (1 + best_tau.view(-1, *[1] * (self.sigma.dim() - 1)) * self.sigma)
| [
"torch.autograd.grad",
"torch.no_grad",
"torch.flatten",
"torch.ones",
"torch.utils.data.DataLoader",
"torch.FloatTensor",
"torch.Tensor",
"torch.zeros",
"torch.nn.BCEWithLogitsLoss",
"src.utils.utils.get_best_threshold",
"src.utils.utils.print_progessbar",
"sklearn.metrics.roc_auc_score",
"... | [((3439, 3458), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3456, 3458), False, 'import logging\n'), ((3509, 3628), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'num_workers': 'self.n_jobs_dataloader'}), '(dataset, batch_size=self.batch_size, shuffle=\n True, num_workers=self.n_jobs_dataloader)\n', (3536, 3628), False, 'import torch\n'), ((3784, 3806), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (3804, 3806), True, 'import torch.nn as nn\n'), ((3976, 4062), 'torch.optim.lr_scheduler.MultiStepLR', 'optim.lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': 'self.lr_milestone', 'gamma': '(0.1)'}), '(optimizer, milestones=self.lr_milestone,\n gamma=0.1)\n', (4006, 4062), True, 'import torch.optim as optim\n'), ((4159, 4170), 'time.time', 'time.time', ([], {}), '()\n', (4168, 4170), False, 'import time\n'), ((9706, 9737), 'torch.optim.Adam', 'optim.Adam', (['[h]'], {'lr': 'self.lr_adv'}), '([h], lr=self.lr_adv)\n', (9716, 9737), True, 'import torch.optim as optim\n'), ((9771, 9793), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (9791, 9793), True, 'import torch.nn as nn\n'), ((12348, 12370), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (12368, 12370), True, 'import torch.nn as nn\n'), ((13572, 13591), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (13589, 13591), False, 'import logging\n'), ((13668, 13787), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'num_workers': 'self.n_jobs_dataloader'}), '(dataset, batch_size=self.batch_size, shuffle=\n True, num_workers=self.n_jobs_dataloader)\n', (13695, 13787), False, 'import torch\n'), ((13932, 13954), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (13952, 13954), True, 'import torch.nn as nn\n'), ((14143, 
14154), 'time.time', 'time.time', ([], {}), '()\n', (14152, 14154), False, 'import time\n'), ((15555, 15585), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['label', 'ad_score'], {}), '(label, ad_score)\n', (15568, 15585), False, 'from sklearn.metrics import roc_auc_score, f1_score\n'), ((15633, 15685), 'src.utils.utils.get_best_threshold', 'get_best_threshold', (['ad_score', 'label'], {'metric': 'f1_score'}), '(ad_score, label, metric=f1_score)\n', (15651, 15685), False, 'from src.utils.utils import print_progessbar, get_best_threshold\n'), ((16688, 16707), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (16705, 16707), False, 'import logging\n'), ((16783, 16902), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'num_workers': 'self.n_jobs_dataloader'}), '(dataset, batch_size=self.batch_size, shuffle=\n True, num_workers=self.n_jobs_dataloader)\n', (16810, 16902), False, 'import torch\n'), ((17047, 17069), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (17067, 17069), True, 'import torch.nn as nn\n'), ((17254, 17265), 'time.time', 'time.time', ([], {}), '()\n', (17263, 17265), False, 'import time\n'), ((18658, 18688), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['label', 'ad_score'], {}), '(label, ad_score)\n', (18671, 18688), False, 'from sklearn.metrics import roc_auc_score, f1_score\n'), ((22132, 22154), 'torch.sum', 'torch.sum', (['(num / denom)'], {}), '(num / denom)\n', (22141, 22154), False, 'import torch\n'), ((22780, 22802), 'torch.sum', 'torch.sum', (['(num / denom)'], {}), '(num / denom)\n', (22789, 22802), False, 'import torch\n'), ((23215, 23263), 'torch.zeros', 'torch.zeros', (['self.h.shape[0]'], {'device': 'self.device'}), '(self.h.shape[0], device=self.device)\n', (23226, 23263), False, 'import torch\n'), ((4427, 4438), 'time.time', 'time.time', ([], {}), '()\n', (4436, 4438), False, 'import time\n'), ((7180, 7191), 
'time.time', 'time.time', ([], {}), '()\n', (7189, 7191), False, 'import time\n'), ((12679, 12715), 'torch.autograd.grad', 'torch.autograd.grad', (['loss', 'grad_data'], {}), '(loss, grad_data)\n', (12698, 12715), False, 'import torch\n'), ((12802, 12817), 'torch.abs', 'torch.abs', (['grad'], {}), '(grad)\n', (12811, 12817), False, 'import torch\n'), ((12868, 12883), 'torch.abs', 'torch.abs', (['grad'], {}), '(grad)\n', (12877, 12883), False, 'import torch\n'), ((14217, 14232), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14230, 14232), False, 'import torch\n'), ((15348, 15359), 'time.time', 'time.time', ([], {}), '()\n', (15357, 15359), False, 'import time\n'), ((15494, 15509), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (15502, 15509), True, 'import numpy as np\n'), ((15511, 15529), 'numpy.array', 'np.array', (['ad_score'], {}), '(ad_score)\n', (15519, 15529), True, 'import numpy as np\n'), ((17328, 17343), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17341, 17343), False, 'import torch\n'), ((18453, 18464), 'time.time', 'time.time', ([], {}), '()\n', (18462, 18464), False, 'import time\n'), ((18598, 18613), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (18606, 18613), True, 'import numpy as np\n'), ((18615, 18633), 'numpy.array', 'np.array', (['ad_score'], {}), '(ad_score)\n', (18623, 18633), True, 'import numpy as np\n'), ((18728, 18776), 'numpy.where', 'np.where', (['(ad_score > self.scores_threshold)', '(1)', '(0)'], {}), '(ad_score > self.scores_threshold, 1, 0)\n', (18736, 18776), True, 'import numpy as np\n'), ((21216, 21254), 'torch.flatten', 'torch.flatten', (['self.sigma'], {'start_dim': '(1)'}), '(self.sigma, start_dim=1)\n', (21229, 21254), False, 'import torch\n'), ((6489, 6500), 'time.time', 'time.time', ([], {}), '()\n', (6498, 6500), False, 'import time\n'), ((9568, 9615), 'torch.normal', 'torch.normal', (['(0)', '(1)', 'x.shape'], {'device': 'self.device'}), '(0, 1, x.shape, device=self.device)\n', (9580, 
9615), False, 'import torch\n'), ((9925, 9968), 'torch.zeros', 'torch.zeros', (['x.shape[0]'], {'device': 'self.device'}), '(x.shape[0], device=self.device)\n', (9936, 9968), False, 'import torch\n'), ((10229, 10271), 'torch.ones', 'torch.ones', (['h.shape[0]'], {'device': 'self.device'}), '(h.shape[0], device=self.device)\n', (10239, 10271), False, 'import torch\n'), ((10471, 10486), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10484, 10486), False, 'import torch\n'), ((14767, 14787), 'torch.sigmoid', 'torch.sigmoid', (['logit'], {}), '(logit)\n', (14780, 14787), False, 'import torch\n'), ((17873, 17893), 'torch.sigmoid', 'torch.sigmoid', (['logit'], {}), '(logit)\n', (17886, 17893), False, 'import torch\n'), ((6361, 6420), 'src.utils.utils.print_progessbar', 'print_progessbar', (['b', 'n_batch_tot'], {'Name': '"""\t\tBatch"""', 'Size': '(20)'}), "(b, n_batch_tot, Name='\\t\\tBatch', Size=20)\n", (6377, 6420), False, 'from src.utils.utils import print_progessbar, get_best_threshold\n'), ((11561, 11609), 'torch.clamp', 'torch.clamp', (['norm_h', '(self.gamma * self.r)', 'self.r'], {}), '(norm_h, self.gamma * self.r, self.r)\n', (11572, 11609), False, 'import torch\n'), ((15261, 15320), 'src.utils.utils.print_progessbar', 'print_progessbar', (['b', 'n_batch_tot'], {'Name': '"""\t\tBatch"""', 'Size': '(20)'}), "(b, n_batch_tot, Name='\\t\\tBatch', Size=20)\n", (15277, 15320), False, 'from src.utils.utils import print_progessbar, get_best_threshold\n'), ((18367, 18426), 'src.utils.utils.print_progessbar', 'print_progessbar', (['b', 'n_batch_tot'], {'Name': '"""\t\tBatch"""', 'Size': '(20)'}), "(b, n_batch_tot, Name='\\t\\tBatch', Size=20)\n", (18383, 18426), False, 'from src.utils.utils import print_progessbar, get_best_threshold\n'), ((5909, 5959), 'torch.ones', 'torch.ones', (['adv_input.shape[0]'], {'device': 'self.device'}), '(adv_input.shape[0], device=self.device)\n', (5919, 5959), False, 'import torch\n'), ((4813, 4830), 'torch.Tensor', 'torch.Tensor', 
(['[0]'], {}), '([0])\n', (4825, 4830), False, 'import torch\n'), ((4848, 4865), 'torch.Tensor', 'torch.Tensor', (['[1]'], {}), '([1])\n', (4860, 4865), False, 'import torch\n'), ((23642, 23662), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)'], {}), '(1)\n', (23659, 23662), False, 'import torch\n')] |
########################################################################################################
# A Program to read the NIST website and output figures
#
# Reads iso-lines (isotherms or isobars) for a given fluid from the NIST web
# database and saves thermodynamic state diagrams (P-rho, P-e, rho-e, H-S,
# P-V, T-rho) as image files.
#
# Cleanup notes: removed unused `startValues`, `NVar` and `dataIsotherm`,
# and the leftover `pdb.set_trace()` debugger breakpoint at the end.
########################################################################################################
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as ml
import pdb
import requests
import NIST_reader as NIST
#import netCDF_writer as ncdf
import criticalProperties as cP
from bs4 import BeautifulSoup
from scipy import interpolate
import tecplotOutput as tec
import scipy as sp

# ---------------------------------------------------------------------------
# User defined quantities
# ---------------------------------------------------------------------------
fluid = 'O2'
isoType = 'isotherm'   # 'isotherm' (sweep T) or 'isobar' (sweep P)
Pmin = 1.0E6           # pressure range [Pa]
Pmax = 5.0E6
Tmin = 80              # temperature range [K]
Tmax = 840.0
NbT = 25               # number of isotherms to draw
NbP = 100              # number of isobars / points per NIST request
T = 444                # fixed temperature [K] used when isoType == 'isobar'
P = 1.0                # fixed pressure [Pa] used when isoType == 'isotherm'
ending = '.svg'        # output image format

# One curve is drawn per entry in rangeNIST
if isoType == 'isotherm':
    rangeNIST = np.linspace(Tmin, Tmax, NbT)
elif isoType == 'isobar':
    rangeNIST = np.linspace(Pmin, Pmax, NbP)

for ii, valThermo in enumerate(rangeNIST):
    if isoType == 'isotherm':
        T = valThermo
    elif isoType == 'isobar':
        P = valThermo
    # NIST_reader expects pressures in MPa
    dataNIST = NIST.readNIST(isoType, fluid, T, P/1.0E6, Tmin, Tmax, Pmin/1.0E6, Pmax/1.0E6, NbP)
    if ii == 0:
        Tarray = dataNIST[NIST.colNIST('T'), :]
        Parray = dataNIST[NIST.colNIST('P'), :]
        Harray = dataNIST[NIST.colNIST('H'), :]
        RHOarray = dataNIST[NIST.colNIST('rho'), :]
        Earray = dataNIST[NIST.colNIST('E'), :]
        nPts = np.size(dataNIST[0, :])
    else:
        Tarray = np.append(Tarray, dataNIST[NIST.colNIST('T'), :])
        Parray = np.append(Parray, dataNIST[NIST.colNIST('P'), :])
        Harray = np.append(Harray, dataNIST[NIST.colNIST('H'), :])
        # BUG FIX: this line previously appended to Harray instead of Earray,
        # so Earray silently accumulated enthalpy data after the first pass.
        Earray = np.append(Earray, dataNIST[NIST.colNIST('E'), :])
        RHOarray = np.append(RHOarray, dataNIST[NIST.colNIST('rho'), :])
    # Accumulate one curve per iso-line on each state diagram
    plt.figure(33)
    plt.plot(dataNIST[NIST.colNIST('rho'), :], dataNIST[NIST.colNIST('P'), :]/1E6, color='k')
    plt.figure(34)
    plt.plot(dataNIST[NIST.colNIST('E'), :], dataNIST[NIST.colNIST('P'), :]/1E6, color='k')
    plt.figure(35)
    plt.plot(dataNIST[NIST.colNIST('rho'), :], dataNIST[NIST.colNIST('E'), :], color='k')
    plt.figure(36)
    plt.plot(dataNIST[NIST.colNIST('H'), :], dataNIST[NIST.colNIST('S'), :], color='k')
    plt.figure(37)
    plt.plot(dataNIST[NIST.colNIST('P'), :]/1E6, dataNIST[NIST.colNIST('V'), :], color='k')
    plt.figure(38)
    plt.plot(dataNIST[NIST.colNIST('rho'), :], dataNIST[NIST.colNIST('T'), :], color='k')

# Label the axes and save each figure
prefix = isoType + '_' + fluid
plt.figure(33)
plt.xlabel('rho (kg/m3)')
plt.ylabel('P (MPa)')
plt.savefig(prefix + '_rhoP' + ending)
plt.figure(34)
plt.xlabel('E (kJ/kg)')
plt.ylabel('P (MPa)')
plt.savefig(prefix + '_eP' + ending)
plt.figure(35)
plt.xlabel('rho (kg/m3)')
plt.ylabel('E (kJ/kg)')
plt.savefig(prefix + '_rhoE' + ending)
plt.figure(36)
plt.xlabel('H (kJ/kg)')
plt.ylabel('S (J/g*K)')
plt.savefig(prefix + '_HS' + ending)
plt.figure(37)
plt.xlabel('P (MPa)')
plt.ylabel('V (m3/kg)')
plt.savefig(prefix + '_PV' + ending)
plt.figure(38)
plt.xlabel('rho (kg/m3)')
plt.ylabel('T (K)')
plt.savefig(prefix + '_rhoT' + ending)
| [
"numpy.size",
"numpy.zeros",
"matplotlib.pyplot.figure",
"pdb.set_trace",
"numpy.linspace",
"NIST_reader.colNIST",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"NIST_reader.readNIST",
"matplotlib.pyplot.savefig"
] | [((934, 966), 'numpy.zeros', 'np.zeros', (['[NVar, NbT * NbP * 10]'], {}), '([NVar, NbT * NbP * 10])\n', (942, 966), True, 'import numpy as np\n'), ((2477, 2491), 'matplotlib.pyplot.figure', 'plt.figure', (['(33)'], {}), '(33)\n', (2487, 2491), True, 'import matplotlib.pyplot as plt\n'), ((2492, 2517), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""rho (kg/m3)"""'], {}), "('rho (kg/m3)')\n", (2502, 2517), True, 'import matplotlib.pyplot as plt\n'), ((2518, 2539), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""P (MPa)"""'], {}), "('P (MPa)')\n", (2528, 2539), True, 'import matplotlib.pyplot as plt\n'), ((2540, 2578), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(prefix + '_rhoP' + ending)"], {}), "(prefix + '_rhoP' + ending)\n", (2551, 2578), True, 'import matplotlib.pyplot as plt\n'), ((2575, 2589), 'matplotlib.pyplot.figure', 'plt.figure', (['(34)'], {}), '(34)\n', (2585, 2589), True, 'import matplotlib.pyplot as plt\n'), ((2590, 2613), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""E (kJ/kg)"""'], {}), "('E (kJ/kg)')\n", (2600, 2613), True, 'import matplotlib.pyplot as plt\n'), ((2614, 2635), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""P (MPa)"""'], {}), "('P (MPa)')\n", (2624, 2635), True, 'import matplotlib.pyplot as plt\n'), ((2636, 2672), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(prefix + '_eP' + ending)"], {}), "(prefix + '_eP' + ending)\n", (2647, 2672), True, 'import matplotlib.pyplot as plt\n'), ((2669, 2683), 'matplotlib.pyplot.figure', 'plt.figure', (['(35)'], {}), '(35)\n', (2679, 2683), True, 'import matplotlib.pyplot as plt\n'), ((2684, 2709), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""rho (kg/m3)"""'], {}), "('rho (kg/m3)')\n", (2694, 2709), True, 'import matplotlib.pyplot as plt\n'), ((2710, 2733), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""E (kJ/kg)"""'], {}), "('E (kJ/kg)')\n", (2720, 2733), True, 'import matplotlib.pyplot as plt\n'), ((2734, 2772), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(prefix + '_rhoE' + 
ending)"], {}), "(prefix + '_rhoE' + ending)\n", (2745, 2772), True, 'import matplotlib.pyplot as plt\n'), ((2769, 2783), 'matplotlib.pyplot.figure', 'plt.figure', (['(36)'], {}), '(36)\n', (2779, 2783), True, 'import matplotlib.pyplot as plt\n'), ((2784, 2807), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""H (kJ/kg)"""'], {}), "('H (kJ/kg)')\n", (2794, 2807), True, 'import matplotlib.pyplot as plt\n'), ((2808, 2831), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""S (J/g*K)"""'], {}), "('S (J/g*K)')\n", (2818, 2831), True, 'import matplotlib.pyplot as plt\n'), ((2832, 2868), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(prefix + '_HS' + ending)"], {}), "(prefix + '_HS' + ending)\n", (2843, 2868), True, 'import matplotlib.pyplot as plt\n'), ((2865, 2879), 'matplotlib.pyplot.figure', 'plt.figure', (['(37)'], {}), '(37)\n', (2875, 2879), True, 'import matplotlib.pyplot as plt\n'), ((2880, 2901), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""P (MPa)"""'], {}), "('P (MPa)')\n", (2890, 2901), True, 'import matplotlib.pyplot as plt\n'), ((2902, 2925), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""V (m3/kg)"""'], {}), "('V (m3/kg)')\n", (2912, 2925), True, 'import matplotlib.pyplot as plt\n'), ((2926, 2962), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(prefix + '_PV' + ending)"], {}), "(prefix + '_PV' + ending)\n", (2937, 2962), True, 'import matplotlib.pyplot as plt\n'), ((2959, 2973), 'matplotlib.pyplot.figure', 'plt.figure', (['(38)'], {}), '(38)\n', (2969, 2973), True, 'import matplotlib.pyplot as plt\n'), ((2974, 2999), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""rho (kg/m3)"""'], {}), "('rho (kg/m3)')\n", (2984, 2999), True, 'import matplotlib.pyplot as plt\n'), ((3000, 3019), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""T (K)"""'], {}), "('T (K)')\n", (3010, 3019), True, 'import matplotlib.pyplot as plt\n'), ((3020, 3058), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(prefix + '_rhoT' + ending)"], {}), "(prefix + '_rhoT' + ending)\n", (3031, 
3058), True, 'import matplotlib.pyplot as plt\n'), ((3055, 3070), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3068, 3070), False, 'import pdb\n'), ((828, 856), 'numpy.linspace', 'np.linspace', (['Tmin', 'Tmax', 'NbT'], {}), '(Tmin, Tmax, NbT)\n', (839, 856), True, 'import numpy as np\n'), ((1113, 1218), 'NIST_reader.readNIST', 'NIST.readNIST', (['isoType', 'fluid', 'T', '(P / 1000000.0)', 'Tmin', 'Tmax', '(Pmin / 1000000.0)', '(Pmax / 1000000.0)', 'NbP'], {}), '(isoType, fluid, T, P / 1000000.0, Tmin, Tmax, Pmin / \n 1000000.0, Pmax / 1000000.0, NbP)\n', (1126, 1218), True, 'import NIST_reader as NIST\n'), ((1815, 1829), 'matplotlib.pyplot.figure', 'plt.figure', (['(33)'], {}), '(33)\n', (1825, 1829), True, 'import matplotlib.pyplot as plt\n'), ((1924, 1938), 'matplotlib.pyplot.figure', 'plt.figure', (['(34)'], {}), '(34)\n', (1934, 1938), True, 'import matplotlib.pyplot as plt\n'), ((2031, 2045), 'matplotlib.pyplot.figure', 'plt.figure', (['(35)'], {}), '(35)\n', (2041, 2045), True, 'import matplotlib.pyplot as plt\n'), ((2136, 2150), 'matplotlib.pyplot.figure', 'plt.figure', (['(36)'], {}), '(36)\n', (2146, 2150), True, 'import matplotlib.pyplot as plt\n'), ((2239, 2253), 'matplotlib.pyplot.figure', 'plt.figure', (['(37)'], {}), '(37)\n', (2249, 2253), True, 'import matplotlib.pyplot as plt\n'), ((2346, 2360), 'matplotlib.pyplot.figure', 'plt.figure', (['(38)'], {}), '(38)\n', (2356, 2360), True, 'import matplotlib.pyplot as plt\n'), ((893, 921), 'numpy.linspace', 'np.linspace', (['Pmin', 'Pmax', 'NbP'], {}), '(Pmin, Pmax, NbP)\n', (904, 921), True, 'import numpy as np\n'), ((1449, 1472), 'numpy.size', 'np.size', (['dataNIST[0, :]'], {}), '(dataNIST[0, :])\n', (1456, 1472), True, 'import numpy as np\n'), ((1231, 1248), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""T"""'], {}), "('T')\n", (1243, 1248), True, 'import NIST_reader as NIST\n'), ((1276, 1293), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""P"""'], {}), "('P')\n", (1288, 1293), True, 'import 
NIST_reader as NIST\n'), ((1321, 1338), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""H"""'], {}), "('H')\n", (1333, 1338), True, 'import NIST_reader as NIST\n'), ((1368, 1387), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""rho"""'], {}), "('rho')\n", (1380, 1387), True, 'import NIST_reader as NIST\n'), ((1415, 1432), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""E"""'], {}), "('E')\n", (1427, 1432), True, 'import NIST_reader as NIST\n'), ((1852, 1871), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""rho"""'], {}), "('rho')\n", (1864, 1871), True, 'import NIST_reader as NIST\n'), ((1961, 1978), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""E"""'], {}), "('E')\n", (1973, 1978), True, 'import NIST_reader as NIST\n'), ((2068, 2087), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""rho"""'], {}), "('rho')\n", (2080, 2087), True, 'import NIST_reader as NIST\n'), ((2100, 2117), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""E"""'], {}), "('E')\n", (2112, 2117), True, 'import NIST_reader as NIST\n'), ((2173, 2190), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""H"""'], {}), "('H')\n", (2185, 2190), True, 'import NIST_reader as NIST\n'), ((2203, 2220), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""S"""'], {}), "('S')\n", (2215, 2220), True, 'import NIST_reader as NIST\n'), ((2310, 2327), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""V"""'], {}), "('V')\n", (2322, 2327), True, 'import NIST_reader as NIST\n'), ((2383, 2402), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""rho"""'], {}), "('rho')\n", (2395, 2402), True, 'import NIST_reader as NIST\n'), ((2415, 2432), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""T"""'], {}), "('T')\n", (2427, 2432), True, 'import NIST_reader as NIST\n'), ((1528, 1545), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""T"""'], {}), "('T')\n", (1540, 1545), True, 'import NIST_reader as NIST\n'), ((1591, 1608), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""P"""'], {}), "('P')\n", (1603, 1608), True, 'import NIST_reader as NIST\n'), ((1654, 1671), 'NIST_reader.colNIST', 
'NIST.colNIST', (['"""H"""'], {}), "('H')\n", (1666, 1671), True, 'import NIST_reader as NIST\n'), ((1717, 1734), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""E"""'], {}), "('E')\n", (1729, 1734), True, 'import NIST_reader as NIST\n'), ((1784, 1803), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""rho"""'], {}), "('rho')\n", (1796, 1803), True, 'import NIST_reader as NIST\n'), ((1884, 1901), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""P"""'], {}), "('P')\n", (1896, 1901), True, 'import NIST_reader as NIST\n'), ((1991, 2008), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""P"""'], {}), "('P')\n", (2003, 2008), True, 'import NIST_reader as NIST\n'), ((2276, 2293), 'NIST_reader.colNIST', 'NIST.colNIST', (['"""P"""'], {}), "('P')\n", (2288, 2293), True, 'import NIST_reader as NIST\n')] |
import matplotlib
matplotlib.use('Agg')
import re
import argparse
from datetime import datetime, timedelta, time
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import itertools
from sklearn.metrics import confusion_matrix
ANNO_LABEL_DICT = 'annotation-label-dictionary.csv'
DOHERTY2018_DICT_COL = 'label:Doherty2018'
DOHERTY2018_COLOURS = {'sleep':'blue',
'sedentary': 'red',
'tasks-light': 'darkorange',
'walking': 'lightgreen',
'moderate': 'green'}
DOHERTY2018_LABELS = list(DOHERTY2018_COLOURS.keys())
WILLETTS2018_DICT_COL = 'label:Willetts2018'
WILLETTS2018_COLOURS = {'sleep':'blue',
'sit-stand': 'red',
'vehicle': 'darkorange',
'walking': 'lightgreen',
'mixed': 'green',
'bicycling': 'purple'}
WILLETTS2018_LABELS = list(WILLETTS2018_COLOURS.keys())
WALMSLEY2020_DICT_COL = 'label:Walmsley2020'
WALMSLEY2020_COLOURS = {'sleep':'blue',
'sedentary': 'red',
'light': 'darkorange',
'moderate-vigorous': 'green'}
WALMSLEY2020_LABELS = list(WALMSLEY2020_COLOURS.keys())
IMPUTED_COLOR = '#fafc6f' # yellow
UNCODEABLE_COLOR = '#d3d3d3' # lightgray
BACKGROUND_COLOR = '#d3d3d3' # lightgray
def annotationSimilarity(anno1, anno2):
    """Naive bag-of-words similarity: fraction of anno1's tokens also in anno2."""
    pattern = ";|, | "
    tokens1 = re.split(pattern, anno1)
    tokens2 = re.split(pattern, anno2)
    overlap = set(tokens1) & set(tokens2)
    # NOTE(review): the denominator is len(tokens1) (duplicates included), so
    # the score is asymmetric in its arguments -- intentional? Averaging over
    # both annotations might be fairer. Behaviour kept as-is.
    return len(overlap) / len(tokens1)
def nearestAnnotation(annoList, annoTarget, threshold=.8):
    """Return the entry of annoList most similar to annoTarget.

    Similarity is measured by annotationSimilarity; returns None (and prints a
    warning) when the best similarity falls below `threshold`.
    """
    scores = [annotationSimilarity(annoTarget, anno) for anno in annoList]
    best = int(np.argmax(scores))
    if scores[best] < threshold:
        print(f"No similar annotation found in dictionary for: '{annoTarget}'")
        return None
    return annoList[best]
def buildLabelDict(labelDictCSV, labelDictCol):
    """Load the annotation-label dictionary for one labelling scheme.

    Reads `labelDictCSV` and returns a {annotation: label} mapping taken from
    the 'annotation' column and the requested label column.
    """
    df = pd.read_csv(labelDictCSV, usecols=['annotation', labelDictCol])
    return dict(zip(df['annotation'], df[labelDictCol]))
def annotateTsData(tsData, annoData, labelDict):
    """In place, stamp each row of tsData with the dictionary label of the
    annotation interval that covers its timestamp.

    Rows covered by no interval keep 'annotation' == 'undefined'; intervals
    whose text has no close-enough dictionary entry are marked 'uncodeable'.
    """
    tsData['annotation'] = 'undefined'
    timestamps = tsData['time'].dt.tz_localize(None)
    dictEntries = list(labelDict.keys())
    for _, anno in annoData.iterrows():
        start = anno['startTime'].tz_localize(None)
        end = anno['endTime'].tz_localize(None)
        matched = nearestAnnotation(dictEntries, anno['annotation'])
        # strict inequalities: interval endpoints are excluded, as before
        covered = (timestamps > start) & (timestamps < end)
        tsData.loc[covered, 'annotation'] = labelDict.get(matched, 'uncodeable')
def gatherPredictionLabels(tsData, labels):
    """In place, collapse the per-label score columns into one 'prediction'
    column: 'imputed' where the imputed flag is 1, otherwise the (last) label
    whose column is positive, otherwise 'undefined'."""
    tsData['prediction'] = 'undefined'
    isImputed = tsData['imputed'] == 1
    notImputed = tsData['imputed'] == 0
    tsData.loc[isImputed, 'prediction'] = 'imputed'
    for label in labels:
        tsData.loc[(tsData[label] > 0) & notImputed, 'prediction'] = label
def formatXYaxes(ax, day, ymax, ymin):
    """Style one day's subplot: hourly gridlines, a fixed 24 h x-range for
    `day`, a (ymin, ymax) y-range with hidden ticks, and a light-grey look."""
    dayStart = datetime.combine(day, time(0, 0, 0, 0))
    dayEnd = datetime.combine(day + timedelta(days=1), time(0, 0, 0, 0))
    # hour bars: stronger major gridlines (every 4 h), fainter minor (hourly)
    ax.get_xaxis().grid(True, which='major', color='grey', alpha=0.5)
    ax.get_xaxis().grid(True, which='minor', color='grey', alpha=0.25)
    ax.set_xlim((dayStart, dayEnd))
    ax.set_xticks(pd.date_range(start=dayStart, end=dayEnd, freq='4H'))
    ax.set_xticks(pd.date_range(start=dayStart, end=dayEnd, freq='1H'), minor=True)
    ax.set_ylim((ymin, ymax))
    ax.get_yaxis().set_ticks([])  # hide y-axis labels
    # make the border less harsh between stacked subplots
    ax.spines['top'].set_color(BACKGROUND_COLOR)
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_visible(False)
    # light-grey background behind the activity strips
    ax.set_facecolor(BACKGROUND_COLOR)
def splitByTimeGap(group, seconds=30):
    """Split a time-indexed frame into contiguous runs, starting a new run
    wherever consecutive timestamps are more than `seconds` apart.

    Returns a pandas GroupBy object, one group per run.
    """
    gapStarts = group.index.to_series().diff() > timedelta(seconds=seconds)
    return group.groupby(by=gapStarts.cumsum())
def confusionMatrix(tsData, labels, normalize=False, include_uncodeable_imputed=False):
    """Confusion matrix of model predictions against camera annotations.

    Rows with no annotation ('undefined') are excluded.

    Parameters
    ----------
    tsData : DataFrame with 'annotation' and 'prediction' columns.
    labels : class labels fixing the matrix row/column order.
    normalize : if True, normalise each row to sum to 1.
    include_uncodeable_imputed : if True, also count the 'uncodeable' and
        'imputed' pseudo-classes.

    Returns
    -------
    (cm, cmLabels) : the confusion matrix and the label order used.
    """
    annotated = tsData.loc[tsData['annotation'] != 'undefined']
    y_true = annotated['annotation'].values
    y_pred = annotated['prediction'].values
    # BUG FIX: previously `cmLabels = labels` followed by `cmLabels += [...]`
    # mutated the caller's list in place (corrupting the module-level *_LABELS
    # constants when include_uncodeable_imputed=True). Copy before extending.
    cmLabels = list(labels)
    if include_uncodeable_imputed:
        cmLabels += ['uncodeable', 'imputed']
    cm = confusion_matrix(y_true, y_pred, labels=cmLabels)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    return cm, cmLabels
def plotTimeSeries(tsData, labels, labelColors=None, plotFile='sample'):
    """Plot, day by day, the acceleration trace overlaid with the model's
    predicted labels (top strip) and the camera annotations (bottom strip),
    plus a shared legend, and save the figure to `plotFile`.

    Parameters
    ----------
    tsData : DataFrame indexed by timestamp with 'acceleration', 'prediction'
        and 'annotation' columns.
    labels : list of class labels to draw.
    labelColors : optional {label: colour} mapping; auto-assigned from the
        matplotlib colour cycle if None.
    plotFile : output image path.
    """
    # maps (date, time-of-day) pairs to matplotlib date numbers
    convert_date = np.vectorize(lambda day, x: matplotlib.dates.date2num(datetime.combine(day, x)))
    groups = tsData.groupby(by=tsData.index.date)
    ndays = len(groups)
    nrows = 3*ndays + 1 # ndays x (prediction + annotation + spacing) + legend
    fig = plt.figure(figsize=(10,nrows), dpi=200)
    # height ratios: prediction strip (2), annotation strip (2), spacer (1)
    # for each day, then the legend row (2) at the bottom
    gs = fig.add_gridspec(nrows=nrows, ncols=1, height_ratios=[2, 2, 1]*ndays+[2])
    axes = []
    for i in range(nrows):
        if (i+1) % 3 == 0: continue # do not add the axis corresp. to the spacing
        axes.append(fig.add_subplot(gs[i]))
    if labelColors is None:
        color_cycle = itertools.cycle(plt.rcParams['axes.prop_cycle'].by_key()['color'])
        labelColors = dict(zip(labels, color_cycle))
    colors = [labelColors[l] for l in labels]
    # shared y-limits across all days so amplitudes are comparable
    ymin = tsData['acceleration'].min()
    ymax = tsData['acceleration'].max()
    for i, (day, group) in enumerate(groups):
        # each day owns a consecutive (prediction, annotation) pair of axes
        axPred, axAnno = axes[2*i], axes[2*i+1]
        # plot acceleration
        t = convert_date(day, group.index.time)
        axPred.plot(t, group['acceleration'], c='k')
        # plot predicted: one full-height translucent band per active label
        ys = [(group['prediction'] == l).astype('int') * ymax for l in labels]
        axPred.stackplot(t, ys, colors=colors, alpha=.5, edgecolor='none')
        axPred.fill_between(t, (group['prediction'] == 'imputed').astype('int') * ymax,
                            facecolor=IMPUTED_COLOR)
        # plot annotated: same banding, with hatched fill for 'uncodeable'
        ys = [(group['annotation'] == l).astype('int') * ymax for l in labels]
        axAnno.stackplot(t, ys, colors=colors, alpha=.5, edgecolor='none')
        axAnno.fill_between(t, (group['annotation']=='uncodeable').astype('int') * ymax,
                            facecolor=UNCODEABLE_COLOR, hatch='//')
        axPred.set_ylabel('predicted', fontsize='x-small')
        axAnno.set_ylabel('annotated', fontsize='x-small')
        formatXYaxes(axPred, day, ymax, ymin)
        formatXYaxes(axAnno, day, ymax, ymin)
        # add date to left hand side of each day's activity plot
        axPred.set_title(
            day.strftime("%A,\n%d %B"), weight='bold',
            x=-.2, y=-.3,
            horizontalalignment='left',
            verticalalignment='bottom',
            rotation='horizontal',
            transform=axPred.transAxes,
            fontsize='medium',
            color='k'
        )
    # legends: the last axis is a dedicated, frameless legend row
    axes[-1].axis('off')
    # create a 'patch' for each legend entry
    legend_patches = []
    legend_patches.append(mlines.Line2D([], [], color='k', label='acceleration'))
    legend_patches.append(mpatches.Patch(facecolor=IMPUTED_COLOR, label='imputed/nonwear'))
    legend_patches.append(mpatches.Patch(facecolor=UNCODEABLE_COLOR, hatch='//', label='not in dictionary'))
    # create legend entry for each label
    for label in labels:
        legend_patches.append(mpatches.Patch(facecolor=labelColors[label], label=label, alpha=0.5))
    # create overall legend
    axes[-1].legend(handles=legend_patches, bbox_to_anchor=(0., 0., 1., 1.),
                    loc='center', ncol=min(4,len(legend_patches)), mode="best",
                    borderaxespad=0, framealpha=0.6, frameon=True, fancybox=True)
    # remove legend border
    axes[-1].spines['left'].set_visible(False)
    axes[-1].spines['right'].set_visible(False)
    axes[-1].spines['top'].set_visible(False)
    axes[-1].spines['bottom'].set_visible(False)
    # format x-axis to show hours
    fig.autofmt_xdate()
    # add hour labels to top of plot
    hrLabels = ['00:00', '04:00', '08:00', '12:00', '16:00', '20:00', '24:00']
    axes[0].set_xticklabels(hrLabels)
    axes[0].tick_params(labelbottom=False, labeltop=True, labelleft=False)
    fig.savefig(plotFile, dpi=200, bbox_inches='tight')
    print('Timeseries plot file:', plotFile)
def plotConfusionMatrix(cm, cmLabels, title=None, plotFile='sample'):
    """Render a confusion matrix as an annotated heatmap and save it.

    Adapted from the scikit-learn confusion-matrix plotting example:
    https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
    """
    fig, ax = plt.subplots()
    ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           xticklabels=cmLabels, yticklabels=cmLabels,
           ylabel='camera annotation',
           xlabel='model prediction')
    if title is not None:
        ax.set_title(title)
    # slanted tick labels so long class names remain readable
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # annotate each cell with its value: white text on dark cells, black on light
    fmt = '.2f' if np.issubdtype(cm.dtype, np.float64) else 'd'
    midpoint = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            textColor = "white" if cm[row, col] > midpoint else "black"
            ax.text(col, row, format(cm[row, col], fmt),
                    ha="center", va="center", color=textColor)
    fig.tight_layout()
    fig.savefig(plotFile, dpi=200, bbox_inches='tight')
    print('Confusion matrix plot file:', plotFile)
def date_parser(t):
    """Parse a timestamp string of the form
    '2020-06-14 19:01:15.123+0100 [Europe/London]' into a timezone-aware
    pandas Timestamp converted to UTC.

    The bracketed IANA zone name is stripped; the numeric UTC offset embedded
    in the string determines the instant. (Dead commented-out code that
    converted back to the named zone has been removed.)
    """
    # drop the trailing ' [Zone/Name]' block before parsing
    t = re.sub(r'\[(.*?)\]', '', t)
    return pd.to_datetime(t, utc=True)
def main(tsFile, annoFile, labelScheme, normalize, plotFile):
    """End-to-end evaluation: load model predictions and camera annotations,
    plot the labelled time series, then compute, save and plot the confusion
    matrix.

    Parameters
    ----------
    tsFile : CSV of model predictions (time series).
    annoFile : CSV of camera annotations with startTime/endTime columns.
    labelScheme : 'Doherty2018', 'Willetts2018' or 'Walmsley2020'.
    normalize : if True, normalise confusion-matrix rows to sum to 1.
    plotFile : output file prefix (without extension).
    """
    # BUG FIX: the original read the global `args` (args.tsFile, args.annoFile,
    # args.plotFile) instead of these parameters, so main() only worked when
    # invoked through the CLI block. Use the parameters throughout.
    annoData = pd.read_csv(annoFile, parse_dates=['startTime', 'endTime'])
    tsData = pd.read_csv(tsFile, parse_dates=['time'], date_parser=date_parser)
    # normalise legacy column names
    tsData.rename(columns={'acc':'acceleration', 'MVPA':'moderate-vigorous'}, inplace=True)
    tsData.set_index('time', drop=False, inplace=True)
    # pick the colour map, dictionary column and label order for the scheme
    if labelScheme == 'Doherty2018':
        labelColors = DOHERTY2018_COLOURS
        labelDict = buildLabelDict(ANNO_LABEL_DICT, DOHERTY2018_DICT_COL)
        labels = DOHERTY2018_LABELS
    elif labelScheme == 'Willetts2018':
        labelColors = WILLETTS2018_COLOURS
        labelDict = buildLabelDict(ANNO_LABEL_DICT, WILLETTS2018_DICT_COL)
        labels = WILLETTS2018_LABELS
    elif labelScheme == 'Walmsley2020':
        labelColors = WALMSLEY2020_COLOURS
        labelDict = buildLabelDict(ANNO_LABEL_DICT, WALMSLEY2020_DICT_COL)
        labels = WALMSLEY2020_LABELS
    else:
        raise ValueError(f'Unrecognized label scheme {labelScheme}')
    annotateTsData(tsData, annoData, labelDict)
    gatherPredictionLabels(tsData, labels)
    # smooth acceleration with a 12-sample rolling mean
    tsData['acceleration'] = tsData['acceleration'].rolling(window=12, min_periods=1).mean()
    # drop dates without any annotation
    annotatedDates = np.unique(tsData.index.date[tsData['annotation'] != 'undefined'])
    tsData = tsData.loc[np.isin(tsData.index.date, annotatedDates)]
    plotTimeSeries(tsData, labels, labelColors, '{}_timeseries.png'.format(plotFile))
    # compute & plot confusion matrix, also saving the raw matrix as .npz
    cm, cmLabels = confusionMatrix(tsData, labels, normalize)
    cmFile = '{}_confusion.npz'.format(plotFile)
    np.savez(cmFile, cm=cm, cmLabels=cmLabels)
    print('Confusion matrix .npz file: {}'.format(cmFile))
    plotConfusionMatrix(cm, cmLabels, None, '{}_confusion.png'.format(plotFile))
# Command-line entry point: parse arguments and run the evaluation pipeline.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('tsFile', help='time series file with predictions by the model')
    parser.add_argument('annoFile', help='camera annotation file')
    parser.add_argument('--labelScheme', default='Walmsley2020')
    parser.add_argument('--normalize', action='store_true')
    parser.add_argument('--plotFile', default='image.png')
    args = parser.parse_args()
    # plotFile is used as a prefix for several outputs (_timeseries.png,
    # _confusion.png, _confusion.npz), so strip any extension first
    args.plotFile = args.plotFile.split('.')[0] # remove any extension
    main(args.tsFile, args.annoFile, args.labelScheme, args.normalize, args.plotFile)
| [
"numpy.isin",
"argparse.ArgumentParser",
"numpy.argmax",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.patches.Patch",
"datetime.time",
"numpy.unique",
"matplotlib.lines.Line2D",
"numpy.max",
"datetime.timedelta",
"matplotlib.pyplot.subplots",
"re.sub",
"re.s... | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((315, 347), 'pandas.plotting.register_matplotlib_converters', 'register_matplotlib_converters', ([], {}), '()\n', (345, 347), False, 'from pandas.plotting import register_matplotlib_converters\n'), ((1685, 1712), 're.split', 're.split', (['DELIMITERS', 'anno1'], {}), '(DELIMITERS, anno1)\n', (1693, 1712), False, 'import re\n'), ((1726, 1753), 're.split', 're.split', (['DELIMITERS', 'anno2'], {}), '(DELIMITERS, anno2)\n', (1734, 1753), False, 'import re\n'), ((2307, 2370), 'pandas.read_csv', 'pd.read_csv', (['labelDictCSV'], {'usecols': "['annotation', labelDictCol]"}), "(labelDictCSV, usecols=['annotation', labelDictCol])\n", (2318, 2370), True, 'import pandas as pd\n'), ((4881, 4930), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {'labels': 'cmLabels'}), '(y_true, y_pred, labels=cmLabels)\n', (4897, 4930), False, 'from sklearn.metrics import confusion_matrix\n'), ((5379, 5419), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, nrows)', 'dpi': '(200)'}), '(figsize=(10, nrows), dpi=200)\n', (5389, 5419), True, 'import matplotlib.pyplot as plt\n'), ((9017, 9031), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9029, 9031), True, 'import matplotlib.pyplot as plt\n'), ((10254, 10282), 're.sub', 're.sub', (['"""\\\\[(.*?)\\\\]"""', '""""""', 't'], {}), "('\\\\[(.*?)\\\\]', '', t)\n", (10260, 10282), False, 'import re\n'), ((10349, 10376), 'pandas.to_datetime', 'pd.to_datetime', (['t'], {'utc': '(True)'}), '(t, utc=True)\n', (10363, 10376), True, 'import pandas as pd\n'), ((10456, 10520), 'pandas.read_csv', 'pd.read_csv', (['args.annoFile'], {'parse_dates': "['startTime', 'endTime']"}), "(args.annoFile, parse_dates=['startTime', 'endTime'])\n", (10467, 10520), True, 'import pandas as pd\n'), ((10534, 10605), 'pandas.read_csv', 'pd.read_csv', (['args.tsFile'], {'parse_dates': 
"['time']", 'date_parser': 'date_parser'}), "(args.tsFile, parse_dates=['time'], date_parser=date_parser)\n", (10545, 10605), True, 'import pandas as pd\n'), ((11715, 11780), 'numpy.unique', 'np.unique', (["tsData.index.date[tsData['annotation'] != 'undefined']"], {}), "(tsData.index.date[tsData['annotation'] != 'undefined'])\n", (11724, 11780), True, 'import numpy as np\n'), ((12095, 12137), 'numpy.savez', 'np.savez', (['cmFile'], {'cm': 'cm', 'cmLabels': 'cmLabels'}), '(cmFile, cm=cm, cmLabels=cmLabels)\n', (12103, 12137), True, 'import numpy as np\n'), ((12320, 12345), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12343, 12345), False, 'import argparse\n'), ((2069, 2089), 'numpy.max', 'np.max', (['similarities'], {}), '(similarities)\n', (2075, 2089), True, 'import numpy as np\n'), ((2223, 2246), 'numpy.argmax', 'np.argmax', (['similarities'], {}), '(similarities)\n', (2232, 2246), True, 'import numpy as np\n'), ((7556, 7610), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""k"""', 'label': '"""acceleration"""'}), "([], [], color='k', label='acceleration')\n", (7569, 7610), True, 'import matplotlib.lines as mlines\n'), ((7638, 7702), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'facecolor': 'IMPUTED_COLOR', 'label': '"""imputed/nonwear"""'}), "(facecolor=IMPUTED_COLOR, label='imputed/nonwear')\n", (7652, 7702), True, 'import matplotlib.patches as mpatches\n'), ((7730, 7816), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'facecolor': 'UNCODEABLE_COLOR', 'hatch': '"""//"""', 'label': '"""not in dictionary"""'}), "(facecolor=UNCODEABLE_COLOR, hatch='//', label=\n 'not in dictionary')\n", (7744, 7816), True, 'import matplotlib.patches as mpatches\n'), ((9589, 9624), 'numpy.issubdtype', 'np.issubdtype', (['cm.dtype', 'np.float64'], {}), '(cm.dtype, np.float64)\n', (9602, 9624), True, 'import numpy as np\n'), ((11805, 11847), 'numpy.isin', 'np.isin', (['tsData.index.date', 'annotatedDates'], {}), 
'(tsData.index.date, annotatedDates)\n', (11812, 11847), True, 'import numpy as np\n'), ((7909, 7977), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'facecolor': 'labelColors[label]', 'label': 'label', 'alpha': '(0.5)'}), '(facecolor=labelColors[label], label=label, alpha=0.5)\n', (7923, 7977), True, 'import matplotlib.patches as mpatches\n'), ((9112, 9134), 'numpy.arange', 'np.arange', (['cm.shape[1]'], {}), '(cm.shape[1])\n', (9121, 9134), True, 'import numpy as np\n'), ((9154, 9176), 'numpy.arange', 'np.arange', (['cm.shape[0]'], {}), '(cm.shape[0])\n', (9163, 9176), True, 'import numpy as np\n'), ((3486, 3502), 'datetime.time', 'time', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (3490, 3502), False, 'from datetime import datetime, timedelta, time\n'), ((3555, 3571), 'datetime.time', 'time', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (3559, 3571), False, 'from datetime import datetime, timedelta, time\n'), ((4364, 4390), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'seconds'}), '(seconds=seconds)\n', (4373, 4390), False, 'from datetime import datetime, timedelta, time\n'), ((5186, 5210), 'datetime.datetime.combine', 'datetime.combine', (['day', 'x'], {}), '(day, x)\n', (5202, 5210), False, 'from datetime import datetime, timedelta, time\n'), ((3536, 3553), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3545, 3553), False, 'from datetime import datetime, timedelta, time\n'), ((3634, 3650), 'datetime.time', 'time', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (3638, 3650), False, 'from datetime import datetime, timedelta, time\n'), ((3707, 3723), 'datetime.time', 'time', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (3711, 3723), False, 'from datetime import datetime, timedelta, time\n'), ((3805, 3821), 'datetime.time', 'time', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (3809, 3821), False, 'from datetime import datetime, timedelta, time\n'), ((3878, 3894), 'datetime.time', 
'time', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (3882, 3894), False, 'from datetime import datetime, timedelta, time\n'), ((3688, 3705), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3697, 3705), False, 'from datetime import datetime, timedelta, time\n'), ((3859, 3876), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3868, 3876), False, 'from datetime import datetime, timedelta, time\n')] |
import gym
from gym import spaces
import numpy as np
from .zinc_coating.base import ZincCoatingBase as base
class ZincCoatingV0(gym.Env):
    """Continuous-control zinc coating environment with a single pressure action."""

    def __init__(self, steps_per_episode=5000, coating_reward_time_offset=0, random_coating_targets=False, random_coil_characteristics=False, random_coil_lengths=False, random_coil_speed=False, coating_dist_mean=0.0, coating_dist_std=0.0, coating_dist_reward=False):
        super().__init__()
        self.steps_per_episode = steps_per_episode
        # One continuous action: nozzle pressure in [0, 700].
        self.action_space = spaces.Box(
            np.array([0]), np.array([700]), dtype=np.float32)
        # Normalised observation: 9 scalar features plus a 30-way one-hot coil type.
        self.observation_space = spaces.Box(
            low=0, high=1, shape=(39,), dtype=np.float32)
        self.base = base(
            coating_reward_time_offset=coating_reward_time_offset,
            random_coating_targets=random_coating_targets,
            random_coil_characteristics=random_coil_characteristics,
            random_coil_lengths=random_coil_lengths,
            random_coil_speed=random_coil_speed,
            coating_dist_mean=coating_dist_mean,
            coating_dist_std=coating_dist_std,
            coating_dist_reward=coating_dist_reward,
        )
        self.seed()

    def seed(self, seed=None):
        """Seed the underlying simulation; draw a random seed when none is given."""
        if seed is None:
            seed = np.random.randint(0, 10000000)
        self.base.seed(seed)
        return [seed]

    def step(self, nozzle_pressure):
        """Advance the simulation one tick using the first action component."""
        self.current_step += 1
        self.done = self.current_step >= self.steps_per_episode
        observation, reward, zinc_coating_real = self.base.step(nozzle_pressure[0])
        return self._transform_observation(observation), reward, self.done, {"coating": zinc_coating_real}

    def reset(self):
        """Restart the episode and return the initial (transformed) observation."""
        self.current_step = 0
        observation, _, _ = self.base.reset()
        return self._transform_observation(observation)

    def render(self, mode='human', close=False):
        print("hey")

    def _transform_observation(self, observation):
        """Scale raw simulator readings into [0, 1] features plus a one-hot coil type."""
        coating_delta = observation.zinc_coating - observation.current_coating_target
        # When a coil switch happens on the next tick, report the upcoming coil type.
        coil_type = observation.next_coil_type if observation.coil_switch_next_tick else observation.current_coil_type
        scalars = ((observation.coil_speed - 1.3) / 2,
                   (observation.current_coating_target - 8) / 202,
                   (observation.next_coating_target - 8) / 202,
                   (observation.zinc_coating - 8) / 202,
                   (observation.nozzle_pressure / 700),
                   (coating_delta + 50) / 220,
                   (1 if coating_delta < 0 else 0),
                   (1 if coating_delta >= 0 and coating_delta <= 20 else 0),
                   (1 if coating_delta > 20 else 0))
        return scalars + one_hot_encode(coil_type, 30)
def one_hot_encode(to_encode, discrete_states):
    """One-hot encode ``to_encode`` over ``discrete_states`` categories as a tuple."""
    encoding = [0 for _ in range(discrete_states)]
    encoding[to_encode] = 1
    return tuple(encoding)
| [
"numpy.random.randint",
"numpy.array",
"gym.spaces.Box"
] | [((712, 768), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(39,)', 'dtype': 'np.float32'}), '(low=0, high=1, shape=(39,), dtype=np.float32)\n', (722, 768), False, 'from gym import spaces\n'), ((628, 641), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (636, 641), True, 'import numpy as np\n'), ((643, 658), 'numpy.array', 'np.array', (['[700]'], {}), '([700])\n', (651, 658), True, 'import numpy as np\n'), ((1376, 1406), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10000000)'], {}), '(0, 10000000)\n', (1393, 1406), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torchvision
import numpy as np
from PIL import Image
import PIL
from matplotlib import pyplot as plt
def pil_to_np(img_PIL):
    '''
    Convert a PIL image to a numpy array.

    From W x H x C in [0...255] to C x W x H in [0..1] (float32); grayscale
    inputs gain a leading singleton channel axis.
    '''
    arr = np.array(img_PIL)
    if arr.ndim == 3:
        arr = arr.transpose(2, 0, 1)
    else:
        arr = arr[None, ...]
    return arr.astype(np.float32) / 255.
def np_to_pil(img_np):
    '''
    Convert a numpy image back to a PIL image.

    From C x W x H in [0..1] to W x H x C in [0...255]; single-channel inputs
    become 2-D grayscale images.
    '''
    clipped = np.clip(img_np * 255, 0, 255).astype(np.uint8)
    if img_np.shape[0] == 1:
        clipped = clipped[0]
    else:
        clipped = clipped.transpose(1, 2, 0)
    return Image.fromarray(clipped)
def np_to_torch(img_np):
    '''
    Convert a numpy image (C x W x H, [0..1]) to a torch.Tensor with a leading
    batch dimension (1 x C x W x H, [0..1]).
    '''
    return torch.from_numpy(img_np).unsqueeze(0)
def torch_to_np(img_var):
    '''
    Convert a torch.Tensor image (1 x C x W x H, [0..1]) to a numpy array
    (C x W x H, [0..1]), dropping the batch dimension.
    '''
    host_array = img_var.detach().cpu().numpy()
    return host_array[0]
def crop_image_by_multiplier(img, d=32):
    '''
    Center-crop ``img`` so that both dimensions are divisible by ``d``.
    '''
    w, h = img.size
    new_w = w - w % d
    new_h = h - h % d
    # Symmetric bounding box around the image centre.
    left = int((w - new_w) / 2)
    top = int((h - new_h) / 2)
    right = int((w + new_w) / 2)
    bottom = int((h + new_h) / 2)
    return img.crop([left, top, right, bottom])
def get_image_grid(images_np, nrow=8):
    '''Concatenate a list of numpy images into a single grid image (numpy).'''
    tensors = [torch.from_numpy(img) for img in images_np]
    grid = torchvision.utils.make_grid(tensors, nrow)
    return grid.numpy()
def plot_image_grid(images_np, nrow=8, factor=1, interpolation='lanczos'):
    """Draws images in a grid

    Args:
        images_np: list of images, each image is np.array of size 3xHxW or 1xHxW
        nrow: how many images will be in one row
        factor: size of the plt.figure
        interpolation: interpolation used in plt.imshow
    """
    n_channels = max(img.shape[0] for img in images_np)
    assert (n_channels == 3) or (n_channels == 1), "images should have 1 or 3 channels"

    # Promote single-channel images to three channels so the grid is uniform.
    images_np = [img if img.shape[0] == n_channels else np.concatenate([img, img, img], axis=0)
                 for img in images_np]

    grid = get_image_grid(images_np, nrow)

    plt.figure(figsize=(len(images_np) + factor, 12 + factor))
    if images_np[0].shape[0] == 1:
        plt.imshow(grid[0], cmap='gray', interpolation=interpolation)
    else:
        plt.imshow(grid.transpose(1, 2, 0), interpolation=interpolation)
    plt.show()

    return grid
def rgb2gray(rgb):
    """Convert an H x W x 3 RGB image to grayscale using ITU-R 601 luma weights."""
    red = rgb[:, :, 0]
    green = rgb[:, :, 1]
    blue = rgb[:, :, 2]
    return 0.2989 * red + 0.5870 * green + 0.1140 * blue
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.clip",
"torchvision.utils.make_grid",
"numpy.array",
"PIL.Image.fromarray",
"numpy.concatenate",
"torch.from_numpy"
] | [((291, 308), 'numpy.array', 'np.array', (['img_PIL'], {}), '(img_PIL)\n', (299, 308), True, 'import numpy as np\n'), ((748, 767), 'PIL.Image.fromarray', 'Image.fromarray', (['ar'], {}), '(ar)\n', (763, 767), False, 'from PIL import Image\n'), ((1795, 1842), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['images_torch', 'nrow'], {}), '(images_torch, nrow)\n', (1822, 1842), False, 'import torchvision\n'), ((2775, 2785), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2783, 2785), True, 'from matplotlib import pyplot as plt\n'), ((919, 943), 'torch.from_numpy', 'torch.from_numpy', (['img_np'], {}), '(img_np)\n', (935, 943), False, 'import torch\n'), ((1738, 1757), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (1754, 1757), False, 'import torch\n'), ((2625, 2686), 'matplotlib.pyplot.imshow', 'plt.imshow', (['grid[0]'], {'cmap': '"""gray"""', 'interpolation': 'interpolation'}), "(grid[0], cmap='gray', interpolation=interpolation)\n", (2635, 2686), True, 'from matplotlib import pyplot as plt\n'), ((599, 628), 'numpy.clip', 'np.clip', (['(img_np * 255)', '(0)', '(255)'], {}), '(img_np * 255, 0, 255)\n', (606, 628), True, 'import numpy as np\n'), ((2419, 2452), 'numpy.concatenate', 'np.concatenate', (['[x, x, x]'], {'axis': '(0)'}), '([x, x, x], axis=0)\n', (2433, 2452), True, 'import numpy as np\n')] |
"""
Accesing SFD98 maps.
Most code taken from
https://raw.githubusercontent.com/dstndstn/tractor/master/projects/desi/common.py
,
replaced the transformation with ours.
"""
import fitsio
import os
from ..utils import wcs_simplezea
from ..utils.euler import euler
import numpy as np
# to run need to
# export DUST_DIR=/project/projectdirs/desi/software/edison/dust/v0_0/
__all__ = ['SFDMap']
class anwcs_t(object):
    """Minimal WCS wrapper around an SFD ZEA map stored in a FITS HDU."""

    def __init__(self, filename, hduid):
        header = dict(fitsio.read_header(filename, hduid))
        self.scale, self.crpix, self.nsgp = wcs_simplezea.parse_header(header, zero_offset=False)

    def radec2pixelxy(self, ra, dec):
        """Map angular coordinates to pixel (x, y); the first return value is a status array."""
        coords = np.array([ra, dec])
        x, y = wcs_simplezea.ang2pix(coords, SCALE=self.scale, CRPIX=self.crpix, NSGP=self.nsgp)
        # Zero status values mean "ok", mirroring the official API's convention.
        return np.zeros_like(x), x, y
def radectolb(ra, dec):
    """Convert equatorial (RA, Dec) to galactic (l, b) via ``euler`` (select=1)."""
    return euler(ra, dec, 1)
class SFDMap(object):
    """
    SFDMap accesses the SFD98 dust map. The map files are given in the constructor.

    Attributes
    ----------
    extinctions : dict
        A / E(B-V) coefficients per filter, kept for reference.
        These come from Schlafly & Finkbeiner, arxiv 1012.4804v2, Table 6, Rv=3.1
        but updated (and adding DES u) via email from Schlafly,
        decam-data thread from 11/13/2014, "New recommended SFD coefficients for DECam."
        The coefficients for the four WISE filters are derived from Fitzpatrick 1999,
        as recommended by Schlafly & Finkbeiner, considered better than either the
        Cardelli et al 1989 curves or the newer Fitzpatrick & Massa 2009 NIR curve
        (not vetted beyond 2 micron).
        These coefficients are A / E(B-V) = 0.184, 0.113, 0.0241, 0.00910.

    Notes
    -----
    Use :py:meth:`ebv` to query the E(B-V) values.
    """
    extinctions = {
        'SDSS u': 4.239,
        'DES u': 3.995,
        'DES g': 3.214,
        'DES r': 2.165,
        'DES i': 1.592,
        'DES z': 1.211,
        'DES Y': 1.064,
        'WISE W1': 0.184,
        'WISE W2': 0.113,
        'WISE W3': 0.0241,
        'WISE W4': 0.00910,
    }
    def __init__(self, ngp_filename=None, sgp_filename=None, dustdir=None):
        """
        Parameters
        ----------
        ngp_filename : string
            filename of the north plane data
        sgp_filename : string
            filename of the south plane data
        dustdir : string
            directory to look for data files, overrides ngp_filename and sgp_filename.
            Will use `DUST_DIR` environment variable if not supplied.
        """
        # Resolve the data directory: explicit argument > $DUST_DIR > cwd.
        if dustdir is None:
            dustdir = os.environ.get('DUST_DIR', None)
        if dustdir is not None:
            dustdir = os.path.join(dustdir, 'maps')
        else:
            dustdir = '.'
            print('Warning: $DUST_DIR not set; looking for SFD maps in current directory.')
        if ngp_filename is None:
            ngp_filename = os.path.join(dustdir, 'SFD_dust_4096_ngp.fits')
        if sgp_filename is None:
            sgp_filename = os.path.join(dustdir, 'SFD_dust_4096_sgp.fits')
        if not os.path.exists(ngp_filename):
            raise RuntimeError('Error: SFD map does not exist: %s' % ngp_filename)
        if not os.path.exists(sgp_filename):
            raise RuntimeError('Error: SFD map does not exist: %s' % sgp_filename)
        # Pixel data and the matching WCS for each galactic hemisphere.
        self.north = fitsio.read(ngp_filename)
        self.south = fitsio.read(sgp_filename)
        self.northwcs = anwcs_t(ngp_filename, 0)
        self.southwcs = anwcs_t(sgp_filename, 0)
    @staticmethod
    def bilinear_interp_nonzero(image, x, y):
        """Bilinearly interpolate ``image`` at (x, y), treating zero pixels as
        missing data: whenever one corner of the interpolation cell is zero,
        the non-zero corner's value is used instead of blending with zero."""
        H,W = image.shape
        x0 = np.floor(x).astype(int)
        y0 = np.floor(y).astype(int)
        # Bilinear interpolate, but not outside the bounds (where ebv=0)
        fx = np.clip(x - x0, 0., 1.)
        # Blend along x on the lower row; fall back to the non-zero corner.
        ebvA = image[y0,x0]
        ebvB = image[y0, np.clip(x0+1, 0, W-1)]
        ebv1 = (1.-fx) * ebvA + fx * ebvB
        ebv1[ebvA == 0] = ebvB[ebvA == 0]
        ebv1[ebvB == 0] = ebvA[ebvB == 0]
        # Same blend along x on the upper row (y0+1, clipped at the edge).
        ebvA = image[np.clip(y0+1, 0, H-1), x0]
        ebvB = image[np.clip(y0+1, 0, H-1), np.clip(x0+1, 0, W-1)]
        ebv2 = (1.-fx) * ebvA + fx * ebvB
        ebv2[ebvA == 0] = ebvB[ebvA == 0]
        ebv2[ebvB == 0] = ebvA[ebvB == 0]
        # Finally blend the two rows along y, again skipping zero values.
        fy = np.clip(y - y0, 0., 1.)
        ebv = (1.-fy) * ebv1 + fy * ebv2
        ebv[ebv1 == 0] = ebv2[ebv1 == 0]
        ebv[ebv2 == 0] = ebv1[ebv2 == 0]
        return ebv
    def ebv(self, ra, dec):
        """
        Directly query the SFD map and return E(B-V).

        Parameters
        ----------
        ra : array_like
            RA in degrees.
        dec : array_like
            DEC in degrees.

        Returns
        -------
        ebv : array_like
            E(B-V)
        """
        l,b = radectolb(ra, dec)
        ebv = np.zeros_like(l)
        # Split the query between the two hemisphere maps by galactic latitude.
        N = (b >= 0)
        for wcs,image,cut in [(self.northwcs, self.north, N),
                              (self.southwcs, self.south, np.logical_not(N))]:
            # Our WCS routines are mis-named... the SFD WCSes convert
            # X,Y <-> L,B.
            if cut.sum() == 0:
                continue
            ok,x,y = wcs.radec2pixelxy(l[cut], b[cut])
            assert(np.all(ok == 0))
            H,W = image.shape
            # Sanity check: all queried positions fall inside the map.
            assert(np.all(x >= 0.5))
            assert(np.all(x <= (W+0.5)))
            assert(np.all(y >= 0.5))
            assert(np.all(y <= (H+0.5)))
            # Convert from FITS 1-based pixel coordinates to 0-based indices.
            ebv[cut] = SFDMap.bilinear_interp_nonzero(image, x-1., y-1.)
        return ebv
    def extinction(self, filts, ra, dec, get_ebv=False):
        """
        Returns the extinction for different filters.
        Do not use this function; copied from old code.
        Use :py:meth:`ebv` instead.
        """
        ebv = self.ebv(ra, dec)
        if filts is not None:
            # Outer product: one extinction column per requested filter.
            factors = np.array([SFDMap.extinctions[f] for f in filts])
            rtn = factors[np.newaxis,:] * ebv[:,np.newaxis]
        if get_ebv and filts is not None:
            return ebv,rtn
        elif filts is None:
            return ebv
        return rtn
if __name__ == '__main__':
    # Smoke test: compare catalogue extinctions with values recomputed here.
    from datarelease import DataRelease
    release = DataRelease()
    ra = release.catalogue['RA']
    dec = release.catalogue['DEC']
    catalogue_ext = release.catalogue['DECAM_EXTINCTION']
    sfd = SFDMap()
    computed_ext = sfd.extinction(['DES %s' % band for band in 'ugrizY'],
                                  ra, dec)
    print(catalogue_ext - computed_ext)
| [
"numpy.zeros_like",
"numpy.floor",
"numpy.logical_not",
"os.path.exists",
"numpy.all",
"numpy.clip",
"datarelease.DataRelease",
"fitsio.read",
"os.environ.get",
"numpy.array",
"os.path.join",
"fitsio.read_header"
] | [((6209, 6222), 'datarelease.DataRelease', 'DataRelease', ([], {}), '()\n', (6220, 6222), False, 'from datarelease import DataRelease\n'), ((672, 691), 'numpy.array', 'np.array', (['[ra, dec]'], {}), '([ra, dec])\n', (680, 691), True, 'import numpy as np\n'), ((3408, 3433), 'fitsio.read', 'fitsio.read', (['ngp_filename'], {}), '(ngp_filename)\n', (3419, 3433), False, 'import fitsio\n'), ((3455, 3480), 'fitsio.read', 'fitsio.read', (['sgp_filename'], {}), '(sgp_filename)\n', (3466, 3480), False, 'import fitsio\n'), ((3830, 3855), 'numpy.clip', 'np.clip', (['(x - x0)', '(0.0)', '(1.0)'], {}), '(x - x0, 0.0, 1.0)\n', (3837, 3855), True, 'import numpy as np\n'), ((4312, 4337), 'numpy.clip', 'np.clip', (['(y - y0)', '(0.0)', '(1.0)'], {}), '(y - y0, 0.0, 1.0)\n', (4319, 4337), True, 'import numpy as np\n'), ((4865, 4881), 'numpy.zeros_like', 'np.zeros_like', (['l'], {}), '(l)\n', (4878, 4881), True, 'import numpy as np\n'), ((483, 518), 'fitsio.read_header', 'fitsio.read_header', (['filename', 'hduid'], {}), '(filename, hduid)\n', (501, 518), False, 'import fitsio\n'), ((802, 818), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (815, 818), True, 'import numpy as np\n'), ((2666, 2698), 'os.environ.get', 'os.environ.get', (['"""DUST_DIR"""', 'None'], {}), "('DUST_DIR', None)\n", (2680, 2698), False, 'import os\n'), ((2753, 2782), 'os.path.join', 'os.path.join', (['dustdir', '"""maps"""'], {}), "(dustdir, 'maps')\n", (2765, 2782), False, 'import os\n'), ((2975, 3022), 'os.path.join', 'os.path.join', (['dustdir', '"""SFD_dust_4096_ngp.fits"""'], {}), "(dustdir, 'SFD_dust_4096_ngp.fits')\n", (2987, 3022), False, 'import os\n'), ((3083, 3130), 'os.path.join', 'os.path.join', (['dustdir', '"""SFD_dust_4096_sgp.fits"""'], {}), "(dustdir, 'SFD_dust_4096_sgp.fits')\n", (3095, 3130), False, 'import os\n'), ((3146, 3174), 'os.path.exists', 'os.path.exists', (['ngp_filename'], {}), '(ngp_filename)\n', (3160, 3174), False, 'import os\n'), ((3274, 3302), 
'os.path.exists', 'os.path.exists', (['sgp_filename'], {}), '(sgp_filename)\n', (3288, 3302), False, 'import os\n'), ((5274, 5289), 'numpy.all', 'np.all', (['(ok == 0)'], {}), '(ok == 0)\n', (5280, 5289), True, 'import numpy as np\n'), ((5340, 5356), 'numpy.all', 'np.all', (['(x >= 0.5)'], {}), '(x >= 0.5)\n', (5346, 5356), True, 'import numpy as np\n'), ((5377, 5397), 'numpy.all', 'np.all', (['(x <= W + 0.5)'], {}), '(x <= W + 0.5)\n', (5383, 5397), True, 'import numpy as np\n'), ((5418, 5434), 'numpy.all', 'np.all', (['(y >= 0.5)'], {}), '(y >= 0.5)\n', (5424, 5434), True, 'import numpy as np\n'), ((5455, 5475), 'numpy.all', 'np.all', (['(y <= H + 0.5)'], {}), '(y <= H + 0.5)\n', (5461, 5475), True, 'import numpy as np\n'), ((5884, 5932), 'numpy.array', 'np.array', (['[SFDMap.extinctions[f] for f in filts]'], {}), '([SFDMap.extinctions[f] for f in filts])\n', (5892, 5932), True, 'import numpy as np\n'), ((3683, 3694), 'numpy.floor', 'np.floor', (['x'], {}), '(x)\n', (3691, 3694), True, 'import numpy as np\n'), ((3720, 3731), 'numpy.floor', 'np.floor', (['y'], {}), '(y)\n', (3728, 3731), True, 'import numpy as np\n'), ((3907, 3932), 'numpy.clip', 'np.clip', (['(x0 + 1)', '(0)', '(W - 1)'], {}), '(x0 + 1, 0, W - 1)\n', (3914, 3932), True, 'import numpy as np\n'), ((4078, 4103), 'numpy.clip', 'np.clip', (['(y0 + 1)', '(0)', '(H - 1)'], {}), '(y0 + 1, 0, H - 1)\n', (4085, 4103), True, 'import numpy as np\n'), ((4126, 4151), 'numpy.clip', 'np.clip', (['(y0 + 1)', '(0)', '(H - 1)'], {}), '(y0 + 1, 0, H - 1)\n', (4133, 4151), True, 'import numpy as np\n'), ((4149, 4174), 'numpy.clip', 'np.clip', (['(x0 + 1)', '(0)', '(W - 1)'], {}), '(x0 + 1, 0, W - 1)\n', (4156, 4174), True, 'import numpy as np\n'), ((5023, 5040), 'numpy.logical_not', 'np.logical_not', (['N'], {}), '(N)\n', (5037, 5040), True, 'import numpy as np\n')] |
"""
IMPORTING LIBS
"""
import numpy as np
import os
import time
import random
import glob
import argparse, json
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
class DotDict(dict):
    """A dict whose items are also accessible as attributes.

    Generalized to accept the same positional argument as ``dict.update``
    (a mapping or an iterable of key/value pairs) in addition to keyword
    arguments; keyword-only construction remains fully backward compatible.
    """

    def __init__(self, *args, **kwds):
        super().__init__()
        self.update(*args, **kwds)
        # Alias the attribute namespace to the mapping itself so that
        # ``d.key`` and ``d['key']`` always stay in sync.
        self.__dict__ = self
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.superpixels_graph_classification.dgn_net import DGNNet
from data.superpixels import SuperPixDataset # import dataset
from train.train_superpixels_graph_classification import train_epoch, evaluate_network
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
    """Pin the visible CUDA device via environment variables and return the
    torch device to use ('cuda' when requested and available, else 'cpu')."""
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    if torch.cuda.is_available() and use_gpu:
        print('cuda available with GPU:', torch.cuda.get_device_name(0))
        return torch.device("cuda")

    print('cuda not available')
    return torch.device("cpu")
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(net_params):
    """Instantiate a DGNNet from ``net_params``, then print and return its
    total number of parameters."""
    model = DGNNet(net_params)
    print("MODEL DETAILS:\n")
    total_param = sum(np.prod(list(p.data.size())) for p in model.parameters())
    print('MODEL/Total parameters:', total_param)
    return total_param
"""
TRAINING CODE
"""
def train_val_pipeline(dataset, params, net_params):
    """Train a DGNNet on ``dataset`` and report train/val/test accuracy.

    Runs up to ``params['epochs']`` epochs with ReduceLROnPlateau scheduling;
    stops early on Ctrl+C, when the learning rate drops below
    ``params['min_lr']``, or after ``params['max_time']`` hours.
    """
    t0 = time.time()
    per_epoch_time = []
    trainset, valset, testset = dataset.train, dataset.val, dataset.test
    device = net_params['device']
    # setting seeds
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device == 'cuda':
        torch.cuda.manual_seed(params['seed'])
    print("Training Graphs: ", len(trainset))
    print("Validation Graphs: ", len(valset))
    print("Test Graphs: ", len(testset))
    model = DGNNet(net_params)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'])
    start_epoch = 0
    # Per-epoch histories (kept for potential inspection/plotting).
    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_accs, epoch_val_accs = [], []
    train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)
    val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
    test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
    # At any point you can hit Ctrl + C to break out of training early.
    try:
        with tqdm(range(start_epoch, params['epochs']), mininterval=params['print_epoch_interval'],
                  maxinterval=None, unit='epoch', initial=start_epoch, total=params['epochs']) as t:
            for epoch in t:
                t.set_description('Epoch %d' % epoch)
                start = time.time()
                epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader,
                                        epoch, net_params['augmentation'], net_params['flip'], net_params['distortion'])
                epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch)
                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_accs.append(epoch_train_acc)
                epoch_val_accs.append(epoch_val_acc)
                # Test accuracy is tracked per epoch for the progress bar only.
                _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)
                t.set_postfix(time=time.time() - start, lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                              train_acc=epoch_train_acc, val_acc=epoch_val_acc,
                              test_acc=epoch_test_acc)
                per_epoch_time.append(time.time() - start)
                # LR schedule follows the validation loss.
                scheduler.step(epoch_val_loss)
                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR EQUAL TO MIN LR SET.")
                    break
                # Stop training after params['max_time'] hours
                if time.time() - t0 > params['max_time'] * 3600:
                    print('-' * 89)
                    print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time']))
                    break
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')
    # Final evaluation with the last trained weights.
    _, test_acc = evaluate_network(model, device, test_loader, epoch)
    _, val_acc = evaluate_network(model, device, val_loader, epoch)
    _, train_acc = evaluate_network(model, device, train_loader, epoch)
    print("Test Accuracy: {:.4f}".format(test_acc))
    print("Val Accuracy: {:.4f}".format(val_acc))
    print("Train Accuracy: {:.4f}".format(train_acc))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time() - t0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
def main():
    """Parse CLI arguments, merge them over the JSON config, derive
    dataset-dependent network parameters, and launch training.

    CLI values, when given, override the corresponding config entries.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
    parser.add_argument('--gpu_id', help="Please give a value for gpu id")
    parser.add_argument('--model', help="Please give a value for model name")
    parser.add_argument('--dataset', help="Please give a value for dataset name")
    parser.add_argument('--seed', help="Please give a value for seed")
    parser.add_argument('--epochs', help="Please give a value for epochs")
    parser.add_argument('--batch_size', help="Please give a value for batch_size")
    parser.add_argument('--init_lr', help="Please give a value for init_lr")
    parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
    parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
    parser.add_argument('--min_lr', help="Please give a value for min_lr")
    parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
    parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
    parser.add_argument('--L', help="Please give a value for L")
    parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
    parser.add_argument('--out_dim', help="Please give a value for out_dim")
    parser.add_argument('--residual', help="Please give a value for residual")
    parser.add_argument('--edge_feat', help="Please give a value for edge_feat")
    parser.add_argument('--readout', help="Please give a value for readout")
    parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
    parser.add_argument('--dropout', help="Please give a value for dropout")
    parser.add_argument('--graph_norm', help="Please give a value for graph_norm")
    parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
    parser.add_argument('--max_time', help="Please give a value for max_time")
    parser.add_argument('--expid', help='Experiment id.')
    parser.add_argument('--type_net', default='simple', help='Type of net')
    parser.add_argument('--lap_norm', default='none', help='Laplacian normalisation')
    parser.add_argument('--augmentation', type=float, default=0., help='Dynamically augmenting with rotations, angle in degrees')
    parser.add_argument('--distortion', type=float, default=0., help='Distortion of the vector field')
    parser.add_argument('--proportion', type=float, default=1., help='Proportion of the dataset to use')
    parser.add_argument('--flip', action='store_true', default=False, help='Flip x-axis')
    # eig params
    parser.add_argument('--coord_eig', action='store_true', default=False, help='Having the coord. weights')
    parser.add_argument('--aggregators', type=str, help='Aggregators to use.')
    parser.add_argument('--scalers', type=str, help='Scalers to use.')
    parser.add_argument('--towers', type=int, help='Towers to use.')
    parser.add_argument('--divide_input_first', type=bool, help='Whether to divide the input in first layers.')
    parser.add_argument('--divide_input_last', type=bool, help='Whether to divide the input in last layer.')
    parser.add_argument('--edge_dim', type=int, help='Size of edge embeddings.')
    parser.add_argument('--pretrans_layers', type=int, help='pretrans_layers.')
    parser.add_argument('--posttrans_layers', type=int, help='posttrans_layers.')
    args = parser.parse_args()
    with open(args.config) as f:
        config = json.load(f)

    # device
    if args.gpu_id is not None:
        config['gpu']['id'] = int(args.gpu_id)
        config['gpu']['use'] = True
    device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
    # dataset
    if args.dataset is not None:
        DATASET_NAME = args.dataset
    else:
        DATASET_NAME = config['dataset']
    dataset = SuperPixDataset(DATASET_NAME, coord_eig=args.coord_eig,
                              proportion=args.proportion)
    # parameters (each CLI flag, when present, overrides the config value)
    params = config['params']
    if args.seed is not None:
        params['seed'] = int(args.seed)
    if args.epochs is not None:
        params['epochs'] = int(args.epochs)
    if args.batch_size is not None:
        params['batch_size'] = int(args.batch_size)
    if args.init_lr is not None:
        params['init_lr'] = float(args.init_lr)
    if args.lr_reduce_factor is not None:
        params['lr_reduce_factor'] = float(args.lr_reduce_factor)
    if args.lr_schedule_patience is not None:
        params['lr_schedule_patience'] = int(args.lr_schedule_patience)
    if args.min_lr is not None:
        params['min_lr'] = float(args.min_lr)
    if args.weight_decay is not None:
        params['weight_decay'] = float(args.weight_decay)
    if args.print_epoch_interval is not None:
        params['print_epoch_interval'] = int(args.print_epoch_interval)
    if args.max_time is not None:
        params['max_time'] = float(args.max_time)
    # network parameters
    net_params = config['net_params']
    net_params['device'] = device
    net_params['gpu_id'] = config['gpu']['id']
    net_params['batch_size'] = params['batch_size']
    if args.L is not None:
        net_params['L'] = int(args.L)
    if args.hidden_dim is not None:
        net_params['hidden_dim'] = int(args.hidden_dim)
    if args.out_dim is not None:
        net_params['out_dim'] = int(args.out_dim)
    if args.residual is not None:
        net_params['residual'] = True if args.residual == 'True' else False
    if args.edge_feat is not None:
        net_params['edge_feat'] = True if args.edge_feat == 'True' else False
    if args.readout is not None:
        net_params['readout'] = args.readout
    if args.in_feat_dropout is not None:
        net_params['in_feat_dropout'] = float(args.in_feat_dropout)
    if args.dropout is not None:
        net_params['dropout'] = float(args.dropout)
    if args.graph_norm is not None:
        net_params['graph_norm'] = True if args.graph_norm == 'True' else False
    if args.batch_norm is not None:
        net_params['batch_norm'] = True if args.batch_norm == 'True' else False
    if args.aggregators is not None:
        net_params['aggregators'] = args.aggregators
    if args.scalers is not None:
        net_params['scalers'] = args.scalers
    if args.towers is not None:
        net_params['towers'] = args.towers
    if args.divide_input_first is not None:
        net_params['divide_input_first'] = args.divide_input_first
    if args.divide_input_last is not None:
        net_params['divide_input_last'] = args.divide_input_last
    if args.edge_dim is not None:
        net_params['edge_dim'] = args.edge_dim
    if args.pretrans_layers is not None:
        net_params['pretrans_layers'] = args.pretrans_layers
    if args.posttrans_layers is not None:
        net_params['posttrans_layers'] = args.posttrans_layers
    if args.type_net is not None:
        net_params['type_net'] = args.type_net
    if args.distortion is not None:
        net_params['distortion'] = args.distortion
    if args.augmentation is not None:
        net_params['augmentation'] = args.augmentation
    if args.flip is not None:
        net_params['flip'] = args.flip

    # Superpixels: derive input/output sizes from the loaded dataset.
    net_params['in_dim'] = dataset.train[0][0].ndata['feat'][0].size(0)
    net_params['in_dim_edge'] = dataset.train[0][0].edata['feat'][0].size(0)
    num_classes = len(np.unique(np.array(dataset.train[:][1])))
    net_params['n_classes'] = num_classes

    # calculate logarithmic average degree for scalers
    D = torch.cat([torch.sparse.sum(g.adjacency_matrix(transpose=True), dim=-1).to_dense() for g in
                   dataset.train.graph_lists])
    net_params['avg_d'] = dict(lin=torch.mean(D),
                               exp=torch.mean(torch.exp(torch.div(1, D)) - 1),
                               log=torch.mean(torch.log(D + 1)))

    net_params['total_param'] = view_model_param(net_params)
    train_val_pipeline(dataset, params, net_params)
# NOTE(review): unguarded entry point — importing this module immediately runs
# training; consider wrapping in ``if __name__ == "__main__":``.
main()
| [
"data.superpixels.SuperPixDataset",
"numpy.random.seed",
"argparse.ArgumentParser",
"nets.superpixels_graph_classification.dgn_net.DGNNet",
"numpy.mean",
"torch.device",
"torch.utils.data.DataLoader",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"random.seed",
"torch.cuda.get_device_name",
"tor... | [((1175, 1193), 'nets.superpixels_graph_classification.dgn_net.DGNNet', 'DGNNet', (['net_params'], {}), '(net_params)\n', (1181, 1193), False, 'from nets.superpixels_graph_classification.dgn_net import DGNNet\n'), ((1501, 1512), 'time.time', 'time.time', ([], {}), '()\n', (1510, 1512), False, 'import time\n'), ((1669, 1696), 'random.seed', 'random.seed', (["params['seed']"], {}), "(params['seed'])\n", (1680, 1696), False, 'import random\n'), ((1701, 1731), 'numpy.random.seed', 'np.random.seed', (["params['seed']"], {}), "(params['seed'])\n", (1715, 1731), True, 'import numpy as np\n'), ((1736, 1769), 'torch.manual_seed', 'torch.manual_seed', (["params['seed']"], {}), "(params['seed'])\n", (1753, 1769), False, 'import torch\n'), ((1989, 2007), 'nets.superpixels_graph_classification.dgn_net.DGNNet', 'DGNNet', (['net_params'], {}), '(net_params)\n', (1995, 2007), False, 'from nets.superpixels_graph_classification.dgn_net import DGNNet\n'), ((2160, 2300), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'mode': '"""min"""', 'factor': "params['lr_reduce_factor']", 'patience': "params['lr_schedule_patience']"}), "(optimizer, mode='min', factor=params[\n 'lr_reduce_factor'], patience=params['lr_schedule_patience'])\n", (2196, 2300), True, 'import torch.optim as optim\n'), ((2540, 2639), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': "params['batch_size']", 'shuffle': '(True)', 'collate_fn': 'dataset.collate'}), "(trainset, batch_size=params['batch_size'], shuffle=True,\n collate_fn=dataset.collate)\n", (2550, 2639), False, 'from torch.utils.data import DataLoader\n'), ((2653, 2751), 'torch.utils.data.DataLoader', 'DataLoader', (['valset'], {'batch_size': "params['batch_size']", 'shuffle': '(False)', 'collate_fn': 'dataset.collate'}), "(valset, batch_size=params['batch_size'], shuffle=False,\n collate_fn=dataset.collate)\n", (2663, 2751), False, 'from torch.utils.data import 
DataLoader\n'), ((2766, 2865), 'torch.utils.data.DataLoader', 'DataLoader', (['testset'], {'batch_size': "params['batch_size']", 'shuffle': '(False)', 'collate_fn': 'dataset.collate'}), "(testset, batch_size=params['batch_size'], shuffle=False,\n collate_fn=dataset.collate)\n", (2776, 2865), False, 'from torch.utils.data import DataLoader\n'), ((4978, 5029), 'train.train_superpixels_graph_classification.evaluate_network', 'evaluate_network', (['model', 'device', 'test_loader', 'epoch'], {}), '(model, device, test_loader, epoch)\n', (4994, 5029), False, 'from train.train_superpixels_graph_classification import train_epoch, evaluate_network\n'), ((5047, 5097), 'train.train_superpixels_graph_classification.evaluate_network', 'evaluate_network', (['model', 'device', 'val_loader', 'epoch'], {}), '(model, device, val_loader, epoch)\n', (5063, 5097), False, 'from train.train_superpixels_graph_classification import train_epoch, evaluate_network\n'), ((5117, 5169), 'train.train_superpixels_graph_classification.evaluate_network', 'evaluate_network', (['model', 'device', 'train_loader', 'epoch'], {}), '(model, device, train_loader, epoch)\n', (5133, 5169), False, 'from train.train_superpixels_graph_classification import train_epoch, evaluate_network\n'), ((5529, 5554), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5552, 5554), False, 'import argparse, json\n'), ((9448, 9536), 'data.superpixels.SuperPixDataset', 'SuperPixDataset', (['DATASET_NAME'], {'coord_eig': 'args.coord_eig', 'proportion': 'args.proportion'}), '(DATASET_NAME, coord_eig=args.coord_eig, proportion=args.\n proportion)\n', (9463, 9536), False, 'from data.superpixels import SuperPixDataset\n'), ((831, 856), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (854, 856), False, 'import torch\n'), ((960, 980), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (972, 980), False, 'import torch\n'), ((1044, 1063), 'torch.device', 'torch.device', 
(['"""cpu"""'], {}), "('cpu')\n", (1056, 1063), False, 'import torch\n'), ((1803, 1841), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (["params['seed']"], {}), "(params['seed'])\n", (1825, 1841), False, 'import torch\n'), ((9091, 9103), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9100, 9103), False, 'import argparse, json\n'), ((912, 941), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['(0)'], {}), '(0)\n', (938, 941), False, 'import torch\n'), ((5437, 5460), 'numpy.mean', 'np.mean', (['per_epoch_time'], {}), '(per_epoch_time)\n', (5444, 5460), True, 'import numpy as np\n'), ((12971, 13000), 'numpy.array', 'np.array', (['dataset.train[:][1]'], {}), '(dataset.train[:][1])\n', (12979, 13000), True, 'import numpy as np\n'), ((13283, 13296), 'torch.mean', 'torch.mean', (['D'], {}), '(D)\n', (13293, 13296), False, 'import torch\n'), ((3253, 3264), 'time.time', 'time.time', ([], {}), '()\n', (3262, 3264), False, 'import time\n'), ((3329, 3466), 'train.train_superpixels_graph_classification.train_epoch', 'train_epoch', (['model', 'optimizer', 'device', 'train_loader', 'epoch', "net_params['augmentation']", "net_params['flip']", "net_params['distortion']"], {}), "(model, optimizer, device, train_loader, epoch, net_params[\n 'augmentation'], net_params['flip'], net_params['distortion'])\n", (3340, 3466), False, 'from train.train_superpixels_graph_classification import train_epoch, evaluate_network\n'), ((3585, 3635), 'train.train_superpixels_graph_classification.evaluate_network', 'evaluate_network', (['model', 'device', 'val_loader', 'epoch'], {}), '(model, device, val_loader, epoch)\n', (3601, 3635), False, 'from train.train_superpixels_graph_classification import train_epoch, evaluate_network\n'), ((3900, 3951), 'train.train_superpixels_graph_classification.evaluate_network', 'evaluate_network', (['model', 'device', 'test_loader', 'epoch'], {}), '(model, device, test_loader, epoch)\n', (3916, 3951), False, 'from 
train.train_superpixels_graph_classification import train_epoch, evaluate_network\n'), ((5371, 5382), 'time.time', 'time.time', ([], {}), '()\n', (5380, 5382), False, 'import time\n'), ((13423, 13439), 'torch.log', 'torch.log', (['(D + 1)'], {}), '(D + 1)\n', (13432, 13439), False, 'import torch\n'), ((4302, 4313), 'time.time', 'time.time', ([], {}), '()\n', (4311, 4313), False, 'import time\n'), ((4610, 4621), 'time.time', 'time.time', ([], {}), '()\n', (4619, 4621), False, 'import time\n'), ((13354, 13369), 'torch.div', 'torch.div', (['(1)', 'D'], {}), '(1, D)\n', (13363, 13369), False, 'import torch\n'), ((3987, 3998), 'time.time', 'time.time', ([], {}), '()\n', (3996, 3998), False, 'import time\n')] |
import cv2
import numpy as np
import scipy.io
from scipy.spatial import transform
from distilled import hopenet
def get_pt2d_from_mat(mat_path):
    """Return the 2D facial landmarks stored under the 'pt2d' key of a .mat file."""
    annotation = scipy.io.loadmat(mat_path)
    return annotation['pt2d']
def get_ypr_from_mat(mat_path):
    """Return the head pose angles (radians) from a .mat annotation.

    The 'Pose_Para' field holds [pitch yaw roll tdx tdy tdz scale_factor];
    only the first three entries (the angles) are returned.
    """
    annotation = scipy.io.loadmat(mat_path)
    return annotation['Pose_Para'][0][:3]
def draw_axes_orig(img, yaw, pitch, roll, tdx=None, tdy=None, size = 100):
    """Original Hopenet code from code/utils.py, kept verbatim for comparison.

    Draws the rotated x (red), y (green) and z (blue) axes onto ``img``
    given yaw/pitch/roll in degrees, centred at (tdx, tdy) or, when those
    are None, at the image centre.  Returns the mutated image.
    """
    from math import sin, cos
    # Convert to radians; yaw is negated (screen x grows rightwards).
    pitch = pitch * np.pi / 180
    yaw = -(yaw * np.pi / 180)
    roll = roll * np.pi / 180
    if tdx != None and tdy != None:
        tdx = tdx
        tdy = tdy
    else:
        # Default to the image centre.
        height, width = img.shape[:2]
        tdx = width / 2
        tdy = height / 2
    # X-Axis pointing to right, drawn in red.
    x1 = size * (cos(yaw) * cos(roll)) + tdx
    y1 = size * (cos(pitch) * sin(roll) + cos(roll) * sin(pitch) * sin(yaw)) + tdy
    # Y-Axis (pointing down), drawn in green.
    x2 = size * (-cos(yaw) * sin(roll)) + tdx
    y2 = size * (cos(pitch) * cos(roll) - sin(pitch) * sin(yaw) * sin(roll)) + tdy
    # Z-Axis (out of the screen), drawn in blue.
    x3 = size * (sin(yaw)) + tdx
    y3 = size * (-cos(yaw) * sin(pitch)) + tdy
    cv2.line(img, (int(tdx), int(tdy)), (int(x1),int(y1)),(0,0,255),3)
    cv2.line(img, (int(tdx), int(tdy)), (int(x2),int(y2)),(0,255,0),3)
    cv2.line(img, (int(tdx), int(tdy)), (int(x3),int(y3)),(255,0,0),2)
    return img
def draw_axes(img, yaw, pitch, roll, tx=None, ty=None, axes=((1, 0, 0), (0, 1, 0), (0, 0, 1)), size=100, thickness=1):
    """
    Draws axes using rotation matrix.
    :param img: image
    :param yaw: angle prediction in degrees.
    :param pitch: angle prediction in degrees.
    :param roll: angle prediction in degrees.
    :param axes: axes unit vectors; the default corresponds to the standard
                 right-handed CS: x, y as on the screen, z away from the observer.
    :param size: axis length.
    :param thickness: line thickness.
    """
    rot = hopenet.angles_to_rotation_matrix(yaw, pitch, roll, degrees=True)
    # Sanity check: rot must be a proper rotation matrix (orthonormal, det = 1).
    assert np.linalg.norm(np.dot(rot, rot.T) - np.eye(3)) < 1e-5
    assert np.allclose(np.linalg.det(rot), 1)
    if tx is not None and ty is not None:
        center = np.array((tx, ty, 0), dtype=np.float32)
    else:
        center = np.array((img.shape[1] / 2, img.shape[0] / 2, 0))
    unit_axes = np.array(axes, dtype=np.float32)
    endpoints = np.dot(unit_axes, rot) * size + center
    origin_px = tuple(center[:2].astype(int))
    bgr = ((0, 0, 255), (0, 255, 0), (255, 0, 0))
    # Draw the z-axis first, as it is usually behind the other two.
    for idx in (2, 1, 0):
        cv2.line(img, origin_px, tuple(endpoints[idx, :2].astype(int)), bgr[idx], thickness)
    return img
def rotation_diff(r1, r2, degrees=True):
    """
    Computes the absolute angular difference between two rotation matrices d = |r1 - r2|.
    :param r1: rotation matrix 1 as numpy array
    :param r2: rotation matrix 2 as numpy array
    :param degrees: if True, the angle is returned in degrees, otherwise in radians.
    :return: angular difference
    """
    relative = np.dot(r1, np.linalg.inv(r2))
    angle = np.linalg.norm(transform.Rotation.from_matrix(relative).as_rotvec())
    return np.rad2deg(angle) if degrees else angle
"cv2.line",
"numpy.eye",
"math.sin",
"numpy.rad2deg",
"numpy.linalg.inv",
"numpy.array",
"math.cos",
"scipy.spatial.transform.Rotation.from_matrix",
"numpy.dot",
"numpy.linalg.det",
"distilled.hopenet.angles_to_rotation_matrix"
] | [((2301, 2333), 'numpy.array', 'np.array', (['axes'], {'dtype': 'np.float32'}), '(axes, dtype=np.float32)\n', (2309, 2333), True, 'import numpy as np\n'), ((2343, 2408), 'distilled.hopenet.angles_to_rotation_matrix', 'hopenet.angles_to_rotation_matrix', (['yaw', 'pitch', 'roll'], {'degrees': '(True)'}), '(yaw, pitch, roll, degrees=True)\n', (2376, 2408), False, 'from distilled import hopenet\n'), ((2577, 2593), 'numpy.linalg.det', 'np.linalg.det', (['r'], {}), '(r)\n', (2590, 2593), True, 'import numpy as np\n'), ((2658, 2697), 'numpy.array', 'np.array', (['(tx, ty, 0)'], {'dtype': 'np.float32'}), '((tx, ty, 0), dtype=np.float32)\n', (2666, 2697), True, 'import numpy as np\n'), ((2725, 2774), 'numpy.array', 'np.array', (['(img.shape[1] / 2, img.shape[0] / 2, 0)'], {}), '((img.shape[1] / 2, img.shape[0] / 2, 0))\n', (2733, 2774), True, 'import numpy as np\n'), ((3057, 3099), 'cv2.line', 'cv2.line', (['img', 'o', 'a', 'colors[ai]', 'thickness'], {}), '(img, o, a, colors[ai], thickness)\n', (3065, 3099), False, 'import cv2\n'), ((3491, 3508), 'numpy.linalg.inv', 'np.linalg.inv', (['r2'], {}), '(r2)\n', (3504, 3508), True, 'import numpy as np\n'), ((3611, 3625), 'numpy.rad2deg', 'np.rad2deg', (['da'], {}), '(da)\n', (3621, 3625), True, 'import numpy as np\n'), ((1433, 1441), 'math.sin', 'sin', (['yaw'], {}), '(yaw)\n', (1436, 1441), False, 'from math import sin, cos\n'), ((2786, 2801), 'numpy.dot', 'np.dot', (['axes', 'r'], {}), '(axes, r)\n', (2792, 2801), True, 'import numpy as np\n'), ((1082, 1090), 'math.cos', 'cos', (['yaw'], {}), '(yaw)\n', (1085, 1090), False, 'from math import sin, cos\n'), ((1093, 1102), 'math.cos', 'cos', (['roll'], {}), '(roll)\n', (1096, 1102), False, 'from math import sin, cos\n'), ((1268, 1277), 'math.sin', 'sin', (['roll'], {}), '(roll)\n', (1271, 1277), False, 'from math import sin, cos\n'), ((1478, 1488), 'math.sin', 'sin', (['pitch'], {}), '(pitch)\n', (1481, 1488), False, 'from math import sin, cos\n'), ((2519, 2533), 
'numpy.dot', 'np.dot', (['r', 'r.T'], {}), '(r, r.T)\n', (2525, 2533), True, 'import numpy as np\n'), ((2536, 2545), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2542, 2545), True, 'import numpy as np\n'), ((3534, 3568), 'scipy.spatial.transform.Rotation.from_matrix', 'transform.Rotation.from_matrix', (['dr'], {}), '(dr)\n', (3564, 3568), False, 'from scipy.spatial import transform\n'), ((1127, 1137), 'math.cos', 'cos', (['pitch'], {}), '(pitch)\n', (1130, 1137), False, 'from math import sin, cos\n'), ((1140, 1149), 'math.sin', 'sin', (['roll'], {}), '(roll)\n', (1143, 1149), False, 'from math import sin, cos\n'), ((1177, 1185), 'math.sin', 'sin', (['yaw'], {}), '(yaw)\n', (1180, 1185), False, 'from math import sin, cos\n'), ((1257, 1265), 'math.cos', 'cos', (['yaw'], {}), '(yaw)\n', (1260, 1265), False, 'from math import sin, cos\n'), ((1302, 1312), 'math.cos', 'cos', (['pitch'], {}), '(pitch)\n', (1305, 1312), False, 'from math import sin, cos\n'), ((1315, 1324), 'math.cos', 'cos', (['roll'], {}), '(roll)\n', (1318, 1324), False, 'from math import sin, cos\n'), ((1351, 1360), 'math.sin', 'sin', (['roll'], {}), '(roll)\n', (1354, 1360), False, 'from math import sin, cos\n'), ((1467, 1475), 'math.cos', 'cos', (['yaw'], {}), '(yaw)\n', (1470, 1475), False, 'from math import sin, cos\n'), ((1152, 1161), 'math.cos', 'cos', (['roll'], {}), '(roll)\n', (1155, 1161), False, 'from math import sin, cos\n'), ((1164, 1174), 'math.sin', 'sin', (['pitch'], {}), '(pitch)\n', (1167, 1174), False, 'from math import sin, cos\n'), ((1327, 1337), 'math.sin', 'sin', (['pitch'], {}), '(pitch)\n', (1330, 1337), False, 'from math import sin, cos\n'), ((1340, 1348), 'math.sin', 'sin', (['yaw'], {}), '(yaw)\n', (1343, 1348), False, 'from math import sin, cos\n')] |
import pandas as pd
import numpy as np
import os,json,pickle
from scipy.signal import butter, lfilter
def butter_lowpass(cutoff, fs, order):
    """
    Designs a digital lowpass Butterworth filter.

    cutoff: the cutoff frequency of the lowpass filter (Hz)
    fs: the sampling frequency of the signal (Hz)
    order: the order of the lowpass filter
    returns: (b, a) filter coefficients
    """
    nyquist = fs / 2
    return butter(order, cutoff / nyquist, btype='low', analog=False)
def filteringAndWindowing(data, fs, windowSize, slide, cutoff, order):
    """
    Filters an entire sequence with a lowpass Butterworth filter, then
    windows the sequence into smaller trials with an overlap determined
    by the slide parameter.

    data: the sequence samples, shape (nSamples, nChannels)
    fs: the sampling rate used to sample the signal
    windowSize: the window length in samples
    slide: the number of samples the window moves every step
    cutoff: the cutoff frequency of the lowpass filter
    order: the order of the filter
    returns: list of (windowSize, nChannels) arrays
    """
    b, a = butter_lowpass(cutoff=cutoff, fs=fs, order=order)
    data = np.array(data)
    data = lfilter(b, a, data, axis=0)
    newTrials = []
    nSamples = len(data)
    start = 0
    end = windowSize
    # Use <= so the final window ending exactly at the last sample is kept.
    # The previous strict < dropped it: e.g. nSamples == windowSize yielded
    # no trials at all.
    while end <= nSamples:
        newTrials.append(data[start:end, :])
        start += slide
        end += slide
    return newTrials
def loadFileApplyfilterAndSlidingWindow(windowSize, slide, cutoff, order):
    """
    Loads data.json, lowpass-filters every sequence with a Butterworth
    filter and windows each sequence into trials.  Categories whose key
    ends in "MODEL" are skipped.

    windowSize: the length of the window used
    slide: how many samples the window moves each step
    cutoff: the cutoff frequency of the Butterworth filter
    order: the order of the lowpass Butterworth filter
    returns: dict with parallel "labels" and "trials" lists
    """
    fs = 32  # sampling rate of the recordings
    with open("data.json", "r") as fin:
        raw = json.load(fin)
    windowed = {}
    for key, sequences in raw.items():
        trials = []
        for seq in sequences:
            trials.extend(filteringAndWindowing(seq, fs, windowSize, slide, cutoff, order))
        if key[-5:] == "MODEL":
            print("not gonna use " + str(key))
        else:
            windowed[key] = trials
    labeled = {"labels": [], "trials": []}
    for key, trials in windowed.items():
        for trial in trials:
            labeled["labels"].append(key)
            labeled["trials"].append(trial.tolist())
    return labeled
| [
"scipy.signal.lfilter",
"json.load",
"numpy.array",
"scipy.signal.butter"
] | [((490, 545), 'scipy.signal.butter', 'butter', (['order', 'normal_cutoff'], {'btype': '"""low"""', 'analog': '(False)'}), "(order, normal_cutoff, btype='low', analog=False)\n", (496, 545), False, 'from scipy.signal import butter, lfilter\n'), ((1337, 1351), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1345, 1351), True, 'import numpy as np\n'), ((1363, 1390), 'scipy.signal.lfilter', 'lfilter', (['b', 'a', 'data'], {'axis': '(0)'}), '(b, a, data, axis=0)\n', (1370, 1390), False, 'from scipy.signal import butter, lfilter\n'), ((2099, 2113), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (2108, 2113), False, 'import os, json, pickle\n')] |
"""
Implements the spike and step check used in the EN quality control
system, pages 20-21 of http://www.metoffice.gov.uk/hadobs/en3/OQCpaper.pdf
The EN quality control system does not directly reject levels that are marked as
steps, it marks them as suspect and then they are subjected to an extra test (a
background check) that can reprieve them. In the future it will be best to
remove these elements and include them within the background check code.
"""
import numpy as np
import util.main as main
def test(p, parameters, suspect=False):
    """
    Runs the quality control check on profile p and returns a numpy array
    of quality control decisions with False where the data value has
    passed the check and True where it failed.

    By default the test returns definite rejections. If the suspect keyword is
    set to True the test instead returns suspect levels.

    :param p: profile object exposing uid(), n_levels(), z(), t(), latitude()
    :param parameters: dict of run parameters; only parameters["db"] is used here
    :param suspect: return suspect levels instead of definite rejections
    """
    return run_qc(p, suspect, parameters)
def run_qc(p, suspect, parameters):
    """Core of the EN spike and step check.

    Walks profile p level by level, flagging spikes (conditions A/B) and
    steps (condition C).  With suspect=True, previously cached suspect
    flags are returned from the database if present, and freshly computed
    suspect flags are written back at the end.
    """
    # check for pre-registered suspect tabulation, if that's what we want:
    if suspect:
        query = 'SELECT suspect FROM enspikeandstep WHERE uid = ' + str(p.uid()) + ';'
        susp = main.dbinteract(query, targetdb=parameters["db"])
        if len(susp) > 0:
            return main.unpack_row(susp[0])[0]
    # Define tolerances used.
    # NOTE(review): tolD / tolDTrop / tolT are never read below — apparently
    # leftovers from an earlier tabulated-tolerance implementation.
    tolD     = np.array([0, 200, 300, 500, 600])
    tolDTrop = np.array([0, 300, 400, 500, 600])
    tolT     = np.array([5.0, 5.0, 2.5, 2.0, 1.5])
    # Define an array to hold results.
    qc = np.zeros(p.n_levels(), dtype=bool)
    # Get depth and temperature values from the profile.
    z = p.z()
    t = p.t()
    # Find which levels have data.
    isTemperature = (t.mask==False)
    isDepth = (z.mask==False)
    isData = isTemperature & isDepth
    # Array to hold temperature differences between levels and gradients.
    dt, gt = composeDT(t, z, p.n_levels())
    # Spikes and steps detection.
    for i in range(1, p.n_levels()):
        # wt1 weights the interpolation between the two bracketing levels.
        if i >= 2:
            if (isData[i-2] and isData[i-1] and isData[i]) == False:
                continue
            if z[i] - z[i-2] >= 5.0:
                wt1 = (z[i-1] - z[i-2]) / (z[i] - z[i-2])
            else:
                wt1 = 0.5
        else:
            if (isData[i-1] and isData[i]) == False:
                continue
            wt1 = 0.5
        dTTol = determineDepthTolerance(z[i-1], np.abs(p.latitude()))
        gTTol = 0.05
        # Check for low temperatures in the Tropics.
        # This might be more appropriate to appear in a separate EN regional
        # range check but is included here for now for consistency with the
        # original code.
        if (np.abs(p.latitude()) < 20.0 and z[i-1] < 1000.0 and
            t[i-1] < 1.0):
            dt[i] = np.ma.masked
            if suspect == True: qc[i-1] = True
            continue
        qc, dt = conditionA(dt, dTTol, qc, wt1, i, suspect)
        qc, dt = conditionB(dt, dTTol, gTTol, qc, gt, i, suspect)
        qc = conditionC(dt, dTTol, z, qc, t, i, suspect)
    # End of loop over levels.
    # Step or 0.0 at the bottom of a profile.
    if isData[-1] and dt.mask[-1] == False:
        dTTol = determineDepthTolerance(z[-1], np.abs(p.latitude()))
        if np.abs(dt[-1]) > dTTol:
            if suspect == True: qc[-1] = True
        # NOTE(review): this zero-temperature check only runs when the last
        # level also passed the isData / dt.mask guard above — confirm this
        # nesting matches the reference EN implementation.
        if isTemperature[-1]:
            if t[-1] == 0.0:
                if suspect == True: qc[-1] = True
    # If 4 levels or more than half the profile is rejected then reject all.
    if suspect == False:
        nRejects = np.count_nonzero(qc)
        if nRejects >= 4 or nRejects > p.n_levels()/2:
            qc[:] = True
    # register suspects, if computed, to db
    if suspect:
        query = "REPLACE INTO enspikeandstep VALUES(?,?);"
        main.dbinteract(query, [p.uid(), main.pack_array(qc)], targetdb=parameters["db"] )
    return qc
def composeDT(var, z, nLevels):
    '''
    Build masked arrays of level-to-level differences (dt) and gradients (gt)
    of var.  Entries are only filled where consecutive levels are close enough
    together (<= 50 m apart, or <= 100 m apart once deeper than 350 m); all
    other entries stay masked.
    '''
    dt = np.ma.zeros(nLevels)
    dt.mask = True
    gt = dt.copy()
    for i in range(1, nLevels):
        gap = z[i] - z[i-1]
        if gap <= 50.0 or (z[i] >= 350.0 and gap <= 100.0):
            dt[i] = var[i] - var[i-1]
            # floor the separation at 10 m to avoid huge gradients
            gt[i] = dt[i] / np.max([10.0, gap])
    return dt, gt
def determineDepthTolerance(z, lattitude):
    '''
    Depth-dependent temperature tolerance (degrees C) used by the spike and
    step conditions.  The 5 C base tolerance shrinks with depth; the
    transition starts deeper (300 m vs 200 m) in the tropics (|lat| < 20).
    '''
    depthTol = 300.0 if lattitude < 20.0 else 200.0
    if z > 600.0:
        factor = 0.3
    elif z > 500.0:
        factor = 0.4
    elif z > depthTol + 100.0:
        factor = 0.5
    elif z > depthTol:
        # linear taper from 1.0 at depthTol down to 0.5 at depthTol + 100
        factor = 1.0 - 0.005 * (z - depthTol)
    else:
        factor = 1.0
    return factor * 5.0
def conditionA(dt, dTTol, qc, wt1, i, suspect):
    '''
    condition A (large spike check)
    '''
    both_present = dt.mask[i-1] == False and dt.mask[i] == False
    if both_present and np.max(np.abs(dt[i-1:i+1])) > dTTol:
        if np.abs(dt[i] + dt[i-1]) < 0.5*dTTol:
            # Opposite-signed jumps of similar size: spike at level i-1.
            dt[i-1:i+1] = np.ma.masked
            if suspect == False: qc[i-1] = True
        elif np.abs((1.0-wt1) * dt[i-1] - wt1*dt[i]) < 0.5*dTTol:
            # Likely to be a valid large temperature gradient.
            dt[i-1:i+1] = np.ma.masked # Stops the levels being rechecked.
    return qc, dt
def conditionB(dt, dTTol, gTTol, qc, gt, i, suspect):
    '''
    condition B (small spike check)
    '''
    if dt.mask[i-1] or dt.mask[i]:
        return qc, dt
    big_enough = np.max(np.abs(dt[i-1:i+1])) > 0.5*dTTol
    steep = np.max(np.abs(gt[i-1:i+1])) > gTTol
    # opposite-signed jumps of nearly equal magnitude
    symmetric = np.abs(dt[i] + dt[i-1]) < 0.25*np.abs(dt[i] - dt[i-1])
    if big_enough and steep and symmetric:
        dt[i-1:i+1] = np.ma.masked
        if suspect == False: qc[i-1] = True
    return qc, dt
def conditionC(dt, dTTol, z, qc, t, i, suspect):
    '''
    condition C (steps)

    Flags both levels bracketing a large temperature step at i-1 as suspect,
    unless the step looks like a sharp (but physical) thermocline or is
    consistent with linear interpolation between the neighbouring levels.
    Only mutates qc when suspect is True; always returns qc.
    '''
    if dt.mask[i-1] == False and np.abs(dt[i-1]) > dTTol:
        if z[i-1] <= 250.0 and dt[i-1] < -dTTol and dt[i-1] > -3.0*dTTol:
            # May be sharp thermocline, do not reject.
            pass
        elif i>1 and z[i] - z[i-2] > 0 and np.abs(t[i-1] - interpolate(z[i-1], z[i-2], z[i], t[i-2], t[i])) < 0.5*dTTol:
            # consistent interpolation, do not reject
            pass
        else:
            # mark both sides of the step
            if suspect == True: qc[i-2:i] = True
    return qc
def interpolate(depth, shallow, deep, shallowVal, deepVal):
    '''
    Linearly interpolate a value at <depth> between the bracketing levels
    (shallow, shallowVal) and (deep, deepVal).
    '''
    frac = (depth - shallow) / (deep - shallow)
    return shallowVal + frac * (deepVal - shallowVal)
def loadParameters(parameterStore):
    """Ensure the enspikeandstep cache table exists in the target database.

    Called once at startup; the table stores per-profile suspect flags so
    run_qc can reuse them between test passes.
    """
    main.dbinteract("CREATE TABLE IF NOT EXISTS enspikeandstep (uid INTEGER PRIMARY KEY, suspect BLOB)", targetdb=parameterStore["db"])
| [
"numpy.count_nonzero",
"numpy.abs",
"numpy.max",
"numpy.array",
"util.main.unpack_row",
"numpy.ma.zeros",
"util.main.pack_array",
"util.main.dbinteract"
] | [((1363, 1396), 'numpy.array', 'np.array', (['[0, 200, 300, 500, 600]'], {}), '([0, 200, 300, 500, 600])\n', (1371, 1396), True, 'import numpy as np\n'), ((1412, 1445), 'numpy.array', 'np.array', (['[0, 300, 400, 500, 600]'], {}), '([0, 300, 400, 500, 600])\n', (1420, 1445), True, 'import numpy as np\n'), ((1461, 1496), 'numpy.array', 'np.array', (['[5.0, 5.0, 2.5, 2.0, 1.5]'], {}), '([5.0, 5.0, 2.5, 2.0, 1.5])\n', (1469, 1496), True, 'import numpy as np\n'), ((4074, 4094), 'numpy.ma.zeros', 'np.ma.zeros', (['nLevels'], {}), '(nLevels)\n', (4085, 4094), True, 'import numpy as np\n'), ((6665, 6806), 'util.main.dbinteract', 'main.dbinteract', (['"""CREATE TABLE IF NOT EXISTS enspikeandstep (uid INTEGER PRIMARY KEY, suspect BLOB)"""'], {'targetdb': "parameterStore['db']"}), "(\n 'CREATE TABLE IF NOT EXISTS enspikeandstep (uid INTEGER PRIMARY KEY, suspect BLOB)'\n , targetdb=parameterStore['db'])\n", (6680, 6806), True, 'import util.main as main\n'), ((1182, 1231), 'util.main.dbinteract', 'main.dbinteract', (['query'], {'targetdb': "parameters['db']"}), "(query, targetdb=parameters['db'])\n", (1197, 1231), True, 'import util.main as main\n'), ((3632, 3652), 'numpy.count_nonzero', 'np.count_nonzero', (['qc'], {}), '(qc)\n', (3648, 3652), True, 'import numpy as np\n'), ((3335, 3349), 'numpy.abs', 'np.abs', (['dt[-1]'], {}), '(dt[-1])\n', (3341, 3349), True, 'import numpy as np\n'), ((5042, 5067), 'numpy.abs', 'np.abs', (['(dt[i] + dt[i - 1])'], {}), '(dt[i] + dt[i - 1])\n', (5048, 5067), True, 'import numpy as np\n'), ((5662, 5687), 'numpy.abs', 'np.abs', (['(dt[i] + dt[i - 1])'], {}), '(dt[i] + dt[i - 1])\n', (5668, 5687), True, 'import numpy as np\n'), ((5941, 5958), 'numpy.abs', 'np.abs', (['dt[i - 1]'], {}), '(dt[i - 1])\n', (5947, 5958), True, 'import numpy as np\n'), ((1277, 1301), 'util.main.unpack_row', 'main.unpack_row', (['susp[0]'], {}), '(susp[0])\n', (1292, 1301), True, 'import util.main as main\n'), ((3894, 3913), 'util.main.pack_array', 
'main.pack_array', (['qc'], {}), '(qc)\n', (3909, 3913), True, 'import util.main as main\n'), ((4318, 4349), 'numpy.max', 'np.max', (['[10.0, z[i] - z[i - 1]]'], {}), '([10.0, z[i] - z[i - 1]])\n', (4324, 4349), True, 'import numpy as np\n'), ((5000, 5023), 'numpy.abs', 'np.abs', (['dt[i - 1:i + 1]'], {}), '(dt[i - 1:i + 1])\n', (5006, 5023), True, 'import numpy as np\n'), ((5179, 5224), 'numpy.abs', 'np.abs', (['((1.0 - wt1) * dt[i - 1] - wt1 * dt[i])'], {}), '((1.0 - wt1) * dt[i - 1] - wt1 * dt[i])\n', (5185, 5224), True, 'import numpy as np\n'), ((5569, 5592), 'numpy.abs', 'np.abs', (['dt[i - 1:i + 1]'], {}), '(dt[i - 1:i + 1])\n', (5575, 5592), True, 'import numpy as np\n'), ((5621, 5644), 'numpy.abs', 'np.abs', (['gt[i - 1:i + 1]'], {}), '(gt[i - 1:i + 1])\n', (5627, 5644), True, 'import numpy as np\n'), ((5693, 5718), 'numpy.abs', 'np.abs', (['(dt[i] - dt[i - 1])'], {}), '(dt[i] - dt[i - 1])\n', (5699, 5718), True, 'import numpy as np\n')] |
import os
import sys
import argparse
import pickle
import numpy as np
import warnings
import pandas as pd
import functools
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, Callback
from keras import backend as K
from keras.models import load_model
from sklearn.metrics import classification_report
# print(sys.path)
# import CNN_layers.WordCNN
from layers.CNN_layers import WordCNN, WordCNN_Ctxt, CharCNN, HybridCNN
class ClassificationReport(Callback):
    """Keras callback that prints a sklearn classification report on the
    evaluation data at the end of every epoch.

    :param model: compiled Keras model used for prediction
    :param x_eval: evaluation inputs (array or list of arrays)
    :param y_eval: one-hot evaluation labels
    :param labels: class names, in label-index order
    """
    def __init__(self, model, x_eval, y_eval, labels):
        self.model = model
        self.x_eval = x_eval
        # store ground-truth class indices once, rather than one-hot rows
        self.truth = np.argmax(y_eval, axis=1)
        self.labels = labels
    def on_epoch_end(self, epoch, logs=None):
        # logs=None replaces the mutable default {} (shared across calls);
        # logs is unused here and Keras always passes it explicitly.
        print("Generating Classification Report:")
        preds = np.argmax(self.model.predict(self.x_eval, verbose=1), axis=1)
        print("\n%s\n" % classification_report(self.truth, preds, target_names=self.labels))
def returnLabels():
    """Return the sorted list of unique entity labels from crawled_data.pkl.

    Sorting makes the label order deterministic across processes.  The
    previous list(set(...)) ordering depended on string hash randomization,
    so the label indices used when training could silently differ from the
    indices used when evaluating in a separate run.
    """
    path = os.path.dirname(os.path.abspath(__file__))
    dataPath = path + "/../../data"
    with open(dataPath + "/crawled_data.pkl", "rb") as f:
        id2entities = pickle.load(f)
    return sorted({entity[0] for entity in id2entities.values()})
def vector2matrix(text_vector, max_len=140, N_DIM=70):
    """One-hot encode a sequence of character indices.

    Each entry of text_vector is a character index in [0, N_DIM); an index
    of -1 yields an all-zero row (padding / unknown character).  Rows past
    len(text_vector) remain zero.

    :param text_vector: iterable of numeric character indices
    :param max_len: number of rows in the output matrix
    :param N_DIM: size of the character alphabet (columns)
    :return: float array of shape (max_len, N_DIM)
    """
    matrix = np.zeros((max_len, N_DIM))
    indices = np.asarray(text_vector, dtype=int)  # truncates floats like int() did
    positions = np.arange(len(indices))
    valid = indices != -1
    # vectorized replacement of the per-row Python loop
    matrix[positions[valid], indices[valid]] = 1
    return matrix
def report_average(report_list):
    """Average several sklearn classification-report strings into one DataFrame.

    Each report string is parsed back into a table of per-class metrics;
    classes missing from a fold's report are padded with zero rows so every
    fold contributes the same index.  Returns the element-wise mean with the
    class labels plus 'avg / total' as the index.

    NOTE(review): this relies on the exact text layout produced by
    sklearn.metrics.classification_report (blank-line sections, a
    'weighted avg' row) — it will break if that format changes.
    """
    labels = returnLabels()
    output_report_list = list()
    for report in report_list:
        # collapse runs of whitespace and split the report into its sections
        splitted = [' '.join(x.split()) for x in report.split('\n\n')]
        header = [x for x in splitted[0].split(' ')]
        # one row per class: label + the metric columns
        data = np.array(splitted[1].split(' ')).reshape(-1, len(header) + 1)
        masked_data = np.array([[l,'0','0','0','0'] for l in labels])
        for i, label in enumerate(labels):
            if label not in data:
                # pad classes absent from this fold with zeros
                data = np.insert(data, i, masked_data[i], axis=0)
        data = np.delete(data, 0, 1).astype(float)
        # avg_total = np.array([x for x in splitted[2].split(' ')][3:]).astype(float).reshape(-1, len(header))
        avg_total = np.array([x for x in splitted[2].split('weighted avg ')[1].split(' ')]).astype(float).reshape(-1, len(header))
        df = pd.DataFrame(np.concatenate((data, avg_total)), columns=header)
        output_report_list.append(df)
    res = functools.reduce(lambda x, y: x.add(y, fill_value=0), output_report_list) / len(output_report_list)
    metric_labels = labels + ['avg / total']
    return res.rename(index={res.index[idx]: metric_labels[idx] for idx in range(len(res.index))})
def load_word_npys(path, splits, index):
    """Load the word-level inputs and labels for one cross-validation fold.

    Each CtxtText_InputText_<split>.npy row holds 100 context-token columns
    followed by the tweet-text columns; the two parts are returned separately.

    :param path: directory containing the per-fold subdirectories
    :param splits: subset of ("train", "valid") to load
    :param index: fold index (subdirectory name)
    :return: (text_data, ctxt_data, label_data) dicts keyed by split
    """
    text_data = {"train": None, "valid": None}
    ctxt_data = {"train": None, "valid": None}
    label_data = {"train": None, "valid": None}
    fold_dir = path + "/" + str(index)
    for split in splits:  # only train and valid data are stored per fold
        combined = np.load("%s/CtxtText_InputText_%s.npy" % (fold_dir, split))
        ctxt_data[split] = combined[:, :100]
        text_data[split] = combined[:, 100:]
        label_data[split] = np.load("%s/Label_%s.npy" % (fold_dir, split))
    return text_data, ctxt_data, label_data
def load_char_npys(path, splits, index):
    """Load the character-level inputs and labels for one cross-validation fold.

    Character index vectors from Char_InputText_<split>.npy are expanded into
    one-hot matrices via vector2matrix before being stacked.

    :param path: directory containing the per-fold subdirectories
    :param splits: subset of ("train", "valid") to load
    :param index: fold index (subdirectory name)
    :return: (text_data, label_data) dicts keyed by split
    """
    text_data = {"train":None, "valid":None}
    label_data = {"train":None, "valid":None}
    file_format = path+"/"+str(index)+"/%s_%s.npy"
    for split in splits: # only save train and valid data
        file_name = file_format % ("Char_InputText", split)
        _Text = np.load(file_name)
        text_data[split] = []
        for _text_vector in _Text:
            # expand each index vector into a (max_len, N_DIM) one-hot matrix
            text_data[split].append(vector2matrix(_text_vector))
        text_data[split] = np.asarray(text_data[split])
        file_name = file_format % ("Label", split)
        label_data[split] = np.load(file_name)
    return text_data, label_data
def train_word_cnn(use_glove, use_ctxt, kfold_index, batch_size, num_epochs, filter_sizes, num_filters, train_embedding, lr, time_index):
    """Train a word-level CNN (optionally with context input) on one fold.

    Loads the vocabulary, embedding table and fold data from data/target,
    then fits a WordCNN or WordCNN_Ctxt with TensorBoard logging, per-epoch
    checkpointing and early stopping on validation accuracy.  Checkpoints go
    to data/output/<model_name>/<time_index>/<kfold_index>.
    """
    labels = returnLabels()
    splits = ["train", "valid", "test"]
    model_name = "CNN_WORD"
    if use_ctxt:
        model_name += "_CTXT"
    if use_glove:
        model_name += "_GLOVE"
    path = os.path.dirname(os.path.abspath(__file__))
    dataPath = path + "/../../data"
    targetPath = dataPath + "/target"
    with open(targetPath+"/vocab.pkl", "rb") as f:
        vocab = pickle.load(f)
    vocab_size = len(vocab["word2id"].keys())
    print("vocabulary loaded with %s words" % vocab_size)
    embedding_matrix = np.load(targetPath+"/embedding.npy")
    # the embedding table must cover the whole vocabulary
    assert embedding_matrix.shape[0] == vocab_size
    print("loaded embedding table")
    K.set_learning_phase(1)
    text_data, ctxt_data, label_data = load_word_npys(targetPath, splits[:2], kfold_index)
    sequence_length = text_data["train"].shape[1]
    print("sequence_length: %s" % sequence_length)
    log_path = dataPath + "/output/"+model_name+"/"+str(time_index)+"/"+str(kfold_index)
    # define keras training procedure
    tb_callback = TensorBoard(log_dir=log_path, histogram_freq=0, write_graph=True, write_images=True)
    ckpt_callback = ModelCheckpoint(log_path + "/weights.{epoch:02d}.hdf5",
                                    monitor='val_acc', save_best_only=True,
                                    save_weights_only=False, mode='max', verbose=1)
    early_stop_callback = EarlyStopping(monitor='val_acc', min_delta=0, patience=1, verbose=0, mode='max')
    if use_ctxt:
        # two-input variant: tweet text plus 100 context tokens
        model = WordCNN_Ctxt(sequence_length=sequence_length, n_classes=len(labels), vocab_size=vocab_size,
                        filter_sizes=filter_sizes, num_filters=num_filters, learning_rate=lr,
                        embedding_size=300, embedding_matrix= embedding_matrix, train_embedding=train_embedding)
        clf_report_callback = ClassificationReport(model.model, [text_data["valid"],ctxt_data["valid"]], label_data["valid"], labels)
        model.model.fit(x=[text_data["train"],ctxt_data["train"]], y=label_data["train"], batch_size=batch_size, verbose=2, epochs=num_epochs,
                        callbacks=[tb_callback, clf_report_callback, early_stop_callback, ckpt_callback],
                        validation_data=([text_data["valid"],ctxt_data["valid"]], label_data["valid"]))
    else:
        model = WordCNN(sequence_length=sequence_length, n_classes=len(labels), vocab_size=vocab_size,
                        filter_sizes=filter_sizes, num_filters=num_filters, learning_rate=lr,
                        embedding_size=300, embedding_matrix= embedding_matrix, train_embedding=train_embedding)
        clf_report_callback = ClassificationReport(model.model, text_data["valid"], label_data["valid"], labels)
        model.model.fit(x=text_data["train"], y=label_data["train"], batch_size=batch_size, verbose=2, epochs=num_epochs,
                        callbacks=[tb_callback, clf_report_callback, early_stop_callback, ckpt_callback],
                        validation_data=(text_data["valid"], label_data["valid"]))
    print("Training Finished")
def train_char_cnn(kfold_index, batch_size, num_epochs, filter_sizes, num_filters, train_embedding, lr, time_index):
    """Train a character-level CNN on one cross-validation fold.

    Loads one-hot character inputs from data/target and fits a CharCNN with
    TensorBoard logging, per-epoch checkpointing and early stopping on
    validation accuracy.  Checkpoints go to
    data/output/CNN_CHAR/<time_index>/<kfold_index>.
    """
    labels = returnLabels()
    splits = ["train", "valid", "test"]
    model_name = "CNN_CHAR"
    path = os.path.dirname(os.path.abspath(__file__))
    dataPath = path + "/../../data"
    targetPath = dataPath + "/target"
    K.set_learning_phase(1)
    text_data, label_data = load_char_npys(targetPath, splits[:2], kfold_index)
    # input shape: (samples, max_char_length, alphabet_size)
    char_len = text_data["train"].shape[1]
    char_embed_dim = text_data["train"].shape[2]
    print("max_char_length: %s" % char_len)
    print("char_dim: %s" % char_embed_dim)
    log_path = dataPath + "/output/"+model_name+"/"+str(time_index)+"/"+str(kfold_index)
    # define keras training procedure
    tb_callback = TensorBoard(log_dir=log_path, histogram_freq=0, write_graph=True, write_images=True)
    ckpt_callback = ModelCheckpoint(log_path + "/weights.{epoch:02d}.hdf5",
                                    monitor='val_acc', save_best_only=True,
                                    save_weights_only=False, mode='max', verbose=1)
    early_stop_callback = EarlyStopping(monitor='val_acc', min_delta=0, patience=1, verbose=0, mode='max')
    model = CharCNN(char_len=char_len, char_embed_dim=char_embed_dim, n_classes=len(labels),
                   filter_sizes=filter_sizes, num_filters=num_filters, learning_rate=lr)
    clf_report_callback = ClassificationReport(model.model, text_data["valid"], label_data["valid"], labels)
    model.model.fit(x=text_data["train"], y=label_data["train"], batch_size=batch_size, verbose=2, epochs=num_epochs,
                    callbacks=[tb_callback, clf_report_callback, early_stop_callback, ckpt_callback],
                    validation_data=(text_data["valid"], label_data["valid"]))
    print("Training Finished")
def train_hybrid_cnn(use_glove, kfold_index, batch_size, num_epochs, filter_sizes_word, num_filters_word, filter_sizes_char, num_filters_char, train_embedding, lr, time_index):
    """Train a hybrid word+character CNN on one cross-validation fold.

    Loads both word-level and character-level fold data from data/target and
    fits a two-branch HybridCNN with TensorBoard logging, per-epoch
    checkpointing and early stopping on validation accuracy.  Checkpoints go
    to data/output/CNN_HYBRID[_GLOVE]/<time_index>/<kfold_index>.
    """
    labels = returnLabels()
    splits = ["train", "valid", "test"]
    model_name = "CNN_HYBRID"
    if use_glove:
        model_name += "_GLOVE"
    path = os.path.dirname(os.path.abspath(__file__))
    dataPath = path + "/../../data"
    targetPath = dataPath + "/target"
    with open(targetPath+"/vocab.pkl", "rb") as f:
        vocab = pickle.load(f)
    vocab_size = len(vocab["word2id"].keys())
    print("vocabulary loaded with %s words" % vocab_size)
    embedding_matrix = np.load(targetPath+"/embedding.npy")
    # the embedding table must cover the whole vocabulary
    assert embedding_matrix.shape[0] == vocab_size
    print("loaded embedding table")
    K.set_learning_phase(1)
    text_data_word, _, label_data = load_word_npys(targetPath, splits[:2], kfold_index)
    text_data_char, _ = load_char_npys(targetPath, splits[:2], kfold_index)
    sequence_length = text_data_word["train"].shape[1]
    char_len = text_data_char["train"].shape[1]
    char_embed_dim = text_data_char["train"].shape[2]
    print("max_char_length: %s" % char_len)
    print("char_dim: %s" % char_embed_dim)
    print("word_sequence_length: %s" % sequence_length)
    log_path = dataPath + "/output/"+model_name+"/"+str(time_index)+"/"+str(kfold_index)
    # define keras training procedure
    tb_callback = TensorBoard(log_dir=log_path, histogram_freq=0, write_graph=True, write_images=True)
    ckpt_callback = ModelCheckpoint(log_path + "/weights.{epoch:02d}.hdf5",
                                    monitor='val_acc', save_best_only=True,
                                    save_weights_only=False, mode='max', verbose=1)
    early_stop_callback = EarlyStopping(monitor='val_acc', min_delta=0, patience=1, verbose=0, mode='max')
    model = HybridCNN(n_classes=len(labels),
                      char_len=char_len, char_embed_dim=char_embed_dim, char_filter_sizes=filter_sizes_char, char_num_filters=num_filters_char,
                      word_sequence_len=sequence_length, word_vocab_size=vocab_size, word_filter_sizes=filter_sizes_word, word_num_filters=num_filters_word,
                      word_embedding_dim=300, embedding_matrix=embedding_matrix, train_embedding=train_embedding,
                      learning_rate=lr)
    clf_report_callback = ClassificationReport(model.model, [text_data_word["valid"],text_data_char["valid"]], label_data["valid"], labels)
    model.model.fit(x=[text_data_word["train"],text_data_char["train"]], y=label_data["train"], batch_size=batch_size, verbose=2, epochs=num_epochs,
                    callbacks=[tb_callback, clf_report_callback, early_stop_callback, ckpt_callback],
                    validation_data=([text_data_word["valid"],text_data_char["valid"]], label_data["valid"]))
    print("Training Finished")
def test_word_cnn(use_glove, use_ctxt, k, time_index):
    """Evaluate the k saved word-CNN fold models on their test splits and
    print the averaged classification report.

    For each fold, the checkpoint with the highest epoch number in
    data/output/<model_name>/<time_index>/<fold> is loaded and used to
    predict the fold's test set.
    """
    labels = returnLabels()
    text_data_list, ctxt_data_list, label_data_list = [], [], []
    model_name = "CNN_WORD"
    if use_ctxt:
        model_name += "_CTXT"
    if use_glove:
        model_name += "_GLOVE"
    path = os.path.dirname(os.path.abspath(__file__))
    dataPath = path + "/../../data"
    targetPath = dataPath + "/target"
    # NOTE(review): this re-initialisation repeats the one above — harmless.
    text_data_list, ctxt_data_list, label_data_list = [], [], []
    file_format = targetPath + "/%s/%s_%s.npy"
    for index in range(k):
        file_name = file_format % (str(index), "CtxtText_InputText", "test")
        _CtxtText = np.load(file_name)
        # first 100 columns are the context tokens, the rest is the tweet text
        ctxt_data_list.append(_CtxtText[:,:100])
        text_data_list.append(_CtxtText[:,100:])
        file_name = file_format % (str(index), "Label", "test")
        label_data_list.append(np.load(file_name))
    report_list = []
    for index in range(k):
        X_orig = text_data_list[index]
        X_ctxt = ctxt_data_list[index]
        y = label_data_list[index]
        log_path = dataPath + "/output/"+model_name+"/"+str(time_index)+"/"+str(index)
        dir_list = os.listdir(log_path)
        # Find maximum epoch num for each weight output
        max_epoch_num = 0
        for file in dir_list:
            if file.startswith('weights'):
                epoch_num = int(file.split('.')[1])
                if epoch_num > max_epoch_num:
                    max_epoch_num = epoch_num
        model = load_model(log_path+"/weights."+str(max_epoch_num).zfill(2)+".hdf5")
        if use_ctxt:
            preds = model.predict([X_orig, X_ctxt], batch_size=128)
        else:
            preds = model.predict(X_orig, batch_size=128)
        # suppress sklearn warnings about classes missing from a fold
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            _report = classification_report(np.argmax(y, axis=1), np.argmax(preds, axis=1), digits=4, target_names=labels)
        report_list.append(_report)
    tot_report = report_average(report_list)
    print(tot_report)
def test_char_cnn(k, time_index):
    """Evaluate the k saved char-CNN fold models on their test splits and
    print the averaged classification report.

    For each fold, the checkpoint with the highest epoch number in
    data/output/CNN_CHAR/<time_index>/<fold> is loaded and used to predict
    the fold's one-hot-encoded character test set.
    """
    labels = returnLabels()
    model_name = "CNN_CHAR"
    path = os.path.dirname(os.path.abspath(__file__))
    dataPath = path + "/../../data"
    targetPath = dataPath + "/target"
    file_format = targetPath + "/%s/%s_%s.npy"
    report_list = []
    for index in range(k):
        # fresh buffer per fold
        text_data_list = []
        file_name = file_format % (str(index), "Char_InputText", "test")
        _text_data = np.load(file_name)
        for text_vector in _text_data:
            text_data_list.append(vector2matrix(text_vector))
        text_data = np.asarray(text_data_list)
        file_name = file_format % (str(index), "Label", "test")
        label_data = np.load(file_name)
        X_orig = text_data
        y = label_data
        log_path = dataPath + "/output/"+model_name+"/"+str(time_index)+"/"+str(index)
        dir_list = os.listdir(log_path)
        # Find maximum epoch num for each weight output
        max_epoch_num = 0
        for file in dir_list:
            if file.startswith('weights'):
                epoch_num = int(file.split('.')[1])
                if epoch_num > max_epoch_num:
                    max_epoch_num = epoch_num
        model = load_model(log_path+"/weights."+str(max_epoch_num).zfill(2)+".hdf5")
        preds = model.predict(X_orig, batch_size=128)
        # suppress sklearn warnings about classes missing from a fold
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            _report = classification_report(np.argmax(y, axis=1), np.argmax(preds, axis=1), digits=4, target_names=labels)
        report_list.append(_report)
    tot_report = report_average(report_list)
    print(tot_report)
def test_hybrid_cnn(use_glove, k, time_index):
    """Evaluate the hybrid word+char CNN over k cross-validation folds.

    For each fold, loads the held-out word vectors, character matrices
    and labels, restores the best checkpoint (highest epoch number) and
    collects a classification report; the averaged report over all folds
    is printed at the end.

    :param use_glove: evaluate the GloVe variant of the model
    :param k: number of cross-validation folds to evaluate
    :param time_index: run identifier used in the output directory path
    """
    labels = returnLabels()
    model_name = "CNN_HYBRID"
    if use_glove:
        model_name += "_GLOVE"
    path = os.path.dirname(os.path.abspath(__file__))
    dataPath = path + "/../../data"
    targetPath = dataPath + "/target"
    file_format = targetPath + "/%s/%s_%s.npy"
    report_list = []
    for index in range(k):
        # BUG FIX: this list must be reset per fold. It was previously
        # created once outside the loop, so every fold accumulated the
        # character matrices of all previous folds and char_text_data no
        # longer lined up with word_text_data / label_data.
        char_text_data_list = []
        file_name = file_format % (str(index), "Char_InputText", "test")
        _char_text_data = np.load(file_name)
        for text_vector in _char_text_data:
            char_text_data_list.append(vector2matrix(text_vector))
        char_text_data = np.asarray(char_text_data_list)
        file_name = file_format % (str(index), "CtxtText_InputText", "test")
        word_text_data = np.load(file_name)[:,100:]
        file_name = file_format % (str(index), "Label", "test")
        label_data = np.load(file_name)
        X_word = word_text_data
        X_char = char_text_data
        y = label_data
        log_path = dataPath + "/output/"+model_name+"/"+str(time_index)+"/"+str(index)
        dir_list = os.listdir(log_path)
        # Checkpoints are saved as "weights.<epoch>.hdf5"; the highest
        # epoch number is the last/best model kept for this fold.
        max_epoch_num = max(
            (int(f.split('.')[1]) for f in dir_list if f.startswith('weights')),
            default=0)
        model = load_model(log_path+"/weights."+str(max_epoch_num).zfill(2)+".hdf5")
        preds = model.predict([X_word, X_char], batch_size=128)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            _report = classification_report(np.argmax(y, axis=1), np.argmax(preds, axis=1), digits=4, target_names=labels)
        report_list.append(_report)
    tot_report = report_average(report_list)
    print(tot_report)
| [
"numpy.load",
"os.path.abspath",
"numpy.delete",
"warnings.simplefilter",
"numpy.argmax",
"keras.callbacks.ModelCheckpoint",
"numpy.asarray",
"numpy.zeros",
"sklearn.metrics.classification_report",
"numpy.insert",
"keras.backend.set_learning_phase",
"keras.callbacks.TensorBoard",
"keras.call... | [((1208, 1234), 'numpy.zeros', 'np.zeros', (['(max_len, N_DIM)'], {}), '((max_len, N_DIM))\n', (1216, 1234), True, 'import numpy as np\n'), ((4278, 4316), 'numpy.load', 'np.load', (["(targetPath + '/embedding.npy')"], {}), "(targetPath + '/embedding.npy')\n", (4285, 4316), True, 'import numpy as np\n'), ((4398, 4421), 'keras.backend.set_learning_phase', 'K.set_learning_phase', (['(1)'], {}), '(1)\n', (4418, 4421), True, 'from keras import backend as K\n'), ((4745, 4833), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'log_path', 'histogram_freq': '(0)', 'write_graph': '(True)', 'write_images': '(True)'}), '(log_dir=log_path, histogram_freq=0, write_graph=True,\n write_images=True)\n', (4756, 4833), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, Callback\n'), ((4848, 4995), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(log_path + '/weights.{epoch:02d}.hdf5')"], {'monitor': '"""val_acc"""', 'save_best_only': '(True)', 'save_weights_only': '(False)', 'mode': '"""max"""', 'verbose': '(1)'}), "(log_path + '/weights.{epoch:02d}.hdf5', monitor='val_acc',\n save_best_only=True, save_weights_only=False, mode='max', verbose=1)\n", (4863, 4995), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, Callback\n'), ((5034, 5119), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_acc"""', 'min_delta': '(0)', 'patience': '(1)', 'verbose': '(0)', 'mode': '"""max"""'}), "(monitor='val_acc', min_delta=0, patience=1, verbose=0, mode='max'\n )\n", (5047, 5119), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, Callback\n'), ((6835, 6858), 'keras.backend.set_learning_phase', 'K.set_learning_phase', (['(1)'], {}), '(1)\n', (6855, 6858), True, 'from keras import backend as K\n'), ((7243, 7331), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'log_path', 'histogram_freq': '(0)', 'write_graph': '(True)', 
'write_images': '(True)'}), '(log_dir=log_path, histogram_freq=0, write_graph=True,\n write_images=True)\n', (7254, 7331), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, Callback\n'), ((7346, 7493), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(log_path + '/weights.{epoch:02d}.hdf5')"], {'monitor': '"""val_acc"""', 'save_best_only': '(True)', 'save_weights_only': '(False)', 'mode': '"""max"""', 'verbose': '(1)'}), "(log_path + '/weights.{epoch:02d}.hdf5', monitor='val_acc',\n save_best_only=True, save_weights_only=False, mode='max', verbose=1)\n", (7361, 7493), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, Callback\n'), ((7532, 7617), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_acc"""', 'min_delta': '(0)', 'patience': '(1)', 'verbose': '(0)', 'mode': '"""max"""'}), "(monitor='val_acc', min_delta=0, patience=1, verbose=0, mode='max'\n )\n", (7545, 7617), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, Callback\n'), ((8799, 8837), 'numpy.load', 'np.load', (["(targetPath + '/embedding.npy')"], {}), "(targetPath + '/embedding.npy')\n", (8806, 8837), True, 'import numpy as np\n'), ((8919, 8942), 'keras.backend.set_learning_phase', 'K.set_learning_phase', (['(1)'], {}), '(1)\n', (8939, 8942), True, 'from keras import backend as K\n'), ((9523, 9611), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'log_path', 'histogram_freq': '(0)', 'write_graph': '(True)', 'write_images': '(True)'}), '(log_dir=log_path, histogram_freq=0, write_graph=True,\n write_images=True)\n', (9534, 9611), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, Callback\n'), ((9626, 9773), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(log_path + '/weights.{epoch:02d}.hdf5')"], {'monitor': '"""val_acc"""', 'save_best_only': '(True)', 'save_weights_only': '(False)', 'mode': '"""max"""', 'verbose': '(1)'}), 
"(log_path + '/weights.{epoch:02d}.hdf5', monitor='val_acc',\n save_best_only=True, save_weights_only=False, mode='max', verbose=1)\n", (9641, 9773), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, Callback\n'), ((9812, 9897), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_acc"""', 'min_delta': '(0)', 'patience': '(1)', 'verbose': '(0)', 'mode': '"""max"""'}), "(monitor='val_acc', min_delta=0, patience=1, verbose=0, mode='max'\n )\n", (9825, 9897), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, Callback\n'), ((592, 617), 'numpy.argmax', 'np.argmax', (['y_eval'], {'axis': '(1)'}), '(y_eval, axis=1)\n', (601, 617), True, 'import numpy as np\n'), ((932, 957), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (947, 957), False, 'import os\n'), ((1061, 1075), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1072, 1075), False, 'import pickle\n'), ((1289, 1304), 'numpy.zeros', 'np.zeros', (['N_DIM'], {}), '(N_DIM)\n', (1297, 1304), True, 'import numpy as np\n'), ((1709, 1760), 'numpy.array', 'np.array', (["[[l, '0', '0', '0', '0'] for l in labels]"], {}), "([[l, '0', '0', '0', '0'] for l in labels])\n", (1717, 1760), True, 'import numpy as np\n'), ((2841, 2859), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (2848, 2859), True, 'import numpy as np\n'), ((3006, 3024), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (3013, 3024), True, 'import numpy as np\n'), ((3362, 3380), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (3369, 3380), True, 'import numpy as np\n'), ((3511, 3539), 'numpy.asarray', 'np.asarray', (['text_data[split]'], {}), '(text_data[split])\n', (3521, 3539), True, 'import numpy as np\n'), ((3608, 3626), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (3615, 3626), True, 'import numpy as np\n'), ((3989, 4014), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), 
'(__file__)\n', (4004, 4014), False, 'import os\n'), ((4142, 4156), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4153, 4156), False, 'import pickle\n'), ((6738, 6763), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (6753, 6763), False, 'import os\n'), ((8510, 8535), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (8525, 8535), False, 'import os\n'), ((8663, 8677), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8674, 8677), False, 'import pickle\n'), ((11080, 11105), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (11095, 11105), False, 'import os\n'), ((11392, 11410), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (11399, 11410), True, 'import numpy as np\n'), ((11836, 11856), 'os.listdir', 'os.listdir', (['log_path'], {}), '(log_path)\n', (11846, 11856), False, 'import os\n'), ((12689, 12714), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (12704, 12714), False, 'import os\n'), ((12976, 12994), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (12983, 12994), True, 'import numpy as np\n'), ((13095, 13121), 'numpy.asarray', 'np.asarray', (['text_data_list'], {}), '(text_data_list)\n', (13105, 13121), True, 'import numpy as np\n'), ((13198, 13216), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (13205, 13216), True, 'import numpy as np\n'), ((13351, 13371), 'os.listdir', 'os.listdir', (['log_path'], {}), '(log_path)\n', (13361, 13371), False, 'import os\n'), ((14277, 14302), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (14292, 14302), False, 'import os\n'), ((14547, 14565), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (14554, 14565), True, 'import numpy as np\n'), ((14683, 14714), 'numpy.asarray', 'np.asarray', (['char_text_data_list'], {}), '(char_text_data_list)\n', (14693, 14714), True, 'import numpy as np\n'), ((14909, 14927), 'numpy.load', 'np.load', 
(['file_name'], {}), '(file_name)\n', (14916, 14927), True, 'import numpy as np\n'), ((15093, 15113), 'os.listdir', 'os.listdir', (['log_path'], {}), '(log_path)\n', (15103, 15113), False, 'import os\n'), ((2168, 2201), 'numpy.concatenate', 'np.concatenate', (['(data, avg_total)'], {}), '((data, avg_total))\n', (2182, 2201), True, 'import numpy as np\n'), ((11583, 11601), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (11590, 11601), True, 'import numpy as np\n'), ((12311, 12336), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (12334, 12336), False, 'import warnings\n'), ((12341, 12372), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (12362, 12372), False, 'import warnings\n'), ((13743, 13768), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (13766, 13768), False, 'import warnings\n'), ((13773, 13804), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (13794, 13804), False, 'import warnings\n'), ((14806, 14824), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (14813, 14824), True, 'import numpy as np\n'), ((15495, 15520), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (15518, 15520), False, 'import warnings\n'), ((15525, 15556), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (15546, 15556), False, 'import warnings\n'), ((819, 885), 'sklearn.metrics.classification_report', 'classification_report', (['self.truth', 'preds'], {'target_names': 'self.labels'}), '(self.truth, preds, target_names=self.labels)\n', (840, 885), False, 'from sklearn.metrics import classification_report\n'), ((1830, 1872), 'numpy.insert', 'np.insert', (['data', 'i', 'masked_data[i]'], {'axis': '(0)'}), '(data, i, masked_data[i], axis=0)\n', (1839, 1872), True, 'import numpy as np\n'), ((1882, 1903), 'numpy.delete', 'np.delete', (['data', '(0)', '(1)'], {}), '(data, 0, 
1)\n', (1891, 1903), True, 'import numpy as np\n'), ((12408, 12428), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (12417, 12428), True, 'import numpy as np\n'), ((12430, 12454), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (12439, 12454), True, 'import numpy as np\n'), ((13840, 13860), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (13849, 13860), True, 'import numpy as np\n'), ((13862, 13886), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (13871, 13886), True, 'import numpy as np\n'), ((15592, 15612), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (15601, 15612), True, 'import numpy as np\n'), ((15614, 15638), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (15623, 15638), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import os
import pandas as pd
from skimage.feature import greycomatrix, greycoprops
from skimage import io, color, img_as_ubyte
# Quantize a feature value into 5 grade buckets.
def fivegrade(data):
    """Quantize a value (expected in [0, 1]) into one of five grades.

    Buckets: [-inf, 0.2) -> 0, [0.2, 0.4) -> 1, [0.4, 0.6) -> 2,
    [0.6, 0.8) -> 3, [0.8, +inf) -> 4.

    :param data: feature value, normally in the [0, 1] range
    :return: integer grade in 0..4
    """
    # BUG FIX: the first test used to be "0 < data < 0.2", which excluded
    # 0.0 itself (and any non-positive value), making it fall through the
    # whole chain to the final else and come out as grade 4 instead of 0.
    if data < 0.2:
        grade = 0
    elif data < 0.4:
        grade = 1
    elif data < 0.6:
        grade = 2
    elif data < 0.8:
        grade = 3
    else:
        grade = 4
    return grade
# Third central moment (signed cube root), used as a colour-skewness feature.
def val(x=None):
    """Return the signed cube root of the third central moment of *x*."""
    third_moment = ((x - x.mean()) ** 3).mean()
    return np.sign(third_moment) * abs(third_moment) ** (1 / 3)
def getColourData(img):
    """Compute colour-moment features for an image and grade three of them.

    Per channel (normalised to [0, 1]) the mean, standard deviation and
    signed third central moment are computed; the graded outputs are the
    first channel's third moment, the second channel's std and the second
    channel's third moment.

    NOTE(review): cv2.split on an OpenCV (BGR) image yields channels in
    (b, g, r) order; the r/g naming below follows the original code and
    may actually refer to blue/green -- confirm the intended channel order.

    :param img: 3-channel image array
    :return: list of three integer grades
    """
    ch_r, ch_g, ch_b = cv2.split(img)
    channels = [np.asarray(ch) / 255 for ch in (ch_r, ch_g, ch_b)]
    stds = [ch.std() for ch in channels]
    thirds = [val(ch) for ch in channels]
    rsj = fivegrade(thirds[0])   # first channel, 3rd moment
    gej = fivegrade(stds[1])     # second channel, 2nd moment (std)
    gsj = fivegrade(thirds[1])   # second channel, 3rd moment
    return [rsj, gej, gsj]
def getTexture(img):
    """Extract GLCM texture features from an RGB image.

    The image is converted to 8-bit greyscale, quantized into 16 grey
    levels, and a co-occurrence matrix is built at distance 2 over four
    directions; the rounded mean contrast and mean ASM are returned.

    :param img: RGB image array
    :return: [contrast, asm] as rounded ints
    """
    grey = img_as_ubyte(color.rgb2gray(img))
    level_edges = np.array([0, 16, 32, 48, 64, 80, 96, 112, 128, 144,
                            160, 176, 192, 208, 224, 240, 255])
    quantized = np.digitize(grey, level_edges)
    glcm = greycomatrix(quantized, [2],
                        [0, np.pi / 4, np.pi / 2, 3 * np.pi / 4], levels=256)
    mean_contrast = np.mean(greycoprops(glcm, 'contrast'))
    mean_asm = np.mean(greycoprops(glcm, 'ASM'))
    return [int(round(mean_contrast)), int(round(mean_asm))]
def createcsv(path):
    """Build a feature table for every image under *path*.

    For each image: smooth and Otsu-threshold the H (HSV) and A/B (LAB)
    channels, merge the three binary masks, mask the original image with
    it, then compute colour and texture features on the masked result.
    A row of (5 features, file name) per image is written to 'labels.txt'.

    :param path: directory containing the input images
    """
    rows = []
    for image in os.listdir(path):
        name = path + '\\' + image
        bgr = cv2.imread(name)
        hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
        lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
        # Smooth H, A and B before thresholding to reduce noise.
        blurred_h = cv2.GaussianBlur(hsv[..., 0], (3, 3), 0)
        blurred_a = cv2.GaussianBlur(lab[..., 1], (3, 3), 0)
        blurred_b = cv2.GaussianBlur(lab[..., 2], (3, 3), 0)
        # Otsu binarisation per channel, combined into a 3-channel mask.
        _, mask_h = cv2.threshold(blurred_h, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        _, mask_a = cv2.threshold(blurred_a, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        _, mask_b = cv2.threshold(blurred_b, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        mask = cv2.merge((mask_h, mask_a, mask_b))
        segmented = cv2.bitwise_and(mask, bgr)
        colour_features = getColourData(segmented)   # r 3rd moment, g 2nd/3rd moments
        texture_features = getTexture(segmented)     # contrast, ASM
        features = colour_features + texture_features
        rows.append((features[0], features[1], features[2], features[3], features[4], image))
    labels = pd.DataFrame(rows)
    labels.to_csv('labels.txt', header=None, index=None)
| [
"pandas.DataFrame",
"cv2.GaussianBlur",
"skimage.color.rgb2gray",
"cv2.bitwise_and",
"cv2.cvtColor",
"skimage.feature.greycoprops",
"numpy.asarray",
"cv2.threshold",
"skimage.img_as_ubyte",
"skimage.feature.greycomatrix",
"cv2.imread",
"cv2.split",
"numpy.array",
"numpy.mean",
"numpy.sig... | [((633, 647), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (642, 647), False, 'import cv2\n'), ((1131, 1150), 'skimage.color.rgb2gray', 'color.rgb2gray', (['img'], {}), '(img)\n', (1145, 1150), False, 'from skimage import io, color, img_as_ubyte\n'), ((1164, 1182), 'skimage.img_as_ubyte', 'img_as_ubyte', (['gray'], {}), '(gray)\n', (1176, 1182), False, 'from skimage import io, color, img_as_ubyte\n'), ((1195, 1286), 'numpy.array', 'np.array', (['[0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 255]'], {}), '([0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224,\n 240, 255])\n', (1203, 1286), True, 'import numpy as np\n'), ((1295, 1319), 'numpy.digitize', 'np.digitize', (['image', 'bins'], {}), '(image, bins)\n', (1306, 1319), True, 'import numpy as np\n'), ((1346, 1423), 'skimage.feature.greycomatrix', 'greycomatrix', (['inds', '[2]', '[0, np.pi / 4, np.pi / 2, 3 * np.pi / 4]'], {'levels': '(256)'}), '(inds, [2], [0, np.pi / 4, np.pi / 2, 3 * np.pi / 4], levels=256)\n', (1358, 1423), False, 'from skimage.feature import greycomatrix, greycoprops\n'), ((1440, 1483), 'skimage.feature.greycoprops', 'greycoprops', (['matrix_coocurrence', '"""contrast"""'], {}), "(matrix_coocurrence, 'contrast')\n", (1451, 1483), False, 'from skimage.feature import greycomatrix, greycoprops\n'), ((1495, 1533), 'skimage.feature.greycoprops', 'greycoprops', (['matrix_coocurrence', '"""ASM"""'], {}), "(matrix_coocurrence, 'ASM')\n", (1506, 1533), False, 'from skimage.feature import greycomatrix, greycoprops\n'), ((1731, 1747), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1741, 1747), False, 'import os\n'), ((3085, 3105), 'pandas.DataFrame', 'pd.DataFrame', (['labels'], {}), '(labels)\n', (3097, 3105), True, 'import pandas as pd\n'), ((536, 548), 'numpy.sign', 'np.sign', (['mid'], {}), '(mid)\n', (543, 548), True, 'import numpy as np\n'), ((658, 671), 'numpy.asarray', 'np.asarray', (['r'], {}), '(r)\n', (668, 671), 
True, 'import numpy as np\n'), ((688, 701), 'numpy.asarray', 'np.asarray', (['g'], {}), '(g)\n', (698, 701), True, 'import numpy as np\n'), ((718, 731), 'numpy.asarray', 'np.asarray', (['b'], {}), '(b)\n', (728, 731), True, 'import numpy as np\n'), ((1554, 1571), 'numpy.mean', 'np.mean', (['contrast'], {}), '(contrast)\n', (1561, 1571), True, 'import numpy as np\n'), ((1573, 1585), 'numpy.mean', 'np.mean', (['asm'], {}), '(asm)\n', (1580, 1585), True, 'import numpy as np\n'), ((1800, 1816), 'cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (1810, 1816), False, 'import cv2\n'), ((1872, 1908), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (1884, 1908), False, 'import cv2\n'), ((2015, 2051), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2LAB'], {}), '(img, cv2.COLOR_BGR2LAB)\n', (2027, 2051), False, 'import cv2\n'), ((2158, 2188), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['H', '(3, 3)', '(0)'], {}), '(H, (3, 3), 0)\n', (2174, 2188), False, 'import cv2\n'), ((2206, 2236), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['A', '(3, 3)', '(0)'], {}), '(A, (3, 3), 0)\n', (2222, 2236), False, 'import cv2\n'), ((2254, 2284), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['B', '(3, 3)', '(0)'], {}), '(B, (3, 3), 0)\n', (2270, 2284), False, 'import cv2\n'), ((2310, 2375), 'cv2.threshold', 'cv2.threshold', (['img_H', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(img_H, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (2323, 2375), False, 'import cv2\n'), ((2401, 2466), 'cv2.threshold', 'cv2.threshold', (['img_A', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(img_A, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (2414, 2466), False, 'import cv2\n'), ((2492, 2557), 'cv2.threshold', 'cv2.threshold', (['img_B', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(img_B, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (2505, 2557), False, 'import cv2\n'), ((2577, 2615), 
'cv2.merge', 'cv2.merge', (['(threshh, thresha, threshb)'], {}), '((threshh, thresha, threshb))\n', (2586, 2615), False, 'import cv2\n'), ((2674, 2703), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img_hab', 'img'], {}), '(img_hab, img)\n', (2689, 2703), False, 'import cv2\n')] |
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable, grad
### Messy code for continuous translation
def interpolate_vae_3d(vae,img_1,img_2,img_3,attr_inters = 5,id_inters = 3, attr_max = 1.0, attr_dim=2,
                       random_test=False,return_each_layer=False, sd =1, disentangle_dim=None):
    """Build an image grid interpolating between three identities and attributes.

    Encodes the three input images with ``vae``, then decodes linear
    interpolations between each pair of identity codes (1->2, 2->3, 3->1)
    while sweeping the attribute vector through three "corners" split in
    thirds of ``attr_dim``.

    :param vae: model exposing ``vae(x, return_enc=True)`` and
        ``vae.decode(enc, insert_attrs=...)``; runs on CUDA.
    :param img_1: first source image (unbatched array-like).
    :param img_2: second source image.
    :param img_3: third source image.
    :param attr_inters: number of attribute interpolation steps per segment.
    :param id_inters: number of identity interpolation steps per pair.
    :param attr_max: upper bound of the attribute sweep (lower bound is
        1 - attr_max).
    :param attr_dim: total dimensionality of the attribute vector.
    :param random_test: replace the encodings with seeded Gaussian noise.
    :param return_each_layer: currently unimplemented (no-op branch).
    :param sd: numpy seed used when ``random_test`` is True.
    :param disentangle_dim: if set, the trailing ``disentangle_dim``
        attribute entries are swept pairwise as (1-alpha, alpha) while the
        remaining entries are split in thirds.
    :return: one numpy image (H, W, C) tiling all decoded interpolations.
    """
    attr_min = 1.0-attr_max
    alphas = np.linspace(attr_min, attr_max, attr_inters)
    # Build the list of attribute vectors: three segments that rotate which
    # third of the attribute vector is active; slicing with [1:] / [1:-1]
    # avoids duplicating shared segment endpoints.
    if disentangle_dim:
        alphas = [torch.FloatTensor([*([1 - alpha]*int((attr_dim-disentangle_dim)/3)),
                                   *([0]*int((attr_dim-disentangle_dim)/3)),
                                   *([alpha]*int((attr_dim-disentangle_dim)/3)),
                                   *([ v for i in range(int(disentangle_dim/2)) for v in [1-alpha,alpha]])]) for alpha in alphas]\
                +[torch.FloatTensor([*([0]*int((attr_dim-disentangle_dim)/3)),
                                   *([alpha]*int((attr_dim-disentangle_dim)/3)),
                                   *([1-alpha]*int((attr_dim-disentangle_dim)/3)),
                                   *([ v for i in range(int(disentangle_dim/2)) for v in [alpha,1-alpha]])]) for alpha in alphas[1:]]\
                +[torch.FloatTensor([*([alpha]*int((attr_dim-disentangle_dim)/3)),
                                   *([1 - alpha]*int((attr_dim-disentangle_dim)/3)),
                                   *([0]*int((attr_dim-disentangle_dim)/3)),
                                   *([ v for i in range(int(disentangle_dim/2)) for v in [1-alpha,alpha]])]) for alpha in alphas[1:-1]]
    else:
        alphas = [torch.FloatTensor([*([1 - alpha]*int(attr_dim/3)),
                                   *([0]*int(attr_dim/3)),
                                   *([alpha]*int(attr_dim/3))]) for alpha in alphas]\
                +[torch.FloatTensor([*([0]*int(attr_dim/3)),
                                   *([alpha]*int(attr_dim/3)),
                                   *([1-alpha]*int(attr_dim/3))]) for alpha in alphas[1:]]\
                +[torch.FloatTensor([*([alpha]*int(attr_dim/3)),
                                   *([1 - alpha]*int(attr_dim/3)),
                                   *([0]*int(attr_dim/3))]) for alpha in alphas[1:-1]]
    # Encode each image (batched to size 1) and pull the code back to numpy.
    enc_1 = vae(Variable(torch.FloatTensor(img_1).unsqueeze(0).cuda()),return_enc=True).cpu().data.numpy()
    enc_2 = vae(Variable(torch.FloatTensor(img_2).unsqueeze(0).cuda()),return_enc=True).cpu().data.numpy()
    enc_3 = vae(Variable(torch.FloatTensor(img_3).unsqueeze(0).cuda()),return_enc=True).cpu().data.numpy()
    if random_test:
        # Replace the real encodings with reproducible Gaussian noise.
        np.random.seed(sd)
        enc_1 = np.random.randn(*[i for i in enc_1.shape])
        enc_2 = np.random.randn(*[i for i in enc_2.shape])
        enc_3 = np.random.randn(*[i for i in enc_3.shape])
    if return_each_layer:
        pass
    else:
        d1_outputs = []
        d2_outputs = []
        d3_outputs = []
        for i in range(id_inters+1):
            # ID 1 -> 2
            row = []
            tmp_input = Variable(torch.FloatTensor(enc_1+i*((enc_2-enc_1)/id_inters)).cuda())
            for alpha in alphas:
                alpha = Variable(alpha.unsqueeze(0).expand((1, attr_dim)).cuda())
                tmp = vae.decode( tmp_input, insert_attrs = alpha)
                # transpose(-2, -1, -3): CHW -> HWC for image concatenation.
                row.append(tmp.cpu().data.numpy()[0].transpose(-2,-1,-3))
            d1_outputs.append(row)
            # ID 2 -> 3
            row = []
            tmp_input = Variable(torch.FloatTensor(enc_2+i*((enc_3-enc_2)/id_inters)).cuda())
            for alpha in alphas:
                alpha = Variable(alpha.unsqueeze(0).expand((1, attr_dim)).cuda())
                tmp = vae.decode( tmp_input, insert_attrs = alpha)
                row.append(tmp.cpu().data.numpy()[0].transpose(-2,-1,-3))
            d2_outputs.append(row)
            # ID 3 -> 1
            row = []
            tmp_input = Variable(torch.FloatTensor(enc_3+i*((enc_1-enc_3)/id_inters)).cuda())
            for alpha in alphas:
                alpha = Variable(alpha.unsqueeze(0).expand((1, attr_dim)).cuda())
                tmp = vae.decode( tmp_input, insert_attrs = alpha)
                row.append(tmp.cpu().data.numpy()[0].transpose(-2,-1,-3))
            d3_outputs.append(row)
        # Stitch tiles: concatenate each row along width, then all rows
        # along height, in segment order 1->2, 2->3, 3->1.
        fig = []
        for i in range(id_inters+1):
            fig.append(np.concatenate(d1_outputs[i],axis=-2))
        for i in range(id_inters+1):
            fig.append(np.concatenate(d2_outputs[i],axis=-2))
        for i in range(id_inters+1):
            fig.append(np.concatenate(d3_outputs[i],axis=-2))
        fig = np.concatenate(fig,axis=-3)
        return fig
return fig
def interpolate_vae(vae,img_1,img_2,attr_inters = 7,id_inters = 6, attr_max = 1.2,
                    attr_dim=2,random_test=False,return_each_layer=False, sd =1,
                    disentangle_dim=None):
    """Build an image grid interpolating between two identities and attributes.

    Encodes the two input images with ``vae`` and decodes a grid: rows
    interpolate the identity code from img_1 to img_2, columns sweep the
    attribute vector from (1-alpha, alpha) corners.

    :param vae: model exposing ``vae(x, return_enc=True)`` and
        ``vae.decode(enc, attrs)``; runs on CUDA.
    :param img_1: first source image (unbatched array-like).
    :param img_2: second source image.
    :param attr_inters: number of attribute interpolation steps (columns).
    :param id_inters: number of identity interpolation steps (rows).
    :param attr_max: upper bound of the attribute sweep (lower bound is
        1 - attr_max; values > 1 extrapolate past the corners).
    :param attr_dim: total dimensionality of the attribute vector.
    :param random_test: replace the encodings with seeded Gaussian noise.
    :param return_each_layer: currently unimplemented (no-op branch).
    :param sd: numpy seed used when ``random_test`` is True.
    :param disentangle_dim: if set, the trailing ``disentangle_dim``
        attribute entries are swept pairwise as (1-alpha, alpha) while the
        remaining entries are split in halves.
    :return: one numpy image (H, W, C) tiling all decoded interpolations.
    """
    attr_min = 1.0-attr_max
    alphas = np.linspace(attr_min, attr_max, attr_inters)
    # Attribute vectors: first half weighted (1-alpha), second half alpha.
    if disentangle_dim:
        alphas = [torch.FloatTensor([*([1 - alpha]*int((attr_dim-disentangle_dim)/2)),
                                   *([alpha]*int((attr_dim-disentangle_dim)/2)),
                                   *([ v for i in range(int(disentangle_dim/2)) for v in [1-alpha,alpha]])]) for alpha in alphas]
    else:
        alphas = [torch.FloatTensor([*([1 - alpha]*int(attr_dim/2)), *([alpha]*int(attr_dim/2))]) for alpha in alphas]
    # Encode each image (batched to size 1) and pull the code back to numpy.
    enc_1 = vae(Variable(torch.FloatTensor(img_1).unsqueeze(0).cuda()),return_enc=True).cpu().data.numpy()
    enc_2 = vae(Variable(torch.FloatTensor(img_2).unsqueeze(0).cuda()),return_enc=True).cpu().data.numpy()
    if random_test:
        # Replace the real encodings with reproducible Gaussian noise.
        np.random.seed(sd)
        enc_1 = np.random.randn(*[i for i in enc_1.shape])
        enc_2 = np.random.randn(*[i for i in enc_2.shape])
    if return_each_layer:
        pass
    else:
        outputs = []
        for i in range(id_inters+1):
            # Linear interpolation of the identity code for this row.
            tmp_input = Variable(torch.FloatTensor(enc_1+i*((enc_2-enc_1)/id_inters)).cuda())
            for alpha in alphas:
                alpha = Variable(alpha.unsqueeze(0).expand((1, attr_dim)).cuda())
                tmp = vae.decode(tmp_input, alpha)
                if len(tmp.size())==2:
                    # Flat decoder output: reshape to a 1-channel 32x32 image.
                    bs = tmp.size()[0]
                    tmp = tmp.view(bs,1,32,32)
                # transpose(-2, -1, -3): CHW -> HWC for image concatenation.
                outputs.append(tmp.cpu().data.numpy()[0].transpose(-2,-1,-3))
        # Stitch tiles: each row along width, then rows along height.
        fig = []
        for i in range(id_inters+1):
            fig.append(np.concatenate(outputs[i*attr_inters:(i+1)*attr_inters],axis=-2))
        fig = np.concatenate(fig,axis=-3)
        return fig
return fig
def calc_gradient_penalty(netD, real_data, fake_data, use_gpu = True, dec_output=2):
    """WGAN-GP gradient penalty on random interpolates of real/fake batches.

    Reference: https://github.com/caogang/wgan-gp/blob/master/gan_mnist.py

    :param netD: discriminator; its return arity is selected by dec_output
        (2 -> (score, _), 3 -> (score, _, _), otherwise a bare score).
    :param real_data: batch of real samples.
    :param fake_data: batch of generated samples (same shape).
    :param use_gpu: move tensors to CUDA before evaluation.
    :param dec_output: number of values netD returns (see above).
    :return: scalar penalty ((||grad||_2 - 1)^2 averaged over the batch).
    """
    batch_size = real_data.shape[0]
    mix = torch.rand(batch_size, 1)
    if len(real_data.shape) == 4:
        # Image batch: broadcast the per-sample weight over C x H x W.
        mix = mix.unsqueeze(2).unsqueeze(3)
    mix = mix.expand(real_data.size())
    if use_gpu:
        mix = mix.cuda()
    interpolates = mix * real_data + (1 - mix) * fake_data
    if use_gpu:
        interpolates = interpolates.cuda()
    interpolates = Variable(interpolates, requires_grad=True)
    if dec_output == 2:
        disc_interpolates, _ = netD(interpolates)
    elif dec_output == 3:
        disc_interpolates, _, _ = netD(interpolates)
    else:
        disc_interpolates = netD(interpolates)
    grad_ones = torch.ones(disc_interpolates.size())
    if use_gpu:
        grad_ones = grad_ones.cuda()
    gradients = grad(outputs=disc_interpolates, inputs=interpolates,
                     grad_outputs=grad_ones,
                     create_graph=True, retain_graph=True, only_inputs=True)[0]
    return ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty
def vae_loss(recon_x, x, mu, logvar, rec_loss):
    """Return (reconstruction loss, KL divergence) for a Gaussian VAE.

    :param recon_x: decoder output.
    :param x: reconstruction target.
    :param mu: latent mean.
    :param logvar: latent log-variance.
    :param rec_loss: callable computing the reconstruction term.
    :return: tuple (reconstruction_loss, kl_divergence).
    """
    reconstruction = rec_loss(recon_x, x)
    # Closed-form KL(N(mu, exp(logvar)) || N(0, 1)), summed over all dims.
    kl_divergence = 0.5 * torch.sum(mu.pow(2) + logvar.exp() - logvar - 1)
    return reconstruction, kl_divergence
| [
"numpy.random.seed",
"numpy.random.randn",
"torch.autograd.Variable",
"torch.FloatTensor",
"numpy.linspace",
"torch.rand",
"numpy.concatenate"
] | [((383, 427), 'numpy.linspace', 'np.linspace', (['attr_min', 'attr_max', 'attr_inters'], {}), '(attr_min, attr_max, attr_inters)\n', (394, 427), True, 'import numpy as np\n'), ((4948, 4992), 'numpy.linspace', 'np.linspace', (['attr_min', 'attr_max', 'attr_inters'], {}), '(attr_min, attr_max, attr_inters)\n', (4959, 4992), True, 'import numpy as np\n'), ((6798, 6831), 'torch.rand', 'torch.rand', (['real_data.shape[0]', '(1)'], {}), '(real_data.shape[0], 1)\n', (6808, 6831), False, 'import torch\n'), ((7149, 7191), 'torch.autograd.Variable', 'Variable', (['interpolates'], {'requires_grad': '(True)'}), '(interpolates, requires_grad=True)\n', (7157, 7191), False, 'from torch.autograd import Variable, grad\n'), ((2631, 2649), 'numpy.random.seed', 'np.random.seed', (['sd'], {}), '(sd)\n', (2645, 2649), True, 'import numpy as np\n'), ((2666, 2708), 'numpy.random.randn', 'np.random.randn', (['*[i for i in enc_1.shape]'], {}), '(*[i for i in enc_1.shape])\n', (2681, 2708), True, 'import numpy as np\n'), ((2725, 2767), 'numpy.random.randn', 'np.random.randn', (['*[i for i in enc_2.shape]'], {}), '(*[i for i in enc_2.shape])\n', (2740, 2767), True, 'import numpy as np\n'), ((2784, 2826), 'numpy.random.randn', 'np.random.randn', (['*[i for i in enc_3.shape]'], {}), '(*[i for i in enc_3.shape])\n', (2799, 2826), True, 'import numpy as np\n'), ((4642, 4670), 'numpy.concatenate', 'np.concatenate', (['fig'], {'axis': '(-3)'}), '(fig, axis=-3)\n', (4656, 4670), True, 'import numpy as np\n'), ((5700, 5718), 'numpy.random.seed', 'np.random.seed', (['sd'], {}), '(sd)\n', (5714, 5718), True, 'import numpy as np\n'), ((5735, 5777), 'numpy.random.randn', 'np.random.randn', (['*[i for i in enc_1.shape]'], {}), '(*[i for i in enc_1.shape])\n', (5750, 5777), True, 'import numpy as np\n'), ((5794, 5836), 'numpy.random.randn', 'np.random.randn', (['*[i for i in enc_2.shape]'], {}), '(*[i for i in enc_2.shape])\n', (5809, 5836), True, 'import numpy as np\n'), ((6579, 6607), 
'numpy.concatenate', 'np.concatenate', (['fig'], {'axis': '(-3)'}), '(fig, axis=-3)\n', (6593, 6607), True, 'import numpy as np\n'), ((4391, 4429), 'numpy.concatenate', 'np.concatenate', (['d1_outputs[i]'], {'axis': '(-2)'}), '(d1_outputs[i], axis=-2)\n', (4405, 4429), True, 'import numpy as np\n'), ((4490, 4528), 'numpy.concatenate', 'np.concatenate', (['d2_outputs[i]'], {'axis': '(-2)'}), '(d2_outputs[i], axis=-2)\n', (4504, 4528), True, 'import numpy as np\n'), ((4589, 4627), 'numpy.concatenate', 'np.concatenate', (['d3_outputs[i]'], {'axis': '(-2)'}), '(d3_outputs[i], axis=-2)\n', (4603, 4627), True, 'import numpy as np\n'), ((6499, 6570), 'numpy.concatenate', 'np.concatenate', (['outputs[i * attr_inters:(i + 1) * attr_inters]'], {'axis': '(-2)'}), '(outputs[i * attr_inters:(i + 1) * attr_inters], axis=-2)\n', (6513, 6570), True, 'import numpy as np\n'), ((3079, 3139), 'torch.FloatTensor', 'torch.FloatTensor', (['(enc_1 + i * ((enc_2 - enc_1) / id_inters))'], {}), '(enc_1 + i * ((enc_2 - enc_1) / id_inters))\n', (3096, 3139), False, 'import torch\n'), ((3509, 3569), 'torch.FloatTensor', 'torch.FloatTensor', (['(enc_2 + i * ((enc_3 - enc_2) / id_inters))'], {}), '(enc_2 + i * ((enc_3 - enc_2) / id_inters))\n', (3526, 3569), False, 'import torch\n'), ((3939, 3999), 'torch.FloatTensor', 'torch.FloatTensor', (['(enc_3 + i * ((enc_1 - enc_3) / id_inters))'], {}), '(enc_3 + i * ((enc_1 - enc_3) / id_inters))\n', (3956, 3999), False, 'import torch\n'), ((5983, 6043), 'torch.FloatTensor', 'torch.FloatTensor', (['(enc_1 + i * ((enc_2 - enc_1) / id_inters))'], {}), '(enc_1 + i * ((enc_2 - enc_1) / id_inters))\n', (6000, 6043), False, 'import torch\n'), ((2306, 2330), 'torch.FloatTensor', 'torch.FloatTensor', (['img_1'], {}), '(img_1)\n', (2323, 2330), False, 'import torch\n'), ((2413, 2437), 'torch.FloatTensor', 'torch.FloatTensor', (['img_2'], {}), '(img_2)\n', (2430, 2437), False, 'import torch\n'), ((2520, 2544), 'torch.FloatTensor', 'torch.FloatTensor', (['img_3'], 
{}), '(img_3)\n', (2537, 2544), False, 'import torch\n'), ((5482, 5506), 'torch.FloatTensor', 'torch.FloatTensor', (['img_1'], {}), '(img_1)\n', (5499, 5506), False, 'import torch\n'), ((5589, 5613), 'torch.FloatTensor', 'torch.FloatTensor', (['img_2'], {}), '(img_2)\n', (5606, 5613), False, 'import torch\n')] |
import unittest
import numpy
import theano
from theano.tests import unittest_tools as utt
# Skip tests if cuda_ndarray is not available.
from nose.plugins.skip import SkipTest
import theano.sandbox.cuda as cuda_ndarray
if not cuda_ndarray.cuda_available:
    raise SkipTest('Optional package cuda not available')
# pycuda is required to drive the GPU FFT routines.
from theano.misc.pycuda_init import pycuda_available
if not pycuda_available:
    raise SkipTest('Optional package pycuda not available')
# scikits.cuda provides the CUFFT bindings that fftconv builds on.
from theano.sandbox.cuda.fftconv import scikits_cuda_available
if not scikits_cuda_available:
    raise SkipTest('Optional package scikits.cuda not available')
from theano.sandbox.cuda import float32_shared_constructor as shared
import theano.sandbox.cuda.fftconv
# GPU-enabled compilation mode: FAST_COMPILE lacks the optimizations the
# tests rely on, so fall back to FAST_RUN in that case.
if theano.config.mode == 'FAST_COMPILE':
    mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
else:
    mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
class TestConv2dFFT(unittest.TestCase):
    """Check theano's FFT-based GPU conv2d against the reference conv2d."""

    def run_conv(self, inputs_shape, filters_shape, pad=False, **other_args):
        """Run conv2d_fft explicitly and compare it to the reference conv2d.

        :param inputs_shape: shape of the random input tensor
        :param filters_shape: shape of the random filter tensor
        :param pad: pad the last dimension (needed for odd sizes)
        :param other_args: forwarded to both conv calls (e.g. border_mode)
        """
        inputs_val = numpy.random.random(inputs_shape).astype('float32')
        filters_val = numpy.random.random(filters_shape).astype('float32')

        inputs = shared(inputs_val)
        filters = shared(filters_val)

        conv_ref = theano.tensor.nnet.conv.conv2d(inputs, filters,
                                                   **other_args)
        conv_fft = theano.sandbox.cuda.fftconv.conv2d_fft(inputs, filters,
                                                        pad_last_dim=pad,
                                                        **other_args)

        f_ref = theano.function([], conv_ref)
        f_fft = theano.function([], conv_fft, mode=mode_with_gpu)

        res_ref = f_ref()
        res_fft = f_fft()

        utt.assert_allclose(res_ref, res_fft)

    def test_valid(self):
        self.run_conv(inputs_shape=(5, 3, 7, 6),
                      filters_shape=(2, 3, 3, 3),
                      border_mode='valid')
        self.run_conv(inputs_shape=(5, 3, 7, 7),
                      filters_shape=(2, 3, 3, 3),
                      border_mode='valid', pad=True)

    def test_full(self):
        self.run_conv(inputs_shape=(5, 3, 7, 6),
                      filters_shape=(2, 3, 3, 3),
                      border_mode='full')
        self.run_conv(inputs_shape=(5, 3, 7, 7),
                      filters_shape=(2, 3, 3, 3),
                      border_mode='full', pad=True)

    def _run_opt_conv(self, opt_name, **conv_args):
        """Compile a plain conv2d with the given FFT optimization enabled
        and check that the rewrite fired and matches the reference.

        Shared by test_opt_valid/test_opt_full, which previously duplicated
        this whole body.

        :param opt_name: optimizer name to include in the GPU mode
        :param conv_args: forwarded to conv2d (e.g. border_mode)
        """
        inputs_shape = (5, 3, 7, 6)
        filters_shape = (2, 3, 3, 3)

        inputs_val = numpy.random.random(inputs_shape).astype('float32')
        filters_val = numpy.random.random(filters_shape).astype('float32')

        inputs = shared(inputs_val)
        filters = shared(filters_val)

        conv = theano.tensor.nnet.conv.conv2d(inputs, filters, **conv_args)

        mode = mode_with_gpu.including(opt_name)

        f_ref = theano.function([], conv)
        f_fft = theano.function([], conv, mode=mode)

        # make sure we inserted the fft trickery
        topo = f_fft.maker.fgraph.toposort()
        assert sum(isinstance(n.op, theano.sandbox.cuda.fftconv.CuFFTOp)
                   for n in topo) == 2

        res_ref = f_ref()
        res_fft = f_fft()

        utt.assert_allclose(res_ref, res_fft)

    def test_opt_valid(self):
        self._run_opt_conv('conv_fft_valid')

    def test_opt_full(self):
        self._run_opt_conv('conv_fft_full', border_mode='full')
| [
"theano.compile.mode.get_default_mode",
"nose.plugins.skip.SkipTest",
"theano.compile.mode.get_mode",
"theano.tests.unittest_tools.assert_allclose",
"theano.function",
"numpy.random.random",
"theano.tensor.nnet.conv.conv2d",
"theano.sandbox.cuda.fftconv.conv2d_fft",
"theano.sandbox.cuda.float32_shar... | [((267, 314), 'nose.plugins.skip.SkipTest', 'SkipTest', (['"""Optional package cuda not available"""'], {}), "('Optional package cuda not available')\n", (275, 314), False, 'from nose.plugins.skip import SkipTest\n'), ((403, 452), 'nose.plugins.skip.SkipTest', 'SkipTest', (['"""Optional package pycuda not available"""'], {}), "('Optional package pycuda not available')\n", (411, 452), False, 'from nose.plugins.skip import SkipTest\n'), ((557, 612), 'nose.plugins.skip.SkipTest', 'SkipTest', (['"""Optional package scikits.cuda not available"""'], {}), "('Optional package scikits.cuda not available')\n", (565, 612), False, 'from nose.plugins.skip import SkipTest\n'), ((1206, 1224), 'theano.sandbox.cuda.float32_shared_constructor', 'shared', (['inputs_val'], {}), '(inputs_val)\n', (1212, 1224), True, 'from theano.sandbox.cuda import float32_shared_constructor as shared\n'), ((1243, 1262), 'theano.sandbox.cuda.float32_shared_constructor', 'shared', (['filters_val'], {}), '(filters_val)\n', (1249, 1262), True, 'from theano.sandbox.cuda import float32_shared_constructor as shared\n'), ((1283, 1344), 'theano.tensor.nnet.conv.conv2d', 'theano.tensor.nnet.conv.conv2d', (['inputs', 'filters'], {}), '(inputs, filters, **other_args)\n', (1313, 1344), False, 'import theano\n'), ((1414, 1505), 'theano.sandbox.cuda.fftconv.conv2d_fft', 'theano.sandbox.cuda.fftconv.conv2d_fft', (['inputs', 'filters'], {'pad_last_dim': 'pad'}), '(inputs, filters, pad_last_dim=pad,\n **other_args)\n', (1452, 1505), False, 'import theano\n'), ((1635, 1664), 'theano.function', 'theano.function', (['[]', 'conv_ref'], {}), '([], conv_ref)\n', (1650, 1664), False, 'import theano\n'), ((1681, 1730), 'theano.function', 'theano.function', (['[]', 'conv_fft'], {'mode': 'mode_with_gpu'}), '([], conv_fft, mode=mode_with_gpu)\n', (1696, 1730), False, 'import theano\n'), ((1793, 1830), 'theano.tests.unittest_tools.assert_allclose', 'utt.assert_allclose', (['res_ref', 
'res_fft'], {}), '(res_ref, res_fft)\n', (1812, 1830), True, 'from theano.tests import unittest_tools as utt\n'), ((2741, 2759), 'theano.sandbox.cuda.float32_shared_constructor', 'shared', (['inputs_val'], {}), '(inputs_val)\n', (2747, 2759), True, 'from theano.sandbox.cuda import float32_shared_constructor as shared\n'), ((2778, 2797), 'theano.sandbox.cuda.float32_shared_constructor', 'shared', (['filters_val'], {}), '(filters_val)\n', (2784, 2797), True, 'from theano.sandbox.cuda import float32_shared_constructor as shared\n'), ((2814, 2861), 'theano.tensor.nnet.conv.conv2d', 'theano.tensor.nnet.conv.conv2d', (['inputs', 'filters'], {}), '(inputs, filters)\n', (2844, 2861), False, 'import theano\n'), ((2937, 2962), 'theano.function', 'theano.function', (['[]', 'conv'], {}), '([], conv)\n', (2952, 2962), False, 'import theano\n'), ((2979, 3015), 'theano.function', 'theano.function', (['[]', 'conv'], {'mode': 'mode'}), '([], conv, mode=mode)\n', (2994, 3015), False, 'import theano\n'), ((3286, 3323), 'theano.tests.unittest_tools.assert_allclose', 'utt.assert_allclose', (['res_ref', 'res_fft'], {}), '(res_ref, res_fft)\n', (3305, 3323), True, 'from theano.tests import unittest_tools as utt\n'), ((3594, 3612), 'theano.sandbox.cuda.float32_shared_constructor', 'shared', (['inputs_val'], {}), '(inputs_val)\n', (3600, 3612), True, 'from theano.sandbox.cuda import float32_shared_constructor as shared\n'), ((3631, 3650), 'theano.sandbox.cuda.float32_shared_constructor', 'shared', (['filters_val'], {}), '(filters_val)\n', (3637, 3650), True, 'from theano.sandbox.cuda import float32_shared_constructor as shared\n'), ((3667, 3734), 'theano.tensor.nnet.conv.conv2d', 'theano.tensor.nnet.conv.conv2d', (['inputs', 'filters'], {'border_mode': '"""full"""'}), "(inputs, filters, border_mode='full')\n", (3697, 3734), False, 'import theano\n'), ((3855, 3880), 'theano.function', 'theano.function', (['[]', 'conv'], {}), '([], conv)\n', (3870, 3880), False, 'import theano\n'), ((3897, 
3933), 'theano.function', 'theano.function', (['[]', 'conv'], {'mode': 'mode'}), '([], conv, mode=mode)\n', (3912, 3933), False, 'import theano\n'), ((4203, 4240), 'theano.tests.unittest_tools.assert_allclose', 'utt.assert_allclose', (['res_ref', 'res_fft'], {}), '(res_ref, res_fft)\n', (4222, 4240), True, 'from theano.tests import unittest_tools as utt\n'), ((780, 820), 'theano.compile.mode.get_mode', 'theano.compile.mode.get_mode', (['"""FAST_RUN"""'], {}), "('FAST_RUN')\n", (808, 820), False, 'import theano\n'), ((864, 902), 'theano.compile.mode.get_default_mode', 'theano.compile.mode.get_default_mode', ([], {}), '()\n', (900, 902), False, 'import theano\n'), ((1061, 1094), 'numpy.random.random', 'numpy.random.random', (['inputs_shape'], {}), '(inputs_shape)\n', (1080, 1094), False, 'import numpy\n'), ((1135, 1169), 'numpy.random.random', 'numpy.random.random', (['filters_shape'], {}), '(filters_shape)\n', (1154, 1169), False, 'import numpy\n'), ((2596, 2629), 'numpy.random.random', 'numpy.random.random', (['inputs_shape'], {}), '(inputs_shape)\n', (2615, 2629), False, 'import numpy\n'), ((2670, 2704), 'numpy.random.random', 'numpy.random.random', (['filters_shape'], {}), '(filters_shape)\n', (2689, 2704), False, 'import numpy\n'), ((3449, 3482), 'numpy.random.random', 'numpy.random.random', (['inputs_shape'], {}), '(inputs_shape)\n', (3468, 3482), False, 'import numpy\n'), ((3523, 3557), 'numpy.random.random', 'numpy.random.random', (['filters_shape'], {}), '(filters_shape)\n', (3542, 3557), False, 'import numpy\n')] |
import os
os.environ['MKL_THREADING_LAYER'] = "GNU" # temporary workaround — presumably avoids MKL threading-layer clashes with torch; confirm
from enum import Enum
import numpy as np
from torch import multiprocessing as mp
from omegaconf import OmegaConf
from pyarrow import plasma
from typing import Callable, List
import atexit
import logging
from .vector_env import VectorEnv
from .utils import CloudpickleWrapper
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
class AsyncState(Enum):
    """Lifecycle state of the vectorized env's pending asynchronous call."""
    DEFAULT = 'default'        # no call in flight
    WAITING_RESET = 'reset'    # reset_async sent, awaiting reset_wait
    WAITING_STEP = 'step'      # step_async sent, awaiting step_wait
class AsyncVectorEnv(VectorEnv):
    """Vectorized environment that runs multiple environments in parallel. It
    uses `multiprocessing` processes, and pipes for communication.
    """
    def __init__(
        self,
        env_funcs,
        observation_space=None,
        action_space=None,
        context: str = 'spawn',
        in_series: int = 1,
        plasma_config: OmegaConf = None
    ):
        """
        Args:
            env_funcs: iterable of callables - functions that create environments
                to run in subprocesses. Need to be cloud-pickleable.
            in_series: number of environments to run in series in a single process
                (e.g. when len(env_funcs) == 12 and in_series == 3, it will run
                4 processes, each running 3 envs in series)
            plasma_config: optional config for a pyarrow plasma object store the
                workers put observations into.
        """
        super().__init__(num_envs=len(env_funcs), observation_space=observation_space, action_space=action_space)
        self.closed = False
        self.in_series = in_series
        num_envs = len(env_funcs)
        assert num_envs % in_series == 0, "Number of envs must be divisible by number of envs to run in series"
        self.num_subproc = num_envs // in_series
        # Partition env constructors and their global ranks across the workers.
        env_funcs = np.array_split(env_funcs, self.num_subproc)
        ranks = np.arange(num_envs)
        ranks = np.array_split(ranks, self.num_subproc)
        # multiprocessing: one worker process per group of `in_series` envs
        ctx = mp.get_context(context)
        self.manager_pipes, self.worker_pipes = zip(*[ctx.Pipe() for _ in range(self.num_subproc)])
        self.processes = [
            ctx.Process(
                target=worker, args=(seg_ranks, worker_pipe, manager_pipe, CloudpickleWrapper(env_func), plasma_config)
            ) for (worker_pipe, manager_pipe, env_func,
                  seg_ranks) in zip(self.worker_pipes, self.manager_pipes, env_funcs, ranks)
        ]
        for process in self.processes:
            process.daemon = True  # if the main process crashes, we should not cause things to hang
            process.start()
        for pipe in self.worker_pipes:
            pipe.close()
        self._state = AsyncState.DEFAULT
        atexit.register(self.__del__)

    def reset_async(self):
        """Ask every worker to reset its envs; replies arrive via reset_wait()."""
        self._assert_is_running()
        if self._state != AsyncState.DEFAULT:
            self.flush_pipe()
            logger.warn("Flushing the Pipe due to reset.")
            # raise AssertionError('Calling `reset_async` without any prior ')
        for pipe in self.manager_pipes:
            pipe.send(('reset', None))
        # waiting state
        self._state = AsyncState.WAITING_RESET

    def reset_wait(self, timeout=None):
        """Drain the pending reset replies.

        NOTE(review): the received results are currently discarded (no return
        value) — confirm callers do not expect the initial observations here.
        """
        self._assert_is_running()
        if self._state != AsyncState.WAITING_RESET:
            raise AssertionError(
                'Calling `reset_wait` without any prior '
                'call to `reset_async`.', AsyncState.WAITING_RESET.value
            )
        results = [pipe.recv() for pipe in self.manager_pipes]
        self._state = AsyncState.DEFAULT

    def step_async(self, actions=None) -> None:
        """Send one batch of actions (or None placeholders) to the workers."""
        self._assert_is_running()
        if actions is None:
            for pipe in self.manager_pipes:
                action = [None for _ in range(self.in_series)]
                pipe.send(('step', action))
        else:
            actions = np.array_split(actions, self.num_subproc)
            for pipe, action in zip(self.manager_pipes, actions):
                pipe.send(('step', action))
        self._state = AsyncState.WAITING_STEP

    def step_wait(self):
        """Collect step results from all workers.

        Returns:
            (observations, infos, dones) tuples flattened across all envs.
        """
        results = [pipe.recv() for pipe in self.manager_pipes]
        results = _flatten_list(results)
        observations, infos, dones = zip(*results)
        self._state = AsyncState.DEFAULT
        return observations, infos, dones

    def seed(self, seeds=None):
        """Send per-env seeds to the workers.

        Accepts None (no seeding), a single int (incremented per env), or a
        list with one seed per environment.
        """
        self._assert_is_running()
        if seeds is None:
            seeds = [None for _ in range(self.num_envs)]
        elif isinstance(seeds, int):
            seeds = [seeds + i for i in range(self.num_envs)]
        # BUG FIX: validate the per-env seed count *before* splitting. The
        # original asserted after np.array_split, when len(seeds) equals
        # num_subproc, so the assert failed whenever in_series > 1.
        assert len(seeds) == self.num_envs
        seeds = np.array_split(seeds, self.num_subproc)
        if self._state != AsyncState.DEFAULT:
            raise AssertionError(
                'Calling `seed` while waiting '
                'for a pending call to `{0}` to complete.'.format(self._state.value), self._state.value
            )
        for pipe, seed in zip(self.manager_pipes, seeds):
            pipe.send(('seed', seed))

    def flush_pipe(self):
        """Discard any pending replies so the pipes return to a clean state."""
        if self._state == AsyncState.WAITING_RESET or self._state == AsyncState.WAITING_STEP:
            [pipe.recv() for pipe in self.manager_pipes]
            self._state = AsyncState.DEFAULT

    def close_extras(self, timeout=None, terminate=False):
        """
        Parameters
        ----------
        timeout : int or float, optional
            Number of seconds before the call to `close` times out. If `None`,
            the call to `close` never times out. If the call to `close` times
            out, then all processes are terminated.
        terminate : bool (default: `False`)
            If `True`, then the `close` operation is forced and all processes
            are terminated.
        """
        try:
            if terminate:
                for process in self.processes:
                    if process.is_alive():
                        process.terminate()
            else:
                for pipe in self.manager_pipes:
                    if (pipe is not None) and (not pipe.closed):
                        pipe.send(('close', None))
                for pipe in self.manager_pipes:
                    if (pipe is not None) and (not pipe.closed):
                        pipe.recv()
            for pipe in self.manager_pipes:
                if pipe is not None:
                    pipe.close()
            for process in self.processes:
                process.join()
        except Exception as e:
            # Best-effort shutdown: report but never raise during cleanup.
            print(f"{type(e)} has occured!")

    def _assert_is_running(self):
        if self.closed:
            raise AssertionError(
                'Trying to operate on `{0}`, after a '
                'call to `close()`.'.format(type(self).__name__)
            )

    def __del__(self):
        if not self.closed:
            self.close()
def _flatten_list(l):
assert isinstance(l, (list, tuple))
assert len(l) > 0
assert all([len(l_) > 0 for l_ in l])
return [l__ for l_ in l for l__ in l_]
def worker(
    ranks: int,
    pipe: List[mp.Pipe],
    parent_pipe: List[mp.Pipe],
    env_fn_wrappers: List[Callable],
    plasma_config: OmegaConf = None
):
    """Subprocess entry point: run a group of envs and serve pipe commands.

    Commands received on `pipe`: 'step', 'reset', 'seed', 'close',
    'get_spaces_spec'. Replies are sent back on the same pipe. When a
    plasma config is given, observations are put into the plasma store and
    only their object ids are sent over the pipe.
    """
    import torch
    torch.set_num_threads(1)
    # use plasma object in-store
    if plasma_config:
        plasma_client = plasma.connect(f"/tmp/torchfly/plasma/{plasma_config.plasma_store_name}/plasma.sock")
    def step_env(env, action):
        # Step one env; optionally replace the observation with its plasma id.
        observation, info, done = env.step(action)
        if plasma_config:
            observation = plasma_client.put(observation)
        return observation, info, done
    parent_pipe.close()
    envs = [env_fn_wrapper(rank) for env_fn_wrapper, rank in zip(env_fn_wrappers.x, ranks)]
    try:
        while True:
            command, data = pipe.recv()
            if command == 'step':
                pipe.send([step_env(env, action) for env, action in zip(envs, data)])
            elif command == 'reset':
                pipe.send([env.reset() for env in envs])
            elif command == 'seed':
                # NOTE(review): the seed payload in `data` is ignored here —
                # env.seed() is called with no argument; confirm intended.
                [env.seed() for env in envs]
            elif command == 'close':
                pipe.close()
                break
            elif command == 'get_spaces_spec':
                pipe.send(CloudpickleWrapper((envs[0].observation_space, envs[0].action_space, envs[0].spec)))
            else:
                raise NotImplementedError
    except KeyboardInterrupt:
        print('SubprocVecEnv worker: got KeyboardInterrupt')
    except BrokenPipeError:
        print(f"{ranks} having BrokenPipeError! Closing the environments.")
    finally:
        # Always release env resources, whatever ended the command loop.
        for env in envs:
            env.close()
| [
"atexit.register",
"torch.multiprocessing.get_context",
"torch.set_num_threads",
"numpy.arange",
"numpy.array_split",
"pyarrow.plasma.connect",
"logging.getLogger"
] | [((365, 392), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (382, 392), False, 'import logging\n'), ((7131, 7155), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (7152, 7155), False, 'import torch\n'), ((1672, 1715), 'numpy.array_split', 'np.array_split', (['env_funcs', 'self.num_subproc'], {}), '(env_funcs, self.num_subproc)\n', (1686, 1715), True, 'import numpy as np\n'), ((1732, 1751), 'numpy.arange', 'np.arange', (['num_envs'], {}), '(num_envs)\n', (1741, 1751), True, 'import numpy as np\n'), ((1768, 1807), 'numpy.array_split', 'np.array_split', (['ranks', 'self.num_subproc'], {}), '(ranks, self.num_subproc)\n', (1782, 1807), True, 'import numpy as np\n'), ((1849, 1872), 'torch.multiprocessing.get_context', 'mp.get_context', (['context'], {}), '(context)\n', (1863, 1872), True, 'from torch import multiprocessing as mp\n'), ((2592, 2621), 'atexit.register', 'atexit.register', (['self.__del__'], {}), '(self.__del__)\n', (2607, 2621), False, 'import atexit\n'), ((4512, 4551), 'numpy.array_split', 'np.array_split', (['seeds', 'self.num_subproc'], {}), '(seeds, self.num_subproc)\n', (4526, 4551), True, 'import numpy as np\n'), ((7235, 7325), 'pyarrow.plasma.connect', 'plasma.connect', (['f"""/tmp/torchfly/plasma/{plasma_config.plasma_store_name}/plasma.sock"""'], {}), "(\n f'/tmp/torchfly/plasma/{plasma_config.plasma_store_name}/plasma.sock')\n", (7249, 7325), False, 'from pyarrow import plasma\n'), ((3782, 3823), 'numpy.array_split', 'np.array_split', (['actions', 'self.num_subproc'], {}), '(actions, self.num_subproc)\n', (3796, 3823), True, 'import numpy as np\n')] |
import pyclesperanto_prototype as cle
import numpy as np
def test_masked_voronoi_labeling():
    # NOTE(review): despite its name, this test exercises
    # cle.voronoi_otsu_labeling, not masked_voronoi_labeling — confirm.
    gpu_input = cle.push(np.asarray([
        [0, 0, 1, 1, 0, 0],
        [0, 1, 8, 9, 1, 0],
        [0, 1, 7, 6, 1, 0],
        [0, 0, 1, 1, 1, 0],
        [0, 0, 1, 1, 1, 0],
        [0, 0, 1, 8, 7, 1],
        [0, 0, 1, 1, 1, 0],
    ]))
    # Expected label image: two separated objects labelled 1 and 2.
    gpu_reference = cle.push(np.asarray([
        [0, 0, 1, 1, 0, 0],
        [0, 1, 1, 1, 1, 0],
        [0, 1, 1, 1, 1, 0],
        [0, 0, 2, 2, 2, 0],
        [0, 0, 2, 2, 2, 0],
        [0, 0, 2, 2, 2, 0],
        [0, 0, 0, 2, 2, 0],
    ]))
    gpu_output = cle.voronoi_otsu_labeling(gpu_input, spot_sigma=1, outline_sigma=1)
    a = cle.pull(gpu_output)
    b = cle.pull(gpu_reference)
    print(a)
    print(b)
    assert (np.array_equal(a, b)) | [
"numpy.array_equal",
"numpy.asarray",
"pyclesperanto_prototype.voronoi_otsu_labeling",
"pyclesperanto_prototype.pull"
] | [((667, 734), 'pyclesperanto_prototype.voronoi_otsu_labeling', 'cle.voronoi_otsu_labeling', (['gpu_input'], {'spot_sigma': '(1)', 'outline_sigma': '(1)'}), '(gpu_input, spot_sigma=1, outline_sigma=1)\n', (692, 734), True, 'import pyclesperanto_prototype as cle\n'), ((744, 764), 'pyclesperanto_prototype.pull', 'cle.pull', (['gpu_output'], {}), '(gpu_output)\n', (752, 764), True, 'import pyclesperanto_prototype as cle\n'), ((773, 796), 'pyclesperanto_prototype.pull', 'cle.pull', (['gpu_reference'], {}), '(gpu_reference)\n', (781, 796), True, 'import pyclesperanto_prototype as cle\n'), ((837, 857), 'numpy.array_equal', 'np.array_equal', (['a', 'b'], {}), '(a, b)\n', (851, 857), True, 'import numpy as np\n'), ((124, 285), 'numpy.asarray', 'np.asarray', (['[[0, 0, 1, 1, 0, 0], [0, 1, 8, 9, 1, 0], [0, 1, 7, 6, 1, 0], [0, 0, 1, 1, 1,\n 0], [0, 0, 1, 1, 1, 0], [0, 0, 1, 8, 7, 1], [0, 0, 1, 1, 1, 0]]'], {}), '([[0, 0, 1, 1, 0, 0], [0, 1, 8, 9, 1, 0], [0, 1, 7, 6, 1, 0], [0,\n 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0], [0, 0, 1, 8, 7, 1], [0, 0, 1, 1, 1, 0]]\n )\n', (134, 285), True, 'import numpy as np\n'), ((402, 563), 'numpy.asarray', 'np.asarray', (['[[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 0], [0, 0, 2, 2, 2,\n 0], [0, 0, 2, 2, 2, 0], [0, 0, 2, 2, 2, 0], [0, 0, 0, 2, 2, 0]]'], {}), '([[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 0], [0,\n 0, 2, 2, 2, 0], [0, 0, 2, 2, 2, 0], [0, 0, 2, 2, 2, 0], [0, 0, 0, 2, 2, 0]]\n )\n', (412, 563), True, 'import numpy as np\n')] |
import unittest
from ctypes import ArgumentError
import os
import numpy as np
from sdl2 import *
from glaze.GL import *
BASEPATH = 'shots_results'
def getSDLError():
    """Return the current SDL error message, decoded to str when possible.

    SDL_GetError returns bytes; if decoding fails for any reason the raw
    value is returned unchanged.
    """
    message = SDL_GetError()
    try:
        return message.decode()
    except Exception:
        return message
class OGL3Tester(unittest.TestCase):
    """Smoke tests for an SDL2 + OpenGL context: clear the screen and draw a
    shaded triangle, then compare the back buffer against stored PNGs."""
    def setUp(self):
        """Create a hidden, double-buffered SDL window with a core GL context."""
        self.addCleanup(self.close)
        if SDL_Init(SDL_INIT_EVERYTHING) != 0:
            self.fail(getSDLError())
        # set_attribs_buffer
        SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8)
        SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8)
        SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8)
        SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 8)
        SDL_GL_SetAttribute(SDL_GL_BUFFER_SIZE, 24)
        # set_attribs_depth — try 24-bit first, fall back to 16-bit
        for depth in [24, 16]:
            if SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, depth) == 0:
                break
        else:
            if depth == 16:
                error = 'Error setting depth size: ' + getSDLError()
                self.fail(error)
        # set_attribs_restrict_Context
        SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 2)
        SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 1)
        # SDL_GL_SetAttribute(SDL_GL_CONTEXT_FLAGS, SDL_GL_CONTEXT_FORWARD_COMPATIBLE_FLAG)
        SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE)
        errStr = getSDLError()
        if errStr != '':
            self.fail(errStr)
        # set_attribs_use_debug
        res = SDL_GL_SetAttribute(SDL_GL_CONTEXT_FLAGS, SDL_GL_CONTEXT_DEBUG_FLAG)
        if res != 0:
            error = 'Error setting SDL debug context flag: ' + getSDLError()
            self.fail(error)
        # set_attribs_share_context
        if SDL_GL_SetAttribute(SDL_GL_SHARE_WITH_CURRENT_CONTEXT, 1) != 0:
            error = 'Error setting SDL shared context flag: ' + getSDLError()
            self.fail(error)
        # set_attribs_set_double_buffer
        isDoubleBuffered = not SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1)
        if not isDoubleBuffered:
            self.fail('Error setting SDL double buffer flag: ' + getSDLError())
        # set_attribs_set_multisample
        # from warnings import warn
        # warn('Multisample is not implemented')
        # open_window (hidden: tests run off-screen)
        self.size = w, h = 400, 400
        flags = SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE | SDL_WINDOW_HIDDEN
        try:
            self._SDL_Window = SDL_CreateWindow('test window', 200, 200, w, h, flags)
        except ArgumentError:
            # some sdl2 bindings require a bytes title
            self._SDL_Window = SDL_CreateWindow(b'test window', 200, 200, w, h, flags)
        except Exception as err:
            self.fail('error creating sdl window: ' + str(err))
        if not self._SDL_Window:
            sdlerr = SDL_GetError()
            msg = 'Error creating window {}'.format(sdlerr)
            self.fail(msg)
        self._context = newContext = SDL_GL_CreateContext(self._SDL_Window)
        if not newContext:
            sdlerr = getSDLError()
            error = 'Error creating context: ' + sdlerr
            self.fail(error)
        self.windowID = SDL_GetWindowID(self._SDL_Window)
        loadGL()
    def _pollEvents(self):
        """Drain the SDL event queue so the window stays responsive."""
        event = SDL_Event()
        while SDL_PollEvent(event):
            pass
    def GLClear(self):
        """Clear the color buffer to the fixed test background color."""
        glClearColor(.1, .3, .8, 1.0)
        glClear(GL_COLOR_BUFFER_BIT)
    def GLPresent(self):
        """Swap buffers and pump events to finish the frame."""
        SDL_GL_SwapWindow(self._SDL_Window)
        self._pollEvents()
    def test_clear_screen(self):
        self.GLClear()
        self.GLPresent()
        self.compareScreenShot('clearScreen')
    def test_draw_triangle_color(self):
        self.GLClear()
        self.drawTriangle(True)
        self.GLPresent()
        self.compareScreenShot('drawColorTriangle')
    def drawTriangle(self, useShader):
        """Draw one triangle from a VBO, optionally with the test shaders."""
        # An array of 3 vectors which represents 3 vertices
        g_vertex_buffer_data = np.array([-1.0, -1.0, 0.0, 1.0, -1.0, 0.0, 0.0, 1.0, 0.0], np.float32)
        # This will identify our vertex buffer
        vertexbuffer = np.array([0], np.uint32)
        # Generate 1 buffer, put the resulting identifier in vertexbuffer
        glGenBuffers(1, vertexbuffer)
        # The following commands will talk about our 'vertexbuffer' buffer
        glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer)
        # Give our vertices to OpenGL.
        glBufferData(GL_ARRAY_BUFFER, g_vertex_buffer_data.strides[0] * len(g_vertex_buffer_data),
                     # todo: replace with sizeof
                     g_vertex_buffer_data, GL_STATIC_DRAW)
        VertexArrayID = np.array([0], np.uint32)
        glGenVertexArrays(1, VertexArrayID)
        glBindVertexArray(VertexArrayID)
        if useShader:
            programID = LoadShaders()
            glUseProgram(programID)
        # Draw triangle...
        # 1rst attribute buffer : vertices
        glEnableVertexAttribArray(0)
        glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer)
        glVertexAttribPointer(0,
                              3,  # size
                              GL_FLOAT,  # type
                              GL_FALSE,  # normalized?
                              0,  # stride
                              0)  # array buffer offset)
        # Draw the triangle !
        glDrawArrays(GL_TRIANGLES, 0, 3)  # Starting from vertex 0 3 vertices total -> 1 triangle
        glDisableVertexAttribArray(0)
        # end the current frame (internally swaps the front and back buffers)
        glDeleteBuffers(1, vertexbuffer)
    def compareScreenShot(self, testName):
        """Read the back buffer and compare it with the stored reference PNG.

        On first run (no reference file yet) the capture is saved as the new
        reference instead of being compared.
        """
        w, h = self.size
        dest = np.empty(w * h * 3, np.uint8)
        self.getBackBufferContent(w, h, dest)
        filePath = os.path.join(BASEPATH, testName + '.png')
        from PIL.Image import fromarray, merge, open
        from PIL.ImageOps import flip
        capture = fromarray(dest.reshape(h, w, 3))
        # GL's origin is bottom-left and data is BGR: flip vertically and
        # swap channels before comparing/saving.
        capture = flip(capture)
        b, g, r = capture.split()
        capture = merge("RGB", (r, g, b))
        if not os.path.exists(filePath):
            capture.save(filePath)
        else:
            stored = open(filePath)
            isEqual = np.all(np.asarray(capture) == np.asarray(stored))
            self.assertTrue(isEqual)
    def getBackBufferContent(self, w, h, destBuffer):
        """Copy the w*h*3 BGR back-buffer pixels into destBuffer."""
        glPixelStorei(GL_PACK_ALIGNMENT, 1)
        glReadPixels(0, 0, w, h, GL_BGR, GL_UNSIGNED_BYTE, destBuffer)
    def close(self):
        """Destroy the GL context and window created in setUp."""
        SDL_GL_DeleteContext(self._context)
        SDL_DestroyWindow(self._SDL_Window)
def LoadShaders():
    """Compile and link the fixed vertex/fragment shader pair for the tests.

    Returns:
        The linked GL program id.

    Raises:
        RuntimeError: with the GL info log if compiling or linking fails.
    """
    # Create the shaders
    VertexShaderID = glCreateShader(GL_VERTEX_SHADER)
    FragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER)
    # Vertex Shader code
    VertexShaderCode = '''#version 120
    varying vec3 pos;
    attribute vec3 position;
    void main(){
        pos = position;
        gl_Position.xyz = position;
        gl_Position.w = 1.0;
    }
    '''
    # Fragment Shader code
    FragmentShaderCode = '''#version 120
    varying vec3 pos;
    void main(){
        gl_FragColor = vec4(vec3(pos + 0.25), 1);
    }
    '''
    result = np.array([GL_FALSE], np.int32)
    InfoLogLength = np.array([0], np.int32)
    # Compile Vertex Shader
    VertexSourcePointer = stringToArray(VertexShaderCode)
    glShaderSource(VertexShaderID, 1, VertexSourcePointer, None)
    glCompileShader(VertexShaderID)
    # Check Vertex Shader
    glGetShaderiv(VertexShaderID, GL_COMPILE_STATUS, result)
    if result[0] == GL_FALSE:
        glGetShaderiv(VertexShaderID, GL_INFO_LOG_LENGTH, InfoLogLength)
        VertexShaderErrorMessage = np.empty((InfoLogLength[0],), np.int8)
        glGetShaderInfoLog(VertexShaderID, InfoLogLength, None, VertexShaderErrorMessage)
        raise RuntimeError('Compiling vertex shader: ' + arrayToString(VertexShaderErrorMessage))
    # Compile Fragment Shader
    FragmentSourcePointer = stringToArray(FragmentShaderCode)
    glShaderSource(FragmentShaderID, 1, FragmentSourcePointer, None)
    glCompileShader(FragmentShaderID)
    # Check Fragment Shader
    glGetShaderiv(FragmentShaderID, GL_COMPILE_STATUS, result)
    if result[0] == GL_FALSE:
        glGetShaderiv(FragmentShaderID, GL_INFO_LOG_LENGTH, InfoLogLength)
        FragmentShaderErrorMessage = np.empty((InfoLogLength[0],), np.int8)
        glGetShaderInfoLog(FragmentShaderID, InfoLogLength, None, FragmentShaderErrorMessage)
        raise RuntimeError('Compiling fragment shader: ' + arrayToString(FragmentShaderErrorMessage))
    # Link the program
    ProgramID = glCreateProgram()
    glAttachShader(ProgramID, VertexShaderID)
    glAttachShader(ProgramID, FragmentShaderID)
    glLinkProgram(ProgramID)
    # Check the program
    glGetProgramiv(ProgramID, GL_LINK_STATUS, result)
    if result[0] == GL_FALSE:
        glGetProgramiv(ProgramID, GL_INFO_LOG_LENGTH, InfoLogLength)
        ProgramErrorMessage = np.empty((InfoLogLength[0],), np.int8)
        glGetProgramInfoLog(ProgramID, InfoLogLength, None, ProgramErrorMessage)
        # BUG FIX: the original constructed this RuntimeError but never
        # raised it, so link failures were silently ignored.
        raise RuntimeError('Linking program: ' + arrayToString(ProgramErrorMessage))
    glDetachShader(ProgramID, VertexShaderID)
    glDetachShader(ProgramID, FragmentShaderID)
    glDeleteShader(VertexShaderID)
    glDeleteShader(FragmentShaderID)
    return ProgramID
def stringToArray(string):
    """Wrap *string* in a one-element list (the format glShaderSource expects)."""
    return [string]
def arrayToString(array):
    """Decode a sequence of character codes (e.g. an int8 GL info log) to str.

    Uses str.join over a generator instead of building an index-filled list;
    this also generalizes the input from indexable sequences to any iterable
    of integer code points, with identical results for lists/arrays.
    """
    return ''.join(chr(code) for code in array)
| [
"numpy.empty",
"numpy.asarray",
"os.path.exists",
"PIL.Image.open",
"numpy.array",
"PIL.ImageOps.flip",
"os.path.join",
"PIL.Image.merge"
] | [((7115, 7145), 'numpy.array', 'np.array', (['[GL_FALSE]', 'np.int32'], {}), '([GL_FALSE], np.int32)\n', (7123, 7145), True, 'import numpy as np\n'), ((7170, 7193), 'numpy.array', 'np.array', (['[0]', 'np.int32'], {}), '([0], np.int32)\n', (7178, 7193), True, 'import numpy as np\n'), ((3916, 3986), 'numpy.array', 'np.array', (['[-1.0, -1.0, 0.0, 1.0, -1.0, 0.0, 0.0, 1.0, 0.0]', 'np.float32'], {}), '([-1.0, -1.0, 0.0, 1.0, -1.0, 0.0, 0.0, 1.0, 0.0], np.float32)\n', (3924, 3986), True, 'import numpy as np\n'), ((4057, 4081), 'numpy.array', 'np.array', (['[0]', 'np.uint32'], {}), '([0], np.uint32)\n', (4065, 4081), True, 'import numpy as np\n'), ((4592, 4616), 'numpy.array', 'np.array', (['[0]', 'np.uint32'], {}), '([0], np.uint32)\n', (4600, 4616), True, 'import numpy as np\n'), ((5605, 5634), 'numpy.empty', 'np.empty', (['(w * h * 3)', 'np.uint8'], {}), '(w * h * 3, np.uint8)\n', (5613, 5634), True, 'import numpy as np\n'), ((5701, 5742), 'os.path.join', 'os.path.join', (['BASEPATH', "(testName + '.png')"], {}), "(BASEPATH, testName + '.png')\n", (5713, 5742), False, 'import os\n'), ((5904, 5917), 'PIL.ImageOps.flip', 'flip', (['capture'], {}), '(capture)\n', (5908, 5917), False, 'from PIL.ImageOps import flip\n'), ((5970, 5993), 'PIL.Image.merge', 'merge', (['"""RGB"""', '(r, g, b)'], {}), "('RGB', (r, g, b))\n", (5975, 5993), False, 'from PIL.Image import fromarray, merge, open\n'), ((7644, 7682), 'numpy.empty', 'np.empty', (['(InfoLogLength[0],)', 'np.int8'], {}), '((InfoLogLength[0],), np.int8)\n', (7652, 7682), True, 'import numpy as np\n'), ((8349, 8387), 'numpy.empty', 'np.empty', (['(InfoLogLength[0],)', 'np.int8'], {}), '((InfoLogLength[0],), np.int8)\n', (8357, 8387), True, 'import numpy as np\n'), ((9021, 9059), 'numpy.empty', 'np.empty', (['(InfoLogLength[0],)', 'np.int8'], {}), '((InfoLogLength[0],), np.int8)\n', (9029, 9059), True, 'import numpy as np\n'), ((6009, 6033), 'os.path.exists', 'os.path.exists', (['filePath'], {}), '(filePath)\n', (6023, 
6033), False, 'import os\n'), ((6105, 6119), 'PIL.Image.open', 'open', (['filePath'], {}), '(filePath)\n', (6109, 6119), False, 'from PIL.Image import fromarray, merge, open\n'), ((6149, 6168), 'numpy.asarray', 'np.asarray', (['capture'], {}), '(capture)\n', (6159, 6168), True, 'import numpy as np\n'), ((6172, 6190), 'numpy.asarray', 'np.asarray', (['stored'], {}), '(stored)\n', (6182, 6190), True, 'import numpy as np\n')] |
#! -*- coding:utf-8 -*-
'''
@Author: ZM
@Date and Time: 2021/1/1 20:12
@File: train.py
'''
import math
import numpy as np
import cv2
from keras.layers import Input, Lambda
from keras import Model
from keras.optimizers import Adam
from keras import backend as K
from keras.callbacks import Callback
from Loss import Loss
from Dataset import Dataset
from mnist_dataset import get_mnist
from data_generator import data_generator
from discriminator_model import discriminator_model
from generator_model import generator_model
class WGANGCLoss(Loss):
    """WGAN loss layer for the combined discriminator/generator graph.

    `inputs` is [real_score, fake_score, fake_stop_grad_score, y_pred];
    `y_pred` (the concatenated scores) is unpacked but unused here.
    NOTE(review): the `- 2 * fake_ng` term appears to fold the generator and
    discriminator objectives into one loss via the stop-gradient branch built
    in train.py — confirm against the Loss base class contract.
    """
    def compute_loss(self, inputs):
        x_real_score, x_fake_score, x_fake_ng_score, y_pred = inputs
        return K.mean(x_real_score + x_fake_score - x_fake_ng_score * 2)
if __name__ == '__main__':
    batch_size = 128
    init_lr = 1e-5
    img_size = (28, 28, 1)
    dst_img_size = (140, 140)
    latent_dim = 100
    # Train only on the digit "8", scaled to [-1, 1] with a channel axis.
    (X_train, Y_train), _ = get_mnist()
    X_train = X_train[Y_train == 8]
    X_train = X_train.astype('float32') / 127.5 - 1
    X_train = np.expand_dims(X_train, 3)
    dataset = Dataset(X_train)
    generator = data_generator(dataset, batch_size=batch_size, shuffle=True)
    # Discriminator: image -> score
    d_input = Input(shape=img_size, dtype='float32')
    d_out = discriminator_model(d_input)
    d_model = Model(d_input, d_out)
    # Generator: latent vector -> image
    g_input = Input(shape=(latent_dim, ), dtype='float32')
    g_out = generator_model(g_input)
    g_model = Model(g_input, g_out)
    # Combined training graph: score real, fake, and stop-gradient fake images.
    x_in = Input(shape=img_size, dtype='float32')
    z_in = Input(shape=(latent_dim,), dtype='float32')
    x_real = x_in
    x_fake = g_model(z_in)
    x_fake_ng = Lambda(K.stop_gradient)(x_fake)
    x_real_score = d_model(x_real)
    x_fake_score = d_model(x_fake)
    x_fake_ng_score = d_model(x_fake_ng)
    out = Lambda(lambda inputs: K.concatenate(inputs))([x_real_score, x_fake_score, x_fake_ng_score])
    out = WGANGCLoss(output_axis=-1)([x_real_score, x_fake_score, x_fake_ng_score, out])
    train_model = Model([x_in, z_in], out)
    # clipvalue presumably stands in for WGAN weight clipping — confirm.
    opt = Adam(learning_rate=init_lr, clipvalue=0.1)
    train_model.compile(opt)
    def evaluate():
        # Sample one image from the generator and save an upscaled preview.
        random_latent_vector = np.random.normal(size=(1, latent_dim))
        generated_image = g_model.predict_on_batch(random_latent_vector)[0]
        img = cv2.resize(np.around((generated_image + 1) * 127.5).astype('uint8'), dst_img_size)
        cv2.imwrite('generated_image.png', img)
    class Evaluator(Callback):
        # Writes a sample image at the end of every epoch.
        def __init__(self):
            super(Evaluator, self).__init__()
        def on_epoch_end(self, epoch, logs=None):
            evaluate()
    evaluator = Evaluator()
    train_model.fit_generator(
        generator,
        steps_per_epoch=math.ceil(len(X_train) / batch_size),
        epochs=150,
        callbacks=[evaluator],
        shuffle=False,
        initial_epoch=0
    ) | [
"keras.backend.concatenate",
"keras.Model",
"cv2.imwrite",
"keras.optimizers.Adam",
"numpy.expand_dims",
"generator_model.generator_model",
"numpy.around",
"mnist_dataset.get_mnist",
"keras.backend.mean",
"keras.layers.Lambda",
"numpy.random.normal",
"keras.layers.Input",
"Dataset.Dataset",
... | [((919, 930), 'mnist_dataset.get_mnist', 'get_mnist', ([], {}), '()\n', (928, 930), False, 'from mnist_dataset import get_mnist\n'), ((1033, 1059), 'numpy.expand_dims', 'np.expand_dims', (['X_train', '(3)'], {}), '(X_train, 3)\n', (1047, 1059), True, 'import numpy as np\n'), ((1074, 1090), 'Dataset.Dataset', 'Dataset', (['X_train'], {}), '(X_train)\n', (1081, 1090), False, 'from Dataset import Dataset\n'), ((1107, 1167), 'data_generator.data_generator', 'data_generator', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(dataset, batch_size=batch_size, shuffle=True)\n', (1121, 1167), False, 'from data_generator import data_generator\n'), ((1183, 1221), 'keras.layers.Input', 'Input', ([], {'shape': 'img_size', 'dtype': '"""float32"""'}), "(shape=img_size, dtype='float32')\n", (1188, 1221), False, 'from keras.layers import Input, Lambda\n'), ((1234, 1262), 'discriminator_model.discriminator_model', 'discriminator_model', (['d_input'], {}), '(d_input)\n', (1253, 1262), False, 'from discriminator_model import discriminator_model\n'), ((1277, 1298), 'keras.Model', 'Model', (['d_input', 'd_out'], {}), '(d_input, d_out)\n', (1282, 1298), False, 'from keras import Model\n'), ((1314, 1357), 'keras.layers.Input', 'Input', ([], {'shape': '(latent_dim,)', 'dtype': '"""float32"""'}), "(shape=(latent_dim,), dtype='float32')\n", (1319, 1357), False, 'from keras.layers import Input, Lambda\n'), ((1371, 1395), 'generator_model.generator_model', 'generator_model', (['g_input'], {}), '(g_input)\n', (1386, 1395), False, 'from generator_model import generator_model\n'), ((1410, 1431), 'keras.Model', 'Model', (['g_input', 'g_out'], {}), '(g_input, g_out)\n', (1415, 1431), False, 'from keras import Model\n'), ((1444, 1482), 'keras.layers.Input', 'Input', ([], {'shape': 'img_size', 'dtype': '"""float32"""'}), "(shape=img_size, dtype='float32')\n", (1449, 1482), False, 'from keras.layers import Input, Lambda\n'), ((1494, 1537), 'keras.layers.Input', 'Input', ([], 
{'shape': '(latent_dim,)', 'dtype': '"""float32"""'}), "(shape=(latent_dim,), dtype='float32')\n", (1499, 1537), False, 'from keras.layers import Input, Lambda\n'), ((1955, 1979), 'keras.Model', 'Model', (['[x_in, z_in]', 'out'], {}), '([x_in, z_in], out)\n', (1960, 1979), False, 'from keras import Model\n'), ((1990, 2032), 'keras.optimizers.Adam', 'Adam', ([], {'learning_rate': 'init_lr', 'clipvalue': '(0.1)'}), '(learning_rate=init_lr, clipvalue=0.1)\n', (1994, 2032), False, 'from keras.optimizers import Adam\n'), ((686, 743), 'keras.backend.mean', 'K.mean', (['(x_real_score + x_fake_score - x_fake_ng_score * 2)'], {}), '(x_real_score + x_fake_score - x_fake_ng_score * 2)\n', (692, 743), True, 'from keras import backend as K\n'), ((1600, 1623), 'keras.layers.Lambda', 'Lambda', (['K.stop_gradient'], {}), '(K.stop_gradient)\n', (1606, 1623), False, 'from keras.layers import Input, Lambda\n'), ((2114, 2152), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, latent_dim)'}), '(size=(1, latent_dim))\n', (2130, 2152), True, 'import numpy as np\n'), ((2335, 2374), 'cv2.imwrite', 'cv2.imwrite', (['"""generated_image.png"""', 'img'], {}), "('generated_image.png', img)\n", (2346, 2374), False, 'import cv2\n'), ((1777, 1798), 'keras.backend.concatenate', 'K.concatenate', (['inputs'], {}), '(inputs)\n', (1790, 1798), True, 'from keras import backend as K\n'), ((2255, 2295), 'numpy.around', 'np.around', (['((generated_image + 1) * 127.5)'], {}), '((generated_image + 1) * 127.5)\n', (2264, 2295), True, 'import numpy as np\n')] |
# coding: utf-8
# # Code to extract out the chi2 values for many different SNR values combinations.
# Like excel sheet
#
# In[1]:
import os
import matplotlib.pyplot as plt
# Jupyter magic (this file is an exported notebook): show figures inline.
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
# Root directory holding the simulation analysis outputs.
home = "/home/jneal/Phd/Analysis/fake_sims_with_var_teff1"
import pandas as pd
import sqlalchemy as sa
from mingle.utilities.db_utils import load_sql_table
# In[2]:
# ls /home/jneal/Phd/Analysis/fake_sims_with_var_teff1/analysis/
# In[3]:
# SNR (noise) levels at which the simulated spectra were generated.
noises=[0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 20,
        50, 100, 150, 250, 500, 1000, 2000, 5000, 1000000]
# Companion effective temperatures (K) of the simulations.
teffs = [2300, 3400, 4000]
# In[4]:
# Truncate wide DataFrame columns when printing below.
pd.options.display.max_colwidth = 10
def load_min_chi2(teff, noises):
    """Collect the minimum-chi^2 IAM result for one companion teff at each SNR.

    Parameters
    ----------
    teff : int
        Companion effective temperature used to build the simulation name.
    noises : iterable of int
        SNR levels to load; each maps to one chi-squared results database.

    Returns
    -------
    pandas.DataFrame
        One row per successfully-loaded SNR (the lowest ``coadd_chi2`` row
        of that database), with added ``snr`` and ``median_alpha`` columns.
        SNRs whose database is missing or unreadable are skipped with a
        printed warning (best-effort behaviour preserved).
    """
    frames = []
    for snr in noises:
        obsnum = 1
        starname = "NOISESCRIPT{}N{}".format(teff, snr)
        directory = os.path.join(home, "analysis", starname, "iam")
        dbname = f"{starname}-{obsnum}_coadd_iam_chisqr_results.db"
        try:
            table = load_sql_table(os.path.join(directory, dbname), verbose=False, echo=False)
            chi2_val = "coadd_chi2"
            # Fetch only the single row with the smallest chi^2.
            dbdf = pd.read_sql(sa.select(table.c).order_by(table.c[chi2_val].asc()).limit(1), table.metadata.bind)
            dbdf["snr"] = snr  # Add SNR column
            frames.append(dbdf)
        except Exception as e:
            # Best-effort: a broken/missing database only skips this SNR.
            print(e)
            print(f"Didn't get Database for {teff}-{snr}")
    # DataFrame.append was removed in pandas 2.0; concatenate instead.
    # frames[::-1] reproduces the old prepend order (largest SNR first).
    df_store = pd.concat(frames[::-1]) if frames else pd.DataFrame()
    if not df_store.empty:
        # Median of the four detector alpha values per row.
        df_store["median_alpha"] = df_store.apply(
            lambda row: np.median([row.alpha_1, row.alpha_2, row.alpha_3, row.alpha_4]),
            axis=1,
        )
    return df_store
# In[5]:
# Load the minimum-chi^2 tables for every companion temperature.
df_teff = []
for teff in teffs:
    df_teff.append(load_min_chi2(teff, noises))
# In[6]:
def analyse_min_chi(df, teff):
    """Print and plot the best-fit host/companion temperatures versus SNR.

    Parameters
    ----------
    df : pandas.DataFrame
        Output of ``load_min_chi2`` (one minimum-chi^2 row per SNR).
    teff : int
        True companion temperature (K), drawn as a dashed reference line.
    """
    print("\nHost Temperature = 5200 K, Companion Temperature = {}".format(teff))
    print(df[["snr", "coadd_chi2", "teff_1", "teff_2", "median_alpha"]])
    print()
    ax = df.plot(x="snr", y="teff_1", style="o-", logx=True)
    # Dashed line marks the true (simulated) host temperature.
    plt.axhline(y=5200, color="k", linestyle="--")
    ax.set_xlabel("SNR")
    ax.set_ylabel("Teff [K]")
    plt.title("Host Temperature")
    ax2 = df.plot(x="snr", y="teff_2", style="o-", logx=True )
    plt.axhline(y=teff, color="k", linestyle="--")
    ax2.set_xlabel("SNR")
    ax2.set_ylabel("Teff [K]")
    plt.title("Companion Temperature")
    ax3=df.plot(x="snr", y="coadd_chi2", style="o-", logx=True)
    plt.title("Chi squared")
    ax3.set_xlabel("SNR")
    # Raw string: "\c" is an invalid escape sequence (SyntaxWarning on
    # Python >= 3.12); the rendered label value is unchanged.
    ax3.set_ylabel(r"$\chi^2$")
    plt.show()
# In[7]:
# Report and plot each IAM run (host 5200 K, varying companion teff).
for i, teff in enumerate(teffs):
    analyse_min_chi(df_teff[i], teff)
# In[8]:
# Single model simulations
#/home/jneal/Phd/Analysis/sims_variable_params_same_snr/analysis/BHMNOISESCRIPT5200N0
#/home/jneal/Phd/Analysis/sims_variable_params_same_snr/analysis/BHMNOISESCRIPT5200N20
#/home/jneal/Phd/Analysis/sims_variable_params_same_snr/analysis/BHMNOISESCRIPT5200N50
#/home/jneal/Phd/Analysis/sims_variable_params_same_snr/analysis/BHMNOISESCRIPT5200N100
#/home/jneal/Phd/Analysis/sims_variable_params_same_snr/analysis/BHMNOISESCRIPT5200N1000
def load_min_bhm_chi2(teff, noises):
    """Collect the minimum-chi^2 BHM (single-component) result at each SNR.

    Parameters
    ----------
    teff : int
        Host effective temperature used to build the simulation name.
    noises : iterable of int
        SNR levels to load; each maps to one chi-squared results database.

    Returns
    -------
    pandas.DataFrame
        One row per successfully-loaded SNR (the lowest ``coadd_chi2`` row
        of that database), with an added ``snr`` column.  SNRs whose
        database is missing or unreadable are skipped with a warning.
    """
    frames = []
    for snr in noises:
        obsnum = 1
        starname = "BHMNOISESCRIPT{}N{}".format(teff, snr)
        directory = os.path.join(home, "analysis", starname, "bhm")
        dbname = f"{starname}-{obsnum}_coadd_bhm_chisqr_results.db"
        try:
            table = load_sql_table(os.path.join(directory, dbname), verbose=False, echo=False)
            chi2_val = "coadd_chi2"
            # Fetch only the single row with the smallest chi^2.
            dbdf = pd.read_sql(sa.select(table.c).order_by(table.c[chi2_val].asc()).limit(1), table.metadata.bind)
            dbdf["snr"] = snr  # Add SNR column
            frames.append(dbdf)
        except Exception as e:
            print(e)
            print(f"Didn't get Database for {teff}-{snr}")
    # DataFrame.append was removed in pandas 2.0; concatenate instead.
    # frames[::-1] reproduces the old prepend order (largest SNR first).
    return pd.concat(frames[::-1]) if frames else pd.DataFrame()
# In[9]:
# Reduced SNR grid and host temperature for the single-component (BHM) runs.
noises=[0, 20, 50, 100, 1000]
bhm_teffs = [5200]
df_bhm_teff = []
for teff in bhm_teffs:
    df_bhm_teff.append(load_min_bhm_chi2(teff, noises))
# In[10]:
#/home/jneal/Phd/Analysis/sims_variable_params_same_snr/analysis/BHMNOISESCRIPT5200N50/bhm/BHMNOISESCRIPT520050-7_coadd_bhm_chisqr_results.db
def analyse_min_chi(df, teff):
    """Print and plot the best-fit BHM host temperature versus SNR.

    Note: this redefines the earlier ``analyse_min_chi`` (notebook-style
    script); only this version exists after this point.

    Parameters
    ----------
    df : pandas.DataFrame
        Output of ``load_min_bhm_chi2``.
    teff : int
        True host temperature (K), drawn as a dashed reference line.
    """
    print("\nHost Temperature = {} K".format(teff))
    print(df[["snr", "coadd_chi2", "teff_1", "teff_2"]])#, "median_alpha"]])
    ax = df.plot(x="snr", y="teff_1", style="o-", logx=True )
    plt.axhline(y=teff, color="k", linestyle="--")
    ax.set_xlabel("SNR")
    ax.set_ylabel("Teff [K]")
    plt.title("Companion Temperature")
    ax3=df.plot(x="snr", y="coadd_chi2", style="o-", logx=True)
    plt.title("Chi squared")
    ax3.set_xlabel("SNR")
    # Raw string: "\c" is an invalid escape sequence (SyntaxWarning on
    # Python >= 3.12); the rendered label value is unchanged.
    ax3.set_ylabel(r"$\chi^2$")
    plt.show()
# In[11]:
# Fix: index the BHM results (df_bhm_teff).  The loop previously indexed
# df_teff (the IAM results) by copy-paste mistake, which left the
# df_bhm_teff list built above completely unused.
for i, teff in enumerate(bhm_teffs):
    analyse_min_chi(df_bhm_teff[i], teff)
| [
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.show",
"numpy.median",
"sqlalchemy.select",
"os.path.join"
] | [((739, 753), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (751, 753), True, 'import pandas as pd\n'), ((2000, 2046), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(5200)', 'color': '"""k"""', 'linestyle': '"""--"""'}), "(y=5200, color='k', linestyle='--')\n", (2011, 2046), True, 'import matplotlib.pyplot as plt\n'), ((2106, 2135), 'matplotlib.pyplot.title', 'plt.title', (['"""Host Temperature"""'], {}), "('Host Temperature')\n", (2115, 2135), True, 'import matplotlib.pyplot as plt\n'), ((2204, 2250), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': 'teff', 'color': '"""k"""', 'linestyle': '"""--"""'}), "(y=teff, color='k', linestyle='--')\n", (2215, 2250), True, 'import matplotlib.pyplot as plt\n'), ((2312, 2346), 'matplotlib.pyplot.title', 'plt.title', (['"""Companion Temperature"""'], {}), "('Companion Temperature')\n", (2321, 2346), True, 'import matplotlib.pyplot as plt\n'), ((2416, 2440), 'matplotlib.pyplot.title', 'plt.title', (['"""Chi squared"""'], {}), "('Chi squared')\n", (2425, 2440), True, 'import matplotlib.pyplot as plt\n'), ((2505, 2515), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2513, 2515), True, 'import matplotlib.pyplot as plt\n'), ((3142, 3156), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3154, 3156), True, 'import pandas as pd\n'), ((4581, 4627), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': 'teff', 'color': '"""k"""', 'linestyle': '"""--"""'}), "(y=teff, color='k', linestyle='--')\n", (4592, 4627), True, 'import matplotlib.pyplot as plt\n'), ((4687, 4721), 'matplotlib.pyplot.title', 'plt.title', (['"""Companion Temperature"""'], {}), "('Companion Temperature')\n", (4696, 4721), True, 'import matplotlib.pyplot as plt\n'), ((4791, 4815), 'matplotlib.pyplot.title', 'plt.title', (['"""Chi squared"""'], {}), "('Chi squared')\n", (4800, 4815), True, 'import matplotlib.pyplot as plt\n'), ((4878, 4888), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4886, 4888), True, 'import 
matplotlib.pyplot as plt\n'), ((875, 922), 'os.path.join', 'os.path.join', (['home', '"""analysis"""', 'starname', '"""iam"""'], {}), "(home, 'analysis', starname, 'iam')\n", (887, 922), False, 'import os\n'), ((3281, 3328), 'os.path.join', 'os.path.join', (['home', '"""analysis"""', 'starname', '"""bhm"""'], {}), "(home, 'analysis', starname, 'bhm')\n", (3293, 3328), False, 'import os\n'), ((1530, 1593), 'numpy.median', 'np.median', (['[row.alpha_1, row.alpha_2, row.alpha_3, row.alpha_4]'], {}), '([row.alpha_1, row.alpha_2, row.alpha_3, row.alpha_4])\n', (1539, 1593), True, 'import numpy as np\n'), ((1048, 1079), 'os.path.join', 'os.path.join', (['directory', 'dbname'], {}), '(directory, dbname)\n', (1060, 1079), False, 'import os\n'), ((3454, 3485), 'os.path.join', 'os.path.join', (['directory', 'dbname'], {}), '(directory, dbname)\n', (3466, 3485), False, 'import os\n'), ((1183, 1201), 'sqlalchemy.select', 'sa.select', (['table.c'], {}), '(table.c)\n', (1192, 1201), True, 'import sqlalchemy as sa\n'), ((3589, 3607), 'sqlalchemy.select', 'sa.select', (['table.c'], {}), '(table.c)\n', (3598, 3607), True, 'import sqlalchemy as sa\n')] |
# -*- coding: utf-8 -*-
"""1D ALE plotting."""
import logging
import sys
import warnings
from operator import add, attrgetter, sub
from pathlib import Path
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from loguru import logger as loguru_logger
from matplotlib.lines import Line2D
from empirical_fire_modelling.configuration import Experiment
from empirical_fire_modelling.cx1 import run
from empirical_fire_modelling.data import get_experiment_split_data
from empirical_fire_modelling.logging_config import enable_logging
from empirical_fire_modelling.model import get_model, get_model_scores
from empirical_fire_modelling.plotting import figure_saver
mpl.rc_file(Path(__file__).resolve().parent / "matplotlibrc")
loguru_logger.enable("alepython")
loguru_logger.remove()
loguru_logger.add(sys.stderr, level="WARNING")
logger = logging.getLogger(__name__)
enable_logging(level="WARNING")
warnings.filterwarnings("ignore", ".*Collapsing a non-contiguous coordinate.*")
warnings.filterwarnings("ignore", ".*DEFAULT_SPHERICAL_EARTH_RADIUS.*")
warnings.filterwarnings("ignore", ".*guessing contiguous bounds.*")
warnings.filterwarnings(
"ignore", 'Setting feature_perturbation = "tree_path_dependent".*'
)
def plot_score_groups(experiments, **kwargs):
scores = {}
for experiment in experiments:
# Operate on cached data only.
get_experiment_split_data.check_in_store(experiment)
X_train, X_test, y_train, y_test = get_experiment_split_data(experiment)
# Operate on cached fitted models only.
get_model(X_train, y_train, cache_check=True)
model = get_model(X_train, y_train)
# Cached scores only.
get_model_scores.check_in_store(model, X_test, X_train, y_test, y_train)
scores[experiment] = get_model_scores(model, X_test, X_train, y_test, y_train)
# Sort scores based on the validation R2 score.
sort_indices = np.argsort([score["test_r2"] for score in scores.values()])[::-1]
# Sorted values.
s_train_r2s = np.array([score["train_r2"] for score in scores.values()])[
sort_indices
]
s_validation_r2s = np.array([score["test_r2"] for score in scores.values()])[
sort_indices
]
s_oob_r2s = np.array([score["oob_r2"] for score in scores.values()])[sort_indices]
# Adapted from: https://matplotlib.org/gallery/subplots_axes_and_figures/broken_axis.html
# Ratio of training R2 range to validation R2 range.
train_validation_ratio = np.ptp(s_train_r2s) / np.ptp(s_validation_r2s)
fig = plt.figure(figsize=(4, 2.2), dpi=200)
all_ax = fig.add_subplot(1, 1, 1)
all_ax.set_ylabel(r"$\mathrm{R}^2$", labelpad=29)
all_ax.set_xticks([])
all_ax.set_yticks([])
all_ax.set_frame_on(
False
) # So we don't get black bars showing through the 'broken' gap.
# Break the y-axis into 2 parts.
# fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(6, 3.5))
ax1, ax2 = fig.subplots(
2, 1, sharex=True, gridspec_kw=dict(height_ratios=[train_validation_ratio, 1])
)
fig.subplots_adjust(hspace=0.05) # adjust space between axes
# Plot train and validation R2s.
train_kwargs = dict(linestyle="", marker="x", c="C1", label="train")
ax1.plot(s_train_r2s, **train_kwargs)
validation_kwargs = dict(linestyle="", marker="o", c="C0", label="validation")
ax2.plot(s_validation_r2s, **validation_kwargs)
oob_kwargs = dict(linestyle="", marker="^", c="C2", label="train OOB")
ax2.plot(s_oob_r2s, **oob_kwargs)
ax2.set_yticks(np.arange(0.575, 0.7 + 0.01, 0.025))
ax2.legend(
handles=[
Line2D([0], [0], **kwargs)
for kwargs in (train_kwargs, validation_kwargs, oob_kwargs)
],
loc="lower left",
)
ylim_1 = ax1.get_ylim()
ylim_2 = ax2.get_ylim()
margin_f = (0.22, 0.05) # Two-sided relative margin addition.
ax1.set_ylim(
[
op(ylim_val, factor * np.ptp(ylim_1))
for ylim_val, factor, op in zip(ylim_1, margin_f, (sub, add))
]
)
ax2.set_ylim(
[
op(ylim_val, factor * np.ptp(ylim_1) / train_validation_ratio)
for ylim_val, factor, op in zip(ylim_2, margin_f, (sub, add))
]
)
# ax2.set_ylim(ylim_2[0], ylim_2[1] + margin_f * np.ptp(ylim_1) / train_validation_ratio)
# hide the spines between ax and ax2
ax1.spines["bottom"].set_visible(False)
ax2.spines["top"].set_visible(False)
ax1.xaxis.tick_top()
ax1.tick_params(labeltop=False) # don't put tick labels at the top
ax1.xaxis.set_ticks_position("none") # hide top ticks themselves (not just labels)
ax2.xaxis.tick_bottom()
ax2.set_xticks(list(range(len(experiments))))
ax2.set_xticklabels(
list(np.array(list(map(attrgetter("name"), scores)))[sort_indices]),
rotation=45,
ha="right",
)
ax2.tick_params(axis="x", which="major", pad=0)
# Now, let's turn towards the cut-out slanted lines.
# We create line objects in axes coordinates, in which (0,0), (0,1),
# (1,0), and (1,1) are the four corners of the axes.
# The slanted lines themselves are markers at those locations, such that the
# lines keep their angle and position, independent of the axes size or scale
# Finally, we need to disable clipping.
d = 0.5 # proportion of vertical to horizontal extent of the slanted line
kwargs = dict(
marker=[(-1, -d), (1, d)],
markersize=8,
linestyle="none",
color="k",
mec="k",
mew=1,
clip_on=False,
)
ax1.plot([0, 1], [0, 0], transform=ax1.transAxes, **kwargs)
ax2.plot([0, 1], [1, 1], transform=ax2.transAxes, **kwargs)
for ax in (ax1, ax2):
ax.set_xticks(list(range(len(experiments))))
figure_saver.save_figure(fig, "model_comp_scores")
if __name__ == "__main__":
experiment_groups = (
(
Experiment.ALL,
Experiment.TOP15,
Experiment.CURR,
Experiment["15VEG_FAPAR"],
Experiment["15VEG_LAI"],
Experiment["15VEG_SIF"],
Experiment["15VEG_VOD"],
Experiment.CURRDD_FAPAR,
Experiment.CURRDD_LAI,
Experiment.CURRDD_SIF,
Experiment.CURRDD_VOD,
Experiment.BEST15,
),
)
run(plot_score_groups, experiment_groups, cx1_kwargs=False)
| [
"empirical_fire_modelling.logging_config.enable_logging",
"loguru.logger.enable",
"matplotlib.pyplot.figure",
"pathlib.Path",
"numpy.arange",
"loguru.logger.remove",
"empirical_fire_modelling.cx1.run",
"empirical_fire_modelling.data.get_experiment_split_data.check_in_store",
"loguru.logger.add",
"... | [((749, 782), 'loguru.logger.enable', 'loguru_logger.enable', (['"""alepython"""'], {}), "('alepython')\n", (769, 782), True, 'from loguru import logger as loguru_logger\n'), ((783, 805), 'loguru.logger.remove', 'loguru_logger.remove', ([], {}), '()\n', (803, 805), True, 'from loguru import logger as loguru_logger\n'), ((806, 852), 'loguru.logger.add', 'loguru_logger.add', (['sys.stderr'], {'level': '"""WARNING"""'}), "(sys.stderr, level='WARNING')\n", (823, 852), True, 'from loguru import logger as loguru_logger\n'), ((863, 890), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (880, 890), False, 'import logging\n'), ((891, 922), 'empirical_fire_modelling.logging_config.enable_logging', 'enable_logging', ([], {'level': '"""WARNING"""'}), "(level='WARNING')\n", (905, 922), False, 'from empirical_fire_modelling.logging_config import enable_logging\n'), ((924, 1003), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""', '""".*Collapsing a non-contiguous coordinate.*"""'], {}), "('ignore', '.*Collapsing a non-contiguous coordinate.*')\n", (947, 1003), False, 'import warnings\n'), ((1004, 1075), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""', '""".*DEFAULT_SPHERICAL_EARTH_RADIUS.*"""'], {}), "('ignore', '.*DEFAULT_SPHERICAL_EARTH_RADIUS.*')\n", (1027, 1075), False, 'import warnings\n'), ((1076, 1143), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""', '""".*guessing contiguous bounds.*"""'], {}), "('ignore', '.*guessing contiguous bounds.*')\n", (1099, 1143), False, 'import warnings\n'), ((1145, 1240), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""', '"""Setting feature_perturbation = "tree_path_dependent".*"""'], {}), '(\'ignore\',\n \'Setting feature_perturbation = "tree_path_dependent".*\')\n', (1168, 1240), False, 'import warnings\n'), ((2570, 2607), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 2.2)', 'dpi': '(200)'}), 
'(figsize=(4, 2.2), dpi=200)\n', (2580, 2607), True, 'import matplotlib.pyplot as plt\n'), ((5854, 5904), 'empirical_fire_modelling.plotting.figure_saver.save_figure', 'figure_saver.save_figure', (['fig', '"""model_comp_scores"""'], {}), "(fig, 'model_comp_scores')\n", (5878, 5904), False, 'from empirical_fire_modelling.plotting import figure_saver\n'), ((6401, 6460), 'empirical_fire_modelling.cx1.run', 'run', (['plot_score_groups', 'experiment_groups'], {'cx1_kwargs': '(False)'}), '(plot_score_groups, experiment_groups, cx1_kwargs=False)\n', (6404, 6460), False, 'from empirical_fire_modelling.cx1 import run\n'), ((1389, 1441), 'empirical_fire_modelling.data.get_experiment_split_data.check_in_store', 'get_experiment_split_data.check_in_store', (['experiment'], {}), '(experiment)\n', (1429, 1441), False, 'from empirical_fire_modelling.data import get_experiment_split_data\n'), ((1485, 1522), 'empirical_fire_modelling.data.get_experiment_split_data', 'get_experiment_split_data', (['experiment'], {}), '(experiment)\n', (1510, 1522), False, 'from empirical_fire_modelling.data import get_experiment_split_data\n'), ((1580, 1625), 'empirical_fire_modelling.model.get_model', 'get_model', (['X_train', 'y_train'], {'cache_check': '(True)'}), '(X_train, y_train, cache_check=True)\n', (1589, 1625), False, 'from empirical_fire_modelling.model import get_model, get_model_scores\n'), ((1642, 1669), 'empirical_fire_modelling.model.get_model', 'get_model', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (1651, 1669), False, 'from empirical_fire_modelling.model import get_model, get_model_scores\n'), ((1709, 1781), 'empirical_fire_modelling.model.get_model_scores.check_in_store', 'get_model_scores.check_in_store', (['model', 'X_test', 'X_train', 'y_test', 'y_train'], {}), '(model, X_test, X_train, y_test, y_train)\n', (1740, 1781), False, 'from empirical_fire_modelling.model import get_model, get_model_scores\n'), ((1811, 1868), 
'empirical_fire_modelling.model.get_model_scores', 'get_model_scores', (['model', 'X_test', 'X_train', 'y_test', 'y_train'], {}), '(model, X_test, X_train, y_test, y_train)\n', (1827, 1868), False, 'from empirical_fire_modelling.model import get_model, get_model_scores\n'), ((2512, 2531), 'numpy.ptp', 'np.ptp', (['s_train_r2s'], {}), '(s_train_r2s)\n', (2518, 2531), True, 'import numpy as np\n'), ((2534, 2558), 'numpy.ptp', 'np.ptp', (['s_validation_r2s'], {}), '(s_validation_r2s)\n', (2540, 2558), True, 'import numpy as np\n'), ((3586, 3621), 'numpy.arange', 'np.arange', (['(0.575)', '(0.7 + 0.01)', '(0.025)'], {}), '(0.575, 0.7 + 0.01, 0.025)\n', (3595, 3621), True, 'import numpy as np\n'), ((3670, 3696), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {}), '([0], [0], **kwargs)\n', (3676, 3696), False, 'from matplotlib.lines import Line2D\n'), ((698, 712), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (702, 712), False, 'from pathlib import Path\n'), ((3999, 4013), 'numpy.ptp', 'np.ptp', (['ylim_1'], {}), '(ylim_1)\n', (4005, 4013), True, 'import numpy as np\n'), ((4167, 4181), 'numpy.ptp', 'np.ptp', (['ylim_1'], {}), '(ylim_1)\n', (4173, 4181), True, 'import numpy as np\n'), ((4840, 4858), 'operator.attrgetter', 'attrgetter', (['"""name"""'], {}), "('name')\n", (4850, 4858), False, 'from operator import add, attrgetter, sub\n')] |
import numpy as np
def predict(sample):
    """Classify ``sample`` with a fixed 3-3-3 feed-forward network.

    Both weight matrices are hard-coded; a sigmoid activation follows each
    layer and the class with the largest output wins.

    Parameters
    ----------
    sample : numpy.ndarray
        Input vector with 3 entries (shape ``(3,)`` or ``(3, 1)``).

    Returns
    -------
    str
        The predicted label ("Klasse 1", "Klasse 2" or "Klasse 3").  The
        label is also printed, preserving the original behaviour; the
        return value makes the function reusable and testable.
    """
    matrix_w_input_hidden = np.array([[0.9, 0.3, 0.4], [0.2, 0.8, 0.2], [0.1, 0.5, 0.6]])
    matrix_w_hidden_output = np.array([[0.3, 0.7, 0.5], [0.6, 0.5, 0.2], [0.8, 0.1, 0.9]])
    # Input layer -> hidden layer: weighted sum, then element-wise sigmoid.
    z = matrix_w_input_hidden @ sample
    hidden = 1 / (1 + np.exp(-z))
    # Hidden layer -> output layer.
    z = matrix_w_hidden_output @ hidden
    o = 1 / (1 + np.exp(-z))
    # Index of the largest output determines the class (0-based).
    index = np.argmax(o)
    label = "Klasse {}".format(index + 1)
    print(label)
    return label
# Demo: classify a fixed input vector (shape (3,)).
i = np.array([0.9, 0.1, 0.8])
predict(i)
# Demo: classify a random column vector (shape (3, 1)).
random_vektor = np.random.rand(3, 1)
predict(random_vektor)
| [
"numpy.random.rand",
"numpy.array",
"numpy.exp",
"numpy.argmax"
] | [((782, 807), 'numpy.array', 'np.array', (['[0.9, 0.1, 0.8]'], {}), '([0.9, 0.1, 0.8])\n', (790, 807), True, 'import numpy as np\n'), ((835, 855), 'numpy.random.rand', 'np.random.rand', (['(3)', '(1)'], {}), '(3, 1)\n', (849, 855), True, 'import numpy as np\n'), ((70, 131), 'numpy.array', 'np.array', (['[[0.9, 0.3, 0.4], [0.2, 0.8, 0.2], [0.1, 0.5, 0.6]]'], {}), '([[0.9, 0.3, 0.4], [0.2, 0.8, 0.2], [0.1, 0.5, 0.6]])\n', (78, 131), True, 'import numpy as np\n'), ((161, 222), 'numpy.array', 'np.array', (['[[0.3, 0.7, 0.5], [0.6, 0.5, 0.2], [0.8, 0.1, 0.9]]'], {}), '([[0.3, 0.7, 0.5], [0.6, 0.5, 0.2], [0.8, 0.1, 0.9]])\n', (169, 222), True, 'import numpy as np\n'), ((635, 647), 'numpy.argmax', 'np.argmax', (['o'], {}), '(o)\n', (644, 647), True, 'import numpy as np\n'), ((432, 442), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (438, 442), True, 'import numpy as np\n'), ((554, 564), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (560, 564), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Distributed under the terms of the MIT License.
"""
Script to plot the chemical space of molecules.
Author: <NAME>
Date Created: 16 Feb 2020
"""
from os.path import exists
import sys
import glob
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import json
import numpy as np
import pandas as pd
from reaction import KEGG_IDs_to_ignore
import plotting_fn as pfn
def cs_purch(purch, not_purch):
    """Normalised histogram of intermediate diameters, split by purchasability.

    Saves the figure to ``chemical_space_purch.pdf``.
    """
    fig, ax = plt.subplots(figsize=(8, 5))
    bar_width = 0.5
    bins = np.arange(0, 15.5, bar_width)
    # Purchasable series is drawn first, matching legend order.
    series = (
        (purch, '#FA7268', 'purchasable'),
        (not_purch, '#DAF7A6', 'not purchasable'),
    )
    for values, colour, label in series:
        counts, edges = np.histogram(a=values, bins=bins, density=True)
        ax.bar(
            edges[:-1],
            counts,
            align='edge',
            alpha=0.8,
            width=bar_width,
            color=colour,
            edgecolor='k',
            label=label,
        )
    # Hatched band: target diameter window for the framework materials.
    ax.axvspan(xmin=4.0, xmax=6.6, facecolor='k', alpha=0.2, hatch="/")
    # HOF size limit.
    ax.axvline(x=13.1, c='k', lw=2, linestyle='--')
    ax.legend(fontsize=16)
    pfn.define_standard_plot(
        ax,
        xtitle=r'intermediate diameter [$\mathrm{\AA}$]',
        ytitle='frequency',
    )
    fig.tight_layout()
    fig.savefig(
        'chemical_space_purch.pdf',
        dpi=720,
        bbox_inches='tight'
    )
def cs_purchCT(purch, not_purch):
    """Count histogram of BertzCT complexity, split by purchasability.

    Saves the figure to ``chemical_space_purchCT.pdf``.
    """
    fig, ax = plt.subplots(figsize=(8, 5))
    bar_width = 50
    bins = np.arange(0, 2000, bar_width)
    # Purchasable series is drawn first, matching legend order.
    for values, colour, label in (
        (purch, '#FA7268', 'purchasable'),
        (not_purch, '#DAF7A6', 'not purchasable'),
    ):
        counts, edges = np.histogram(a=values, bins=bins, density=False)
        ax.bar(
            edges[:-1],
            counts,
            align='edge',
            alpha=0.8,
            width=bar_width,
            color=colour,
            edgecolor='k',
            label=label,
        )
    ax.legend(fontsize=16)
    pfn.define_standard_plot(
        ax,
        xtitle=r'BertzCT',
        ytitle='frequency',
    )
    fig.tight_layout()
    fig.savefig(
        'chemical_space_purchCT.pdf',
        dpi=720,
        bbox_inches='tight'
    )
def cs_MW(Xs, Ys):
    """2D histogram of intermediate diameter against molecular weight.

    Saves the figure to ``chemical_space_MW.pdf``.
    """
    fig, ax = plt.subplots(figsize=(8, 5))
    xlim = (0, 550)
    ylim = (0, 17)
    # White -> dark-blue colour map for the 2D histogram counts.
    cmap = colors.LinearSegmentedColormap.from_list(
        'test', [(1.0, 1.0, 1.0), (44 / 255, 62 / 255, 80 / 255)], N=10
    )
    fig, ax, hist = pfn.twoD_histogram(
        X_data=Xs,
        Y_data=Ys,
        xlim=xlim,
        ylim=ylim,
        cmap=cmap,
        fig=fig,
        ax=ax
    )
    cbar = fig.colorbar(hist[3], ax=ax)
    cbar.ax.set_ylabel('count', fontsize=16)
    cbar.ax.tick_params(labelsize=16)
    # Shaded band: target diameter window.
    ax.axhspan(ymin=4.0, ymax=6.6, facecolor='k', alpha=0.2)
    pfn.define_standard_plot(
        ax,
        ylim=ylim,
        xlim=xlim,
        ytitle=r'intermediate diameter [$\mathrm{\AA}$]',
        xtitle=r'MW [g/mol]',
    )
    fig.tight_layout()
    fig.savefig(
        'chemical_space_MW.pdf',
        dpi=720,
        bbox_inches='tight'
    )
def cs_NHA(Xs, Ys):
    """2D histogram of intermediate diameter against number of heavy atoms.

    Saves the figure to ``chemical_space_NHA.pdf``.
    """
    fig, ax = plt.subplots(figsize=(8, 5))
    xlim = (0, 40)
    ylim = (0, 17)
    # White -> dark-blue colour map for the 2D histogram counts.
    cmap = colors.LinearSegmentedColormap.from_list(
        'test', [(1.0, 1.0, 1.0), (44 / 255, 62 / 255, 80 / 255)], N=10
    )
    fig, ax, hist = pfn.twoD_histogram(
        X_data=Xs,
        Y_data=Ys,
        xlim=xlim,
        ylim=ylim,
        cmap=cmap,
        fig=fig,
        ax=ax
    )
    cbar = fig.colorbar(hist[3], ax=ax)
    cbar.ax.set_ylabel('count', fontsize=16)
    cbar.ax.tick_params(labelsize=16)
    # Shaded band: target diameter window.
    ax.axhspan(ymin=4.0, ymax=6.6, facecolor='k', alpha=0.2)
    pfn.define_standard_plot(
        ax,
        ylim=ylim,
        xlim=xlim,
        ytitle=r'intermediate diameter [$\mathrm{\AA}$]',
        xtitle=r'no. heavy atoms',
    )
    fig.tight_layout()
    fig.savefig(
        'chemical_space_NHA.pdf',
        dpi=720,
        bbox_inches='tight'
    )
def cs_NRB(Xs, Ys):
    """2D histogram of intermediate diameter against no. rotatable bonds.

    Saves the figure to ``chemical_space_NRB.pdf``.
    """
    fig, ax = plt.subplots(figsize=(8, 5))
    xlim = (-1, 30)
    ylim = (0, 17)
    # White -> dark-blue colour map for the 2D histogram counts.
    cmap = colors.LinearSegmentedColormap.from_list(
        'test', [(1.0, 1.0, 1.0), (44 / 255, 62 / 255, 80 / 255)], N=10
    )
    fig, ax, hist = pfn.twoD_histogram(
        X_data=Xs,
        Y_data=Ys,
        xlim=xlim,
        ylim=ylim,
        cmap=cmap,
        fig=fig,
        ax=ax
    )
    cbar = fig.colorbar(hist[3], ax=ax)
    cbar.ax.set_ylabel('count', fontsize=16)
    cbar.ax.tick_params(labelsize=16)
    # Shaded band: target diameter window.
    ax.axhspan(ymin=4.0, ymax=6.6, facecolor='k', alpha=0.2)
    pfn.define_standard_plot(
        ax,
        ylim=ylim,
        xlim=xlim,
        ytitle=r'intermediate diameter [$\mathrm{\AA}$]',
        xtitle=r'no. rotatable bonds',
    )
    fig.tight_layout()
    fig.savefig(
        'chemical_space_NRB.pdf',
        dpi=720,
        bbox_inches='tight'
    )
def cs_NRBr(Xs, Ys):
    """2D histogram of intermediate diameter against rotatable-bond ratio.

    Saves the figure to ``chemical_space_NRBr.pdf``.
    """
    fig, ax = plt.subplots(figsize=(8, 5))
    xlim = (0, 1)
    ylim = (0, 17)
    # White -> dark-blue colour map for the 2D histogram counts.
    cmap = colors.LinearSegmentedColormap.from_list(
        'test', [(1.0, 1.0, 1.0), (44 / 255, 62 / 255, 80 / 255)], N=10
    )
    fig, ax, hist = pfn.twoD_histogram(
        X_data=Xs,
        Y_data=Ys,
        xlim=xlim,
        ylim=ylim,
        cmap=cmap,
        fig=fig,
        ax=ax
    )
    cbar = fig.colorbar(hist[3], ax=ax)
    cbar.ax.set_ylabel('count', fontsize=16)
    cbar.ax.tick_params(labelsize=16)
    # Shaded band: target diameter window.
    ax.axhspan(ymin=4.0, ymax=6.6, facecolor='k', alpha=0.2)
    pfn.define_standard_plot(
        ax,
        ylim=ylim,
        xlim=xlim,
        ytitle=r'intermediate diameter [$\mathrm{\AA}$]',
        xtitle=r'no. rotatable bonds / no. heavy bonds',
    )
    fig.tight_layout()
    fig.savefig(
        'chemical_space_NRBr.pdf',
        dpi=720,
        bbox_inches='tight'
    )
def cs_sol(logPs, logSs, HlogPs, HlogSs):
    """2D histogram of aqueous logS against logP, with highlighted molecules.

    ``HlogPs``/``HlogSs`` are drawn on top as red scatter markers.
    Saves the figure to ``chemical_space_sol.pdf``.
    """
    fig, ax = plt.subplots(figsize=(8, 5))
    xlim = (-9, 14)
    ylim = (-13, 4)
    # White -> dark-blue colour map for the 2D histogram counts.
    cmap = colors.LinearSegmentedColormap.from_list(
        'test', [(1.0, 1.0, 1.0), (44 / 255, 62 / 255, 80 / 255)], N=10
    )
    fig, ax, hist = pfn.twoD_histogram(
        X_data=logPs,
        Y_data=logSs,
        xlim=xlim,
        ylim=ylim,
        cmap=cmap,
        fig=fig,
        ax=ax
    )
    cbar = fig.colorbar(hist[3], ax=ax)
    cbar.ax.set_ylabel('count', fontsize=16)
    cbar.ax.tick_params(labelsize=16)
    # Highlighted molecules drawn on top in red.
    ax.scatter(
        HlogPs,
        HlogSs,
        c='#E74C3C',
        edgecolors='k',
        marker='o',
        alpha=1.0,
        s=80
    )
    pfn.define_standard_plot(
        ax,
        ylim=ylim,
        xlim=xlim,
        xtitle=r'logP',
        ytitle=r'logS$_{\mathrm{w}}$',
    )
    fig.tight_layout()
    fig.savefig(
        'chemical_space_sol.pdf',
        dpi=720,
        bbox_inches='tight'
    )
def cs_logPvsNHA(logPs, Xs, HlogPs, HXs):
    """2D histogram of logP against no. heavy atoms, with highlighted molecules.

    ``HXs``/``HlogPs`` are drawn on top as red scatter markers.
    Saves the figure to ``chemical_space_logPNHA.pdf``.
    """
    fig, ax = plt.subplots(figsize=(8, 5))
    xlim = (0, 40)
    ylim = (-9, 14)
    # White -> dark-blue colour map for the 2D histogram counts.
    cmap = colors.LinearSegmentedColormap.from_list(
        'test', [(1.0, 1.0, 1.0), (44 / 255, 62 / 255, 80 / 255)], N=10
    )
    fig, ax, hist = pfn.twoD_histogram(
        X_data=Xs,
        Y_data=logPs,
        xlim=xlim,
        ylim=ylim,
        cmap=cmap,
        fig=fig,
        ax=ax
    )
    cbar = fig.colorbar(hist[3], ax=ax)
    cbar.ax.set_ylabel('count', fontsize=16)
    cbar.ax.tick_params(labelsize=16)
    # Highlighted molecules drawn on top in red.
    ax.scatter(
        HXs,
        HlogPs,
        c='#E74C3C',
        edgecolors='k',
        marker='o',
        alpha=1.0,
        s=120
    )
    pfn.define_standard_plot(
        ax,
        ylim=ylim,
        xlim=xlim,
        ytitle=r'logP',
        xtitle=r'no. heavy atoms',
    )
    fig.tight_layout()
    fig.savefig(
        'chemical_space_logPNHA.pdf',
        dpi=720,
        bbox_inches='tight'
    )
def chemical_space_plot():
    """
    Output chemical space plot.
    """
    # Every molecule in the working directory with an unoptimised structure.
    molecule_list = glob.glob('*_unopt.mol')
    print(f'{len(molecule_list)} molecules in DB.')
    # KEGG IDs highlighted in the logP/logS plots.
    KEGG_IDs_to_highlight = [
        'C01387', 'C00756',
        # 'C00123', 'C00183', 'C00041',
        # 'C00079', 'C00407', 'C00078', 'C00073', 'C00082'
    ]
    # Parallel per-molecule accumulators.
    Xs = []  # no. heavy atoms
    Ys = []  # minimum intermediate diameter
    MWs = []  # molecular weight
    NRBs = []  # no. rotatable bonds
    NRBrs = []  # rotatable bonds / heavy bonds ratio
    Zs = []  # purchasability flag
    purch = []  # diameters of purchasable molecules
    not_purch = []  # diameters of non-purchasable molecules
    purch_CT = []  # BertzCT of purchasable molecules
    not_purch_CT = []  # BertzCT of non-purchasable molecules
    logPs = []
    logSs = []
    HlogPs = []  # highlighted-subset logP
    HXs = []  # highlighted-subset no. heavy atoms
    HlogSs = []  # highlighted-subset logS
    COUNTER = 0  # molecules with intermediate diameter < 6.6 A
    for mol in sorted(molecule_list):
        name = mol.replace('_unopt.mol', '')
        # Companion files produced by earlier pipeline stages.
        etkdg_fail = name+'_unopt.ETKDGFAILED'
        diam_file = name+'_size.csv'
        toobig_file = name+'_size.TOOBIG'
        prop_file = name+'_prop.json'
        # Skip molecules flagged too big, failed embeddings, or ignored IDs.
        if exists(toobig_file):
            continue
        if exists(etkdg_fail):
            continue
        if name in KEGG_IDs_to_ignore():
            continue
        if exists(prop_file) and exists(diam_file):
            # Get molecular properties from 2D structure.
            with open(prop_file, 'r') as f:
                prop_dict = json.load(f)
            # Get size and update output lists.
            results = pd.read_csv(diam_file)
            min_mid_diam = min(results['diam2'])
            Xs.append(prop_dict['NHA'])
            MWs.append(prop_dict['MW'])
            NRBs.append(prop_dict['NRB'])
            NRBrs.append(prop_dict['NRBr'])
            Zs.append(prop_dict['purchasability'])
            Ys.append(min_mid_diam)
            logPs.append(prop_dict['logP'])
            logSs.append(prop_dict['logS'])
            if name in KEGG_IDs_to_highlight:
                HlogPs.append(prop_dict['logP'])
                HlogSs.append(prop_dict['logS'])
                HXs.append(prop_dict['NHA'])
            if prop_dict['purchasability']:
                purch.append(min_mid_diam)
                purch_CT.append(prop_dict['bertzCT'])
            else:
                not_purch.append(min_mid_diam)
                not_purch_CT.append(prop_dict['bertzCT'])
            # Report unusually flexible or large molecules for inspection.
            if prop_dict['NRB'] > 10 or min_mid_diam > 10:
                print(
                    name,
                    prop_dict['NRB'],
                    prop_dict['NRBr'],
                    min_mid_diam
                )
            if min_mid_diam < 6.6:
                COUNTER += 1
    print(f'{COUNTER} with intermediate diameter < 6.6 A')
    # rn [
    #     '#FA7268', '#F8A72A', '#DAF7A6', '#900C3F', '#6BADB0',
    #     '#DB869D', '#F6D973', 'mediumvioletred'
    # Produce all chemical-space figures from the collected properties.
    cs_purch(purch, not_purch)
    cs_purchCT(purch_CT, not_purch_CT)
    cs_MW(Xs=MWs, Ys=Ys)
    cs_NHA(Xs=Xs, Ys=Ys)
    cs_NRB(Xs=NRBs, Ys=Ys)
    cs_NRBr(Xs=NRBrs, Ys=Ys)
    cs_sol(logPs, logSs, HlogPs, HlogSs)
    cs_logPvsNHA(logPs, Xs, HlogPs, HXs)
def main():
    """CLI entry point: validate usage, then draw the chemical-space plots.

    The script takes no command-line arguments; anything extra prints a
    usage message and exits.
    """
    # sys.argv always holds the script name; any additional argument is an error.
    if len(sys.argv) != 1:
        print("""
    Usage: chemical_space_plot.py
    """)
        sys.exit()
    chemical_space_plot()
# Standard script guard: only run main() when executed directly, not on import.
if __name__ == "__main__":
    main()
| [
"matplotlib.colors.LinearSegmentedColormap.from_list",
"plotting_fn.define_standard_plot",
"json.load",
"pandas.read_csv",
"reaction.KEGG_IDs_to_ignore",
"os.path.exists",
"numpy.histogram",
"plotting_fn.twoD_histogram",
"numpy.arange",
"glob.glob",
"matplotlib.pyplot.subplots",
"sys.exit"
] | [((482, 510), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (494, 510), True, 'import matplotlib.pyplot as plt\n'), ((2626, 2730), 'plotting_fn.define_standard_plot', 'pfn.define_standard_plot', (['ax'], {'xtitle': '"""intermediate diameter [$\\\\mathrm{\\\\AA}$]"""', 'ytitle': '"""frequency"""'}), "(ax, xtitle=\n 'intermediate diameter [$\\\\mathrm{\\\\AA}$]', ytitle='frequency')\n", (2650, 2730), True, 'import plotting_fn as pfn\n'), ((2976, 3004), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (2988, 3004), True, 'import matplotlib.pyplot as plt\n'), ((4029, 4095), 'plotting_fn.define_standard_plot', 'pfn.define_standard_plot', (['ax'], {'xtitle': '"""BertzCT"""', 'ytitle': '"""frequency"""'}), "(ax, xtitle='BertzCT', ytitle='frequency')\n", (4053, 4095), True, 'import plotting_fn as pfn\n'), ((4335, 4363), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (4347, 4363), True, 'import matplotlib.pyplot as plt\n'), ((4465, 4523), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'colors.LinearSegmentedColormap.from_list', (['"""test"""', 'CS'], {'N': '(10)'}), "('test', CS, N=10)\n", (4505, 4523), True, 'import matplotlib.colors as colors\n'), ((4544, 4636), 'plotting_fn.twoD_histogram', 'pfn.twoD_histogram', ([], {'X_data': 'Xs', 'Y_data': 'Ys', 'xlim': 'xlim', 'ylim': 'ylim', 'cmap': 'cm', 'fig': 'fig', 'ax': 'ax'}), '(X_data=Xs, Y_data=Ys, xlim=xlim, ylim=ylim, cmap=cm, fig\n =fig, ax=ax)\n', (4562, 4636), True, 'import plotting_fn as pfn\n'), ((5399, 5526), 'plotting_fn.define_standard_plot', 'pfn.define_standard_plot', (['ax'], {'ylim': 'ylim', 'xlim': 'xlim', 'ytitle': '"""intermediate diameter [$\\\\mathrm{\\\\AA}$]"""', 'xtitle': '"""MW [g/mol]"""'}), "(ax, ylim=ylim, xlim=xlim, ytitle=\n 'intermediate diameter [$\\\\mathrm{\\\\AA}$]', xtitle='MW [g/mol]')\n", (5423, 5526), True, 'import plotting_fn as 
pfn\n'), ((5772, 5800), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (5784, 5800), True, 'import matplotlib.pyplot as plt\n'), ((5901, 5959), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'colors.LinearSegmentedColormap.from_list', (['"""test"""', 'CS'], {'N': '(10)'}), "('test', CS, N=10)\n", (5941, 5959), True, 'import matplotlib.colors as colors\n'), ((5980, 6072), 'plotting_fn.twoD_histogram', 'pfn.twoD_histogram', ([], {'X_data': 'Xs', 'Y_data': 'Ys', 'xlim': 'xlim', 'ylim': 'ylim', 'cmap': 'cm', 'fig': 'fig', 'ax': 'ax'}), '(X_data=Xs, Y_data=Ys, xlim=xlim, ylim=ylim, cmap=cm, fig\n =fig, ax=ax)\n', (5998, 6072), True, 'import plotting_fn as pfn\n'), ((6842, 6974), 'plotting_fn.define_standard_plot', 'pfn.define_standard_plot', (['ax'], {'ylim': 'ylim', 'xlim': 'xlim', 'ytitle': '"""intermediate diameter [$\\\\mathrm{\\\\AA}$]"""', 'xtitle': '"""no. heavy atoms"""'}), "(ax, ylim=ylim, xlim=xlim, ytitle=\n 'intermediate diameter [$\\\\mathrm{\\\\AA}$]', xtitle='no. 
heavy atoms')\n", (6866, 6974), True, 'import plotting_fn as pfn\n'), ((7221, 7249), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (7233, 7249), True, 'import matplotlib.pyplot as plt\n'), ((7351, 7409), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'colors.LinearSegmentedColormap.from_list', (['"""test"""', 'CS'], {'N': '(10)'}), "('test', CS, N=10)\n", (7391, 7409), True, 'import matplotlib.colors as colors\n'), ((7430, 7522), 'plotting_fn.twoD_histogram', 'pfn.twoD_histogram', ([], {'X_data': 'Xs', 'Y_data': 'Ys', 'xlim': 'xlim', 'ylim': 'ylim', 'cmap': 'cm', 'fig': 'fig', 'ax': 'ax'}), '(X_data=Xs, Y_data=Ys, xlim=xlim, ylim=ylim, cmap=cm, fig\n =fig, ax=ax)\n', (7448, 7522), True, 'import plotting_fn as pfn\n'), ((8286, 8422), 'plotting_fn.define_standard_plot', 'pfn.define_standard_plot', (['ax'], {'ylim': 'ylim', 'xlim': 'xlim', 'ytitle': '"""intermediate diameter [$\\\\mathrm{\\\\AA}$]"""', 'xtitle': '"""no. rotatable bonds"""'}), "(ax, ylim=ylim, xlim=xlim, ytitle=\n 'intermediate diameter [$\\\\mathrm{\\\\AA}$]', xtitle='no. 
rotatable bonds')\n", (8310, 8422), True, 'import plotting_fn as pfn\n'), ((8670, 8698), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (8682, 8698), True, 'import matplotlib.pyplot as plt\n'), ((8798, 8856), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'colors.LinearSegmentedColormap.from_list', (['"""test"""', 'CS'], {'N': '(10)'}), "('test', CS, N=10)\n", (8838, 8856), True, 'import matplotlib.colors as colors\n'), ((8877, 8969), 'plotting_fn.twoD_histogram', 'pfn.twoD_histogram', ([], {'X_data': 'Xs', 'Y_data': 'Ys', 'xlim': 'xlim', 'ylim': 'ylim', 'cmap': 'cm', 'fig': 'fig', 'ax': 'ax'}), '(X_data=Xs, Y_data=Ys, xlim=xlim, ylim=ylim, cmap=cm, fig\n =fig, ax=ax)\n', (8895, 8969), True, 'import plotting_fn as pfn\n'), ((9733, 9892), 'plotting_fn.define_standard_plot', 'pfn.define_standard_plot', (['ax'], {'ylim': 'ylim', 'xlim': 'xlim', 'ytitle': '"""intermediate diameter [$\\\\mathrm{\\\\AA}$]"""', 'xtitle': '"""no. rotatable bonds / no. heavy bonds"""'}), "(ax, ylim=ylim, xlim=xlim, ytitle=\n 'intermediate diameter [$\\\\mathrm{\\\\AA}$]', xtitle=\n 'no. rotatable bonds / no. 
heavy bonds')\n", (9757, 9892), True, 'import plotting_fn as pfn\n'), ((10157, 10185), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (10169, 10185), True, 'import matplotlib.pyplot as plt\n'), ((10288, 10346), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'colors.LinearSegmentedColormap.from_list', (['"""test"""', 'CS'], {'N': '(10)'}), "('test', CS, N=10)\n", (10328, 10346), True, 'import matplotlib.colors as colors\n'), ((10367, 10465), 'plotting_fn.twoD_histogram', 'pfn.twoD_histogram', ([], {'X_data': 'logPs', 'Y_data': 'logSs', 'xlim': 'xlim', 'ylim': 'ylim', 'cmap': 'cm', 'fig': 'fig', 'ax': 'ax'}), '(X_data=logPs, Y_data=logSs, xlim=xlim, ylim=ylim, cmap=\n cm, fig=fig, ax=ax)\n', (10385, 10465), True, 'import plotting_fn as pfn\n'), ((10803, 10904), 'plotting_fn.define_standard_plot', 'pfn.define_standard_plot', (['ax'], {'ylim': 'ylim', 'xlim': 'xlim', 'xtitle': '"""logP"""', 'ytitle': '"""logS$_{\\\\mathrm{w}}$"""'}), "(ax, ylim=ylim, xlim=xlim, xtitle='logP', ytitle=\n 'logS$_{\\\\mathrm{w}}$')\n", (10827, 10904), True, 'import plotting_fn as pfn\n'), ((11174, 11202), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (11186, 11202), True, 'import matplotlib.pyplot as plt\n'), ((11304, 11362), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'colors.LinearSegmentedColormap.from_list', (['"""test"""', 'CS'], {'N': '(10)'}), "('test', CS, N=10)\n", (11344, 11362), True, 'import matplotlib.colors as colors\n'), ((11383, 11477), 'plotting_fn.twoD_histogram', 'pfn.twoD_histogram', ([], {'X_data': 'Xs', 'Y_data': 'logPs', 'xlim': 'xlim', 'ylim': 'ylim', 'cmap': 'cm', 'fig': 'fig', 'ax': 'ax'}), '(X_data=Xs, Y_data=logPs, xlim=xlim, ylim=ylim, cmap=cm,\n fig=fig, ax=ax)\n', (11401, 11477), True, 'import plotting_fn as pfn\n'), ((11814, 11910), 'plotting_fn.define_standard_plot', 'pfn.define_standard_plot', (['ax'], {'ylim': 'ylim', 'xlim': 'xlim', 
'ytitle': '"""logP"""', 'xtitle': '"""no. heavy atoms"""'}), "(ax, ylim=ylim, xlim=xlim, ytitle='logP', xtitle=\n 'no. heavy atoms')\n", (11838, 11910), True, 'import plotting_fn as pfn\n'), ((12226, 12250), 'glob.glob', 'glob.glob', (['"""*_unopt.mol"""'], {}), "('*_unopt.mol')\n", (12235, 12250), False, 'import glob\n'), ((1125, 1150), 'numpy.arange', 'np.arange', (['(0)', '(15.5)', 'width'], {}), '(0, 15.5, width)\n', (1134, 1150), True, 'import numpy as np\n'), ((1177, 1224), 'numpy.histogram', 'np.histogram', ([], {'a': 'data', 'bins': 'X_bins', 'density': '(True)'}), '(a=data, bins=X_bins, density=True)\n', (1189, 1224), True, 'import numpy as np\n'), ((3618, 3643), 'numpy.arange', 'np.arange', (['(0)', '(2000)', 'width'], {}), '(0, 2000, width)\n', (3627, 3643), True, 'import numpy as np\n'), ((3670, 3718), 'numpy.histogram', 'np.histogram', ([], {'a': 'data', 'bins': 'X_bins', 'density': '(False)'}), '(a=data, bins=X_bins, density=False)\n', (3682, 3718), True, 'import numpy as np\n'), ((12972, 12991), 'os.path.exists', 'exists', (['toobig_file'], {}), '(toobig_file)\n', (12978, 12991), False, 'from os.path import exists\n'), ((13025, 13043), 'os.path.exists', 'exists', (['etkdg_fail'], {}), '(etkdg_fail)\n', (13031, 13043), False, 'from os.path import exists\n'), ((15105, 15115), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15113, 15115), False, 'import sys\n'), ((13085, 13105), 'reaction.KEGG_IDs_to_ignore', 'KEGG_IDs_to_ignore', ([], {}), '()\n', (13103, 13105), False, 'from reaction import KEGG_IDs_to_ignore\n'), ((13140, 13157), 'os.path.exists', 'exists', (['prop_file'], {}), '(prop_file)\n', (13146, 13157), False, 'from os.path import exists\n'), ((13162, 13179), 'os.path.exists', 'exists', (['diam_file'], {}), '(diam_file)\n', (13168, 13179), False, 'from os.path import exists\n'), ((13394, 13416), 'pandas.read_csv', 'pd.read_csv', (['diam_file'], {}), '(diam_file)\n', (13405, 13416), True, 'import pandas as pd\n'), ((13311, 13323), 'json.load', 
'json.load', (['f'], {}), '(f)\n', (13320, 13323), False, 'import json\n')] |
import os
import sys
import yaml
import shutil
import numpy as np
from PIL import Image
from tqdm import tqdm
from subprocess import call
def mkdir(folder):
    """Ensure *folder* exists and starts out empty.

    A pre-existing directory at the same path (together with all of its
    contents) is deleted before a fresh one is created.
    """
    stale = os.path.exists(folder)
    if stale:
        # Throw away output left over from a previous run.
        shutil.rmtree(folder)
    os.makedirs(folder)
# Convert per-frame .npy dumps for one policy into PNGs and mp4 videos.
# Each subfolder of test_folder holds paired files frame_<i>.npy (input)
# and output_<i>.npy (output); both streams are rendered with ffmpeg.
policy = 'single_obs_1_food'
output_video_folder = 'output_progressive_' + policy
test_folder = 'data_arr_progressive_' + policy
test_files = os.listdir(test_folder)
mkdir(output_video_folder)
for p_file in tqdm(test_files):
    filepath = os.path.join(test_folder, p_file)
    output_video_filepath = os.path.join(output_video_folder, p_file)
    mkdir(output_video_filepath)
    # Half the files are inputs, half are outputs, hence the division by 2.
    num_frames = int(len(os.listdir(filepath)) / 2)
    for frame_idx in range(num_frames):
        # load input and output
        input_filename = 'frame_' + str(frame_idx) + '.npy'
        input_filepath = os.path.join(filepath, input_filename)
        output_filename = 'output_' + str(frame_idx) + '.npy'
        output_filepath = os.path.join(filepath, output_filename)
        input_arr = np.load(input_filepath)
        output_arr = np.load(output_filepath)
        # Arrays are assumed to already be 0-255 image data — TODO confirm.
        input_img = Image.fromarray(input_arr.astype('uint8'))
        output_img = Image.fromarray(output_arr.astype('uint8'))
        input_img.save(os.path.join(output_video_filepath, 'input_' + str(frame_idx) + '.png'))
        output_img.save(os.path.join(output_video_filepath, 'output_' + str(frame_idx) + '.png'))
    # Stitch the numbered PNGs into 30 fps mp4s with ffmpeg.
    i_command = "ffmpeg -r 30 -f image2 -s 1920x1080 -i " + output_video_filepath + "/input_%1d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p " + output_video_filepath + "/input_video.mp4" + " -loglevel quiet"
    o_command = "ffmpeg -r 30 -f image2 -s 1920x1080 -i " + output_video_filepath + "/output_%1d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p " + output_video_filepath + "/output_video.mp4" + " -loglevel quiet"
    # NOTE(review): exit_code is assigned but never checked, so ffmpeg
    # failures pass silently.
    exit_code = call(i_command, shell=True)
    exit_code = call(o_command, shell=True)
| [
"tqdm.tqdm",
"numpy.load",
"os.makedirs",
"os.path.exists",
"subprocess.call",
"shutil.rmtree",
"os.path.join",
"os.listdir"
] | [((393, 416), 'os.listdir', 'os.listdir', (['test_folder'], {}), '(test_folder)\n', (403, 416), False, 'import os\n'), ((458, 474), 'tqdm.tqdm', 'tqdm', (['test_files'], {}), '(test_files)\n', (462, 474), False, 'from tqdm import tqdm\n'), ((170, 192), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (184, 192), False, 'import os\n'), ((228, 247), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (239, 247), False, 'import os\n'), ((491, 524), 'os.path.join', 'os.path.join', (['test_folder', 'p_file'], {}), '(test_folder, p_file)\n', (503, 524), False, 'import os\n'), ((553, 594), 'os.path.join', 'os.path.join', (['output_video_folder', 'p_file'], {}), '(output_video_folder, p_file)\n', (565, 594), False, 'import os\n'), ((1857, 1884), 'subprocess.call', 'call', (['i_command'], {'shell': '(True)'}), '(i_command, shell=True)\n', (1861, 1884), False, 'from subprocess import call\n'), ((1901, 1928), 'subprocess.call', 'call', (['o_command'], {'shell': '(True)'}), '(o_command, shell=True)\n', (1905, 1928), False, 'from subprocess import call\n'), ((202, 223), 'shutil.rmtree', 'shutil.rmtree', (['folder'], {}), '(folder)\n', (215, 223), False, 'import shutil\n'), ((837, 875), 'os.path.join', 'os.path.join', (['filepath', 'input_filename'], {}), '(filepath, input_filename)\n', (849, 875), False, 'import os\n'), ((964, 1003), 'os.path.join', 'os.path.join', (['filepath', 'output_filename'], {}), '(filepath, output_filename)\n', (976, 1003), False, 'import os\n'), ((1024, 1047), 'numpy.load', 'np.load', (['input_filepath'], {}), '(input_filepath)\n', (1031, 1047), True, 'import numpy as np\n'), ((1069, 1093), 'numpy.load', 'np.load', (['output_filepath'], {}), '(output_filepath)\n', (1076, 1093), True, 'import numpy as np\n'), ((653, 673), 'os.listdir', 'os.listdir', (['filepath'], {}), '(filepath)\n', (663, 673), False, 'import os\n')] |
import tensorflow as tf
from lanenet_model import hnet_model
from data_provider import hnet_data_processor
import numpy as np
import cv2
import glob
import argparse
# Command-line interface: choose pretrain/train phase and optional checkpoints.
parser = argparse.ArgumentParser()
parser.add_argument('--phase', type=str, help='The phase is train or pretrain')
parser.add_argument('--pre_hnet_weights', type=str, help='The pre hnet weights path')
parser.add_argument('--hnet_weights', type=str, help='The hnet model weights path')
args = parser.parse_args()
batch_size = 10
# Network inputs: 64x128 RGB crops and up to 56 (x, y, valid) lane points.
tensor_in = tf.placeholder(dtype=tf.float32, shape=[batch_size, 64, 128, 3])
gt_label_pts = tf.placeholder(dtype=tf.float32, shape=[batch_size, 56, 3])
net = hnet_model.HNet(is_training=True)
c_loss, coef, pre_loss = net.compute_loss(tensor_in, gt_label_pts=gt_label_pts, name='hnet')
# Save batch-norm moving statistics alongside the trainable variables.
var_list = tf.trainable_variables()
g_list = tf.global_variables()
bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]
bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]
var_list += bn_moving_vars
saver = tf.train.Saver(var_list=var_list, max_to_keep=5)
train_dataset = hnet_data_processor.DataSet(glob.glob('./data/tusimple_data/*.json'))
# Run BN update ops before each optimizer step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    pre_optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss=pre_loss, var_list=tf.trainable_variables())
    optimizer = tf.train.AdamOptimizer(learning_rate=0.00005).minimize(loss=c_loss, var_list=tf.trainable_variables())
with tf.Session() as sess:
    # Training session: 'pretrain' initialises the homography via pre_loss,
    # 'train' optimises the full fitting loss; both periodically dump
    # diagnostics images and checkpoints.
    sess.run(tf.global_variables_initializer())
    # step1: training pre loss to initialize H matrix
    if args.phase == 'pretrain':
        print('Start pre train hnet......')
        if args.pre_hnet_weights:
            saver.restore(sess, args.pre_hnet_weights)
        for epoch in range(20005):
            image, label_pts = train_dataset.next_batch(batch_size)
            image = np.array(image)
            _, loss, coefficient = sess.run([pre_optimizer, pre_loss, coef], feed_dict={tensor_in: image})
            if epoch % 100 == 0:
                print('[{}] pretrain hnet pre loss = {}'.format(epoch, loss))
            if epoch % 1000 == 0:
                # Assemble the 6 predicted coefficients into the 3x3 homography.
                predict = coefficient[0]
                R = np.zeros([3, 3], np.float32)
                R[0, 0] = predict[0]
                R[0, 1] = predict[1]
                R[0, 2] = predict[2]
                R[1, 1] = predict[3]
                R[1, 2] = predict[4]
                R[2, 1] = predict[5]
                R[2, 2] = 1
                print(R)
                # Sanity-check the homography by warping the first image.
                warp_image = cv2.warpPerspective(image[0], R, dsize=(image[0].shape[1], image[0].shape[0]))
                cv2.imwrite("src.png", image[0])
                cv2.imwrite("ret.png", warp_image)
            if epoch % 5000 == 0:
                saver.save(sess=sess, save_path='./model/hnet/pre_hnet', global_step=epoch)
    elif args.phase == 'train':
        print('Start train hnet......')
        if args.hnet_weights:
            print('restore hnet weights......')
            saver.restore(sess, args.hnet_weights)
        elif args.pre_hnet_weights:
            print('restore pre hnet weights......')
            saver.restore(sess, args.pre_hnet_weights)
        else:
            print('train from scratch without H matrix initialize.')
        for epoch in range(20005):
            image, label_pts = train_dataset.next_batch(batch_size)
            # Rescale ground-truth points from the 1280x720 source frame to
            # the 128x64 network input (512/1280 * 0.25 and 256/720 * 0.25).
            label_pts = np.array(label_pts)
            label_pts[:, :, 0] = label_pts[:, :, 0] * (512. / 1280.) * 0.25
            label_pts[:, :, 1] = label_pts[:, :, 1] * (256. / 720.) * 0.25
            image = np.array(image)
            _, loss, coefficient = sess.run([optimizer, c_loss, coef], feed_dict={tensor_in: image, gt_label_pts: label_pts})
            if epoch % 50 == 0:
                print('epoch[{}], hnet training loss = {}'.format(epoch, loss))
            if epoch % 1000 == 0:
                # Assemble the 6 predicted coefficients into the 3x3 homography.
                predict = coefficient[0]
                R = np.zeros([3, 3], np.float32)
                R[0, 0] = predict[0]
                R[0, 1] = predict[1]
                R[0, 2] = predict[2]
                R[1, 1] = predict[3]
                R[1, 2] = predict[4]
                R[2, 1] = predict[5]
                R[2, 2] = 1
                print(R)
                # Keep only points whose validity flag (3rd component) is set.
                pts = label_pts[0]
                new_pts = []
                for k in range(len(pts)):
                    if pts[k][2] == 1:
                        new_pts.append(pts[k])
                new_pts = np.float32(new_pts)
                new_pts = np.transpose(new_pts, (1, 0))
                print(new_pts)
                # Project the valid points and normalise the homogeneous coord.
                trans_pts = np.matmul(R, new_pts)
                trans_pts = trans_pts / trans_pts[2, :]
                print(trans_pts)
                # BUGFIX: trans_pts has shape (3, N); the old
                # `range(len(trans_pts))` only visited the 3 rows instead of
                # the N points. cv2.circle also requires integer coordinates.
                for k in range(trans_pts.shape[1]):
                    cv2.circle(image[0], (int(trans_pts[0][k]), int(trans_pts[1][k])), 1, (0, 0, 255), 2)
                cv2.imwrite("src.png", image[0])
            if epoch % 1000 == 0:
                saver.save(sess=sess, save_path='./model/hnet/hnet', global_step=epoch)
            # (removed dead `epoch += 1`: the for-loop rebinds epoch anyway)
    else:
        print('Wrong phase!!!!!!')
"argparse.ArgumentParser",
"tensorflow.trainable_variables",
"tensorflow.get_collection",
"tensorflow.global_variables",
"glob.glob",
"cv2.warpPerspective",
"cv2.imwrite",
"numpy.transpose",
"tensorflow.placeholder",
"lanenet_model.hnet_model.HNet",
"tensorflow.control_dependencies",
"cv2.circ... | [((175, 200), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (198, 200), False, 'import argparse\n'), ((507, 571), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[batch_size, 64, 128, 3]'}), '(dtype=tf.float32, shape=[batch_size, 64, 128, 3])\n', (521, 571), True, 'import tensorflow as tf\n'), ((587, 646), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[batch_size, 56, 3]'}), '(dtype=tf.float32, shape=[batch_size, 56, 3])\n', (601, 646), True, 'import tensorflow as tf\n'), ((654, 687), 'lanenet_model.hnet_model.HNet', 'hnet_model.HNet', ([], {'is_training': '(True)'}), '(is_training=True)\n', (669, 687), False, 'from lanenet_model import hnet_model\n'), ((793, 817), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (815, 817), True, 'import tensorflow as tf\n'), ((827, 848), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (846, 848), True, 'import tensorflow as tf\n'), ((1017, 1065), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'var_list', 'max_to_keep': '(5)'}), '(var_list=var_list, max_to_keep=5)\n', (1031, 1065), True, 'import tensorflow as tf\n'), ((1167, 1209), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (1184, 1209), True, 'import tensorflow as tf\n'), ((1111, 1151), 'glob.glob', 'glob.glob', (['"""./data/tusimple_data/*.json"""'], {}), "('./data/tusimple_data/*.json')\n", (1120, 1151), False, 'import glob\n'), ((1215, 1250), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), '(update_ops)\n', (1238, 1250), True, 'import tensorflow as tf\n'), ((1501, 1513), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1511, 1513), True, 'import tensorflow as tf\n'), ((1536, 1569), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1567, 
1569), True, 'import tensorflow as tf\n'), ((1272, 1316), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (1294, 1316), True, 'import tensorflow as tf\n'), ((1350, 1374), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (1372, 1374), True, 'import tensorflow as tf\n'), ((1392, 1435), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(5e-05)'}), '(learning_rate=5e-05)\n', (1414, 1435), True, 'import tensorflow as tf\n'), ((1469, 1493), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (1491, 1493), True, 'import tensorflow as tf\n'), ((1915, 1930), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1923, 1930), True, 'import numpy as np\n'), ((2244, 2272), 'numpy.zeros', 'np.zeros', (['[3, 3]', 'np.float32'], {}), '([3, 3], np.float32)\n', (2252, 2272), True, 'import numpy as np\n'), ((2577, 2655), 'cv2.warpPerspective', 'cv2.warpPerspective', (['image[0]', 'R'], {'dsize': '(image[0].shape[1], image[0].shape[0])'}), '(image[0], R, dsize=(image[0].shape[1], image[0].shape[0]))\n', (2596, 2655), False, 'import cv2\n'), ((2672, 2704), 'cv2.imwrite', 'cv2.imwrite', (['"""src.png"""', 'image[0]'], {}), "('src.png', image[0])\n", (2683, 2704), False, 'import cv2\n'), ((2721, 2755), 'cv2.imwrite', 'cv2.imwrite', (['"""ret.png"""', 'warp_image'], {}), "('ret.png', warp_image)\n", (2732, 2755), False, 'import cv2\n'), ((3436, 3455), 'numpy.array', 'np.array', (['label_pts'], {}), '(label_pts)\n', (3444, 3455), True, 'import numpy as np\n'), ((3627, 3642), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3635, 3642), True, 'import numpy as np\n'), ((3976, 4004), 'numpy.zeros', 'np.zeros', (['[3, 3]', 'np.float32'], {}), '([3, 3], np.float32)\n', (3984, 4004), True, 'import numpy as np\n'), ((4498, 4517), 'numpy.float32', 'np.float32', (['new_pts'], {}), '(new_pts)\n', (4508, 4517), True, 'import 
numpy as np\n'), ((4544, 4573), 'numpy.transpose', 'np.transpose', (['new_pts', '(1, 0)'], {}), '(new_pts, (1, 0))\n', (4556, 4573), True, 'import numpy as np\n'), ((4633, 4654), 'numpy.matmul', 'np.matmul', (['R', 'new_pts'], {}), '(R, new_pts)\n', (4642, 4654), True, 'import numpy as np\n'), ((5013, 5045), 'cv2.imwrite', 'cv2.imwrite', (['"""src.png"""', 'image[0]'], {}), "('src.png', image[0])\n", (5024, 5045), False, 'import cv2\n'), ((4812, 4887), 'cv2.circle', 'cv2.circle', (['image[0]', '(trans_pts[0][k], trans_pts[1][k])', '(1)', '(0, 0, 255)', '(2)'], {}), '(image[0], (trans_pts[0][k], trans_pts[1][k]), 1, (0, 0, 255), 2)\n', (4822, 4887), False, 'import cv2\n')] |
import numpy as np
import tensorflow as tf
def reset_random_state(seed):
    """Seed the global NumPy and TensorFlow RNGs for reproducible runs."""
    # Apply the same seed to every framework RNG the project relies on.
    for seeder in (np.random.seed, tf.random.set_seed):
        seeder(seed)
| [
"tensorflow.random.set_seed",
"numpy.random.seed"
] | [((79, 99), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (93, 99), True, 'import numpy as np\n'), ((104, 128), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (122, 128), True, 'import tensorflow as tf\n')] |
# Texas A&M University
# Electronic Systems Engineering Technology
# ESET-420 Capstone II
# Author: <NAME>
# File: main.py
# --------
# Main is the heart of the program. It determines whether the device is
# connected to the internet, requests data from the Google Datastore
# database, then runs the HackRF and collects its data. Depending on the
# internet connection, the file is stored on the SD card or in Google Storage.
from pylibhackrf import hackrfCtrl
from i2c_lcd import I2cLcd
import customChar
import Adafruit_BBIO.GPIO as GPIO
import socket
import time
import serial
import os
#import timeit
import numpy as np
from google.cloud import storage
def introScreen():
    """Show the boot splash on the 16x2 LCD for three seconds."""
    lcd.clear()
    # Two banner lines, offset to sit roughly centred on the display.
    for col, row, text in ((2, 0, 'Wave TechUHF'), (4, 1, 'UHF Hub')):
        lcd.move_to(col, row)
        lcd.putstr(text)
    time.sleep(3)
def mainScreen():
    """Draw the idle screen: record-off symbol plus the current date/time."""
    # Load the four record-symbol halves into LCD CGRAM slots 0-3.
    for slot, variant in enumerate(('offleft', 'offright', 'onleft', 'onright')):
        lcd.custom_char(slot, customChar.RecordSym(variant))
    # Display the "off" symbol (slots 0 and 1) in the top-left corner.
    lcd.move_to(0, 0)
    lcd.putchar(chr(0))
    lcd.move_to(1, 0)
    lcd.putchar(chr(1))
    # Date/time stamp, e.g. "06/21 14:05".
    lcd.move_to(5, 0)
    lcd.putstr(time.strftime('%m/%d %H:%M', time.localtime()))
# I2C address of the 16x2 character LCD on bus 1.
LCD_I2C_ADDR = 0x3f
lcd = I2cLcd(1, LCD_I2C_ADDR, 2, 16)
introScreen()
# Imported after the splash is shown because the import is slow on the board.
from google.cloud import datastore
#Initialize global variables to be used
minFreq = 0
maxFreq = 0
samprate= 0
incremFreq = 0
timer1 = 0
timer2 = 0
# Poll intervals in seconds: TIMER1 refreshes parameters, TIMER2 triggers a capture.
TIMER1_TIME = 15
TIMER2_TIME = 20
#BASE_PATH = '/tmp/'
# SD-card mount point plus paths of the capture output, credentials and config files.
SDDIR = '/media/card/'
SDSAVEDFILESDIR = '/media/card/savedFiles/'
CREDDIR = '/media/card/Credentials.txt'
CONFIGDIR = '/media/card/config.txt'
# Debug UART device and the card-detect GPIO pin for the SD slot.
UART_PORT = '/dev/ttyO4'
CD_PIN = 'P2_35'
DEBUG = False
ENABLE_SD = True
request = False
''' Section to detect if Credentials file exists. if not it creates it'''
def fileCheck():
    """Load Google Cloud credential names from the SD card into globals.

    If the credentials file does not exist yet, a blank template is written
    and the device loops forever on the LCD asking the user to fill it in
    and restart. Otherwise the six "label: value" lines are parsed into
    JSON_LOC, BUCKET_NAME, KIND, ID_NAME and ADV_NAME. No-op when
    ENABLE_SD is False.
    """
    global JSON_LOC, BUCKET_NAME, KIND, ID_NAME, ADV_NAME
    if not ENABLE_SD:
        return
    if not os.path.exists(CREDDIR):
        # First boot: write an empty template for the user to fill in.
        # `with` guarantees the handle is closed even if a write fails.
        with open(CREDDIR, 'w') as credFile:
            credFile.write("JSON File Name:\n")
            credFile.write("Card Detect Pin:\n")
            credFile.write("Bucket Name:\n")
            credFile.write("Datastore Kind Name:\n")
            credFile.write("Datastore ID Name:\n")
            credFile.write("Datastore Adv ID Name:\n")
        # Deliberate dead end: the device cannot run without credentials,
        # so keep prompting until the user edits the file and restarts.
        while True:
            lcd.move_to(0,0)
            lcd.putstr('Credential File')
            lcd.move_to(4,1)
            lcd.putstr('Created.')
            time.sleep(5)
            lcd.clear()
            lcd.move_to(0,0)
            lcd.putstr('Input credential')
            lcd.move_to(1,0)
            lcd.putstr('and Restart')
            time.sleep(5)
    else:
        # Parse "label: value" lines. The original left this read handle
        # open; `with` closes it deterministically.
        with open(CREDDIR, 'r') as f:
            information = f.readlines()
        infoArray = np.empty(6, dtype='U256')
        for index, lines in enumerate(information):
            tempinfo = lines.split(":")
            infoArray[index] = tempinfo[1].replace("\n", '')
        JSON_LOC = SDDIR + str(infoArray[0])
        #CD_PIN = str(infoArray[1])
        BUCKET_NAME = str(infoArray[2])
        KIND = str(infoArray[3])
        ID_NAME = str(infoArray[4])
        ADV_NAME = str(infoArray[5])
# Bring up the radio and the status LED (USR3, initially off).
hackrf = hackrfCtrl(DEBUG)
lcd.clear()
GPIO.setup("USR3", GPIO.OUT)
GPIO.output("USR3", GPIO.LOW)
# SD card object declaration
if ENABLE_SD:
    GPIO.setup(CD_PIN, GPIO.IN)
if DEBUG:
    #print("Importing UART")
    # Open/close once at boot to verify the debug UART is reachable.
    ser = serial.Serial(port=UART_PORT, baudrate=115200)
    ser.close()
''' This is the main function that runs on startup. First determines if sd card
is inserted so device can work. Then runs a request from the Datastore
database if connected to internet. It then runs the HackRF and stores its
data in a file and stores it appropriately. '''
def main():
    """Top-level loop: wait for the SD card (if enabled) and poll the cloud."""
    global timer1, timer2
    # Announce startup over UART in debug builds, stdout otherwise.
    if DEBUG:
        writeToUARTln('Start Main')
    else:
        print("Start Main")
    timer1 = time.time()
    timer2 = time.time()
    credentials_loaded = False
    while True:
        # With SD enabled, a low card-detect pin means the card is present.
        card_present = (not GPIO.input(CD_PIN)) if ENABLE_SD else True
        if not card_present:
            # No card: show the insert prompt and keep polling.
            lcd.move_to(1,0)
            lcd.putstr("Insert SD Card")
            lcd.move_to(0,1)
            lcd.putstr('Unplug to start')
            continue
        if ENABLE_SD and not credentials_loaded:
            # Read credentials once per boot, after the card first appears.
            fileCheck()
            credentials_loaded = True
        mainScreen()
        dataStoreCheck()
''' This function checks the interntflag and determines if should request data
from dataStore or to use the sd card to store file '''
def dataStoreCheck():
    """Refresh capture parameters on a timer and trigger HackRF captures.

    Every TIMER1_TIME seconds the parameters are refreshed — from Google
    Datastore when online, otherwise from the SD-card config file. Every
    TIMER2_TIME seconds a capture runs with the latest parameters.
    InternetCheck()/writeToUART*() are assumed to be defined elsewhere in
    this file — TODO confirm.
    """
    global JSON_LOC, BUCKET_NAME, KIND, ID_NAME, ADV_NAME
    global timer1, timer2
    global request
    global data
    global InternetFlag
    ''' First "if" statement checks if timer1 is greater than specified
        time and if connected to internet request from Datastore. '''
    if time.time() - timer1 > TIMER1_TIME:
        InternetFlag = InternetCheck()
        # Status LED on while the parameter refresh is in progress.
        GPIO.output("USR3", GPIO.HIGH)
        if InternetFlag:
            lcd.clearRow(1)
            lcd.move_to(0,1)
            lcd.putstr('Requesting Data')
            if DEBUG:
                writeToUARTln('Requesting Data from Datastore')
            else:
                print('Requesting Data')
            #Request data from database
            client = datastore.Client.from_service_account_json(JSON_LOC)
            key_complete = client.key(KIND, ID_NAME)
            tasks = client.get(key_complete)
            #Put properties of request into varaibles
            request = tasks['Request']
            advRequest = tasks['ADV_Request']
            minFreq = tasks['min_frequency']
            incremFreq = tasks['increment_frequency']
            maxFreq = tasks['max_frequency']
            samprate= tasks['sample_rate']
            lna = tasks['lna']
            vga = tasks['vga']
            numscans = tasks['Scans']
            data = [minFreq, incremFreq, maxFreq, samprate, lna, vga, numscans]
            # Show the band about to be captured, in MHz.
            lcd.clearRow(1)
            lcd.move_to(0,1)
            lcd.putstr('{} to {}'. format(incremFreq/1.0e6, (incremFreq + samprate)/1.0e6))
            if DEBUG:
                for element in data:
                    writeToUART(element)
                writeToUART('\n')
            else:
                print(data)
            #If there was a request collect hackrf data immediately
            if request == True:
                runHackrf(InternetFlag, data)
                timer2 = time.time()
            timer1 = time.time()
        else:
            '''This is for if not connected to internet. Nothing much
            to do besides nothing '''
            lcd.clearRow(1)
            lcd.move_to(0,1)
            lcd.putstr('Reading Params')
            time.sleep(2)
            if DEBUG:
                writeToUARTln('Requesting Data from SD config file')
            else:
                print("No Internet == Read SD card")
            # Flatten every comma-separated numeric value in config.txt
            # into a single params list.
            params = []
            with open(SDDIR + 'config.txt', 'r') as configFile:
                for line in configFile:
                    numbers_float = map(float, line.split(', '))
                    for number in numbers_float:
                        params.append(number)
            # Index layout appears to mirror what runHackrf() writes back
            # to the card — verify against that function.
            minFreq = params[1]
            incremFreq = params[2]
            maxFreq = params[3]
            samprate= params[4]
            data = [minFreq, incremFreq, maxFreq, samprate]
            lcd.clearRow(1)
            lcd.move_to(0,1)
            lcd.putstr('{} to {}'. format(incremFreq/1.0e6, (incremFreq + samprate)/1.0e6))
            if DEBUG:
                for element in data:
                    writeToUART(element)
                writeToUART('\n')
            else:
                print(data)
            timer1 = time.time()
    ''' Second "if" statement is used to run the hackrf. The global
        variables allow the database to set them and be placed into
        this function. '''
    if time.time() - timer2 > TIMER2_TIME:
        runHackrf(InternetFlag, data)
        # LED off once the capture cycle has completed.
        GPIO.output("USR3", GPIO.LOW)
        timer1 = time.time()
        timer2 = time.time()
'''The Hackrf run function determines if the internet is connected. If it is
then use the parameters passed from the database otherwise read the
parameters from the sd card. Run the hackrf the appropriate amount of times
then save it to a file and save the file appropriately. '''
def runHackrf(internetflag, dataParams=None):
    """Run one HackRF capture and store the result locally and/or in the cloud.

    Shows the "recording" symbol on the LCD while capturing, saves the IQ
    data as a compressed .npz on the SD card, then either uploads it to
    Google Cloud Storage and advances the sweep frequency in Datastore
    (online) or advances the sweep in the SD-card config file (offline).

    :param internetflag: True when an internet connection is available.
    :param dataParams: [minFreq, incremFreq, maxFreq, samprate, (lna, vga,
        numscans when online)] as built by the parameter-refresh code.
    """
    global JSON_LOC, BUCKET_NAME, KIND, ID_NAME, ADV_NAME
    global ENABLE_SD
    # Avoid the shared-mutable-default pitfall of the old `dataParams=[]`.
    dataParams = [] if dataParams is None else dataParams
    ''' Start of Hackrf Func '''
    # Switch the LCD record symbol to "on" (custom chars 2 and 3).
    lcd.move_to(0,0)
    lcd.putchar(chr(2))
    lcd.move_to(1,0)
    lcd.putchar(chr(3))
    #Collect the data
    if internetflag:
        # Online: gain settings and scan count come from Datastore.
        lna = dataParams[4]
        vga = dataParams[5]
        scans = int(dataParams[6])
    else:
        # Offline: fixed fallback gain and scan count.
        lna = 16
        vga = 20
        scans = 5
    ''' Increment Frequency + sample rate/2 '''
    center_frequency = int(dataParams[1] + (dataParams[3]/2))
    data_pts = hackrf.setParameters(center_frequency, dataParams[3], lna, vga)
    iq, Error = hackrf.hackrf_run(scans)
    ''' End of Hackrf Func '''
    # Restore the "off" record symbol and report completion.
    lcd.custom_char(0,customChar.RecordSym('offleft'))
    lcd.custom_char(1,customChar.RecordSym('offright'))
    lcd.custom_char(2,customChar.RecordSym('onleft'))
    lcd.custom_char(3,customChar.RecordSym('onright'))
    lcd.clearRow(1)
    lcd.move_to(0,0)
    lcd.putchar(chr(0))
    lcd.move_to(1,0)
    lcd.putchar(chr(1))
    lcd.move_to(0,1)
    lcd.putstr('Record Complete')
    if not Error:
        ''' Store data to file name '''
        # e.g. "06-21_14-05_420.0e6-422.5e6"
        strname = str(time.strftime('%m-%d_%H-%M_', time.localtime()) + \
            str(dataParams[1]/1e6) + 'e6-' + \
            str((dataParams[1] + dataParams[3])/1e6) + 'e6')
        if DEBUG:
            writeToUARTln(strname)
        else:
            print(strname)
        ''' Save npz file '''
        # Always save to the SD card first (the old online/offline branches
        # here were byte-for-byte identical, so they are collapsed).
        np.savez_compressed(os.path.join(SDSAVEDFILESDIR, strname), data_pts = data_pts, iq = iq)
        strname = strname + '.npz'
        ''' Perform second internet check if internet lost during hackrf capture'''
        newInternetFlag = InternetCheck()
        #Save file to storage or SD card
        if newInternetFlag:
            hackrf.close()
            storage_client = storage.Client.from_service_account_json(JSON_LOC)
            bucket = storage_client.get_bucket(BUCKET_NAME)
            blob = bucket.blob(os.path.basename(SDSAVEDFILESDIR + strname))
            blob.upload_from_filename(SDSAVEDFILESDIR + strname)
            confirmation = "File {} stored via Cloud".format(strname)
            if DEBUG:
                writeToUARTln(confirmation)
            else:
                print(confirmation)
            # NOTE(review): the original had a bare no-op
            # `os.path.join(SDSAVEDFILESDIR, strname)` here next to a
            # commented-out os.remove(); the local copy is intentionally kept
            # after upload until deleting it is confirmed safe.
            #Request data from database
            client = datastore.Client.from_service_account_json(JSON_LOC)
            key_complete = client.key(KIND, ID_NAME)
            tasks = client.get(key_complete)
            #Put properties of request into varibles
            request = tasks['Request']
            minFreq = tasks['min_frequency']
            maxFreq = tasks['max_frequency']
            samprate= tasks['sample_rate']
            incremFreq = tasks['increment_frequency']
            # Advance the sweep; wrap back to minFreq past the top of the band.
            newfreq = incremFreq + samprate
            if newfreq >= maxFreq:
                print("Setting increment frequency back to minimum frequency")
                if DEBUG:
                    writeToUARTln("Setting increment frequency back to minimum frequency")
                tasks['increment_frequency'] = minFreq
            else:
                tasks['increment_frequency'] = newfreq
            if DEBUG:
                writeToUARTln("Data Updated")
            else:
                print('Data Updated')
            client.put(tasks)
            ''' Uploading '''
            lcd.clearRow(1)
            lcd.move_to(0,1)
            lcd.putstr('Upload Complete')
            time.sleep(3)
            lcd.clearRow(1)
        else:
            hackrf.close()
            if ENABLE_SD:
                confirmation = "File {} stored via SD card".format(strname)
                if DEBUG:
                    writeToUARTln(confirmation)
                else:
                    print(confirmation)
                # Offline sweep bookkeeping mirrors the Datastore update but
                # writes to config.txt. NOTE(review): band limits are
                # hard-coded here (420e6/512e6/2.5e6) rather than taken from
                # dataParams — confirm intended.
                infoArray = np.empty(6)
                newfreq = dataParams[1] + dataParams[3]
                infoArray[0] = 0
                infoArray[1] = 420e6
                infoArray[3] = 512e6
                infoArray[4] = 2.5e6
                infoArray[5] = 0
                if newfreq >= dataParams[2]:
                    if DEBUG:
                        writeToUARTln("Setting increment frequency back to minimum frequency")
                    else:
                        print("Setting increment frequency back to minimum frequency")
                    infoArray[2] = dataParams[0]
                else:
                    infoArray[2] = newfreq
                if DEBUG:
                    writeToUARTln("Data Updated on SD card")
                else:
                    print('Data Updated')
                # Persist as a single comma-separated row; `with` guarantees
                # the handle is closed (old code used open/close manually).
                with open(CONFIGDIR, 'w') as writeFile:
                    writeFile.write(str(infoArray[0]) + ', ' + str(infoArray[1]) +
                        ', ' + str(infoArray[2]) + ', ' + str(infoArray[3]) + ', ' +
                        str(infoArray[4]) + ', ' + str(infoArray[5]))
                ''' Uploading '''
                lcd.clearRow(1)
                lcd.move_to(0,1)
                lcd.putstr('Update Complete')
                time.sleep(2)
                lcd.clearRow(1)
    else:
        ''' When Error is reported '''
        lcd.move_to(0,1)
        lcd.putstr('Reported Error')
        time.sleep(2)
        lcd.clearRow(1)
lcd.clearRow(1)
if DEBUG:
writeToUARTln("Reported Error")
else:
print("I reported an error")
return
''' The internetCheck function sees if the device can connected to gmail.com
and if uncommented, will print out the IP address since there is a socket
setup between the internet device and gmail '''
def InternetCheck():
    """Cheap online/offline probe.

    Returns:
        bool: True if 'gmail.com' resolves and a UDP socket can be
        "connected" to it, False otherwise.

    Connecting a SOCK_DGRAM socket sends no packets; it mainly exercises
    DNS resolution, which is what actually fails when offline.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("gmail.com",80))
        #print("IP: " + s.getsockname()[0])
        return True
    except Exception:
        print("Not connected to internet")
        return False
    finally:
        # Fix: the original only closed the socket on success, leaking the
        # file descriptor whenever connect() raised.
        s.close()
''' Function to write string to UART '''
def writeToUART(message):
    """Open the shared serial port, transmit *message* (no newline), close it."""
    payload = str(message)
    ser.open()
    if ser.isOpen():
        ser.write(payload)
    ser.close()
''' Same as write to UART except adds a carriage return '''
def writeToUARTln(message):
    """Open the shared serial port, transmit *message* plus a newline, close it."""
    line = str(message) + '\n'
    ser.open()
    if ser.isOpen():
        ser.write(line)
    ser.close()
if __name__ == '__main__':
    # Script entry point; not executed on import.
    main()
| [
"serial.Serial",
"google.cloud.datastore.Client.from_service_account_json",
"Adafruit_BBIO.GPIO.input",
"Adafruit_BBIO.GPIO.output",
"google.cloud.storage.Client.from_service_account_json",
"i2c_lcd.I2cLcd",
"Adafruit_BBIO.GPIO.setup",
"numpy.empty",
"socket.socket",
"os.path.exists",
"customCha... | [((1319, 1349), 'i2c_lcd.I2cLcd', 'I2cLcd', (['(1)', 'LCD_I2C_ADDR', '(2)', '(16)'], {}), '(1, LCD_I2C_ADDR, 2, 16)\n', (1325, 1349), False, 'from i2c_lcd import I2cLcd\n'), ((3536, 3553), 'pylibhackrf.hackrfCtrl', 'hackrfCtrl', (['DEBUG'], {}), '(DEBUG)\n', (3546, 3553), False, 'from pylibhackrf import hackrfCtrl\n'), ((3567, 3595), 'Adafruit_BBIO.GPIO.setup', 'GPIO.setup', (['"""USR3"""', 'GPIO.OUT'], {}), "('USR3', GPIO.OUT)\n", (3577, 3595), True, 'import Adafruit_BBIO.GPIO as GPIO\n'), ((3596, 3625), 'Adafruit_BBIO.GPIO.output', 'GPIO.output', (['"""USR3"""', 'GPIO.LOW'], {}), "('USR3', GPIO.LOW)\n", (3607, 3625), True, 'import Adafruit_BBIO.GPIO as GPIO\n'), ((839, 852), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (849, 852), False, 'import time\n'), ((3673, 3700), 'Adafruit_BBIO.GPIO.setup', 'GPIO.setup', (['CD_PIN', 'GPIO.IN'], {}), '(CD_PIN, GPIO.IN)\n', (3683, 3700), True, 'import Adafruit_BBIO.GPIO as GPIO\n'), ((3755, 3801), 'serial.Serial', 'serial.Serial', ([], {'port': 'UART_PORT', 'baudrate': '(115200)'}), '(port=UART_PORT, baudrate=115200)\n', (3768, 3801), False, 'import serial\n'), ((4255, 4266), 'time.time', 'time.time', ([], {}), '()\n', (4264, 4266), False, 'import time\n'), ((4280, 4291), 'time.time', 'time.time', ([], {}), '()\n', (4289, 4291), False, 'import time\n'), ((918, 949), 'customChar.RecordSym', 'customChar.RecordSym', (['"""offleft"""'], {}), "('offleft')\n", (938, 949), False, 'import customChar\n'), ((973, 1005), 'customChar.RecordSym', 'customChar.RecordSym', (['"""offright"""'], {}), "('offright')\n", (993, 1005), False, 'import customChar\n'), ((1029, 1059), 'customChar.RecordSym', 'customChar.RecordSym', (['"""onleft"""'], {}), "('onleft')\n", (1049, 1059), False, 'import customChar\n'), ((1083, 1114), 'customChar.RecordSym', 'customChar.RecordSym', (['"""onright"""'], {}), "('onright')\n", (1103, 1114), False, 'import customChar\n'), ((5392, 5422), 'Adafruit_BBIO.GPIO.output', 'GPIO.output', 
(['"""USR3"""', 'GPIO.HIGH'], {}), "('USR3', GPIO.HIGH)\n", (5403, 5422), True, 'import Adafruit_BBIO.GPIO as GPIO\n'), ((8696, 8725), 'Adafruit_BBIO.GPIO.output', 'GPIO.output', (['"""USR3"""', 'GPIO.LOW'], {}), "('USR3', GPIO.LOW)\n", (8707, 8725), True, 'import Adafruit_BBIO.GPIO as GPIO\n'), ((8755, 8766), 'time.time', 'time.time', ([], {}), '()\n', (8764, 8766), False, 'import time\n'), ((8784, 8795), 'time.time', 'time.time', ([], {}), '()\n', (8793, 8795), False, 'import time\n'), ((9865, 9896), 'customChar.RecordSym', 'customChar.RecordSym', (['"""offleft"""'], {}), "('offleft')\n", (9885, 9896), False, 'import customChar\n'), ((9920, 9952), 'customChar.RecordSym', 'customChar.RecordSym', (['"""offright"""'], {}), "('offright')\n", (9940, 9952), False, 'import customChar\n'), ((9976, 10006), 'customChar.RecordSym', 'customChar.RecordSym', (['"""onleft"""'], {}), "('onleft')\n", (9996, 10006), False, 'import customChar\n'), ((10030, 10061), 'customChar.RecordSym', 'customChar.RecordSym', (['"""onright"""'], {}), "('onright')\n", (10050, 10061), False, 'import customChar\n'), ((14871, 14884), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (14881, 14884), False, 'import time\n'), ((15313, 15361), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (15326, 15361), False, 'import socket\n'), ((1272, 1288), 'time.localtime', 'time.localtime', ([], {}), '()\n', (1286, 1288), False, 'import time\n'), ((1993, 2016), 'os.path.exists', 'os.path.exists', (['CREDDIR'], {}), '(CREDDIR)\n', (2007, 2016), False, 'import os\n'), ((3047, 3072), 'numpy.empty', 'np.empty', (['(6)'], {'dtype': '"""U256"""'}), "(6, dtype='U256')\n", (3055, 3072), True, 'import numpy as np\n'), ((5309, 5320), 'time.time', 'time.time', ([], {}), '()\n', (5318, 5320), False, 'import time\n'), ((5792, 5844), 'google.cloud.datastore.Client.from_service_account_json', 'datastore.Client.from_service_account_json', (['JSON_LOC'], 
{}), '(JSON_LOC)\n', (5834, 5844), False, 'from google.cloud import datastore\n'), ((7087, 7098), 'time.time', 'time.time', ([], {}), '()\n', (7096, 7098), False, 'import time\n'), ((7334, 7347), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7344, 7347), False, 'import time\n'), ((8406, 8417), 'time.time', 'time.time', ([], {}), '()\n', (8415, 8417), False, 'import time\n'), ((8614, 8625), 'time.time', 'time.time', ([], {}), '()\n', (8623, 8625), False, 'import time\n'), ((11171, 11221), 'google.cloud.storage.Client.from_service_account_json', 'storage.Client.from_service_account_json', (['JSON_LOC'], {}), '(JSON_LOC)\n', (11211, 11221), False, 'from google.cloud import storage\n'), ((11695, 11733), 'os.path.join', 'os.path.join', (['SDSAVEDFILESDIR', 'strname'], {}), '(SDSAVEDFILESDIR, strname)\n', (11707, 11733), False, 'import os\n'), ((11808, 11860), 'google.cloud.datastore.Client.from_service_account_json', 'datastore.Client.from_service_account_json', (['JSON_LOC'], {}), '(JSON_LOC)\n', (11850, 11860), False, 'from google.cloud import datastore\n'), ((13001, 13014), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (13011, 13014), False, 'import time\n'), ((2707, 2720), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2717, 2720), False, 'import time\n'), ((2920, 2933), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2930, 2933), False, 'import time\n'), ((4376, 4394), 'Adafruit_BBIO.GPIO.input', 'GPIO.input', (['CD_PIN'], {}), '(CD_PIN)\n', (4386, 4394), True, 'import Adafruit_BBIO.GPIO as GPIO\n'), ((7025, 7036), 'time.time', 'time.time', ([], {}), '()\n', (7034, 7036), False, 'import time\n'), ((10668, 10706), 'os.path.join', 'os.path.join', (['SDSAVEDFILESDIR', 'strname'], {}), '(SDSAVEDFILESDIR, strname)\n', (10680, 10706), False, 'import os\n'), ((10784, 10822), 'os.path.join', 'os.path.join', (['SDSAVEDFILESDIR', 'strname'], {}), '(SDSAVEDFILESDIR, strname)\n', (10796, 10822), False, 'import os\n'), ((11351, 11394), 
'os.path.basename', 'os.path.basename', (['(SDSAVEDFILESDIR + strname)'], {}), '(SDSAVEDFILESDIR + strname)\n', (11367, 11394), False, 'import os\n'), ((13350, 13361), 'numpy.empty', 'np.empty', (['(6)'], {}), '(6)\n', (13358, 13361), True, 'import numpy as np\n'), ((14666, 14679), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (14676, 14679), False, 'import time\n'), ((10343, 10359), 'time.localtime', 'time.localtime', ([], {}), '()\n', (10357, 10359), False, 'import time\n')] |
import copy
import numpy
import time
from pauxy.estimators.mixed import local_energy
from pauxy.estimators.greens_function import gab
from pauxy.utils.linalg import diagonalise_sorted
from pauxy.systems.hubbard import decode_basis
from pauxy.utils.io import get_input_value
class UHF(object):
    r"""UHF trial wavefunction.

    Search for UHF trial wavefunction by self consistenly solving the mean field
    Hamiltonian:

    .. math::
        H^{\sigma} = \sum_{\langle ij\rangle} \left(
            c^{\dagger}_{i\sigma}c_{j\sigma} + h.c.\right) +
            U_{\mathrm{eff}} \sum_i \hat{n}_{i\sigma}\langle\hat{n}_{i\bar{\sigma}}\rangle -
            \frac{1}{2} U_{\mathrm{eff}} \sum_i \langle\hat{n}_{i\sigma}\rangle
            \langle\hat{n}_{i\bar{\sigma}}\rangle.

    See [Xu11]_ for more details.

    .. Warning::
        This is for the Hubbard model only

    .. todo:: We should generalise in the future perhaps.

    Parameters
    ----------
    system : :class:`pauxy.systems.hubbard.Hubbard` object
        System parameters.
    cplx : bool
        True if the trial wavefunction etc is complex.
    trial : dict
        Trial wavefunction input options.

    Attributes
    ----------
    psi : :class:`numpy.ndarray`
        Trial wavefunction.
    eigs : :class:`numpy.array`
        One-electron eigenvalues.
    emin : float
        Ground state mean field total energy of trial wavefunction.
    """

    def __init__(self, system, trial={}, verbose=0):
        # NOTE(review): mutable default for ``trial`` -- only read from here,
        # but confirm callers never mutate it.
        assert "Hubbard" in system.name
        if verbose:
            print("# Constructing UHF trial wavefunction")
        self.verbose = verbose
        init_time = time.time()
        self.name = "UHF"
        self.type = "UHF"
        self.initial_wavefunction = trial.get('initial_wavefunction',
                                              'trial')
        self.trial_type = complex
        # Unpack input options.
        self.ninitial = get_input_value(trial, 'ninitial', default=10,
                                        verbose=verbose)
        self.nconv = get_input_value(trial, 'nconv', default=5000,
                                     verbose=verbose)
        self.ueff = get_input_value(trial, 'ueff',
                                    default=0.4,
                                    verbose=verbose)
        self.deps = get_input_value(trial, 'deps', default=1e-8,
                                    verbose=verbose)
        self.alpha = get_input_value(trial, 'alpha', default=0.5,
                                     verbose=verbose)
        # For interface compatability
        self.coeffs = 1.0
        self.type = 'UHF'
        self.ndets = 1
        self.initial_guess = trial.get('initial', 'random')
        if self.initial_guess == 'random':
            if self.verbose:
                print("# Solving UHF equations.")
            (self.psi, self.eigs, self.emin, self.error, self.nav) = (
                self.find_uhf_wfn(system, self.ueff, self.ninitial,
                                  self.nconv, self.alpha, self.deps, verbose)
            )
            if self.error:
                # NOTE(review): ``warnings`` and ``sys`` are not among the
                # imports visible in this chunk -- confirm they are imported
                # elsewhere in the file.
                warnings.warn('Error in constructing trial wavefunction. Exiting')
                sys.exit()
        elif self.initial_guess == 'checkerboard':
            if self.verbose:
                print("# Using checkerboard breakup.")
            self.psi, unused = self.checkerboard(system.nbasis, system.nup, system.ndown)
        # One-body Green's functions for each spin sector; used below to
        # evaluate the trial energy.
        Gup = gab(self.psi[:,:system.nup], self.psi[:,:system.nup]).T
        if (system.ndown > 0):
            Gdown = gab(self.psi[:,system.nup:], self.psi[:,system.nup:]).T
        else:
            Gdown = numpy.zeros_like(Gup)
        self.le_oratio = 1.0
        self.G = numpy.array([Gup, Gdown])
        self.etrial = local_energy(system, self.G)[0].real
        self.bp_wfn = trial.get('bp_wfn', None)
        self.initialisation_time = time.time() - init_time
        self.init = self.psi
        self._mem_required = 0.0
        self._rchol = None

    def find_uhf_wfn(self, system, ueff, ninit,
                     nit_max, alpha, deps=1e-8, verbose=0):
        """Self-consistently solve the UHF equations from ``ninit`` random
        starting densities and keep the lowest-energy solution found.

        Temporarily replaces ``system.U`` with ``ueff`` for the search and
        restores it afterwards.
        """
        emin = 0
        uold = system.U
        system.U = ueff
        minima = [] # Local minima
        nup = system.nup
        # Search over different random starting points.
        for attempt in range(0, ninit):
            # Set up initial (random) guess for the density.
            (self.trial, eold) = self.initialise(system.nbasis, system.nup,
                                                 system.ndown)
            niup = self.density(self.trial[:,:nup])
            nidown = self.density(self.trial[:,nup:])
            niup_old = self.density(self.trial[:,:nup])
            nidown_old = self.density(self.trial[:,nup:])
            for it in range(0, nit_max):
                (niup, nidown, e_up, e_down) = (
                    self.diagonalise_mean_field(system, ueff, niup, nidown)
                )
                # Construct Green's function to compute the energy.
                Gup = gab(self.trial[:,:nup], self.trial[:,:nup]).T
                if (system.ndown>0):
                    Gdown = gab(self.trial[:,nup:], self.trial[:,nup:]).T
                else:
                    Gdown = numpy.zeros((system.nbasis, system.nbasis))
                enew = local_energy(system, numpy.array([Gup, Gdown]))[0].real
                if verbose > 1:
                    print("# %d %f %f" % (it, enew, eold))
                sc = self.self_consistant(enew, eold, niup, niup_old, nidown,
                                          nidown_old, it, deps, verbose)
                if sc:
                    # Global minimum search.
                    if attempt == 0:
                        minima.append(enew)
                        psi_accept = copy.deepcopy(self.trial)
                        e_accept = numpy.append(e_up, e_down)
                    elif all(numpy.array(minima) - enew > deps):
                        # Strictly lower than every minimum found so far.
                        minima.append(enew)
                        psi_accept = copy.deepcopy(self.trial)
                        e_accept = numpy.append(e_up, e_down)
                    break
                else:
                    # Not converged: damp the density update by mixing with
                    # the previous iteration (simple linear mixing).
                    mixup = self.mix_density(niup, niup_old, alpha)
                    mixdown = self.mix_density(nidown, nidown_old, alpha)
                    niup_old = niup
                    nidown_old = nidown
                    niup = mixup
                    nidown = mixdown
                    eold = enew
            if verbose > 1:
                print("# SCF cycle: {:3d}. After {:4d} steps the minimum UHF"
                      " energy found is: {: 8f}".format(attempt, it, eold))
        system.U = uold
        if verbose:
            print("# Minimum energy found: {: 8f}".format(min(minima)))
            # Report <S^2> of the accepted determinant as a diagnostic of
            # spin contamination.
            nocca = system.nup
            noccb = system.ndown
            MS = numpy.abs(nocca-noccb) / 2.0
            S2exact = MS * (MS+1.)
            Sij = psi_accept[:,:nocca].T.dot(psi_accept[:,nocca:])
            S2 = S2exact + min(nocca, noccb) - numpy.sum(numpy.abs(Sij*Sij).ravel())
            print("# <S^2> = {: 3f}".format(S2))
        try:
            return (psi_accept, e_accept, min(minima), False, [niup, nidown])
        except UnboundLocalError:
            # No attempt ever converged, so psi_accept/e_accept were never
            # bound.
            warnings.warn("Warning: No UHF wavefunction found."
                          "Delta E: %f" % (enew - emin))
            # NOTE(review): ``trial`` is not defined in this scope (the
            # attribute is ``self.trial``) -- this line would raise
            # NameError if reached; confirm intent.
            return (trial, numpy.append(e_up, e_down), None, True, None)

    def initialise(self, nbasis, nup, ndown):
        """Build a random orthonormal starting wavefunction and its energy."""
        (e_up, ev_up) = self.random_starting_point(nbasis)
        (e_down, ev_down) = self.random_starting_point(nbasis)
        trial = numpy.zeros(shape=(nbasis, nup+ndown),
                            dtype=numpy.complex128)
        trial[:,:nup] = ev_up[:,:nup]
        trial[:,nup:] = ev_down[:,:ndown]
        eold = sum(e_up[:nup]) + sum(e_down[:ndown])
        return (trial, eold)

    def random_starting_point(self, nbasis):
        """Diagonalise a random symmetric matrix to get starting orbitals."""
        random = numpy.random.random((nbasis, nbasis))
        # Symmetrise so the eigendecomposition is real/orthogonal.
        random = 0.5 * (random + random.T)
        (energies, eigv) = diagonalise_sorted(random)
        return (energies, eigv)

    def checkerboard(self, nbasis, nup, ndown):
        """Construct a Neel (checkerboard) spin pattern on a 4x4 lattice.

        Alternating sites are assigned to up/down spin orbitals.
        """
        nalpha = 0
        nbeta = 0
        wfn = numpy.zeros(shape=(nbasis, nup+ndown),
                          dtype=numpy.complex128)
        for i in range(nbasis):
            # NOTE(review): lattice dimensions are hard-coded to 4x4 here.
            x, y = decode_basis(4,4,i)
            if x % 2 == 0 and y % 2 == 0:
                wfn[i,nalpha] = 1.0
                nalpha += 1
            elif x % 2 == 0 and y % 2 == 1:
                wfn[i,nup+nbeta] = -1.0
                nbeta += 1
            elif x % 2 == 1 and y % 2 == 0:
                wfn[i,nup+nbeta] = -1.0
                nbeta += 1
            elif x % 2 == 1 and y % 2 == 1:
                wfn[i,nalpha] = 1.0
                nalpha += 1
        return wfn, 10

    def density(self, wfn):
        """Return the site densities (diagonal of the density matrix)."""
        return numpy.diag(wfn.dot((wfn.conj()).T))

    def self_consistant(self, enew, eold, niup, niup_old, nidown, nidown_old,
                        it, deps=1e-8, verbose=0):
        '''Check if system parameters are converged'''
        # Densities get a looser tolerance (sqrt of the energy tolerance).
        depsn = deps**0.5
        ediff = abs(enew-eold)
        nup_diff = sum(abs(niup-niup_old))/len(niup)
        ndown_diff = sum(abs(nidown-nidown_old))/len(nidown)
        if verbose > 1:
            print("# de: %.10e dniu: %.10e dnid: %.10e"%(ediff, nup_diff, ndown_diff))
        return (ediff < deps) and (nup_diff < depsn) and (ndown_diff < depsn)

    def mix_density(self, new, old, alpha):
        """Linear mixing of old and new densities (alpha = old fraction)."""
        return (1-alpha)*new + alpha*old

    def diagonalise_mean_field(self, system, ueff, niup, nidown):
        """One SCF step: diagonalise the spin mean-field Hamiltonians built
        from the opposite-spin densities and rebuild wavefunction/densities."""
        # mean field Hamiltonians.
        HMFU = system.T[0] + numpy.diag(ueff*nidown)
        HMFD = system.T[1] + numpy.diag(ueff*niup)
        (e_up, ev_up) = diagonalise_sorted(HMFU)
        (e_down, ev_down) = diagonalise_sorted(HMFD)
        # Construct new wavefunction given new density.
        self.trial[:,:system.nup] = ev_up[:,:system.nup]
        self.trial[:,system.nup:] = ev_down[:,:system.ndown]
        # Construct corresponding site densities.
        niup = self.density(self.trial[:,:system.nup])
        nidown = self.density(self.trial[:,system.nup:])
        return (niup, nidown, e_up, e_down)

    def calculate_energy(self, system):
        """Compute and store the trial energy components from ``self.G``."""
        if self.verbose:
            print ("# Computing trial energy.")
        (self.energy, self.e1b, self.e2b) = local_energy(system, self.G)
        if self.verbose:
            print ("# (E, E1B, E2B): (%13.8e, %13.8e, %13.8e)"
                   %(self.energy.real, self.e1b.real, self.e2b.real))
| [
"copy.deepcopy",
"numpy.zeros_like",
"numpy.abs",
"pauxy.estimators.greens_function.gab",
"numpy.zeros",
"pauxy.utils.io.get_input_value",
"time.time",
"pauxy.estimators.mixed.local_energy",
"numpy.append",
"numpy.random.random",
"numpy.array",
"numpy.diag",
"pauxy.utils.linalg.diagonalise_s... | [((1695, 1706), 'time.time', 'time.time', ([], {}), '()\n', (1704, 1706), False, 'import time\n'), ((1974, 2037), 'pauxy.utils.io.get_input_value', 'get_input_value', (['trial', '"""ninitial"""'], {'default': '(10)', 'verbose': 'verbose'}), "(trial, 'ninitial', default=10, verbose=verbose)\n", (1989, 2037), False, 'from pauxy.utils.io import get_input_value\n'), ((2099, 2161), 'pauxy.utils.io.get_input_value', 'get_input_value', (['trial', '"""nconv"""'], {'default': '(5000)', 'verbose': 'verbose'}), "(trial, 'nconv', default=5000, verbose=verbose)\n", (2114, 2161), False, 'from pauxy.utils.io import get_input_value\n'), ((2218, 2278), 'pauxy.utils.io.get_input_value', 'get_input_value', (['trial', '"""ueff"""'], {'default': '(0.4)', 'verbose': 'verbose'}), "(trial, 'ueff', default=0.4, verbose=verbose)\n", (2233, 2278), False, 'from pauxy.utils.io import get_input_value\n'), ((2371, 2433), 'pauxy.utils.io.get_input_value', 'get_input_value', (['trial', '"""deps"""'], {'default': '(1e-08)', 'verbose': 'verbose'}), "(trial, 'deps', default=1e-08, verbose=verbose)\n", (2386, 2433), False, 'from pauxy.utils.io import get_input_value\n'), ((2490, 2551), 'pauxy.utils.io.get_input_value', 'get_input_value', (['trial', '"""alpha"""'], {'default': '(0.5)', 'verbose': 'verbose'}), "(trial, 'alpha', default=0.5, verbose=verbose)\n", (2505, 2551), False, 'from pauxy.utils.io import get_input_value\n'), ((3756, 3781), 'numpy.array', 'numpy.array', (['[Gup, Gdown]'], {}), '([Gup, Gdown])\n', (3767, 3781), False, 'import numpy\n'), ((7655, 7719), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(nbasis, nup + ndown)', 'dtype': 'numpy.complex128'}), '(shape=(nbasis, nup + ndown), dtype=numpy.complex128)\n', (7666, 7719), False, 'import numpy\n'), ((7972, 8009), 'numpy.random.random', 'numpy.random.random', (['(nbasis, nbasis)'], {}), '((nbasis, nbasis))\n', (7991, 8009), False, 'import numpy\n'), ((8080, 8106), 
'pauxy.utils.linalg.diagonalise_sorted', 'diagonalise_sorted', (['random'], {}), '(random)\n', (8098, 8106), False, 'from pauxy.utils.linalg import diagonalise_sorted\n'), ((8239, 8303), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(nbasis, nup + ndown)', 'dtype': 'numpy.complex128'}), '(shape=(nbasis, nup + ndown), dtype=numpy.complex128)\n', (8250, 8303), False, 'import numpy\n'), ((9804, 9828), 'pauxy.utils.linalg.diagonalise_sorted', 'diagonalise_sorted', (['HMFU'], {}), '(HMFU)\n', (9822, 9828), False, 'from pauxy.utils.linalg import diagonalise_sorted\n'), ((9857, 9881), 'pauxy.utils.linalg.diagonalise_sorted', 'diagonalise_sorted', (['HMFD'], {}), '(HMFD)\n', (9875, 9881), False, 'from pauxy.utils.linalg import diagonalise_sorted\n'), ((10420, 10448), 'pauxy.estimators.mixed.local_energy', 'local_energy', (['system', 'self.G'], {}), '(system, self.G)\n', (10432, 10448), False, 'from pauxy.estimators.mixed import local_energy\n'), ((3491, 3546), 'pauxy.estimators.greens_function.gab', 'gab', (['self.psi[:, :system.nup]', 'self.psi[:, :system.nup]'], {}), '(self.psi[:, :system.nup], self.psi[:, :system.nup])\n', (3494, 3546), False, 'from pauxy.estimators.greens_function import gab\n'), ((3688, 3709), 'numpy.zeros_like', 'numpy.zeros_like', (['Gup'], {}), '(Gup)\n', (3704, 3709), False, 'import numpy\n'), ((3924, 3935), 'time.time', 'time.time', ([], {}), '()\n', (3933, 3935), False, 'import time\n'), ((8381, 8402), 'pauxy.systems.hubbard.decode_basis', 'decode_basis', (['(4)', '(4)', 'i'], {}), '(4, 4, i)\n', (8393, 8402), False, 'from pauxy.systems.hubbard import decode_basis\n'), ((9705, 9730), 'numpy.diag', 'numpy.diag', (['(ueff * nidown)'], {}), '(ueff * nidown)\n', (9715, 9730), False, 'import numpy\n'), ((9758, 9781), 'numpy.diag', 'numpy.diag', (['(ueff * niup)'], {}), '(ueff * niup)\n', (9768, 9781), False, 'import numpy\n'), ((3598, 3653), 'pauxy.estimators.greens_function.gab', 'gab', (['self.psi[:, system.nup:]', 'self.psi[:, system.nup:]'], {}), 
'(self.psi[:, system.nup:], self.psi[:, system.nup:])\n', (3601, 3653), False, 'from pauxy.estimators.greens_function import gab\n'), ((3804, 3832), 'pauxy.estimators.mixed.local_energy', 'local_energy', (['system', 'self.G'], {}), '(system, self.G)\n', (3816, 3832), False, 'from pauxy.estimators.mixed import local_energy\n'), ((6885, 6909), 'numpy.abs', 'numpy.abs', (['(nocca - noccb)'], {}), '(nocca - noccb)\n', (6894, 6909), False, 'import numpy\n'), ((5057, 5102), 'pauxy.estimators.greens_function.gab', 'gab', (['self.trial[:, :nup]', 'self.trial[:, :nup]'], {}), '(self.trial[:, :nup], self.trial[:, :nup])\n', (5060, 5102), False, 'from pauxy.estimators.greens_function import gab\n'), ((5264, 5307), 'numpy.zeros', 'numpy.zeros', (['(system.nbasis, system.nbasis)'], {}), '((system.nbasis, system.nbasis))\n', (5275, 5307), False, 'import numpy\n'), ((7423, 7449), 'numpy.append', 'numpy.append', (['e_up', 'e_down'], {}), '(e_up, e_down)\n', (7435, 7449), False, 'import numpy\n'), ((5168, 5213), 'pauxy.estimators.greens_function.gab', 'gab', (['self.trial[:, nup:]', 'self.trial[:, nup:]'], {}), '(self.trial[:, nup:], self.trial[:, nup:])\n', (5171, 5213), False, 'from pauxy.estimators.greens_function import gab\n'), ((5815, 5840), 'copy.deepcopy', 'copy.deepcopy', (['self.trial'], {}), '(self.trial)\n', (5828, 5840), False, 'import copy\n'), ((5876, 5902), 'numpy.append', 'numpy.append', (['e_up', 'e_down'], {}), '(e_up, e_down)\n', (5888, 5902), False, 'import numpy\n'), ((5352, 5377), 'numpy.array', 'numpy.array', (['[Gup, Gdown]'], {}), '([Gup, Gdown])\n', (5363, 5377), False, 'import numpy\n'), ((6049, 6074), 'copy.deepcopy', 'copy.deepcopy', (['self.trial'], {}), '(self.trial)\n', (6062, 6074), False, 'import copy\n'), ((6110, 6136), 'numpy.append', 'numpy.append', (['e_up', 'e_down'], {}), '(e_up, e_down)\n', (6122, 6136), False, 'import numpy\n'), ((7073, 7093), 'numpy.abs', 'numpy.abs', (['(Sij * Sij)'], {}), '(Sij * Sij)\n', (7082, 7093), False, 'import 
numpy\n'), ((5932, 5951), 'numpy.array', 'numpy.array', (['minima'], {}), '(minima)\n', (5943, 5951), False, 'import numpy\n')] |
import colorsys
import os
from pathlib import Path
import mmcv
import numpy as np
from scipy import interpolate
from mmhuman3d.core.post_processing import build_post_processing
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
def xyxy2xywh(bbox_xyxy):
    """Convert bounding boxes from (x1, y1, x2, y2) to (x, y, w, h).

    Args:
        bbox_xyxy (np.ndarray): boxes shaped (n, 4) or (n, 5), given as
            (left, top, right, bottom[, score]).

    Returns:
        np.ndarray: a new array shaped like the input, given as
            (left, top, width, height[, score]).

    Raises:
        TypeError: if the input is not a numpy array.
    """
    if not isinstance(bbox_xyxy, np.ndarray):
        raise TypeError(
            f'Input type is {type(bbox_xyxy)}, which should be numpy.ndarray.')
    converted = bbox_xyxy.copy()
    # Right/bottom columns become width/height in place on the copy.
    converted[..., 2] -= converted[..., 0]
    converted[..., 3] -= converted[..., 1]
    return converted
def xywh2xyxy(bbox_xywh):
    """Convert bounding boxes from (x, y, w, h) to (x1, y1, x2, y2).

    Args:
        bbox_xywh (np.ndarray): boxes shaped (n, 4) or (n, 5), given as
            (left, top, width, height[, score]).

    Returns:
        np.ndarray: a new array shaped like the input, given as
            (left, top, right, bottom[, score]). The -1 follows the
            original convention of inclusive right/bottom coordinates.

    Raises:
        TypeError: if the input is not a numpy array.
    """
    if not isinstance(bbox_xywh, np.ndarray):
        raise TypeError(
            f'Input type is {type(bbox_xywh)}, which should be numpy.ndarray.')
    converted = bbox_xywh.copy()
    # Width/height columns become right/bottom in place on the copy.
    converted[..., 2] += converted[..., 0] - 1
    converted[..., 3] += converted[..., 1] - 1
    return converted
def box2cs(bbox_xywh, aspect_ratio=1.0, bbox_scale_factor=1.25):
    """Convert xywh boxes to center/scale representation.

    Args:
        bbox_xywh (numpy.ndarray): boxes in (left, top, width, height) form.
        aspect_ratio (float, optional): target w/h ratio. Defaults to 1.0.
        bbox_scale_factor (float, optional): expansion factor applied to the
            scale. Defaults to 1.25.

    Returns:
        numpy.ndarray: box centers.
        numpy.ndarray: box scales (w, h) after aspect-ratio correction and
            expansion.

    Raises:
        TypeError: if the input is not a numpy array.
    """
    if not isinstance(bbox_xywh, np.ndarray):
        raise TypeError(
            f'Input type is {type(bbox_xywh)}, which should be numpy.ndarray.')
    boxes = bbox_xywh.copy()
    pixel_std = 1
    # Centers are taken from the *unmodified* width/height.
    center = np.stack(
        [boxes[..., 0] + 0.5 * boxes[..., 2],
         boxes[..., 1] + 0.5 * boxes[..., 3]], -1)
    # Grow the smaller side so every box matches the target aspect ratio.
    wide = boxes[..., 2] > aspect_ratio * boxes[..., 3]
    tall = ~wide
    boxes[wide, 3] = boxes[wide, 2] / aspect_ratio
    boxes[tall, 2] = boxes[tall, 3] * aspect_ratio
    scale = np.stack(
        [boxes[..., 2] * 1.0 / pixel_std,
         boxes[..., 3] * 1.0 / pixel_std], -1)
    return center, scale * bbox_scale_factor
def convert_crop_cam_to_orig_img(cam: np.ndarray,
                                 bbox: np.ndarray,
                                 img_width: int,
                                 img_height: int,
                                 aspect_ratio: float = 1.0,
                                 bbox_scale_factor: float = 1.25,
                                 bbox_format: Literal['xyxy', 'xywh',
                                                      'cs'] = 'xyxy'):
    """This function is modified from [VIBE](https://github.com/
    mkocabas/VIBE/blob/master/lib/utils/demo_utils.py#L242-L259). Original
    license please see docs/additional_licenses.md.

    Args:
        cam (np.ndarray): cam (ndarray, shape=(frame, 3) or
            (frame,num_person, 3)):
            weak perspective camera in cropped img coordinates
        bbox (np.ndarray): bbox coordinates
        img_width (int): original image width
        img_height (int): original image height
        aspect_ratio (float, optional): Defaults to 1.0.
        bbox_scale_factor (float, optional): Defaults to 1.25.
        bbox_format (Literal['xyxy', 'xywh', 'cs']): Defaults to 'xyxy'.
            'xyxy' means the left-up point and right-bottomn point of the
            bbox.
            'xywh' means the left-up point and the width and height of the
            bbox.
            'cs' means the center of the bbox (x,y) and the scale of the
            bbox w & h.

    Returns:
        orig_cam: shape = (frame, 4) or (frame, num_person, 4)
    """
    if not isinstance(bbox, np.ndarray):
        raise TypeError(
            f'Input type is {type(bbox)}, which should be numpy.ndarray.')
    # Work on a copy so callers' boxes are never mutated.
    bbox = bbox.copy()
    # Normalise every supported bbox format to (center_x, center_y, scale_w,
    # scale_h) before converting the camera.
    if bbox_format == 'xyxy':
        bbox_xywh = xyxy2xywh(bbox)
        center, scale = box2cs(bbox_xywh, aspect_ratio, bbox_scale_factor)
        bbox_cs = np.concatenate([center, scale], axis=-1)
    elif bbox_format == 'xywh':
        center, scale = box2cs(bbox, aspect_ratio, bbox_scale_factor)
        bbox_cs = np.concatenate([center, scale], axis=-1)
    elif bbox_format == 'cs':
        bbox_cs = bbox
    else:
        raise ValueError('Only supports the format of `xyxy`, `cs` and `xywh`')
    # The small epsilons guard against division by zero for degenerate boxes
    # or zero-scale cameras.
    cx, cy, h = bbox_cs[..., 0], bbox_cs[..., 1], bbox_cs[..., 2] + 1e-6
    hw, hh = img_width / 2., img_height / 2.
    # Rescale the weak-perspective scale from crop to full-image coordinates.
    sx = cam[..., 0] * (1. / (img_width / h))
    sy = cam[..., 0] * (1. / (img_height / h))
    # Shift the translation by the (normalised) crop center offset.
    tx = ((cx - hw) / hw / (sx + 1e-6)) + cam[..., 1]
    ty = ((cy - hh) / hh / (sy + 1e-6)) + cam[..., 2]
    orig_cam = np.stack([sx, sy, tx, ty], axis=-1)
    return orig_cam
def convert_bbox_to_intrinsic(bboxes: np.ndarray,
                              img_width: int = 224,
                              img_height: int = 224,
                              bbox_scale_factor: float = 1.25,
                              bbox_format: Literal['xyxy', 'xywh'] = 'xyxy'):
    """Convert bbox to intrinsic parameters.

    Args:
        bboxes (np.ndarray): (frame, num_person, 4) or (frame, 4) or (4,)
        img_width (int): image width of training data.
        img_height (int): image height of training data.
        bbox_scale_factor (float): scale factor for expanding the bbox.
        bbox_format (Literal['xyxy', 'xywh']): 'xyxy' means the left-up point
            and right-bottomn point of the bbox.
            'xywh' means the left-up point and the width and height of the
            bbox.

    Returns:
        np.ndarray: (frame, num_person, 3, 3) or (frame, 3, 3) or (3, 3)

    Raises:
        TypeError: if the input is not a numpy array.
        ValueError: if the input has an unsupported number of dimensions.
    """
    if not isinstance(bboxes, np.ndarray):
        raise TypeError(
            f'Input type is {type(bboxes)}, which should be numpy.ndarray.')
    assert bbox_format in ['xyxy', 'xywh']
    if bbox_format == 'xyxy':
        bboxes = xyxy2xywh(bboxes)
    center_x = bboxes[..., 0] + bboxes[..., 2] / 2.0
    center_y = bboxes[..., 1] + bboxes[..., 3] / 2.0
    # Square crop side: the larger of width/height, expanded by the factor.
    W = np.max(bboxes[..., 2:], axis=-1) * bbox_scale_factor
    num_frame = bboxes.shape[0]
    if bboxes.ndim == 3:
        num_person = bboxes.shape[1]
        Ks = np.zeros((num_frame, num_person, 3, 3))
    elif bboxes.ndim == 2:
        Ks = np.zeros((num_frame, 3, 3))
    elif bboxes.ndim == 1:
        Ks = np.zeros((3, 3))
    else:
        # Fix: the original message was missing the f-prefix, so the shape
        # placeholder was emitted literally instead of being interpolated.
        raise ValueError(f'Wrong input bboxes shape {bboxes.shape}')
    Ks[..., 0, 0] = W / img_width
    Ks[..., 1, 1] = W / img_height
    Ks[..., 0, 2] = center_x - W / 2.0
    Ks[..., 1, 2] = center_y - W / 2.0
    Ks[..., 2, 2] = 1
    return Ks
def get_default_hmr_intrinsic(num_frame=1,
                              focal_length=1000,
                              det_width=224,
                              det_height=224) -> np.ndarray:
    """Get default hmr intrinsic, defined by how you trained.

    Args:
        num_frame (int, optional): num of frames. Defaults to 1.
        focal_length (int, optional): defined same as your training.
            Defaults to 1000.
        det_width (int, optional): the size you used to detect.
            Defaults to 224.
        det_height (int, optional): the size you used to detect.
            Defaults to 224.

    Returns:
        np.ndarray: shape of (N, 3, 3)
    """
    # Build one pinhole intrinsic matrix with the principal point at the
    # detection-crop center, then repeat it for every frame.
    base = np.array([
        [focal_length, 0.0, det_width / 2],
        [0.0, focal_length, det_height / 2],
        [0.0, 0.0, 1.0],
    ])
    return np.tile(base, (num_frame, 1, 1))
def convert_kp2d_to_bbox(
        kp2d: np.ndarray,
        bbox_format: Literal['xyxy', 'xywh'] = 'xyxy') -> np.ndarray:
    """Convert kp2d to bbox.

    Args:
        kp2d (np.ndarray): shape should be (num_frame, num_points, 2/3)
            or (num_frame, num_person, num_points, 2/3).
        bbox_format (Literal['xyxy', 'xywh'], optional): Defaults to 'xyxy'.

    Returns:
        np.ndarray: shape will be (num_frame, num_person, 4)

    Raises:
        AssertionError: if bbox_format is unsupported.
    """
    assert bbox_format in ['xyxy', 'xywh']
    # Promote to (num_frame, num_person, num_points, C).
    if kp2d.ndim == 2:
        kp2d = kp2d[None, None]
    elif kp2d.ndim == 3:
        kp2d = kp2d[:, None]
    num_frame, num_person, _, _ = kp2d.shape
    # Tight box over the keypoint axis. Fix: the original indexed channels
    # 2/3 of the last axis (confidence / out of bounds for 2-3 channel
    # keypoints) and reduced the wrong axis, so the asserted
    # (num_frame, num_person, 4) shape was unreachable.
    x1 = np.min(kp2d[..., 0], axis=-1, keepdims=True)
    y1 = np.min(kp2d[..., 1], axis=-1, keepdims=True)
    x2 = np.max(kp2d[..., 0], axis=-1, keepdims=True)
    y2 = np.max(kp2d[..., 1], axis=-1, keepdims=True)
    bbox = np.concatenate([x1, y1, x2, y2], axis=-1)
    assert bbox.shape == (num_frame, num_person, 4)
    if bbox_format == 'xywh':
        bbox = xyxy2xywh(bbox)
    return bbox
def conver_verts_to_cam_coord(verts,
                              pred_cams,
                              bboxes_xy,
                              focal_length=5000.,
                              bbox_scale_factor=1.25,
                              bbox_format='xyxy'):
    """Convert vertices from the world coordinate to camera coordinate.
    Args:
        verts ([np.ndarray]): The vertices in the world coordinate.
            The shape is (frame,num_person,6890,3) or (frame,6890,3).
        pred_cams ([np.ndarray]): Camera parameters estimated by HMR or SPIN.
            The shape is (frame,num_person,3) or (frame,6890,3).
        bboxes_xy ([np.ndarray]): (frame, num_person, 4|5) or (frame, 4|5)
        focal_length ([float],optional): Defined same as your training.
        bbox_scale_factor (float): scale factor for expanding the bbox.
        bbox_format (Literal['xyxy', 'xywh'] ): 'xyxy' means the left-up point
            and right-bottomn point of the bbox.
            'xywh' means the left-up point and the width and height of the
            bbox.
    Returns:
        np.ndarray: The vertices in the camera coordinate.
            The shape is (frame,num_person,6890,3) or (frame,6890,3).
        np.ndarray: The intrinsic parameters of the pred_cam.
            The shape is (num_frame, 3, 3).
    """
    # K0: the fixed intrinsic used at HMR training time; num_frame defaults
    # to 1, so K0 has shape (1, 3, 3) and broadcasts over frames below.
    K0 = get_default_hmr_intrinsic(
        focal_length=focal_length, det_height=224, det_width=224)
    # K1: per-bbox intrinsics derived from the detections; presumably
    # (frame, 3, 3) or (frame, num_person, 3, 3) matching bboxes_xy — the
    # helper is defined elsewhere in this module (TODO confirm).
    K1 = convert_bbox_to_intrinsic(
        bboxes_xy,
        bbox_scale_factor=bbox_scale_factor,
        bbox_format=bbox_format)
    # K1K0(RX+T)-> K0(K0_inv K1K0)
    Ks = np.linalg.inv(K0) @ K1 @ K0
    # convert vertices from world to camera
    # pred_cam layout is (scale, tx, ty); z translation comes from the weak
    # perspective scale, with 1e-9 guarding against division by zero.
    cam_trans = np.concatenate([
        pred_cams[..., [1]], pred_cams[..., [2]], 2 * focal_length /
        (224 * pred_cams[..., [0]] + 1e-9)
    ], -1)
    verts = verts + cam_trans[..., None, :]
    if verts.ndim == 4:
        # Multi-person: verts (frame, num_person, 6890, 3).
        verts = np.einsum('fnij,fnkj->fnki', Ks, verts)
    elif verts.ndim == 3:
        # Single person: verts (frame, 6890, 3).
        verts = np.einsum('fij,fkj->fki', Ks, verts)
    return verts, K0
def smooth_process(x,
                   smooth_type='savgol',
                   cfg_base_dir='configs/_base_/post_processing/'):
    """Smooth pose/keypoint arrays with a configured filter.

    Args:
        x (np.ndarray): Shape should be (frame,num_person,K,C)
            or (frame,K,C).
        smooth_type (str, optional): Smooth type,
            one of ['oneeuro', 'gaus1d', 'savgol']. Defaults to 'savgol'.
        cfg_base_dir (str, optional): Config base dir,
            default configs/_base_/post_processing/

    Raises:
        TypeError: if the resolved config is neither a path nor mmcv.Config.

    Returns:
        np.ndarray: Smoothed data with the same shape as the input.
    """
    assert smooth_type in ['oneeuro', 'gaus1d', 'savgol']
    cfg = os.path.join(cfg_base_dir, f'{smooth_type}.py')
    if isinstance(cfg, str):
        cfg = mmcv.Config.fromfile(cfg)
    elif not isinstance(cfg, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(cfg)}')
    # Work on a copy so the caller's array is left untouched.
    x = x.copy()
    assert x.ndim in (3, 4)
    smooth_func = build_post_processing(dict(cfg['smooth_cfg']))
    if x.ndim == 3:
        return smooth_func(x)
    # Multi-person input: smooth each person track independently.
    for person_idx in range(x.shape[1]):
        x[:, person_idx] = smooth_func(x[:, person_idx])
    return x
def speed_up_process(x,
                     speed_up_type='deciwatch',
                     cfg_base_dir='configs/_base_/post_processing/'):
    """Speed up the process with the specified speed up type.

    Args:
        x: tensor of shape (frame,num_person,K,C) or (frame,K,C)
            (torch-like: .clone()/.device/.cpu() are used).
        speed_up_type (str, optional): 'deciwatch' or one of the explicit
            'deciwatch_interval{5,10}_q{1..5}' variants.
            Defaults to 'deciwatch'.
        cfg_base_dir (str, optional): Config base dir.
            Defaults to 'configs/_base_/post_processing/'

    Raises:
        TypeError: if the resolved config is neither a path nor mmcv.Config.

    Returns:
        np.ndarray: Completed data with the same leading shape as the input.
    """
    if speed_up_type == 'deciwatch':
        speed_up_type = 'deciwatch_interval5_q3'
    # All supported variants: intervals 5 and 10, quality levels q1..q5.
    valid_types = [
        f'deciwatch_interval{interval}_q{q}'
        for interval in (5, 10) for q in range(1, 6)
    ]
    assert speed_up_type in valid_types
    cfg = os.path.join(cfg_base_dir, speed_up_type + '.py')
    if isinstance(cfg, str):
        cfg = mmcv.Config.fromfile(cfg)
    elif not isinstance(cfg, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(cfg)}')
    # Work on a copy so the caller's tensor is left untouched.
    x = x.clone()
    assert x.ndim in (4, 5)
    speed_up_cfg = cfg['speed_up_cfg']
    speed_up_cfg['device'] = x.device
    speed_up_func = build_post_processing(speed_up_cfg)
    if x.ndim == 4:
        x = speed_up_func(x)
    else:
        # Multi-person input: process each person track independently.
        for person_idx in range(x.shape[1]):
            x[:, person_idx] = speed_up_func(x[:, person_idx])
    return np.array(x.cpu())
def get_speed_up_interval(speed_up_type,
                          cfg_base_dir='configs/_base_/post_processing/'):
    """Get the frame interval of a specific speed up type.

    Args:
        speed_up_type (str): 'deciwatch' or one of the explicit
            'deciwatch_interval{5,10}_q{1..5}' variants.
        cfg_base_dir (str, optional): Config base dir,
            default configs/_base_/post_processing/

    Raises:
        TypeError: if the resolved config is neither a path nor mmcv.Config.

    Returns:
        int: speed up interval
    """
    if speed_up_type == 'deciwatch':
        speed_up_type = 'deciwatch_interval5_q3'
    # All supported variants: intervals 5 and 10, quality levels q1..q5.
    valid_types = [
        f'deciwatch_interval{interval}_q{q}'
        for interval in (5, 10) for q in range(1, 6)
    ]
    assert speed_up_type in valid_types
    cfg = os.path.join(cfg_base_dir, speed_up_type + '.py')
    if isinstance(cfg, str):
        cfg = mmcv.Config.fromfile(cfg)
    elif not isinstance(cfg, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(cfg)}')
    return cfg['speed_up_cfg']['interval']
def speed_up_interpolate(selected_frames, speed_up_frames, smpl_poses,
                         smpl_betas, pred_cams, bboxes_xyxy):
    """Interpolate smpl_betas, pred_cams, and bboxes_xyxy for speed up.

    Args:
        selected_frames (np.ndarray): Shape should be (selected frame number).
        speed_up_frames (int): Total speed up frame number
        smpl_poses (np.ndarray): selected frame smpl poses parameter
            (returned untouched; poses are recovered elsewhere)
        smpl_betas (np.ndarray): selected frame smpl shape parameter
        pred_cams (np.ndarray): selected frame camera parameter
        bboxes_xyxy (np.ndarray): selected frame bbox

    Returns:
        smpl_poses (np.ndarray): unchanged smpl poses parameter
        smpl_betas (np.ndarray): interpolated frame smpl shape parameter
        pred_cams (np.ndarray): interpolated frame camera parameter
        bboxes_xyxy (np.ndarray): interpolated frame bbox
    """
    selected_frames = selected_frames[selected_frames <= speed_up_frames]
    query_frames = np.arange(0, max(selected_frames))

    def _fill(values):
        # Linear interpolation along the frame axis from the kept frames.
        return interpolate.interp1d(
            selected_frames, values[selected_frames, :], kind='linear',
            axis=0)(query_frames)

    pred_cams[:speed_up_frames, :] = _fill(pred_cams)
    bboxes_xyxy[:speed_up_frames, :] = _fill(bboxes_xyxy)
    smpl_betas[:speed_up_frames, :] = _fill(smpl_betas)
    return smpl_poses, smpl_betas, pred_cams, bboxes_xyxy
def process_mmtracking_results(mmtracking_results,
                               max_track_id,
                               bbox_thr=None):
    """Process mmtracking results.

    Args:
        mmtracking_results ([list]): mmtracking_results.
        max_track_id (int): the maximum track id seen so far.
        bbox_thr (float): threshold for bounding boxes.

    Returns:
        person_results ([list]): tracked bounding boxes, sorted by track id
        max_track_id (int): the updated maximum track id
        instance_num (int): the number of instances
    """
    # 'track_results' is changed to 'track_bboxes'
    # in https://github.com/open-mmlab/mmtracking/pull/300
    if 'track_bboxes' in mmtracking_results:
        tracking_results = mmtracking_results['track_bboxes'][0]
    elif 'track_results' in mmtracking_results:
        tracking_results = mmtracking_results['track_results'][0]
    tracking_results = np.array(tracking_results)
    if bbox_thr is not None:
        # Rows are [track_id, x1, y1, x2, y2, score].
        assert tracking_results.shape[-1] == 6
        keep = np.where(tracking_results[:, 5] > bbox_thr)[0]
        tracking_results = tracking_results[keep]
    person_results = []
    for row in tracking_results:
        track_id = int(row[0])
        max_track_id = max(max_track_id, track_id)
        person_results.append({'track_id': track_id, 'bbox': row[1:]})
    person_results.sort(key=lambda person: person.get('track_id', 0))
    return person_results, max_track_id, len(person_results)
def process_mmdet_results(mmdet_results, cat_id=1, bbox_thr=None):
    """Process mmdet results, and return a list of bboxes.

    Args:
        mmdet_results (list|tuple): mmdet results.
        cat_id (int): category id (default: 1 for human)
        bbox_thr (float): threshold for bounding boxes.

    Returns:
        person_results (list): a list of detected bounding boxes
    """
    # A tuple means (det_results, segm_results); only the boxes are needed.
    det_results = mmdet_results[0] if isinstance(mmdet_results,
                                                 tuple) else mmdet_results
    bboxes = np.array(det_results[cat_id - 1])
    if bbox_thr is not None:
        # Rows are [x1, y1, x2, y2, score].
        assert bboxes.shape[-1] == 5
        bboxes = bboxes[np.where(bboxes[:, 4] > bbox_thr)[0]]
    return [{'bbox': bbox} for bbox in bboxes]
def prepare_frames(input_path=None):
    """Load frames from a video file, an image file or an image folder.

    Args:
        input_path (str, optional): Defaults to None.

    Raises:
        ValueError: if input_path is neither a file nor a folder.

    Returns:
        List[np.ndarray]: prepared frames
    """
    path = Path(input_path)
    if path.is_file():
        img_list = [mmcv.imread(input_path)]
        if img_list[0] is None:
            # Not an image file: fall back to decoding it as a video.
            video = mmcv.VideoReader(input_path)
            assert video.opened, f'Failed to load file {input_path}'
            img_list = list(video)
    elif path.is_dir():
        image_paths = sorted(
            os.path.join(input_path, name) for name in os.listdir(input_path)
            if name.lower().endswith(('.png', '.jpg')))
        img_list = [mmcv.imread(image_path) for image_path in image_paths]
        assert len(img_list), f'Failed to load image from {input_path}'
    else:
        raise ValueError('Input path should be an file or folder.'
                         f' Got invalid input path: {input_path}')
    return img_list
def extract_feature_sequence(extracted_results,
                             frame_idx,
                             causal,
                             seq_len,
                             step=1):
    """Extract a fixed-length window of per-frame features around a target.

    Args:
        extracted_results (List[List[Dict]]): Multi-frame feature extraction
            results, one inner list per frame; each inner-list element is one
            person's feature information (features, track_id, bbox).
        frame_idx (int): The index of the target frame in the original video.
        causal (bool): If True, the target frame is the first frame of the
            window; otherwise it sits in the middle.
        seq_len (int): The number of frames in the output sequence.
        step (int): Step size to extract frames from the video.

    Returns:
        List[List[Dict]]: feature extraction results of length seq_len,
            padded by repeating the first/last frame where the window
            extends past the video.
    """
    if causal:
        frames_left, frames_right = 0, seq_len - 1
    else:
        frames_left = (seq_len - 1) // 2
        frames_right = frames_left
    num_frames = len(extracted_results)

    # How many repeats of the first/last frame are needed at each end.
    pad_left = max(0, frames_left - frame_idx // step)
    pad_right = max(0, frames_right - (num_frames - 1 - frame_idx) // step)
    # Slice bounds of the real frames inside the video.
    start = max(frame_idx % step, frame_idx - frames_left * step)
    end = min(num_frames - (num_frames - 1 - frame_idx) % step,
              frame_idx + frames_right * step + 1)

    padded = [extracted_results[0]] * pad_left
    padded.extend(extracted_results[start:end:step])
    padded.extend([extracted_results[-1]] * pad_right)
    return padded
def get_different_colors(number_of_colors,
                         flag=0,
                         alpha: float = 1.0,
                         mode: str = 'bgr',
                         int_dtype: bool = True):
    """Get a numpy array of distinct colors of shape (N, len(mode)).

    Fix: the alpha channel used to be built with shape (N, 3), so any mode
    containing 'a' produced two extra columns; it is now (N, 1).

    Args:
        number_of_colors (int): number N of colors to generate.
        flag (int, optional): random seed for the lightness/saturation
            jitter (output is deterministic for a given flag).
        alpha (float, optional): value written into the alpha channel.
        mode (str, optional): channel order, any combination of 'r', 'g',
            'b', 'a'. Defaults to 'bgr'.
        int_dtype (bool, optional): uint8 in [0, 255] if True,
            float32 in [0, 1] otherwise.

    Returns:
        np.ndarray: colors of shape (number_of_colors, len(mode)).
    """
    mode = mode.lower()
    assert set(mode).issubset({'r', 'g', 'b', 'a'})
    # Save and later restore the global RNG state so seeding here does not
    # perturb the caller's random stream.
    nst0 = np.random.get_state()
    np.random.seed(flag)
    colors = []
    for i in np.arange(0., 360., 360. / number_of_colors):
        hue = i / 360.
        lightness = (50 + np.random.rand() * 10) / 100.
        saturation = (90 + np.random.rand() * 10) / 100.
        colors.append(colorsys.hls_to_rgb(hue, lightness, saturation))
    colors_np = np.asarray(colors)
    if int_dtype:
        colors_bgr = (255 * colors_np).astype(np.uint8)
    else:
        colors_bgr = colors_np.astype(np.float32)
    # recover the random state
    np.random.set_state(nst0)
    color_dict = {}
    if 'a' in mode:
        # One alpha column per color (was (N, 3): a shape bug).
        color_dict['a'] = np.ones((colors_bgr.shape[0], 1)) * alpha
    # NOTE(review): hls_to_rgb yields (r, g, b); column 0 is labelled 'b'
    # here, so the channels appear swapped — kept for backward compatibility.
    color_dict['b'] = colors_bgr[:, 0:1]
    color_dict['g'] = colors_bgr[:, 1:2]
    color_dict['r'] = colors_bgr[:, 2:3]
    colors_final = np.concatenate([color_dict[channel] for channel in mode],
                                  -1)
    return colors_final
| [
"numpy.random.seed",
"numpy.einsum",
"mmcv.VideoReader",
"numpy.ones",
"numpy.random.set_state",
"pathlib.Path",
"numpy.arange",
"mmcv.Config.fromfile",
"colorsys.hls_to_rgb",
"scipy.interpolate.interp1d",
"os.path.join",
"mmcv.imread",
"numpy.max",
"numpy.stack",
"numpy.asarray",
"mmh... | [((2316, 2425), 'numpy.stack', 'np.stack', (['[bbox_xywh[..., 0] + bbox_xywh[..., 2] * 0.5, bbox_xywh[..., 1] + bbox_xywh\n [..., 3] * 0.5]', '(-1)'], {}), '([bbox_xywh[..., 0] + bbox_xywh[..., 2] * 0.5, bbox_xywh[..., 1] + \n bbox_xywh[..., 3] * 0.5], -1)\n', (2324, 2425), True, 'import numpy as np\n'), ((2670, 2762), 'numpy.stack', 'np.stack', (['[bbox_xywh[..., 2] * 1.0 / pixel_std, bbox_xywh[..., 3] * 1.0 / pixel_std]', '(-1)'], {}), '([bbox_xywh[..., 2] * 1.0 / pixel_std, bbox_xywh[..., 3] * 1.0 /\n pixel_std], -1)\n', (2678, 2762), True, 'import numpy as np\n'), ((5371, 5406), 'numpy.stack', 'np.stack', (['[sx, sy, tx, ty]'], {'axis': '(-1)'}), '([sx, sy, tx, ty], axis=-1)\n', (5379, 5406), True, 'import numpy as np\n'), ((7986, 8013), 'numpy.zeros', 'np.zeros', (['(num_frame, 3, 3)'], {}), '((num_frame, 3, 3))\n', (7994, 8013), True, 'import numpy as np\n'), ((8829, 8858), 'numpy.max', 'np.max', (['kp2d[..., 0]'], {'axis': '(-2)'}), '(kp2d[..., 0], axis=-2)\n', (8835, 8858), True, 'import numpy as np\n'), ((8868, 8897), 'numpy.max', 'np.max', (['kp2d[..., 1]'], {'axis': '(-2)'}), '(kp2d[..., 1], axis=-2)\n', (8874, 8897), True, 'import numpy as np\n'), ((8907, 8936), 'numpy.max', 'np.max', (['kp2d[..., 2]'], {'axis': '(-2)'}), '(kp2d[..., 2], axis=-2)\n', (8913, 8936), True, 'import numpy as np\n'), ((8946, 8975), 'numpy.max', 'np.max', (['kp2d[..., 3]'], {'axis': '(-2)'}), '(kp2d[..., 3], axis=-2)\n', (8952, 8975), True, 'import numpy as np\n'), ((8987, 9028), 'numpy.concatenate', 'np.concatenate', (['[x1, y1, x2, y2]'], {'axis': '(-1)'}), '([x1, y1, x2, y2], axis=-1)\n', (9001, 9028), True, 'import numpy as np\n'), ((10861, 10983), 'numpy.concatenate', 'np.concatenate', (['[pred_cams[..., [1]], pred_cams[..., [2]], 2 * focal_length / (224 *\n pred_cams[..., [0]] + 1e-09)]', '(-1)'], {}), '([pred_cams[..., [1]], pred_cams[..., [2]], 2 * focal_length /\n (224 * pred_cams[..., [0]] + 1e-09)], -1)\n', (10875, 10983), True, 'import numpy as np\n'), 
((12043, 12090), 'os.path.join', 'os.path.join', (['cfg_base_dir', "(smooth_type + '.py')"], {}), "(cfg_base_dir, smooth_type + '.py')\n", (12055, 12090), False, 'import os\n'), ((14343, 14392), 'os.path.join', 'os.path.join', (['cfg_base_dir', "(speed_up_type + '.py')"], {}), "(cfg_base_dir, speed_up_type + '.py')\n", (14355, 14392), False, 'import os\n'), ((14772, 14803), 'mmhuman3d.core.post_processing.build_post_processing', 'build_post_processing', (['cfg_dict'], {}), '(cfg_dict)\n', (14793, 14803), False, 'from mmhuman3d.core.post_processing import build_post_processing\n'), ((16507, 16556), 'os.path.join', 'os.path.join', (['cfg_base_dir', "(speed_up_type + '.py')"], {}), "(cfg_base_dir, speed_up_type + '.py')\n", (16519, 16556), False, 'import os\n'), ((19396, 19422), 'numpy.array', 'np.array', (['tracking_results'], {}), '(tracking_results)\n', (19404, 19422), True, 'import numpy as np\n'), ((20641, 20657), 'numpy.array', 'np.array', (['bboxes'], {}), '(bboxes)\n', (20649, 20657), True, 'import numpy as np\n'), ((24586, 24607), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (24605, 24607), True, 'import numpy as np\n'), ((24612, 24632), 'numpy.random.seed', 'np.random.seed', (['flag'], {}), '(flag)\n', (24626, 24632), True, 'import numpy as np\n'), ((24662, 24709), 'numpy.arange', 'np.arange', (['(0.0)', '(360.0)', '(360.0 / number_of_colors)'], {}), '(0.0, 360.0, 360.0 / number_of_colors)\n', (24671, 24709), True, 'import numpy as np\n'), ((24931, 24949), 'numpy.asarray', 'np.asarray', (['colors'], {}), '(colors)\n', (24941, 24949), True, 'import numpy as np\n'), ((25119, 25144), 'numpy.random.set_state', 'np.random.set_state', (['nst0'], {}), '(nst0)\n', (25138, 25144), True, 'import numpy as np\n'), ((25491, 25523), 'numpy.concatenate', 'np.concatenate', (['colors_final', '(-1)'], {}), '(colors_final, -1)\n', (25505, 25523), True, 'import numpy as np\n'), ((4690, 4730), 'numpy.concatenate', 'np.concatenate', (['[center, scale]'], 
{'axis': '(-1)'}), '([center, scale], axis=-1)\n', (4704, 4730), True, 'import numpy as np\n'), ((6705, 6737), 'numpy.max', 'np.max', (['bboxes[..., 2:]'], {'axis': '(-1)'}), '(bboxes[..., 2:], axis=-1)\n', (6711, 6737), True, 'import numpy as np\n'), ((6866, 6905), 'numpy.zeros', 'np.zeros', (['(num_frame, num_person, 3, 3)'], {}), '((num_frame, num_person, 3, 3))\n', (6874, 6905), True, 'import numpy as np\n'), ((11085, 11124), 'numpy.einsum', 'np.einsum', (['"""fnij,fnkj->fnki"""', 'Ks', 'verts'], {}), "('fnij,fnkj->fnki', Ks, verts)\n", (11094, 11124), True, 'import numpy as np\n'), ((12134, 12159), 'mmcv.Config.fromfile', 'mmcv.Config.fromfile', (['cfg'], {}), '(cfg)\n', (12154, 12159), False, 'import mmcv\n'), ((14436, 14461), 'mmcv.Config.fromfile', 'mmcv.Config.fromfile', (['cfg'], {}), '(cfg)\n', (14456, 14461), False, 'import mmcv\n'), ((16600, 16625), 'mmcv.Config.fromfile', 'mmcv.Config.fromfile', (['cfg'], {}), '(cfg)\n', (16620, 16625), False, 'import mmcv\n'), ((17850, 17946), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['selected_frames', 'pred_cams[selected_frames, :]'], {'kind': '"""linear"""', 'axis': '(0)'}), "(selected_frames, pred_cams[selected_frames, :], kind=\n 'linear', axis=0)\n", (17870, 17946), False, 'from scipy import interpolate\n'), ((18039, 18137), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['selected_frames', 'bboxes_xyxy[selected_frames, :]'], {'kind': '"""linear"""', 'axis': '(0)'}), "(selected_frames, bboxes_xyxy[selected_frames, :], kind\n ='linear', axis=0)\n", (18059, 18137), False, 'from scipy import interpolate\n'), ((18253, 18350), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['selected_frames', 'smpl_betas[selected_frames, :]'], {'kind': '"""linear"""', 'axis': '(0)'}), "(selected_frames, smpl_betas[selected_frames, :], kind=\n 'linear', axis=0)\n", (18273, 18350), False, 'from scipy import interpolate\n'), ((4851, 4891), 'numpy.concatenate', 'np.concatenate', (['[center, scale]'], 
{'axis': '(-1)'}), '([center, scale], axis=-1)\n', (4865, 4891), True, 'import numpy as np\n'), ((6946, 6973), 'numpy.zeros', 'np.zeros', (['(num_frame, 3, 3)'], {}), '((num_frame, 3, 3))\n', (6954, 6973), True, 'import numpy as np\n'), ((10773, 10790), 'numpy.linalg.inv', 'np.linalg.inv', (['K0'], {}), '(K0)\n', (10786, 10790), True, 'import numpy as np\n'), ((11167, 11203), 'numpy.einsum', 'np.einsum', (['"""fij,fkj->fki"""', 'Ks', 'verts'], {}), "('fij,fkj->fki', Ks, verts)\n", (11176, 11203), True, 'import numpy as np\n'), ((19520, 19563), 'numpy.where', 'np.where', (['(tracking_results[:, 5] > bbox_thr)'], {}), '(tracking_results[:, 5] > bbox_thr)\n', (19528, 19563), True, 'import numpy as np\n'), ((20745, 20778), 'numpy.where', 'np.where', (['(bboxes[:, 4] > bbox_thr)'], {}), '(bboxes[:, 4] > bbox_thr)\n', (20753, 20778), True, 'import numpy as np\n'), ((21226, 21242), 'pathlib.Path', 'Path', (['input_path'], {}), '(input_path)\n', (21230, 21242), False, 'from pathlib import Path\n'), ((21274, 21297), 'mmcv.imread', 'mmcv.imread', (['input_path'], {}), '(input_path)\n', (21285, 21297), False, 'import mmcv\n'), ((21351, 21379), 'mmcv.VideoReader', 'mmcv.VideoReader', (['input_path'], {}), '(input_path)\n', (21367, 21379), False, 'import mmcv\n'), ((24866, 24913), 'colorsys.hls_to_rgb', 'colorsys.hls_to_rgb', (['hue', 'lightness', 'saturation'], {}), '(hue, lightness, saturation)\n', (24885, 24913), False, 'import colorsys\n'), ((25211, 25244), 'numpy.ones', 'np.ones', (['(colors_bgr.shape[0], 3)'], {}), '((colors_bgr.shape[0], 3))\n', (25218, 25244), True, 'import numpy as np\n'), ((7014, 7030), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (7022, 7030), True, 'import numpy as np\n'), ((21493, 21509), 'pathlib.Path', 'Path', (['input_path'], {}), '(input_path)\n', (21497, 21509), False, 'from pathlib import Path\n'), ((21586, 21614), 'os.path.join', 'os.path.join', (['input_path', 'fn'], {}), '(input_path, fn)\n', (21598, 21614), False, 'import 
os\n'), ((21756, 21777), 'mmcv.imread', 'mmcv.imread', (['img_path'], {}), '(img_path)\n', (21767, 21777), False, 'import mmcv\n'), ((21625, 21647), 'os.listdir', 'os.listdir', (['input_path'], {}), '(input_path)\n', (21635, 21647), False, 'import os\n'), ((24757, 24773), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (24771, 24773), True, 'import numpy as np\n'), ((24814, 24830), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (24828, 24830), True, 'import numpy as np\n')] |
"""
Weather Underground PWS Metadata Scraping Module
Code to scrape PWS network metadata
"""
import pandas as pd
import urllib3
from bs4 import BeautifulSoup as BS
import numpy as np
import requests
# import time
def scrape_station_info(state="WA"):
    """
    A script to scrape the station information published at the following URL:
    https://www.wunderground.com/weatherstation/ListStations.asp?
    selectedState=WA&selectedCountry=United+States&MR=1

    Fix: ``DataFrame.ix`` was removed in pandas 1.0; ``iloc`` is used
    instead. The unused ``head_len`` local was also dropped.

    :param state: US State by which to subset WU Station table
    :return: None; writes the table to ./data/station_data_from_FUN.csv
    """
    url = "https://www.wunderground.com/" \
          "weatherstation/ListStations.asp?selectedState=" \
          + state + "&selectedCountry=United+States&MR=1"
    raw_site_content = requests.get(url).content
    soup = BS(raw_site_content, 'html.parser')
    list_stations_info = soup.find_all("tr")
    all_station_info = np.array(['id', 'neighborhood', 'city', 'type', 'lat',
                                 'lon', 'elevation'])
    for i in range(1, len(list_stations_info)):  # start at 1 to omit headers
        station_info = str(list_stations_info[i]).splitlines()
        # pull out station info from the table-row markup
        station_id = station_info[1].split('ID=')[1].split('"')[0].strip()
        station_neighborhood = station_info[2].split('<td>')[1]
        station_neighborhood = station_neighborhood.split('\xa0')[0].strip()
        station_city = station_info[3].split('<td>')[1].split('\xa0')[0]
        station_city = station_city.strip()
        station_type = station_info[4].split('station-type">')[1]
        station_type = station_type.split('\xa0')[0].strip()
        # grab the latitude, longitude, and elevation metadata
        lat, lon, elev = scrape_lat_lon_fly(station_id)
        # stack this station's row onto the output table
        header = [station_id, station_neighborhood, station_city,
                  station_type, lat, lon, elev]
        all_station_info = np.vstack([all_station_info, header])
    all_station_info = pd.DataFrame(all_station_info)
    # First stacked row holds the column names; .ix was removed in pandas
    # 1.0, so use positional .iloc instead.
    all_station_info.columns = all_station_info.iloc[0, :]
    # do some dataframe editing
    all_station_info = all_station_info.drop(all_station_info
                                             .index[0]).reset_index()
    all_station_info = all_station_info.drop(all_station_info.columns[0],
                                             axis=1)
    return(all_station_info.to_csv('./data/station_data_from_FUN.csv'))
def scrape_lat_lon_fly(stationID):
    """
    Fetch latitude, longitude and elevation metadata for a station.

    :param stationID: str
        a unique identifier for the weather underground personal
        weather station
    :return: (latitude, longitude, elevation) tuple of strings;
        ('NA', 'NA', 'NA') whenever the lookup fails for any reason.
    """
    http = urllib3.PoolManager(maxsize=10, block=True,
                               cert_reqs='CERT_REQUIRED')
    try:
        url = 'https://api.wunderground.com/weatherstation/' \
              'WXDailyHistory.asp?ID={0}&format=XML'.format(stationID)
        response = http.request('GET', url, preload_content=False)
        soup = BS(response, 'xml')
        return (soup.find_all('latitude')[0].get_text(),
                soup.find_all('longitude')[0].get_text(),
                soup.find_all('elevation')[0].get_text())
    except Exception:
        # Deliberate best-effort: any network/parse failure yields NA fields.
        return ('NA', 'NA', 'NA')
def subset_stations_by_coords(station_data, lat_range, lon_range):
    """
    Subset station metadata by latitude and longitude

    Fixes: an unsupported ``station_data`` type used to fall through a
    silent ``else: pass`` and crash later with a NameError — it now raises
    a TypeError immediately. The caller's range lists are no longer
    sorted in place.

    :param station_data: str or Pandas.DataFrame
        filename of csv with station metadata (from scrape_lat_lon)
        or
        Pandas.DataFrame with station metadata (from scrape_lat_lon)
    :param lat_range: 2-element list
        min and max latitude range, e.g. [47.4, 47.8]
    :param lon_range: 2-element list
        min and max longitude range, e.g. [-122.5, -122.2]
    :raises TypeError: if station_data is neither a filename nor a DataFrame
    :return: pandas.DataFrame with station metadata subset by lat/lon bounds
    """
    # Sorted copies: accept ranges in either order without mutating inputs.
    lat_min, lat_max = sorted(lat_range)
    lon_min, lon_max = sorted(lon_range)
    if isinstance(station_data, str):
        df = pd.read_csv(station_data, index_col=1)
        df = df.dropna(subset=["Latitude", "Longitude"])
    elif isinstance(station_data, pd.DataFrame):
        df = station_data
    else:
        raise TypeError('station_data must be a filename (str) or a '
                        f'pandas.DataFrame, got {type(station_data)}')
    df = df[(df["Latitude"] >= lat_min) &
            (df["Latitude"] <= lat_max) &
            (df["Longitude"] >= lon_min) &
            (df["Longitude"] <= lon_max)]
    return df
def get_station_ids_by_coords(station_data_csv, lat_range, lon_range):
    """
    Wrapper around subset_stations_by_coords; returns just the IDs of the
    stations in a box

    :param station_data_csv: str
        filename of csv with station metadata (from scrape_lat_lon)
    :param lat_range: 2-element list
        min and max latitude range, e.g. [47.4, 47.8]
    :param lon_range: 2-element list
        min and max longitude range, e.g. [-122.5, -122.2]
    :return: list of station IDs (strings)
    """
    subset = subset_stations_by_coords(station_data_csv, lat_range, lon_range)
    # Station IDs live in the frame's index.
    return subset.index.tolist()
# TESTING
# station_data_csv = "data/station_data.csv"
# lat_range = [47.4, 47.8]
# lon_range = [-122.5, -122.2]
# print(get_station_ids_by_coords(station_data_csv, lat_range, lon_range))
| [
"pandas.DataFrame",
"pandas.read_csv",
"numpy.array",
"urllib3.PoolManager",
"requests.get",
"bs4.BeautifulSoup",
"numpy.vstack"
] | [((801, 836), 'bs4.BeautifulSoup', 'BS', (['raw_site_content', '"""html.parser"""'], {}), "(raw_site_content, 'html.parser')\n", (803, 836), True, 'from bs4 import BeautifulSoup as BS\n'), ((907, 982), 'numpy.array', 'np.array', (["['id', 'neighborhood', 'city', 'type', 'lat', 'lon', 'elevation']"], {}), "(['id', 'neighborhood', 'city', 'type', 'lat', 'lon', 'elevation'])\n", (915, 982), True, 'import numpy as np\n'), ((2997, 3067), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {'maxsize': '(10)', 'block': '(True)', 'cert_reqs': '"""CERT_REQUIRED"""'}), "(maxsize=10, block=True, cert_reqs='CERT_REQUIRED')\n", (3016, 3067), False, 'import urllib3\n'), ((764, 781), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (776, 781), False, 'import requests\n'), ((2102, 2139), 'numpy.vstack', 'np.vstack', (['[all_station_info, header]'], {}), '([all_station_info, header])\n', (2111, 2139), True, 'import numpy as np\n'), ((2168, 2198), 'pandas.DataFrame', 'pd.DataFrame', (['all_station_info'], {}), '(all_station_info)\n', (2180, 2198), True, 'import pandas as pd\n'), ((3317, 3329), 'bs4.BeautifulSoup', 'BS', (['r', '"""xml"""'], {}), "(r, 'xml')\n", (3319, 3329), True, 'from bs4 import BeautifulSoup as BS\n'), ((4349, 4387), 'pandas.read_csv', 'pd.read_csv', (['station_data'], {'index_col': '(1)'}), '(station_data, index_col=1)\n', (4360, 4387), True, 'import pandas as pd\n')] |
from IPython.display import Markdown as md
import numpy as np
def fixCORs(dataset):
    '''
    Last column and row of XCOR and YCOR are all 0's, this function fixes that. Nice for plotting

    Mutates (and returns) the passed dataset: the final row/column of the
    corner-coordinate arrays is extrapolated by one grid step.
    '''
    # Force the lazily-loaded coordinate arrays into memory so that the
    # in-place assignments below are possible.
    dataset.XCOR.load()
    dataset.YCOR.load()
    # Extrapolate the edge by one grid step — assumes the spacing at index 1
    # equals the spacing at the far edge (uniform grid); TODO confirm for
    # non-uniform grids.
    dataset.XCOR[-1,:] = dataset.XCOR.isel(MC=-2).values + dataset.XCOR.isel(MC=1).values
    dataset.XCOR[:,-1] = dataset.XCOR.isel(NC=-2).values
    dataset.YCOR[:,-1] = dataset.YCOR.isel(NC=-2).values + dataset.YCOR.isel(NC=1).values
    dataset.YCOR[-1,:] = dataset.YCOR.isel(MC=-2).values
    return dataset
def makeMeshGrid(length=45000, width=18000, x_gridstep=300, y_gridstep=300):
    '''
    Make a uniform meshgrid of cell-centre coordinates using the given
    parameters. The x axis gets an extra leading 0 column; the y axis always
    starts at a centre of 100 - y_gridstep/2 (the grid origin is at 100).

    TODO
    ----
    * Non-uniform meshgrids?
    * These default arguments only make sense for my current model
    '''
    x_centers = [0] + list(range(int(x_gridstep / 2),
                                 int(width) + int(x_gridstep),
                                 int(x_gridstep)))
    y_centers = list(range(int(100 - y_gridstep / 2),
                           int(length) + int(y_gridstep),
                           int(y_gridstep)))
    print(len(x_centers), "x", len(y_centers), "grid")
    XZ, YZ = np.meshgrid(x_centers, y_centers)
    return XZ.T, YZ.T  # transposed so the first axis runs along x
def fixMeshGrid(dataset, mystery_flag=False):
    '''
    Derives gridsteps and dimensions from passed DataSet
    Assumes uniform grid, curvilinear grid wont work here!
    Reference to XZ and YZ need to be passed explicitly because Dask loads the netCDF lazily

    Parameters
    ----------
    dataset : xarray DataSet
        The delft3d-flow dataset
    mystery_flag : bool
        Sometimes 1 and sometimes 2 gridsteps need to be subtracted from the
        length (off-by-one? even vs uneven?); root cause unknown.
        Maybe this is not necessary if masks are applied properly

    Returns
    -------
    dataset : xarray DataSet
        The delft3d-flow dataset with fixed grid
    '''
    print("● Fixing mesh grid, assuming a uniform grid ")
    # Force the lazily-loaded coordinate arrays into memory.
    dataset.XZ.load()
    dataset.YZ.load()
    # Derive the (assumed uniform) grid steps from interior cell centres.
    x_gridstep = dataset.XZ.values[2][-1] - dataset.XZ.values[1][-1]
    y_gridstep = dataset.YZ.values[-2][-1] - dataset.YZ.values[-2][-2]
    width = (dataset.XZ.shape[0]-2) * x_gridstep
    if mystery_flag:
        length = (dataset.XZ.shape[1] - 1) * y_gridstep # eeehhh hmmmm -1? sometimes -2?
    else:
        length = (dataset.XZ.shape[1] - 2) * y_gridstep # eeehhh hmmmm -1? sometimes -2?
    # NOTE(review): the Markdown object below is built but its value is
    # discarded — it only renders in a notebook if it is the last expression
    # of a cell, so this call is effectively a no-op here.
    md(f"""
    # Times
    | Name | Value |
    | --- | --- |
    | x gridstep | {x_gridstep} |
    | y gridstep | {y_gridstep} |
    | Width | {width} |
    | Length | {length} |
    """)
    # Rebuild a clean uniform grid with the derived parameters and overwrite
    # the dataset's cell-centre coordinates in place.
    XZ, YZ = makeMeshGrid(length=length, width=width, x_gridstep=x_gridstep, y_gridstep=y_gridstep)
    dataset.XZ.values = XZ
    dataset.YZ.values = YZ
    return dataset
"IPython.display.Markdown",
"numpy.meshgrid"
] | [((1272, 1297), 'numpy.meshgrid', 'np.meshgrid', (['xList', 'yList'], {}), '(xList, yList)\n', (1283, 1297), True, 'import numpy as np\n'), ((2659, 2854), 'IPython.display.Markdown', 'md', (['f"""\n # Times\n | Name | Value |\n | --- | --- |\n | x gridstep | {x_gridstep} |\n | y gridstep | {y_gridstep} |\n | Width | {width} |\n | Length | {length} |\n \n """'], {}), '(f"""\n # Times\n | Name | Value |\n | --- | --- |\n | x gridstep | {x_gridstep} |\n | y gridstep | {y_gridstep} |\n | Width | {width} |\n | Length | {length} |\n \n """\n )\n', (2661, 2854), True, 'from IPython.display import Markdown as md\n')] |
# Import Dependencies
import datetime as dt

import numpy as np
import sqlalchemy
from sqlalchemy import create_engine, func
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session

from flask import Flask, jsonify
#################################################
# Database Setup
#################################################
# Open the SQLite file holding the Hawaii climate data.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
# NOTE(review): one module-level session is shared by every request — fine
# for this small app, not safe under concurrent production traffic.
session = Session(engine)
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def home():
    """Landing page: list every available API route."""
    routes = [
        "/api/v1.0/precipitation",
        "/api/v1.0/stations",
        "/api/v1.0/tobs",
        "/api/v1.0/<start>",
        "/api/v1.0/<start>/<end>",
    ]
    return "Available Routes:<br/>" + "<br/>".join(routes)
@app.route("/api/v1.0/precipitation")
def precipitation():
# Convert and query the results to a dictionary using 'date as the key and 'prcp' as the value.
one_year_prior = dt.date(2017, 8, 23) - dt.timedelta(days = 365)
precipitation_data = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= one_year_prior).all()
precipitation_dict = {date: prcp for date, prcp in precipitation_data}
# Return the JSON representation of your dictionary.
return jsonify(precipitation_dict)
@app.route("/api/v1.0/stations")
def stations():
# Return a JSON list of stations from the dataset.
results = session.query(Station.station).all()
stations_list = list(np.ravel(results))
return jsonify(stations_list)
@app.route("/api/v1.0/tobs")
def tobs():
# Query the dates and temperature observations of most active station for last year of data.
one_year_prior = dt.date(2017, 8, 23) - dt.timedelta(days = 365)
tobs_results = session.query(Measurement.tobs).filter(Measurement.date >= one_year_prior).\
filter(Measurement.station == 'USC00519281').all()
tobs_list = list(np.ravel(tobs_results))
return jsonify(tobs_list)
@app.route("/api/v1.0/<start>")
def start_date(start):
# Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
start_date = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start).\
group_by(Measurement.date).all()
start_date_list = list(start_date)
return jsonify(start_date_list)
@app.route("/api/v1.0/<start>/<end>")
def start_end_date(start, end):
start_end_date = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start).\
filter(Measurement.date <= end).\
group_by(Measurement.date).all()
start_end_date_list = list(start_end_date)
return jsonify(start_end_date_list)
if __name__ == '__main__':
    # NOTE(review): debug=True enables Werkzeug's interactive debugger and
    # auto-reloader — convenient locally, unsafe in production.
    app.run(debug=True)
"sqlalchemy.func.avg",
"numpy.ravel",
"flask.Flask",
"sqlalchemy.orm.Session",
"flask.jsonify",
"sqlalchemy.func.min",
"sqlalchemy.create_engine",
"sqlalchemy.ext.automap.automap_base",
"sqlalchemy.func.max"
] | [((350, 400), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///Resources/hawaii.sqlite"""'], {}), "('sqlite:///Resources/hawaii.sqlite')\n", (363, 400), False, 'from sqlalchemy import create_engine, func\n'), ((457, 471), 'sqlalchemy.ext.automap.automap_base', 'automap_base', ([], {}), '()\n', (469, 471), False, 'from sqlalchemy.ext.automap import automap_base\n'), ((690, 705), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (697, 705), False, 'from sqlalchemy.orm import Session\n'), ((827, 842), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (832, 842), False, 'from flask import Flask, jsonify\n'), ((1755, 1782), 'flask.jsonify', 'jsonify', (['precipitation_dict'], {}), '(precipitation_dict)\n', (1762, 1782), False, 'from flask import Flask, jsonify\n'), ((1994, 2016), 'flask.jsonify', 'jsonify', (['stations_list'], {}), '(stations_list)\n', (2001, 2016), False, 'from flask import Flask, jsonify\n'), ((2448, 2466), 'flask.jsonify', 'jsonify', (['tobs_list'], {}), '(tobs_list)\n', (2455, 2466), False, 'from flask import Flask, jsonify\n'), ((2939, 2963), 'flask.jsonify', 'jsonify', (['start_date_list'], {}), '(start_date_list)\n', (2946, 2963), False, 'from flask import Flask, jsonify\n'), ((3389, 3417), 'flask.jsonify', 'jsonify', (['start_end_date_list'], {}), '(start_end_date_list)\n', (3396, 3417), False, 'from flask import Flask, jsonify\n'), ((1964, 1981), 'numpy.ravel', 'np.ravel', (['results'], {}), '(results)\n', (1972, 1981), True, 'import numpy as np\n'), ((2413, 2435), 'numpy.ravel', 'np.ravel', (['tobs_results'], {}), '(tobs_results)\n', (2421, 2435), True, 'import numpy as np\n'), ((2694, 2720), 'sqlalchemy.func.min', 'func.min', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (2702, 2720), False, 'from sqlalchemy import create_engine, func\n'), ((2722, 2748), 'sqlalchemy.func.avg', 'func.avg', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (2730, 2748), False, 'from sqlalchemy import 
create_engine, func\n'), ((2750, 2776), 'sqlalchemy.func.max', 'func.max', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (2758, 2776), False, 'from sqlalchemy import create_engine, func\n'), ((3070, 3096), 'sqlalchemy.func.min', 'func.min', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (3078, 3096), False, 'from sqlalchemy import create_engine, func\n'), ((3098, 3124), 'sqlalchemy.func.avg', 'func.avg', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (3106, 3124), False, 'from sqlalchemy import create_engine, func\n'), ((3126, 3152), 'sqlalchemy.func.max', 'func.max', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (3134, 3152), False, 'from sqlalchemy import create_engine, func\n')] |
#################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""A class for point."""
from typing import Union, TypeVar, Iterable, Optional, Iterator
import math
import numpy as np
from shapely.geometry import Point as ShapelyPoint
from geometry_msgs.msg import Point as ROSPoint
# Forward-declaration placeholders used only in annotations: the concrete
# deepsim Vector3/Quaternion classes are imported lazily inside methods
# (to avoid circular imports), so TypeVars stand in for them here.
Vector3 = TypeVar('Vector3')
Quaternion = TypeVar('Quaternion')
class Point:
    """
    Point class

    A mutable 3D point backed by a length-3 numpy float array.
    """
    def __init__(self, x: float = 0.0, y: float = 0.0, z: float = 0.0, *,
                 buffer: Optional[Iterable[float]] = None) -> None:
        """
        Constructor

        Args:
            x (float): x value
            y (float): y value
            z (float): z value
            buffer (Optional[Iterable[float]]): buffer to copy directly to internal representation.

        Raises:
            ValueError: if buffer contains fewer than 3 elements.
        """
        # Materialize first so any Iterable (including generators) is
        # accepted, as the annotation promises; previously len()/slicing
        # rejected iterables without __len__.
        buffer = list(buffer) if buffer is not None else [x, y, z]
        if len(buffer) < 3:
            raise ValueError("buffer must contain at least 3 elements")
        self._buffer = np.array(buffer[:3], dtype=float)

    @property
    def x(self) -> float:
        """
        Returns x value of the point.

        Returns:
            float: x value of the point.
        """
        return self._buffer[0]

    @x.setter
    def x(self, value: float) -> None:
        """
        Set new x value of the point.

        Args:
            value (float): new x value.
        """
        self._buffer[0] = value

    @property
    def y(self) -> float:
        """
        Returns y value of the point.

        Returns:
            float: y value of the point.
        """
        return self._buffer[1]

    @y.setter
    def y(self, value: float) -> None:
        """
        Set new y value of the point.

        Args:
            value (float): new y value.
        """
        self._buffer[1] = value

    @property
    def z(self) -> float:
        """
        Returns z value of the point.

        Returns:
            float: z value of the point.
        """
        return self._buffer[2]

    @z.setter
    def z(self, value: float) -> None:
        """
        Set new z value of the point.

        Args:
            value (float): new z value.
        """
        self._buffer[2] = value

    @property
    def buffer(self) -> np.ndarray:
        """
        Returns internal buffer.
        - Use with caution as changing the returned buffer will result changing the object.

        Returns:
            np.ndarray: internal buffer.
        """
        return self._buffer

    @staticmethod
    def rotate_point(p: 'Point', q: Quaternion) -> 'Point':
        """
        Rotate the given point by given quaternion.

        Args:
            p (Point): point to apply the given quaternion.
            q (Quaternion): A quaternion

        Returns:
            Point: rotated point.
        """
        return p.to_vector().rotate(q).to_point()

    @staticmethod
    def project(p: 'Point', on_normal: Vector3) -> 'Point':
        """
        Project point p onto on_normal:
        - The projection is just on_normal rescaled to that it reaches that point on the line v.

        Args:
            p (Point): point to project to on_normal
            on_normal (Vector3): direction vector

        Returns:
            Point: projection in Point format
        """
        from deepsim.core.vector3 import Vector3
        return Vector3.project(p.to_vector(), on_normal).to_point()

    def rotate(self, q: Quaternion) -> 'Point':
        """
        Returns the rotated point in the orientation of the given quaternion.

        Args:
            q (Quaternion): A quaternion

        Returns:
            Point: final point from p with q applied.
        """
        return self.to_vector().rotate(q).to_point()

    def rotate_inplace(self, q: Quaternion) -> None:
        """
        Rotate the point in the orientation of the given quaternion in place.

        Args:
            q (Quaternion): A quaternion
        """
        self._buffer = self.to_vector().rotate(q).buffer

    def to_ros(self) -> ROSPoint:
        """
        Convert to ROS model

        Returns:
            geometry_msgs.msg.Point: ROS Point
        """
        ros_point = ROSPoint()
        ros_point.x = self.x
        ros_point.y = self.y
        ros_point.z = self.z
        return ros_point

    def to_list(self) -> list:
        """
        Convert to Python list

        Returns:
            list: list containing x, y, z in order (native Python floats).
        """
        # tolist() yields builtin floats; list(ndarray) would yield np.float64.
        return self._buffer.tolist()

    def to_numpy(self) -> np.ndarray:
        """
        Convert to Numpy array

        Returns:
            numpy.array: numpy array containing x, y, z in order.
        """
        return self._buffer.copy()

    def to_vector(self) -> Vector3:
        """
        Convert to Vector3

        Returns:
            Vector3: Vector3 containing x, y, z
        """
        from deepsim.core.vector3 import Vector3
        return Vector3(buffer=self._buffer)

    def to_shapely(self) -> ShapelyPoint:
        """
        Convert to Shapely

        Returns:
            ShapelyPoint: shapely Point containing x, y, z
        """
        return ShapelyPoint(self.buffer)

    def to_shapely_2d(self) -> ShapelyPoint:
        """
        Convert to Shapely

        Returns:
            ShapelyPoint: shapely Point containing x, y
        """
        return ShapelyPoint(self.buffer[0:2])

    @staticmethod
    def from_ros(value: ROSPoint) -> 'Point':
        """
        Returns new Point object created from ROS Point

        Args:
            value (geometry_msgs.msg.Point): ROS Point

        Returns:
            Point: new Point object created from ROS Point
        """
        return Point(x=value.x,
                     y=value.y,
                     z=value.z)

    @staticmethod
    def from_list(value: Union[list, tuple]) -> 'Point':
        """
        Return new Point object from list

        Args:
            value (Union[list, tuple]): Point in list or tuple type

        Returns:
            Point: new Point object created from list
        """
        return Point(buffer=value)

    @staticmethod
    def from_numpy(value: np.ndarray) -> 'Point':
        """
        Return new Point object from numpy.array

        Args:
            value (np.ndarray): Point in numpy.array type

        Returns:
            Point: new Point object created from numpy.array
        """
        return Point(buffer=value)

    @staticmethod
    def from_vector(value: Vector3) -> 'Point':
        """
        Return new Point object from Vector3

        Args:
            value (Vector3): Vector3 object

        Returns:
            Point: new Point object created from Vector3
        """
        return Point(buffer=value.buffer)

    @staticmethod
    def from_shapely(value: ShapelyPoint) -> 'Point':
        """
        Return new Point object from ShapelyPoint

        Args:
            value (ShapelyPoint): shapely point.

        Returns:
            Point: new Point object created from ShapelyPoint
        """
        return Point(x=value.x,
                     y=value.y,
                     z=value.z if value.has_z else 0.0)

    @staticmethod
    def get_angle_in_2d_rad(pt1: 'Point', pt2: 'Point') -> float:
        """
        Return angle between two points in radian measured counter-clockwise from the +x axis.

        Args:
            pt1 (Point): point 1
            pt2 (Point): point 2

        Returns:
            float: the angle between two points in radian measured counter-clockwise from the +x axis.
        """
        return math.atan2(pt2.y - pt1.y, pt2.x - pt1.x)

    def copy(self) -> 'Point':
        """
        Returns a copy.

        Returns:
            Point: the copied point
        """
        return Point(buffer=self.buffer)

    def __add__(self, other: Union['Point', Vector3]) -> Union['Point', Vector3]:
        """
        Returns a Point from the addition of self and other.
        - P + v is P translated by v
        - P + P yields a Vector3 (sum of position vectors).

        Args:
            other (Union[Point, Vector3]): other to add

        Returns:
            Union[Point, Vector3]: result of the addition of self and other
        """
        from deepsim.core.vector3 import Vector3
        if isinstance(other, Vector3):
            return Point(buffer=self.buffer + other.buffer)
        elif isinstance(other, Point):
            return Vector3(buffer=self.buffer + other.buffer)
        else:
            return NotImplemented

    def __sub__(self, other: Union['Point', Vector3]) -> Union['Point', Vector3]:
        """
        Returns the subtraction of other from self.
        - P - v is a Point; P - P is the displacement Vector3.

        Args:
            other (Union[Point, Vector3]): other to subtract

        Returns:
            Union[Point, Vector3]: result of the subtraction of other from self.
        """
        from deepsim.core.vector3 import Vector3
        if isinstance(other, Vector3):
            return Point(buffer=self.buffer - other.buffer)
        elif isinstance(other, Point):
            return Vector3(buffer=self.buffer - other.buffer)
        else:
            return NotImplemented

    def __mul__(self, other: Union[float, int, 'Point', Vector3]) -> 'Point':
        """
        Returns scale or element-wise multiplication, p * scale, p * v, or p * p.

        Args:
            other (Union[float, int, Vector3, Point]): scale or element-wise factor

        Returns:
            Point: p * other
        """
        from deepsim.core.vector3 import Vector3
        if isinstance(other, float) or isinstance(other, int):
            return Point(buffer=self.buffer * other)
        if isinstance(other, Vector3) or isinstance(other, Point):
            return Point(buffer=self.buffer * other.buffer)
        return NotImplemented

    def __rmul__(self, other: Union[float, int, Quaternion]) -> 'Point':
        """
        Returns scale multiplication, scale * p, or rotation, Q * p.

        Args:
            other (Union[float, int, Quaternion]): scale or quaternion.

        Returns:
            Point: scale * p or Q * p
        """
        from deepsim.core.quaternion import Quaternion
        if isinstance(other, float) or isinstance(other, int):
            return self.__mul__(other)
        if isinstance(other, Quaternion):
            return self.to_vector().rotate(other).to_point()
        return NotImplemented

    def __truediv__(self, scale: Union[float, int]) -> 'Point':
        """
        Returns a point resulted by p / scale
        - division of a point by a float r is scaling by (1/r)

        Args:
            scale (Union[float, int]): scale

        Returns:
            Point: p / scale
        """
        if isinstance(scale, float) or isinstance(scale, int):
            return self.__mul__(1.0 / scale)
        return NotImplemented

    def __iadd__(self, other: Union['Point', Vector3]) -> 'Point':
        """
        Assign the point sum.

        Args:
            other (Union[Point, Vector3]): other to add.

        Returns:
            Point: self += other

        Raises:
            ValueError: if other is not a Point or Vector3.
        """
        from deepsim.core.vector3 import Vector3
        if isinstance(other, Vector3) or isinstance(other, Point):
            self._buffer += other.buffer
        else:
            raise ValueError("Not supported type {}.".format(type(other)))
        return self

    def __isub__(self, other: Union['Point', Vector3]) -> 'Point':
        """
        Assign the point difference.

        Args:
            other (Union[Point, Vector3]): other to subtract.

        Returns:
            Point: p -= v

        Raises:
            ValueError: if other is not a Point or Vector3.
        """
        from deepsim.core.vector3 import Vector3
        if isinstance(other, Vector3) or isinstance(other, Point):
            self._buffer -= other.buffer
        else:
            raise ValueError("Not supported type {}.".format(type(other)))
        return self

    def __imul__(self, other: Union[float, int, 'Point', Vector3]) -> 'Point':
        """
        Assign other multiplication, p *= scale, p *= p, p *= v.

        Args:
            other (Union[float, int, 'Point', Vector3]): scale, vector, or point

        Returns:
            Point: p *= scale, p *= p, p *= v

        Raises:
            ValueError: if other is not a number, Point, or Vector3.
        """
        from deepsim.core.vector3 import Vector3
        if isinstance(other, Vector3) or isinstance(other, Point):
            self._buffer *= other.buffer
        elif isinstance(other, float) or isinstance(other, int):
            self._buffer *= other
        else:
            raise ValueError("Not supported type {}.".format(type(other)))
        return self

    def __itruediv__(self, scale: Union[float, int]) -> 'Point':
        """
        Assign a point resulted by p / scale (in-place).
        - division of a point by a float r is scaling by (1/r)

        Args:
            scale (Union[float, int]): scale

        Returns:
            Point: p /= scale

        Raises:
            ValueError: if scale is not a number.
        """
        if isinstance(scale, float) or isinstance(scale, int):
            return self.__imul__(1.0 / scale)
        else:
            raise ValueError("Not supported type {}.".format(type(scale)))

    # BUG FIX: the in-place division hook in Python 3 is __itruediv__ —
    # the old __idiv__ spelling (Python 2) was never invoked by `p /= s`.
    # Keep the old name as an alias for any direct callers.
    __idiv__ = __itruediv__

    def __neg__(self) -> 'Point':
        """
        Returns negated point.
        - The negation of a point is negation of all its coordinates

        Returns:
            Point: the negated point
        """
        return Point(buffer=-self.buffer)

    def __iter__(self) -> Iterator[float]:
        """
        Iterator over the coordinates

        Returns:
            Iterator[float]: iterator
        """
        return self.buffer.__iter__()

    def __eq__(self, other: 'Point') -> bool:
        """
        Equality of point is equality of all coordinates to within epsilon.

        Args:
            other (Point): other to compare

        Returns:
            bool: True if the differences of all coordinates are within epsilon, Otherwise False.
        """
        # Return NotImplemented for foreign types instead of raising
        # AttributeError, so Python can try the reflected comparison.
        if not isinstance(other, Point):
            return NotImplemented
        return bool(np.all(np.isclose(self.buffer, other.buffer)))

    def __ne__(self, other: 'Point') -> bool:
        """
        Inequality of point is inequality of any coordinates

        Args:
            other (Point): other to compare

        Returns:
            bool: False if the differences of all coordinates are within epsilon, Otherwise True.
        """
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __getitem__(self, i: int) -> float:
        """
        Return P[i] where P[i] is x, y, z for i in 0, 1, 2 respectively.

        Args:
            i (int): index

        Returns:
            float: value at the given index.
        """
        return self.buffer[i]

    def __setitem__(self, i: int, value: float) -> None:
        """
        Set P[i] where P[i] is x, y, z for i in 0, 1, 2 respectively.

        Args:
            i (int): index
            value (float): new value
        """
        self.buffer[i] = value

    def __str__(self) -> str:
        """
        String representation of a point

        Returns:
            str: String representation of a point
        """
        return "(%f,%f,%f)" % (self.x, self.y, self.z)

    def __repr__(self) -> str:
        """
        String representation including class

        Returns:
            str: String representation including class
        """
        return "Point" + str(self)
| [
"deepsim.core.vector3.Vector3",
"shapely.geometry.Point",
"math.atan2",
"numpy.isclose",
"numpy.array",
"geometry_msgs.msg.Point",
"typing.TypeVar"
] | [((1463, 1481), 'typing.TypeVar', 'TypeVar', (['"""Vector3"""'], {}), "('Vector3')\n", (1470, 1481), False, 'from typing import Union, TypeVar, Iterable, Optional, Iterator\n'), ((1495, 1516), 'typing.TypeVar', 'TypeVar', (['"""Quaternion"""'], {}), "('Quaternion')\n", (1502, 1516), False, 'from typing import Union, TypeVar, Iterable, Optional, Iterator\n'), ((2134, 2167), 'numpy.array', 'np.array', (['buffer[:3]'], {'dtype': 'float'}), '(buffer[:3], dtype=float)\n', (2142, 2167), True, 'import numpy as np\n'), ((5330, 5340), 'geometry_msgs.msg.Point', 'ROSPoint', ([], {}), '()\n', (5338, 5340), True, 'from geometry_msgs.msg import Point as ROSPoint\n'), ((6075, 6103), 'deepsim.core.vector3.Vector3', 'Vector3', ([], {'buffer': 'self._buffer'}), '(buffer=self._buffer)\n', (6082, 6103), False, 'from deepsim.core.vector3 import Vector3\n'), ((6290, 6315), 'shapely.geometry.Point', 'ShapelyPoint', (['self.buffer'], {}), '(self.buffer)\n', (6302, 6315), True, 'from shapely.geometry import Point as ShapelyPoint\n'), ((6502, 6532), 'shapely.geometry.Point', 'ShapelyPoint', (['self.buffer[0:2]'], {}), '(self.buffer[0:2])\n', (6514, 6532), True, 'from shapely.geometry import Point as ShapelyPoint\n'), ((8760, 8800), 'math.atan2', 'math.atan2', (['(pt2.y - pt1.y)', '(pt2.x - pt1.x)'], {}), '(pt2.y - pt1.y, pt2.x - pt1.x)\n', (8770, 8800), False, 'import math\n'), ((14925, 14962), 'numpy.isclose', 'np.isclose', (['self.buffer', 'other.buffer'], {}), '(self.buffer, other.buffer)\n', (14935, 14962), True, 'import numpy as np\n'), ((9562, 9604), 'deepsim.core.vector3.Vector3', 'Vector3', ([], {'buffer': '(self.buffer + other.buffer)'}), '(buffer=self.buffer + other.buffer)\n', (9569, 9604), False, 'from deepsim.core.vector3 import Vector3\n'), ((10177, 10219), 'deepsim.core.vector3.Vector3', 'Vector3', ([], {'buffer': '(self.buffer - other.buffer)'}), '(buffer=self.buffer - other.buffer)\n', (10184, 10219), False, 'from deepsim.core.vector3 import Vector3\n')] |
import argparse
import os
import random
import numpy as np
import tensorboardX
import torch
import torchvision
from prefetch_generator import BackgroundGenerator
# from pudb import set_trace
from torch import distributed as dist
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torchvision import transforms
from tqdm import tqdm
from metrics import AverageMeter, ExpStat
class DataLoaderX(DataLoader):
    """(Speed-up component) DataLoader wrapper whose iterator prefetches
    batches on a background thread, so consumers do not have to wait for
    a full iteration of the underlying loader."""
    def __iter__(self):
        # Keep up to 10 batches pre-loaded ahead of the consuming loop.
        return BackgroundGenerator(super().__iter__(), max_prefetch=10)
def main(args):
    """Train ResNet-18 on CIFAR-10, optionally under DistributedDataParallel.

    Heavy side effects: downloads CIFAR-10 to ./data/cifar10, writes
    TensorBoard logs under ./log/, and requires CUDA (plus an NCCL process
    group when args.local_rank != -1).

    Args:
        args (argparse.Namespace): parsed command-line options
            (local_rank, seed, lr, batch_size, num_workers, total_epoch).
    """
    #######################################################################
    # Initialize DDP setting
    #######################################################################
    if args.local_rank != -1:
        torch.cuda.set_device(args.local_rank)
        dist.init_process_group(backend='nccl')
        args.world_size = dist.get_world_size()
    if args.local_rank in [-1, 0]:
        # Initialize the experiment logging tool (TensorBoard writer);
        # only rank 0 (or single-GPU) writes logs.
        writer = tensorboardX.SummaryWriter(log_dir="./log/")
    #######################################################################
    # Initialize Dataset and Dataloader
    #######################################################################
    transform = {
        "train":
        transforms.Compose([
            transforms.RandomCrop(size=(32, 32), padding=4),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
        ]),
        "val":
        transforms.Compose([
            transforms.Resize(size=(32, 32)),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
        ])
    }
    trainset = torchvision.datasets.CIFAR10(root="./data/cifar10",
                                            train=True,
                                            transform=transform["train"],
                                            download=True)
    train_num_samples_per_cls = [
        trainset.targets.count(i) for i in range(len(trainset.classes))
    ]
    valset = torchvision.datasets.CIFAR10(root="./data/cifar10",
                                          train=False,
                                          transform=transform["val"],
                                          download=True)
    val_num_samples_per_cls = [
        valset.targets.count(i) for i in range(len(valset.classes))
    ]
    # DistributedSampler shards the dataset across the GPUs.
    if args.local_rank != -1:
        args.batch_size = int(args.batch_size / args.world_size)
        train_sampler = DistributedSampler(trainset)
        val_sampler = DistributedSampler(valset)
    else:
        train_sampler = None
        val_sampler = None
    train_loader = DataLoaderX(
        trainset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        shuffle=(train_sampler is None),
        pin_memory=True,
        drop_last=True,
        sampler=train_sampler,
    )
    val_loader = DataLoaderX(
        valset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        shuffle=(val_sampler is None),
        pin_memory=True,
        drop_last=False,
        sampler=val_sampler,
    )
    #######################################################################
    # Initialize the network model
    #######################################################################
    if args.local_rank in [-1, 0]:
        print("Initializing Model...")
    model = torchvision.models.resnet18(
        num_classes=len(trainset.classes)).cuda()
    #######################################################################
    # Initialize the loss
    #######################################################################
    if args.local_rank in [-1, 0]:
        print("Initializing Criterion...")
    criterion = torch.nn.CrossEntropyLoss(weight=None).cuda()
    #######################################################################
    # Initialize the optimizer
    #######################################################################
    if args.local_rank in [-1, 0]:
        print("Initializing Optimizer...")
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=0.9,
                                nesterov=True)
    #######################################################################
    # Initialize DistributedDataParallel
    #######################################################################
    if args.local_rank != -1:
        if args.local_rank in [-1, 0]:
            print("Initializing DistributedDataParallel...")
        model = DistributedDataParallel(model,
                                        device_ids=[args.local_rank],
                                        output_device=args.local_rank)
    #######################################################################
    # Initialize the LR scheduler
    #######################################################################
    if args.local_rank in [-1, 0]:
        print("Initializing lr_scheduler...")
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                    step_size=30,
                                                    gamma=0.1)
    #######################################################################
    # Start training
    #######################################################################
    if args.local_rank in [-1, 0]:
        print(f"\nStart {args.total_epoch}-epoch training ...\n")
    for epoch in range(args.total_epoch):
        if args.local_rank != -1:
            # DistributedSampler reshuffles per epoch; using the epoch as
            # the seed makes every rank generate the same permutation, so
            # each process picks its own disjoint shard (see its source).
            train_sampler.set_epoch(epoch)
            val_sampler.set_epoch(epoch)
            dist.barrier()
        train_stat, train_loss = train_epoch(
            epoch=epoch,
            train_loader=train_loader,
            model=model,
            criterion=criterion,
            optimizer=optimizer,
            lr_scheduler=lr_scheduler,
            num_samples_per_cls=train_num_samples_per_cls,
            args=args,
        )
        val_stat, val_loss = eval_epoch(
            epoch=epoch,
            val_loader=val_loader,
            model=model,
            criterion=criterion,
            num_samples_per_cls=val_num_samples_per_cls,
            args=args,
        )
        # Record the experimental result.
        # - tensorboard.SummaryWriter
        # - logging
        if args.local_rank in [-1, 0]:
            writer.add_scalars("Loss", {
                "train": train_loss,
                "val": val_loss
            }, epoch)
            writer.add_scalars("Acc", {
                "train": train_stat.acc,
                "val": val_stat.acc
            }, epoch)
        # NOTE(review): verify the scheduler is stepped exactly once per
        # epoch — train_epoch must not also call lr_scheduler.step(), or
        # the LR schedule advances twice per epoch.
        lr_scheduler.step()
    print("End Experiments.")
def train_epoch(epoch, train_loader, model, criterion, optimizer, lr_scheduler,
                num_samples_per_cls, args):
    """Run one training epoch and return (ExpStat, mean loss).

    Args:
        epoch (int): current epoch index (for the progress bar only).
        train_loader: iterable yielding (images, targets) batches.
        model: network in training mode on CUDA.
        criterion: loss function.
        optimizer: optimizer stepped once per batch.
        lr_scheduler: kept for signature compatibility — it is stepped once
            per epoch by the caller (main), NOT here.  (Previously it was
            stepped here as well, advancing the schedule twice per epoch.)
        num_samples_per_cls (list): per-class sample counts for ExpStat.
        args: namespace with local_rank / world_size / total_epoch.

    Returns:
        tuple: (ExpStat with the epoch's confusion stats, average loss).
    """
    model.train()
    train_loss_meter = AverageMeter()
    train_stat = ExpStat(num_samples_per_cls)
    if args.local_rank in [-1, 0]:
        train_pbar = tqdm(total=len(train_loader),
                          desc=f"Train Epoch[{epoch:>3d}/{args.total_epoch}]")
    for batch_imgs, batch_targets in train_loader:
        optimizer.zero_grad()
        batch_imgs, batch_targets = batch_imgs.cuda(), batch_targets.cuda()
        batch_probs = model(batch_imgs)
        batch_avg_loss = criterion(batch_probs, batch_targets)
        if args.local_rank != -1:
            # Block until every process reaches this line so that the
            # averaged loss/accuracy are computed from the same iteration
            # on all ranks (otherwise faster ranks would skew the mean).
            dist.barrier()
            batch_avg_loss.backward()
            optimizer.step()
            batch_avg_loss = _reduce_tensor(batch_avg_loss, args.world_size)
        else:
            batch_avg_loss.backward()
            optimizer.step()
        train_loss_meter.update(batch_avg_loss.item())
        batch_preds = torch.argmax(batch_probs, dim=1)
        train_stat.update(batch_targets, batch_preds)
        if args.local_rank in [-1, 0]:
            train_pbar.update()
            train_pbar.set_postfix_str(
                f"LR:{optimizer.param_groups[0]['lr']:.1e} "
                f"Loss:{train_loss_meter.avg:>3.1f}")
    if args.local_rank != -1:
        # All-reduce the statistical confusion matrix: DDP communication
        # only exchanges tensors, and the CM carries the complete
        # per-class statistics, so it is the most flexible thing to share.
        dist.barrier()
        train_stat._cm = _reduce_tensor(train_stat._cm,
                                        args.world_size,
                                        op='sum')
    # BUG FIX: lr_scheduler.step() removed here — the caller (main) already
    # steps the scheduler once per epoch, so stepping here doubled the rate
    # at which the StepLR schedule decayed the learning rate.
    if args.local_rank in [-1, 0]:
        train_pbar.set_postfix_str(f"LR:{optimizer.param_groups[0]['lr']:.1e} "
                                   f"Loss:{train_loss_meter.avg:>4.2f} "
                                   f"MR:{train_stat.mr:>6.2%} ")
    return train_stat, train_loss_meter.avg
def eval_epoch(epoch, val_loader, model, criterion, num_samples_per_cls, args):
    """Run one validation epoch and return (ExpStat, mean loss).

    Args:
        epoch (int): current epoch index (unused, kept for symmetry with
            train_epoch).
        val_loader: iterable yielding (images, targets) batches.
        model: network, switched to eval mode here.
        criterion: loss function.
        num_samples_per_cls (list): per-class sample counts for ExpStat.
        args: namespace with local_rank / world_size.

    Returns:
        tuple: (ExpStat with the epoch's confusion stats, average loss).
    """
    model.eval()
    val_loss_meter = AverageMeter()
    val_stat = ExpStat(num_samples_per_cls)
    if args.local_rank in [-1, 0]:
        val_pbar = tqdm(total=len(val_loader),
                        ncols=0,
                        desc=" Eval")
    # IMPROVEMENT: run validation under no_grad() — model.eval() alone does
    # not disable autograd, so activations were being kept for a backward
    # pass that never happens, wasting GPU memory and compute.
    with torch.no_grad():
        for batch_imgs, batch_targets in val_loader:
            batch_imgs, batch_targets = batch_imgs.cuda(), batch_targets.cuda()
            batch_probs = model(batch_imgs)
            batch_avg_loss = criterion(batch_probs, batch_targets)
            if args.local_rank != -1:
                # Block until every process reaches this line so the
                # averaged loss is computed from the same iteration on
                # every rank.
                dist.barrier()
                batch_avg_loss = _reduce_tensor(batch_avg_loss,
                                                args.world_size)
            val_loss_meter.update(batch_avg_loss.item())
            batch_preds = torch.argmax(batch_probs, dim=1)
            val_stat.update(batch_targets, batch_preds)
            if args.local_rank in [-1, 0]:
                val_pbar.update()
                val_pbar.set_postfix_str(f"Loss:{val_loss_meter.avg:>3.1f}")
    if args.local_rank != -1:
        # All-reduce the statistical confusion matrix: DDP communication
        # only exchanges tensors, and the CM carries the complete
        # per-class statistics.
        dist.barrier()
        val_stat._cm = _reduce_tensor(val_stat._cm, args.world_size, op='sum')
    if args.local_rank in [-1, 0]:
        val_pbar.set_postfix_str(f"Loss:{val_loss_meter.avg:>4.2f} "
                                 f"MR:{val_stat.mr:>6.2%} ")
    return val_stat, val_loss_meter.avg
def _reduce_tensor(tensor, nproc, op='mean'):
    """All-reduce (sum) a tensor across every process.

    Args:
        tensor (torch.Tensor): value to reduce; the caller's tensor is
            left untouched (a clone is reduced).
        nproc (int): world size, used as the divisor when op == 'mean'.
        op (str): 'mean' to average across processes, anything else sums.

    Returns:
        torch.Tensor: the reduced (and possibly averaged) tensor.
    """
    result = tensor.clone()
    dist.all_reduce(result, op=dist.ReduceOp.SUM)
    return result / nproc if op == 'mean' else result
def _set_random_seed(seed=0, cuda_deterministic=False):
    """Seed every RNG and pick a reproducibility/efficiency trade-off.

    cuda_deterministic=True  -> reproducible (deterministic cuDNN, slower)
    cuda_deterministic=False -> fast auto-tuned cuDNN kernels
    """
    random.seed(seed)
    np.random.seed(seed)
    assert torch.cuda.is_available()
    torch.manual_seed(seed)  # seeds CPU-side torch RNG
    torch.cuda.manual_seed_all(seed)  # seeds every CUDA device
    cudnn = torch.backends.cudnn
    # With determinism on: disable cuDNN auto-tuning and pin its internal
    # randomness.  With it off: let cuDNN benchmark kernels — fastest when
    # input sizes stay constant.
    cudnn.enabled = not cuda_deterministic
    cudnn.deterministic = cuda_deterministic
    cudnn.benchmark = not cuda_deterministic
def parse_args(argv=None):
    """Parse command-line options for the training script.

    Args:
        argv (Optional[list[str]]): argument list to parse; defaults to
            sys.argv[1:].  Accepting an explicit list is backward
            compatible and makes the function unit-testable.

    Returns:
        argparse.Namespace: parsed arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--local_rank',
                        type=int,
                        default=-1,
                        help='Local Rank for distributed training. '
                        'if single-GPU, default: -1')
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr", type=float, default=0.1)
    parser.add_argument("--batch-size", type=int, default=256)
    parser.add_argument("--num-workers", type=int, default=2)
    parser.add_argument("--total-epoch", type=int, default=100)
    return parser.parse_args(argv)
if __name__ == '__main__':
    args = parse_args()
    if "LOCAL_RANK" in os.environ:
        # torchrun/torch.distributed.launch export the rank via the
        # LOCAL_RANK env var; it overrides the --local_rank CLI default.
        args.local_rank = int(os.environ["LOCAL_RANK"])
    _set_random_seed(args.seed)
    main(args)
| [
"torch.optim.lr_scheduler.StepLR",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.argmax",
"torchvision.datasets.CIFAR10",
"torch.distributed.get_world_size",
"torchvision.transforms.Normalize",
"torch.nn.parallel.DistributedDataParallel",
"torch.utils.data.distributed.DistributedSampler",
... | [((1891, 2004), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data/cifar10"""', 'train': '(True)', 'transform': "transform['train']", 'download': '(True)'}), "(root='./data/cifar10', train=True, transform=\n transform['train'], download=True)\n", (1919, 2004), False, 'import torchvision\n'), ((2257, 2369), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data/cifar10"""', 'train': '(False)', 'transform': "transform['val']", 'download': '(True)'}), "(root='./data/cifar10', train=False, transform=\n transform['val'], download=True)\n", (2285, 2369), False, 'import torchvision\n'), ((5259, 5326), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(30)', 'gamma': '(0.1)'}), '(optimizer, step_size=30, gamma=0.1)\n', (5290, 5326), False, 'import torch\n'), ((7244, 7258), 'metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (7256, 7258), False, 'from metrics import AverageMeter, ExpStat\n'), ((7276, 7304), 'metrics.ExpStat', 'ExpStat', (['num_samples_per_cls'], {}), '(num_samples_per_cls)\n', (7283, 7304), False, 'from metrics import AverageMeter, ExpStat\n'), ((9431, 9445), 'metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (9443, 9445), False, 'from metrics import AverageMeter, ExpStat\n'), ((9461, 9489), 'metrics.ExpStat', 'ExpStat', (['num_samples_per_cls'], {}), '(num_samples_per_cls)\n', (9468, 9489), False, 'from metrics import AverageMeter, ExpStat\n'), ((11094, 11147), 'torch.distributed.all_reduce', 'dist.all_reduce', (['reduced_tensor'], {'op': 'dist.ReduceOp.SUM'}), '(reduced_tensor, op=dist.ReduceOp.SUM)\n', (11109, 11147), True, 'from torch import distributed as dist\n'), ((11462, 11479), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (11473, 11479), False, 'import random\n'), ((11484, 11504), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (11498, 11504), True, 'import numpy as np\n'), 
((11517, 11542), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11540, 11542), False, 'import torch\n'), ((11547, 11570), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (11564, 11570), False, 'import torch\n'), ((11623, 11655), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (11649, 11655), False, 'import torch\n'), ((12069, 12094), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12092, 12094), False, 'import argparse\n'), ((911, 949), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (932, 949), False, 'import torch\n'), ((958, 997), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (981, 997), True, 'from torch import distributed as dist\n'), ((1024, 1045), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (1043, 1045), True, 'from torch import distributed as dist\n'), ((1119, 1163), 'tensorboardX.SummaryWriter', 'tensorboardX.SummaryWriter', ([], {'log_dir': '"""./log/"""'}), "(log_dir='./log/')\n", (1145, 1163), False, 'import tensorboardX\n'), ((2752, 2780), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['trainset'], {}), '(trainset)\n', (2770, 2780), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((2803, 2829), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['valset'], {}), '(valset)\n', (2821, 2829), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((4810, 4906), 'torch.nn.parallel.DistributedDataParallel', 'DistributedDataParallel', (['model'], {'device_ids': '[args.local_rank]', 'output_device': 'args.local_rank'}), '(model, device_ids=[args.local_rank], output_device=\n args.local_rank)\n', (4833, 4906), False, 'from torch.nn.parallel import DistributedDataParallel\n'), ((8274, 8306), 
'torch.argmax', 'torch.argmax', (['batch_probs'], {'dim': '(1)'}), '(batch_probs, dim=1)\n', (8286, 8306), False, 'import torch\n'), ((8681, 8695), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (8693, 8695), True, 'from torch import distributed as dist\n'), ((10277, 10309), 'torch.argmax', 'torch.argmax', (['batch_probs'], {'dim': '(1)'}), '(batch_probs, dim=1)\n', (10289, 10309), False, 'import torch\n'), ((10598, 10612), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (10610, 10612), True, 'from torch import distributed as dist\n'), ((3989, 4027), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'weight': 'None'}), '(weight=None)\n', (4014, 4027), False, 'import torch\n'), ((6007, 6021), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (6019, 6021), True, 'from torch import distributed as dist\n'), ((7797, 7811), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (7809, 7811), True, 'from torch import distributed as dist\n'), ((9950, 9964), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (9962, 9964), True, 'from torch import distributed as dist\n'), ((1433, 1480), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', ([], {'size': '(32, 32)', 'padding': '(4)'}), '(size=(32, 32), padding=4)\n', (1454, 1480), False, 'from torchvision import transforms\n'), ((1494, 1532), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1525, 1532), False, 'from torchvision import transforms\n'), ((1546, 1567), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1565, 1567), False, 'from torchvision import transforms\n'), ((1581, 1644), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '(0.5, 0.5, 0.5)', 'std': '(0.5, 0.5, 0.5)'}), '(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n', (1601, 1644), False, 'from torchvision import transforms\n'), ((1713, 1745), 
'torchvision.transforms.Resize', 'transforms.Resize', ([], {'size': '(32, 32)'}), '(size=(32, 32))\n', (1730, 1745), False, 'from torchvision import transforms\n'), ((1759, 1780), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1778, 1780), False, 'from torchvision import transforms\n'), ((1794, 1857), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '(0.5, 0.5, 0.5)', 'std': '(0.5, 0.5, 0.5)'}), '(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n', (1814, 1857), False, 'from torchvision import transforms\n')] |
# This code is part of Mthree.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=no-name-in-module
"""Test is various methods agree"""
import numpy as np
import mthree
def test_bad_A_matrix_sdd():
    """Test a bad A-matrix for SDD"""
    mitigator = mthree.M3Mitigation(None)
    mitigator.single_qubit_cals = BAD_CALS
    # A poorly conditioned A-matrix must fail the strict-diagonal-dominance check.
    assert not mitigator._check_sdd(COUNTS, range(5))
def test_goood_A_matrix_sdd():
    # NOTE(review): "goood" is a typo, but renaming would change the collected
    # pytest test id, so it is left as-is.
    """Test a good A-matrix for SDD"""
    mit = mthree.M3Mitigation(None)
    mit.single_qubit_cals = GOOD_CALS
    is_sdd = mit._check_sdd(COUNTS, range(5))
    assert is_sdd
# Per-qubit 2x2 calibration (A) matrices for which the `_check_sdd` test above
# reports that the mitigation matrix is NOT strictly diagonally dominant.
BAD_CALS = [np.array([[0.99, 0.08288574],
                      [0.01, 0.91711426]]),
            np.array([[0.91967773, 0.14404297],
                      [0.08032227, 0.85595703]]),
            np.array([[0.9, 0.13195801],
                      [0.1, 0.86804199]]),
            np.array([[0.85, 0.0703125],
                      [0.15, 0.9296875]]),
            np.array([[0.9, 0.23425293],
                      [0.1, 0.76574707]])]
# Per-qubit calibration matrices for which `_check_sdd` succeeds.
GOOD_CALS = [np.array([[1, 0.05419922],
                       [0, 0.94580078]]),
             np.array([[0.95532227, 0.06750488],
                       [0.04467773, 0.93249512]]),
             np.array([[0.99047852, 0.03967285],
                       [0.00952148, 0.96032715]]),
             np.array([[0.96643066, 0.09606934],
                       [0.03356934, 0.90393066]]),
             np.array([[0.99255371, 0.06066895],
                       [0.00744629, 0.93933105]])]
# Sample 5-qubit measurement counts: bitstring -> number of shots observed.
COUNTS = {'00000': 3591,
          '00001': 7,
          '10000': 77,
          '10001': 2,
          '10010': 2,
          '10011': 14,
          '10100': 5,
          '10101': 22,
          '10110': 29,
          '10111': 305,
          '11000': 17,
          '11001': 10,
          '11010': 8,
          '11011': 128,
          '11100': 69,
          '11101': 196,
          '11110': 199,
          '11111': 2734,
          '00010': 153,
          '00011': 40,
          '00100': 46,
          '00101': 6,
          '00110': 6,
          '00111': 72,
          '01000': 152,
          '01001': 1,
          '01010': 14,
          '01011': 12,
          '01100': 5,
          '01101': 22,
          '01110': 8,
          '01111': 240}
| [
"numpy.array",
"mthree.M3Mitigation"
] | [((664, 689), 'mthree.M3Mitigation', 'mthree.M3Mitigation', (['None'], {}), '(None)\n', (683, 689), False, 'import mthree\n'), ((877, 902), 'mthree.M3Mitigation', 'mthree.M3Mitigation', (['None'], {}), '(None)\n', (896, 902), False, 'import mthree\n'), ((1019, 1069), 'numpy.array', 'np.array', (['[[0.99, 0.08288574], [0.01, 0.91711426]]'], {}), '([[0.99, 0.08288574], [0.01, 0.91711426]])\n', (1027, 1069), True, 'import numpy as np\n'), ((1105, 1167), 'numpy.array', 'np.array', (['[[0.91967773, 0.14404297], [0.08032227, 0.85595703]]'], {}), '([[0.91967773, 0.14404297], [0.08032227, 0.85595703]])\n', (1113, 1167), True, 'import numpy as np\n'), ((1203, 1251), 'numpy.array', 'np.array', (['[[0.9, 0.13195801], [0.1, 0.86804199]]'], {}), '([[0.9, 0.13195801], [0.1, 0.86804199]])\n', (1211, 1251), True, 'import numpy as np\n'), ((1287, 1335), 'numpy.array', 'np.array', (['[[0.85, 0.0703125], [0.15, 0.9296875]]'], {}), '([[0.85, 0.0703125], [0.15, 0.9296875]])\n', (1295, 1335), True, 'import numpy as np\n'), ((1371, 1419), 'numpy.array', 'np.array', (['[[0.9, 0.23425293], [0.1, 0.76574707]]'], {}), '([[0.9, 0.23425293], [0.1, 0.76574707]])\n', (1379, 1419), True, 'import numpy as np\n'), ((1457, 1501), 'numpy.array', 'np.array', (['[[1, 0.05419922], [0, 0.94580078]]'], {}), '([[1, 0.05419922], [0, 0.94580078]])\n', (1465, 1501), True, 'import numpy as np\n'), ((1539, 1601), 'numpy.array', 'np.array', (['[[0.95532227, 0.06750488], [0.04467773, 0.93249512]]'], {}), '([[0.95532227, 0.06750488], [0.04467773, 0.93249512]])\n', (1547, 1601), True, 'import numpy as np\n'), ((1639, 1701), 'numpy.array', 'np.array', (['[[0.99047852, 0.03967285], [0.00952148, 0.96032715]]'], {}), '([[0.99047852, 0.03967285], [0.00952148, 0.96032715]])\n', (1647, 1701), True, 'import numpy as np\n'), ((1739, 1801), 'numpy.array', 'np.array', (['[[0.96643066, 0.09606934], [0.03356934, 0.90393066]]'], {}), '([[0.96643066, 0.09606934], [0.03356934, 0.90393066]])\n', (1747, 1801), True, 'import 
numpy as np\n'), ((1839, 1901), 'numpy.array', 'np.array', (['[[0.99255371, 0.06066895], [0.00744629, 0.93933105]]'], {}), '([[0.99255371, 0.06066895], [0.00744629, 0.93933105]])\n', (1847, 1901), True, 'import numpy as np\n')] |
# Copyright 2019-2020, University of Freiburg
# Author: <NAME> <<EMAIL>>
import sys
import logging
import os
import getopt
import matplotlib
import datrie
import string
import numpy as np
import tensorflow as tf
# Setting the seed for numpy-generated random numbers to make sure results are
# reproduceable
# np.random.seed(2407)
import matplotlib.pyplot as plt
from enum import Enum
from time import strftime, localtime
from collections import defaultdict
from operator import itemgetter
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Embedding, LSTM, Dense, Dropout
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.models import Sequential
from keras.models import model_from_json, load_model
from keras.backend import set_session
# Console logging: HH:MM:SS timestamp followed by the message.
logging.basicConfig(format='%(asctime)s : %(message)s', datefmt="%H:%M:%S",
                    level=logging.INFO)
logger = logging.getLogger(__name__)
# Toggle between a Polyaxon cluster run (outputs written to the experiment's
# output path) and a local run (paths taken from global_paths).
POLYAXON_EXP = False
if POLYAXON_EXP:
    from polyaxon_client.tracking import get_outputs_path
    DATA_PATH = "/data/1/prangen/data/"
    MODEL_LOAD_PATH = "data/1/prangen/model/"
    MODEL_SAVE_PATH = get_outputs_path() + "/"
    INFO_PATH = get_outputs_path() + "/"
    CHECKPOINT_SAVE_PATH = get_outputs_path() + "/"
    CHECKPOINT_LOAD_PATH = "/data/1/prangen/checkpoint/"
else:
    import global_paths as gp
    DATA_PATH = gp.LANGUAGE_MODELS_LSTM + "data/"
    MODEL_LOAD_PATH = gp.LANGUAGE_MODELS_LSTM + "model/"
    MODEL_SAVE_PATH = gp.LANGUAGE_MODELS_LSTM + "model/"
    INFO_PATH = gp.LANGUAGE_MODELS_LSTM + "info/"
    CHECKPOINT_SAVE_PATH = gp.LANGUAGE_MODELS_LSTM + "checkpoint/"
    CHECKPOINT_LOAD_PATH = ""
# Words seen fewer than this many times in the corpus are excluded from the
# vocabulary (they map to _UNK_ via word_to_id).
MIN_WORD_COUNT = 3
# Sentences are truncated to at most this many tokens when building n-grams.
MAX_SEQUENCE_LEN = 30
class PredictTypes(Enum):
    """Controls whether type tokens (entries starting with "[") appear in
    word predictions: never, exclusively, or alongside prefix matches.
    """
    NEVER = 0
    ONLY = 1
    ALSO = 2
class LM():
    """LSTM word-level language model.

    Wraps vocabulary/id-file generation, training-data preparation, a Keras
    LSTM model, training, prediction, and persistence.  Prediction methods
    use module-level ``graph``/``session`` globals so the model can be used
    from a threaded Flask server.
    """

    def __init__(self, input_file):
        """Set hyperparameters and ensure the .vocab/.ids files for
        ``input_file`` exist (generating them on first use)."""
        # Training data generation vars
        self.use_generator = True
        # Architecture vars
        self.embed_size = 100
        self.lstm_units = 512
        self.dropout = 0.2
        self.dense_units = 256
        # Training vars
        self.batch_size = 512
        self.num_epochs = 10
        self.val_split = 0.15
        # get the file name without path and file extension
        file_suffix = input_file.split("/")[-1]
        file_suffix = file_suffix.split(".")
        file_suffix = file_suffix[:-1] if len(file_suffix) > 1 else file_suffix
        file_suffix = '.'.join(file_suffix)
        # Load the word_dict and generate the vocab and ids files if necessary
        self.input_file = input_file
        self.ids_file = DATA_PATH+file_suffix+".ids"
        if not os.path.isfile(self.ids_file):
            self.gen_vocab(input_file, file_suffix, MIN_WORD_COUNT)
            self.get_word_dict(file_suffix)
            self.ids_file = self.gen_id_seqs(input_file, file_suffix)
        else:
            self.get_word_dict(file_suffix)

    def get_word_dict(self, file_suffix):
        """Read the .vocab file and build the word->id and id->word maps."""
        with open(DATA_PATH+file_suffix+".vocab", "r",
                  encoding="latin-1") as vocab_file:
            lines = [line.strip() for line in vocab_file.readlines()]
            self.word_dict = dict([(b, a) for (a, b) in enumerate(lines)])
            self.ids_dict = dict([(a, b) for (a, b) in enumerate(lines)])

    def word_to_id(self, word):
        """Return the vocabulary id of ``word``; unknown words map to _UNK_."""
        id = self.word_dict.get(word)
        return id if id is not None else self.word_dict.get("_UNK_")

    def id_to_word(self, id):
        """Return the word for a vocabulary ``id``; unknown ids map to _UNK_."""
        word = self.ids_dict.get(id)
        return word if word is not None else "_UNK_"

    def gen_vocab(self, file_path, file_suffix, threshold):
        """Generate the vocab from the given training data
        """
        logger.info("Generate vocab for %s" % file_path)
        # Get all words from the corpus and count their occurrences
        word_counter = defaultdict(int)
        with open(file_path, "r", encoding="latin-1") as currentFile:
            for line in currentFile.readlines():
                for word in line.strip().split():
                    word_counter[word] += 1
        # Filter out words that occur less than <threshold> times in the corpus
        word_list = list()
        for word, count in sorted(word_counter.items(), key=itemgetter(1),
                                  reverse=True):
            if count >= threshold:
                word_list.append(word)
        # We need to tell LSTM the start and the end of a sentence.
        # And to deal with input sentences with variable lengths,
        # we also need padding position as 0.
        word_list = ["_PAD_", "_BOS_", "_EOS_", "_UNK_"] + word_list
        # Write the vocab to a file and create the word_dict
        with open(DATA_PATH+file_suffix+".vocab", "w",
                  encoding="latin-1") as vocab_file:
            for i, word in enumerate(word_list):
                vocab_file.write(word + "\n")

    def gen_id_seqs(self, file_path, file_suffix):
        """Generate the id sequences from the training data
        """
        logger.info("Generate id sequences for %s" % file_path)
        with open(file_path, "r", encoding="latin-1") as raw_file:
            ids_file = DATA_PATH+file_suffix+".ids"
            with open(ids_file, "w", encoding="latin-1") as current_file:
                for line in raw_file.readlines():
                    token_list = line.strip().replace("<unk>", "_UNK_").split()
                    # each sentence has the start and the end.
                    token_list = ["_BOS_"] + token_list + ["_EOS_"]
                    token_id_list = [self.word_to_id(t) for t in token_list]
                    id_string = " ".join([str(id) for id in token_id_list])
                    current_file.write("%s\n" % id_string)
        return ids_file

    def gen_training_data(self):
        """Generate the data for training the model
        """
        logger.info("Generate training data for %s" % self.ids_file)
        # create input sequences using list of tokens
        with open(self.ids_file, "r", encoding="latin-1") as file:
            input_sequences = []
            for line in file:
                token_list = [int(id) for id in line.split()]
                # Every sentence prefix (up to MAX_SEQUENCE_LEN tokens)
                # becomes one training n-gram.
                for i in range(len(token_list[:MAX_SEQUENCE_LEN])):
                    n_gram_sequence = token_list[:i+1]
                    input_sequences.append(n_gram_sequence)
        # pad sequences
        self.max_sequence_len = max([len(x) for x in input_sequences])
        logger.info("Max length: %d" % self.max_sequence_len)
        if self.use_generator:
            # NOTE(review): split is positional, not shuffled — validation
            # data comes from the end of the corpus; confirm this is intended.
            split = int(len(input_sequences) * (1-self.val_split))
            self.NUM_TRAIN_SAMPLES = len(input_sequences[:split])
            self.NUM_VAL_SAMPLES = len(input_sequences[split:])
            self.train_gen = self.batch_generator(input_sequences[:split],
                                                  self.batch_size,
                                                  mode="train")
            self.val_gen = self.batch_generator(input_sequences[split:],
                                                self.batch_size,
                                                mode="val")
        else:
            input_sequences = np.array(pad_sequences(input_sequences,
                                                   maxlen=self.max_sequence_len,
                                                   padding='pre'))
            # create predictors and label
            self.predictors = input_sequences[:, :-1]
            self.labels = input_sequences[:, -1]

    def batch_generator(self, input_sequences, batch_size, mode="train"):
        """Yield data batch as input for keras' fit_generator
        """
        curr_index = 0
        # Yield data batches indefinitely
        while True:
            batch_sequences = []
            while len(batch_sequences) < batch_size:
                # Fill up the batch
                batch_sequences.append(input_sequences[curr_index])
                curr_index += 1
                curr_index %= len(input_sequences)
                if curr_index == 0 and mode == "val":
                    # If we are evaluating we have to return the current batch
                    # to ensure we don't continue to fill up the batch with
                    # samples at the beginning of the file
                    break
            batch_sequences = np.array(pad_sequences(batch_sequences,
                                               maxlen=self.max_sequence_len,
                                               padding='pre'))
            # create predictors and label
            predictors = batch_sequences[:, :-1]
            labels = batch_sequences[:, -1]
            yield predictors, labels

    def create_model(self):
        """Create the LSTM model
        """
        logger.info("Train the model")
        model = Sequential()
        model.add(Embedding(len(self.word_dict), self.embed_size,
                            input_length=self.max_sequence_len-1))
        model.add(LSTM(self.lstm_units, return_sequences=True))
        model.add(Dropout(self.dropout))
        model.add(LSTM(self.lstm_units))
        model.add(Dropout(self.dropout))
        # model.add(Dense(self.dense_units, activation='relu'))
        model.add(Dense(len(self.word_dict), activation='softmax'))
        model.compile(loss='sparse_categorical_crossentropy',
                      optimizer='adagrad',
                      metrics=['accuracy'])
        self.model = model

    def train_model(self):
        """Train the LSTM model
        """
        # Best-so-far (by training loss) checkpoints are written each epoch.
        checkpoint_name = "checkpoint-{epoch:02d}-{loss:.4f}.hdf5"
        filepath = CHECKPOINT_SAVE_PATH + checkpoint_name
        checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1,
                                     save_best_only=True, mode='min')
        earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=5,
                                  verbose=0, mode='auto')
        if self.use_generator:
            train_steps = self.NUM_TRAIN_SAMPLES // self.batch_size
            val_steps = self.NUM_VAL_SAMPLES // self.batch_size
            # NOTE(review): fit_generator is deprecated in newer Keras in
            # favour of fit(); kept because this file targets the older API.
            self.history = self.model.fit_generator(
                self.train_gen,
                steps_per_epoch=train_steps,
                validation_data=self.val_gen,
                validation_steps=val_steps,
                epochs=self.num_epochs,
                verbose=1,
                callbacks=[earlystop, checkpoint])
        else:
            self.history = self.model.fit(self.predictors,
                                          self.labels,
                                          batch_size=self.batch_size,
                                          epochs=self.num_epochs,
                                          verbose=1,
                                          validation_split=self.val_split,
                                          callbacks=[earlystop, checkpoint])

    def predict_words(self, token_list, prefix="",
                      predict_types=PredictTypes.ALSO, max_words=20):
        """Return up to ``max_words`` (word, probability) pairs predicted to
        follow ``token_list``, most probable first, optionally restricted to
        words matching ``prefix`` and/or type tokens (see PredictTypes)."""
        # Get padded token list of input
        token_list = ["_BOS_"] + token_list
        token_list = [self.word_to_id(t) for t in token_list]
        token_list = pad_sequences([token_list],
                                   maxlen=self.max_sequence_len-1,
                                   padding='pre')
        # This is necessary for the usage in a threaded flask server
        global graph
        global session
        with graph.as_default():
            with session.as_default():
                # Get probabilities for the next word
                y_prob = self.model.predict(token_list)[0]
        if prefix:
            # Only consider words that match the prefix or are types depending
            # on predict_types
            if predict_types == PredictTypes.NEVER:
                matching_ids = self.prefix_trie.values(prefix)
            elif predict_types == PredictTypes.ONLY:
                matching_ids = self.prefix_trie.values("[")
            else:
                matching_ids = self.prefix_trie.values(prefix)
                matching_ids += self.prefix_trie.values("[")
            prob_id_arr = np.array([y_prob[matching_ids], matching_ids])
            sorted_indices = np.argsort(prob_id_arr[0], axis=-1)
            sorted_ids = prob_id_arr[1][sorted_indices].astype(int)
        else:
            # Consider probabilities for all words
            sorted_ids = np.argsort(y_prob, axis=-1)
        # Set the index for slicing the classes to length <max_words>
        if max_words:
            max_words = -max_words
        sorted_ids = sorted_ids[max_words:][::-1]
        sorted_words = [(self.id_to_word(id), y_prob[id]) for id in sorted_ids]
        return sorted_words

    def probability_for_word(self, context, word):
        """Returns the probability of a word given a context as computed by the
        language model.
        TODO: I don't actually need to compute the probabilities for all words
        but right now I don't know a more efficient way to do this with keras
        """
        token_list = ["_BOS_"] + context
        token_list = [self.word_to_id(t) for t in token_list]
        token_list = pad_sequences([token_list],
                                   maxlen=self.max_sequence_len-1,
                                   padding='pre')
        # Get probabilities for the next word
        matching_id = self.word_to_id(word)
        # This is necessary for the usage in a threaded flask server
        global graph
        global session
        with graph.as_default():
            with session.as_default():
                y_prob = self.model.predict(token_list)[0]
        return y_prob[matching_id]

    def probability_for_context(self, context):
        """Returns the probability for a given context
        = product of the probabilities of all words in the context given their
        context
        """
        # NOTE(review): the docstring promises a product over all words, but
        # this returns only P(last word | preceding words) — there is no
        # recursive factor for the shorter prefixes.  Confirm intent.
        if len(context) == 0:
            return 1
        return self.probability_for_word(context[:-1], context[-1])

    def initialize_trie(self):
        """Build the prefix trie mapping every vocabulary word to its id."""
        logger.info("Create prefix trie")
        # Alphabet accepted by the trie: lowercase letters plus these extras.
        extra_chrs = "[]-_/:0123456789'?"
        self.prefix_trie = datrie.BaseTrie(string.ascii_lowercase + extra_chrs)
        for w, id in self.word_dict.items():
            self.prefix_trie[w] = id

    def save_model(self, model_name):
        """Persist the architecture (.json) and weights (.h5) to
        MODEL_SAVE_PATH."""
        logger.info("Save model to disk")
        # serialize model to JSON
        model_json = self.model.to_json()
        with open(MODEL_SAVE_PATH+model_name+".json", "w",
                  encoding="latin-1") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        self.model.save_weights(MODEL_SAVE_PATH+model_name+".h5")
        logger.info("Model saved")

    def load_model(self, model_name):
        """Load json and create model.
        """
        logger.info("Load model from disk")
        # This is necessary for the usage in a threaded flask server
        global graph
        global session
        graph = tf.Graph()
        with graph.as_default():
            session = tf.compat.v1.Session()
            with session.as_default():
                # Exclude the extension from the model name and add it separately for
                # each operation
                if ".json" in model_name or ".h5" in model_name:
                    model_name = model_name.replace(".json", "").replace(".h5", "")
                json_file = open(model_name+".json", 'r', encoding="latin-1")
                loaded_model_json = json_file.read()
                json_file.close()
                loaded_model = model_from_json(loaded_model_json)
                # load weights into new model
                loaded_model.load_weights(model_name+".h5")
                self.model = loaded_model
                self.max_sequence_len = self.model.layers[0].input_length + 1
        logger.info("Model loaded")

    def create_plots(self, model_name):
        """Save accuracy and loss training curves as PDFs under INFO_PATH."""
        logger.info("Create plots")
        # Headless backend so plotting works without a display.
        matplotlib.use('Agg')
        # Accuracy plot
        plt.plot(self.history.history['acc'])
        plt.plot(self.history.history['val_acc'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'val'], loc='upper left')
        plt.savefig(INFO_PATH+model_name+'_acc.pdf')
        plt.close()
        # Loss plot
        plt.plot(self.history.history['loss'])
        plt.plot(self.history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'val'], loc='upper left')
        plt.savefig(INFO_PATH+model_name+'_loss.pdf')

    def write_info(self, model_name):
        """Write a human-readable training summary to
        INFO_PATH/<model_name>_info.txt (requires model and history)."""
        logger.info("Write model information to file")
        if self.model and self.history:
            with open(INFO_PATH+model_name+"_info.txt", "w",
                      encoding="latin-1") as file:
                file.write("*"*80+"\n")
                datetime = strftime("%Y-%m-%d %H:%M", localtime())
                file.write("%s %s\n" % (model_name, datetime))
                file.write("*"*80+"\n")
                file.write("\n")
                heading = "Input file:"
                file.write("%s\n" % heading)
                file.write("-"*len(heading)+"\n")
                file.write("Input file name:\t%s\n" % self.input_file)
                file.write("#distinct_words:\t%d\n" % len(self.word_dict))
                file.write("MAX_SEQUENCE_LEN:\t%d\n" % MAX_SEQUENCE_LEN)
                file.write("\n")
                heading = "Training parameters:"
                file.write("%s\n" % heading)
                file.write("-"*len(heading)+"\n")
                # history.params only contains batch_size/samples when fit()
                # (not fit_generator) was used; fall back to our own values.
                if 'batch_size' in self.history.params:
                    batch_size = self.history.params['batch_size']
                else:
                    batch_size = self.batch_size
                file.write("Batch size:\t%d\n" % batch_size)
                file.write("#epochs:\t%d\n" % self.history.params['epochs'])
                if 'samples' in self.history.params:
                    samples = self.history.params['samples']
                else:
                    samples = self.NUM_TRAIN_SAMPLES + self.NUM_VAL_SAMPLES
                file.write("#samples:\t%d\n" % samples)
                file.write("\n")
                heading = "Results:"
                file.write("%s\n" % heading)
                file.write("-"*len(heading)+"\n")
                file.write("Final val_loss:\t%f\n" %
                           self.history.history['val_loss'][-1])
                file.write("Final val_acc:\t%f\n" %
                           self.history.history['val_acc'][-1])
                file.write("\n")
                heading = "Model architecture:\n"
                file.write("%s" % heading)
                self.model.summary(print_fn=lambda x: file.write(x+"\n"))
        else:
            logger.warning("Model or history does not exist.")
def print_usage_and_exit():
    """Log a short usage message and terminate the process with status 2."""
    # BUG FIX: '%' binds tighter than '+', so the original applied
    # `% sys.argv[0]` to the second literal only, which has no placeholder —
    # calling this function raised TypeError instead of printing usage.
    usage_str = ("Usage: python3 %s <training_data_path> [-ich] "
                 "[-s <model_name>][-l <model_name>]" % sys.argv[0])
    logger.warning(usage_str)
    # sys.exit instead of the site-provided exit(): always available in scripts.
    sys.exit(2)
if __name__ == "__main__":
    # Handle command line arguments
    options = "s:l:c:h"
    long_options = ["save_model", "load_model",
                    "continue_training", "help"]
    try:
        opts, args = getopt.gnu_getopt(sys.argv, options, long_options)
    except getopt.GetoptError:
        logger.error("Error while parsing the command line arguments.")
        print_usage_and_exit()
    save_model = ""
    load_model_path = ""
    continue_training = ""
    for opt, opt_args in opts:
        if opt == '-s' or opt == '--save_model':
            save_model = opt_args
        elif opt == '-l' or opt == '--load_model':
            load_model_path = opt_args
        elif opt == '-c' or opt == '--continue_training':
            continue_training = opt_args
        elif opt == '-h' or opt == '--help':
            hstrng = ("Usage: python3 %s <training_data_path> [arguments]\n\n"
                      "Arguments:\n"
                      "-s, --save_model\t\tSave the trained model to the "
                      "specified path\n"
                      "-l, --load_model\t\tLoad an existing model from the"
                      " specified path\n"
                      "-c, --continue_training\t\tContinue training the"
                      " specified checkpoint of a model\n"
                      "-h, --help\t\tShow help options\n" % args[0])
            logger.warning(hstrng)
            sys.exit(2)
        else:
            print_usage_and_exit()
    # gnu_getopt leaves the program name in args[0]; args[1] is the corpus.
    if len(args) != 2:
        print_usage_and_exit()
    train_data_path = args[1]
    lm = LM(train_data_path)
    if load_model_path:
        # Load an existing model from file
        # Get the model name in case the user entered a path
        model_name = load_model_path.split("/")[-1]
        model_name = os.path.splitext(model_name)[0]
        lm.load_model(model_name)
    elif continue_training:
        # Continue training an existing hdf5 model
        lm.gen_training_data()
        model_path = CHECKPOINT_LOAD_PATH + continue_training
        # load_model here is keras.models.load_model (full .hdf5 checkpoint).
        lm.model = load_model(model_path)
        lm.train_model()
    else:
        # Train a new model
        lm.gen_training_data()
        lm.create_model()
        lm.train_model()
    if save_model:
        # Save the model
        lm.save_model(save_model)
        lm.create_plots(save_model)
        lm.write_info(save_model)
| [
"matplotlib.pyplot.title",
"keras.models.load_model",
"getopt.gnu_getopt",
"keras.preprocessing.sequence.pad_sequences",
"logging.getLogger",
"collections.defaultdict",
"numpy.argsort",
"os.path.isfile",
"polyaxon_client.tracking.get_outputs_path",
"matplotlib.pyplot.close",
"tensorflow.compat.v... | [((792, 891), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(message)s"""', 'datefmt': '"""%H:%M:%S"""', 'level': 'logging.INFO'}), "(format='%(asctime)s : %(message)s', datefmt='%H:%M:%S',\n level=logging.INFO)\n", (811, 891), False, 'import logging\n'), ((917, 944), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (934, 944), False, 'import logging\n'), ((1151, 1169), 'polyaxon_client.tracking.get_outputs_path', 'get_outputs_path', ([], {}), '()\n', (1167, 1169), False, 'from polyaxon_client.tracking import get_outputs_path\n'), ((1192, 1210), 'polyaxon_client.tracking.get_outputs_path', 'get_outputs_path', ([], {}), '()\n', (1208, 1210), False, 'from polyaxon_client.tracking import get_outputs_path\n'), ((1244, 1262), 'polyaxon_client.tracking.get_outputs_path', 'get_outputs_path', ([], {}), '()\n', (1260, 1262), False, 'from polyaxon_client.tracking import get_outputs_path\n'), ((3812, 3828), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (3823, 3828), False, 'from collections import defaultdict\n'), ((8780, 8792), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (8790, 8792), False, 'from keras.models import Sequential\n'), ((9639, 9728), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(filepath, monitor='loss', verbose=1, save_best_only=True,\n mode='min')\n", (9654, 9728), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((9782, 9869), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(5)', 'verbose': '(0)', 'mode': '"""auto"""'}), "(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode=\n 'auto')\n", (9795, 9869), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((11271, 11347), 
'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['[token_list]'], {'maxlen': '(self.max_sequence_len - 1)', 'padding': '"""pre"""'}), "([token_list], maxlen=self.max_sequence_len - 1, padding='pre')\n", (11284, 11347), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((13276, 13352), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['[token_list]'], {'maxlen': '(self.max_sequence_len - 1)', 'padding': '"""pre"""'}), "([token_list], maxlen=self.max_sequence_len - 1, padding='pre')\n", (13289, 13352), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((14264, 14316), 'datrie.BaseTrie', 'datrie.BaseTrie', (['(string.ascii_lowercase + extra_chrs)'], {}), '(string.ascii_lowercase + extra_chrs)\n', (14279, 14316), False, 'import datrie\n'), ((15108, 15118), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (15116, 15118), True, 'import tensorflow as tf\n'), ((16083, 16104), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (16097, 16104), False, 'import matplotlib\n'), ((16137, 16174), 'matplotlib.pyplot.plot', 'plt.plot', (["self.history.history['acc']"], {}), "(self.history.history['acc'])\n", (16145, 16174), True, 'import matplotlib.pyplot as plt\n'), ((16183, 16224), 'matplotlib.pyplot.plot', 'plt.plot', (["self.history.history['val_acc']"], {}), "(self.history.history['val_acc'])\n", (16191, 16224), True, 'import matplotlib.pyplot as plt\n'), ((16233, 16260), 'matplotlib.pyplot.title', 'plt.title', (['"""model accuracy"""'], {}), "('model accuracy')\n", (16242, 16260), True, 'import matplotlib.pyplot as plt\n'), ((16269, 16291), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (16279, 16291), True, 'import matplotlib.pyplot as plt\n'), ((16300, 16319), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (16310, 16319), True, 'import matplotlib.pyplot as plt\n'), ((16328, 16374), 'matplotlib.pyplot.legend', 
'plt.legend', (["['train', 'val']"], {'loc': '"""upper left"""'}), "(['train', 'val'], loc='upper left')\n", (16338, 16374), True, 'import matplotlib.pyplot as plt\n'), ((16383, 16431), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(INFO_PATH + model_name + '_acc.pdf')"], {}), "(INFO_PATH + model_name + '_acc.pdf')\n", (16394, 16431), True, 'import matplotlib.pyplot as plt\n'), ((16436, 16447), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16445, 16447), True, 'import matplotlib.pyplot as plt\n'), ((16476, 16514), 'matplotlib.pyplot.plot', 'plt.plot', (["self.history.history['loss']"], {}), "(self.history.history['loss'])\n", (16484, 16514), True, 'import matplotlib.pyplot as plt\n'), ((16523, 16565), 'matplotlib.pyplot.plot', 'plt.plot', (["self.history.history['val_loss']"], {}), "(self.history.history['val_loss'])\n", (16531, 16565), True, 'import matplotlib.pyplot as plt\n'), ((16574, 16597), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (16583, 16597), True, 'import matplotlib.pyplot as plt\n'), ((16606, 16624), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (16616, 16624), True, 'import matplotlib.pyplot as plt\n'), ((16633, 16652), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (16643, 16652), True, 'import matplotlib.pyplot as plt\n'), ((16661, 16707), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'val']"], {'loc': '"""upper left"""'}), "(['train', 'val'], loc='upper left')\n", (16671, 16707), True, 'import matplotlib.pyplot as plt\n'), ((16716, 16765), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(INFO_PATH + model_name + '_loss.pdf')"], {}), "(INFO_PATH + model_name + '_loss.pdf')\n", (16727, 16765), True, 'import matplotlib.pyplot as plt\n'), ((19487, 19537), 'getopt.gnu_getopt', 'getopt.gnu_getopt', (['sys.argv', 'options', 'long_options'], {}), '(sys.argv, options, long_options)\n', (19504, 19537), False, 'import 
getopt\n'), ((2629, 2658), 'os.path.isfile', 'os.path.isfile', (['self.ids_file'], {}), '(self.ids_file)\n', (2643, 2658), False, 'import os\n'), ((8944, 8988), 'keras.layers.LSTM', 'LSTM', (['self.lstm_units'], {'return_sequences': '(True)'}), '(self.lstm_units, return_sequences=True)\n', (8948, 8988), False, 'from keras.layers import Embedding, LSTM, Dense, Dropout\n'), ((9008, 9029), 'keras.layers.Dropout', 'Dropout', (['self.dropout'], {}), '(self.dropout)\n', (9015, 9029), False, 'from keras.layers import Embedding, LSTM, Dense, Dropout\n'), ((9049, 9070), 'keras.layers.LSTM', 'LSTM', (['self.lstm_units'], {}), '(self.lstm_units)\n', (9053, 9070), False, 'from keras.layers import Embedding, LSTM, Dense, Dropout\n'), ((9090, 9111), 'keras.layers.Dropout', 'Dropout', (['self.dropout'], {}), '(self.dropout)\n', (9097, 9111), False, 'from keras.layers import Embedding, LSTM, Dense, Dropout\n'), ((12242, 12288), 'numpy.array', 'np.array', (['[y_prob[matching_ids], matching_ids]'], {}), '([y_prob[matching_ids], matching_ids])\n', (12250, 12288), True, 'import numpy as np\n'), ((12318, 12353), 'numpy.argsort', 'np.argsort', (['prob_id_arr[0]'], {'axis': '(-1)'}), '(prob_id_arr[0], axis=-1)\n', (12328, 12353), True, 'import numpy as np\n'), ((12512, 12539), 'numpy.argsort', 'np.argsort', (['y_prob'], {'axis': '(-1)'}), '(y_prob, axis=-1)\n', (12522, 12539), True, 'import numpy as np\n'), ((15174, 15196), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (15194, 15196), True, 'import tensorflow as tf\n'), ((21069, 21097), 'os.path.splitext', 'os.path.splitext', (['model_name'], {}), '(model_name)\n', (21085, 21097), False, 'import os\n'), ((21326, 21348), 'keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (21336, 21348), False, 'from keras.models import model_from_json, load_model\n'), ((4210, 4223), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (4220, 4223), False, 'from operator import 
itemgetter\n'), ((7182, 7257), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['input_sequences'], {'maxlen': 'self.max_sequence_len', 'padding': '"""pre"""'}), "(input_sequences, maxlen=self.max_sequence_len, padding='pre')\n", (7195, 7257), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((8324, 8399), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['batch_sequences'], {'maxlen': 'self.max_sequence_len', 'padding': '"""pre"""'}), "(batch_sequences, maxlen=self.max_sequence_len, padding='pre')\n", (8337, 8399), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((15700, 15734), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (15715, 15734), False, 'from keras.models import model_from_json, load_model\n'), ((17102, 17113), 'time.localtime', 'localtime', ([], {}), '()\n', (17111, 17113), False, 'from time import strftime, localtime\n'), ((20691, 20702), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (20699, 20702), False, 'import sys\n')] |
import pandas as pd
import numpy as np
class regout(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
stat_names=['coeff', 'se', 't', 'p>t', 'CI_low', 'CI_high']
var_names=['mpg', 'length', '_cons']
liml_std = regout(
summary=pd.DataFrame(np.array([
[-2883.485724395141,
5736.949960235051,
-.5026165025634982,
.6167893963364323,
-14322.63904926804,
8555.667600477756,
],
[-561.1914219781756,
1262.970349885851,
-.4443425152687845,
.6581464352023914,
-3079.482774918735,
1957.099930962384,
],
[173041.7784742117,
359459.7850372744,
.4813939853001025,
.6317169039765493,
-543700.6759080754,
889784.2328564988,
],
]),
columns=stat_names,
index=var_names),
vce=pd.DataFrame(np.array([
[32912594.84624095,
7238707.669577062,
-2061337257.276809,
],
[7238707.669577062,
1595094.104690788,
-453934824.3404015,
],
[-2061337257.276809,
-453934824.3404015,
129211337059.0436,
],
]),
columns=var_names,
index=var_names),
N=74,
r2=np.nan,
r2_a=np.nan,
mss=-6452353337.012137,
tss=np.nan,
rss=7087418733.133758,
kappa=1.003869278067604,
F=1.004921031139624,
pF=.3712199957893489,
)
liml_robust = regout(
summary=pd.DataFrame(np.array([
[-2883.485724395141,
9782.004628173405,
-.2947745205609839,
.7690263310468843,
-22388.2489769767,
16621.27752818642,
],
[-561.1914219781756,
2125.8470700299,
-.2639848509753255,
.7925563185482027,
-4800.010088318348,
3677.627244361996,
],
[173041.7784742117,
607512.0672069436,
.2848367757858323,
.7765983822485902,
-1038302.878819259,
1384386.435767682,
],
]),
columns=stat_names,
index=var_names),
vce=pd.DataFrame(np.array([
[95687614.5456059,
20788482.10331459,
-5941786208.955258,
],
[20788482.10331459,
4519225.765154713,
-1291436616.40693,
],
[-5941786208.955258,
-1291436616.40693,
369070911802.054,
],
]),
columns=var_names,
index=var_names),
N=74,
r2=np.nan,
r2_a=np.nan,
mss=-6452353337.012137,
tss=np.nan,
rss=7087418733.133758,
kappa=1.003869278067604,
F=.7898880420543719,
pF=.457843156074147,
)
liml_cluster = regout(
summary=pd.DataFrame(np.array([
[-2883.485724395141,
8819.325864658204,
-.3269508087857565,
.7456524626556127,
-20787.66908406108,
15020.6976352708,
],
[-561.1914219781756,
1931.700235224021,
-.2905168264438782,
.7731353760228708,
-4482.751384509515,
3360.368540553163,
],
[173041.7784742117,
550665.2533557557,
.3142413243248863,
.7552032861505424,
-944868.1181752922,
1290951.675123716,
],
]),
columns=stat_names,
index=var_names),
vce=pd.DataFrame(np.array([
[77780508.70702916,
17028976.85657852,
-4855525327.740576,
],
[17028976.85657852,
3731465.798764537,
-1063671099.509924,
],
[-4855525327.740576,
-1063671099.509924,
303232221253.3586,
],
]),
columns=var_names,
index=var_names),
N=74,
r2=np.nan,
r2_a=np.nan,
mss=-6452353337.012137,
tss=np.nan,
rss=7087418733.133758,
kappa=1.003869278067604,
F=.820341839024799,
pF=.4485680388523306,
)
| [
"numpy.array"
] | [((278, 674), 'numpy.array', 'np.array', (['[[-2883.485724395141, 5736.949960235051, -0.5026165025634982, \n 0.6167893963364323, -14322.63904926804, 8555.667600477756], [-\n 561.1914219781756, 1262.970349885851, -0.4443425152687845, \n 0.6581464352023914, -3079.482774918735, 1957.099930962384], [\n 173041.7784742117, 359459.7850372744, 0.4813939853001025, \n 0.6317169039765493, -543700.6759080754, 889784.2328564988]]'], {}), '([[-2883.485724395141, 5736.949960235051, -0.5026165025634982, \n 0.6167893963364323, -14322.63904926804, 8555.667600477756], [-\n 561.1914219781756, 1262.970349885851, -0.4443425152687845, \n 0.6581464352023914, -3079.482774918735, 1957.099930962384], [\n 173041.7784742117, 359459.7850372744, 0.4813939853001025, \n 0.6317169039765493, -543700.6759080754, 889784.2328564988]])\n', (286, 674), True, 'import numpy as np\n'), ((734, 935), 'numpy.array', 'np.array', (['[[32912594.84624095, 7238707.669577062, -2061337257.276809], [\n 7238707.669577062, 1595094.104690788, -453934824.3404015], [-\n 2061337257.276809, -453934824.3404015, 129211337059.0436]]'], {}), '([[32912594.84624095, 7238707.669577062, -2061337257.276809], [\n 7238707.669577062, 1595094.104690788, -453934824.3404015], [-\n 2061337257.276809, -453934824.3404015, 129211337059.0436]])\n', (742, 935), True, 'import numpy as np\n'), ((1202, 1595), 'numpy.array', 'np.array', (['[[-2883.485724395141, 9782.004628173405, -0.2947745205609839, \n 0.7690263310468843, -22388.2489769767, 16621.27752818642], [-\n 561.1914219781756, 2125.8470700299, -0.2639848509753255, \n 0.7925563185482027, -4800.010088318348, 3677.627244361996], [\n 173041.7784742117, 607512.0672069436, 0.2848367757858323, \n 0.7765983822485902, -1038302.878819259, 1384386.435767682]]'], {}), '([[-2883.485724395141, 9782.004628173405, -0.2947745205609839, \n 0.7690263310468843, -22388.2489769767, 16621.27752818642], [-\n 561.1914219781756, 2125.8470700299, -0.2639848509753255, \n 0.7925563185482027, -4800.010088318348, 
3677.627244361996], [\n 173041.7784742117, 607512.0672069436, 0.2848367757858323, \n 0.7765983822485902, -1038302.878819259, 1384386.435767682]])\n', (1210, 1595), True, 'import numpy as np\n'), ((1655, 1852), 'numpy.array', 'np.array', (['[[95687614.5456059, 20788482.10331459, -5941786208.955258], [\n 20788482.10331459, 4519225.765154713, -1291436616.40693], [-\n 5941786208.955258, -1291436616.40693, 369070911802.054]]'], {}), '([[95687614.5456059, 20788482.10331459, -5941786208.955258], [\n 20788482.10331459, 4519225.765154713, -1291436616.40693], [-\n 5941786208.955258, -1291436616.40693, 369070911802.054]])\n', (1663, 1852), True, 'import numpy as np\n'), ((2119, 2514), 'numpy.array', 'np.array', (['[[-2883.485724395141, 8819.325864658203, -0.3269508087857565, \n 0.7456524626556127, -20787.66908406108, 15020.6976352708], [-\n 561.1914219781756, 1931.700235224021, -0.2905168264438782, \n 0.7731353760228707, -4482.751384509515, 3360.368540553163], [\n 173041.7784742117, 550665.2533557557, 0.3142413243248863, \n 0.7552032861505424, -944868.1181752922, 1290951.675123716]]'], {}), '([[-2883.485724395141, 8819.325864658203, -0.3269508087857565, \n 0.7456524626556127, -20787.66908406108, 15020.6976352708], [-\n 561.1914219781756, 1931.700235224021, -0.2905168264438782, \n 0.7731353760228707, -4482.751384509515, 3360.368540553163], [\n 173041.7784742117, 550665.2533557557, 0.3142413243248863, \n 0.7552032861505424, -944868.1181752922, 1290951.675123716]])\n', (2127, 2514), True, 'import numpy as np\n'), ((2574, 2775), 'numpy.array', 'np.array', (['[[77780508.70702916, 17028976.85657852, -4855525327.740576], [\n 17028976.85657852, 3731465.798764537, -1063671099.509924], [-\n 4855525327.740576, -1063671099.509924, 303232221253.3586]]'], {}), '([[77780508.70702916, 17028976.85657852, -4855525327.740576], [\n 17028976.85657852, 3731465.798764537, -1063671099.509924], [-\n 4855525327.740576, -1063671099.509924, 303232221253.3586]])\n', (2582, 2775), True, 'import numpy as 
np\n')] |
import numpy as np
from .base_symbol_analyzer import SymbolAnalyzer
class MedianVolume(SymbolAnalyzer):
timeframes = ['1D']
lookback = 200
@classmethod
def run(cls, df):
return dict(median_volume=int(np.median(df.Volume)))
| [
"numpy.median"
] | [((228, 248), 'numpy.median', 'np.median', (['df.Volume'], {}), '(df.Volume)\n', (237, 248), True, 'import numpy as np\n')] |
from abc import ABC, abstractmethod, abstractproperty
import matplotlib.pyplot as plot
import numpy as np
from spike_swarm_sim.algorithms.interfaces import GET, SET, LEN, INIT
from spike_swarm_sim.register import neuron_model_registry
from spike_swarm_sim.utils import sigmoid, tanh, increase_time
class BaseNeuronModel(ABC):
""" Base abstract class for neuron models."""
def __init__(self, dt, num_neurons):
self.dt = dt
self.num_neurons = num_neurons
self.bias = np.zeros(num_neurons)
self._volt = None
@abstractmethod
def step(self, Isyn):
pass
def build(self, **kwargs):
for var, val in kwargs.items():
self.__dict__[var] = np.array(val) if isinstance(val, list) else val
if isinstance(self.__dict__[var], float) or isinstance(self.__dict__[var], int):
self.__dict__[var] = np.repeat(self.__dict__[var], self.num_neurons)
@abstractmethod
def reset(self):
pass
@property
def voltages(self):
return self._volt.copy()
class SpikingNeuronModel(BaseNeuronModel):
def __init__(self, *args, **kwargs):
super(SpikingNeuronModel, self).__init__(*args, **kwargs)
self._theta = None
self._recov = None
@property
def recovery(self):
return self._recov.copy()
@property
def theta(self):
return self._theta.copy()
class NonSpikingNeuronModel(BaseNeuronModel):
def __init__(self, *args, **kwargs):
super(NonSpikingNeuronModel, self).__init__(*args, **kwargs)
self.bias = np.zeros(self.num_neurons)
@GET('neurons:bias')
def get_bias(self, neuron_name, min_val=0, max_val=1):
return (self.bias.copy() - min_val) / (max_val - min_val)
@SET('neurons:bias')
def set_bias(self, neuron_name, data, min_val=0, max_val=1):
data = min_val + data.copy() * (max_val - min_val)
self.bias = data.copy()
@LEN('neurons:bias')
def len_bias(self, neuron_name):
return self.get_bias(neuron_name, 0, 1).shape[0]
@INIT('neurons:bias')
def init_bias(self, neuron_name, min_val=-1., max_val=1.):
biases_len = self.len_bias(neuron_name)
random_biases = 0.5*np.random.randn(biases_len)*0.2
random_biases = np.clip(random_biases, a_min=0, a_max=1)
self.set_bias(neuron_name, random_biases, min_val=min_val, max_val=max_val)
@neuron_model_registry(name='rate_model')
class RateModel(NonSpikingNeuronModel):
""" Class for the Rate model or non spiking model mainly used as building block
of CTRNNs.
"""
def __init__(self, *args, tau=1., gain=1., bias=0., activation='sigmoid'):
super(RateModel, self).__init__(*args)
self.tau = None
self.bias = None
self.gain = None
self.activation = None
self.build(tau=tau, gain=gain, bias=bias, activation=activation)
self.reset()
def step(self, Isyn):
self._volt += (self.dt / self.tau) * (Isyn.copy() - self._volt)
outputs = self.gain * self._volt.copy() + self.bias
outputs[self.activation == 'sigmoid'] = sigmoid(outputs[self.activation == 'sigmoid'])
outputs[self.activation == 'tanh'] = tanh(outputs[self.activation == 'tanh'])
return outputs, self._volt.copy()
def reset(self):
self._volt = np.zeros(self.num_neurons)
def build(self, tau=1., gain=1., bias=0., activation='sigmoid'):
super().build(tau=tau, gain=gain, bias=bias)
self.activation = np.array(activation) if isinstance(activation, list) else activation
if isinstance(activation, str):
self.activation = np.repeat(activation, self.num_neurons)
@GET('neurons:tau')
def get_tau(self, neuron_name, min_val=0, max_val=1):
return (np.log10(0.5*self.tau.copy()) - min_val) / (max_val - min_val)
@SET('neurons:tau')
def set_tau(self, neuron_name, data, min_val=0, max_val=1):
self.tau = 2 * 10 ** (data.copy() * (max_val - min_val) + min_val)
@LEN('neurons:tau')
def len_tau(self, neuron_name):
return self.tau.shape[0]
@INIT('neurons:tau')
def init_tau(self, neuron_name, min_val, max_val):
tau_len = self.len_tau(neuron_name)
random_taus = np.random.random(size=tau_len)
self.set_tau(neuron_name, random_taus, min_val=min_val, max_val=max_val)
@GET('neurons:gain')
def get_gain(self, neuron_name, min_val=0, max_val=1):
return (self.gain.copy() - min_val) / (max_val - min_val)
@SET('neurons:gain')
def set_gain(self, neuron_name, data, min_val=0, max_val=1):
data = min_val + data.copy() * (max_val - min_val)
self.gain = data.copy()
@LEN('neurons:gain')
def len_gain(self, neuron_name):
return self.gain.shape[0]
@INIT('neurons:gain')
def init_gain(self, neuron_name, min_val, max_val):
gain_len = self.len_gain(neuron_name)
random_gains = np.random.random(size=gain_len)
self.set_gain(neuron_name, random_gains, min_val=min_val, max_val=max_val)
@neuron_model_registry(name='adex')
class AdExModel(SpikingNeuronModel):
""" Class for the Adaptive Exponenitial LIF spiking neuron model. """
def __init__(self, *args, tau_m=10., tau_w=70., V_rest=-70.,
V_reset=-55., A=0., B=5., theta_rest=-45., ):
super(AdExModel, self).__init__(*args)
self.tau_w = None
self.tau_m = None
self.V_rest = None
self.V_reset = None
self.A = None
self.B = None
self.theta_rest = None
self.time_refrac = 10
self.R = 1.
self.refractoriness = None
#* --- Build Params ---
self.build(tau_m=tau_m, tau_w=tau_w, V_rest=V_rest,
V_reset=V_reset, A=A, B=B, theta_rest=theta_rest)
self.reset()
self.t = 0
@increase_time
def step(self, Isyn):
self._volt += (self.dt / self.tau_m) * (self.V_rest - self._volt\
+ 2 * np.exp((self._volt - (self._theta)) / 2) - self._recov + Isyn)
self._volt = np.clip(self._volt, a_min=None, a_max=30.)
spikes = (self._volt >= 30.).astype(int)
out_voltage = self._volt.copy()
self._volt[self._volt >= 30.] = self.V_reset[self._volt >= 30.]
self._recov += (self.dt / self.tau_w) * (self.A * (self._volt - self.V_rest)\
- self._recov + self.B * self.tau_w * spikes)
self._theta += self.dt * ((1 - spikes) * (self.theta_rest - self._theta) / 50. + spikes * 20.)
# self.refractoriness += self.time_refrac * spikes
return spikes, out_voltage
def reset(self):
self.t = 0
self._volt = self.V_rest * np.ones(self.num_neurons)
self._recov = self.A * (self._volt - self.V_rest)
self.refractoriness = np.zeros_like(self._volt)
self._theta = self.theta_rest * np.ones(self.num_neurons)
@neuron_model_registry(name='izhikevich')
class IzhikevichModel(SpikingNeuronModel):
""" Class for the Izhikevich spiking neuron model. """
def __init__(self, *args,
A=0.02, B=0.2, C=-65., D=8.):
super(IzhikevichModel, self).__init__(*args)
# define vars, they are initialized in reset
self.A = None
self.B = None
self.C = None
self.D = None
self.build(A=A, B=B, C=C, D=D)
self.reset()
def step(self, Isyn):
Isyn *= 0.1
self._recov[self._volt >= 30.] += self.D[self._volt >= 30.].copy()
self._volt[self._volt >= 30.] = self.C[self._volt >= 30.].copy()
self._volt += self.dt * (.04 * (self._volt ** 2) \
+ 5 * self._volt + 140 - self._recov + Isyn)
self._volt = np.clip(self._volt, a_max=30., a_min=-85.)
self._recov += self.dt * (self.A * (self.B * self._volt - self._recov))
spikes = (self._volt.copy() >= 30).astype(int)
return spikes, self._volt.copy()
@property
def theta(self):
""" There is no theta in this neuron model. Zero array returned. """
return np.zeros_like(self._volt)
def reset(self):
# Initialize at stable fixed point
self._volt = self.C.copy()
self._recov = (self.B * self._volt).copy()
@neuron_model_registry(name='lif')
class LIFModel(SpikingNeuronModel):
""" Class for the Leaky Integrate and Fire (LIF) spiking neuron model. """
def __init__(self, *args, tau=20., R=1., v_rest=-65.,
thresh=-50., time_refrac=5.):
super(LIFModel, self).__init__(*args)
# define vars, they are initialized in reset
self.tau = None
self.v_rest = None
self.thresh = None
self.time_refrac = None
self.R = None
self.refractoriness = None
self.build(tau=tau, R=R, v_rest=v_rest, \
thresh=thresh, time_refrac=time_refrac)
self.reset()
def step(self, Isyn):
Isyn[self.refractoriness > 0] = 0.
self._volt += (self.dt / self.tau) * (self.v_rest - self._volt + Isyn)
spikes = (self._volt >= self.thresh).astype(int)
out_voltage = self._volt.copy()
self._volt[spikes.astype(bool)] = self.v_rest[spikes.astype(bool)]
self.refractoriness[self.refractoriness > 0] -= 1
self.refractoriness[spikes.astype(bool)] = self.time_refrac[spikes.astype(bool)]
return spikes.copy(), out_voltage
@property
def recovery(self):
""" There is no recovery var. in this neuron model. Zero array returned."""
return np.zeros_like(self._volt)
@property
def theta(self):
""" There is no theta in this neuron model. Zero array returned. """
return np.zeros_like(self._volt)
def reset(self):
self._volt = self.v_rest * np.ones(self.num_neurons)
self.refractoriness = np.zeros_like(self._volt)
@neuron_model_registry(name='exp_lif')
class ExpLIFModel(SpikingNeuronModel):
""" Class for the Exponetial LIF spiking neuron model. """
def __init__(self, *args, tau=20., R=1., v_rest=-65., time_refrac=10., thresh=-40.):
super(ExpLIFModel, self).__init__(*args)
self.tau = None
self.v_rest = None
self.thresh = None
self.time_refrac = None
self.R = None
self.refractoriness = None
self.build(tau=tau, R=R, v_rest=v_rest,\
thresh=thresh, time_refrac=time_refrac)
self.reset()
def step(self, Isyn):
Isyn *= 0.6
Isyn[self.refractoriness > 0] = 0.
self._volt += (self.dt / self.tau) * (self.v_rest - self._volt \
+ .3 * np.exp((self._volt - self.thresh) / 1) + Isyn)
self._volt = np.clip(self._volt, a_max=30., a_min=None)
spikes = (self._volt >= 30.).astype(int)
out_voltages = self._volt.copy()
self._volt[self._volt >= 30.] = self.v_rest[self._volt >= 30.]
self.refractoriness[self.refractoriness > 0] -= 1
self.refractoriness[spikes.astype(bool)] = self.time_refrac[spikes.astype(bool)]
return spikes, out_voltages
@property
def recovery(self):
""" There is no recovery var. in this neuron model. Zero array returned."""
return np.zeros_like(self._volt)
@property
def theta(self):
""" There is no theta in this neuron model. Zero array returned. """
return np.zeros_like(self._volt)
def reset(self):
self._volt = self.v_rest * np.ones(self.num_neurons)
self.refractoriness = np.zeros_like(self._volt)
@neuron_model_registry(name='morris_lecar')
class MorrisLecarModel(SpikingNeuronModel):
""" Class for the Morris Lecar spiking neuron model. """
def __init__(self, *args, phi=0.067, g_Ca=4., g_K=8., g_L=2., V1=-1.2, V2=18.,
V3=12., V4=17.4, E_Ca=120., E_K=-84., E_L=-60., Cm=20.):
super(MorrisLecarModel, self).__init__(*args)
# Model parameters (fixed for the moment)
self.phi = None
self.g_Ca = None
self.g_K = None
self.g_L = None
self.V1 = None
self.V2 = None
self.V3 = None
self.V4 = None
self.E_Ca = None
self.E_K = None
self.E_L = None
self.Cm = None
self.build(phi=phi, g_Ca=g_Ca, g_K=g_K, g_L=g_L, V1=V1, V2=V2,
V3=V3, V4=V4, E_Ca=E_Ca, E_K=E_K, E_L=E_L, Cm=Cm)
self.reset()
def step(self, Isyn):
m_inf = .5 * (1 + np.tanh((self._volt - self.V1) / self.V2))
n_inf = .5 * (1 + np.tanh((self._volt - self.V3) / self.V4))
tau_n = 1 / np.cosh((self._volt - self.V3) / (2 * self.V4))
self._volt += (self.dt / self.Cm) * (Isyn - self.g_L * (self._volt - self.E_L)\
- self.g_K * self._recov * (self._volt - self.E_K)\
- self.g_Ca * m_inf * (self._volt - self.E_Ca))
self._volt = np.clip(self._volt, a_max=40., a_min=-85.)
self._recov += self.dt * (self.phi * (n_inf - self._recov) / tau_n)
spikes = ((self._volt.copy() >= 35.5) * self._volt.copy() / 35.5).astype(int)
return spikes, self._volt.copy()
def reset(self):
# Initialize at stable fixed point
self._volt = -50 * np.ones(self.num_neurons)
self._recov = .5 * (1 + np.tanh((self._volt - self.V3) / self.V4))
@property
def theta(self):
""" There is no theta in this neuron model. Zero array returned. """
return np.zeros_like(self._volt) | [
"spike_swarm_sim.utils.sigmoid",
"numpy.zeros_like",
"spike_swarm_sim.algorithms.interfaces.SET",
"numpy.tanh",
"numpy.random.randn",
"numpy.zeros",
"numpy.ones",
"numpy.clip",
"spike_swarm_sim.algorithms.interfaces.GET",
"spike_swarm_sim.utils.tanh",
"spike_swarm_sim.algorithms.interfaces.INIT"... | [((2517, 2557), 'spike_swarm_sim.register.neuron_model_registry', 'neuron_model_registry', ([], {'name': '"""rate_model"""'}), "(name='rate_model')\n", (2538, 2557), False, 'from spike_swarm_sim.register import neuron_model_registry\n'), ((5279, 5313), 'spike_swarm_sim.register.neuron_model_registry', 'neuron_model_registry', ([], {'name': '"""adex"""'}), "(name='adex')\n", (5300, 5313), False, 'from spike_swarm_sim.register import neuron_model_registry\n'), ((7187, 7227), 'spike_swarm_sim.register.neuron_model_registry', 'neuron_model_registry', ([], {'name': '"""izhikevich"""'}), "(name='izhikevich')\n", (7208, 7227), False, 'from spike_swarm_sim.register import neuron_model_registry\n'), ((8573, 8606), 'spike_swarm_sim.register.neuron_model_registry', 'neuron_model_registry', ([], {'name': '"""lif"""'}), "(name='lif')\n", (8594, 8606), False, 'from spike_swarm_sim.register import neuron_model_registry\n'), ((10247, 10284), 'spike_swarm_sim.register.neuron_model_registry', 'neuron_model_registry', ([], {'name': '"""exp_lif"""'}), "(name='exp_lif')\n", (10268, 10284), False, 'from spike_swarm_sim.register import neuron_model_registry\n'), ((11980, 12022), 'spike_swarm_sim.register.neuron_model_registry', 'neuron_model_registry', ([], {'name': '"""morris_lecar"""'}), "(name='morris_lecar')\n", (12001, 12022), False, 'from spike_swarm_sim.register import neuron_model_registry\n'), ((1694, 1713), 'spike_swarm_sim.algorithms.interfaces.GET', 'GET', (['"""neurons:bias"""'], {}), "('neurons:bias')\n", (1697, 1713), False, 'from spike_swarm_sim.algorithms.interfaces import GET, SET, LEN, INIT\n'), ((1849, 1868), 'spike_swarm_sim.algorithms.interfaces.SET', 'SET', (['"""neurons:bias"""'], {}), "('neurons:bias')\n", (1852, 1868), False, 'from spike_swarm_sim.algorithms.interfaces import GET, SET, LEN, INIT\n'), ((2040, 2059), 'spike_swarm_sim.algorithms.interfaces.LEN', 'LEN', (['"""neurons:bias"""'], {}), 
"('neurons:bias')\n", (2043, 2059), False, 'from spike_swarm_sim.algorithms.interfaces import GET, SET, LEN, INIT\n'), ((2164, 2184), 'spike_swarm_sim.algorithms.interfaces.INIT', 'INIT', (['"""neurons:bias"""'], {}), "('neurons:bias')\n", (2168, 2184), False, 'from spike_swarm_sim.algorithms.interfaces import GET, SET, LEN, INIT\n'), ((3861, 3879), 'spike_swarm_sim.algorithms.interfaces.GET', 'GET', (['"""neurons:tau"""'], {}), "('neurons:tau')\n", (3864, 3879), False, 'from spike_swarm_sim.algorithms.interfaces import GET, SET, LEN, INIT\n'), ((4031, 4049), 'spike_swarm_sim.algorithms.interfaces.SET', 'SET', (['"""neurons:tau"""'], {}), "('neurons:tau')\n", (4034, 4049), False, 'from spike_swarm_sim.algorithms.interfaces import GET, SET, LEN, INIT\n'), ((4199, 4217), 'spike_swarm_sim.algorithms.interfaces.LEN', 'LEN', (['"""neurons:tau"""'], {}), "('neurons:tau')\n", (4202, 4217), False, 'from spike_swarm_sim.algorithms.interfaces import GET, SET, LEN, INIT\n'), ((4297, 4316), 'spike_swarm_sim.algorithms.interfaces.INIT', 'INIT', (['"""neurons:tau"""'], {}), "('neurons:tau')\n", (4301, 4316), False, 'from spike_swarm_sim.algorithms.interfaces import GET, SET, LEN, INIT\n'), ((4562, 4581), 'spike_swarm_sim.algorithms.interfaces.GET', 'GET', (['"""neurons:gain"""'], {}), "('neurons:gain')\n", (4565, 4581), False, 'from spike_swarm_sim.algorithms.interfaces import GET, SET, LEN, INIT\n'), ((4721, 4740), 'spike_swarm_sim.algorithms.interfaces.SET', 'SET', (['"""neurons:gain"""'], {}), "('neurons:gain')\n", (4724, 4740), False, 'from spike_swarm_sim.algorithms.interfaces import GET, SET, LEN, INIT\n'), ((4908, 4927), 'spike_swarm_sim.algorithms.interfaces.LEN', 'LEN', (['"""neurons:gain"""'], {}), "('neurons:gain')\n", (4911, 4927), False, 'from spike_swarm_sim.algorithms.interfaces import GET, SET, LEN, INIT\n'), ((5009, 5029), 'spike_swarm_sim.algorithms.interfaces.INIT', 'INIT', (['"""neurons:gain"""'], {}), "('neurons:gain')\n", (5013, 5029), False, 'from 
spike_swarm_sim.algorithms.interfaces import GET, SET, LEN, INIT\n'), ((510, 531), 'numpy.zeros', 'np.zeros', (['num_neurons'], {}), '(num_neurons)\n', (518, 531), True, 'import numpy as np\n'), ((1651, 1677), 'numpy.zeros', 'np.zeros', (['self.num_neurons'], {}), '(self.num_neurons)\n', (1659, 1677), True, 'import numpy as np\n'), ((2384, 2424), 'numpy.clip', 'np.clip', (['random_biases'], {'a_min': '(0)', 'a_max': '(1)'}), '(random_biases, a_min=0, a_max=1)\n', (2391, 2424), True, 'import numpy as np\n'), ((3264, 3310), 'spike_swarm_sim.utils.sigmoid', 'sigmoid', (["outputs[self.activation == 'sigmoid']"], {}), "(outputs[self.activation == 'sigmoid'])\n", (3271, 3310), False, 'from spike_swarm_sim.utils import sigmoid, tanh, increase_time\n'), ((3357, 3397), 'spike_swarm_sim.utils.tanh', 'tanh', (["outputs[self.activation == 'tanh']"], {}), "(outputs[self.activation == 'tanh'])\n", (3361, 3397), False, 'from spike_swarm_sim.utils import sigmoid, tanh, increase_time\n'), ((3487, 3513), 'numpy.zeros', 'np.zeros', (['self.num_neurons'], {}), '(self.num_neurons)\n', (3495, 3513), True, 'import numpy as np\n'), ((4441, 4471), 'numpy.random.random', 'np.random.random', ([], {'size': 'tau_len'}), '(size=tau_len)\n', (4457, 4471), True, 'import numpy as np\n'), ((5158, 5189), 'numpy.random.random', 'np.random.random', ([], {'size': 'gain_len'}), '(size=gain_len)\n', (5174, 5189), True, 'import numpy as np\n'), ((6330, 6373), 'numpy.clip', 'np.clip', (['self._volt'], {'a_min': 'None', 'a_max': '(30.0)'}), '(self._volt, a_min=None, a_max=30.0)\n', (6337, 6373), True, 'import numpy as np\n'), ((7088, 7113), 'numpy.zeros_like', 'np.zeros_like', (['self._volt'], {}), '(self._volt)\n', (7101, 7113), True, 'import numpy as np\n'), ((8028, 8072), 'numpy.clip', 'np.clip', (['self._volt'], {'a_max': '(30.0)', 'a_min': '(-85.0)'}), '(self._volt, a_max=30.0, a_min=-85.0)\n', (8035, 8072), True, 'import numpy as np\n'), ((8383, 8408), 'numpy.zeros_like', 'np.zeros_like', 
(['self._volt'], {}), '(self._volt)\n', (8396, 8408), True, 'import numpy as np\n'), ((9911, 9936), 'numpy.zeros_like', 'np.zeros_like', (['self._volt'], {}), '(self._volt)\n', (9924, 9936), True, 'import numpy as np\n'), ((10070, 10095), 'numpy.zeros_like', 'np.zeros_like', (['self._volt'], {}), '(self._volt)\n', (10083, 10095), True, 'import numpy as np\n'), ((10213, 10238), 'numpy.zeros_like', 'np.zeros_like', (['self._volt'], {}), '(self._volt)\n', (10226, 10238), True, 'import numpy as np\n'), ((11112, 11155), 'numpy.clip', 'np.clip', (['self._volt'], {'a_max': '(30.0)', 'a_min': 'None'}), '(self._volt, a_max=30.0, a_min=None)\n', (11119, 11155), True, 'import numpy as np\n'), ((11648, 11673), 'numpy.zeros_like', 'np.zeros_like', (['self._volt'], {}), '(self._volt)\n', (11661, 11673), True, 'import numpy as np\n'), ((11807, 11832), 'numpy.zeros_like', 'np.zeros_like', (['self._volt'], {}), '(self._volt)\n', (11820, 11832), True, 'import numpy as np\n'), ((11950, 11975), 'numpy.zeros_like', 'np.zeros_like', (['self._volt'], {}), '(self._volt)\n', (11963, 11975), True, 'import numpy as np\n'), ((13350, 13394), 'numpy.clip', 'np.clip', (['self._volt'], {'a_max': '(40.0)', 'a_min': '(-85.0)'}), '(self._volt, a_max=40.0, a_min=-85.0)\n', (13357, 13394), True, 'import numpy as np\n'), ((13930, 13955), 'numpy.zeros_like', 'np.zeros_like', (['self._volt'], {}), '(self._volt)\n', (13943, 13955), True, 'import numpy as np\n'), ((3667, 3687), 'numpy.array', 'np.array', (['activation'], {}), '(activation)\n', (3675, 3687), True, 'import numpy as np\n'), ((3808, 3847), 'numpy.repeat', 'np.repeat', (['activation', 'self.num_neurons'], {}), '(activation, self.num_neurons)\n', (3817, 3847), True, 'import numpy as np\n'), ((6972, 6997), 'numpy.ones', 'np.ones', (['self.num_neurons'], {}), '(self.num_neurons)\n', (6979, 6997), True, 'import numpy as np\n'), ((7155, 7180), 'numpy.ones', 'np.ones', (['self.num_neurons'], {}), '(self.num_neurons)\n', (7162, 7180), True, 'import 
numpy as np\n'), ((10156, 10181), 'numpy.ones', 'np.ones', (['self.num_neurons'], {}), '(self.num_neurons)\n', (10163, 10181), True, 'import numpy as np\n'), ((11893, 11918), 'numpy.ones', 'np.ones', (['self.num_neurons'], {}), '(self.num_neurons)\n', (11900, 11918), True, 'import numpy as np\n'), ((13057, 13104), 'numpy.cosh', 'np.cosh', (['((self._volt - self.V3) / (2 * self.V4))'], {}), '((self._volt - self.V3) / (2 * self.V4))\n', (13064, 13104), True, 'import numpy as np\n'), ((13695, 13720), 'numpy.ones', 'np.ones', (['self.num_neurons'], {}), '(self.num_neurons)\n', (13702, 13720), True, 'import numpy as np\n'), ((732, 745), 'numpy.array', 'np.array', (['val'], {}), '(val)\n', (740, 745), True, 'import numpy as np\n'), ((912, 959), 'numpy.repeat', 'np.repeat', (['self.__dict__[var]', 'self.num_neurons'], {}), '(self.__dict__[var], self.num_neurons)\n', (921, 959), True, 'import numpy as np\n'), ((2327, 2354), 'numpy.random.randn', 'np.random.randn', (['biases_len'], {}), '(biases_len)\n', (2342, 2354), True, 'import numpy as np\n'), ((12923, 12964), 'numpy.tanh', 'np.tanh', (['((self._volt - self.V1) / self.V2)'], {}), '((self._volt - self.V1) / self.V2)\n', (12930, 12964), True, 'import numpy as np\n'), ((12993, 13034), 'numpy.tanh', 'np.tanh', (['((self._volt - self.V3) / self.V4)'], {}), '((self._volt - self.V3) / self.V4)\n', (13000, 13034), True, 'import numpy as np\n'), ((13754, 13795), 'numpy.tanh', 'np.tanh', (['((self._volt - self.V3) / self.V4)'], {}), '((self._volt - self.V3) / self.V4)\n', (13761, 13795), True, 'import numpy as np\n'), ((11043, 11081), 'numpy.exp', 'np.exp', (['((self._volt - self.thresh) / 1)'], {}), '((self._volt - self.thresh) / 1)\n', (11049, 11081), True, 'import numpy as np\n'), ((6245, 6283), 'numpy.exp', 'np.exp', (['((self._volt - self._theta) / 2)'], {}), '((self._volt - self._theta) / 2)\n', (6251, 6283), True, 'import numpy as np\n')] |
"""
Name: percussivenessEstimation
Date: Jun 2019
Programmer: <NAME>, <NAME>
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
If you use the 'NMF toolbox' please refer to:
[1] <NAME>, <NAME>, <NAME>, and <NAME>
NMF Toolbox: Music Processing Applications of Nonnegative Matrix
Factorization
In Proceedings of the International Conference on Digital Audio Effects
(DAFx), 2019.
License:
This file is part of 'NMF toolbox'.
https://www.audiolabs-erlangen.de/resources/MIR/NMFtoolbox/
'NMF toolbox' is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
the Free Software Foundation, either version 3 of the License, or (at
your option) any later version.
'NMF toolbox' is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with 'NMF toolbox'. If not, see http://www.gnu.org/licenses/.
"""
import numpy as np
def percussivenessEstimation(W):
"""This function takes a matrix or tensor of NMF templates and estimates the
percussiveness by assuming that the lower part explains percussive and the
upper part explains harmonic components. This is explained in sec. 2.4,
especially eq. (4) in [2].
References
----------
[2] <NAME>, <NAME>, <NAME>: "Unifying
Local and Global Methods for Harmonic-Percussive Source Separation"
In Proceedings of the IEEE International Conference on Acoustics,
Speech, and Signal Processing (ICASSP), 2018.
Parameters
----------
W: array-like
K x R matrix (or K x R x T tensor) of NMF (NMFD) templates
Returns
-------
percWeight: array-like
The resulting percussiveness estimate per component
"""
# get dimensions of templates
K, R, T = W.shape
# this assumes that the matrix (tensor) is formed in the way we need it
numBins = int(K/2)
# do the calculation, which is essentially a ratio
percWeight = np.zeros(R)
for c in range(R):
percPart = W[:numBins, c, :]
# harmPart = squeeze(W(1:end,c,:));
harmPart = W[:, c, :]
percWeight[c] = percPart.sum() / harmPart.sum()
return percWeight
| [
"numpy.zeros"
] | [((2284, 2295), 'numpy.zeros', 'np.zeros', (['R'], {}), '(R)\n', (2292, 2295), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'mayanqiong'
import numpy as np
from pandas import DataFrame
from tqsdk.datetime import _get_trading_timestamp, _get_trade_timestamp
from tqsdk.rangeset import _rangeset_head, _rangeset_slice, _rangeset_length
"""
检查参数类型
"""
from inspect import isfunction
def _check_volume_limit(min_volume, max_volume):
if min_volume is not None and min_volume <= 0:
raise Exception("最小下单手数(min_volume) %s 错误, 请检查 min_volume 是否填写正确" % min_volume)
if max_volume is not None and max_volume <= 0:
raise Exception("最大下单手数(max_volume) %s 错误, 请检查 max_volume 是否填写正确" % max_volume)
if (min_volume is None and max_volume) or (max_volume is None and min_volume):
raise Exception("最小下单手数(min_volume) %s 和 最大下单手数(max_volume) %s 必须用时填写" % (min_volume, max_volume))
if min_volume and max_volume and min_volume > max_volume:
raise Exception("最小下单手数(min_volume) %s ,最大下单手数(max_volume) %s 错误, 请检查 min_volume, max_volume 是否填写正确" % (
min_volume, max_volume))
return int(min_volume) if min_volume else None, int(max_volume) if max_volume else None
def _check_direction(direction):
if direction not in ("BUY", "SELL"):
raise Exception("下单方向(direction) %s 错误, 请检查 direction 参数是否填写正确" % direction)
return direction
def _check_offset(offset):
if offset not in ("OPEN", "CLOSE", "CLOSETODAY"):
raise Exception("开平标志(offset) %s 错误, 请检查 offset 是否填写正确" % offset)
return offset
def _check_offset_priority(offset_priority):
if len(offset_priority.replace(",", "").replace("今", "", 1).replace("昨", "", 1).replace("开", "", 1)) > 0:
raise Exception("开平仓顺序(offset_priority) %s 错误, 请检查 offset_priority 参数是否填写正确" % offset_priority)
return offset_priority
def _check_volume(volume):
_volume = int(volume)
if _volume <= 0:
raise Exception("下单手数(volume) %s 错误, 请检查 volume 是否填写正确" % volume)
return _volume
def _check_price(price):
if price in ("ACTIVE", "PASSIVE") or isfunction(price):
return price
else:
raise Exception("下单方式(price) %s 错误, 请检查 price 参数是否填写正确" % price)
def _check_time_table(time_table: DataFrame):
if not isinstance(time_table, DataFrame):
raise Exception(f"time_table 参数应该是 pandas.DataFrame 类型")
need_columns = {'price', 'target_pos', 'interval'} - set(time_table.columns)
if need_columns:
raise Exception(f"缺少必要的列 {need_columns}")
if time_table.shape[0] > 0:
if time_table['interval'].isnull().values.any() or np.where(time_table['interval'] < 0, True, False).any():
raise Exception(f"interval 列必须为正数,请检查参数 {time_table['interval']}")
if time_table['target_pos'].isnull().values.any() or not np.issubdtype(time_table['target_pos'].dtype, np.integer):
raise Exception(f"target_pos 列必须为整数,请检查参数 {time_table['target_pos']}")
if not (np.isin(time_table['price'], ('PASSIVE', 'ACTIVE', None)) | time_table['price'].apply(isfunction)).all():
raise Exception(f"price 列必须为 ('PASSIVE', 'ACTIVE', None, Callable) 之一,请检查参数 {time_table['price']}")
return time_table
def _get_deadline_from_interval(quote, interval):
    """Convert an ``interval`` column (durations in seconds) into a list of
    deadlines (end times as nanosecond timestamps) within the current
    trading day of ``quote``.

    Raises when the current quote time lies outside the trading session,
    or when the accumulated intervals run past the end of the trading day.
    """
    session = _get_trading_timestamp(quote, quote.datetime)
    # full trading ranges of the current trading day (night + day sessions)
    trading_ranges = session['night'] + session['day']
    # current time, taken from the quote's own datetime
    now_nano = _get_trade_timestamp(quote.datetime, float('nan'))
    if not trading_ranges[0][0] <= now_nano < trading_ranges[-1][1]:
        raise Exception("当前时间不在指定的交易时间段内")
    deadlines = []
    for _, seconds in interval.items():
        nanos = int(seconds * 1e9)
        head = _rangeset_head(_rangeset_slice(trading_ranges, now_nano), nanos)
        if _rangeset_length(head) < nanos:
            raise Exception("指定时间段超出当前交易日")
        # each deadline becomes the start point for the next interval
        now_nano = head[-1][1]
        deadlines.append(now_nano)
    return deadlines
| [
"numpy.isin",
"tqsdk.rangeset._rangeset_length",
"numpy.where",
"inspect.isfunction",
"tqsdk.datetime._get_trading_timestamp",
"tqsdk.rangeset._rangeset_slice",
"numpy.issubdtype"
] | [((3315, 3360), 'tqsdk.datetime._get_trading_timestamp', '_get_trading_timestamp', (['quote', 'quote.datetime'], {}), '(quote, quote.datetime)\n', (3337, 3360), False, 'from tqsdk.datetime import _get_trading_timestamp, _get_trade_timestamp\n'), ((2025, 2042), 'inspect.isfunction', 'isfunction', (['price'], {}), '(price)\n', (2035, 2042), False, 'from inspect import isfunction\n'), ((3804, 3873), 'tqsdk.rangeset._rangeset_slice', '_rangeset_slice', (['trading_timestamp_nano_range', 'current_timestamp_nano'], {}), '(trading_timestamp_nano_range, current_timestamp_nano)\n', (3819, 3873), False, 'from tqsdk.rangeset import _rangeset_head, _rangeset_slice, _rangeset_length\n'), ((3904, 3923), 'tqsdk.rangeset._rangeset_length', '_rangeset_length', (['r'], {}), '(r)\n', (3920, 3923), False, 'from tqsdk.rangeset import _rangeset_head, _rangeset_slice, _rangeset_length\n'), ((2751, 2808), 'numpy.issubdtype', 'np.issubdtype', (["time_table['target_pos'].dtype", 'np.integer'], {}), "(time_table['target_pos'].dtype, np.integer)\n", (2764, 2808), True, 'import numpy as np\n'), ((2550, 2599), 'numpy.where', 'np.where', (["(time_table['interval'] < 0)", '(True)', '(False)'], {}), "(time_table['interval'] < 0, True, False)\n", (2558, 2599), True, 'import numpy as np\n'), ((2909, 2966), 'numpy.isin', 'np.isin', (["time_table['price']", "('PASSIVE', 'ACTIVE', None)"], {}), "(time_table['price'], ('PASSIVE', 'ACTIVE', None))\n", (2916, 2966), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from gym_jsbsim.agents import RandomAgent, ConstantAgent
from gym_jsbsim.tests.stubs import FlightTaskStub
class TestRandomAgent(unittest.TestCase):
    """Checks that RandomAgent always emits actions within its action space."""

    def setUp(self):
        self.action_space = FlightTaskStub().get_action_space()
        self.agent = RandomAgent(action_space=self.action_space)

    def test_act_generates_valid_actions(self):
        for _ in range(5):
            self.assertTrue(self.action_space.contains(self.agent.act(None)))
class TestConstantAgent(unittest.TestCase):
    """Checks that ConstantAgent emits valid actions that never change."""

    def setUp(self):
        self.task = FlightTaskStub()
        self.action_space = self.task.get_action_space()
        self.agent = ConstantAgent(action_space=self.action_space)

    def test_act_generates_valid_actions(self):
        for _ in range(3):
            self.assertTrue(self.action_space.contains(self.agent.act(None)))

    def test_act_returns_same_action(self):
        previous = self.agent.act(None)
        for _ in range(5):
            current = self.agent.act(None)
            np.testing.assert_array_almost_equal(previous, current)
            previous = current
| [
"numpy.testing.assert_array_almost_equal",
"gym_jsbsim.agents.ConstantAgent",
"gym_jsbsim.tests.stubs.FlightTaskStub",
"gym_jsbsim.agents.RandomAgent"
] | [((292, 335), 'gym_jsbsim.agents.RandomAgent', 'RandomAgent', ([], {'action_space': 'self.action_space'}), '(action_space=self.action_space)\n', (303, 335), False, 'from gym_jsbsim.agents import RandomAgent, ConstantAgent\n'), ((649, 665), 'gym_jsbsim.tests.stubs.FlightTaskStub', 'FlightTaskStub', ([], {}), '()\n', (663, 665), False, 'from gym_jsbsim.tests.stubs import FlightTaskStub\n'), ((744, 789), 'gym_jsbsim.agents.ConstantAgent', 'ConstantAgent', ([], {'action_space': 'self.action_space'}), '(action_space=self.action_space)\n', (757, 789), False, 'from gym_jsbsim.agents import RandomAgent, ConstantAgent\n'), ((1228, 1284), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['old_action', 'action'], {}), '(old_action, action)\n', (1264, 1284), True, 'import numpy as np\n'), ((235, 251), 'gym_jsbsim.tests.stubs.FlightTaskStub', 'FlightTaskStub', ([], {}), '()\n', (249, 251), False, 'from gym_jsbsim.tests.stubs import FlightTaskStub\n')] |
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
# Some utility functions we need for the class.
# For the class Data Science: Practical Deep Learning Concepts in Theano and TensorFlow
# https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow
# https://www.udemy.com/data-science-deep-learning-in-theano-tensorflow
# Note: run this from the current folder it is in.
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
def get_clouds():
    """Generate three Gaussian clouds in 2-D for a 3-class toy problem.

    Returns (X, Y): X is a (1500, 2) array (500 samples per class, each
    cloud a unit-variance Gaussian around its own center), Y holds the
    class labels 0, 1, 2 in blocks of 500.
    """
    n_per_class, dim = 500, 2
    centers = (np.array([0, -2]), np.array([2, 2]), np.array([-2, 2]))
    clouds = [np.random.randn(n_per_class, dim) + center for center in centers]
    X = np.vstack(clouds)
    Y = np.array([0] * n_per_class + [1] * n_per_class + [2] * n_per_class)
    return X, Y
def get_spiral():
    """Generate a noisy 6-arm spiral dataset with alternating binary labels.

    Each arm sweeps a quarter turn, with start angles spaced pi/3 apart
    and radius growing from 1 to 10; Gaussian noise (std 0.5) is added.
    Returns (X, Y) with X of shape (600, 2) and Y alternating 0/1 per arm.
    """
    n_per_arm = 100
    # radius -> low...high (starting at 1 keeps points off the origin)
    radius = np.linspace(1, 10, n_per_arm)
    thetas = np.empty((6, n_per_arm))
    for arm in range(6):
        start = np.pi * arm / 3.0
        thetas[arm] = np.linspace(start, start + np.pi / 2, n_per_arm)
    # polar -> cartesian: x = r*cos(theta), y = r*sin(theta)
    x1 = np.empty((6, n_per_arm))
    x2 = np.empty((6, n_per_arm))
    for arm in range(6):
        x1[arm] = radius * np.cos(thetas[arm])
        x2[arm] = radius * np.sin(thetas[arm])
    X = np.empty((600, 2))
    X[:, 0] = x1.flatten()
    X[:, 1] = x2.flatten()
    # jitter every point
    X += np.random.randn(600, 2) * 0.5
    # arms alternate between the two classes
    Y = np.array(([0] * n_per_arm + [1] * n_per_arm) * 3)
    return X, Y
def get_transformed_data(csv_path=r'C:\Users\Marcel\OneDrive\Python Courses\Machine Learning\train.csv'):
    """Load the MNIST training CSV, split off a test set, and return
    PCA-transformed, standardized features.

    Parameters
    ----------
    csv_path : str
        Location of the Kaggle MNIST ``train.csv`` file.  Defaults to the
        previously hard-coded path for backward compatibility; pass your
        own path to make the function portable across machines.

    Returns
    -------
    (Ztrain, Ztest, Ytrain, Ytest) where Z* contain the first 300
    principal components, standardized with training-set statistics.

    Side effect: plots the cumulative explained-variance curve via
    ``plot_cumulative_variance``.
    """
    print("Reading in and transforming data...")
    df = pd.read_csv(csv_path)
    data = df.values.astype(np.float32)
    # shuffle before splitting so train/test are i.i.d.
    np.random.shuffle(data)
    X = data[:, 1:]
    Y = data[:, 0].astype(np.int32)
    Xtrain = X[:-1000]
    Ytrain = Y[:-1000]
    Xtest = X[-1000:]
    Ytest = Y[-1000:]
    # center using the *training* mean only (no test-set leakage)
    mu = Xtrain.mean(axis=0)
    Xtrain = Xtrain - mu
    Xtest = Xtest - mu
    # fit PCA on the training data, then project both splits
    pca = PCA()
    Ztrain = pca.fit_transform(Xtrain)
    Ztest = pca.transform(Xtest)
    plot_cumulative_variance(pca)
    # keep the first 300 principal components
    Ztrain = Ztrain[:, :300]
    Ztest = Ztest[:, :300]
    # standardize with training statistics
    mu = Ztrain.mean(axis=0)
    std = Ztrain.std(axis=0)
    Ztrain = (Ztrain - mu) / std
    Ztest = (Ztest - mu) / std
    return Ztrain, Ztest, Ytrain, Ytest
def get_normalized_data(csv_path=r'C:\Users\Marcel\OneDrive\Python Courses\Machine Learning\train.csv'):
    """Load the MNIST training CSV, split off a test set, and return
    pixel data standardized with training-set statistics.

    Parameters
    ----------
    csv_path : str
        Location of the Kaggle MNIST ``train.csv`` file.  Defaults to the
        previously hard-coded path for backward compatibility; pass your
        own path to make the function portable across machines.

    Returns
    -------
    (Xtrain, Xtest, Ytrain, Ytest) with the last 1000 rows held out as
    the test split.
    """
    print("Reading in and transforming data...")
    df = pd.read_csv(csv_path)
    data = df.values.astype(np.float32)
    # shuffle before splitting so train/test are i.i.d.
    np.random.shuffle(data)
    X = data[:, 1:]
    Y = data[:, 0]
    Xtrain = X[:-1000]
    Ytrain = Y[:-1000]
    Xtest = X[-1000:]
    Ytest = Y[-1000:]
    # standardize with training statistics only
    mu = Xtrain.mean(axis=0)
    std = Xtrain.std(axis=0)
    # constant pixels have zero std; substitute 1 to avoid division by zero
    np.place(std, std == 0, 1)
    Xtrain = (Xtrain - mu) / std
    Xtest = (Xtest - mu) / std
    return Xtrain, Xtest, Ytrain, Ytest
def plot_cumulative_variance(pca):
    """Plot the cumulative explained-variance ratio of a fitted PCA.

    Returns the cumulative values as a list (one entry per component).
    """
    cumulative = []
    running = 0.0
    for ratio in pca.explained_variance_ratio_:
        running = ratio if not cumulative else cumulative[-1] + ratio
        cumulative.append(running)
    plt.plot(cumulative)
    plt.show()
    return cumulative
def forward(X, W, b):
    """Linear layer followed by a row-wise softmax.

    X: (N, D) inputs, W: (D, K) weights, b: (K,) bias.
    Returns the (N, K) matrix of class probabilities.
    """
    activation = X.dot(W) + b
    exp_a = np.exp(activation)
    return exp_a / exp_a.sum(axis=1, keepdims=True)
def predict(p_y):
    """Return the index of the most probable class for every row of p_y."""
    return np.argmax(p_y, axis=1)
def error_rate(p_y, t):
    """Fraction of rows whose argmax prediction differs from the targets t."""
    return np.mean(np.argmax(p_y, axis=1) != t)
def cost(p_y, t):
    """Cross-entropy cost of predictions p_y against one-hot targets t."""
    return -(t * np.log(p_y)).sum()
def gradW(t, y, X):
    """Gradient of the log-likelihood with respect to the weight matrix."""
    return np.dot(X.T, t - y)
def gradb(t, y):
    """Gradient of the log-likelihood with respect to the bias vector."""
    return np.sum(t - y, axis=0)
def y2indicator(y):
    """One-hot encode a vector of digit labels into an (N, 10) matrix."""
    n = len(y)
    indicator = np.zeros((n, 10))
    indicator[np.arange(n), y[:].astype(np.int32)] = 1
    return indicator
def benchmark_full():
    """Train softmax logistic regression on the full 784-pixel MNIST data
    by batch gradient ascent (500 iterations, lr=4e-5, L2 reg=0.01) and
    plot the train/test cost curves and the test error rate.
    """
    Xtrain, Xtest, Ytrain, Ytest = get_normalized_data()
    print("Performing logistic regression...")
    _, D = Xtrain.shape
    Ytrain_ind = y2indicator(Ytrain)
    Ytest_ind = y2indicator(Ytest)
    # random init scaled by 1/sqrt(D)
    W = np.random.randn(D, 10) / np.sqrt(D)
    b = np.zeros(10)
    train_costs = []
    test_costs = []
    test_errors = []
    lr = 0.00004
    reg = 0.01
    for epoch in range(500):
        p_y = forward(Xtrain, W, b)
        ll = cost(p_y, Ytrain_ind)
        train_costs.append(ll)
        p_y_test = forward(Xtest, W, b)
        test_costs.append(cost(p_y_test, Ytest_ind))
        err = error_rate(p_y_test, Ytest)
        test_errors.append(err)
        # gradient ascent on the regularized log-likelihood
        W += lr * (gradW(Ytrain_ind, p_y, Xtrain) - reg * W)
        b += lr * (gradb(Ytrain_ind, p_y) - reg * b)
        if epoch % 10 == 0:
            print("Cost at iteration %d: %.6f" % (epoch, ll))
            print("Error rate:", err)
    p_y = forward(Xtest, W, b)
    print("Final error rate:", error_rate(p_y, Ytest))
    iters = range(len(train_costs))
    plt.plot(iters, train_costs, iters, test_costs)
    plt.show()
    plt.plot(test_errors)
    plt.show()
def benchmark_pca():
    """Train softmax logistic regression on the first 300 PCA components
    of MNIST (200 iterations, lr=1e-4, L2 reg=0.01) and plot the
    train/test cost curves and the test error rate.
    """
    Xtrain, Xtest, Ytrain, Ytest = get_transformed_data()
    print("Performing logistic regression...")
    N, D = Xtrain.shape
    # build one-hot targets by hand
    Ytrain_ind = np.zeros((N, 10))
    for row in range(N):
        Ytrain_ind[row, Ytrain[row]] = 1
    Ntest = len(Ytest)
    Ytest_ind = np.zeros((Ntest, 10))
    for row in range(Ntest):
        Ytest_ind[row, Ytest[row]] = 1
    W = np.random.randn(D, 10) / np.sqrt(D)
    b = np.zeros(10)
    train_costs = []
    test_costs = []
    test_errors = []
    # with D = 300 components the error settles around 0.07
    lr = 0.0001
    reg = 0.01
    for epoch in range(200):
        p_y = forward(Xtrain, W, b)
        ll = cost(p_y, Ytrain_ind)
        train_costs.append(ll)
        p_y_test = forward(Xtest, W, b)
        test_costs.append(cost(p_y_test, Ytest_ind))
        err = error_rate(p_y_test, Ytest)
        test_errors.append(err)
        W += lr * (gradW(Ytrain_ind, p_y, Xtrain) - reg * W)
        b += lr * (gradb(Ytrain_ind, p_y) - reg * b)
        if epoch % 10 == 0:
            print("Cost at iteration %d: %.6f" % (epoch, ll))
            print("Error rate:", err)
    p_y = forward(Xtest, W, b)
    print("Final error rate:", error_rate(p_y, Ytest))
    iters = range(len(train_costs))
    plt.plot(iters, train_costs, iters, test_costs)
    plt.show()
    plt.plot(test_errors)
    plt.show()
# Run the full-data benchmark when executed as a script;
# uncomment benchmark_pca() to compare the PCA-reduced variant instead.
if __name__ == '__main__':
    # benchmark_pca()
    benchmark_full()
| [
"numpy.argmax",
"pandas.read_csv",
"numpy.empty",
"numpy.mean",
"numpy.sin",
"numpy.exp",
"numpy.arange",
"builtins.range",
"numpy.random.randn",
"numpy.place",
"numpy.linspace",
"numpy.random.shuffle",
"matplotlib.pyplot.show",
"numpy.cos",
"numpy.vstack",
"numpy.log",
"matplotlib.p... | [((898, 921), 'numpy.vstack', 'np.vstack', (['[X1, X2, X3]'], {}), '([X1, X2, X3])\n', (907, 921), True, 'import numpy as np\n'), ((931, 983), 'numpy.array', 'np.array', (['([0] * Nclass + [1] * Nclass + [2] * Nclass)'], {}), '([0] * Nclass + [1] * Nclass + [2] * Nclass)\n', (939, 983), True, 'import numpy as np\n'), ((1325, 1348), 'numpy.linspace', 'np.linspace', (['(1)', '(10)', '(100)'], {}), '(1, 10, 100)\n', (1336, 1348), True, 'import numpy as np\n'), ((1362, 1380), 'numpy.empty', 'np.empty', (['(6, 100)'], {}), '((6, 100))\n', (1370, 1380), True, 'import numpy as np\n'), ((1394, 1402), 'builtins.range', 'range', (['(6)'], {}), '(6)\n', (1399, 1402), False, 'from builtins import range\n'), ((1620, 1638), 'numpy.empty', 'np.empty', (['(6, 100)'], {}), '((6, 100))\n', (1628, 1638), True, 'import numpy as np\n'), ((1648, 1666), 'numpy.empty', 'np.empty', (['(6, 100)'], {}), '((6, 100))\n', (1656, 1666), True, 'import numpy as np\n'), ((1680, 1688), 'builtins.range', 'range', (['(6)'], {}), '(6)\n', (1685, 1688), False, 'from builtins import range\n'), ((1798, 1816), 'numpy.empty', 'np.empty', (['(600, 2)'], {}), '((600, 2))\n', (1806, 1816), True, 'import numpy as np\n'), ((1946, 2025), 'numpy.array', 'np.array', (['([0] * 100 + [1] * 100 + [0] * 100 + [1] * 100 + [0] * 100 + [1] * 100)'], {}), '([0] * 100 + [1] * 100 + [0] * 100 + [1] * 100 + [0] * 100 + [1] * 100)\n', (1954, 2025), True, 'import numpy as np\n'), ((2120, 2212), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\Marcel\\\\OneDrive\\\\Python Courses\\\\Machine Learning\\\\train.csv"""'], {}), "(\n 'C:\\\\Users\\\\Marcel\\\\OneDrive\\\\Python Courses\\\\Machine Learning\\\\train.csv')\n", (2131, 2212), True, 'import pandas as pd\n'), ((2247, 2270), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (2264, 2270), True, 'import numpy as np\n'), ((2558, 2563), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (2561, 2563), False, 'from 
sklearn.decomposition import PCA\n'), ((3030, 3122), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\Marcel\\\\OneDrive\\\\Python Courses\\\\Machine Learning\\\\train.csv"""'], {}), "(\n 'C:\\\\Users\\\\Marcel\\\\OneDrive\\\\Python Courses\\\\Machine Learning\\\\train.csv')\n", (3041, 3122), True, 'import pandas as pd\n'), ((3157, 3180), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (3174, 3180), True, 'import numpy as np\n'), ((3401, 3427), 'numpy.place', 'np.place', (['std', '(std == 0)', '(1)'], {}), '(std, std == 0, 1)\n', (3409, 3427), True, 'import numpy as np\n'), ((3727, 3738), 'matplotlib.pyplot.plot', 'plt.plot', (['P'], {}), '(P)\n', (3735, 3738), True, 'import matplotlib.pyplot as plt\n'), ((3743, 3753), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3751, 3753), True, 'import matplotlib.pyplot as plt\n'), ((3837, 3846), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (3843, 3846), True, 'import numpy as np\n'), ((3938, 3960), 'numpy.argmax', 'np.argmax', (['p_y'], {'axis': '(1)'}), '(p_y, axis=1)\n', (3947, 3960), True, 'import numpy as np\n'), ((4028, 4052), 'numpy.mean', 'np.mean', (['(prediction != t)'], {}), '(prediction != t)\n', (4035, 4052), True, 'import numpy as np\n'), ((4266, 4283), 'numpy.zeros', 'np.zeros', (['(N, 10)'], {}), '((N, 10))\n', (4274, 4283), True, 'import numpy as np\n'), ((4748, 4760), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (4756, 4760), True, 'import numpy as np\n'), ((5191, 5201), 'builtins.range', 'range', (['(500)'], {}), '(500)\n', (5196, 5201), False, 'from builtins import range\n'), ((5861, 5895), 'matplotlib.pyplot.plot', 'plt.plot', (['iters', 'LL', 'iters', 'LLtest'], {}), '(iters, LL, iters, LLtest)\n', (5869, 5895), True, 'import matplotlib.pyplot as plt\n'), ((5900, 5910), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5908, 5910), True, 'import matplotlib.pyplot as plt\n'), ((5915, 5931), 'matplotlib.pyplot.plot', 'plt.plot', (['CRtest'], {}), 
'(CRtest)\n', (5923, 5931), True, 'import matplotlib.pyplot as plt\n'), ((5936, 5946), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5944, 5946), True, 'import matplotlib.pyplot as plt\n'), ((6117, 6134), 'numpy.zeros', 'np.zeros', (['(N, 10)'], {}), '((N, 10))\n', (6125, 6134), True, 'import numpy as np\n'), ((6148, 6156), 'builtins.range', 'range', (['N'], {}), '(N)\n', (6153, 6156), False, 'from builtins import range\n'), ((6235, 6256), 'numpy.zeros', 'np.zeros', (['(Ntest, 10)'], {}), '((Ntest, 10))\n', (6243, 6256), True, 'import numpy as np\n'), ((6270, 6282), 'builtins.range', 'range', (['Ntest'], {}), '(Ntest)\n', (6275, 6282), False, 'from builtins import range\n'), ((6372, 6384), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (6380, 6384), True, 'import numpy as np\n'), ((6504, 6514), 'builtins.range', 'range', (['(200)'], {}), '(200)\n', (6509, 6514), False, 'from builtins import range\n'), ((7166, 7200), 'matplotlib.pyplot.plot', 'plt.plot', (['iters', 'LL', 'iters', 'LLtest'], {}), '(iters, LL, iters, LLtest)\n', (7174, 7200), True, 'import matplotlib.pyplot as plt\n'), ((7205, 7215), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7213, 7215), True, 'import matplotlib.pyplot as plt\n'), ((7220, 7236), 'matplotlib.pyplot.plot', 'plt.plot', (['CRtest'], {}), '(CRtest)\n', (7228, 7236), True, 'import matplotlib.pyplot as plt\n'), ((7241, 7251), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7249, 7251), True, 'import matplotlib.pyplot as plt\n'), ((732, 758), 'numpy.random.randn', 'np.random.randn', (['Nclass', 'D'], {}), '(Nclass, D)\n', (747, 758), True, 'import numpy as np\n'), ((761, 778), 'numpy.array', 'np.array', (['[0, -2]'], {}), '([0, -2])\n', (769, 778), True, 'import numpy as np\n'), ((788, 814), 'numpy.random.randn', 'np.random.randn', (['Nclass', 'D'], {}), '(Nclass, D)\n', (803, 814), True, 'import numpy as np\n'), ((817, 833), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (825, 833), True, 
'import numpy as np\n'), ((843, 869), 'numpy.random.randn', 'np.random.randn', (['Nclass', 'D'], {}), '(Nclass, D)\n', (858, 869), True, 'import numpy as np\n'), ((872, 889), 'numpy.array', 'np.array', (['[-2, 2]'], {}), '([-2, 2])\n', (880, 889), True, 'import numpy as np\n'), ((1501, 1541), 'numpy.linspace', 'np.linspace', (['start_angle', 'end_angle', '(100)'], {}), '(start_angle, end_angle, 100)\n', (1512, 1541), True, 'import numpy as np\n'), ((1895, 1918), 'numpy.random.randn', 'np.random.randn', (['(600)', '(2)'], {}), '(600, 2)\n', (1910, 1918), True, 'import numpy as np\n'), ((4087, 4098), 'numpy.log', 'np.log', (['p_y'], {}), '(p_y)\n', (4093, 4098), True, 'import numpy as np\n'), ((4704, 4726), 'numpy.random.randn', 'np.random.randn', (['D', '(10)'], {}), '(D, 10)\n', (4719, 4726), True, 'import numpy as np\n'), ((4729, 4739), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (4736, 4739), True, 'import numpy as np\n'), ((6328, 6350), 'numpy.random.randn', 'np.random.randn', (['D', '(10)'], {}), '(D, 10)\n', (6343, 6350), True, 'import numpy as np\n'), ((6353, 6363), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (6360, 6363), True, 'import numpy as np\n'), ((1715, 1732), 'numpy.cos', 'np.cos', (['thetas[i]'], {}), '(thetas[i])\n', (1721, 1732), True, 'import numpy as np\n'), ((1758, 1775), 'numpy.sin', 'np.sin', (['thetas[i]'], {}), '(thetas[i])\n', (1764, 1775), True, 'import numpy as np\n'), ((4292, 4304), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4301, 4304), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import seaborn as sns
def visualize_network(M, graph, num, iterations):
    """Add weighted friendship edges to ``M`` and, on the final iteration,
    draw the network and save it to ``data/img/Node_Graph.png``.

    Edges are (re)added on every call from the ``graph`` score matrix
    (``graph`` is assumed to expose ``to_numpy()``, e.g. a pandas
    DataFrame — TODO confirm with callers).  Nodes are coloured by the
    'speed' attribute (1 = gray, 2 = red); edges are coloured by weight
    bucket.  Only the last iteration (``num + 1 == iterations``) draws
    and saves — no averaging across iterations is performed.
    """
    # copy every non-zero friendship score into M as a weighted edge
    scores = graph.to_numpy()
    for j in range(len(scores)):
        for t in range(len(scores[0])):
            if scores[j][t] != 0:
                M.add_edge(j, t, weight=scores[j][t])
    # split agents by their 'speed' node attribute (1 = slow, 2 = fast)
    slow = {idx: data['pos'] for (idx, data) in M.nodes(data=True) if data['speed'] == 1}
    fast = {idx: data['pos'] for (idx, data) in M.nodes(data=True) if data['speed'] == 2}
    # draw the nodes only once, on the last iteration
    if (num+1)== iterations:
        nx.draw_networkx_nodes(
            M,
            nx.get_node_attributes(M, 'pos'),
            nodelist=slow, node_size=50,
            node_color='gray'
        )
        nx.draw_networkx_nodes(
            M,
            nx.get_node_attributes(M, 'pos'),
            nodelist=fast, node_size=50,
            node_color='red'
        )
    # bucket friendships by edge weight: close (<5), mid (5..10), far (>=10)
    close = [(u, v) for (u, v, d) in M.edges(data=True) if d['weight'] < 5]
    mid = [(u, v) for (u, v, d) in M.edges(data=True) if 5 <= d['weight'] < 10]
    far = [(u, v) for (u, v, d) in M.edges(data=True) if d['weight'] >= 10]
    if (num+1)== iterations:
        nx.draw_networkx_edges(
            M,
            nx.get_node_attributes(M, 'pos'),
            edgelist=close, width=0.7, edge_color='navy'
        )
        nx.draw_networkx_edges(
            M,
            nx.get_node_attributes(M, 'pos'),
            edgelist=mid, width=0.7, edge_color='royalblue'
        )
        nx.draw_networkx_edges(
            M,
            nx.get_node_attributes(M, 'pos'),
            edgelist=far, width=0.7, edge_color='skyblue'
        )
        # equal aspect ratio so spatial distances are not distorted
        plt.axes().set_aspect('equal')
        plt.savefig('data/img/Node_Graph.png')
        plt.close()
def distance_histograms(M, friends, num, iterations, scores):
    """Accumulate per-distance friendship statistics into ``scores`` and,
    on the last iteration, plot two stacked histograms with error bars.

    For every edge of ``M`` the Manhattan distance between the two
    endpoints is bucketed (bucket count = friends.height + friends.width),
    split into three similarity classes from the 'character' node
    attribute: |diff| < 0.3 ("similar"), < 0.6 ("not so similar"),
    else ("not similar at all").  Per bucket this records both the
    number of friendships and the average friendship weight.

    Six rows are appended to ``scores`` per call (counts for the three
    classes, then average weights for the three classes); the grown
    array is returned so the caller can pass it back on the next call.
    On the last iteration (``num + 1 == iterations``) the first row is
    dropped (presumably an initializer row — TODO confirm with caller),
    the per-iteration rows are averaged, and two figures are saved under
    data/img/.
    """
    # one bucket per possible Manhattan distance on the grid
    edges = list(M.edges())
    maxdist = friends.height + friends.width
    # friendship counts per distance bucket, per similarity class
    close = np.zeros(maxdist)
    mid = np.zeros(maxdist)
    far = np.zeros(maxdist)
    # summed friendship weights per bucket, later turned into averages
    close2 = np.zeros(maxdist)
    mid2 = np.zeros(maxdist)
    far2 = np.zeros(maxdist)
    character = nx.get_node_attributes(M, 'character')
    pos = nx.get_node_attributes(M, 'pos')
    for i in range(len(edges)):
        p1 = edges[i][0]
        p2 = edges[i][1]
        # Manhattan distance between the two befriended agents
        dist = abs(pos[p1][0] - pos[p2][0]) + abs(pos[p1][1] - pos[p2][1])
        index = int(dist)
        weight_friend = M[p1][p2]['weight']
        # classify the pair by character similarity
        if abs(character[edges[i][0]] - character[edges[i][1]]) < 0.3:
            close[index] += 1
            close2[index] += weight_friend
        elif abs(character[edges[i][0]] - character[edges[i][1]]) < 0.6:
            mid[index] += 1
            mid2[index] += weight_friend
        else:
            far[index] += 1
            far2[index] += weight_friend
    # convert summed weights into per-bucket averages (skip empty buckets)
    for i in range(len(far2)):
        if close2[i] != 0:
            close2[i] = close2[i]/close[i]
        if mid2[i] != 0:
            mid2[i] = mid2[i]/mid[i]
        if far2[i] != 0:
            far2[i] = far2[i]/far[i]
    # append this iteration's six rows to the running score matrix
    scores = np.vstack((scores,close))
    scores = np.vstack((scores,mid))
    scores = np.vstack((scores,far))
    scores = np.vstack((scores,close2))
    scores = np.vstack((scores,mid2))
    scores = np.vstack((scores,far2))
    if (num+1) == iterations:
        # figure 1: friendship counts vs distance, averaged over iterations
        fig4 = plt.figure(figsize=(8,5))
        ax4 = fig4.add_subplot(111, axisbelow=True)
        # drop the first row before averaging
        scores = np.delete(scores, (0), axis=0)
        # rows repeat in groups of six; every 6th row belongs to one series
        close = scores[::6]
        mid = scores[1::6]
        far = scores[2::6]
        avg_close = np.mean(close, axis = 0)
        sd_close = np.std(close, axis=0)
        avg_mid = np.mean(mid, axis = 0)
        sd_mid = np.std(mid, axis=0)
        avg_far = np.mean(far, axis = 0)
        sd_far = np.std(far, axis=0)
        close = avg_close
        mid = avg_mid
        far = avg_far
        bins = np.arange(maxdist)
        # draw each series as a weighted histogram with std-dev error bars
        nc, bin_c, _ = ax4.hist(bins,maxdist, weights=close, stacked=True, label='similar', color='blue')
        midway = 0.5*(bin_c[1:] + bin_c[:-1])
        plt.errorbar(midway, nc, yerr=sd_close, fmt='none')
        nm, bin_m, _ = ax4.hist(bins,maxdist, weights=mid, stacked=True,label ='not so similar', color='green' )
        midway = 0.5*(bin_m[1:] + bin_m[:-1])
        plt.errorbar(midway, nm, yerr=sd_mid, fmt='none')
        nf, bin_f, _ = ax4.hist(bins,maxdist, weights=far, stacked=True, label = "not similar at all", color='purple')
        midway = 0.5*(bin_f[1:] + bin_f[:-1])
        plt.errorbar(midway, nf, yerr=sd_far, fmt='none')
        ax4.legend(title="Similarity of Friends")
        ax4.set_xlabel("Spatial of Distance of friends", fontsize=16)
        ax4.set_ylabel("Number of friends", fontsize=16)
        fig4.savefig('data/img/Number Friends VS Distance.png')
        plt.close()
        # figure 2: average friendship score vs distance
        fig5 = plt.figure(figsize=(8,5))
        ax5 = fig5.add_subplot(111, axisbelow=True)
        close = scores[3::6]
        mid = scores[4::6]
        far = scores[5::6]
        avg_close = np.mean(close, axis = 0)
        sd_close = np.std(close, axis=0)
        avg_mid = np.mean(mid, axis = 0)
        sd_mid = np.std(mid, axis=0)
        avg_far = np.mean(far, axis = 0)
        sd_far = np.std(far, axis=0)
        close = avg_close
        mid = avg_mid
        far = avg_far
        nc, bin_c, _ = ax5.hist(bins,maxdist, weights=close, stacked=True, label='similar', color='blue')
        midway = 0.5*(bin_c[1:] + bin_c[:-1])
        plt.errorbar(midway, nc, yerr=sd_close, fmt='none')
        nm, bin_m, _ = ax5.hist(bins,maxdist, weights=mid, stacked=True,label ='not so similar', color='green')
        midway = 0.5*(bin_m[1:] + bin_m[:-1])
        plt.errorbar(midway, nm, yerr=sd_mid, fmt='none')
        nf, bin_f, _ = ax5.hist(bins,maxdist, weights=far, stacked=True, label = "not similar at all", color='purple')
        midway = 0.5*(bin_f[1:] + bin_f[:-1])
        plt.errorbar(midway, nf, yerr=sd_far, fmt='none')
        ax5.legend(title="Similarity of Friends")
        ax5.set_xlabel("Spatial distance of friends", fontsize=16)
        ax5.set_ylabel("Avg. Friend Score", fontsize=16)
        fig5.savefig('data/img/AVG. Friend Score VS Distance.png')
        plt.close()
        print()
    return scores
def friends_speed_histogram(M):
    """Plot side-by-side histograms of friendship (Manhattan) distances for
    slow agents (speed 1) and fast agents (speed 2), normalized as
    proportions, and save the figure to ``data/img/friend_speed.png``.
    """
    max_dist = 30
    slow_counts = np.zeros(max_dist)
    fast_counts = np.zeros(max_dist)
    speeds = nx.get_node_attributes(M, 'speed')
    pos = nx.get_node_attributes(M, 'pos')
    for u, v in list(M.edges()):
        # Manhattan distance between the two befriended agents
        bucket = int(abs(pos[u][0] - pos[v][0]) + abs(pos[u][1] - pos[v][1]))
        # each endpoint contributes to the histogram of its own speed class
        for node in (u, v):
            if speeds[node] == 1:
                slow_counts[bucket] += 1
            elif speeds[node] == 2:
                fast_counts[bucket] += 1
    bins = np.arange(max_dist)
    fig, (ax_slow, ax_fast) = plt.subplots(nrows=1, ncols=2, figsize=(9, 4))
    ax_slow.hist(slow_counts, bins, color="violet", density=True)
    ax_fast.hist(fast_counts, bins, color="blue", density=True)
    ax_slow.title.set_text('Slow')
    ax_fast.title.set_text('Fast')
    plt.setp((ax_slow, ax_fast), ylim=(0, 1), xlabel="Distance to friend", ylabel="Proportion of friends")
    plt.tight_layout()
    plt.savefig('data/img/friend_speed.png')
    plt.close()
| [
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig",
"numpy.std",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axes",
"numpy.zeros",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"networkx.get_node_attributes",
"m... | [((2129, 2146), 'numpy.zeros', 'np.zeros', (['maxdist'], {}), '(maxdist)\n', (2137, 2146), True, 'import numpy as np\n'), ((2157, 2174), 'numpy.zeros', 'np.zeros', (['maxdist'], {}), '(maxdist)\n', (2165, 2174), True, 'import numpy as np\n'), ((2185, 2202), 'numpy.zeros', 'np.zeros', (['maxdist'], {}), '(maxdist)\n', (2193, 2202), True, 'import numpy as np\n'), ((2216, 2233), 'numpy.zeros', 'np.zeros', (['maxdist'], {}), '(maxdist)\n', (2224, 2233), True, 'import numpy as np\n'), ((2245, 2262), 'numpy.zeros', 'np.zeros', (['maxdist'], {}), '(maxdist)\n', (2253, 2262), True, 'import numpy as np\n'), ((2274, 2291), 'numpy.zeros', 'np.zeros', (['maxdist'], {}), '(maxdist)\n', (2282, 2291), True, 'import numpy as np\n'), ((2309, 2347), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['M', '"""character"""'], {}), "(M, 'character')\n", (2331, 2347), True, 'import networkx as nx\n'), ((2358, 2390), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['M', '"""pos"""'], {}), "(M, 'pos')\n", (2380, 2390), True, 'import networkx as nx\n'), ((3232, 3258), 'numpy.vstack', 'np.vstack', (['(scores, close)'], {}), '((scores, close))\n', (3241, 3258), True, 'import numpy as np\n'), ((3271, 3295), 'numpy.vstack', 'np.vstack', (['(scores, mid)'], {}), '((scores, mid))\n', (3280, 3295), True, 'import numpy as np\n'), ((3308, 3332), 'numpy.vstack', 'np.vstack', (['(scores, far)'], {}), '((scores, far))\n', (3317, 3332), True, 'import numpy as np\n'), ((3345, 3372), 'numpy.vstack', 'np.vstack', (['(scores, close2)'], {}), '((scores, close2))\n', (3354, 3372), True, 'import numpy as np\n'), ((3385, 3410), 'numpy.vstack', 'np.vstack', (['(scores, mid2)'], {}), '((scores, mid2))\n', (3394, 3410), True, 'import numpy as np\n'), ((3423, 3448), 'numpy.vstack', 'np.vstack', (['(scores, far2)'], {}), '((scores, far2))\n', (3432, 3448), True, 'import numpy as np\n'), ((6542, 6560), 'numpy.zeros', 'np.zeros', (['max_dist'], {}), '(max_dist)\n', (6550, 6560), True, 
'import numpy as np\n'), ((6572, 6590), 'numpy.zeros', 'np.zeros', (['max_dist'], {}), '(max_dist)\n', (6580, 6590), True, 'import numpy as np\n'), ((6604, 6638), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['M', '"""speed"""'], {}), "(M, 'speed')\n", (6626, 6638), True, 'import networkx as nx\n'), ((6649, 6681), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['M', '"""pos"""'], {}), "(M, 'pos')\n", (6671, 6681), True, 'import networkx as nx\n'), ((7111, 7130), 'numpy.arange', 'np.arange', (['max_dist'], {}), '(max_dist)\n', (7120, 7130), True, 'import numpy as np\n'), ((7146, 7192), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(9, 4)'}), '(nrows=1, ncols=2, figsize=(9, 4))\n', (7158, 7192), True, 'import matplotlib.pyplot as plt\n'), ((7387, 7478), 'matplotlib.pyplot.setp', 'plt.setp', (['ax'], {'ylim': '(0, 1)', 'xlabel': '"""Distance to friend"""', 'ylabel': '"""Proportion of friends"""'}), "(ax, ylim=(0, 1), xlabel='Distance to friend', ylabel=\n 'Proportion of friends')\n", (7395, 7478), True, 'import matplotlib.pyplot as plt\n'), ((7478, 7496), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7494, 7496), True, 'import matplotlib.pyplot as plt\n'), ((7501, 7541), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""data/img/friend_speed.png"""'], {}), "('data/img/friend_speed.png')\n", (7512, 7541), True, 'import matplotlib.pyplot as plt\n'), ((7546, 7557), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7555, 7557), True, 'import matplotlib.pyplot as plt\n'), ((1885, 1923), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""data/img/Node_Graph.png"""'], {}), "('data/img/Node_Graph.png')\n", (1896, 1923), True, 'import matplotlib.pyplot as plt\n'), ((1932, 1943), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1941, 1943), True, 'import matplotlib.pyplot as plt\n'), ((3495, 3521), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 
'(8, 5)'}), '(figsize=(8, 5))\n', (3505, 3521), True, 'import matplotlib.pyplot as plt\n'), ((3591, 3619), 'numpy.delete', 'np.delete', (['scores', '(0)'], {'axis': '(0)'}), '(scores, 0, axis=0)\n', (3600, 3619), True, 'import numpy as np\n'), ((3725, 3747), 'numpy.mean', 'np.mean', (['close'], {'axis': '(0)'}), '(close, axis=0)\n', (3732, 3747), True, 'import numpy as np\n'), ((3769, 3790), 'numpy.std', 'np.std', (['close'], {'axis': '(0)'}), '(close, axis=0)\n', (3775, 3790), True, 'import numpy as np\n'), ((3809, 3829), 'numpy.mean', 'np.mean', (['mid'], {'axis': '(0)'}), '(mid, axis=0)\n', (3816, 3829), True, 'import numpy as np\n'), ((3849, 3868), 'numpy.std', 'np.std', (['mid'], {'axis': '(0)'}), '(mid, axis=0)\n', (3855, 3868), True, 'import numpy as np\n'), ((3887, 3907), 'numpy.mean', 'np.mean', (['far'], {'axis': '(0)'}), '(far, axis=0)\n', (3894, 3907), True, 'import numpy as np\n'), ((3927, 3946), 'numpy.std', 'np.std', (['far'], {'axis': '(0)'}), '(far, axis=0)\n', (3933, 3946), True, 'import numpy as np\n'), ((4033, 4051), 'numpy.arange', 'np.arange', (['maxdist'], {}), '(maxdist)\n', (4042, 4051), True, 'import numpy as np\n'), ((4212, 4263), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['midway', 'nc'], {'yerr': 'sd_close', 'fmt': '"""none"""'}), "(midway, nc, yerr=sd_close, fmt='none')\n", (4224, 4263), True, 'import matplotlib.pyplot as plt\n'), ((4432, 4481), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['midway', 'nm'], {'yerr': 'sd_mid', 'fmt': '"""none"""'}), "(midway, nm, yerr=sd_mid, fmt='none')\n", (4444, 4481), True, 'import matplotlib.pyplot as plt\n'), ((4656, 4705), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['midway', 'nf'], {'yerr': 'sd_far', 'fmt': '"""none"""'}), "(midway, nf, yerr=sd_far, fmt='none')\n", (4668, 4705), True, 'import matplotlib.pyplot as plt\n'), ((4958, 4969), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4967, 4969), True, 'import matplotlib.pyplot as plt\n'), ((4986, 5012), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (4996, 5012), True, 'import matplotlib.pyplot as plt\n'), ((5169, 5191), 'numpy.mean', 'np.mean', (['close'], {'axis': '(0)'}), '(close, axis=0)\n', (5176, 5191), True, 'import numpy as np\n'), ((5213, 5234), 'numpy.std', 'np.std', (['close'], {'axis': '(0)'}), '(close, axis=0)\n', (5219, 5234), True, 'import numpy as np\n'), ((5253, 5273), 'numpy.mean', 'np.mean', (['mid'], {'axis': '(0)'}), '(mid, axis=0)\n', (5260, 5273), True, 'import numpy as np\n'), ((5293, 5312), 'numpy.std', 'np.std', (['mid'], {'axis': '(0)'}), '(mid, axis=0)\n', (5299, 5312), True, 'import numpy as np\n'), ((5331, 5351), 'numpy.mean', 'np.mean', (['far'], {'axis': '(0)'}), '(far, axis=0)\n', (5338, 5351), True, 'import numpy as np\n'), ((5371, 5390), 'numpy.std', 'np.std', (['far'], {'axis': '(0)'}), '(far, axis=0)\n', (5377, 5390), True, 'import numpy as np\n'), ((5622, 5673), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['midway', 'nc'], {'yerr': 'sd_close', 'fmt': '"""none"""'}), "(midway, nc, yerr=sd_close, fmt='none')\n", (5634, 5673), True, 'import matplotlib.pyplot as plt\n'), ((5841, 5890), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['midway', 'nm'], {'yerr': 'sd_mid', 'fmt': '"""none"""'}), "(midway, nm, yerr=sd_mid, fmt='none')\n", (5853, 5890), True, 'import matplotlib.pyplot as plt\n'), ((6065, 6114), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['midway', 'nf'], {'yerr': 'sd_far', 'fmt': '"""none"""'}), "(midway, nf, yerr=sd_far, fmt='none')\n", (6077, 6114), True, 'import matplotlib.pyplot as plt\n'), ((6367, 6378), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6376, 6378), True, 'import matplotlib.pyplot as plt\n'), ((764, 796), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['M', '"""pos"""'], {}), "(M, 'pos')\n", (786, 796), True, 'import networkx as nx\n'), ((938, 970), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['M', '"""pos"""'], 
{}), "(M, 'pos')\n", (960, 970), True, 'import networkx as nx\n'), ((1412, 1444), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['M', '"""pos"""'], {}), "(M, 'pos')\n", (1434, 1444), True, 'import networkx as nx\n'), ((1572, 1604), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['M', '"""pos"""'], {}), "(M, 'pos')\n", (1594, 1604), True, 'import networkx as nx\n'), ((1735, 1767), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['M', '"""pos"""'], {}), "(M, 'pos')\n", (1757, 1767), True, 'import networkx as nx\n'), ((1846, 1856), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (1854, 1856), True, 'import matplotlib.pyplot as plt\n')] |
import sys
import time
import gym
import numpy as np
from collections import defaultdict
from gym.envs.toy_text import BlackjackEnv
from plot_utils import plot_blackjack_values, plot_policy
env = gym.make('Blackjack-v0')
def generate_episode_from_limit_stochastic(bj_env):
    """Roll out one Blackjack episode under a fixed stochastic policy.

    When the player's sum (state[0]) exceeds 18 the policy picks action 0
    with probability 0.8, otherwise it picks action 1 with probability 0.8.

    Returns a list of (state, action, reward) tuples, one per time step.
    """
    trajectory = []
    observation = bj_env.reset()
    done = False
    while not done:
        # Favour action 0 on high player sums, action 1 otherwise.
        action_probs = [0.8, 0.2] if observation[0] > 18 else [0.2, 0.8]
        chosen = np.random.choice(np.arange(2), p=action_probs)
        successor, reward, done, _ = bj_env.step(chosen)
        trajectory.append((observation, chosen, reward))
        observation = successor
    return trajectory
def print_episode(episode: list) -> None:
    """Pretty-print one episode of (state, action, reward) tuples.

    Each line shows the step index, the state (player sum, dealer card,
    usable-ace flag), the action with its Blackjack-v0 label, and the reward.
    """
    for step, (state, action, reward) in enumerate(episode):
        # Blackjack-v0 action convention: 0 = STICK, 1 = HIT.
        # (The original labelled 0 as HIT, which inverts the gym semantics.)
        label = "STICK" if action == 0 else "HIT"
        print(
            f'S{step}={state} (player score, dealer cards, ace?), A{step}={action} ({label}), R={reward}')
def mc_prediction_q(env: "BlackjackEnv", num_episodes: int, generate_episode, gamma: float = 1.0):
    """Every-visit Monte Carlo prediction of the action-value function Q.

    Args:
        env: environment exposing ``action_space.n`` and accepted by
            ``generate_episode``.
        num_episodes: number of episodes to sample.
        generate_episode: callable mapping ``env`` to a list of
            (state, action, reward) tuples.
        gamma: discount factor in [0, 1].

    Returns:
        dict mapping state -> numpy array of per-action value estimates.
    """
    # running sum of sampled returns and visit counts per (state, action)
    returns_sum = defaultdict(lambda: np.zeros(env.action_space.n))
    N = defaultdict(lambda: np.zeros(env.action_space.n))
    Q = defaultdict(lambda: np.zeros(env.action_space.n))
    # loop over episodes
    for i_episode in range(1, num_episodes + 1):
        # monitor progress
        if i_episode % 1000 == 0:
            print("\rEpisode {}/{}.".format(i_episode, num_episodes), end="")
            sys.stdout.flush()
        episode = generate_episode(env)
        states = [x[0] for x in episode]
        actions = [x[1] for x in episode]
        rewards = [x[2] for x in episode]
        for i, state in enumerate(states):
            # BUGFIX: the return for step i is the discounted sum of rewards
            # from step i onward (discount restarting at step i) -- the
            # original credited every state with the discounted sum of the
            # *entire* episode's rewards.
            G = sum(r * gamma ** k for k, r in enumerate(rewards[i:]))
            returns_sum[state][actions[i]] += G
            N[state][actions[i]] += 1.0
            Q[state][actions[i]] = returns_sum[state][actions[i]] / N[state][actions[i]]
    return Q
# obtain the action-value function
# NOTE: 500000 sampled episodes -- this takes a while to run.
Q = mc_prediction_q(env, 500000, generate_episode_from_limit_stochastic)
# obtain the corresponding state-value function
# V(s) = E_pi[Q(s, .)]: weight the two action values by the same stochastic
# policy used to generate the episodes (0.8/0.2 depending on the player sum).
V_to_plot = dict((k, (k[0] > 18) * (np.dot([0.8, 0.2], v)) + (k[0] <= 18) * (np.dot([0.2, 0.8], v))) \
                 for k, v in Q.items())
# plot the state-value function
plot_blackjack_values(V_to_plot)
| [
"gym.make",
"numpy.zeros",
"sys.stdout.flush",
"numpy.arange",
"plot_utils.plot_blackjack_values",
"numpy.dot"
] | [((200, 224), 'gym.make', 'gym.make', (['"""Blackjack-v0"""'], {}), "('Blackjack-v0')\n", (208, 224), False, 'import gym\n'), ((2445, 2477), 'plot_utils.plot_blackjack_values', 'plot_blackjack_values', (['V_to_plot'], {}), '(V_to_plot)\n', (2466, 2477), False, 'from plot_utils import plot_blackjack_values, plot_policy\n'), ((433, 445), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (442, 445), True, 'import numpy as np\n'), ((1172, 1200), 'numpy.zeros', 'np.zeros', (['env.action_space.n'], {}), '(env.action_space.n)\n', (1180, 1200), True, 'import numpy as np\n'), ((1230, 1258), 'numpy.zeros', 'np.zeros', (['env.action_space.n'], {}), '(env.action_space.n)\n', (1238, 1258), True, 'import numpy as np\n'), ((1288, 1316), 'numpy.zeros', 'np.zeros', (['env.action_space.n'], {}), '(env.action_space.n)\n', (1296, 1316), True, 'import numpy as np\n'), ((1543, 1561), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1559, 1561), False, 'import sys\n'), ((2305, 2326), 'numpy.dot', 'np.dot', (['[0.8, 0.2]', 'v'], {}), '([0.8, 0.2], v)\n', (2311, 2326), True, 'import numpy as np\n'), ((2346, 2367), 'numpy.dot', 'np.dot', (['[0.2, 0.8]', 'v'], {}), '([0.2, 0.8], v)\n', (2352, 2367), True, 'import numpy as np\n')] |
import numba
import numpy as np
from ..dtypes import DTYPES
from ..meta_func import Func
from .linalg import _lapack_linalg, dot, vdot, inner, outer, matrix_rank, solve, inv, det
# Placeholder functions to be replaced by JIT-compiled function
# FieldFunc._compile_funcs rebinds these module-level names to the field's
# compiled ufuncs; the jitted helpers below resolve them through globals.
ADD_UFUNC = lambda x, y: x + y
SUBTRACT_UFUNC = lambda x, y: x - y
MULTIPLY_UFUNC = lambda x, y: x * y
DIVIDE_UFUNC = lambda x, y: x // y
class FieldFunc(Func):
    """
    A mixin class that JIT compiles general purpose functions for polynomial arithmetic and convolution.

    Methods receive ``cls`` (not ``self``) as their first argument, matching the
    ``__init__(cls, name, bases, namespace)`` signature below -- this class
    appears to be used as a metaclass mixin for Galois field array classes.
    """
    # pylint: disable=no-value-for-parameter
    # numpy functions re-routed to the method named here
    _overridden_functions = {
        np.convolve: "_convolve",
    }
    # numpy.linalg entry points re-routed to the field-aware implementations
    _overridden_linalg_functions = {
        np.dot: dot,
        np.vdot: vdot,
        np.inner: inner,
        np.outer: outer,
        # np.tensordot: "tensordot",
        np.linalg.det: det,
        np.linalg.matrix_rank: matrix_rank,
        np.linalg.solve: solve,
        np.linalg.inv: inv,
    }
    def __init__(cls, name, bases, namespace, **kwargs):
        """Initialize per-class ufunc slots and the compiled-function cache."""
        super().__init__(name, bases, namespace, **kwargs)
        cls._ADD_UFUNC = None
        cls._SUBTRACT_UFUNC = None
        cls._MULTIPLY_UFUNC = None
        cls._DIVIDE_UFUNC = None
        cls._funcs = {}
    def _compile_funcs(cls, target):
        """Compile or select the matmul/convolve/poly helper implementations.

        In "python-calculate" mode only polynomial evaluation needs
        vectorizing; otherwise the module-global ufunc placeholders are
        rebound to this field's compiled ufuncs and the module-level helpers
        below are JIT compiled with numba.
        """
        global ADD_UFUNC, SUBTRACT_UFUNC, MULTIPLY_UFUNC, DIVIDE_UFUNC
        if cls.ufunc_mode == "python-calculate":
            # NOTE: Don't need to vectorize cls._convolve or cls._poly_divmod
            cls._funcs["poly_evaluate"] = np.vectorize(cls._poly_evaluate_python, excluded=["coeffs"], otypes=[np.object_])
        else:
            ADD_UFUNC = cls._ufuncs["add"]
            SUBTRACT_UFUNC = cls._ufuncs["subtract"]
            MULTIPLY_UFUNC = cls._ufuncs["multiply"]
            DIVIDE_UFUNC = cls._ufuncs["divide"]
            assert target == "cpu"
            cls._funcs["matmul"] = numba.jit("int64[:,:](int64[:,:], int64[:,:])", nopython=True)(_matmul_jit)
            cls._funcs["convolve"] = numba.jit("int64[:](int64[:], int64[:])", nopython=True)(_convolve_jit)
            cls._funcs["poly_divmod"] = numba.jit("int64[:](int64[:], int64[:])", nopython=True)(_poly_divmod_jit)
            cls._funcs["poly_evaluate"] = numba.guvectorize([(numba.int64[:], numba.int64[:], numba.int64[:])], "(n),(m)->(m)", nopython=True)(_poly_evaluate_jit)
    def _matmul(cls, A, B, out=None, **kwargs): # pylint: disable=unused-argument
        """Matrix-multiply two Galois field arrays (up to 2-D).

        1-D operands are temporarily promoted to a row/column matrix and the
        corresponding dimension is stripped from the result, mirroring
        ``np.matmul`` semantics. Prime fields are dispatched to LAPACK/BLAS.
        """
        if not type(A) is type(B):
            raise TypeError(f"Operation 'matmul' requires both arrays be in the same Galois field, not {type(A)} and {type(B)}.")
        if not (A.ndim >= 1 and B.ndim >= 1):
            raise ValueError(f"Operation 'matmul' requires both arrays have dimension at least 1, not {A.ndim}-D and {B.ndim}-D.")
        if not (A.ndim <= 2 and B.ndim <= 2):
            raise ValueError("Operation 'matmul' currently only supports matrix multiplication up to 2-D. If you would like matrix multiplication of N-D arrays, please submit a GitHub issue at https://github.com/mhostetter/galois/issues.")
        field = type(A)
        dtype = A.dtype
        if field.is_prime_field:
            return _lapack_linalg(A, B, np.matmul, out=out)
        # Track which temporary dimensions must be removed from the result.
        prepend, append = False, False
        if A.ndim == 1:
            A = A.reshape((1,A.size))
            prepend = True
        if B.ndim == 1:
            B = B.reshape((B.size,1))
            append = True
        if not A.shape[-1] == B.shape[-2]:
            raise ValueError(f"Operation 'matmul' requires the last dimension of A to match the second-to-last dimension of B, not {A.shape} and {B.shape}.")
        # if A.ndim > 2 and B.ndim == 2:
        #     new_shape = list(A.shape[:-2]) + list(B.shape)
        #     B = np.broadcast_to(B, new_shape)
        # if B.ndim > 2 and A.ndim == 2:
        #     new_shape = list(B.shape[:-2]) + list(A.shape)
        #     A = np.broadcast_to(A, new_shape)
        if cls.ufunc_mode == "python-calculate":
            C = cls._matmul_python(A, B)
        else:
            C = cls._funcs["matmul"](A.astype(np.int64), B.astype(np.int64))
            C = C.astype(dtype).view(field)
        # Undo the 1-D -> 2-D promotion on the result shape.
        shape = list(C.shape)
        if prepend:
            shape = shape[1:]
        if append:
            shape = shape[:-1]
        C = C.reshape(shape)
        # TODO: Determine a better way to do this
        if out is not None:
            assert isinstance(out, tuple) and len(out) == 1  # TODO: Why is `out` getting populated as tuple?
            out = out[0]
            out[:] = C[:]
        return C
    def _convolve(cls, a, b, mode="full"):
        """Full 1-D convolution of two field arrays.

        Prime fields use the native numpy convolution in a dtype wide enough
        to avoid overflow, then reduce mod the characteristic; other fields
        fall back to the pure-Python or JIT implementation.
        """
        if not type(a) is type(b):
            raise TypeError(f"Arguments `a` and `b` must be of the same Galois field array class, not {type(a)} and {type(b)}.")
        if not mode == "full":
            raise ValueError(f"Operation 'convolve' currently only supports mode of 'full', not '{mode}'.")
        field = type(a)
        dtype = a.dtype
        if field.is_prime_field:
            # Determine the minimum dtype to hold the entire product and summation without overflowing
            n_sum = min(a.size, b.size)
            max_value = n_sum * (field.characteristic - 1)**2
            dtypes = [dtype for dtype in DTYPES if np.iinfo(dtype).max >= max_value]
            dtype = np.object_ if len(dtypes) == 0 else dtypes[0]
            return_dtype = a.dtype
            a = a.view(np.ndarray).astype(dtype)
            b = b.view(np.ndarray).astype(dtype)
            c = np.convolve(a, b)  # Compute result using native numpy LAPACK/BLAS implementation
            c = c % field.characteristic  # Reduce the result mod p
            c = c.astype(return_dtype).view(field) if not np.isscalar(c) else field(c, dtype=return_dtype)
            return c
        else:
            if cls.ufunc_mode == "python-calculate":
                return cls._convolve_python(a, b)
            else:
                c = cls._funcs["convolve"](a.astype(np.int64), b.astype(np.int64))
                c = c.astype(dtype).view(field)
                return c
    def _poly_divmod(cls, a, b):
        """Divide polynomial ``a`` by ``b`` (coefficient vectors, degree-descending).

        Returns ``(quotient, remainder)`` as two slices of the combined
        result vector produced by the divmod kernel.
        """
        assert isinstance(a, cls) and isinstance(b, cls)
        field = type(a)
        dtype = a.dtype
        q_degree = a.size - b.size
        r_degree = b.size - 1
        if cls.ufunc_mode == "python-calculate":
            qr = cls._poly_divmod_python(a, b)
        else:
            qr = cls._funcs["poly_divmod"](a.astype(np.int64), b.astype(np.int64))
            qr = qr.astype(dtype).view(field)
        return qr[0:q_degree + 1], qr[q_degree + 1:q_degree + 1 + r_degree + 1]
    def _poly_evaluate(cls, coeffs, x):
        """Evaluate the polynomial ``coeffs`` (degree-descending) at point(s) ``x``."""
        assert isinstance(coeffs, cls) and isinstance(x, cls)
        assert coeffs.ndim == 1
        field = cls
        dtype = x.dtype
        x = np.atleast_1d(x)
        if cls.ufunc_mode == "python-calculate":
            # For object dtypes, call the vectorized classmethod
            y = cls._funcs["poly_evaluate"](coeffs=coeffs.view(np.ndarray), values=x.view(np.ndarray))  # pylint: disable=not-callable
        else:
            # For integer dtypes, call the JIT-compiled gufunc
            y = cls._funcs["poly_evaluate"](coeffs, x, field.Zeros(x.shape), casting="unsafe")  # pylint: disable=not-callable
            y = y.astype(dtype)
        y = y.view(field)
        if y.size == 1:
            y = y[0]
        return y
    ###############################################################################
    # Pure python implementation, operating on Galois field arrays (not integers),
    # for fields in ufunc_mode="python-calculate"
    ###############################################################################
    def _matmul_python(cls, A, B):
        """Naive triple-loop matrix multiply over field arrays (object dtype path)."""
        assert A.ndim == 2 and B.ndim == 2
        assert A.shape[-1] == B.shape[-2]
        M, N = A.shape[-2], B.shape[-1]
        C = cls.Zeros((M, N), dtype=A.dtype)
        for i in range(M):
            for j in range(N):
                C[i,j] = np.sum(A[i,:] * B[:,j])
        return C
    def _convolve_python(cls, a, b):
        """Naive full convolution over field arrays (object dtype path)."""
        c = cls.Zeros(a.size + b.size - 1, dtype=a.dtype)
        # Want a to be the shorter sequence
        if b.size < a.size:
            a, b = b, a
        for i in range(a.size):
            c[i:i + b.size] += a[i] * b
        return c
    def _poly_divmod_python(cls, a, b):
        """Synthetic polynomial division over field arrays (object dtype path)."""
        # pylint: disable=unsubscriptable-object,unsupported-assignment-operation
        assert a.size >= b.size
        q_degree = a.size - b.size
        qr = cls(a)
        for i in range(0, q_degree + 1):
            if qr[i] > 0:
                q = qr[i] / b[0]
                qr[i:i + b.size] -= q*b
                qr[i] = q
        return qr
    def _poly_evaluate_python(cls, coeffs, values):
        """Horner-scheme polynomial evaluation using the field's scalar arithmetic."""
        result = coeffs[0]
        for j in range(1, coeffs.size):
            result = cls._add_python(coeffs[j], cls._multiply_python(result, values))
        return result
###############################################################################
# JIT-compiled implementation of the specified functions
###############################################################################
def _matmul_jit(A, B):  # pragma: no cover
    """Dense matrix product accumulated with the module-level field ufuncs."""
    assert A.ndim == 2 and B.ndim == 2
    assert A.shape[-1] == B.shape[-2]
    n_rows, inner_dim = A.shape
    inner_dim, n_cols = B.shape
    out = np.zeros((n_rows, n_cols), dtype=np.int64)
    for row in range(n_rows):
        for col in range(n_cols):
            acc = out[row, col]
            for k in range(inner_dim):
                acc = ADD_UFUNC(acc, MULTIPLY_UFUNC(A[row, k], B[k, col]))
            out[row, col] = acc
    return out
def _convolve_jit(a, b):  # pragma: no cover
    """Full 1-D convolution using the module-level field ufuncs."""
    out = np.zeros(a.size + b.size - 1, dtype=a.dtype)
    for i in range(a.size):
        # Each (i, j) pair touches a distinct output cell within this pass.
        for j in range(b.size):
            out[i + j] = ADD_UFUNC(out[i + j], MULTIPLY_UFUNC(a[i], b[j]))
    return out
def _poly_divmod_jit(a, b):  # pragma: no cover
    """Synthetic division of polynomial a by b; quotient then remainder in one vector."""
    assert a.size >= b.size
    quot_degree = a.size - b.size
    work = np.copy(a)
    for idx in range(quot_degree + 1):
        if work[idx] > 0:
            factor = DIVIDE_UFUNC(work[idx], b[0])
            for off in range(b.size):
                work[idx + off] = SUBTRACT_UFUNC(work[idx + off], MULTIPLY_UFUNC(factor, b[off]))
            # Leading cell now holds the quotient coefficient.
            work[idx] = factor
    return work
def _poly_evaluate_jit(coeffs, values, results):  # pragma: no cover
    """Horner evaluation of coeffs at each entry of values, written into results."""
    for k in range(values.size):
        acc = coeffs[0]
        for j in range(1, coeffs.size):
            acc = ADD_UFUNC(coeffs[j], MULTIPLY_UFUNC(acc, values[k]))
        results[k] = acc
| [
"numpy.vectorize",
"numpy.sum",
"numpy.copy",
"numpy.isscalar",
"numpy.zeros",
"numpy.iinfo",
"numba.jit",
"numba.guvectorize",
"numpy.convolve",
"numpy.atleast_1d"
] | [((9290, 9322), 'numpy.zeros', 'np.zeros', (['(M, N)'], {'dtype': 'np.int64'}), '((M, N), dtype=np.int64)\n', (9298, 9322), True, 'import numpy as np\n'), ((9547, 9591), 'numpy.zeros', 'np.zeros', (['(a.size + b.size - 1)'], {'dtype': 'a.dtype'}), '(a.size + b.size - 1, dtype=a.dtype)\n', (9555, 9591), True, 'import numpy as np\n'), ((9868, 9878), 'numpy.copy', 'np.copy', (['a'], {}), '(a)\n', (9875, 9878), True, 'import numpy as np\n'), ((6769, 6785), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (6782, 6785), True, 'import numpy as np\n'), ((1507, 1593), 'numpy.vectorize', 'np.vectorize', (['cls._poly_evaluate_python'], {'excluded': "['coeffs']", 'otypes': '[np.object_]'}), "(cls._poly_evaluate_python, excluded=['coeffs'], otypes=[np.\n object_])\n", (1519, 1593), True, 'import numpy as np\n'), ((5488, 5505), 'numpy.convolve', 'np.convolve', (['a', 'b'], {}), '(a, b)\n', (5499, 5505), True, 'import numpy as np\n'), ((1873, 1935), 'numba.jit', 'numba.jit', (['"""int64[:,:](int64[:,:], int64[:,:])"""'], {'nopython': '(True)'}), "('int64[:,:](int64[:,:], int64[:,:])', nopython=True)\n", (1882, 1935), False, 'import numba\n'), ((1986, 2042), 'numba.jit', 'numba.jit', (['"""int64[:](int64[:], int64[:])"""'], {'nopython': '(True)'}), "('int64[:](int64[:], int64[:])', nopython=True)\n", (1995, 2042), False, 'import numba\n'), ((2098, 2154), 'numba.jit', 'numba.jit', (['"""int64[:](int64[:], int64[:])"""'], {'nopython': '(True)'}), "('int64[:](int64[:], int64[:])', nopython=True)\n", (2107, 2154), False, 'import numba\n'), ((2215, 2319), 'numba.guvectorize', 'numba.guvectorize', (['[(numba.int64[:], numba.int64[:], numba.int64[:])]', '"""(n),(m)->(m)"""'], {'nopython': '(True)'}), "([(numba.int64[:], numba.int64[:], numba.int64[:])],\n '(n),(m)->(m)', nopython=True)\n", (2232, 2319), False, 'import numba\n'), ((7954, 7979), 'numpy.sum', 'np.sum', (['(A[i, :] * B[:, j])'], {}), '(A[i, :] * B[:, j])\n', (7960, 7979), True, 'import numpy as np\n'), ((5696, 
5710), 'numpy.isscalar', 'np.isscalar', (['c'], {}), '(c)\n', (5707, 5710), True, 'import numpy as np\n'), ((5239, 5254), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (5247, 5254), True, 'import numpy as np\n')] |
from contextlib import ExitStack, redirect_stdout
import numpy as np
from mdp import MDP, FrozenLakeEnv
import sys
sys.path.append("..")
import grading
def submit_assigment(
        get_action_value,
        get_new_state_value,
        get_optimal_action,
        value_iteration,
        email,
        token,
        verbose=False):
    """Grade the MDP/value-iteration assignment functions and submit the answers.

    Args:
        get_action_value: callable (mdp, state_values, state, action, gamma) -> float.
        get_new_state_value: callable (mdp, state_values, state, gamma) -> float.
        get_optimal_action: callable (mdp, state_values, state, gamma) -> action.
        value_iteration: callable (mdp) -> dict of converged state values.
        email, token: grader submission credentials.
        verbose: when False, stdout produced while grading is suppressed.
    """
    grader = grading.Grader("EheZDOgLEeenIA4g5qPHFA")
    with ExitStack() as stack:
        if not verbose:
            # Silence any prints emitted by the graded functions.
            stack.enter_context(redirect_stdout(None))
        # Hand-crafted 5-state MDP used for the first few checks.
        transition_probs = {
            's0': {
                'a0': {'s1': 0.8, 's2': 0.2},
                'a1': {'s1': 0.2, 's2': 0.8},
            },
            's1': {
                'a0': {'s0': 0.2, 's2': 0.8},
                'a1': {'s0': 0.8, 's2': 0.2},
            },
            's2': {
                'a0': {'s3': 0.5, 's4': 0.5},
                'a1': {'s3': 1.0},
            },
            's3': {
                'a0': {'s1': 0.9, 's2': 0.1},
                'a1': {'s1': 0.7, 's2': 0.3},
            },
            's4': {
                'a0': {'s3': 1.0},
                'a1': {'s3': 0.7, 's1': 0.3},
            }
        }
        rewards = {
            's0': {'a0': {'s1': 0, 's2': 1}, 'a1': {'s1': 0, 's2': 1}},
            's1': {'a0': {'s0': -1, 's2': 1}, 'a1': {'s0': -1, 's2': 1}},
            's2': {'a0': {'s3': 0, 's4': 1}, 'a1': {'s3': 0, 's4': 1}},
            's3': {'a0': {'s1': -3, 's2': -3}, 'a1': {'s1': -3, 's2': -3}},
            's4': {'a1': {'s1': +10}}
        }
        mdp = MDP(transition_probs, rewards, initial_state='s0', seed=998244353)
        test_Vs = {s: i for i, s in enumerate(sorted(mdp.get_all_states()))}
        # --- Q(s, a) check
        qvalue1 = get_action_value(mdp, test_Vs, 's1', 'a0', 0.9)
        qvalue2 = get_action_value(mdp, test_Vs, 's4', 'a1', 0.9)
        grader.set_answer("F16dC", qvalue1 + qvalue2)
        # --- V(s) backup check
        svalue1 = get_new_state_value(mdp, test_Vs, 's2', 0.9)
        svalue2 = get_new_state_value(mdp, test_Vs, 's4', 0.9)
        grader.set_answer("72cBp", svalue1 + svalue2)
        # --- greedy-action check (all-zero state values)
        state_values = {s: 0 for s in mdp.get_all_states()}
        gamma = 0.9
        action1 = get_optimal_action(mdp, state_values, 's1', gamma)
        action2 = get_optimal_action(mdp, state_values, 's2', gamma)
        grader.set_answer("xIuti", action1 + action2)
        # --- reward statistics of the greedy policy on the toy MDP
        # NOTE: `rewards` is deliberately rebound from the dict above to a list.
        s = mdp.reset()
        rewards = []
        for _ in range(10000):
            s, r, _done, _ = mdp.step(get_optimal_action(mdp, state_values, s, gamma))
            rewards.append(r)
        grader.set_answer("Y8g0j", np.mean(rewards) + np.std(rewards))
        # --- value iteration on 4x4 FrozenLake
        mdp = FrozenLakeEnv(slip_chance=0.25, seed=998244353)
        state_values = value_iteration(mdp)
        gamma = 0.9
        total_rewards = []
        for _game in range(1000):
            s = mdp.reset()
            rewards = []
            for _t in range(100):
                s, r, done, _ = mdp.step(get_optimal_action(mdp, state_values, s, gamma))
                rewards.append(r)
                if done:
                    break
            total_rewards.append(np.sum(rewards))
        grader.set_answer("ABf1b", np.mean(total_rewards) + np.std(total_rewards))
        # --- value iteration on 8x8 FrozenLake
        mdp = FrozenLakeEnv(slip_chance=0.25, map_name='8x8', seed=998244353)
        state_values = value_iteration(mdp)
        gamma = 0.9
        total_rewards = []
        for _game in range(1000):
            s = mdp.reset()
            rewards = []
            for _t in range(100):
                s, r, done, _ = mdp.step(get_optimal_action(mdp, state_values, s, gamma))
                rewards.append(r)
                if done:
                    break
            total_rewards.append(np.sum(rewards))
        grader.set_answer("U3RzE", np.mean(total_rewards) + np.std(total_rewards))
    grader.submit(email, token)
| [
"sys.path.append",
"numpy.sum",
"numpy.std",
"grading.Grader",
"mdp.FrozenLakeEnv",
"contextlib.ExitStack",
"numpy.mean",
"contextlib.redirect_stdout",
"mdp.MDP"
] | [((116, 137), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (131, 137), False, 'import sys\n'), ((352, 392), 'grading.Grader', 'grading.Grader', (['"""EheZDOgLEeenIA4g5qPHFA"""'], {}), "('EheZDOgLEeenIA4g5qPHFA')\n", (366, 392), False, 'import grading\n'), ((403, 414), 'contextlib.ExitStack', 'ExitStack', ([], {}), '()\n', (412, 414), False, 'from contextlib import ExitStack, redirect_stdout\n'), ((1533, 1599), 'mdp.MDP', 'MDP', (['transition_probs', 'rewards'], {'initial_state': '"""s0"""', 'seed': '(998244353)'}), "(transition_probs, rewards, initial_state='s0', seed=998244353)\n", (1536, 1599), False, 'from mdp import MDP, FrozenLakeEnv\n'), ((2677, 2724), 'mdp.FrozenLakeEnv', 'FrozenLakeEnv', ([], {'slip_chance': '(0.25)', 'seed': '(998244353)'}), '(slip_chance=0.25, seed=998244353)\n', (2690, 2724), False, 'from mdp import MDP, FrozenLakeEnv\n'), ((3277, 3340), 'mdp.FrozenLakeEnv', 'FrozenLakeEnv', ([], {'slip_chance': '(0.25)', 'map_name': '"""8x8"""', 'seed': '(998244353)'}), "(slip_chance=0.25, map_name='8x8', seed=998244353)\n", (3290, 3340), False, 'from mdp import MDP, FrozenLakeEnv\n'), ((481, 502), 'contextlib.redirect_stdout', 'redirect_stdout', (['None'], {}), '(None)\n', (496, 502), False, 'from contextlib import ExitStack, redirect_stdout\n'), ((2611, 2627), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (2618, 2627), True, 'import numpy as np\n'), ((2630, 2645), 'numpy.std', 'np.std', (['rewards'], {}), '(rewards)\n', (2636, 2645), True, 'import numpy as np\n'), ((3146, 3161), 'numpy.sum', 'np.sum', (['rewards'], {}), '(rewards)\n', (3152, 3161), True, 'import numpy as np\n'), ((3199, 3221), 'numpy.mean', 'np.mean', (['total_rewards'], {}), '(total_rewards)\n', (3206, 3221), True, 'import numpy as np\n'), ((3224, 3245), 'numpy.std', 'np.std', (['total_rewards'], {}), '(total_rewards)\n', (3230, 3245), True, 'import numpy as np\n'), ((3762, 3777), 'numpy.sum', 'np.sum', (['rewards'], {}), '(rewards)\n', 
(3768, 3777), True, 'import numpy as np\n'), ((3815, 3837), 'numpy.mean', 'np.mean', (['total_rewards'], {}), '(total_rewards)\n', (3822, 3837), True, 'import numpy as np\n'), ((3840, 3861), 'numpy.std', 'np.std', (['total_rewards'], {}), '(total_rewards)\n', (3846, 3861), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Workflow for a multi-regional energy system
#
# In this application of the FINE framework, a multi-regional energy system is modeled and optimized.
#
# All classes which are available to the user are utilized and examples of the selection of different parameters within these classes are given.
#
# The workflow is structured as follows:
# 1. Required packages are imported and the input data path is set
# 2. An energy system model instance is created
# 3. Commodity sources are added to the energy system model
# 4. Commodity conversion components are added to the energy system model
# 5. Commodity storages are added to the energy system model
# 6. Commodity transmission components are added to the energy system model
# 7. Commodity sinks are added to the energy system model
# 8. The energy system model is optimized
# 9. Selected optimization results are presented
#
# 1. Import required packages and set input data path
import FINE as fn
import numpy as np
import pandas as pd
def test_miniSystem():
    """Build, optimize and check a two-location electrolyzer/industry system.

    The model buys electricity at one location, converts it to hydrogen via
    electrolyzers, stores/pipes it, and meets a fixed hydrogen demand at the
    industry location. The optimum is compared against known reference values.
    """
    numberOfTimeSteps = 4
    hoursPerTimeStep = 2190
    # Create an energy system model instance
    esM = fn.EnergySystemModel(locations={'ElectrolyzerLocation', 'IndustryLocation'},
                                commodities={'electricity', 'hydrogen'},
                                numberOfTimeSteps=numberOfTimeSteps,
                                commodityUnitsDict={'electricity': r'kW$_{el}$', 'hydrogen': r'kW$_{H_{2},LHV}$'},
                                hoursPerTimeStep=hoursPerTimeStep, costUnit='1 Euro',
                                lengthUnit='km',
                                verboseLogLevel=2)
    # (removed unused local `timeStepLength`; it was computed but never read)
    ### Buy electricity at the electricity market
    costs = pd.DataFrame([np.array([ 0.05, 0., 0.1, 0.051,]),np.array([0., 0., 0., 0.,])],
                         index = ['ElectrolyzerLocation', 'IndustryLocation']).T
    revenues = pd.DataFrame([np.array([ 0., 0.01, 0., 0.,]),np.array([0., 0., 0., 0.,])],
                            index = ['ElectrolyzerLocation', 'IndustryLocation']).T
    maxpurchase = pd.DataFrame([np.array([1e6, 1e6, 1e6, 1e6,]),np.array([0., 0., 0., 0.,])],
                               index = ['ElectrolyzerLocation', 'IndustryLocation']).T * hoursPerTimeStep
    esM.add(fn.Source(esM=esM, name='Electricity market', commodity='electricity',
                      hasCapacityVariable=False, operationRateMax = maxpurchase,
                      commodityCostTimeSeries = costs,
                      commodityRevenueTimeSeries = revenues,
                      ))  # eur/kWh
    ### Electrolyzers
    esM.add(fn.Conversion(esM=esM, name='Electroylzers', physicalUnit=r'kW$_{el}$',
                          commodityConversionFactors={'electricity':-1, 'hydrogen':0.7},
                          hasCapacityVariable=True,
                          investPerCapacity=500, # euro/kW
                          opexPerCapacity=500*0.025,
                          interestRate=0.08,
                          economicLifetime=10))
    ### Hydrogen filled somewhere
    esM.add(fn.Storage(esM=esM, name='Pressure tank', commodity='hydrogen',
                       hasCapacityVariable=True, capacityVariableDomain='continuous',
                       stateOfChargeMin=0.33,
                       investPerCapacity=0.5, # eur/kWh
                       interestRate=0.08,
                       economicLifetime=30))
    ### Hydrogen pipelines
    esM.add(fn.Transmission(esM=esM, name='Pipelines', commodity='hydrogen',
                            hasCapacityVariable=True,
                            investPerCapacity=0.177,
                            interestRate=0.08,
                            economicLifetime=40))
    ### Industry site
    demand = pd.DataFrame([np.array([0., 0., 0., 0.,]), np.array([6e3, 6e3, 6e3, 6e3,]),],
                          index = ['ElectrolyzerLocation', 'IndustryLocation']).T * hoursPerTimeStep
    esM.add(fn.Sink(esM=esM, name='Industry site', commodity='hydrogen', hasCapacityVariable=False,
                    operationRateFix = demand,
                    ))
    # 8. Optimize energy system model
    #esM.cluster(numberOfTypicalPeriods=4, numberOfTimeStepsPerPeriod=1)
    esM.optimize(timeSeriesAggregation=False, solver = 'glpk')
    # test if solve fits to the original results
    testresults = esM.componentModelingDict["SourceSinkModel"].operationVariablesOptimum.xs('Electricity market')
    np.testing.assert_array_almost_equal(testresults.values, [np.array([1.877143e+07, 3.754286e+07, 0.0, 1.877143e+07]),],decimal=-3)
    # test if the summary fits to the expected summary
    summary = esM.getOptimizationSummary("SourceSinkModel")
    # of cost
    np.testing.assert_almost_equal(summary.loc[('Electricity market','commodCosts','[1 Euro/a]'),'ElectrolyzerLocation'],
                                   costs['ElectrolyzerLocation'].mul(np.array([1.877143e+07, 3.754286e+07, 0.0, 1.877143e+07])).sum(), decimal=0)
    # and of revenues
    np.testing.assert_almost_equal(summary.loc[('Electricity market','commodRevenues','[1 Euro/a]'),'ElectrolyzerLocation'],
                                   revenues['ElectrolyzerLocation'].mul(np.array([1.877143e+07, 3.754286e+07, 0.0, 1.877143e+07])).sum(), decimal=0)
if __name__ == "__main__":
test_miniSystem() | [
"FINE.EnergySystemModel",
"FINE.Conversion",
"FINE.Source",
"FINE.Sink",
"FINE.Transmission",
"numpy.array",
"FINE.Storage"
] | [((1170, 1512), 'FINE.EnergySystemModel', 'fn.EnergySystemModel', ([], {'locations': "{'ElectrolyzerLocation', 'IndustryLocation'}", 'commodities': "{'electricity', 'hydrogen'}", 'numberOfTimeSteps': 'numberOfTimeSteps', 'commodityUnitsDict': "{'electricity': 'kW$_{el}$', 'hydrogen': 'kW$_{H_{2},LHV}$'}", 'hoursPerTimeStep': 'hoursPerTimeStep', 'costUnit': '"""1 Euro"""', 'lengthUnit': '"""km"""', 'verboseLogLevel': '(2)'}), "(locations={'ElectrolyzerLocation', 'IndustryLocation'},\n commodities={'electricity', 'hydrogen'}, numberOfTimeSteps=\n numberOfTimeSteps, commodityUnitsDict={'electricity': 'kW$_{el}$',\n 'hydrogen': 'kW$_{H_{2},LHV}$'}, hoursPerTimeStep=hoursPerTimeStep,\n costUnit='1 Euro', lengthUnit='km', verboseLogLevel=2)\n", (1190, 1512), True, 'import FINE as fn\n'), ((2390, 2593), 'FINE.Source', 'fn.Source', ([], {'esM': 'esM', 'name': '"""Electricity market"""', 'commodity': '"""electricity"""', 'hasCapacityVariable': '(False)', 'operationRateMax': 'maxpurchase', 'commodityCostTimeSeries': 'costs', 'commodityRevenueTimeSeries': 'revenues'}), "(esM=esM, name='Electricity market', commodity='electricity',\n hasCapacityVariable=False, operationRateMax=maxpurchase,\n commodityCostTimeSeries=costs, commodityRevenueTimeSeries=revenues)\n", (2399, 2593), True, 'import FINE as fn\n'), ((2741, 3007), 'FINE.Conversion', 'fn.Conversion', ([], {'esM': 'esM', 'name': '"""Electroylzers"""', 'physicalUnit': '"""kW$_{el}$"""', 'commodityConversionFactors': "{'electricity': -1, 'hydrogen': 0.7}", 'hasCapacityVariable': '(True)', 'investPerCapacity': '(500)', 'opexPerCapacity': '(500 * 0.025)', 'interestRate': '(0.08)', 'economicLifetime': '(10)'}), "(esM=esM, name='Electroylzers', physicalUnit='kW$_{el}$',\n commodityConversionFactors={'electricity': -1, 'hydrogen': 0.7},\n hasCapacityVariable=True, investPerCapacity=500, opexPerCapacity=500 * \n 0.025, interestRate=0.08, economicLifetime=10)\n", (2754, 3007), True, 'import FINE as fn\n'), ((3208, 3432), 
'FINE.Storage', 'fn.Storage', ([], {'esM': 'esM', 'name': '"""Pressure tank"""', 'commodity': '"""hydrogen"""', 'hasCapacityVariable': '(True)', 'capacityVariableDomain': '"""continuous"""', 'stateOfChargeMin': '(0.33)', 'investPerCapacity': '(0.5)', 'interestRate': '(0.08)', 'economicLifetime': '(30)'}), "(esM=esM, name='Pressure tank', commodity='hydrogen',\n hasCapacityVariable=True, capacityVariableDomain='continuous',\n stateOfChargeMin=0.33, investPerCapacity=0.5, interestRate=0.08,\n economicLifetime=30)\n", (3218, 3432), True, 'import FINE as fn\n'), ((3588, 3751), 'FINE.Transmission', 'fn.Transmission', ([], {'esM': 'esM', 'name': '"""Pipelines"""', 'commodity': '"""hydrogen"""', 'hasCapacityVariable': '(True)', 'investPerCapacity': '(0.177)', 'interestRate': '(0.08)', 'economicLifetime': '(40)'}), "(esM=esM, name='Pipelines', commodity='hydrogen',\n hasCapacityVariable=True, investPerCapacity=0.177, interestRate=0.08,\n economicLifetime=40)\n", (3603, 3751), True, 'import FINE as fn\n'), ((4080, 4196), 'FINE.Sink', 'fn.Sink', ([], {'esM': 'esM', 'name': '"""Industry site"""', 'commodity': '"""hydrogen"""', 'hasCapacityVariable': '(False)', 'operationRateFix': 'demand'}), "(esM=esM, name='Industry site', commodity='hydrogen',\n hasCapacityVariable=False, operationRateFix=demand)\n", (4087, 4196), True, 'import FINE as fn\n'), ((4651, 4702), 'numpy.array', 'np.array', (['[18771430.0, 37542860.0, 0.0, 18771430.0]'], {}), '([18771430.0, 37542860.0, 0.0, 18771430.0])\n', (4659, 4702), True, 'import numpy as np\n'), ((1858, 1891), 'numpy.array', 'np.array', (['[0.05, 0.0, 0.1, 0.051]'], {}), '([0.05, 0.0, 0.1, 0.051])\n', (1866, 1891), True, 'import numpy as np\n'), ((1893, 1923), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (1901, 1923), True, 'import numpy as np\n'), ((2036, 2067), 'numpy.array', 'np.array', (['[0.0, 0.01, 0.0, 0.0]'], {}), '([0.0, 0.01, 0.0, 0.0])\n', (2044, 2067), True, 'import numpy as np\n'), 
((2067, 2097), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (2075, 2097), True, 'import numpy as np\n'), ((2213, 2267), 'numpy.array', 'np.array', (['[1000000.0, 1000000.0, 1000000.0, 1000000.0]'], {}), '([1000000.0, 1000000.0, 1000000.0, 1000000.0])\n', (2221, 2267), True, 'import numpy as np\n'), ((2245, 2275), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (2253, 2275), True, 'import numpy as np\n'), ((3909, 3939), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (3917, 3939), True, 'import numpy as np\n'), ((3938, 3980), 'numpy.array', 'np.array', (['[6000.0, 6000.0, 6000.0, 6000.0]'], {}), '([6000.0, 6000.0, 6000.0, 6000.0])\n', (3946, 3980), True, 'import numpy as np\n'), ((5020, 5071), 'numpy.array', 'np.array', (['[18771430.0, 37542860.0, 0.0, 18771430.0]'], {}), '([18771430.0, 37542860.0, 0.0, 18771430.0])\n', (5028, 5071), True, 'import numpy as np\n'), ((5292, 5343), 'numpy.array', 'np.array', (['[18771430.0, 37542860.0, 0.0, 18771430.0]'], {}), '([18771430.0, 37542860.0, 0.0, 18771430.0])\n', (5300, 5343), True, 'import numpy as np\n')] |
#########################################################################################################################
## Distribution code Version 1.0 -- 14/10/2021 by <NAME> Copyright 2021, University of Siegen
##
## The Code is created based on the method described in the following paper
## [1] "Deep Optimization Prior for THz Model Parameter Estimation", <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
## Winter Conference on Applications of Computer Vision (WACV) 2022.
##
## If you use this code in your scientific publication, please cite the mentioned paper.
##  The code and the algorithm are for non-commercial use only.
##
## For other details, please visit website https://github.com/tak-wong/Deep-Optimization-Prior
#########################################################################################################################
import torch
import numpy as np
import matplotlib.pyplot as plt
# Detect whether IPython display utilities are available (e.g. running in Jupyter).
global JUPYTER_AVAIL
try:
    from IPython.display import clear_output
    JUPYTER_AVAIL = True
except ImportError:
    # Narrowed from a bare `except:`; a missing IPython raises ImportError and
    # a bare clause would also have swallowed KeyboardInterrupt/SystemExit.
    JUPYTER_AVAIL = False
from .. import decoder_model
class util_plotter:
# -----------------------------------------------------------------------
def __init__(self, dir_log, model_optimizer):
super(util_plotter, self).__init__()
self.model_optimizer = model_optimizer
self.dir_log = dir_log
self.DEFAULT_COL = 2
self.plot_close_all()
# -----------------------------------------------------------------------
def convertTodB(self, value):
if value is None:
return None
else:
v = np.array(value)
v = np.where(v > 0.00000001, v, -8)
np.log10(v, out = v, where = v > 0)
return 10.0 * v
# -----------------------------------------------------------------------
    def __create_figure(self, clear_plot = True, subfig_num_row = 1, subfig_num_col = 2, subfig_height = 6,subfig_width = 8):
        """Create a grid of sub-figures and return ``(fig, axs)``.

        Args:
            clear_plot: when True and running under Jupyter, clear the cell output first.
            subfig_num_row, subfig_num_col: grid layout of the sub-figures.
            subfig_height, subfig_width: size of each sub-figure in inches.
        """
        if clear_plot and JUPYTER_AVAIL:
            clear_output(wait=True)
        # total size of figure
        fig_size_vertical = subfig_height * subfig_num_row
        fig_size_horizontal = subfig_width * subfig_num_col
        fig, axs = plt.subplots(nrows=subfig_num_row, ncols=subfig_num_col, figsize=(fig_size_horizontal, fig_size_vertical))
        fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.3)
        return fig, axs
# -----------------------------------------------------------------------
def __plot_loss_one(self, ax, epochs, loss_train, loss_valid, is_decibel = False):
if (is_decibel):
ln1 = ax.plot(epochs, loss_train, color='C0', label='train loss: {:.16f} dB'.format(loss_train[-1]))
ax.set_ylabel('loss (dB)', fontsize=16, color='C0')
ax.set_title('loss (dB)', fontsize=16)
if loss_valid is not None:
ln2 = ax.plot(epochs, loss_valid, color='C1', label='valid loss: {:.16f} dB'.format(loss_valid[-1]))
else:
ln2 = None
else:
ln1 = ax.plot(epochs, loss_train, color='C0', label='train loss: {:.16f}'.format(loss_train[-1]))
ax.set_ylabel('loss', fontsize=16, color='C0')
ax.set_title('loss', fontsize=16)
if loss_valid is not None:
ln2 = ax.plot(epochs, loss_valid, color='C1', label='valid loss: {:.16f}'.format(loss_valid[-1]))
else:
ln2 = None
ax.set_xlabel('epoch', fontsize=16)
if len(epochs) > 1: ax.set_xlim(epochs[0], epochs[-1])
ax.tick_params(axis='y', labelcolor='C0')
ax.grid()
if ln2 is None:
lns = ln1
else:
lns = ln1 + ln2
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, loc=0)
# -----------------------------------------------------------------------
    def plot_close_all(self):
        """Close every open matplotlib figure to free memory between plots."""
        plt.close('all')
# -----------------------------------------------------------------------
def plot_loss_all(self, epochs, loss_train, loss_valid, clear_plot = False):
fig, axs = self.__create_figure(clear_plot = clear_plot)
axs_left = axs[0]
self.__plot_loss_one(axs_left, epochs, loss_train, loss_valid)
axs_right = axs[1]
self.__plot_loss_one(axs_right, epochs, self.convertTodB(loss_train), self.convertTodB(loss_valid), True)
fig.tight_layout()
plt.show()
return fig, axs
# -----------------------------------------------------------------------
def plot_learning_rate_all(self, epochs, lr, reg = None, clear_plot = False):
fig, axs = self.__create_figure(clear_plot = clear_plot)
axs_left = axs[0]
axs_left.plot(epochs, lr, color='C0', label='learning rate: {:.6E}'.format(lr[-1]))
axs_left.set_xlabel('epoch', fontsize=16)
axs_left.set_ylabel('learning rate', fontsize=16)
axs_left.set_title('learning rate', fontsize=16)
if len(epochs) > 1: axs_left.set_xlim(epochs[0], epochs[-1])
axs_left.legend()
axs_left.grid()
if (reg is not None):
axs_right = axs[1]
axs_right.plot(epochs, reg, color='C0', label='Reg: {:.6E}'.format(reg[-1]))
axs_right.set_xlabel('epoch', fontsize=16)
axs_right.set_ylabel('Reg', fontsize=16)
axs_right.set_title('regularizer', fontsize=16)
if len(epochs) > 1: axs_right.set_xlim(epochs[0], epochs[-1])
axs_right.legend()
axs_right.grid()
fig.tight_layout()
plt.show()
return fig, axs
# -----------------------------------------------------------------------
def plot_weights_all(self, epochs, matrix_weights, matrix_losses, clear_plot = False):
fig, axs = self.__create_figure(clear_plot = clear_plot)
eps = np.expand_dims(np.asarray(epochs), axis=0)
weights = matrix_weights[:, np.asarray(epochs)-1]
losses = matrix_losses[:, np.asarray(epochs)-1]
eps = np.repeat(eps, weights.shape[0], axis=0)
axs_left = axs[0]
if (weights.shape[-1] > 1):
axs_left.plot(np.transpose(eps, axes=(1, 0)), np.transpose(weights.cpu().detach(), axes=(1, 0)))
else:
axs_left.plot(eps, weights.cpu().detach())
axs_left.set_xlabel('epoch', fontsize=16)
axs_left.set_ylabel('weights', fontsize=16)
axs_left.set_title('weights', fontsize=16)
if len(epochs) > 1: axs_left.set_xlim(epochs[0], epochs[-1])
axs_left.grid()
axs_right = axs[1]
if (losses.shape[-1] > 1):
axs_right.plot(np.transpose(eps, axes=(1, 0)), np.transpose(losses.cpu().detach(), axes=(1, 0)))
else:
axs_right.plot(eps, weights.cpu().detach())
axs_right.set_xlabel('epoch', fontsize=16)
axs_right.set_ylabel('losses', fontsize=16)
axs_right.set_title('losses', fontsize=16)
if len(epochs) > 1: axs_right.set_xlim(epochs[0], epochs[-1])
axs_right.grid()
fig.tight_layout()
plt.show()
return fig, axs
# -----------------------------------------------------------------------
    def plot_parameters(self, p, clear_plot = False):
        """Show each entry of the parameter dictionary `p` as an image in a grid.

        Grid width is DEFAULT_COL; row count grows to fit all parameters.
        """
        # p is the parameter dictionary
        num_p = len(p)
        subfig_num_col = self.DEFAULT_COL
        subfig_num_row = int(np.ceil(float(num_p) / float(subfig_num_col)))
        fig, axs = self.__create_figure(clear_plot = clear_plot, subfig_num_row = subfig_num_row, subfig_num_col = subfig_num_col)
        for index, (p_name, p_value) in enumerate(p.items()):
            pos = np.unravel_index(index, (subfig_num_row, subfig_num_col), order='C') # position of image
            if torch.is_tensor(p_value):
                img = axs[pos].imshow( p_value.cpu().detach(), cmap='viridis' )
            elif type(p_value) is np.ndarray:
                img = axs[pos].imshow( p_value, cmap='viridis' )
            # NOTE(review): if p_value is neither a tensor nor an ndarray, `img`
            # is undefined (or stale from a previous iteration) here -- confirm
            # that callers only pass tensors/arrays.
            fig.colorbar(img, ax=axs[pos], orientation='vertical')
            axs[pos].set_title(p_name, fontsize=16)
        fig.tight_layout()
        plt.show()
        return fig, axs
# -----------------------------------------------------------------------
def plot_loss_map(self, loss_map, clear_plot = False):
fig, axs = self.__create_figure(clear_plot = clear_plot)
axs_left = axs[0]
if torch.is_tensor(loss_map):
img = axs_left.imshow( loss_map.cpu().detach(), cmap='viridis' )
else:
img = axs_left.imshow( loss_map, cmap='viridis' )
fig.colorbar(img, ax=axs_left, orientation='vertical')
axs_left.set_title("loss", fontsize=16)
axs_right = axs[1]
if torch.is_tensor(loss_map):
img = axs_right.imshow( self.convertTodB(loss_map.cpu().detach()), cmap='viridis' )
else:
img = axs_right.imshow( self.convertTodB(loss_map), cmap='viridis' )
fig.colorbar(img, ax=axs_right, orientation='vertical')
axs_right.set_title("loss (in dB)", fontsize=16)
fig.tight_layout()
plt.show()
return fig, axs
# -----------------------------------------------------------------------
    def plot_pixel(self, axis_base, data_left, model_left, data_right, model_right, legend_left, legend_right, str_title="", clear_plot = False):
        """Overlay measured data and model fit for one pixel on up to two panels.

        The right panel is drawn only when data_right is not None; the legend
        strings are tagged with legend_left / legend_right respectively.
        """
        fig, axs = self.__create_figure(clear_plot = clear_plot)
        axs_left = axs[0]
        ln1 = axs_left.plot(axis_base, data_left, color='C0', label='data ({})'.format(legend_left))
        ln2 = axs_left.plot(axis_base, model_left, color='C1', label='model ({})'.format(legend_left))
        lns = ln1 + ln2
        labs = [l.get_label() for l in lns]
        axs_left.legend(lns, labs, loc=0)
        axs_left.set_xlim(axis_base[0], axis_base[-1])
        axs_left.grid()
        axs_left.set_title(str_title, fontsize=16)
        if data_right is not None:
            axs_right = axs[1]
            ln1 = axs_right.plot(axis_base, data_right, color='C0', label='data ({})'.format(legend_right))
            ln2 = axs_right.plot(axis_base, model_right, color='C1', label='model ({})'.format(legend_right))
            lns = ln1 + ln2
            labs = [l.get_label() for l in lns]
            axs_right.legend(lns, labs, loc=0)
            axs_right.set_xlim(axis_base[0], axis_base[-1])
            axs_right.grid()
            axs_right.set_title(str_title, fontsize=16)
        fig.tight_layout()
        plt.show()
        return fig, axs
# -----------------------------------------------------------------------
def plot_kernel(self, kernel, clear_plot = False):
fig, axs = self.__create_figure(clear_plot = clear_plot)
kernel_np = None
if torch.is_tensor(kernel):
kernel_np = kernel.cpu().detach()
elif type(kernel) is np.ndarray:
kernel_np = kernel
axs_left = axs[0]
img = axs_left.imshow( kernel_np, cmap='viridis' )
fig.colorbar(img, ax=axs_left, orientation='vertical')
axs_left.set_title("kernel", fontsize=16)
axs_right = axs[1]
kernel_db = self.convertTodB(kernel_np)
img = axs_right.imshow( kernel_db, cmap='viridis' )
fig.colorbar(img, ax=axs_right, orientation='vertical')
axs_right.set_title("kernel (dB)", fontsize=16)
fig.tight_layout()
plt.show()
return fig, axs | [
"matplotlib.pyplot.show",
"IPython.display.clear_output",
"matplotlib.pyplot.close",
"numpy.asarray",
"numpy.transpose",
"numpy.unravel_index",
"numpy.where",
"numpy.array",
"numpy.log10",
"torch.is_tensor",
"matplotlib.pyplot.subplots",
"numpy.repeat"
] | [((2262, 2373), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'subfig_num_row', 'ncols': 'subfig_num_col', 'figsize': '(fig_size_horizontal, fig_size_vertical)'}), '(nrows=subfig_num_row, ncols=subfig_num_col, figsize=(\n fig_size_horizontal, fig_size_vertical))\n', (2274, 2373), True, 'import matplotlib.pyplot as plt\n'), ((4009, 4025), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4018, 4025), True, 'import matplotlib.pyplot as plt\n'), ((4535, 4545), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4543, 4545), True, 'import matplotlib.pyplot as plt\n'), ((5697, 5707), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5705, 5707), True, 'import matplotlib.pyplot as plt\n'), ((6176, 6216), 'numpy.repeat', 'np.repeat', (['eps', 'weights.shape[0]'], {'axis': '(0)'}), '(eps, weights.shape[0], axis=0)\n', (6185, 6216), True, 'import numpy as np\n'), ((7239, 7249), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7247, 7249), True, 'import matplotlib.pyplot as plt\n'), ((8278, 8288), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8286, 8288), True, 'import matplotlib.pyplot as plt\n'), ((8563, 8588), 'torch.is_tensor', 'torch.is_tensor', (['loss_map'], {}), '(loss_map)\n', (8578, 8588), False, 'import torch\n'), ((8901, 8926), 'torch.is_tensor', 'torch.is_tensor', (['loss_map'], {}), '(loss_map)\n', (8916, 8926), False, 'import torch\n'), ((9276, 9286), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9284, 9286), True, 'import matplotlib.pyplot as plt\n'), ((10700, 10710), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10708, 10710), True, 'import matplotlib.pyplot as plt\n'), ((10992, 11015), 'torch.is_tensor', 'torch.is_tensor', (['kernel'], {}), '(kernel)\n', (11007, 11015), False, 'import torch\n'), ((11662, 11672), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11670, 11672), True, 'import matplotlib.pyplot as plt\n'), ((1662, 1677), 'numpy.array', 
'np.array', (['value'], {}), '(value)\n', (1670, 1677), True, 'import numpy as np\n'), ((1694, 1720), 'numpy.where', 'np.where', (['(v > 1e-08)', 'v', '(-8)'], {}), '(v > 1e-08, v, -8)\n', (1702, 1720), True, 'import numpy as np\n'), ((1738, 1769), 'numpy.log10', 'np.log10', (['v'], {'out': 'v', 'where': '(v > 0)'}), '(v, out=v, where=v > 0)\n', (1746, 1769), True, 'import numpy as np\n'), ((2068, 2091), 'IPython.display.clear_output', 'clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (2080, 2091), False, 'from IPython.display import clear_output\n'), ((6010, 6028), 'numpy.asarray', 'np.asarray', (['epochs'], {}), '(epochs)\n', (6020, 6028), True, 'import numpy as np\n'), ((7802, 7870), 'numpy.unravel_index', 'np.unravel_index', (['index', '(subfig_num_row, subfig_num_col)'], {'order': '"""C"""'}), "(index, (subfig_num_row, subfig_num_col), order='C')\n", (7818, 7870), True, 'import numpy as np\n'), ((7906, 7930), 'torch.is_tensor', 'torch.is_tensor', (['p_value'], {}), '(p_value)\n', (7921, 7930), False, 'import torch\n'), ((6306, 6336), 'numpy.transpose', 'np.transpose', (['eps'], {'axes': '(1, 0)'}), '(eps, axes=(1, 0))\n', (6318, 6336), True, 'import numpy as np\n'), ((6802, 6832), 'numpy.transpose', 'np.transpose', (['eps'], {'axes': '(1, 0)'}), '(eps, axes=(1, 0))\n', (6814, 6832), True, 'import numpy as np\n'), ((6083, 6101), 'numpy.asarray', 'np.asarray', (['epochs'], {}), '(epochs)\n', (6093, 6101), True, 'import numpy as np\n'), ((6139, 6157), 'numpy.asarray', 'np.asarray', (['epochs'], {}), '(epochs)\n', (6149, 6157), True, 'import numpy as np\n')] |
from flask import Flask, make_response, request, render_template
import io
import os
import csv
import pickle
import pandas as pd
from sklearn.preprocessing import LabelEncoder
import numpy as np
app = Flask(__name__)
# Pre-trained regressor loaded once at import time; the prediction routes
# reuse it for every request.
# BUGFIX: the original opened model.pkl without ever closing the handle; use a
# context manager so the file is released immediately after loading.
with open(r'model.pkl', 'rb') as _model_file:
    model = pickle.load(_model_file)
# Base Route
@app.route('/')
def hello():
    """Serve the BigMart landing page."""
    template_name = 'bigmart.html'
    return render_template(template_name)
# Prediction for dataset
@app.route('/predict_for_set', methods=['POST'])
def predict_for_set():
    """Predict sales for every row of an uploaded CSV and report the totals.

    Expects a multipart upload under the 'file' key using the BigMart schema;
    renders bigmart.html with total unit sales and revenue (in $ millions).
    """
    file = request.files.get('file')
    df = pd.read_csv(file)
    # check for categorical attributes
    cat_col = []
    for x in df.dtypes.index:
        if df.dtypes[x] == 'object':
            cat_col.append(x)
    cat_col.remove('Item_Identifier')
    cat_col.remove('Outlet_Identifier')
    # fill missing Item_Weight with the per-item mean (global mean as fallback)
    item_weight_mean = df.pivot_table(values="Item_Weight", index='Item_Identifier')
    miss_bool = df['Item_Weight'].isnull()
    for i, item in enumerate(df['Item_Identifier']):
        if miss_bool[i]:
            # BUGFIX: `item in item_weight_mean` tested the DataFrame's
            # *columns*, so the per-item mean branch never ran; membership
            # must be tested against the index.
            if item in item_weight_mean.index:
                # BUGFIX: chained indexing (df['Item_Weight'][i] = ...) can
                # silently write to a temporary; use .loc instead.
                df.loc[i, 'Item_Weight'] = item_weight_mean.loc[item]['Item_Weight']
            else:
                df.loc[i, 'Item_Weight'] = np.mean(df['Item_Weight'])
    # fill missing Outlet_Size with the modal size for that outlet type
    outlet_size_mode = df.pivot_table(values='Outlet_Size', columns='Outlet_Type', aggfunc=(lambda x: x.mode()[0]))
    miss_bool = df['Outlet_Size'].isnull()
    df.loc[miss_bool, 'Outlet_Size'] = df.loc[miss_bool,'Outlet_Type'].apply(lambda x: outlet_size_mode[x])
    # replace zeros with mean
    df.loc[:, 'Item_Visibility'].replace(
        [0], [df['Item_Visibility'].mean()], inplace=True)
    # combine item fat content
    df['Item_Fat_Content'] = df['Item_Fat_Content'].replace({'LF': 'Low Fat', 'reg': 'Regular', 'low fat': 'Low Fat'})
    df['Item_Fat_Content'].value_counts()
    # Creation of New Attributes
    df['New_Item_Type'] = df['Item_Identifier'].apply(lambda x: x[:2])
    df['New_Item_Type'] = df['New_Item_Type'].map({'FD': 'Food', 'NC': 'Non-Consumable', 'DR': 'Drinks'})
    df.loc[df['New_Item_Type'] == 'Non-Consumable','Item_Fat_Content'] = 'Non-Edible'
    # create small values for establishment year
    df['Outlet_Years'] = 2013 - df['Outlet_Establishment_Year']
    # label-encode the categorical columns
    le = LabelEncoder()
    df['Outlet'] = le.fit_transform(df['Outlet_Identifier'])
    cat_col = ['Item_Fat_Content', 'Item_Type', 'Outlet_Size','Outlet_Location_Type', 'Outlet_Type', 'New_Item_Type']
    for col in cat_col:
        df[col] = le.fit_transform(df[col])
    # Input Split
    X = df.drop(columns=['Outlet_Establishment_Year', 'Item_Identifier', 'Outlet_Identifier'])
    # Prediction
    output = model.predict(X).tolist()
    sales = sum(output)
    df['Item_Outlet_Sales'] = output
    df['Revenue'] = df['Item_Outlet_Sales']*df['Item_MRP']
    revenue = sum(df['Revenue'])
    revenue /= 1000000  # report revenue in $ millions
    return render_template('bigmart.html', pred1="The {} is the overall number of items that are expected to be sold from bigmart stores.".format(sales), pred2="The total revenue that should be generate is ${} million.".format(revenue))
# Prediction for single product
@app.route('/predict_for_one', methods=['POST'])
def predict_for_one():
    """Predict sales for a single product posted as an HTML form.

    The form fields mirror the BigMart CSV columns; the preprocessing below is
    a copy of predict_for_set applied to a one-row DataFrame.
    """
    d = None
    d = request.form.to_dict()
    df = pd.DataFrame([d.values()], columns=d.keys())
    df = df.infer_objects()
    # numeric columns arrive as strings from the form; coerce them explicitly
    df[['Item_Weight','Item_Visibility','Item_MRP','Outlet_Establishment_Year']] = df[['Item_Weight','Item_Visibility','Item_MRP','Outlet_Establishment_Year']].apply(pd.to_numeric)
    # Process dataframe as required
    # check for categorical attributes
    cat_col = []
    for x in df.dtypes.index:
        if df.dtypes[x] == 'object':
            cat_col.append(x)
    cat_col.remove('Item_Identifier')
    cat_col.remove('Outlet_Identifier')
    item_weight_mean = df.pivot_table( values="Item_Weight", index='Item_Identifier')
    miss_bool = df['Item_Weight'].isnull()
    for i, item in enumerate(df['Item_Identifier']):
        if miss_bool[i]:
            # NOTE(review): `item in item_weight_mean` tests the DataFrame's
            # columns, not its index, so this branch likely never runs; and the
            # chained assignment below may not write back -- confirm and fix
            # (see predict_for_set).
            if item in item_weight_mean:
                df['Item_Weight'][i] = item_weight_mean.loc[item]['Item_Weight']
            else:
                df['Item_Weight'][i] = np.mean(df['Item_Weight'])
    outlet_size_mode = df.pivot_table(values='Outlet_Size', columns='Outlet_Type', aggfunc=(lambda x: x.mode()[0]))
    miss_bool = df['Outlet_Size'].isnull()
    df.loc[miss_bool, 'Outlet_Size'] = df.loc[miss_bool,'Outlet_Type'].apply(lambda x: outlet_size_mode[x])
    # replace zeros with mean
    df.loc[:, 'Item_Visibility'].replace(
        [0], [df['Item_Visibility'].mean()], inplace=True)
    # combine item fat content
    df['Item_Fat_Content'] = df['Item_Fat_Content'].replace({'LF': 'Low Fat', 'reg': 'Regular', 'low fat': 'Low Fat'})
    df['Item_Fat_Content'].value_counts()
    # Creation of New Attributes
    df['New_Item_Type'] = df['Item_Identifier'].apply(lambda x: x[:2])
    df['New_Item_Type'] = df['New_Item_Type'].map({'FD': 'Food', 'NC': 'Non-Consumable', 'DR': 'Drinks'})
    df.loc[df['New_Item_Type'] == 'Non-Consumable','Item_Fat_Content'] = 'Non-Edible'
    # create small values for establishment year
    df['Outlet_Years'] = 2013 - df['Outlet_Establishment_Year']
    # NOTE(review): fitting a fresh LabelEncoder on a single row cannot
    # reproduce the encodings used at training time -- verify against the
    # training pipeline.
    le = LabelEncoder()
    df['Outlet'] = le.fit_transform(df['Outlet_Identifier'])
    cat_col = ['Item_Fat_Content', 'Item_Type', 'Outlet_Size','Outlet_Location_Type', 'Outlet_Type', 'New_Item_Type']
    for col in cat_col:
        df[col] = le.fit_transform(df[col])
    # Input Split
    X = df.drop(columns=['Outlet_Establishment_Year', 'Item_Identifier', 'Outlet_Identifier'])
    print(X)
    print(X.dtypes)
    # Prediction
    output = model.predict(X).tolist()
    sales = sum(output)
    df['Item_Outlet_Sales'] = output
    df['Revenue'] = df['Item_Outlet_Sales']*df['Item_MRP']
    revenue = sum(df['Revenue'])
    return render_template('bigmart.html', pred1='The {} units of this product are expected to be sold.'.format(sales), pred2='${} is the revenue that should be generated by selling this much units of selected product.'.format(revenue))
if __name__ == "__main__":
    # run the Flask development server (not for production use)
    app.run(debug = True)
| [
"flask.request.files.get",
"pandas.read_csv",
"flask.Flask",
"sklearn.preprocessing.LabelEncoder",
"numpy.mean",
"flask.request.form.to_dict",
"flask.render_template"
] | [((203, 218), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (208, 218), False, 'from flask import Flask, make_response, request, render_template\n'), ((319, 350), 'flask.render_template', 'render_template', (['"""bigmart.html"""'], {}), "('bigmart.html')\n", (334, 350), False, 'from flask import Flask, make_response, request, render_template\n'), ((461, 486), 'flask.request.files.get', 'request.files.get', (['"""file"""'], {}), "('file')\n", (478, 486), False, 'from flask import Flask, make_response, request, render_template\n'), ((496, 513), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (507, 513), True, 'import pandas as pd\n'), ((2166, 2180), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2178, 2180), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((3207, 3229), 'flask.request.form.to_dict', 'request.form.to_dict', ([], {}), '()\n', (3227, 3229), False, 'from flask import Flask, make_response, request, render_template\n'), ((5181, 5195), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (5193, 5195), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1131, 1157), 'numpy.mean', 'np.mean', (["df['Item_Weight']"], {}), "(df['Item_Weight'])\n", (1138, 1157), True, 'import numpy as np\n'), ((4146, 4172), 'numpy.mean', 'np.mean', (["df['Item_Weight']"], {}), "(df['Item_Weight'])\n", (4153, 4172), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Software License Agreement (MIT License)
#
# Copyright (c) 2020, tri_star
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: <NAME>, <NAME>
import os
import re
import shutil
import glob
import json
import numpy as np
import rospy
from tri_star import transformation_util
DIRNAME_ARCHIVE = "archive_{}"
"""
manage files/dirs
"""
def get_filenames_in_dir(dir_path, ext=None):
    """Return the base names of the entries directly inside dir_path.

    :param dir_path: directory to list
    :param ext: optional extension (without the dot) to filter by
    """
    # BUGFIX: the original appended "/*" only when dir_path did NOT end with a
    # slash, so a trailing-slash path globbed the directory itself and the
    # function returned [""]. os.path.join handles both forms correctly.
    if ext:
        pattern = os.path.join(dir_path, "*." + ext)
    else:
        pattern = os.path.join(dir_path, "*")
    return [os.path.basename(p) for p in glob.glob(pattern)]
def get_dirnames_in_dir(dir_path):
    """Return the base names of the sub-directories directly inside dir_path."""
    return [os.path.basename(path)
            for path in get_all_in_dir(dir_path)
            if os.path.isdir(path)]
def get_all_in_dir(dir_path):
    """Return absolute paths of every entry (files and dirs) directly inside dir_path."""
    entries = glob.glob(dir_path + "/*")
    return [os.path.abspath(entry) for entry in entries]
def archive(base_data_dir):
    """Move everything currently in base_data_dir into a fresh archive_<n> directory.

    Existing archive_<n> directories are left in place. If the directory is
    empty or contains only archives, nothing happens.
    """
    archive_name_re = get_re_from_file_name(DIRNAME_ARCHIVE)
    # if nothing to archive, then do not do anything
    # BUGFIX: the original searched for the raw template "archive_{}" inside
    # the full path, which never matches a real archive directory name, so a
    # new (possibly empty) archive was created even when only archives exist;
    # match the anchored archive regex against the base name instead.
    to_archive = False
    for name in get_all_in_dir(base_data_dir):
        if re.search(archive_name_re, os.path.basename(name)) is None:  # found a non-archive entry
            to_archive = True
            break
    if not to_archive:
        return
    archive_index = get_index_in_dir(base_data_dir, archive_name_re)
    archive_dir_path = os.path.join(base_data_dir, DIRNAME_ARCHIVE.format(archive_index))
    create_dir(archive_dir_path)
    for content in get_all_in_dir(base_data_dir):
        if re.search(archive_name_re, os.path.basename(content)) is None:  # not one of the archives
            shutil.move(content, archive_dir_path)
# the default is either a directory with a number as the name, like 1,2,3
# or a file with the name 1.txt, 2.subl, etc.
def get_index_in_dir(dir_path, file_name_template="^[0-9]+$|^[0-9]+\.\S+$"):
    """Return the next unused numeric index among file names matching the template."""
    used_index = []
    for file_name in get_filenames_in_dir(dir_path):
        if re.search(file_name_template, file_name) is None:
            continue  # name does not match the expected pattern
        # first run of digits in the matching name is the index
        number = str_to_int(re.findall("[0-9]+", file_name)[0])
        if number is not None:
            used_index.append(number)
    if not used_index:
        return 1
    return max(used_index) + 1
def get_re_from_file_name(filename):
    """Turn a '{}'-style file-name template into an anchored regex string."""
    pattern = filename.replace("{}", "[0-9]+")
    return "^{}$".format(pattern)
# create all the directories on the path
def create_dir(dir_path):
    """Create dir_path and any missing parents; no-op if it already is a directory."""
    # https://stackoverflow.com/questions/273192/how-can-i-safely-create-a-nested-directory
    if os.path.isdir(dir_path):
        return
    try:
        os.makedirs(dir_path)
    except OSError:
        # creation failed: re-raise only if the path is still not a directory
        if not os.path.isdir(dir_path):
            raise Exception("{} is not a directory path".format(dir_path))
"""
i/o related
"""
# https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable
class ToolUseEncoder(json.JSONEncoder):
    """JSON encoder handling numpy scalars/arrays and AngleGroup objects.

    Based on https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable
    """
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        # BUGFIX: np.bool was removed in NumPy 1.24 (it aliased the builtin
        # bool, which json already serializes); np.bool_ is the actual numpy
        # boolean scalar type.
        elif isinstance(obj, np.bool_):
            return bool(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, transformation_util.AngleGroup):
            return obj.to_json()
        return json.JSONEncoder.default(self, obj)
# variable type
#TYPE_STR = "string"
#TYPE_MATRIX = "matrix"
#TYPE_ARRAY = "array"
#TYPE_LIST = "list"
#TYPE_NESTED_LIST = "nested_list"
#TYPE_INT = "int"
#TYPE_FLOAT = "float"
TYPE_LIST = "list"
TYPE_NUMPY = "numpy"
TYPE_ANGLEGROUP = "anglegroup"
def str_to_int(string):
    """Parse a string into an int; return None for 'None' or unparsable input."""
    text = string.strip()
    if text == "None":
        return None
    try:
        return int(text)
    except ValueError:
        return None
def str_to_float(string):
    """Parse a string into a float; return None for 'None' or unparsable input."""
    text = string.strip()
    if text == "None":
        return None
    try:
        return float(text)
    except ValueError:
        return None
def str_to_npmatrix(string): # e.g., "[[1 2] [3 5]]"
    """Parse a space-separated, bracket-nested matrix string into a numpy array.

    Inverse of nparray_to_str for nested lists; returns None for "None".
    The parser walks the string character by character, tracking the stack of
    currently-open lists in `lists` and accumulating digits in `number_str`.
    """
    string = string.strip()
    if string == "None":
        return None
    value = None          # root list, becomes the final array
    current_list = None   # innermost list currently being filled
    lists = []            # stack of open (unclosed) lists
    number_str = ""       # digits of the number currently being read
    for char in string:
        if char == "[":
            # open a new nesting level; the first '[' creates the root
            if value is None:
                value = []
                current_list = value
                lists.append(current_list)
            else:
                new_list = []
                current_list.append(new_list)
                current_list = new_list
                lists.append(current_list)
        elif char == " ":
            # a space terminates the number being accumulated (if any)
            if number_str == "":
                pass
            else:
                number = str_to_float(number_str)
                current_list.append(number)
                number_str = ""
        elif char == "]":
            # flush a trailing number, then pop back to the parent list
            if number_str != "":
                number = str_to_float(number_str)
                current_list.append(number)
                number_str = ""
            lists.pop()
            if len(lists) != 0:
                current_list = lists[-1]
        else:
            number_str += char
    return np.array(value)
def str_to_nparray(string): # e.g., specifically numpy array with shape (n,), like [1 2 3]
    """Parse a flat array string such as '[1 2 3]' into a 1-D numpy array."""
    stripped = string.strip().replace("[", "").replace("]", "")
    return np.array([str_to_float(token) for token in stripped.split()])
def nparray_to_str(matrix):
    """Serialize a numpy array as its nested-list repr with commas and newlines removed."""
    as_list = matrix.tolist()
    return str(as_list).replace(",", " ").replace("\n", "")
def variable_to_string_no_name(variable, variable_collection_type=None):
    """Serialize a scalar, list, or 2-level nested list to the save-file format.

    numpy arrays are rendered via nparray_to_str; list-like collections are
    prefixed with their length, one element per line.
    """
    content = ""
    if variable_collection_type is None:
        if isinstance(variable, np.ndarray):
            content += nparray_to_str(variable) + "\n"
        else:
            variable = str(variable).replace("\n", "")
            content += str(variable) + "\n"
    elif variable_collection_type == TYPE_LIST:
        content += str(len(variable)) + "\n"
        for element in variable:
            if isinstance(element, np.ndarray):
                content += nparray_to_str(element) + "\n"
            else:
                element = str(element).replace("\n", "")
                content += str(element) + "\n"
    # BUGFIX: the TYPE_NESTED_LIST constant is commented out at module level,
    # so referencing it here raised NameError; compare the literal tag instead.
    elif variable_collection_type == "nested_list": # 2 layers
        content += str(len(variable)) + "\n"
        for element in variable:
            content += variable_to_string_no_name(element, TYPE_LIST)
    return content
# for saving purposes
def variable_to_string(name, variable, variable_collection_type=None):
    """Serialize a named variable: the name on its own line, then its value(s)."""
    body = variable_to_string_no_name(variable, variable_collection_type)
    return name + "\n" + body
# variable is a str
def convert_variable(variable, variable_type):
    """Parse the string form of a saved variable back into the requested type.

    :param variable: the raw string read from a save file
    :param variable_type: one of "string", "int", "float", "matrix", "array"
    :return: the parsed value, or None when the string is "None"
    """
    variable = variable.strip()
    # BUGFIX: the TYPE_STR/TYPE_INT/TYPE_FLOAT/TYPE_MATRIX/TYPE_ARRAY
    # constants are commented out at module level, so the original
    # comparisons raised NameError; compare the literal tags instead.
    if variable == "None":
        variable = None
    elif variable_type == "string":
        variable = variable
    elif variable_type == "int":
        variable = str_to_int(variable)
    elif variable_type == "float":
        variable = str_to_float(variable)
    elif variable_type == "matrix":
        variable = str_to_npmatrix(variable)
    elif variable_type == "array":
        variable = str_to_nparray(variable)
    return variable
def read_variable(file_path, name, variable_type=None, variable_collection_type=None):
    """Read variable `name` from the JSON file at file_path and coerce its type.

    :param variable_type: TYPE_NUMPY or TYPE_ANGLEGROUP (None keeps the raw JSON value)
    :param variable_collection_type: TYPE_LIST to convert each element instead
    """
    json_result = {}
    with open(file_path, "r") as read_file:
        json_result = json.load(read_file)
    variable = json_result[name]
    if variable_collection_type == TYPE_LIST:
        # convert element-wise; non-numpy element types are returned as-is
        if variable_type == TYPE_NUMPY:
            for i in range(len(variable)):
                variable[i] = np.array(variable[i])
    else:
        if variable_type == TYPE_NUMPY:
            variable = np.array(variable)
        elif variable_type == TYPE_ANGLEGROUP:
            variable = transformation_util.AngleGroup.from_json(variable)
    return variable
| [
"os.path.abspath",
"json.load",
"os.makedirs",
"os.path.basename",
"os.path.isdir",
"tri_star.transformation_util.AngleGroup.from_json",
"re.findall",
"numpy.array",
"re.search",
"glob.glob",
"shutil.move",
"os.path.join",
"json.JSONEncoder.default"
] | [((2450, 2495), 'os.path.join', 'os.path.join', (['base_data_dir', 'archive_dir_name'], {}), '(base_data_dir, archive_dir_name)\n', (2462, 2495), False, 'import os\n'), ((6513, 6528), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (6521, 6528), True, 'import numpy as np\n'), ((6817, 6832), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (6825, 6832), True, 'import numpy as np\n'), ((1580, 1599), 'os.path.basename', 'os.path.basename', (['i'], {}), '(i)\n', (1596, 1599), False, 'import os\n'), ((1780, 1799), 'os.path.basename', 'os.path.basename', (['i'], {}), '(i)\n', (1796, 1799), False, 'import os\n'), ((1861, 1879), 'os.path.abspath', 'os.path.abspath', (['i'], {}), '(i)\n', (1876, 1879), False, 'import os\n'), ((3823, 3844), 'os.makedirs', 'os.makedirs', (['dir_path'], {}), '(dir_path)\n', (3834, 3844), False, 'import os\n'), ((4584, 4619), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (4608, 4619), False, 'import json\n'), ((8878, 8898), 'json.load', 'json.load', (['read_file'], {}), '(read_file)\n', (8887, 8898), False, 'import json\n'), ((1609, 1628), 'glob.glob', 'glob.glob', (['dir_path'], {}), '(dir_path)\n', (1618, 1628), False, 'import glob\n'), ((1750, 1766), 'os.path.isdir', 'os.path.isdir', (['i'], {}), '(i)\n', (1763, 1766), False, 'import os\n'), ((1889, 1915), 'glob.glob', 'glob.glob', (["(dir_path + '/*')"], {}), "(dir_path + '/*')\n", (1898, 1915), False, 'import glob\n'), ((2080, 2112), 're.search', 're.search', (['DIRNAME_ARCHIVE', 'name'], {}), '(DIRNAME_ARCHIVE, name)\n', (2089, 2112), False, 'import re\n'), ((2748, 2786), 'shutil.move', 'shutil.move', (['content', 'archive_dir_path'], {}), '(content, archive_dir_path)\n', (2759, 2786), False, 'import shutil\n'), ((9196, 9214), 'numpy.array', 'np.array', (['variable'], {}), '(variable)\n', (9204, 9214), True, 'import numpy as np\n'), ((2674, 2699), 'os.path.basename', 'os.path.basename', (['content'], {}), 
'(content)\n', (2690, 2699), False, 'import os\n'), ((3118, 3158), 're.search', 're.search', (['file_name_template', 'file_name'], {}), '(file_name_template, file_name)\n', (3127, 3158), False, 'import re\n'), ((3271, 3302), 're.findall', 're.findall', (['"""[0-9]+"""', 'file_name'], {}), "('[0-9]+', file_name)\n", (3281, 3302), False, 'import re\n'), ((3880, 3903), 'os.path.isdir', 'os.path.isdir', (['dir_path'], {}), '(dir_path)\n', (3893, 3903), False, 'import os\n'), ((9101, 9122), 'numpy.array', 'np.array', (['variable[i]'], {}), '(variable[i])\n', (9109, 9122), True, 'import numpy as np\n'), ((9285, 9335), 'tri_star.transformation_util.AngleGroup.from_json', 'transformation_util.AngleGroup.from_json', (['variable'], {}), '(variable)\n', (9325, 9335), False, 'from tri_star import transformation_util\n')] |
#!/usr/local/bin/python2.7
from sys import exit
from os import environ
environ['KERAS_BACKEND'] = 'tensorflow'
import numpy as np
import utils
import obj
obj.DEBUG = True
def make_coll(fpath):
    """Build a PFSVCollection weighted by ptweight over the given file pattern."""
    collection = obj.PFSVCollection()
    collection.weight = 'ptweight'
    collection.add_categories(['singletons', 'inclusive'], fpath)
    return collection
# signal (boosted top) and background (QCD) collections; PARTITION/CATEGORY
# placeholders are presumably substituted by the collection loader -- paths
# are cluster-specific.
top_4 = make_coll('/home/snarayan/hscratch/baconarrays/v7_repro/PARTITION/RSGluonToTT_*_CATEGORY.npy') # T
qcd_0 = make_coll('/home/snarayan/hscratch/baconarrays/v7_repro/PARTITION/QCD_*_CATEGORY.npy') # T
# qcd_0 = make_coll('/home/snarayan/hscratch/baconarrays/v6/QCD_*_0_XXXX.npy') # q/g
# histogram binning per singleton variable: np.arange(start, stop, step)
bins = {}
bins['tau32'] = np.arange(0,1.1,0.05)
bins['pt'] = np.arange(200.,2000.,40)
bins['msd'] = np.arange(0,400,10.)
# LaTeX axis labels keyed by the same variable names
labels = {
    'tau32' : r'$\tau_{32}$',
    'pt' : r'$p_{T} [GeV]$',
    'msd' : r'$m_{SD} [GeV]$',
}
def draw(partition='test'):
    """Plot top vs q/g distributions of each singleton variable for one partition."""
    h_top = top_4.draw_singletons(bins.items(), partition=partition)
    h_qcd = qcd_0.draw_singletons(bins.items(), partition=partition)
    # scale() presumably normalizes each histogram so shapes are comparable -- confirm
    for h in [h_top, h_qcd]:
        for v in h.values():
            v.scale()
    for k in bins:
        p = utils.Plotter()
        p.add_hist(h_top[k], 'top', 'r')
        p.add_hist(h_qcd[k], 'q/g', 'k')
        p.plot({'xlabel':labels[k], 'ylabel':'Probability', 'output':'/home/snarayan/public_html/figs/testplots/%s/'%partition+k})
    # if partition=='test':
    #     r = utils.Roccer()
    #     r.addROCs(h_top,h_qcd,labels,colors)
    #     r.plotROCs({'output':'/home/snarayan/public_html/figs/%s/roc'%partition})
# produce the plots for the test, validation, and training partitions
draw()
draw('validate')
draw('train')
| [
"utils.Plotter",
"numpy.arange",
"obj.PFSVCollection"
] | [((659, 682), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.05)'], {}), '(0, 1.1, 0.05)\n', (668, 682), True, 'import numpy as np\n'), ((694, 722), 'numpy.arange', 'np.arange', (['(200.0)', '(2000.0)', '(40)'], {}), '(200.0, 2000.0, 40)\n', (703, 722), True, 'import numpy as np\n'), ((733, 756), 'numpy.arange', 'np.arange', (['(0)', '(400)', '(10.0)'], {}), '(0, 400, 10.0)\n', (742, 756), True, 'import numpy as np\n'), ((211, 231), 'obj.PFSVCollection', 'obj.PFSVCollection', ([], {}), '()\n', (229, 231), False, 'import obj\n'), ((1115, 1130), 'utils.Plotter', 'utils.Plotter', ([], {}), '()\n', (1128, 1130), False, 'import utils\n')] |
from collections import Counter, OrderedDict
from sklearn.naive_bayes import GaussianNB, MultinomialNB, ComplementNB
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn import svm
from sklearn.neural_network import MLPClassifier
from sklearn import metrics
from random import shuffle
import pickle
import os
import numpy as np
from .model_base import ModelBase
from ipclassifier.dataprep import RawFileProcessor
class ModelTrainer(ModelBase):
    """Train and evaluate a classifier that assigns 100-line poem sections
    to a literary period, using accented-word presence plus rule-based
    numeric features.

    The whole pipeline (feature extraction, train/test split, training and
    evaluation) is kicked off from ``__init__`` via ``main()``.

    NOTE(review): ``self.counter_dicts``, ``self.other_features``,
    ``self.all_period_features``, ``self.ordered_accented_words`` and
    ``create_accented_word_dict()`` are presumably provided by ``ModelBase``
    -- confirm before refactoring.
    """
    # The six target classes; each has a poem file "poems/_<period>.txt"
    # located next to this module.
    PERIODS = (
        "15th-Century",
        "16th-Century",
        "17th-Century",
        "18th-Century",
        "19th-Century-(Romantic)",
        "19th-Century-(Victorian)"
    )
    # Number of poem lines that make up one training sample (section)
    SECTION_LENGTH = 100
    def __init__(self, feature_runner ,accented_words_file="master_word_list.pickle"):
        """
        :param feature_runner: callable that, given a list of lines, returns
            an object exposing ``initial_process_contents(period)``
        :param accented_words_file: pickle holding the master accented-word
            list, resolved relative to this module
        """
        super().__init__()
        self.accented_words_file = os.path.join(os.path.dirname(__file__), accented_words_file)
        self.feature_runner = feature_runner
        # Run the full pipeline immediately on construction.
        self.main()
    def get_sections_per_period(self, period, iterations=1):
        """Read the poem file for ``period``, slice it into sections of
        SECTION_LENGTH lines, and extract per-section features.

        :param period: one of PERIODS
        :param iterations: how many times to shuffle the lines before slicing
        """
        print('get sections per period started...')
        filename = os.path.join(os.path.dirname(__file__), f"poems/_{period}.txt")
        rfp = RawFileProcessor(filename)
        contents = rfp.cleaned_contents
        # Shuffle the line order (possibly repeatedly) so sections mix lines
        for j in range(iterations):
            shuffle(contents)
        sectioned_contents = []
        sections = []
        for i,line in enumerate(contents):
            if i == 0: continue
            sections.append(line.rstrip("\n"))
            if i % self.SECTION_LENGTH == 0:
                sectioned_contents.append(sections)
                sections = []
        for i,section in enumerate(sectioned_contents):
            r = self.feature_runner(section)
            features = r.initial_process_contents(period)
            counter_dict = features["counter_dict"]
            rules_avg = features["rules_avg"]
            words_per_line = features["words_per_line"]
            avg_syllables_per_line = features["avg_syllables_per_line"]
            rule_0 = features["rule_0"]
            rule_1 = features["rule_1"]
            rule_2 = features["rule_2"]
            rule_3 = features["rule_3"]
            rule_4 = features["rule_4"]
            rule_5 = features["rule_5"]
            rule_6 = features["rule_6"]
            self.counter_dicts.append(counter_dict)
            # NOTE(review): this assignment overwrites other_features on every
            # iteration, so only the LAST section's numeric features survive,
            # while counter_dicts accumulates one entry per section -- verify
            # that this asymmetry is intended.
            self.other_features = [rules_avg, words_per_line, avg_syllables_per_line, rule_0, rule_1, rule_2, rule_3, rule_4, rule_5, rule_6]
    def create_accented_word_feature(self, period):
        """Build one feature vector per section: a one-hot accented-word
        presence vector extended with the numeric rule features, paired
        with the ``period`` label.

        :return: list of ``[feature_vector, period]`` pairs
        """
        print('create accebted word feature started...')
        period_features = []
        for i,sect in enumerate(self.counter_dicts):
            sect = sorted([word for word in sect])
            sect_dict = OrderedDict(Counter(sect))
            sect_combined = OrderedDict()
            # 1 if the master accented word occurs in this section, else 0
            for k,v in self.ordered_accented_words.items():
                if k in sect_dict:
                    sect_combined[k] = 1
                else:
                    sect_combined[k] = 0
            one_hot_sect = [[float(v) for v in sect_combined.values()]]
            one_hot_sect[0].extend(self.other_features)
            one_hot_sect.append(period)
            period_features.append(one_hot_sect)
        return period_features
    def reset(self):
        """Clear the per-period accumulators before the next period."""
        print('reset started...')
        self.counter_dicts = []
        self.other_features = []
    def combine_all_period_features(self):
        """Flatten the per-period feature lists into one shuffled sample list."""
        print('combine all period features started...')
        flattened_all_period_features = [sect for period in self.all_period_features for sect in period]
        shuffle(flattened_all_period_features)
        return flattened_all_period_features
    def get_train_test_split(self, flattened_all_period_features):
        """Split the (pre-shuffled) samples into 90% train / 10% test arrays.

        :param flattened_all_period_features: list of [features, label] pairs
        :return: dict with keys X_test_np / y_test_np / X_train_np / y_train_np
        """
        print('get train test split started...')
        X = [x[0] for x in flattened_all_period_features]
        y = [x[1] for x in flattened_all_period_features]
        size = len(X)
        print("size y", len(y), y)
        if size != len(y): raise Exception("X and y not same len")
        # First tenth becomes the held-out test set
        test_split_point = size // 10
        X_train = X[test_split_point:]
        y_train = y[test_split_point:]
        X_train_np = np.array(X_train)
        y_train_np = np.array(y_train)
        X_test = X[:test_split_point]
        y_test = y[:test_split_point]
        X_test_np = np.array(X_test)
        y_test_np = np.array(y_test)
        # print("saving train test pickle...")
        # with open("train_test_data.pickle", "wb") as f:
        # pickle.dump({
        # "X_test_np": X_test_np,
        # "y_test_np": y_test_np,
        # "X_train_np": X_train_np,
        # "y_train_np": y_train_np
        # }, f)
        return {
            "X_test_np": X_test_np,
            "y_test_np": y_test_np,
            "X_train_np": X_train_np,
            "y_train_np": y_train_np
        }
    def train_model(self, train_test):
        """Fit a ComplementNB classifier and pickle it to 'garbage.pickle'."""
        print('train model started...')
        # models = [MultinomialNB, ComplementNB, MLPClassifier]
        # names = ["MultinomialNB", "ComplementNB", "MLPClassifier"]
        # for i,model in enumerate(models):
        # print(str(model))
        # print( len(train_test["X_train_np"]), len(train_test["y_train_np"]) )
        # if names[i] == "ComplementNB":
        # test_model = model(alpha=2.0)
        # if names[i] == "MLPClassifier":
        # test_model = model(activation="identity", alpha=1e-08, solver="lbfgs", hidden_layer_sizes=(100,), max_iter=2000)
        # test_model = model()
        # test_model.fit(train_test["X_train_np"], train_test["y_train_np"])
        test_model = ComplementNB(alpha=2.0)
        test_model.fit(train_test["X_train_np"], train_test["y_train_np"])
        with open(f'garbage.pickle', 'wb') as f:
            pickle.dump(test_model, f)
    def test_model(self, train_test):
        """Load the pickled model and report metrics on the test split."""
        with open(f"garbage.pickle", 'rb') as f:
            test_model = pickle.load(f)
        predicted = test_model.predict(train_test["X_test_np"])
        print(metrics.classification_report(train_test["y_test_np"], predicted))
        print(metrics.confusion_matrix(train_test["y_test_np"], predicted))
        print(metrics.accuracy_score(train_test["y_test_np"], predicted))
        print("Accuracy on training set: {:.3f}".format(test_model.score(train_test["X_train_np"], train_test["y_train_np"])))
        print("Accuracy on test set: {:.3f}".format(test_model.score(train_test["X_test_np"], train_test["y_test_np"])))
    def main(self):
        """Run the end-to-end pipeline over all PERIODS."""
        print('main started...')
        self.create_accented_word_dict()
        for period in self.PERIODS:
            print("on period: ", period)
            self.get_sections_per_period(period)
            period_features = self.create_accented_word_feature(period)
            self.all_period_features.append(period_features)
            print("len all period features", len(self.all_period_features))
            self.reset()
        flattened_all_period_features = self.combine_all_period_features()
        print("len flattened all period features", len(flattened_all_period_features))
        train_test = self.get_train_test_split(flattened_all_period_features)
        self.train_model(train_test)
        self.test_model(train_test)
| [
"sklearn.metrics.confusion_matrix",
"pickle.dump",
"ipclassifier.dataprep.RawFileProcessor",
"random.shuffle",
"os.path.dirname",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.classification_report",
"pickle.load",
"numpy.array",
"sklearn.naive_bayes.ComplementNB",
"collections.OrderedDict"... | [((1255, 1281), 'ipclassifier.dataprep.RawFileProcessor', 'RawFileProcessor', (['filename'], {}), '(filename)\n', (1271, 1281), False, 'from ipclassifier.dataprep import RawFileProcessor\n'), ((3771, 3809), 'random.shuffle', 'shuffle', (['flattened_all_period_features'], {}), '(flattened_all_period_features)\n', (3778, 3809), False, 'from random import shuffle\n'), ((4351, 4368), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (4359, 4368), True, 'import numpy as np\n'), ((4390, 4407), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (4398, 4407), True, 'import numpy as np\n'), ((4505, 4521), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (4513, 4521), True, 'import numpy as np\n'), ((4542, 4558), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (4550, 4558), True, 'import numpy as np\n'), ((5821, 5844), 'sklearn.naive_bayes.ComplementNB', 'ComplementNB', ([], {'alpha': '(2.0)'}), '(alpha=2.0)\n', (5833, 5844), False, 'from sklearn.naive_bayes import GaussianNB, MultinomialNB, ComplementNB\n'), ((924, 949), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (939, 949), False, 'import os\n'), ((1190, 1215), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1205, 1215), False, 'import os\n'), ((1371, 1388), 'random.shuffle', 'shuffle', (['contents'], {}), '(contents)\n', (1378, 1388), False, 'from random import shuffle\n'), ((2973, 2986), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2984, 2986), False, 'from collections import Counter, OrderedDict\n'), ((5981, 6007), 'pickle.dump', 'pickle.dump', (['test_model', 'f'], {}), '(test_model, f)\n', (5992, 6007), False, 'import pickle\n'), ((6122, 6136), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6133, 6136), False, 'import pickle\n'), ((2930, 2943), 'collections.Counter', 'Counter', (['sect'], {}), '(sect)\n', (2937, 2943), False, 'from collections import Counter, 
OrderedDict\n'), ((6223, 6288), 'sklearn.metrics.classification_report', 'metrics.classification_report', (["train_test['y_test_np']", 'predicted'], {}), "(train_test['y_test_np'], predicted)\n", (6252, 6288), False, 'from sklearn import metrics\n'), ((6308, 6368), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (["train_test['y_test_np']", 'predicted'], {}), "(train_test['y_test_np'], predicted)\n", (6332, 6368), False, 'from sklearn import metrics\n'), ((6388, 6446), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (["train_test['y_test_np']", 'predicted'], {}), "(train_test['y_test_np'], predicted)\n", (6410, 6446), False, 'from sklearn import metrics\n')] |
#
# This script uses the data from spin echo experiments to determine
# the half life etime of the magnetisation of the samples spins
#
#
# STEP 1 : READ DATA FROM FILES
#
# IMPORT MODULES
# Handling files and paths
from pathlib import Path
from matplotlib import pyplot as plt
# CODE
#
# Locate the directory containing this script
workingDir = Path(__file__).parent.absolute()
# Collect every measurement file and order it by name, which also orders
# the samples by concentration
dataFiles = sorted((workingDir/"data").glob("*.txt"))
# Parsed measurements: one dict per file with keys sample/time/voltage
spinechos = []
for dataFile in dataFiles:
    print("Read file:", dataFile)
    lines = dataFile.read_text().splitlines()
    # The first line describes the sample; convert German decimal commas
    # (1,2) to English decimal points (1.2) and strip tailing spaces
    sample = lines[0].strip().replace(",", ".")
    # The remaining lines (after the 3-line header) hold "time voltage"
    # pairs, again with decimal commas
    pairs = [line.replace(",", ".").split() for line in lines[3:]]
    time = [float(pair[0]) for pair in pairs]
    voltage = [float(pair[1]) for pair in pairs]
    spinechos.append({"sample": sample,
                      "time": time,
                      "voltage": voltage})
# PLOT PROGRESS
for graph in spinechos:
    # Raw data as scatter points plus a connecting line for readability;
    # label=None on the line keeps each sample listed only once in the legend
    plt.scatter("time", "voltage", data=graph, label=graph["sample"])
    plt.plot("time", "voltage", data=graph, label=None)
plt.legend(fontsize=8)
plt.title("Loss of Magnetisation")
plt.xlabel("time t / ms")
plt.ylabel("voltage ΔU / V")
# Show the plot and allow creation of a new plot
plt.show()
#
# STEP 2 : EXPONENTIAL FIT, HALF-LIFE TIME AND PLOT THE PROGRESS
#
# IMPORT MODULE
# Used for fitting arbitrary functions
from scipy.optimize import curve_fit
# Math and stuff
import numpy as np
# Exponential decay model of the magnetisation: ΔU(t) = A * exp(-b * t)
def expFct(time, A, b):
    """Evaluate the exponential decay A * exp(-b * time)."""
    return A * np.exp(-b * time)
# Fitted parameters, one entry per sample
fitParam = []
for graph in spinechos:
    # Least-squares fit of the decay curve to the measured voltages
    fit = curve_fit(f = expFct,
                    xdata = graph["time"],
                    ydata = graph["voltage"],
                    # Initial guess of the fitting parameters (A, b)
                    p0 = [10, 0.001])
    A_fit, b_fit = fit[0]
    fitParam.append({"sample"        : graph["sample"],
                     "A"             : A_fit,
                     "b"             : b_fit,
                     # Half-life of the magnetisation, derived from
                     # voltage = A * exp(-b * time)
                     "half-life-time": -np.log(0.5)/b_fit})
    # Raw data points for this sample
    plt.scatter("time", "voltage", data = graph, label = graph["sample"])
    # Evaluate the fitted curve on a fine grid spanning the measured times
    fit_time = np.arange(min(graph["time"]), max(graph["time"]), 0.1)
    fit_voltage = expFct(time = fit_time, A = A_fit, b = b_fit)
    # label=None keeps each sample listed only once in the legend
    plt.plot(fit_time, fit_voltage, label = None)
plt.legend(fontsize=8)
plt.title("Exponential Regression of Spin Echos")
plt.xlabel("time t / ms")
plt.ylabel("voltage ΔU / V")
# IMPORTANT: save before plt.show(), which clears the current figure
plt.savefig(str(workingDir)+"/img/regression.png")
plt.show()
#
# STEP3 : CREATE A TABLE AND WRITE IT TO FILE
#
# IMPORT MODULES
# Convert arrays into tables
from tabulate import tabulate
# CODE
# Assemble the fitted parameters into a human-readable table
resultsTable = tabulate(
    # One row per sample: the ion plus its fitted parameters
    [sample.values() for sample in fitParam],
    headers = ["Sample", "A / V", "b / (1/ms)", "half-life time t / ms"]
)
# The same header text is printed and written to file
header = "Exponential Regression:\n ΔU = A * exp(-b * time)"
print(header)
print(resultsTable)
print("Write results to file.")
(workingDir/"img/results.txt").write_text(header + "\n" + resultsTable + "\n")
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.log",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"scipy.optimize.curve_fit",
"pathlib.Path",
"numpy.exp",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((1974, 1996), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(8)'}), '(fontsize=8)\n', (1984, 1996), True, 'from matplotlib import pyplot as plt\n'), ((2020, 2054), 'matplotlib.pyplot.title', 'plt.title', (['"""Loss of Magnetisation"""'], {}), "('Loss of Magnetisation')\n", (2029, 2054), True, 'from matplotlib import pyplot as plt\n'), ((2055, 2080), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time t / ms"""'], {}), "('time t / ms')\n", (2065, 2080), True, 'from matplotlib import pyplot as plt\n'), ((2081, 2109), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""voltage ΔU / V"""'], {}), "('voltage ΔU / V')\n", (2091, 2109), True, 'from matplotlib import pyplot as plt\n'), ((2160, 2170), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2168, 2170), True, 'from matplotlib import pyplot as plt\n'), ((4115, 4137), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(8)'}), '(fontsize=8)\n', (4125, 4137), True, 'from matplotlib import pyplot as plt\n'), ((4161, 4210), 'matplotlib.pyplot.title', 'plt.title', (['"""Exponential Regression of Spin Echos"""'], {}), "('Exponential Regression of Spin Echos')\n", (4170, 4210), True, 'from matplotlib import pyplot as plt\n'), ((4211, 4236), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time t / ms"""'], {}), "('time t / ms')\n", (4221, 4236), True, 'from matplotlib import pyplot as plt\n'), ((4237, 4265), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""voltage ΔU / V"""'], {}), "('voltage ΔU / V')\n", (4247, 4265), True, 'from matplotlib import pyplot as plt\n'), ((4412, 4422), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4420, 4422), True, 'from matplotlib import pyplot as plt\n'), ((1570, 1635), 'matplotlib.pyplot.scatter', 'plt.scatter', (['"""time"""', '"""voltage"""'], {'data': 'graph', 'label': "graph['sample']"}), "('time', 'voltage', data=graph, label=graph['sample'])\n", (1581, 1635), True, 'from matplotlib import pyplot as plt\n'), ((1905, 1956), 
'matplotlib.pyplot.plot', 'plt.plot', (['"""time"""', '"""voltage"""'], {'data': 'graph', 'label': 'None'}), "('time', 'voltage', data=graph, label=None)\n", (1913, 1956), True, 'from matplotlib import pyplot as plt\n'), ((2664, 2749), 'scipy.optimize.curve_fit', 'curve_fit', ([], {'f': 'expFct', 'xdata': "graph['time']", 'ydata': "graph['voltage']", 'p0': '[10, 0.001]'}), "(f=expFct, xdata=graph['time'], ydata=graph['voltage'], p0=[10, 0.001]\n )\n", (2673, 2749), False, 'from scipy.optimize import curve_fit\n'), ((3361, 3426), 'matplotlib.pyplot.scatter', 'plt.scatter', (['"""time"""', '"""voltage"""'], {'data': 'graph', 'label': "graph['sample']"}), "('time', 'voltage', data=graph, label=graph['sample'])\n", (3372, 3426), True, 'from matplotlib import pyplot as plt\n'), ((4041, 4084), 'matplotlib.pyplot.plot', 'plt.plot', (['fit_time', 'fit_voltage'], {'label': 'None'}), '(fit_time, fit_voltage, label=None)\n', (4049, 4084), True, 'from matplotlib import pyplot as plt\n'), ((2456, 2473), 'numpy.exp', 'np.exp', (['(-b * time)'], {}), '(-b * time)\n', (2462, 2473), True, 'import numpy as np\n'), ((342, 356), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (346, 356), False, 'from pathlib import Path\n'), ((3256, 3267), 'numpy.log', 'np.log', (['(0.5)'], {}), '(0.5)\n', (3262, 3267), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from ...ext.six import string_types
from .shader_object import ShaderObject
VARIABLE_TYPES = ('const', 'uniform', 'attribute', 'varying', 'inout')
class Variable(ShaderObject):
    """ Representation of global shader variable
    Parameters
    ----------
    name : str
        the name of the variable. This string can also contain the full
        definition of the variable, e.g. 'uniform vec2 foo'.
    value : {float, int, tuple, GLObject}
        If given, vtype and dtype are determined automatically. If a
        float/int/tuple is given, the variable is a uniform. If a gloo
        object is given that has a glsl_type property, the variable is
        an attribute and
    vtype : {'const', 'uniform', 'attribute', 'varying', 'inout'}
        The type of variable.
    dtype : str
        The data type of the variable, e.g. 'float', 'vec4', 'mat', etc.
    """
    def __init__(self, name, value=None, vtype=None, dtype=None):
        super(Variable, self).__init__()
        # allow full definition in first argument
        if ' ' in name:
            fields = name.split(' ')
            if len(fields) == 3:
                vtype, dtype, name = fields
            elif len(fields) == 4 and fields[0] == 'const':
                vtype, dtype, name, value = fields
            else:
                raise ValueError('Variable specifications given by string must'
                                 ' be of the form "vtype dtype name" or '
                                 '"const dtype name value".')
        if not (isinstance(name, string_types) or name is None):
            raise TypeError("Variable name must be string or None.")
        # Bumped on every value assignment; exposed through state_id so
        # ModularProgram can detect stale uniform/attribute values cheaply.
        self._state_counter = 0
        self._name = name
        self._vtype = vtype
        self._dtype = dtype
        self._value = None
        # If vtype/dtype were given at init, then we will never
        # try to set these values automatically.
        self._type_locked = self._vtype is not None and self._dtype is not None
        if value is not None:
            self.value = value
        if self._vtype and self._vtype not in VARIABLE_TYPES:
            raise ValueError('Not a valid vtype: %r' % self._vtype)
    @property
    def name(self):
        """ The name of this variable.
        """
        return self._name
    @name.setter
    def name(self, n):
        # Settable mostly to allow automatic setting of varying names
        # See ShaderObject.create()
        if self._name != n:
            self._name = n
            self.changed(code_changed=True)
    @property
    def vtype(self):
        """ The type of variable (const, uniform, attribute, varying or inout).
        """
        return self._vtype
    @property
    def dtype(self):
        """ The type of data (float, int, vec, mat, ...).
        """
        return self._dtype
    @property
    def value(self):
        """ The value associated with this variable.
        """
        return self._value
    @value.setter
    def value(self, value):
        # Infer (vtype, dtype) from the assigned Python value:
        # short tuples/lists and 1-D/2-D arrays become uniform vecN/matN,
        # scalars become uniform float/int, and gloo objects provide their
        # own glsl_type (typically an attribute).
        if isinstance(value, (tuple, list)) and 1 < len(value) < 5:
            vtype = 'uniform'
            dtype = 'vec%d' % len(value)
        elif isinstance(value, np.ndarray):
            if value.ndim == 1 and (1 < len(value) < 5):
                vtype = 'uniform'
                dtype = 'vec%d' % len(value)
            elif value.ndim == 2 and value.shape in ((2, 2), (3, 3), (4, 4)):
                vtype = 'uniform'
                dtype = 'mat%d' % value.shape[0]
            else:
                raise ValueError("Cannot make uniform value for %s from array "
                                 "of shape %s." % (self.name, value.shape))
        elif np.isscalar(value):
            vtype = 'uniform'
            if isinstance(value, (float, np.floating)):
                dtype = 'float'
            elif isinstance(value, (int, np.integer)):
                dtype = 'int'
            else:
                raise TypeError("Unknown data type %r for variable %r" %
                                (type(value), self))
        elif getattr(value, 'glsl_type', None) is not None:
            # Note: hasattr() is broken by design--swallows all exceptions!
            vtype, dtype = value.glsl_type
        else:
            raise TypeError("Unknown data type %r for variable %r" %
                            (type(value), self))
        self._value = value
        self._state_counter += 1
        if self._type_locked:
            # Types fixed at construction: reject values that disagree.
            if dtype != self._dtype or vtype != self._vtype:
                raise TypeError('Variable is type "%s"; cannot assign value '
                                '%r.' % (self.dtype, value))
            return
        # update vtype/dtype and emit changed event if necessary
        changed = False
        if self._dtype != dtype:
            self._dtype = dtype
            changed = True
        if self._vtype != vtype:
            self._vtype = vtype
            changed = True
        if changed:
            self.changed(code_changed=True, value_changed=True)
    @property
    def state_id(self):
        """Return a unique ID that changes whenever the state of the Variable
        has changed. This allows ModularProgram to quickly determine whether
        the value has changed since it was last used."""
        return id(self), self._state_counter
    def __repr__(self):
        return ("<%s \"%s %s %s\" at 0x%x>" % (self.__class__.__name__,
                                               self._vtype, self._dtype,
                                               self.name, id(self)))
    def expression(self, names):
        # The GLSL expression for this variable is simply its mangled name.
        return names[self]
    def definition(self, names):
        # Emit the GLSL global declaration; consts also embed their value.
        if self.vtype is None:
            raise RuntimeError("Variable has no vtype: %r" % self)
        if self.dtype is None:
            raise RuntimeError("Variable has no dtype: %r" % self)
        name = names[self]
        if self.vtype == 'const':
            return '%s %s %s = %s;' % (self.vtype, self.dtype, name,
                                       self.value)
        else:
            return '%s %s %s;' % (self.vtype, self.dtype, name)
class Varying(Variable):
    """ Representation of a varying
    Varyings can inherit their dtype from another Variable, allowing for
    more flexibility in composing shaders.
    """
    def __init__(self, name, dtype=None):
        self._link = None
        Variable.__init__(self, name, vtype='varying', dtype=dtype)
    @property
    def value(self):
        """ The value associated with this variable.
        """
        return self._value
    @value.setter
    def value(self, value):
        # A varying is produced by the shader pipeline itself; it never
        # carries a user-assigned value.
        if value is not None:
            raise TypeError("Cannot assign value directly to varying.")
    @property
    def dtype(self):
        # An explicitly-set dtype wins; otherwise fall back to the dtype of
        # the linked variable, if any.
        if self._dtype is not None:
            return self._dtype
        return None if self._link is None else self._link.dtype
    def link(self, var):
        """ Link this Varying to another object from which it will derive its
        dtype. This method is used internally when assigning an attribute to
        a varying using syntax ``Function[varying] = attr``.
        """
        assert self._dtype is not None or hasattr(var, 'dtype')
        self._link = var
        self.changed()
| [
"numpy.isscalar"
] | [((3956, 3974), 'numpy.isscalar', 'np.isscalar', (['value'], {}), '(value)\n', (3967, 3974), True, 'import numpy as np\n')] |
import os
from matplotlib import pyplot as plt
import torch
import torch.nn.functional as F
import random
from collections import Counter
from collections.abc import Iterable # import directly from collections for Python < 3.3
import numpy as np
from typing import Union
import networkx as nx
# List operation
def deduplicate(arr: list):
    """Return a list containing each element of ``arr`` exactly once.

    Ordering is not preserved (a set is used internally).

    :param arr: the original list
    :return: deduplicated list
    """
    return [*{*arr}]
def is_overlap(a, b):
    """Return True if lists ``a`` and ``b`` share at least one element.

    :param a: list to compare
    :param b: list to compare
    :return: True if there is a common element, False otherwise
    """
    # Set disjointness gives the same answer as the element-by-element scan.
    return not set(a).isdisjoint(b)
def subtract(tbs, ts):
    """Return the elements of ``tbs`` that do not occur in ``ts``.

    Order and duplicates within ``tbs`` are preserved.

    :param tbs: list to be subtracted from
    :param ts: list of elements to remove (elements must be hashable, as
        in the original implementation which passed them through a set)
    :return: filtered copy of ``tbs``
    """
    # Membership tests against a set are O(1) instead of O(len(ts)),
    # turning the accidental O(n*m) scan into O(n + m).
    exclude = set(ts)
    return [e for e in tbs if e not in exclude]
def expand_window(data, window_set=1):
    """Shift ``data`` by each offset in ``window_set`` and collect the results.

    A positive bias shifts right (padding with the first element), a negative
    bias shifts left (padding with the last element), and 0 keeps the list.

    :param data: the original list
    :param window_set: int or list of int offsets
    :return: a list of lists, one shifted copy per offset
    """
    biases = [window_set] if isinstance(window_set, int) else window_set
    shifted = []
    for bias in biases:
        if bias > 0:
            shifted.append([data[0]] * bias + data[:-bias])
        elif bias < 0:
            shifted.append(data[-bias:] + [data[-1]] * -bias)
        else:
            shifted.append(data)
    return shifted
def mean(data: list):
    """Return the arithmetic mean of the numbers in ``data``.

    :param data: a non-empty list of numbers
    :return: the average value
    """
    return sum(data) / len(data)
def flatten(list_of_lists):
    """Concatenate the sub-lists of ``list_of_lists`` into one flat list.

    :param list_of_lists: a list of sub-lists like "[[e_00, ...], ..., [e_n0, ...]]"
    :return: single flat list of all items
    """
    flat = []
    for sub in list_of_lists:
        flat.extend(sub)
    return flat
def padding(seq: list, max_length: int, pad_tok=None):
    """Pad or truncate ``seq`` to exactly ``max_length`` items.

    :param seq: list to pad
    :param max_length: length of the returned list
    :param pad_tok: token appended when ``seq`` is too short
    :return: new list of length ``max_length``
    """
    padded = seq + [pad_tok] * max_length
    return padded[:max_length]
# Numpy operation
def pairwise_equation(data: np.ndarray, tok_illegal=None):
    """Build a lower-triangular pairwise-equality indicator matrix.

    :param data: 1-D array of tokens (e.g. strings); NOT modified
    :param tok_illegal: meaningless token; entries equal to it never match
        anything (including each other)
    :return: indicator matrix A where A[i, j] == 1 iff j < i and
        data[i] == data[j] (and neither entry is ``tok_illegal``)
    """
    placeholder = "1&*^%!2)!"  # an impossible string
    n = len(data)
    # str_mat[i, j] == data[i]; built from the ORIGINAL values on purpose,
    # so replacing tok_illegal in only one comparison operand makes illegal
    # tokens unequal everywhere.
    str_mat = data.reshape(n, 1).repeat(n, axis=1)
    if tok_illegal is not None:
        # Bug fix: work on a copy so the caller's array is not mutated.
        data = data.copy()
        data[data == tok_illegal] = placeholder
    mat = str_mat == data
    indicator_matrix = np.tril(mat, -1).astype(int)
    return indicator_matrix
def indicator_vec(indices: Union[int, list], n: int, device=None, dtype=torch.float):
    """Build a length-``n`` vector with ones at ``indices``, zeros elsewhere.

    :param indices: index or indices to set to one
    :param n: number of classes (vector length)
    :param device: optional device to move the vector to
    :param dtype: dtype of the returned tensor
    :return: the indicator vector
    """
    vec = torch.zeros(n, dtype=dtype)
    vec[indices] = 1
    return vec if device is None else vec.to(device)
# Dict and tensor operations
def l2_norm(vec: torch.Tensor):
    """Normalize ``vec`` to unit L2 norm along the last axis, IN PLACE.

    :param vec: feature tensor [D] or [N, D]; modified in place and returned
    """
    norms = torch.norm(vec, dim=-1, keepdim=True)
    vec /= norms
    return vec
def sample_dict(d: dict, n: int, seed=None):
    """Sample ``n`` keys from ``d`` without replacement.

    :param d: original dict
    :param n: number of keys to sample
    :param seed: random seed of sampling (optional)
    :return: new dict restricted to the sampled keys
    """
    if seed is not None:
        random.seed(seed)
    # Bug fix: random.sample() requires a sequence -- passing d.keys()
    # raises TypeError on Python 3.11+ -- so materialize the keys first.
    keys = random.sample(list(d), n)
    sample_d = {k: d[k] for k in keys}
    return sample_d
# Tensor operation
def cosine_sim(fea: torch.Tensor):
    """Compute pairwise cosine similarity between feature rows.

    Note: ``fea`` is L2-normalized IN PLACE as a side effect.

    :param fea: feature matrix [N, D]
    :return: similarity matrix [N, N]
    """
    fea /= torch.norm(fea, dim=-1, keepdim=True)
    return torch.mm(fea, fea.T)
def uniform_normalize(t: torch.Tensor):
    """Rescale ``t`` IN PLACE so its last-axis values span [0, 1].

    :param t: tensor to normalize (modified in place)
    :return: the normalized tensor
    >>> a = torch.rand(5)
    tensor([0.3357, 0.9217, 0.0937, 0.1567, 0.9447])
    >>> uniform_normalize(a)
    tensor([0.2843, 0.9730, 0.0000, 0.0740, 1.0000])
    """
    lo = t.min(-1, keepdim=True)[0]
    t -= lo
    # The max is taken AFTER the shift, matching the original order.
    hi = t.max(-1, keepdim=True)[0]
    t /= hi
    return t
def build_sparse_adjacent_matrix(edges: list, n: int, device=None, dtype=torch.float, undirectional=True):
    """Build a dense adjacency matrix from an edge list.

    :param edges: list of (st, ed) edge tuples
    :param n: number of vertices
    :param device: optional device for the result
    :param dtype: dtype of the result
    :param undirectional: if True, symmetrize the matrix
    :return: dense [n, n] adjacency matrix
    """
    idx = torch.tensor(list(zip(*edges)))
    val = torch.ones(idx.shape[1], dtype=dtype)
    adj = torch.sparse_coo_tensor(idx, val, (n, n))
    if device is not None:
        adj = adj.to(device)
    dense = adj.to_dense()
    if undirectional:
        # An edge in either direction becomes an edge in both.
        dense = ((dense > 0) | (dense.transpose(-2, -1) > 0)).to(dtype)
    return dense
def undirectionalize(mat: torch.Tensor):
    """Return the symmetrized (undirected) version of adjacency matrix ``mat``."""
    symmetric = (mat > 0) | (mat.T > 0)
    return symmetric.to(mat.dtype)
def remove_undirectional_edge(mat, edges):
    """Zero out every edge in ``edges`` in BOTH directions, in place.

    :param mat: adjacency matrix (modified in place)
    :param edges: list of (st, ed) tuples; may be empty
    :return: the (modified) matrix
    """
    if not edges:
        return mat
    src, dst = zip(*edges)
    # Advanced indexing clears (src, dst) and (dst, src) pairs at once.
    mat[src + dst, dst + src] = 0
    return mat
def cuda(data, device):
    """Move every tensor in ``data`` to ``device``; non-tensors pass through."""
    moved = []
    for item in data:
        moved.append(item.to(device) if isinstance(item, torch.Tensor) else item)
    return moved
def stack_tenor_list(tensor_list, dim=0, value=0):
    """Pad a list of same-rank tensors to a common shape and stack them.

    :param tensor_list: non-empty list of tensors with equal rank
    :param dim: axis along which to stack
    :param value: constant used for padding
    :return: the stacked tensor
    """
    rank = len(tensor_list[0].shape)
    # Per-axis maximum size over all tensors.
    max_shape = [max(t.shape[i] for t in tensor_list) for i in range(rank)]
    padded = []
    for t in tensor_list:
        # F.pad expects pads ordered from the LAST dimension to the first:
        # (last-left, last-right, ..., first-left, first-right).
        pad_spec = []
        for i in range(rank):
            pad_spec = [0, max_shape[i] - t.shape[i]] + pad_spec
        padded.append(F.pad(t, pad_spec, mode="constant", value=value))
    return torch.stack(padded, dim=dim)
# List statistic
def percentile(data: list, p=0.5):
    """Return the smallest value whose cumulative frequency exceeds p*len(data).

    :param data: origin list
    :param p: frequency percentile, strictly between 0 and 1
    :return: the element at frequency percentile ``p`` (None if never exceeded)
    """
    assert 0 < p < 1
    threshold = len(data) * p
    cumulative = 0
    # Counter items sorted by value; counts accumulate until the threshold
    # is strictly exceeded.
    for value, count in sorted(Counter(data).items()):
        cumulative += count
        if cumulative > threshold:
            return value
    return None
# List visualization
def save_plt(fig_name: str, mute):
    """Save the current matplotlib figure to ``fig_name``, creating parent dirs.

    :param fig_name: path of target file
    :param mute: suppress the confirmation message if True
    :return: None
    """
    if "/" in fig_name:
        parent = fig_name[:fig_name.rfind("/")]
        if not os.path.exists(parent):
            os.makedirs(parent)
    plt.savefig(fig_name)
    if not mute:
        print("'%s' saved." % fig_name)
def list_histogram(data: list, color="b", title="Histogram of element frequency.", x_label="", y_label="Frequency", fig_name="hist.png", mute=False):
    """Plot and save a histogram of element frequencies in ``data``.

    :param data: the origin list
    :param color: color of the histogram bars
    :param title: bottom title of the histogram
    :param x_label: label of x axis
    :param y_label: label of y axis
    :param fig_name: path of target file
    :param mute: mute the output if True
    :return: None
    """
    # One bin per distinct value keeps the bin count adaptive to the data.
    n_bins = len(deduplicate(data))
    plt.hist(data, color=color, bins=n_bins)
    plt.gca().set(xlabel=x_label, ylabel=y_label)
    plt.title("Fig. "+title, fontdict={'family': 'serif', "verticalalignment": "bottom"})
    save_plt(fig_name, mute)
    plt.clf()
def show_type_tree(data, indentation=4, depth=0, no_leaf=True):
"""
:param data: the data to show the structure
:param indentation: number of space of indentation
:param depth: variable used for recursive
:param no_leaf: don't display the leaf (non-iterable) node if True
:return: None
"""
def _indent(content: str):
if depth == 0:
print()
print(" " * (depth * indentation) + content)
if not isinstance(data, Iterable):
if no_leaf:
return
if isinstance(data, int):
_indent("int: %d" % data)
elif isinstance(data, float):
_indent("float: %.2f" % data)
else:
_indent(str(type(data)))
return
if isinstance(data, list):
_indent("list with size %d" % len(data))
for item in data:
show_type_tree(item, depth=depth + 1)
elif isinstance(data, tuple):
_indent("tuple with size %d" % len(data))
for item in data:
show_type_tree(item, depth=depth + 1)
elif isinstance(data, dict):
_indent("dict with size %d" % len(data))
for key in data:
_indent(str(key))
show_type_tree(data[key], depth=depth + 1)
elif isinstance(data, str):
_indent("str: " + data)
elif isinstance(data, torch.Tensor):
_indent("Tensor with shape" + str(list(data.shape)))
else:
_indent(str(type(data)))
for item in data:
show_type_tree(item, depth=depth + 1)
def random_color(seed=None):
    """Return a random hex color string like "#A3F01B".

    :param seed: optional seed for reproducible colors
    :return: color string "#RRGGBB"
    """
    # Bug fix: `if seed:` silently ignored the valid seed 0.
    if seed is not None:
        random.seed(seed)  # cover all 6 times of sampling
    color = "#" + ''.join([random.choice('0123456789ABCDEF') for _ in range(6)])
    return color
| [
"matplotlib.pyplot.title",
"torch.ones",
"torch.stack",
"torch.sparse_coo_tensor",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.clf",
"torch.norm",
"numpy.tril",
"os.makedirs",
"os.path.exists",
"random.choice",
"random.seed",
"matplotlib.pyplot.gca",
"torch.zeros",
"collections.Counter"... | [((3097, 3124), 'torch.zeros', 'torch.zeros', (['n'], {'dtype': 'dtype'}), '(n, dtype=dtype)\n', (3108, 3124), False, 'import torch\n'), ((3340, 3377), 'torch.norm', 'torch.norm', (['vec'], {'dim': '(-1)', 'keepdim': '(True)'}), '(vec, dim=-1, keepdim=True)\n', (3350, 3377), False, 'import torch\n'), ((3916, 3953), 'torch.norm', 'torch.norm', (['fea'], {'dim': '(-1)', 'keepdim': '(True)'}), '(fea, dim=-1, keepdim=True)\n', (3926, 3953), False, 'import torch\n'), ((4774, 4809), 'torch.ones', 'torch.ones', (['i.shape[1]'], {'dtype': 'dtype'}), '(i.shape[1], dtype=dtype)\n', (4784, 4809), False, 'import torch\n'), ((4823, 4860), 'torch.sparse_coo_tensor', 'torch.sparse_coo_tensor', (['i', 'v', '(n, n)'], {}), '(i, v, (n, n))\n', (4846, 4860), False, 'import torch\n'), ((6324, 6354), 'torch.stack', 'torch.stack', (['to_stack'], {'dim': 'dim'}), '(to_stack, dim=dim)\n', (6335, 6354), False, 'import torch\n'), ((7172, 7193), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_name'], {}), '(fig_name)\n', (7183, 7193), True, 'from matplotlib import pyplot as plt\n'), ((7959, 7997), 'matplotlib.pyplot.hist', 'plt.hist', (['data'], {'color': 'color', 'bins': 'bins'}), '(data, color=color, bins=bins)\n', (7967, 7997), True, 'from matplotlib import pyplot as plt\n'), ((8052, 8143), 'matplotlib.pyplot.title', 'plt.title', (["('Fig. ' + title)"], {'fontdict': "{'family': 'serif', 'verticalalignment': 'bottom'}"}), "('Fig. 
' + title, fontdict={'family': 'serif', 'verticalalignment':\n 'bottom'})\n", (8061, 8143), True, 'from matplotlib import pyplot as plt\n'), ((8171, 8180), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8178, 8180), True, 'from matplotlib import pyplot as plt\n'), ((3629, 3646), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3640, 3646), False, 'import random\n'), ((6233, 6279), 'torch.nn.functional.pad', 'F.pad', (['data', 'pad'], {'mode': '"""constant"""', 'value': 'value'}), "(data, pad, mode='constant', value=value)\n", (6238, 6279), True, 'import torch.nn.functional as F\n'), ((9777, 9794), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (9788, 9794), False, 'import random\n'), ((2783, 2799), 'numpy.tril', 'np.tril', (['mat', '(-1)'], {}), '(mat, -1)\n', (2790, 2799), True, 'import numpy as np\n'), ((7110, 7133), 'os.path.exists', 'os.path.exists', (['fig_dir'], {}), '(fig_dir)\n', (7124, 7133), False, 'import os\n'), ((7147, 7167), 'os.makedirs', 'os.makedirs', (['fig_dir'], {}), '(fig_dir)\n', (7158, 7167), False, 'import os\n'), ((8002, 8011), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8009, 8011), True, 'from matplotlib import pyplot as plt\n'), ((6611, 6624), 'collections.Counter', 'Counter', (['data'], {}), '(data)\n', (6618, 6624), False, 'from collections import Counter\n'), ((9853, 9886), 'random.choice', 'random.choice', (['"""0123456789ABCDEF"""'], {}), "('0123456789ABCDEF')\n", (9866, 9886), False, 'import random\n')] |
# coding=utf-8
# Copyright (C) 2020 NumS Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
import pytest
import tqdm
from nums.core.array import utils as array_utils
def test_assign_broadcasting():
    # https://numpy.org/doc/stable/user/basics.indexing.html#assigning-values-to-indexed-arrays
    # The NumPy docs above do not fully capture assignment broadcasting, so the
    # broadcasting utilities are checked against NumPy itself over array
    # *shapes* rather than array contents.
    # Each 10-tuple over {0, 1, 2, 3} is split into two 5-tuples defining the
    # LHS and RHS shapes of an assignment; a 0 axis is dropped, and an
    # all-zero tuple yields a dimensionless array.  Arrays are left
    # uninitialized because only shape compatibility matters here.
    # There is no formal proof that 5 axes are fully general.
    def build(axes):
        dims = tuple(d for d in axes if d > 0)
        if not dims:
            return np.array(0)
        return np.empty(np.product(dims)).reshape(dims)

    shape_tuples = list(itertools.product([0, 1, 2, 3], repeat=10))
    progress = tqdm.tqdm(total=len(shape_tuples))
    for axes in shape_tuples:
        lhs: np.ndarray = build(axes[:5])
        rhs: np.ndarray = build(axes[5:])
        try:
            if lhs.shape == ():
                continue
            if rhs.shape == ():
                lhs[:] = rhs
            else:
                lhs[:] = rhs[:]
            # Assignment succeeded, so NumPy and our utilities must agree.
            assert np.broadcast_to(rhs, lhs.shape).shape == \
                array_utils.broadcast_shape_to_alt(rhs.shape, lhs.shape)
            assert array_utils.can_broadcast_shape_to(rhs.shape, lhs.shape), \
                "%s can be broadcast to %s" % (rhs.shape, lhs.shape)
        except ValueError:
            # Assignment failed: every broadcast path must reject the shapes.
            with pytest.raises(ValueError):
                np.broadcast_to(rhs, lhs.shape)
            with pytest.raises(ValueError):
                array_utils.broadcast_shape_to_alt(rhs.shape, lhs.shape)
            assert not array_utils.can_broadcast_shape_to(rhs.shape, lhs.shape), \
                "%s cannot be broadcast to %s" % (rhs.shape, lhs.shape)
        progress.update(1)
def test_bop_broadcasting():
    # Binary-op broadcasting: the product A * B must have exactly the shape
    # predicted by broadcast_shape, and failures must be predicted as well.
    def build(axes):
        dims = tuple(d for d in axes if d > 0)
        if not dims:
            return np.array(0)
        return np.empty(np.product(dims)).reshape(dims)

    shape_tuples = list(itertools.product([0, 1, 2, 3], repeat=10))
    progress = tqdm.tqdm(total=len(shape_tuples))
    for axes in shape_tuples:
        lhs: np.ndarray = build(axes[:5])
        rhs: np.ndarray = build(axes[5:])
        try:
            assert (lhs * rhs).shape == array_utils.broadcast_shape(lhs.shape, rhs.shape)
        except ValueError:
            # NumPy rejected the op; our utilities must reject it in both orders.
            assert not array_utils.can_broadcast_shapes(rhs.shape, lhs.shape)
            assert not array_utils.can_broadcast_shapes(lhs.shape, rhs.shape)
            with pytest.raises(ValueError):
                array_utils.broadcast_shape(lhs.shape, rhs.shape)
        progress.update(1)
if __name__ == "__main__":
    # Allow running this module directly (outside pytest).
    # pylint: disable=import-error
    from tests import conftest
    # Initialize the serial backend; the instance is kept for its side
    # effects only -- presumably required before the tests can run
    # (confirm against tests.conftest.get_app).
    app_inst = conftest.get_app("serial")
    test_assign_broadcasting()
    test_bop_broadcasting()
| [
"nums.core.array.utils.broadcast_shape_to_alt",
"nums.core.array.utils.can_broadcast_shapes",
"nums.core.array.utils.can_broadcast_shape_to",
"nums.core.array.utils.broadcast_shape",
"pytest.raises",
"numpy.product",
"numpy.array",
"itertools.product",
"tests.conftest.get_app",
"numpy.broadcast_to... | [((4113, 4139), 'tests.conftest.get_app', 'conftest.get_app', (['"""serial"""'], {}), "('serial')\n", (4129, 4139), False, 'from tests import conftest\n'), ((1973, 2015), 'itertools.product', 'itertools.product', (['[0, 1, 2, 3]'], {'repeat': '(10)'}), '([0, 1, 2, 3], repeat=10)\n', (1990, 2015), False, 'import itertools\n'), ((3398, 3440), 'itertools.product', 'itertools.product', (['[0, 1, 2, 3]'], {'repeat': '(10)'}), '([0, 1, 2, 3], repeat=10)\n', (3415, 3440), False, 'import itertools\n'), ((1867, 1878), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (1875, 1878), True, 'import numpy as np\n'), ((2610, 2662), 'nums.core.array.utils.can_broadcast_shape_to', 'array_utils.can_broadcast_shape_to', (['B.shape', 'A.shape'], {}), '(B.shape, A.shape)\n', (2644, 2662), True, 'from nums.core.array import utils as array_utils\n'), ((3292, 3303), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (3300, 3303), True, 'import numpy as np\n'), ((2447, 2499), 'nums.core.array.utils.broadcast_shape_to_alt', 'array_utils.broadcast_shape_to_alt', (['B.shape', 'A.shape'], {}), '(B.shape, A.shape)\n', (2481, 2499), True, 'from nums.core.array import utils as array_utils\n'), ((3647, 3692), 'nums.core.array.utils.broadcast_shape', 'array_utils.broadcast_shape', (['A.shape', 'B.shape'], {}), '(A.shape, B.shape)\n', (3674, 3692), True, 'from nums.core.array import utils as array_utils\n'), ((2410, 2437), 'numpy.broadcast_to', 'np.broadcast_to', (['B', 'A.shape'], {}), '(B, A.shape)\n', (2425, 2437), True, 'import numpy as np\n'), ((2780, 2805), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2793, 2805), False, 'import pytest\n'), ((2823, 2850), 'numpy.broadcast_to', 'np.broadcast_to', (['B', 'A.shape'], {}), '(B, A.shape)\n', (2838, 2850), True, 'import numpy as np\n'), ((2868, 2893), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2881, 2893), False, 'import pytest\n'), ((2911, 2963), 
'nums.core.array.utils.broadcast_shape_to_alt', 'array_utils.broadcast_shape_to_alt', (['B.shape', 'A.shape'], {}), '(B.shape, A.shape)\n', (2945, 2963), True, 'from nums.core.array import utils as array_utils\n'), ((2987, 3039), 'nums.core.array.utils.can_broadcast_shape_to', 'array_utils.can_broadcast_shape_to', (['B.shape', 'A.shape'], {}), '(B.shape, A.shape)\n', (3021, 3039), True, 'from nums.core.array import utils as array_utils\n'), ((3748, 3798), 'nums.core.array.utils.can_broadcast_shapes', 'array_utils.can_broadcast_shapes', (['B.shape', 'A.shape'], {}), '(B.shape, A.shape)\n', (3780, 3798), True, 'from nums.core.array import utils as array_utils\n'), ((3822, 3872), 'nums.core.array.utils.can_broadcast_shapes', 'array_utils.can_broadcast_shapes', (['A.shape', 'B.shape'], {}), '(A.shape, B.shape)\n', (3854, 3872), True, 'from nums.core.array import utils as array_utils\n'), ((3890, 3915), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3903, 3915), False, 'import pytest\n'), ((3933, 3978), 'nums.core.array.utils.broadcast_shape', 'array_utils.broadcast_shape', (['A.shape', 'B.shape'], {}), '(A.shape, B.shape)\n', (3960, 3978), True, 'from nums.core.array import utils as array_utils\n'), ((1921, 1938), 'numpy.product', 'np.product', (['shape'], {}), '(shape)\n', (1931, 1938), True, 'import numpy as np\n'), ((3346, 3363), 'numpy.product', 'np.product', (['shape'], {}), '(shape)\n', (3356, 3363), True, 'import numpy as np\n')] |
import os
import time
import argparse
import numpy as np
import cv2
from datetime import datetime
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import nnabla.logger as logger
import nnabla.utils.save as save
from nnabla.monitor import Monitor, MonitorSeries, MonitorImageTile
from dataset import prepare_dataloader
from model import depth_cnn_model, l1_loss
from auxiliary import convert_depth2colormap
def main(args):
    """Train the depth-estimation CNN.

    Builds the training graph, monitors, solver and data iterators, then
    runs the training loop, logging losses and saving a parameter
    checkpoint after each epoch.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line options: dataset_path, batch_size, img_height,
        img_width, optimizer ('adam' or 'sgd'), learning_rate, epochs,
        log_dir.

    Raises
    ------
    ValueError
        If ``args.optimizer`` is neither ``'adam'`` nor ``'sgd'``.
    """
    # Fix the NumPy RNG so runs are reproducible.
    from numpy.random import seed
    seed(46)

    # Get context.
    from nnabla.ext_utils import get_extension_context
    ctx = get_extension_context('cudnn', device_id='0', type_config='float')
    nn.set_default_context(ctx)

    # === TRAIN graph ===
    # Input variables: RGB image and single-channel depth label (NCHW).
    image = nn.Variable([args.batch_size, 3, args.img_height, args.img_width])
    label = nn.Variable([args.batch_size, 1, args.img_height, args.img_width])
    # Prediction graph; `persistent` keeps the output buffer alive so it can
    # be forwarded again for visualization after the loss pass.
    pred = depth_cnn_model(image, test=False)
    pred.persistent = True
    # L1 loss between predicted and ground-truth depth.
    loss = l1_loss(pred, label)

    # Prepare monitors.
    monitor = Monitor(os.path.join(args.log_dir, 'nnmonitor'))
    monitors = {
        'train_epoch_loss': MonitorSeries('Train epoch loss', monitor, interval=1),
        'train_itr_loss': MonitorSeries('Train itr loss', monitor, interval=100),
        'train_viz': MonitorImageTile('Train images', monitor, interval=1000,
                                      num_images=4)
    }

    # Create solver.  Fail fast on an unknown optimizer instead of hitting a
    # confusing NameError at solver.set_parameters below.
    if args.optimizer == "adam":
        solver = S.Adam(alpha=args.learning_rate, beta1=0.9, beta2=0.999)
    elif args.optimizer == "sgd":
        solver = S.Momentum(lr=args.learning_rate, momentum=0.9)
    else:
        raise ValueError("Unsupported optimizer: {}".format(args.optimizer))
    solver.set_parameters(nn.get_parameters())

    # Initialize DataIterator
    data_dic = prepare_dataloader(args.dataset_path,
                                  datatype_list=['train', 'val'],
                                  batch_size=args.batch_size,
                                  img_size=(args.img_height, args.img_width))

    # Training loop.
    logger.info("Start training!!!")
    total_itr_index = 0
    for epoch in range(1, args.epochs + 1):
        total_train_loss = 0
        index = 0
        while index < data_dic['train']['size']:
            # Forward pass on the next mini-batch.
            image.d, label.d = data_dic['train']['itr'].next()
            loss.forward(clear_no_need_grad=True)
            # Zero gradients, then backprop.
            solver.zero_grad()
            loss.backward(clear_buffer=True)
            # Explicit weight decay only for SGD; Adam is used without it.
            if args.optimizer == 'sgd':
                solver.weight_decay(1e-4)
            solver.update()
            # Bookkeeping.
            index += 1
            total_itr_index += 1
            total_train_loss += loss.d
            monitors['train_itr_loss'].add(total_itr_index, loss.d)
            # Visualization: tile the input image next to color-mapped
            # ground-truth and predicted depth maps.
            pred.forward(clear_buffer=True)
            train_viz = np.concatenate([image.d,
                                        convert_depth2colormap(label.d),
                                        convert_depth2colormap(pred.d)], axis=3)
            monitors['train_viz'].add(total_itr_index, train_viz)
            logger.info("[{}] {}/{} Train Loss {} ({})".format(
                epoch, index, data_dic['train']['size'],
                total_train_loss / index, loss.d))

        # Pass the mean training loss of this epoch to a monitor.
        train_error = total_train_loss / data_dic['train']['size']
        monitors['train_epoch_loss'].add(epoch, train_error)

        # Save parameters as a per-epoch checkpoint.
        out_param_file = os.path.join(args.log_dir, 'checkpoint' + str(epoch) + '.h5')
        nn.save_parameters(out_param_file)

        # TODO(review): the validation pass was disabled (commented out) in
        # the original code; re-enable it once the 'val' iterator is wired up.
if __name__ == "__main__":
    # Command-line interface for the training entry point.
    arg_parser = argparse.ArgumentParser('depth-cnn-nnabla')
    for flag, options in [
            ('--dataset-path', dict(type=str, default="~/datasets/nyudepthv2")),
            ('--batch-size', dict(type=int, default=8)),
            ('--img-height', dict(type=int, default=228)),
            ('--img-width', dict(type=int, default=304)),
            ('--optimizer', dict(type=str, default='sgd')),
            ('--learning-rate', dict(type=float, default=1e-3)),
            ('--epochs', dict(type=int, default=30)),
            ('--log-dir', dict(default='./log')),
    ]:
        arg_parser.add_argument(flag, **options)
    main(arg_parser.parse_args())
| [
"nnabla.save_parameters",
"numpy.random.seed",
"argparse.ArgumentParser",
"nnabla.ext_utils.get_extension_context",
"os.path.join",
"nnabla.monitor.MonitorSeries",
"model.l1_loss",
"nnabla.solvers.Adam",
"auxiliary.convert_depth2colormap",
"model.depth_cnn_model",
"nnabla.get_parameters",
"dat... | [((532, 540), 'numpy.random.seed', 'seed', (['(46)'], {}), '(46)\n', (536, 540), False, 'from numpy.random import seed\n'), ((626, 692), 'nnabla.ext_utils.get_extension_context', 'get_extension_context', (['"""cudnn"""'], {'device_id': '"""0"""', 'type_config': '"""float"""'}), "('cudnn', device_id='0', type_config='float')\n", (647, 692), False, 'from nnabla.ext_utils import get_extension_context\n'), ((697, 724), 'nnabla.set_default_context', 'nn.set_default_context', (['ctx'], {}), '(ctx)\n', (719, 724), True, 'import nnabla as nn\n'), ((813, 879), 'nnabla.Variable', 'nn.Variable', (['[args.batch_size, 3, args.img_height, args.img_width]'], {}), '([args.batch_size, 3, args.img_height, args.img_width])\n', (824, 879), True, 'import nnabla as nn\n'), ((892, 958), 'nnabla.Variable', 'nn.Variable', (['[args.batch_size, 1, args.img_height, args.img_width]'], {}), '([args.batch_size, 1, args.img_height, args.img_width])\n', (903, 958), True, 'import nnabla as nn\n'), ((1001, 1035), 'model.depth_cnn_model', 'depth_cnn_model', (['image'], {'test': '(False)'}), '(image, test=False)\n', (1016, 1035), False, 'from model import depth_cnn_model, l1_loss\n'), ((1102, 1122), 'model.l1_loss', 'l1_loss', (['pred', 'label'], {}), '(pred, label)\n', (1109, 1122), False, 'from model import depth_cnn_model, l1_loss\n'), ((2203, 2348), 'dataset.prepare_dataloader', 'prepare_dataloader', (['args.dataset_path'], {'datatype_list': "['train', 'val']", 'batch_size': 'args.batch_size', 'img_size': '(args.img_height, args.img_width)'}), "(args.dataset_path, datatype_list=['train', 'val'],\n batch_size=args.batch_size, img_size=(args.img_height, args.img_width))\n", (2221, 2348), False, 'from dataset import prepare_dataloader\n'), ((2473, 2505), 'nnabla.logger.info', 'logger.info', (['"""Start training!!!"""'], {}), "('Start training!!!')\n", (2484, 2505), True, 'import nnabla.logger as logger\n'), ((4876, 4919), 'argparse.ArgumentParser', 'argparse.ArgumentParser', 
(['"""depth-cnn-nnabla"""'], {}), "('depth-cnn-nnabla')\n", (4899, 4919), False, 'import argparse\n'), ((1434, 1473), 'os.path.join', 'os.path.join', (['args.log_dir', '"""nnmonitor"""'], {}), "(args.log_dir, 'nnmonitor')\n", (1446, 1473), False, 'import os\n'), ((1520, 1574), 'nnabla.monitor.MonitorSeries', 'MonitorSeries', (['"""Train epoch loss"""', 'monitor'], {'interval': '(1)'}), "('Train epoch loss', monitor, interval=1)\n", (1533, 1574), False, 'from nnabla.monitor import Monitor, MonitorSeries, MonitorImageTile\n'), ((1602, 1656), 'nnabla.monitor.MonitorSeries', 'MonitorSeries', (['"""Train itr loss"""', 'monitor'], {'interval': '(100)'}), "('Train itr loss', monitor, interval=100)\n", (1615, 1656), False, 'from nnabla.monitor import Monitor, MonitorSeries, MonitorImageTile\n'), ((1761, 1831), 'nnabla.monitor.MonitorImageTile', 'MonitorImageTile', (['"""Train images"""', 'monitor'], {'interval': '(1000)', 'num_images': '(4)'}), "('Train images', monitor, interval=1000, num_images=4)\n", (1777, 1831), False, 'from nnabla.monitor import Monitor, MonitorSeries, MonitorImageTile\n'), ((1954, 2010), 'nnabla.solvers.Adam', 'S.Adam', ([], {'alpha': 'args.learning_rate', 'beta1': '(0.9)', 'beta2': '(0.999)'}), '(alpha=args.learning_rate, beta1=0.9, beta2=0.999)\n', (1960, 2010), True, 'import nnabla.solvers as S\n'), ((2136, 2155), 'nnabla.get_parameters', 'nn.get_parameters', ([], {}), '()\n', (2153, 2155), True, 'import nnabla as nn\n'), ((4226, 4260), 'nnabla.save_parameters', 'nn.save_parameters', (['out_param_file'], {}), '(out_param_file)\n', (4244, 4260), True, 'import nnabla as nn\n'), ((2062, 2109), 'nnabla.solvers.Momentum', 'S.Momentum', ([], {'lr': 'args.learning_rate', 'momentum': '(0.9)'}), '(lr=args.learning_rate, momentum=0.9)\n', (2072, 2109), True, 'import nnabla.solvers as S\n'), ((3529, 3560), 'auxiliary.convert_depth2colormap', 'convert_depth2colormap', (['label.d'], {}), '(label.d)\n', (3551, 3560), False, 'from auxiliary import 
convert_depth2colormap\n'), ((3602, 3632), 'auxiliary.convert_depth2colormap', 'convert_depth2colormap', (['pred.d'], {}), '(pred.d)\n', (3624, 3632), False, 'from auxiliary import convert_depth2colormap\n')] |
import os
import numpy as np
import pandas as pd
import shapely.wkt
import pyproj
import pytest
from gisutils.projection import project, get_authority_crs
from gisutils.shapefile import (df2shp, shp2df, shp_properties, get_shapefile_crs,
rename_fields_to_10_characters)
def test_shp_properties():
    """shp_properties maps pandas dtypes to shapefile field type names."""
    frame = pd.DataFrame({'reach': [1], 'value': [1.0], 'name': ['stuff']}, index=[0])
    frame = frame[['name', 'reach', 'value']].copy()
    dtype_names = [dtype.name for dtype in frame.dtypes]
    assert dtype_names == ['object', 'int64', 'float64']
    expected = {'name': 'str', 'reach': 'int', 'value': 'float'}
    assert shp_properties(frame) == expected
def test_shp_integer_dtypes(test_output_path):
    """Integer columns must survive a shapefile round trip.

    Also verifies that pandas recasts numpy ints as python ints when
    converting to dict (numpy ints are invalid for shapefiles).
    """
    records = (pd.DataFrame(np.ones((3, 3)), dtype=int)
               .astype(object)
               .to_dict(orient='records'))
    for row in range(3):
        assert isinstance(records[row][0], int)
    original = pd.DataFrame({'r': np.arange(100), 'c': np.arange(100)})
    outfile = '{}/ints.dbf'.format(test_output_path)
    df2shp(original, outfile)
    roundtripped = shp2df(outfile)
    assert np.all(original == roundtripped)
def test_shp_boolean_dtypes(test_output_path):
    """Boolean columns round-trip through a shapefile via true/false strings."""
    original = pd.DataFrame([False, True]).transpose()
    original.columns = ['true', 'false']
    outfile = '{}/bool.dbf'.format(test_output_path)
    df2shp(original, outfile)
    roundtripped = shp2df(outfile, true_values='True', false_values='False')
    assert np.all(original == roundtripped)
def test_rename_fields_to_10_characters(test_output_path):
    """Long field names are truncated and deduplicated to 10 characters."""
    columns = ['atthelimit']
    columns += ['overthelimit', 'overtheli1', 'overthelimit2', 'overthelimit3',
                'tomanycharacters']
    columns += ['{}{}'.format(name, i)
                for i, name in enumerate(['tomanycharacters'] * 11)]
    # Expected: truncation to 10 chars, with trailing digits substituted to
    # keep the names unique.
    expected = ['atthelimit', 'overthelim', 'overtheli1', 'overtheli0',
                'overtheli2', 'tomanychar']
    expected += ['{}{}'.format('tomanycha', i) for i in range(10)]
    expected += ['tomanych10']
    renamed = rename_fields_to_10_characters(columns)
    assert set([len(name) for name in renamed]) == {10}
    assert renamed == expected
    # The renamed fields must also be what ends up in a written shapefile.
    outfile = '{}/fields.dbf'.format(test_output_path)
    frame = pd.DataFrame(dict(zip(columns, [[1, 2]] * len(columns))))
    df2shp(frame, outfile)
    written = shp2df(outfile)
    assert written.columns.tolist() == expected
@pytest.fixture(scope='module')
def eel_river_polygon(test_output_path):
    """Rectangular test polygon with EPSG:5070-style coordinates."""
    rectangle_wkt = ('POLYGON ((-2345010.181299999 2314860.9384, '
                     '-2292510.181299999 2314860.9384, -2292510.181299999 2281360.9384, '
                     '-2345010.181299999 2281360.9384, -2345010.181299999 2314860.9384))')
    return shapely.wkt.loads(rectangle_wkt)
@pytest.fixture(scope='module')
def eel_river_polygon_shapefile(test_output_path, eel_river_polygon):
    """Write the test polygon to a shapefile in EPSG:5070; return its path."""
    frame = pd.DataFrame({'geometry': [eel_river_polygon],
                          'id': [0]})
    shapefile_path = os.path.join(test_output_path, 'bbox.shp')
    # write out to 5070
    df2shp(frame, shapefile_path, epsg=5070)
    return shapefile_path
def test_get_shapefile_crs(eel_river_polygon_shapefile):
    """The CRS read back from the written shapefile is EPSG:5070."""
    read_back = get_shapefile_crs(eel_river_polygon_shapefile)
    assert read_back == pyproj.crs.CRS.from_epsg(5070)
# Destination CRS inputs exercised by test_shp2df_df2shp_crs below:
# no reprojection (None), an EPSG code, two "authority:code" strings,
# and a raw WKT string for an uncommon CRS.
_TEALE_ALBERS_WKT = ('PROJCS["NAD_1983_California_Teale_Albers",'
                     'GEOGCS["GCS_North_American_1983",'
                     'DATUM["D_North_American_1983",'
                     'SPHEROID["GRS_1980",6378137.0,298.257222101]],'
                     'PRIMEM["Greenwich",0.0],'
                     'UNIT["Degree",0.0174532925199433]],'
                     'PROJECTION["Albers"],'
                     'PARAMETER["False_Easting",0.0],'
                     'PARAMETER["False_Northing",-4000000.0],'
                     'PARAMETER["Central_Meridian",-120.0],'
                     'PARAMETER["Standard_Parallel_1",34.0],'
                     'PARAMETER["Standard_Parallel_2",40.5],'
                     'PARAMETER["Latitude_Of_Origin",0.0],'
                     'UNIT["Meter",1.0]]')

crs_test_params = (None, 5070, 'epsg:26910', 'epsg:4269', _TEALE_ALBERS_WKT)
@pytest.mark.parametrize('dest_crs', crs_test_params)
def test_shp2df_df2shp_crs(dest_crs, test_output_path, eel_river_polygon,
                           eel_river_polygon_shapefile, request):
    """Round-trip the polygon shapefile through each destination CRS.

    Reads into ``dest_crs``, reprojects back to EPSG:5070 to check the
    geometry survived, then rewrites the shapefile and verifies a valid
    projection file was produced.
    """
    # Read the shapefile into dest_crs (None keeps the stored CRS).
    frame = shp2df(eel_river_polygon_shapefile, dest_crs=dest_crs)
    if dest_crs is None:
        geoms = frame['geometry']
    else:
        # Reproject back to 5070 for comparison with the original polygon.
        geoms = project(frame['geometry'], dest_crs, 5070)
    assert geoms[0].almost_equals(eel_river_polygon)
    # Writing the polygon back out must produce a valid projection file.
    output_shapefile = os.path.join(test_output_path, 'results.shp')
    df2shp(frame, output_shapefile, crs=dest_crs)
    written_crs = get_shapefile_crs(output_shapefile)
    if dest_crs is not None:
        assert written_crs == get_authority_crs(dest_crs)
| [
"pandas.DataFrame",
"gisutils.shapefile.shp_properties",
"gisutils.shapefile.get_shapefile_crs",
"gisutils.projection.project",
"pytest.fixture",
"numpy.all",
"numpy.ones",
"gisutils.shapefile.rename_fields_to_10_characters",
"pyproj.crs.CRS.from_epsg",
"gisutils.projection.get_authority_crs",
"... | [((2332, 2362), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2346, 2362), False, 'import pytest\n'), ((2713, 2743), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2727, 2743), False, 'import pytest\n'), ((4232, 4284), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dest_crs"""', 'crs_test_params'], {}), "('dest_crs', crs_test_params)\n", (4255, 4284), False, 'import pytest\n'), ((333, 407), 'pandas.DataFrame', 'pd.DataFrame', (["{'reach': [1], 'value': [1.0], 'name': ['stuff']}"], {'index': '[0]'}), "({'reach': [1], 'value': [1.0], 'name': ['stuff']}, index=[0])\n", (345, 407), True, 'import pandas as pd\n'), ((1062, 1075), 'gisutils.shapefile.df2shp', 'df2shp', (['df', 'f'], {}), '(df, f)\n', (1068, 1075), False, 'from gisutils.shapefile import df2shp, shp2df, shp_properties, get_shapefile_crs, rename_fields_to_10_characters\n'), ((1086, 1095), 'gisutils.shapefile.shp2df', 'shp2df', (['f'], {}), '(f)\n', (1092, 1095), False, 'from gisutils.shapefile import df2shp, shp2df, shp_properties, get_shapefile_crs, rename_fields_to_10_characters\n'), ((1107, 1124), 'numpy.all', 'np.all', (['(df == df2)'], {}), '(df == df2)\n', (1113, 1124), True, 'import numpy as np\n'), ((1310, 1323), 'gisutils.shapefile.df2shp', 'df2shp', (['df', 'f'], {}), '(df, f)\n', (1316, 1323), False, 'from gisutils.shapefile import df2shp, shp2df, shp_properties, get_shapefile_crs, rename_fields_to_10_characters\n'), ((1334, 1385), 'gisutils.shapefile.shp2df', 'shp2df', (['f'], {'true_values': '"""True"""', 'false_values': '"""False"""'}), "(f, true_values='True', false_values='False')\n", (1340, 1385), False, 'from gisutils.shapefile import df2shp, shp2df, shp_properties, get_shapefile_crs, rename_fields_to_10_characters\n'), ((1397, 1414), 'numpy.all', 'np.all', (['(df == df2)'], {}), '(df == df2)\n', (1403, 1414), True, 'import numpy as np\n'), ((2013, 2052), 
'gisutils.shapefile.rename_fields_to_10_characters', 'rename_fields_to_10_characters', (['columns'], {}), '(columns)\n', (2043, 2052), False, 'from gisutils.shapefile import df2shp, shp2df, shp_properties, get_shapefile_crs, rename_fields_to_10_characters\n'), ((2251, 2264), 'gisutils.shapefile.df2shp', 'df2shp', (['df', 'f'], {}), '(df, f)\n', (2257, 2264), False, 'from gisutils.shapefile import df2shp, shp2df, shp_properties, get_shapefile_crs, rename_fields_to_10_characters\n'), ((2275, 2284), 'gisutils.shapefile.shp2df', 'shp2df', (['f'], {}), '(f)\n', (2281, 2284), False, 'from gisutils.shapefile import df2shp, shp2df, shp_properties, get_shapefile_crs, rename_fields_to_10_characters\n'), ((2823, 2881), 'pandas.DataFrame', 'pd.DataFrame', (["{'geometry': [eel_river_polygon], 'id': [0]}"], {}), "({'geometry': [eel_river_polygon], 'id': [0]})\n", (2835, 2881), True, 'import pandas as pd\n'), ((2919, 2961), 'os.path.join', 'os.path.join', (['test_output_path', '"""bbox.shp"""'], {}), "(test_output_path, 'bbox.shp')\n", (2931, 2961), False, 'import os\n'), ((2991, 3021), 'gisutils.shapefile.df2shp', 'df2shp', (['df', 'outfile'], {'epsg': '(5070)'}), '(df, outfile, epsg=5070)\n', (2997, 3021), False, 'from gisutils.shapefile import df2shp, shp2df, shp_properties, get_shapefile_crs, rename_fields_to_10_characters\n'), ((3110, 3156), 'gisutils.shapefile.get_shapefile_crs', 'get_shapefile_crs', (['eel_river_polygon_shapefile'], {}), '(eel_river_polygon_shapefile)\n', (3127, 3156), False, 'from gisutils.shapefile import df2shp, shp2df, shp_properties, get_shapefile_crs, rename_fields_to_10_characters\n'), ((3172, 3202), 'pyproj.crs.CRS.from_epsg', 'pyproj.crs.CRS.from_epsg', (['(5070)'], {}), '(5070)\n', (3196, 3202), False, 'import pyproj\n'), ((4459, 4513), 'gisutils.shapefile.shp2df', 'shp2df', (['eel_river_polygon_shapefile'], {'dest_crs': 'dest_crs'}), '(eel_river_polygon_shapefile, dest_crs=dest_crs)\n', (4465, 4513), False, 'from gisutils.shapefile import 
df2shp, shp2df, shp_properties, get_shapefile_crs, rename_fields_to_10_characters\n'), ((4927, 4972), 'os.path.join', 'os.path.join', (['test_output_path', '"""results.shp"""'], {}), "(test_output_path, 'results.shp')\n", (4939, 4972), False, 'import os\n'), ((4977, 5028), 'gisutils.shapefile.df2shp', 'df2shp', (['df_dest_crs', 'output_shapefile'], {'crs': 'dest_crs'}), '(df_dest_crs, output_shapefile, crs=dest_crs)\n', (4983, 5028), False, 'from gisutils.shapefile import df2shp, shp2df, shp_properties, get_shapefile_crs, rename_fields_to_10_characters\n'), ((5047, 5082), 'gisutils.shapefile.get_shapefile_crs', 'get_shapefile_crs', (['output_shapefile'], {}), '(output_shapefile)\n', (5064, 5082), False, 'from gisutils.shapefile import df2shp, shp2df, shp_properties, get_shapefile_crs, rename_fields_to_10_characters\n'), ((539, 557), 'gisutils.shapefile.shp_properties', 'shp_properties', (['df'], {}), '(df)\n', (553, 557), False, 'from gisutils.shapefile import df2shp, shp2df, shp_properties, get_shapefile_crs, rename_fields_to_10_characters\n'), ((4589, 4637), 'gisutils.projection.project', 'project', (["df_dest_crs['geometry']", 'dest_crs', '(5070)'], {}), "(df_dest_crs['geometry'], dest_crs, 5070)\n", (4596, 4637), False, 'from gisutils.projection import project, get_authority_crs\n'), ((973, 987), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (982, 987), True, 'import numpy as np\n'), ((994, 1008), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (1003, 1008), True, 'import numpy as np\n'), ((1184, 1211), 'pandas.DataFrame', 'pd.DataFrame', (['[False, True]'], {}), '([False, True])\n', (1196, 1211), True, 'import pandas as pd\n'), ((5142, 5169), 'gisutils.projection.get_authority_crs', 'get_authority_crs', (['dest_crs'], {}), '(dest_crs)\n', (5159, 5169), False, 'from gisutils.projection import project, get_authority_crs\n'), ((812, 827), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (819, 827), True, 'import numpy as np\n')] |
"""Metrics for the computation of a Lens summary"""
from __future__ import division
import logging
import time
from functools import wraps
from tdigest import TDigest
import numpy as np
from scipy import stats
from scipy import signal
import pandas as pd
from .utils import hierarchical_ordering_indices
# NOTE(review): DENSITY_N is not referenced in this part of the module;
# presumably the number of points used for density estimates -- confirm.
DENSITY_N = 100
# KS-test p-value threshold used by _test_logtrans to decide log-normality.
LOGNORMALITY_P_THRESH = 0.05
# Maximum fraction of distinct values (relative to non-null count) for a
# column to be considered categorical (see column_properties).
CAT_FRAC_THRESHOLD = 0.5
# Module-level logger with an explicit StreamHandler so messages are
# emitted even without root-logger configuration.
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
def timeit(func):
    """Wrap *func* so its wall-clock run time is recorded in its report.

    The wrapped callable runs unchanged; when it returns a report
    (anything other than ``None``), the elapsed seconds are stored under
    the ``"_run_time"`` key of that report.

    Parameters
    ----------
    func : callable
        The callable to execute.

    Returns
    -------
    callable
        Decorated function.
    """
    @wraps(func)
    def timed(*args, **kwargs):
        started = time.time()
        result = func(*args, **kwargs)
        if result is None:
            return None
        result["_run_time"] = time.time() - started
        return result
    return timed
@timeit
def row_count(df):
    """Count number of total and unique rows.

    Parameters
    ----------
    df : pd.DataFrame
        A DataFrame.

    Returns
    -------
    dict
        Dictionary with `total` and `unique` keys.
    """
    deduplicated = df.drop_duplicates()
    return {"total": len(df.index), "unique": len(deduplicated.index)}
@timeit
def column_properties(series):
    """Infer properties of a Pandas Series.

    Parameters
    ----------
    series : pd.Series
        Series to infer properties of.

    Returns
    -------
    dict
        ``{series.name: properties, "_columns": [series.name]}`` where
        properties holds dtype, null/non-null counts, numeric flag,
        unique count, and categorical/ID flags.
    """
    # Maximum number of distinct values per dtype for a categorical column.
    max_categories = {"object": 1000, "int64": 10, "float64": 10}
    name = series.name
    dtype_name = str(series.dtype)
    null_count = series.isnull().sum()
    notnulls = series.dropna()
    notnull_count = len(notnulls.index)
    unique = notnulls.unique().size
    props = {
        "dtype": dtype_name,
        "nulls": 0 if np.isnan(null_count) else int(null_count),
        "notnulls": notnull_count,
        "numeric": series.dtype in [np.float64, np.int64] and notnull_count > 0,
        "unique": unique,
        "is_categorical": False,
    }
    if dtype_name in {"object", "int64", "float64"} and notnull_count > 0:
        # In Pandas integers with nulls are cast as floats, so we have
        # to include floats as possible categoricals to detect
        # categorical integers.
        few_distinct = unique / notnull_count <= CAT_FRAC_THRESHOLD
        props["is_categorical"] = few_distinct and (
            unique <= max_categories[dtype_name]
        )
        logger.debug(
            "Column {:15}: {:6} unique, {:6} notnulls, {:6} total"
            " --> {}categorical".format(
                name,
                unique,
                notnull_count,
                notnull_count + props["nulls"],
                "NOT " * (not props["is_categorical"]),
            )
        )
    # Don't use the is_ID field for now:
    # it's too prone to false positives.
    # If a columns is wrongly identified as ID-like,
    # it doesn't get analyzed
    props["is_ID"] = False
    return {name: props, "_columns": [name]}
def _tdigest_mean(digest):
"""TODO
Parameters
----------
digest : tdigest.TDigest
t-digest data structure.
Returns
-------
TODO
"""
means = [c.mean for c in digest.C.values()]
counts = [c.count for c in digest.C.values()]
return np.average(means, weights=counts)
def _tdigest_std(digest):
    """Count-weighted standard deviation of a t-digest's centroids.

    Parameters
    ----------
    digest : tdigest.TDigest
        t-digest data structure.

    Returns
    -------
    float
        Standard deviation of the distribution approximated by the digest,
        normalized by the total count ``digest.n``.
    """
    center = _tdigest_mean(digest)
    squared_devs = [(c.mean - center) ** 2 * c.count for c in digest.C.values()]
    variance = np.sum(squared_devs) / digest.n
    return np.sqrt(variance)
def _tdigest_normalise(digest):
    """Build a new t-digest standardized to zero mean and unit variance.

    Parameters
    ----------
    digest : tdigest.TDigest
        t-digest data structure.

    Returns
    -------
    tdigest.TDigest
        New digest whose centroids are the originals shifted by the mean
        and scaled by the standard deviation.
    """
    center = _tdigest_mean(digest)
    spread = _tdigest_std(digest)
    standardized = TDigest()
    for centroid in digest.C.values():
        standardized.update((centroid.mean - center) / spread, centroid.count)
    return standardized
def _tdigest_norm_kstest(digest):
    """Kolmogorov-Smirnov comparison of a t-digest to the standard normal.

    The digest is standardized and its CDF is compared with the normal CDF
    on a grid of 500 points in [-3, 3].

    Parameters
    ----------
    digest : tdigest.TDigest
        t-digest data structure.

    Returns
    -------
    tuple
        ``(D, p)``: the KS statistic and its p-value (asymptotic
        distribution for n > 3000, small-sample one-sided formula
        otherwise).
    """
    standardized = _tdigest_normalise(digest)
    grid = np.linspace(-3, 3, 500)
    digest_cdf = np.array([standardized.cdf(point) for point in grid])
    normal_cdf = stats.norm.cdf(grid)
    ks_stat = np.max(np.abs(digest_cdf - normal_cdf))
    if digest.n > 3000:
        pvalue = stats.distributions.kstwobign.sf(ks_stat * np.sqrt(digest.n))
    else:
        pvalue = 2 * stats.distributions.ksone.sf(ks_stat, digest.n)
    return ks_stat, pvalue
def _test_logtrans(digest):
    """
    Test if t-digest distribution is more normal when log-transformed.

    A log-transformed copy of the digest is built from its centroids, and
    both versions are scored with a simplified two-sided Kolmogorov-Smirnov
    normality test (location and scale estimated from the data).  The
    transform is recommended only when the log version is strictly more
    normal and crosses the significance threshold while the linear version
    does not.

    Parameters
    ----------
    digest : tdigest.TDigest
        t-digest data structure.

    Returns
    -------
    bool
        True when a log-transform improves normality.
    """
    # Log-transform is undefined for non-positive data.
    if digest.percentile(0) <= 0:
        return False
    logdigest = TDigest()
    for centroid in digest.C.values():
        logdigest.update(np.log(centroid.mean), centroid.count)
    log_ks, log_p = _tdigest_norm_kstest(logdigest)
    lin_ks, lin_p = _tdigest_norm_kstest(digest)
    logger.debug(
        "KSnorm: log: {:.2g}, {:.2g}; linear: {:.2g}, {:.2g}".format(
            log_ks, log_p, lin_ks, lin_p
        )
    )
    crosses_threshold = (log_p > LOGNORMALITY_P_THRESH) and (
        lin_p < LOGNORMALITY_P_THRESH
    )
    return (log_ks < lin_ks) and (log_p > lin_p) and crosses_threshold
@timeit
def column_summary(series, column_props, delta=0.01):
    """Summarise a numeric column.

    Parameters
    ----------
    series : pd.Series
        Numeric column.
    column_props : dict
        Per-column properties (as returned by `column_properties`);
        must provide the "numeric", "notnulls" and "is_categorical"
        flags for this column.
    delta : float
        Accuracy/compression parameter forwarded to ``TDigest``.

    Returns
    -------
    dict or None
        ``{col: summary, "_columns": [col]}`` where ``summary`` contains
        basic statistics, percentiles, a log-transform flag, the
        compressed t-digest centroids, a histogram and a smoothed KDE.
        ``None`` when the column is not numeric or has no non-null values.
    """
    col = series.name
    if not column_props[col]["numeric"] or column_props[col]["notnulls"] == 0:
        # Series is not numeric or is all NaNs.
        return None
    logger.debug("column_summary - " + col)
    # select non-nulls from column
    data = series.dropna()
    colresult = {}
    # Basic aggregates; np.int64 is converted so the result is JSON-friendly.
    for m in ["mean", "min", "max", "std", "sum"]:
        val = getattr(data, m)()
        if type(val) is np.int64:
            colresult[m] = int(val)
        else:
            colresult[m] = val
    colresult["n"] = column_props[col]["notnulls"]
    percentiles = [0.1, 1, 10, 25, 50, 75, 90, 99, 99.9]
    colresult["percentiles"] = {
        perc: np.nanpercentile(series, perc) for perc in percentiles
    }
    colresult["median"] = colresult["percentiles"][50]
    colresult["iqr"] = (
        colresult["percentiles"][75] - colresult["percentiles"][25]
    )
    # Compute the t-digest.
    logger.debug("column_summary - {} - creating TDigest...".format(col))
    digest = TDigest(delta)
    digest.batch_update(data)
    logger.debug("column_summary - {} - testing log trans...".format(col))
    try:
        colresult["logtrans"] = bool(_test_logtrans(digest))
    except Exception as e:
        # Hard to pinpoint problems with the logtrans TDigest.
        logger.warning(
            "test_logtrans has failed for column `{}`: {}".format(col, e)
        )
        colresult["logtrans"] = False
    if colresult["logtrans"]:
        # Summarise the log-transformed distribution as well.
        logdigest = TDigest()
        for c in digest.C.values():
            logdigest.update(np.log(c.mean), c.count)
        colresult["logtrans_mean"] = _tdigest_mean(logdigest)
        colresult["logtrans_std"] = _tdigest_std(logdigest)
        colresult["logtrans_IQR"] = logdigest.percentile(
            75
        ) - logdigest.percentile(25)
    logger.debug(
        "column_summary - {} - should {}be log-transformed".format(
            col, "NOT " if not colresult["logtrans"] else ""
        )
    )
    # Compress and store the t-digest.
    digest.delta = delta
    digest.compress()
    colresult["tdigest"] = [(c.mean, c.count) for c in digest.C.values()]
    # Compute histogram
    logger.debug("column_summary - {} - computing histogram...".format(col))
    if column_props[col]["is_categorical"]:
        # Compute frequency table and store as histogram
        counts, edges = _compute_histogram_from_frequencies(data)
    else:
        if colresult["logtrans"]:
            # Bin in log-space, then map edges back to linear space.
            counts, log_edges = np.histogram(
                np.log10(data), density=False, bins="fd"
            )
            edges = 10 ** log_edges
        else:
            counts, edges = np.histogram(data, density=False, bins="fd")
    colresult["histogram"] = {
        "counts": counts.tolist(),
        "bin_edges": edges.tolist(),
    }
    # Compute KDE
    logger.debug("column_summary - {} - computing KDE...".format(col))
    bw = _bw_scott(colresult, colresult["n"], colresult["logtrans"], 1)
    logger.debug("column_summary - {} - KDE bw: {:.4g}".format(col, bw))
    if column_props[col]["is_categorical"]:
        # KDE is meaningless for categories; store a trivial placeholder.
        kde_x, kde_y = np.zeros(1), np.zeros(1)
    else:
        coord_range = colresult["min"], colresult["max"]
        kde_x, kde_y = _compute_smoothed_histogram(
            data, bw, coord_range, logtrans=colresult["logtrans"]
        )
    colresult["kde"] = {"x": kde_x.tolist(), "y": kde_y.tolist()}
    return {col: colresult, "_columns": [col]}
def _compute_histogram_from_frequencies(series):
    """Build histogram counts and edges from category frequencies.

    Uses the frequency table of *series* to produce a histogram data
    structure, inserting an explicitly empty bin wherever consecutive
    category values differ by more than 1.

    Parameters
    ----------
    series : pd.Series
        Categorical column.

    Returns
    -------
    counts, edges : np.ndarray
        Bin counts and bin edges (one more edge than counts).
    """
    freqs = _compute_frequencies(series)
    cats = sorted(freqs.keys())
    # Gap to the next category; the final category gets a nominal gap of 1.
    gaps = list(np.diff(cats)) + [1]
    edges = [cats[0] - 0.5]
    counts = []
    for cat, gap in zip(cats, gaps):
        if gap <= 1:
            edges.append(cat + gap / 2.0)
            counts.append(freqs[cat])
        else:
            # Close this category's bin, then add an empty bin for the gap.
            edges.extend([cat + 0.5, cat + gap - 0.5])
            counts.extend([freqs[cat], 0])
    return np.array(counts), np.array(edges)
def _compute_frequencies(series):
    """Map each category of a categorical column to its occurrence count.

    Integer and float category labels are converted to native Python
    ``int``/``float`` so the result is plain-Python serialisable.

    Parameters
    ----------
    series : pd.Series
        Categorical column.

    Returns
    -------
    dict
        Dictionary from category name to count.
    """
    counts = series.value_counts()
    index = counts.index
    if index.dtype == np.int64:
        keys = [int(label) for label in index]
    elif index.dtype == np.float64:
        keys = [float(label) for label in index]
    else:
        keys = index
    return dict(zip(keys, counts.values.tolist()))
@timeit
def frequencies(series, column_props):
    """Compute the frequency table of a categorical column.

    Parameters
    ----------
    series : pd.Series
        Categorical column.
    column_props : dict
        Dictionary as returned by `column_properties`.

    Returns
    -------
    dict or None
        ``{name: freqs, "_columns": [name]}`` for categorical columns,
        ``None`` otherwise.
    """
    name = series.name
    if not column_props[name]["is_categorical"]:
        return None
    logger.debug("frequencies - " + series.name)
    return {name: _compute_frequencies(series), "_columns": [name]}
@timeit
def outliers(series, column_summ):
    """Count mild and extreme outliers in a numeric column.

    Mild outliers lie between 1.5 and 3 interquartile ranges outside the
    quartiles; extreme outliers lie more than 3 IQRs outside.

    Parameters
    ----------
    series : pd.Series
        Numeric column.
    column_summ : dict or None
        Column summary as returned by `column_summary` (None for
        non-numeric columns).

    Returns
    -------
    dict or None
        ``{name: {"mild": [lo, hi], "extreme": [lo, hi]}, "_columns": [name]}``
        or ``None`` when the column is not numeric.
    """
    name = series.name
    if column_summ is None:
        # Not a numeric column.
        return None
    summ = column_summ[name]
    q1 = summ["percentiles"][25]
    q3 = summ["percentiles"][75]
    iqr = q3 - q1
    # Mild outlier limits.
    mild_lo = q1 - 1.5 * iqr
    mild_hi = q3 + 1.5 * iqr
    # Extreme outlier limits.
    extr_lo = q1 - 3.0 * iqr
    extr_hi = q3 + 3.0 * iqr
    valid = series.dropna()
    n_mild_lo = len(valid[(valid < mild_lo) & (valid > extr_lo)].index)
    n_mild_hi = len(valid[(valid > mild_hi) & (valid < extr_hi)].index)
    n_extr_lo = len(valid[valid < extr_lo].index)
    n_extr_hi = len(valid[valid > extr_hi].index)
    return {
        name: {
            "mild": [n_mild_lo, n_mild_hi],
            "extreme": [n_extr_lo, n_extr_hi],
        },
        "_columns": [name],
    }
@timeit
def correlation(df, column_props):
    """Compute correlation tables between non-ID numeric variables.

    Parameters
    ----------
    df : pd.DataFrame
        DataFrame.
    column_props : dict
        Per-column properties; the "numeric" and "is_ID" flags select
        which columns participate.

    Returns
    -------
    dict
        Pearson and Spearman correlation matrices (as nested lists), the
        participating column names, and a hierarchical ordering of the
        columns based on the Spearman matrix.
    """
    numeric_cols = [
        c
        for c in df.columns
        if column_props[c]["numeric"] and not column_props[c]["is_ID"]
    ]
    numdf = df[numeric_cols]
    pearson = numdf.corr(method="pearson", min_periods=5)
    spearman = numdf.corr(method="spearman", min_periods=5)
    return {
        "_columns": list(numdf.columns),
        "pearson": np.array(pearson).tolist(),
        "spearman": np.array(spearman).tolist(),
        "order": hierarchical_ordering_indices(numdf.columns, spearman.values),
    }
def _compute_smoothed_histogram(
    values, bandwidth, coord_range, logtrans=False
):
    """Approximate 1-D density estimation on an evenly-spaced grid.

    The data are quantised into a histogram over DENSITY_N bins, which is
    then convolved with a Gaussian kernel to approximate a KDE evaluated
    at the grid points.

    Parameters
    ----------
    values : np.array (N,)
        Data for which to estimate the density.
    bandwidth : float
        Desired KDE bandwidth (specify in log-space when log-transforming).
    coord_range : (2,)
        Minimum and maximum coordinate on which to evaluate the density.
    logtrans : bool
        Whether to log-transform the data before density estimation.

    Returns
    -------
    (np.array (M,), np.array (M,))
        Lower bin edges and the normalised density at those points.
    """
    if logtrans:
        log_range = [np.log10(extreme) for extreme in coord_range]
        bin_edges = np.logspace(*log_range, num=DENSITY_N + 1)
        edge_span = log_range[1] - log_range[0]
    else:
        bin_edges = np.linspace(*coord_range, num=DENSITY_N + 1)
        edge_span = coord_range[1] - coord_range[0]
    if values.size < 2:
        # Too few points for a meaningful estimate: return all-zero density.
        return bin_edges[:-1], np.zeros(bin_edges.shape[0] - 1)
    # Quantise the values onto the grid.
    hist = np.histogram(values, bin_edges)[0]
    kernel = _compute_gaussian_kernel(hist.shape, bandwidth / edge_span)
    pdf = signal.fftconvolve(hist, kernel, mode="same")
    # Return lower bin edges and the pdf normalised to unit integral.
    return bin_edges[:-1], pdf / np.trapz(pdf, bin_edges[:-1])
def _compute_smoothed_histogram2d(
    values, bandwidth, coord_ranges, logtrans=False
):
    """Approximate 2-D density estimation on an evenly-spaced grid.

    The data are quantised into a 2-D histogram over a DENSITY_N x
    DENSITY_N grid, which is then convolved with a 2-D Gaussian kernel to
    approximate the joint density at the grid points.

    Parameters
    ----------
    values : np.array (N, 2)
        Data points; values[:, 0] holds x coordinates, values[:, 1] y.
    bandwidth : array-like (2,)
        KDE bandwidths for the x and y axes (in log-space for axes that
        are log-transformed).
    coord_ranges : (2, 2)
        Minimum and maximum coordinates for each axis.
    logtrans : array-like (2,)
        Per-axis booleans: log-transform that axis before estimation.

    Returns
    -------
    (np.array, np.array, np.array)
        Lower x bin edges, lower y bin edges, and the density grid.
    """
    bin_edges = []
    edge_spans = []
    for low_high, use_log in zip(coord_ranges, logtrans):
        if use_log:
            log_range = [np.log10(extreme) for extreme in low_high]
            bin_edges.append(np.logspace(*log_range, num=DENSITY_N + 1))
            edge_spans.append(log_range[1] - log_range[0])
        else:
            bin_edges.append(np.linspace(*low_high, num=DENSITY_N + 1))
            edge_spans.append(low_high[1] - low_high[0])
    # Quantise the observations onto the 2-D grid.
    hist = np.histogram2d(values[:, 0], values[:, 1], bins=bin_edges)[0]
    relative_bw = [bw / span for bw, span in zip(bandwidth, edge_spans)]
    kernel = _compute_gaussian_kernel(hist.shape, relative_bw)
    pdf = signal.fftconvolve(hist.T, kernel, mode="same")
    # Normalise the pdf to unit integral over the grid.
    centers = [edges[:-1] + np.diff(edges) / 2.0 for edges in bin_edges]
    pdf /= np.trapz(np.trapz(pdf, centers[1]), centers[0])
    # Return lower bin edges and the density grid.
    return bin_edges[0][:-1], bin_edges[1][:-1], pdf
def _compute_gaussian_kernel(histogram_shape, relative_bw):
    """Build a Gaussian kernel twice the size of the histogram matrix."""
    if len(histogram_shape) != 2:
        # 1-D case: sample a normal pdf on a grid twice the histogram length.
        grid = np.mgrid[-1 : 1 : histogram_shape[0] * 2j]
        return stats.norm.pdf(grid, loc=0, scale=relative_bw)
    kernel_shape = [2 * n for n in histogram_shape]
    # Scale the grid so the kernel is symmetric, which avoids matrix
    # inversion problems when the two bandwidths differ a lot.
    bw_ratio = relative_bw[0] / relative_bw[1]
    bw = relative_bw[0]
    X, Y = np.mgrid[
        -bw_ratio : bw_ratio : kernel_shape[0] * 1j,
        -1 : 1 : kernel_shape[1] * 1j,
    ]
    grid_points = np.vstack([X.ravel(), Y.ravel()]).T
    cov = np.array(((bw, 0), (0, bw))) ** 2
    kernel = stats.multivariate_normal.pdf(grid_points, mean=(0, 0), cov=cov)
    return kernel.reshape(kernel_shape)
def _bw_scott(column_summ, N, logtrans, d):
    """Scott's rule-of-thumb bandwidth for a KDE kernel.

    Parameters
    ----------
    column_summ : dict
        Dictionary as returned by `column_summary`.
    N : int
        Number of elements in the series for which the KDE is to be
        evaluated.
    logtrans : bool
        Whether the series is assumed to be 'exponential' (True) or
        'linear' (False). An 'exponential' series (e.g. income) is
        log-transformed before the KDE, so the bandwidth is estimated
        from the log-transformed statistics.
    d : int
        Dimension of the KDE.

    Returns
    -------
    float
        Estimate of the kernel bandwidth for the KDE.
    """
    if N == 0:
        return 0
    # Interquartile range of a standard normal: ppf(0.75) - ppf(0.25).
    normal_iqr = 1.349
    if logtrans:
        std = column_summ["logtrans_std"]
        iqr = column_summ["logtrans_IQR"]
        factor = 2
    else:
        std = column_summ["std"]
        iqr = column_summ["iqr"]
        factor = 1.4
    # Robust spread estimate, falling back to std, then to 1.
    if iqr > 0:
        spread = min(iqr / normal_iqr, std)
    elif std > 0:
        spread = std
    else:
        spread = 1.0
    bandwidth = 1.06 * spread * N ** (-1.0 / (4.0 + d))
    return bandwidth / factor
@timeit
def pairdensity(df, column_props, column_summ, freq, log_transform=True):
    """Compute a variable pair heatmap.

    Parameters
    ----------
    df : pd.DataFrame
        DataFrame with the two columns for which the pair density is
        computed.
    column_props : dict
        Column properties dictionary with at least col1 and col2, as
        returned by `column_properties`.
    column_summ : dict
        Column summary dictionary with at least col1 and col2, as
        returned by `column_summary`.
    freq : dict
        Frequencies dictionary with at least col1 and col2.
    log_transform : bool
        Whether to compute the KDE in log-space when needed.
        (Currently unused by this implementation.)

    Returns
    -------
    dict or None
        ``{"_columns": [col1, col2], col1: {col2: {...}}}`` where the
        inner dict holds the density grid, the axes values, the KDE
        bandwidths and the per-axis scales; ``None`` when either column
        is neither categorical nor numeric, or has no non-null values.
    """
    col1, col2 = df.columns
    # Test that both columns have valid entries and are either
    # categorical or numeric, returning None if not.
    column_props = {col: column_props[col][col] for col in [col1, col2]}
    for col in [col1, col2]:
        if (
            not (
                column_props[col]["is_categorical"]
                or column_props[col]["numeric"]
            )
            or column_props[col]["notnulls"] == 0
        ):
            return None
    report = {"_columns": [col1, col2], col1: {}}
    log_string = "pairdensity - {} - {}".format(col1, col2)
    logger.debug("{}".format(log_string))
    data = df.dropna()
    N = len(data.index)
    # Determine, per column, the axis scale, coordinate range and (for
    # categorical columns) the sorted category values.
    coord_ranges, scales, categories = [], [], []
    bandwidths = [None, None]
    for col in [col1, col2]:
        if column_props[col]["is_categorical"]:
            scales.append("category")
            coord_ranges.append(None)
            categories.append(sorted(list(freq[col][col].keys())))
        else:
            scales.append(
                "log" if column_summ[col][col]["logtrans"] else "linear"
            )
            coord_ranges.append(
                [column_summ[col][col][extreme] for extreme in ["min", "max"]]
            )
            categories.append(None)
    Ncat = np.sum([scale == "category" for scale in scales])
    if N == 0:
        logger.warning("{}: No valid pairs found!".format(log_string))
    if Ncat == 0:
        # Both columns numeric: full 2-D KDE.
        # 2D pair density is not useful with very few observations
        if N > 3:
            logtrans = [scale == "log" for scale in scales]
            bandwidths = [
                _bw_scott(column_summ[col][col], N, lt, 2 - Ncat)
                for col, lt in zip([col1, col2], logtrans)
            ]
            x, y, density = _compute_smoothed_histogram2d(
                np.array(data), bandwidths, coord_ranges, logtrans=logtrans
            )
            x, y = x.tolist(), y.tolist()
        else:
            x, y = coord_ranges
            density = np.zeros((2, 2))
    elif Ncat == 1:
        # Split into categories and do a univariate KDE on each.
        if column_props[col1]["is_categorical"]:
            cats = categories[0]
            coord_range = coord_ranges[1]
            catcol, numcol, numcolsum = col1, col2, column_summ[col2][col2]
            logtrans = scales[1] == "log"
        else:
            cats = categories[1]
            coord_range = coord_ranges[0]
            catcol, numcol, numcolsum = col2, col1, column_summ[col1][col1]
            logtrans = scales[0] == "log"
        density = []
        for cat in cats:
            # Filter data for this category.
            datacat = data[data[catcol] == cat][numcol]
            Nincat = datacat.count()
            # Recompute the bandwidth because the number of pairs in
            # this category might be lower than the total number of
            # pairs.
            num_bw = _bw_scott(numcolsum, Nincat, logtrans, 1)
            grid, catdensity = _compute_smoothed_histogram(
                datacat, num_bw, coord_range, logtrans=logtrans
            )
            # Remove normalisation to normalise it later to the total
            # number of pairs.
            density.append(catdensity * Nincat)
        density = np.array(density) / N
        if column_props[col1]["is_categorical"]:
            density = density.T
            x, y = cats, grid.tolist()
        else:
            x, y = grid.tolist(), cats
    elif Ncat == 2:
        # Both columns categorical: density is the cross-tabulation.
        if N > 0:
            # Crosstab frequencies.
            dfcs = (
                pd.crosstab(data[col2], data[col1])
                .sort_index(axis=0)
                .sort_index(axis=1)
            )
            x = [str(column) for column in dfcs.columns]
            if "" in x:
                x[x.index("")] = " Null"
            y = [str(index) for index in dfcs.index]
            if "" in y:
                y[y.index("")] = " Null"
            # `.get_values()` was removed in pandas 1.0; `.to_numpy()`
            # (available since 0.24) is the supported replacement.
            density = dfcs.to_numpy()
        else:
            x, y = categories
            density = np.zeros((len(x), len(y)))
    report[col1][col2] = {
        "density": density.tolist(),
        "axes": {col1: x, col2: y},
        "bw": bandwidths,
        "scales": scales,
    }
    return report
| [
"numpy.nanpercentile",
"numpy.sum",
"numpy.abs",
"numpy.logspace",
"numpy.isnan",
"numpy.histogram",
"scipy.signal.fftconvolve",
"numpy.histogram2d",
"scipy.stats.norm.cdf",
"scipy.stats.distributions.ksone.sf",
"numpy.linspace",
"tdigest.TDigest",
"numpy.log10",
"numpy.trapz",
"numpy.av... | [((389, 416), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (406, 416), False, 'import logging\n'), ((435, 458), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (456, 458), False, 'import logging\n'), ((714, 725), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (719, 725), False, 'from functools import wraps\n'), ((3483, 3516), 'numpy.average', 'np.average', (['means'], {'weights': 'counts'}), '(means, weights=counts)\n', (3493, 3516), True, 'import numpy as np\n'), ((4094, 4103), 'tdigest.TDigest', 'TDigest', ([], {}), '()\n', (4101, 4103), False, 'from tdigest import TDigest\n'), ((4441, 4464), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(500)'], {}), '(-3, 3, 500)\n', (4452, 4464), True, 'import numpy as np\n'), ((4533, 4550), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['x'], {}), '(x)\n', (4547, 4550), False, 'from scipy import stats\n'), ((5323, 5332), 'tdigest.TDigest', 'TDigest', ([], {}), '()\n', (5330, 5332), False, 'from tdigest import TDigest\n'), ((7046, 7060), 'tdigest.TDigest', 'TDigest', (['delta'], {}), '(delta)\n', (7053, 7060), False, 'from tdigest import TDigest\n'), ((15254, 15291), 'scipy.signal.fftconvolve', 'signal.fftconvolve', (['H', 'K'], {'mode': '"""same"""'}), "(H, K, mode='same')\n", (15272, 15291), False, 'from scipy import signal\n'), ((17527, 17566), 'scipy.signal.fftconvolve', 'signal.fftconvolve', (['H.T', 'K'], {'mode': '"""same"""'}), "(H.T, K, mode='same')\n", (17545, 17566), False, 'from scipy import signal\n'), ((22039, 22090), 'numpy.sum', 'np.sum', (["[(scale == 'category') for scale in scales]"], {}), "([(scale == 'category') for scale in scales])\n", (22045, 22090), True, 'import numpy as np\n'), ((779, 790), 'time.time', 'time.time', ([], {}), '()\n', (788, 790), False, 'import time\n'), ((4567, 4589), 'numpy.abs', 'np.abs', (['(dig_q - norm_q)'], {}), '(dig_q - norm_q)\n', (4573, 4589), True, 'import numpy as np\n'), ((6715, 6745), 
'numpy.nanpercentile', 'np.nanpercentile', (['series', 'perc'], {}), '(series, perc)\n', (6731, 6745), True, 'import numpy as np\n'), ((7524, 7533), 'tdigest.TDigest', 'TDigest', ([], {}), '()\n', (7531, 7533), False, 'from tdigest import TDigest\n'), ((10371, 10387), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (10379, 10387), True, 'import numpy as np\n'), ((10389, 10404), 'numpy.array', 'np.array', (['edges'], {}), '(edges)\n', (10397, 10404), True, 'import numpy as np\n'), ((14704, 14740), 'numpy.logspace', 'np.logspace', (['*ber'], {'num': '(DENSITY_N + 1)'}), '(*ber, num=DENSITY_N + 1)\n', (14715, 14740), True, 'import numpy as np\n'), ((14812, 14856), 'numpy.linspace', 'np.linspace', (['*coord_range'], {'num': '(DENSITY_N + 1)'}), '(*coord_range, num=DENSITY_N + 1)\n', (14823, 14856), True, 'import numpy as np\n'), ((15107, 15138), 'numpy.histogram', 'np.histogram', (['values', 'bin_edges'], {}), '(values, bin_edges)\n', (15119, 15138), True, 'import numpy as np\n'), ((17318, 17376), 'numpy.histogram2d', 'np.histogram2d', (['values[:, 0]', 'values[:, 1]'], {'bins': 'bin_edges'}), '(values[:, 0], values[:, 1], bins=bin_edges)\n', (17332, 17376), True, 'import numpy as np\n'), ((17685, 17714), 'numpy.trapz', 'np.trapz', (['pdf', 'bin_centers[1]'], {}), '(pdf, bin_centers[1])\n', (17693, 17714), True, 'import numpy as np\n'), ((18534, 18598), 'scipy.stats.multivariate_normal.pdf', 'stats.multivariate_normal.pdf', (['grid_points'], {'mean': '(0, 0)', 'cov': 'Cov'}), '(grid_points, mean=(0, 0), cov=Cov)\n', (18563, 18598), False, 'from scipy import stats\n'), ((18722, 18768), 'scipy.stats.norm.pdf', 'stats.norm.pdf', (['grid'], {'loc': '(0)', 'scale': 'relative_bw'}), '(grid, loc=0, scale=relative_bw)\n', (18736, 18768), False, 'from scipy import stats\n'), ((1820, 1835), 'numpy.isnan', 'np.isnan', (['nulls'], {}), '(nulls)\n', (1828, 1835), True, 'import numpy as np\n'), ((3815, 3827), 'numpy.sum', 'np.sum', (['sums'], {}), '(sums)\n', (3821, 
3827), True, 'import numpy as np\n'), ((5390, 5404), 'numpy.log', 'np.log', (['c.mean'], {}), '(c.mean)\n', (5396, 5404), True, 'import numpy as np\n'), ((8690, 8734), 'numpy.histogram', 'np.histogram', (['data'], {'density': '(False)', 'bins': '"""fd"""'}), "(data, density=False, bins='fd')\n", (8702, 8734), True, 'import numpy as np\n'), ((9149, 9160), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (9157, 9160), True, 'import numpy as np\n'), ((9162, 9173), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (9170, 9173), True, 'import numpy as np\n'), ((10031, 10050), 'numpy.diff', 'np.diff', (['categories'], {}), '(categories)\n', (10038, 10050), True, 'import numpy as np\n'), ((13205, 13220), 'numpy.array', 'np.array', (['pcorr'], {}), '(pcorr)\n', (13213, 13220), True, 'import numpy as np\n'), ((13255, 13270), 'numpy.array', 'np.array', (['scorr'], {}), '(scorr)\n', (13263, 13270), True, 'import numpy as np\n'), ((14638, 14655), 'numpy.log10', 'np.log10', (['extreme'], {}), '(extreme)\n', (14646, 14655), True, 'import numpy as np\n'), ((15044, 15076), 'numpy.zeros', 'np.zeros', (['(bin_edges.shape[0] - 1)'], {}), '(bin_edges.shape[0] - 1)\n', (15052, 15076), True, 'import numpy as np\n'), ((15378, 15407), 'numpy.trapz', 'np.trapz', (['pdf', 'bin_edges[:-1]'], {}), '(pdf, bin_edges[:-1])\n', (15386, 15407), True, 'import numpy as np\n'), ((18488, 18516), 'numpy.array', 'np.array', (['((bw, 0), (0, bw))'], {}), '(((bw, 0), (0, bw)))\n', (18496, 18516), True, 'import numpy as np\n'), ((22768, 22784), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (22776, 22784), True, 'import numpy as np\n'), ((895, 906), 'time.time', 'time.time', ([], {}), '()\n', (904, 906), False, 'import time\n'), ((4722, 4763), 'scipy.stats.distributions.ksone.sf', 'stats.distributions.ksone.sf', (['D', 'digest.n'], {}), '(D, digest.n)\n', (4750, 4763), False, 'from scipy import stats\n'), ((7599, 7613), 'numpy.log', 'np.log', (['c.mean'], {}), '(c.mean)\n', (7605, 7613), 
True, 'import numpy as np\n'), ((8557, 8571), 'numpy.log10', 'np.log10', (['data'], {}), '(data)\n', (8565, 8571), True, 'import numpy as np\n'), ((16988, 17005), 'numpy.log10', 'np.log10', (['extreme'], {}), '(extreme)\n', (16996, 17005), True, 'import numpy as np\n'), ((17058, 17094), 'numpy.logspace', 'np.logspace', (['*ber'], {'num': '(DENSITY_N + 1)'}), '(*ber, num=DENSITY_N + 1)\n', (17069, 17094), True, 'import numpy as np\n'), ((17187, 17226), 'numpy.linspace', 'np.linspace', (['*minmax'], {'num': '(DENSITY_N + 1)'}), '(*minmax, num=DENSITY_N + 1)\n', (17198, 17226), True, 'import numpy as np\n'), ((17620, 17634), 'numpy.diff', 'np.diff', (['edges'], {}), '(edges)\n', (17627, 17634), True, 'import numpy as np\n'), ((22583, 22597), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (22591, 22597), True, 'import numpy as np\n'), ((24034, 24051), 'numpy.array', 'np.array', (['density'], {}), '(density)\n', (24042, 24051), True, 'import numpy as np\n'), ((4671, 4688), 'numpy.sqrt', 'np.sqrt', (['digest.n'], {}), '(digest.n)\n', (4678, 4688), True, 'import numpy as np\n'), ((24342, 24377), 'pandas.crosstab', 'pd.crosstab', (['data[col2]', 'data[col1]'], {}), '(data[col2], data[col1])\n', (24353, 24377), True, 'import pandas as pd\n')] |
import numpy as np
from scipy import sparse
from ._igl_ext import exact_geodesic
from ..vector import veclen, normalized, sq_veclen
from .laplacian import compute_mesh_laplacian
from .gradient import gradient_op
from .div import div_op
# Prefer CHOLMOD's sparse Cholesky factorization when scikit-sparse is
# available; otherwise fall back to scipy's (slower) LU factorization.
# Either way, `factorized(A)` returns a callable solver for A x = b.
try:
    from sksparse.cholmod import cholesky
    factorized = lambda A: cholesky(A, mode='simplicial')
except ImportError:
    print("CHOLMOD not found - trying to use slower LU factorization from scipy")
    print("install scikits.sparse to use the faster cholesky factorization")
    from scipy.sparse.linalg import factorized
class GeodesicDistanceComputation(object):
    """
    Computation of geodesic distances on triangle meshes using the heat method from the impressive paper

        Geodesics in Heat: A New Approach to Computing Distance Based on Heat Flow
        <NAME>, <NAME>, <NAME>
        ACM Transactions on Graphics (SIGGRAPH 2013)

    Example usage:
        $ compute_distance = GeodesicDistanceComputation(vertices, triangles)
        $ distance_of_each_vertex_to_vertex_0 = compute_distance(0)
    """

    def __init__(self, verts, tris, m=1.0):
        """
        Parameters
        ----------
        verts : array
            Mesh vertex positions, indexed by the entries of ``tris``.
        tris : int array
            Triangle vertex indices into ``verts``, three per triangle.
        m : float
            Multiplier for the heat-flow time step ``t = m * h**2``,
            where ``h`` is the mean edge length of the mesh.
        """
        self._verts = verts
        self._tris = tris
        # precompute some stuff needed later on
        self._grad = gradient_op(verts, tris)
        self._div = div_op(verts, tris)
        # the three edge vectors of each triangle
        e01 = verts[tris[:,1]] - verts[tris[:,0]]
        e12 = verts[tris[:,2]] - verts[tris[:,1]]
        e20 = verts[tris[:,0]] - verts[tris[:,2]]
        # parameters for heat method
        h = np.mean(list(map(veclen, [e01, e12, e20])))
        t = m * h ** 2
        # pre-factorize poisson systems
        Lc, vertex_area = compute_mesh_laplacian(verts, tris, area_type='lumped_mass')
        # TODO: could actually compute: Lc = self._div * self._grad
        A = sparse.spdiags(vertex_area, 0, len(verts), len(verts))
        #self._factored_AtLc = splu((A - t * Lc).tocsc()).solve
        # solver for the backward-Euler heat step (A - t*Lc) u = u0
        self._factored_AtLc = factorized((A - t * Lc).tocsc())
        #self._factored_L = splu(Lc.tocsc()).solve
        # solver for the final Poisson system Lc * phi = div(X)
        self._factored_L = factorized(Lc.tocsc())

    def __call__(self, idx):
        """
        computes geodesic distances to all vertices in the mesh
        idx can be either an integer (single vertex index) or a list of vertex indices
        or an array of bools of length n (with n the number of vertices in the mesh)

        Returns an array with one (approximate geodesic) distance per vertex,
        zero at the source region.
        """
        u0 = np.zeros(len(self._verts))
        u0[idx] = 1.0
        # -- heat method, step 1: diffuse the indicator of the source region
        u = self._factored_AtLc(u0).ravel()
        # running heat flow with multiple sources results in heat flowing
        # into the source region. So just set the source region to the constrained value.
        u[idx] = 1
        # I tried solving the equality-constrained quadratic program that would fix this
        # during the solve, but that did not seem to yield a lower error
        # (but it meant that prefactorization is not straightforward)
        # The QP solution would look something like:
        # from scipy import sparse
        # from cgtools.indexing import sparse_indicator_matrix
        # I = sparse_indicator_matrix(idx, self._verts.shape[0])
        # Q = sparse.bmat([(self.A - self.t * self.Lc, I.T),
        #                  (I, None)])
        # u = sparse.linalg.spsolve(Q, np.concatenate((u0, np.ones(I.shape[0]))))[:self._verts.shape[0]]
        # -- heat method step 2: compute gradients & normalize
        # additional normalization accross triangles helps overall numerical stability
        n_u = 1. / (u[self._tris].sum(axis=1))
        # compute gradient
        grad_u = (self._grad * u).reshape(-1, 3) * n_u[:, np.newaxis]
        # normalize gradient
        with np.errstate(all='ignore'):
            X = grad_u / veclen(grad_u)[:, np.newaxis]
            # zero-length gradients produce NaNs; treat them as zero vectors
            X = np.nan_to_num(X, copy=False)
        # -- heat method step 3: solve poisson system
        div_Xs = self._div * X.ravel()
        phi = self._factored_L(div_Xs).ravel()
        # transform to distances
        phi = phi - phi.min()
        phi = phi.max() - phi
        return phi
| [
"numpy.errstate",
"numpy.nan_to_num",
"sksparse.cholmod.cholesky"
] | [((313, 343), 'sksparse.cholmod.cholesky', 'cholesky', (['A'], {'mode': '"""simplicial"""'}), "(A, mode='simplicial')\n", (321, 343), False, 'from sksparse.cholmod import cholesky\n'), ((3681, 3706), 'numpy.errstate', 'np.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (3692, 3706), True, 'import numpy as np\n'), ((3779, 3807), 'numpy.nan_to_num', 'np.nan_to_num', (['X'], {'copy': '(False)'}), '(X, copy=False)\n', (3792, 3807), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# https://stackoverflow.com/questions/48213884/transparent-error-bars-without-affecting-markers
# https://matplotlib.org/stable/gallery/text_labels_and_annotations/text_alignment.html
import os, pickle, argparse, yaml
import numpy as np, matplotlib.pyplot as plt, matplotlib as mpl
import util_bwopt_plotter as u
from collections import defaultdict
def main():
    """Load mesh error data referenced by the YAML config, aggregate the
    angular/norm errors per mixing time, and save error-bar plots for the
    six most common mixing times."""
    arg = parse_arg(); logdir = os.path.dirname(arg.cfg)
    with open(os.path.join(arg.cfg), 'r') as f:
        cfg = yaml.load(f)
    print(cfg)
    print('loading...')
    with open(os.path.join(logdir, 'data', cfg['mesh_fname']), 'rb') as f:
        meshdata = pickle.load(f)
    print(meshdata.keys()); print(meshdata['cfg'])
    tmix_list = list(meshdata['tmix'].values())
    # Tally each mixing time: (tmix, count, ratio, cumulative ratio).
    ratio_cum = 0.; tmixrat_list = []
    for i, tmix in enumerate(sorted(list(set(tmix_list)))):
        n = tmix_list.count(tmix); ratio = n/len(tmix_list); ratio_cum += ratio
        tmixrat_list.append((tmix, n, ratio, ratio_cum))
        print('{} tmix {}: {} {:.3f} {:.3f}'.format(i+1, tmix, n, ratio, ratio_cum))
    print('most common tmix list')
    # Sort mixing times by occurrence count, most common first.
    tmixrat_list = sorted(tmixrat_list, key=lambda entry: entry[1], reverse=True)
    for i, tmixrat in enumerate(tmixrat_list):
        print(i+1, tmixrat)
    u.print_stat(np.array(tmix_list), tag='tmix')
    print('organize...')
    # Group each error series by the mixing time of its policy.
    data = {}
    key_list = ['premix_angerr', 'prepostmix_angerr', 'premix_normerr', 'prepostmix_normerr',
        'postmix_angerr', 'postmix_normerr']
    for k, v in meshdata.items():
        if k not in key_list:
            continue
        data[k] = defaultdict(list)
        for kk, vv in v.items():
            data[k][meshdata['tmix'][kk]].append(vv) # kk key: tmix target
    print('plotting...')
    # Produce one plot per mixing time, for the six most common ones.
    for i, tmixrat in enumerate(tmixrat_list[0:6]):
        tmix, n, ratio, ratio_cum = tmixrat
        print('plotting', i+1, tmix, n, ratio, ratio_cum)
        # Per-sample mean and std of the angular errors.
        premix_angerr = np.array(data['premix_angerr'][tmix])
        postmix_angerr = np.array(data['postmix_angerr'][tmix])
        prepostmix_angerr = np.array(data['prepostmix_angerr'][tmix])
        premix_angerr_mean = premix_angerr.mean(axis=u.sample_dimth)
        premix_angerr_std = premix_angerr.std(axis=u.sample_dimth)
        postmix_angerr_mean = postmix_angerr.mean(axis=u.sample_dimth)
        postmix_angerr_std = postmix_angerr.std(axis=u.sample_dimth)
        prepostmix_angerr_mean = prepostmix_angerr.mean(axis=u.sample_dimth)
        prepostmix_angerr_std = prepostmix_angerr.std(axis=u.sample_dimth)
        # Per-sample mean and std of the norm errors.
        premix_normerr = np.array(data['premix_normerr'][tmix])
        postmix_normerr = np.array(data['postmix_normerr'][tmix])
        prepostmix_normerr = np.array(data['prepostmix_normerr'][tmix])
        premix_normerr_mean = premix_normerr.mean(axis=u.sample_dimth)
        premix_normerr_std = premix_normerr.std(axis=u.sample_dimth)
        postmix_normerr_mean = postmix_normerr.mean(axis=u.sample_dimth)
        postmix_normerr_std = postmix_normerr.std(axis=u.sample_dimth)
        prepostmix_normerr_mean = prepostmix_normerr.mean(axis=u.sample_dimth)
        prepostmix_normerr_std = prepostmix_normerr.std(axis=u.sample_dimth)
        # Angular error series with the pre+post-mix value appended last.
        y = premix_angerr_mean.tolist() + [prepostmix_angerr_mean]
        y = [yi*(180./np.pi) for yi in y] # to degree
        yerr = premix_angerr_std.tolist() + [prepostmix_angerr_std]
        yerr = [yerr_i*(180./np.pi) for yerr_i in yerr]
        x = range(len(y))
        # Norm error series on the secondary axis.
        y2 = premix_normerr_mean.tolist() + [prepostmix_normerr_mean]
        yerr2 = premix_normerr_std.tolist() + [prepostmix_normerr_std]
        assert np.isfinite(y).all()
        fig, ax = plt.subplots(figsize=(12, 9)); ax2 = ax.twinx()
        markers, caps, bars = ax.errorbar(x, y, yerr=yerr, color='blue', marker='', linestyle='-',
            linewidth=3, ecolor='blue', elinewidth=10)
        markers2, caps2, bars2 = ax2.errorbar(x, y2, yerr=yerr2, color='red', marker='', linestyle='-',
            linewidth=3, ecolor='red', elinewidth=3, capsize=3, capthick=3)
        # Mark the post-mix values at the last timestep.
        ax.plot(len(x) - 1, postmix_angerr_mean, color='blue', marker='X', markersize=15)
        ax2.plot(len(x) - 1, postmix_normerr_mean, color='red', marker='X', markersize=15)
        fontsize = 45
        info = 'n(policy)= {} ({:.3f})\nt(mixing)= {}'.format(n, ratio, tmix)
        # Axes-relative anchor for the annotation text (top-right corner).
        left, width = .25, .67; bottom, height = .25, .70
        right = left + width; top = bottom + height
        ax.text(right, top, info,
            fontdict={'size': fontsize, 'family': 'serif', 'color': 'black', 'weight': 'normal'},
            horizontalalignment='right', verticalalignment='top', transform=ax.transAxes)
        # Zero-reference lines on both y-axes; restore xlims afterwards.
        xlim = ax.get_xlim()
        ax.hlines(0., *xlim, linewidth=1, alpha=1.0, linestyle='--', color='blue')
        ax.set_xlim(xlim)
        xlim2 = ax2.get_xlim()
        ax2.hlines(0., *xlim2, linewidth=1, alpha=1.0, linestyle='--', color='red')
        ax2.set_xlim(xlim2)
        fontsize = 25
        if cfg['xlabel']:
            ax.set_xlabel('$t$-th timestep so far', fontsize=fontsize)
        if cfg['yleftlabel']:
            ax.set_ylabel('Angular error (deg)', color='blue', fontsize=fontsize)
        if cfg['yrightlabel']:
            ax2.set_ylabel('Norm error', color='red', fontsize=fontsize)
        if ('xticks' in cfg.keys()) and (tmix in cfg['xticks'].keys()):
            ax.set_xticks(cfg['xticks'][tmix])
        ax.tick_params(axis='x', labelsize=fontsize)
        ax.tick_params(axis='y', labelcolor='blue', labelsize=fontsize)
        # Fade the error bars so the markers stay visible.
        _ = [bar.set_alpha(0.25) for bar in bars]
        ax2.tick_params(axis='y', labelcolor='red', labelsize=fontsize)
        _ = [bar.set_alpha(0.25) for bar in bars2]
        _ = [cap.set_alpha(0.25) for cap in caps2]
        envid = u.get_shortenvid(meshdata['cfg']['envid'])
        polnet = meshdata['cfg']['polnet']['mode']
        fname = '__'.join(['gradsamplingexact', str(i+1), polnet, envid]) + '.png'
        plotdir = os.path.join(logdir, 'gradsamplingexact-plot'); os.makedirs(plotdir, exist_ok=True)
        plt.savefig(os.path.join(plotdir, fname), dpi=300, bbox_inches='tight')
        plt.close(fig)
def parse_arg():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace: parsed arguments; ``cfg`` holds the config file
        path with any leading ``file://`` URI scheme stripped.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--cfg', help='config fpath', type=str, default=None, required=True)
    arg = parser.parse_args()
    # Strip the URI scheme only when it is a prefix; the previous
    # str.replace() would also have removed 'file://' occurring mid-path.
    if arg.cfg.startswith('file://'):
        arg.cfg = arg.cfg[len('file://'):]
    return arg
# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()
| [
"yaml.load",
"util_bwopt_plotter.get_shortenvid",
"argparse.ArgumentParser",
"os.path.join",
"os.makedirs",
"matplotlib.pyplot.close",
"os.path.dirname",
"numpy.isfinite",
"collections.defaultdict",
"pickle.load",
"numpy.array",
"matplotlib.pyplot.subplots"
] | [((417, 441), 'os.path.dirname', 'os.path.dirname', (['arg.cfg'], {}), '(arg.cfg)\n', (432, 441), False, 'import os, pickle, argparse, yaml\n'), ((6219, 6298), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (6242, 6298), False, 'import os, pickle, argparse, yaml\n'), ((504, 516), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (513, 516), False, 'import os, pickle, argparse, yaml\n'), ((655, 669), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (666, 669), False, 'import os, pickle, argparse, yaml\n'), ((1663, 1680), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1674, 1680), False, 'from collections import defaultdict\n'), ((1994, 2031), 'numpy.array', 'np.array', (["data['premix_angerr'][tmix]"], {}), "(data['premix_angerr'][tmix])\n", (2002, 2031), True, 'import numpy as np, matplotlib.pyplot as plt, matplotlib as mpl\n'), ((2057, 2095), 'numpy.array', 'np.array', (["data['postmix_angerr'][tmix]"], {}), "(data['postmix_angerr'][tmix])\n", (2065, 2095), True, 'import numpy as np, matplotlib.pyplot as plt, matplotlib as mpl\n'), ((2124, 2165), 'numpy.array', 'np.array', (["data['prepostmix_angerr'][tmix]"], {}), "(data['prepostmix_angerr'][tmix])\n", (2132, 2165), True, 'import numpy as np, matplotlib.pyplot as plt, matplotlib as mpl\n'), ((2620, 2658), 'numpy.array', 'np.array', (["data['premix_normerr'][tmix]"], {}), "(data['premix_normerr'][tmix])\n", (2628, 2658), True, 'import numpy as np, matplotlib.pyplot as plt, matplotlib as mpl\n'), ((2685, 2724), 'numpy.array', 'np.array', (["data['postmix_normerr'][tmix]"], {}), "(data['postmix_normerr'][tmix])\n", (2693, 2724), True, 'import numpy as np, matplotlib.pyplot as plt, matplotlib as mpl\n'), ((2754, 2796), 'numpy.array', 'np.array', (["data['prepostmix_normerr'][tmix]"], {}), "(data['prepostmix_normerr'][tmix])\n", (2762, 2796), True, 
'import numpy as np, matplotlib.pyplot as plt, matplotlib as mpl\n'), ((3705, 3734), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 9)'}), '(figsize=(12, 9))\n', (3717, 3734), True, 'import numpy as np, matplotlib.pyplot as plt, matplotlib as mpl\n'), ((5806, 5848), 'util_bwopt_plotter.get_shortenvid', 'u.get_shortenvid', (["meshdata['cfg']['envid']"], {}), "(meshdata['cfg']['envid'])\n", (5822, 5848), True, 'import util_bwopt_plotter as u\n'), ((6001, 6047), 'os.path.join', 'os.path.join', (['logdir', '"""gradsamplingexact-plot"""'], {}), "(logdir, 'gradsamplingexact-plot')\n", (6013, 6047), False, 'import os, pickle, argparse, yaml\n'), ((6049, 6084), 'os.makedirs', 'os.makedirs', (['plotdir'], {'exist_ok': '(True)'}), '(plotdir, exist_ok=True)\n', (6060, 6084), False, 'import os, pickle, argparse, yaml\n'), ((6173, 6187), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (6182, 6187), True, 'import numpy as np, matplotlib.pyplot as plt, matplotlib as mpl\n'), ((456, 477), 'os.path.join', 'os.path.join', (['arg.cfg'], {}), '(arg.cfg)\n', (468, 477), False, 'import os, pickle, argparse, yaml\n'), ((575, 622), 'os.path.join', 'os.path.join', (['logdir', '"""data"""', "cfg['mesh_fname']"], {}), "(logdir, 'data', cfg['mesh_fname'])\n", (587, 622), False, 'import os, pickle, argparse, yaml\n'), ((1348, 1367), 'numpy.array', 'np.array', (['tmix_list'], {}), '(tmix_list)\n', (1356, 1367), True, 'import numpy as np, matplotlib.pyplot as plt, matplotlib as mpl\n'), ((6105, 6133), 'os.path.join', 'os.path.join', (['plotdir', 'fname'], {}), '(plotdir, fname)\n', (6117, 6133), False, 'import os, pickle, argparse, yaml\n'), ((3665, 3679), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (3676, 3679), True, 'import numpy as np, matplotlib.pyplot as plt, matplotlib as mpl\n')] |
'''
CBOR reader and writing functionality used by the `Artifact` class.
Exported definitions:
PersistentArray (`numpy.memmap` subclass): A `memmap` backed by a CBOR file.
PersistentList (`list` subclass): A `list` backed by a CBOR file.
read_cbor_file (function): Read a CBOR file.
write_object_as_cbor (function): Write an object to a CBOR file.
'''
from __future__ import annotations
import sys
from contextlib import contextmanager
from io import BufferedRandom
from itertools import chain
from os import SEEK_END, SEEK_SET
from pathlib import Path
from typing import Any, Iterable, Iterator, Sequence, Tuple, cast
from typing_extensions import Annotated
try:
from fcntl import LOCK_EX, LOCK_SH, LOCK_UN, lockf
locking_is_supported = True
except ImportError:
locking_is_supported = False
import cbor2
import numpy as np
from ._namespaces import dictify, namespacify
# Public API of this module.
__all__ = [
    'PersistentArray', 'PersistentList',
    'read_cbor_file', 'write_object_as_cbor']
#-- CBOR primitives ------------------------------------------------------------
# CBOR major types (RFC 8949), pre-shifted into the high 3 bits of the
# initial byte so they can be OR-ed with the 5-bit additional-info field.
MAJOR_TYPE_UINT = 0 << 5
MAJOR_TYPE_BYTE_STRING = 2 << 5
MAJOR_TYPE_ARRAY = 4 << 5
MAJOR_TYPE_TAG = 6 << 5
# Tag marking a row-major multidimensional array (RFC 8746).
TAG_MULTIDIM_ARRAY = 40
# Additional-info values saying how many big-endian bytes follow the
# initial byte to encode the token's value (1, 2, 4 or 8).
INFO_NEXT_BYTE = 24
INFO_NEXT_2_BYTES = 25
INFO_NEXT_4_BYTES = 26
INFO_NEXT_8_BYTES = 27
# Typed-array tags (RFC 8746) mapped to NumPy dtypes.  Tags 64 and 68 both
# map to 'u1' and 72 covers 'i1' (endianness is moot for 1-byte types), so
# in the inverse mapping below the later tag wins for duplicated dtypes.
dtypes_by_tag = {
    64: np.dtype('u1'),
    65: np.dtype('>u2'),
    66: np.dtype('>u4'),
    67: np.dtype('>u8'),
    68: np.dtype('u1'),
    69: np.dtype('<u2'),
    70: np.dtype('<u4'),
    71: np.dtype('<u8'),
    72: np.dtype('i1'),
    73: np.dtype('>i2'),
    74: np.dtype('>i4'),
    75: np.dtype('>i8'),
    77: np.dtype('<i2'),
    78: np.dtype('<i4'),
    79: np.dtype('<i8'),
    80: np.dtype('>f2'),
    81: np.dtype('>f4'),
    82: np.dtype('>f8'),
    84: np.dtype('<f2'),
    85: np.dtype('<f4'),
    86: np.dtype('<f8')}
# Inverse lookup used when writing headers (see `ndarray_header`).
tags_by_dtype = {
    dtype: tag
    for tag, dtype
    in dtypes_by_tag.items()}
#-- Persistent collections -----------------------------------------------------
class PersistentList(list):
    '''
    A `list` backed by a CBOR file.
    For performance, a `PersistentList` is invalidated when another object,
    including another `PersistentList`, writes to its backing file. An
    invalidated `PersistentList` is a potentially out-of-date read-only view
    into the file, and calling `append` or `extend` on it will corrupt the file.
    '''
    def __init__(self, file_: BufferedRandom, length: int) -> None:
        '''
        Load a `PersistentList` from `file_`, trusting the caller-supplied
        `length` (rather than the count currently stored in the file's
        header) as the number of items.
        '''
        # Read the file into a buffer.
        file_.seek(0, SEEK_END)
        buf = bytearray(file_.tell())
        file_.seek(0, SEEK_SET)
        file_.readinto(buf)
        # Overwrite the header, in case it is currently being written to.
        header = list_header(length)
        buf[:len(header)] = header
        # Parse the buffer's contents as list items.
        super().__init__(namespacify(cbor2.loads(buf)))
        # Store the file pointer for `extend` calls.
        self._file = file_
    def __setitem__(self, index: object, value: object) -> None:
        raise TypeError('`PersistentList`s do not support item assignment')
    def __delitem__(self, index: object) -> None:
        raise TypeError('`PersistentList`s do not support item deletion')
    def extend(self, items: Iterable[object]) -> None:
        '''
        Extend list by appending elements from the iterable.
        '''
        # Coerce the collection of items to add into a sequence.
        items = items if isinstance(items, Sequence) else list(items)
        # Append the items, CBOR-encoded, to the backing file.
        # Data is written and flushed *before* the header's length is bumped,
        # so readers never see a length that points past valid data.
        data = b''.join(map(cbor2.dumps, dictify(items)))
        self._file.seek(0, SEEK_END)
        self._file.write(data)
        self._file.flush()
        # Update the header with the new list length, holding an exclusive
        # lock so concurrent readers do not observe a torn header.
        header = list_header(len(self) + len(items))
        with locking_header(self._file, LOCK_EX):
            self._file.seek(0, SEEK_SET)
            self._file.write(header)
            self._file.flush()
        # Add the items to `self`.
        super().extend(items)
    def append(self, item: object) -> None:
        '''
        Append object to the end of the list.
        '''
        self.extend([item])
# NOTE: this subclass is a documentation/typing stub; outside of Sphinx
# builds the name `PersistentArray` is rebound to `PersistentArrayImpl`
# at import time (see the `if 'sphinx' not in sys.modules:` block below).
class PersistentArray(np.memmap):
    '''
    A `numpy.memmap` backed by a CBOR file.
    The file must contain a row-major multidimensional array as defined in IETF
    RFC 8746. For performance, a `PersistentArray` is invalidated when another
    object, including another `PersistentArray`, writes to its backing file. An
    invalidated `PersistentArray` is a potentially out-of-date read-only view
    into the file, and calling `append` or `extend` on it will corrupt the file.
    Due to NumPy issue 4198 (https://github.com/numpy/numpy/issues/4198),
    `PersistentArray` extends `np.memmap` by proxy, meaning that it delegates
    attribute accesses and method calls to an internal `np.memmap` object
    instead of using Python's native subclassing mechanism.
    '''
    def extend(self, items: object) -> None:
        '''
        Extend the array by appending elements from `items`.
        '''
        # Intentionally empty: real behavior lives in `PersistentArrayImpl`.
    def append(self, item: object) -> None:
        '''
        Append `item` to the array.
        '''
        # Intentionally empty: real behavior lives in `PersistentArrayImpl`.
class PersistentArrayImpl:
    # Masquerade as the public `PersistentArray` name in reprs and docs.
    __name__ = 'PersistentArray'
    __qualname__ = 'PersistentArray'
    __doc__ = PersistentArray.__doc__
    def __init__(self,
                 file_: BufferedRandom,
                 shape: Tuple[int, ...],
                 dtype: np.dtype) -> None:
        '''
        Memory-map the raw array data stored in `file_`, which must contain
        a header of `data_offset(len(shape))` bytes (see `ndarray_header`)
        followed by row-major data of the given shape and dtype.
        '''
        self._file = file_
        self._memmap = np.memmap(
            file_, dtype, 'r+',
            data_offset(len(shape)),
            shape)
    def __array__(self) -> np.memmap:
        # Lets `np.asarray` and NumPy operations see the underlying memmap.
        return self._memmap
    def extend(self, items: object) -> None:
        '''
        Extend the array by appending elements from `items`.
        '''
        # Convert `items` to a NumPy array.
        item_array = np.require(items, self._memmap.dtype, ['C_CONTIGUOUS'])
        # Raise an error if the arrays' shapes are not compatible.
        if self._memmap.ndim == 0:
            raise ValueError('scalars cannot be extended')
        if item_array.ndim == 0:
            raise ValueError('`items` must be a sequence')
        if item_array.shape[1:] != self._memmap.shape[1:]:
            raise ValueError('container and item shapes do not match')
        # Write data.  Data goes out before the header is rewritten, so a
        # reader never sees a shape that points past valid bytes.
        self._file.seek(0, SEEK_END)
        self._file.write(item_array)
        self._file.flush()
        # Expand the memory-mapped array.
        dtype = self._memmap.dtype
        offset = data_offset(self._memmap.ndim)
        shape = (len(self._memmap) + len(item_array), *self._memmap.shape[1:])
        self._memmap = np.memmap(self._file, dtype, 'r+', offset, shape)
        # Overwrite the header under an exclusive lock so concurrent readers
        # do not observe a torn header.
        self._file.seek(0, SEEK_SET)
        with locking_header(self._file, LOCK_EX):
            self._file.write(ndarray_header(
                self._memmap.shape, self._memmap.dtype))
            self._file.flush()
    def append(self, item: object) -> None:
        '''
        Append `item` to the array.
        '''
        # `[None]` adds a leading length-1 axis so `extend` sees a sequence.
        self.extend(np.asanyarray(item, self._memmap.dtype)[None])
class MemMapForwardingAttr:
    '''
    A data-less descriptor that forwards attribute access to the owning
    object's internal memmap: reading it yields `obj._memmap.<key>`.
    '''
    def __init__(self, key: str) -> None:
        self._key = key
    def __get__(self, obj: object, type_: type = None) -> Any:
        memmap = getattr(obj, '_memmap')
        return getattr(memmap, self._key)
# Outside of Sphinx documentation builds, swap the plain `PersistentArray`
# stub for the proxy implementation and graft `np.memmap`'s remaining
# methods/attributes onto it as forwarding descriptors.  Under Sphinx the
# stub is presumably kept so the rendered docs show the clean interface —
# TODO confirm.
if 'sphinx' not in sys.modules:
    # Replace `PersistentArray` with `PersistentArrayImpl`
    # and add `np.memmap` methods and attribute-accessors.
    globals()['PersistentArray'] = PersistentArrayImpl
    for key in set(dir(np.memmap)) - set(dir(PersistentArrayImpl)):
        # Each missing attribute becomes a descriptor forwarding to the
        # instance's `_memmap`, carrying over NumPy's own docstring.
        wrapper = MemMapForwardingAttr(key)
        wrapper.__doc__ = getattr(np.memmap, key).__doc__
        setattr(PersistentArrayImpl, key, wrapper)
#-- Reading --------------------------------------------------------------------
def read_cbor_file(path: Annotated[Path, '.cbor']) -> Any:
    '''
    Read a CBOR file.
    If the file encodes an array whose element count is stored as an 8-byte
    unsigned integer (the layout written by `write_list`), a `PersistentList`
    will be returned.
    If the file encodes a 0–12-dimensional row-major array as specified in IETF
    RFC 8746, and the shape elements and byte string length are encoded as
    8-byte unsigned integers, a `PersistentArray` will be returned.
    Otherwise, a JSON-like object will be returned.
    Raises `ValueError` if `path` does not end in '.cbor'.
    '''
    # Defer to other readers if the path does not correspond to a CBOR file.
    if path.suffix != '.cbor':
        raise ValueError()
    # Open the specified file and read the first 128 bytes — enough to hold
    # any header the `parse_*` functions below need to classify the file.
    f = cast(BufferedRandom, open(path, 'rb+'))
    with locking_header(f, LOCK_SH):
        header = cast(bytes, f.read(128))
        f.seek(0)
    # Try parsing the file as a `PersistentList`.
    try: return PersistentList(f, parse_list(header))
    except (ValueError, IndexError): pass
    # Try parsing the file as a `PersistentArray`.
    try: return PersistentArray(f, *parse_ndarray(header))
    except (ValueError, IndexError): pass
    # Parse the file using `cbor2`.
    return namespacify(cbor2.loads(f.read()))
def parse_list(buf: bytes) -> int:
    '''
    Interpret `buf` as the header of a `PersistentList` and return the
    stored item count.
    Raises `ValueError` on an unexpected token and `IndexError` if the end
    of the buffer is reached while parsing.
    '''
    next_pos, length = parse_token(buf, 0, MAJOR_TYPE_ARRAY)
    # A valid header is an array token with an 8-byte length: 1 + 8 bytes.
    fail_if(next_pos != 9)
    return length
def parse_ndarray(buf: bytes) -> Tuple[Tuple[int, ...], np.dtype]:
    '''
    Parse the given buffer as the header of a `PersistentArray` and return its
    shape and data type.
    A `ValueError` is raised if an unexpected token is encountered and an
    `IndexError` is raised if the end of the buffer was reached while parsing.
    '''
    # Check for a "multidimensional array" tag (1 tag byte + 1 value byte).
    pos, root_tag = parse_token(buf, 0, MAJOR_TYPE_TAG)
    fail_if(pos != 2 or root_tag != TAG_MULTIDIM_ARRAY)
    # Check whether the payload is a length-2 array (length fits in 1 byte).
    pos, root_len = parse_token(buf, pos, MAJOR_TYPE_ARRAY)
    fail_if(pos != 3 or root_len != 2)
    # Check for a shape array with up to 12 entries.
    pos, ndim = parse_token(buf, pos, MAJOR_TYPE_ARRAY)
    fail_if(pos != 4 or ndim > 12)
    # Read the shape array.  Each extent token is 1 initial byte plus 8
    # value bytes, hence the `4 + 9 * (i + 1)` position check.
    shape = ndim * [0]
    for i in range(ndim):
        pos, shape[i] = parse_token(buf, pos, MAJOR_TYPE_UINT)
        fail_if(pos != 4 + 9 * (i + 1))
    # Check whether the shape array is followed by a typed data array
    # (2-byte tag token after the 4 + 9*ndim shape bytes).
    pos, dtype_tag = parse_token(buf, pos, MAJOR_TYPE_TAG)
    fail_if(pos != 6 + 9 * ndim or dtype_tag not in dtypes_by_tag)
    dtype = dtypes_by_tag[dtype_tag]
    # Check whether the data array is a byte string with an 8-byte size,
    # and that the size matches the shape/dtype.  The final position,
    # 15 + 9*ndim, equals `data_offset(ndim)`: the start of the raw data.
    pos, nbytes = parse_token(buf, pos, MAJOR_TYPE_BYTE_STRING)
    fail_if(pos != 15 + 9 * ndim or nbytes != np.prod(shape) * dtype.itemsize)
    # Return metadata if parsing succeeded.
    return tuple(shape), dtype
def parse_token(buf: bytes, pos: int,
                expected_major_type: int) -> Tuple[int, int]:
    '''
    Decode the CBOR token starting at `buf[pos]`.
    Returns the offset just past the token's head together with the token's
    integer value.  Raises `ValueError` when the token's major type differs
    from `expected_major_type` or its additional-info field is unsupported.
    '''
    head = buf[pos]
    if head & 0b1110_0000 != expected_major_type:
        raise ValueError('CBOR parsing failed.')
    info = head & 0b0001_1111
    if info < INFO_NEXT_BYTE:
        # Values 0-23 are stored directly in the additional-info bits.
        return pos + 1, int(info)
    if info > INFO_NEXT_8_BYTES:
        # 28-31 are reserved / indefinite-length markers; unsupported here.
        raise ValueError('CBOR parsing failed.')
    # Otherwise 1, 2, 4 or 8 big-endian bytes follow the initial byte.
    width = 1 << (info - INFO_NEXT_BYTE)
    return pos + 1 + width, int.from_bytes(buf[pos+1:pos+1+width], 'big')
def fail_if(condition: bool) -> None:
    '''
    Raise a `ValueError` if the given condition is true.
    Helper for the `parse_*` functions: callers pass the *failure*
    condition, e.g. ``fail_if(pos != 9)``.
    '''
    # The previous docstring said "if the condition is not true", which
    # contradicted both this implementation and every call site.
    if condition:
        raise ValueError('CBOR parsing failed')
#-- Writing --------------------------------------------------------------------
def write_object_as_cbor(path: Path, val: object) -> str:
    '''
    Write a JSON-encodable object or a NumPy array to a CBOR file.
    Returns the file-name suffix, '.cbor'.
    '''
    if isinstance(val, np.ndarray):
        write_ndarray(path, val)
    elif hasattr(val, '__array__'):
        # Array-likes (e.g. `PersistentArray`) are converted and written as
        # multidimensional arrays too.
        write_ndarray(path, val.__array__()) # type: ignore
    elif isinstance(val, list):
        write_list(path, val)
    else:
        with open(path, 'wb') as f:
            cbor2.dump(dictify(val), f)
    return '.cbor'
def write_list(path: Path, list_: list) -> None:
    '''
    Write a list as a CBOR file containing an array.
    The header stores the element count as an 8-byte unsigned integer (see
    `list_header`), followed by each element CBOR-encoded in order.
    '''
    with open(path, 'wb') as f:
        f.write(list_header(len(list_)))
        for elem in list_:
            cbor2.dump(dictify(elem), f)
        f.flush()
def write_ndarray(path: Path, array: np.ndarray) -> None:
    '''
    Write an array as a CBOR file containing a row-major multidimensional
    array, as specified in IETF RFC 8746.
    The shape elements and the size of the byte string will be encoded as 8-byte
    unsigned integers.
    '''
    with open(path, 'wb') as f:
        f.write(ndarray_header(array.shape, array.dtype))
        # `ascontiguousarray` guarantees a C-ordered buffer for raw writing.
        f.write(np.ascontiguousarray(array).data)
        f.flush()
def list_header(length: int) -> bytes:
    '''
    Build the 9-byte CBOR header of a list: an array token whose length is
    always encoded as an 8-byte big-endian unsigned integer.
    '''
    header = bytearray([MAJOR_TYPE_ARRAY | INFO_NEXT_8_BYTES])
    header += length.to_bytes(8, 'big')
    return bytes(header)
def ndarray_header(shape: Tuple[int, ...], dtype: np.dtype) -> bytes:
    '''
    Return the CBOR header for a multidimensional array.
    Layout per IETF RFC 8746: tag 40 wrapping a 2-element array of
    [shape, typed byte string].  Shape entries and the byte-string size are
    always written as 8-byte unsigned integers, so the header is exactly
    `data_offset(len(shape))` bytes long.
    '''
    return bytes((
        MAJOR_TYPE_TAG | INFO_NEXT_BYTE,
        TAG_MULTIDIM_ARRAY,  # tag 40: multidimensional array
        MAJOR_TYPE_ARRAY | 2,  # the [shape, data] pair
        MAJOR_TYPE_ARRAY | len(shape),  # shape array; ndim fits in the info bits
        *chain.from_iterable(
            (MAJOR_TYPE_UINT | INFO_NEXT_8_BYTES,
             *n.to_bytes(8, 'big'))  # each extent as an 8-byte uint
            for n in shape),
        MAJOR_TYPE_TAG | INFO_NEXT_BYTE,
        tags_by_dtype[dtype],  # typed-array tag for the dtype
        MAJOR_TYPE_BYTE_STRING | INFO_NEXT_8_BYTES,
        *int(np.prod(shape) * dtype.itemsize).to_bytes(8, 'big')))
def data_offset(ndim: int) -> int:
    '''
    Byte offset at which an `ndarray`'s raw data begins inside a CBOR file,
    i.e. the length of the header produced by `ndarray_header`: 15 fixed
    bytes plus 9 bytes per shape entry.
    '''
    bytes_per_extent = 9
    fixed_overhead = 15
    return fixed_overhead + bytes_per_extent * ndim
@contextmanager
def locking_header(file_: BufferedRandom, mode: int) -> Iterator[None]:
    '''
    Return a context manager that holds a lock on a CBOR file's header (its
    first 128 bytes) for the duration of the `with` body.
    `mode` is an `fcntl` lock mode (`LOCK_SH` or `LOCK_EX`).  On platforms
    without `fcntl` (e.g. Windows), no locking is performed.
    '''
    if locking_is_supported:
        lockf(file_, mode, 128)
        try:
            yield
        finally:
            # Release the lock even if the `with` body raises; previously an
            # exception thrown into the generator skipped the unlock,
            # leaving the header locked for the rest of the process's life.
            lockf(file_, LOCK_UN)
    else:
        yield
| [
"fcntl.lockf",
"cbor2.loads",
"numpy.dtype",
"numpy.asanyarray",
"numpy.require",
"numpy.memmap",
"numpy.ascontiguousarray",
"numpy.prod"
] | [((1337, 1351), 'numpy.dtype', 'np.dtype', (['"""u1"""'], {}), "('u1')\n", (1345, 1351), True, 'import numpy as np\n'), ((1361, 1376), 'numpy.dtype', 'np.dtype', (['""">u2"""'], {}), "('>u2')\n", (1369, 1376), True, 'import numpy as np\n'), ((1386, 1401), 'numpy.dtype', 'np.dtype', (['""">u4"""'], {}), "('>u4')\n", (1394, 1401), True, 'import numpy as np\n'), ((1411, 1426), 'numpy.dtype', 'np.dtype', (['""">u8"""'], {}), "('>u8')\n", (1419, 1426), True, 'import numpy as np\n'), ((1436, 1450), 'numpy.dtype', 'np.dtype', (['"""u1"""'], {}), "('u1')\n", (1444, 1450), True, 'import numpy as np\n'), ((1460, 1475), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (1468, 1475), True, 'import numpy as np\n'), ((1485, 1500), 'numpy.dtype', 'np.dtype', (['"""<u4"""'], {}), "('<u4')\n", (1493, 1500), True, 'import numpy as np\n'), ((1510, 1525), 'numpy.dtype', 'np.dtype', (['"""<u8"""'], {}), "('<u8')\n", (1518, 1525), True, 'import numpy as np\n'), ((1535, 1549), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (1543, 1549), True, 'import numpy as np\n'), ((1559, 1574), 'numpy.dtype', 'np.dtype', (['""">i2"""'], {}), "('>i2')\n", (1567, 1574), True, 'import numpy as np\n'), ((1584, 1599), 'numpy.dtype', 'np.dtype', (['""">i4"""'], {}), "('>i4')\n", (1592, 1599), True, 'import numpy as np\n'), ((1609, 1624), 'numpy.dtype', 'np.dtype', (['""">i8"""'], {}), "('>i8')\n", (1617, 1624), True, 'import numpy as np\n'), ((1634, 1649), 'numpy.dtype', 'np.dtype', (['"""<i2"""'], {}), "('<i2')\n", (1642, 1649), True, 'import numpy as np\n'), ((1659, 1674), 'numpy.dtype', 'np.dtype', (['"""<i4"""'], {}), "('<i4')\n", (1667, 1674), True, 'import numpy as np\n'), ((1684, 1699), 'numpy.dtype', 'np.dtype', (['"""<i8"""'], {}), "('<i8')\n", (1692, 1699), True, 'import numpy as np\n'), ((1709, 1724), 'numpy.dtype', 'np.dtype', (['""">f2"""'], {}), "('>f2')\n", (1717, 1724), True, 'import numpy as np\n'), ((1734, 1749), 'numpy.dtype', 'np.dtype', (['""">f4"""'], {}), 
"('>f4')\n", (1742, 1749), True, 'import numpy as np\n'), ((1759, 1774), 'numpy.dtype', 'np.dtype', (['""">f8"""'], {}), "('>f8')\n", (1767, 1774), True, 'import numpy as np\n'), ((1784, 1799), 'numpy.dtype', 'np.dtype', (['"""<f2"""'], {}), "('<f2')\n", (1792, 1799), True, 'import numpy as np\n'), ((1809, 1824), 'numpy.dtype', 'np.dtype', (['"""<f4"""'], {}), "('<f4')\n", (1817, 1824), True, 'import numpy as np\n'), ((1834, 1849), 'numpy.dtype', 'np.dtype', (['"""<f8"""'], {}), "('<f8')\n", (1842, 1849), True, 'import numpy as np\n'), ((5929, 5984), 'numpy.require', 'np.require', (['items', 'self._memmap.dtype', "['C_CONTIGUOUS']"], {}), "(items, self._memmap.dtype, ['C_CONTIGUOUS'])\n", (5939, 5984), True, 'import numpy as np\n'), ((6721, 6770), 'numpy.memmap', 'np.memmap', (['self._file', 'dtype', '"""r+"""', 'offset', 'shape'], {}), "(self._file, dtype, 'r+', offset, shape)\n", (6730, 6770), True, 'import numpy as np\n'), ((14904, 14927), 'fcntl.lockf', 'lockf', (['file_', 'mode', '(128)'], {}), '(file_, mode, 128)\n', (14909, 14927), False, 'from fcntl import LOCK_EX, LOCK_SH, LOCK_UN, lockf\n'), ((14950, 14971), 'fcntl.lockf', 'lockf', (['file_', 'LOCK_UN'], {}), '(file_, LOCK_UN)\n', (14955, 14971), False, 'from fcntl import LOCK_EX, LOCK_SH, LOCK_UN, lockf\n'), ((2880, 2896), 'cbor2.loads', 'cbor2.loads', (['buf'], {}), '(buf)\n', (2891, 2896), False, 'import cbor2\n'), ((7149, 7188), 'numpy.asanyarray', 'np.asanyarray', (['item', 'self._memmap.dtype'], {}), '(item, self._memmap.dtype)\n', (7162, 7188), True, 'import numpy as np\n'), ((13624, 13651), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['array'], {}), '(array)\n', (13644, 13651), True, 'import numpy as np\n'), ((11009, 11023), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (11016, 11023), True, 'import numpy as np\n'), ((14457, 14471), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (14464, 14471), True, 'import numpy as np\n')] |
#!/usr/env python
# Imports
import numpy as np
from scipy.special import erf
from scipy.optimize import curve_fit, least_squares
from astropy.io import fits
from scipy import ndimage
from scipy.interpolate import interp1d
from scipy.special import wofz
from . import utils
def trace(im,yestimate=None,yorder=2,sigorder=4,step=10):
    """ Trace the spectrum. Spectral dimension is assumed to be on the horizontal axis.

    Fits Gaussians to image columns every `step` pixels, then fits
    polynomials of degree `yorder` / `sigorder` to the Gaussian centers and
    widths versus column, returning both the per-column fits and a smooth
    per-pixel model.

    NOTE(review): as written this function will raise NameError — `y` is
    never defined before the loop (likely `y = np.arange(ny)` is missing)
    and `dln` is not imported in this module (only `from . import utils`
    is).  TODO confirm the intended helper module.
    """
    ny,nx = im.shape
    if yestimate is None:
        # Estimate the spectrum's row as the row of maximum summed flux.
        ytot = np.sum(im,axis=1)
        yestimate = np.argmax(ytot)
    # Smooth in spectral dimension
    # a uniform (boxcar) filter with a width of 50
    # NOTE(review): `smim` is computed but never used below.
    smim = ndimage.uniform_filter1d(im, 50, 1)
    nstep = nx//step
    # Loop over the columns in steps and fit Gaussians
    tcat = np.zeros(nstep,dtype=np.dtype([('x',float),('pars',float,4)]))
    for i in range(nstep):
        pars,cov = dln.gaussfit(y[yestimate-10:yestimate+10],im[yestimate-10:yestimate+10,step*i+step//2])
        tcat['x'][i] = step*i+step//2
        tcat['pars'][i] = pars
    # Fit polynomial to y vs. x and gaussian sigma vs. x
    ypars = np.polyfit(tcat['x'],tcat['pars'][:,1],yorder)
    sigpars = np.polyfit(tcat['x'],tcat['pars'][:,2],sigorder)
    # Evaluate the smooth model at every column.
    mcat = np.zeros(nx,dtype=np.dtype([('x',float),('y',float),('sigma',float)]))
    xx = np.arange(nx)
    mcat['x'] = xx
    mcat['y'] = np.poly1d(ypars)(xx)
    mcat['sigma'] = np.poly1d(sigpars)(xx)
    return tcat, ypars, sigpars,mcat
def boxcar(im):
    """Boxcar-extract a 1-D spectrum from a 2-D image.

    The spectral axis is assumed horizontal (axis 1).  The spectrum's
    spatial (row) position is located from the row of maximum summed flux,
    a per-column median background (over +/-50 rows around the peak) is
    subtracted, and the flux is summed over a +/-20-row window.

    Parameters
    ----------
    im : numpy.ndarray
        2-D image of shape (ny, nx).

    Returns
    -------
    numpy.ndarray
        1-D background-subtracted flux of length nx.
    """
    ny,nx = im.shape
    # Locate the spatial peak: the row with the largest total flux.
    ytot = np.sum(im,axis=1)
    yest = np.argmax(ytot)
    # Background subtract: per-column median over +/-50 rows around the peak.
    yblo = int(np.maximum(yest-50,0))
    ybhi = int(np.minimum(yest+50,ny))
    med = np.median(im[yblo:ybhi,:],axis=0)
    # BUGFIX: the previous `np.repeat(med,ny).reshape(ny,nx)` repeated each
    # column's median along the wrong axis, scrambling the background image
    # whenever ny != 1.  Broadcasting subtracts the per-column median from
    # every row correctly (and leaves `im` unmodified).
    subim = im - med
    # Sum up the flux in a +/-20-row window around the peak.
    ylo = int(np.maximum(yest-20,0))
    yhi = int(np.minimum(yest+20,ny))
    flux = np.sum(subim[ylo:yhi,:],axis=0)
    return flux
def linefit(x,y,initpar,bounds,err=None):
    """Fit a Gaussian of fixed center and width, plus a constant, to data.

    Only the amplitude and the additive constant are free parameters; the
    center and sigma are frozen at the values supplied in ``initpar``.

    Parameters
    ----------
    x, y : array_like
        Abscissa and data values.
    initpar : sequence
        [height, center, sigma, constant offset].  Center and sigma are
        held fixed; height and offset seed the fit.
    bounds : tuple
        (lower, upper) 4-element bound sequences in the same order as
        ``initpar``; only the height and offset entries are used.
    err : array_like, optional
        Per-point uncertainties, forwarded to `curve_fit` as ``sigma``.

    Returns
    -------
    tuple
        ``(popt, pcov)`` from `scipy.optimize.curve_fit`, where ``popt``
        is [height, constant offset].
    """
    fixed_center = initpar[1]
    fixed_sigma = initpar[2]
    def gline(x, amp, const=0):
        """1-D Gaussian with frozen center/sigma plus a constant."""
        return amp * np.exp(-(x - fixed_center)**2 / (2 * fixed_sigma**2)) + const
    p0 = [initpar[0], initpar[3]]
    lower, upper = bounds
    free_bounds = ([lower[0], lower[3]], [upper[0], upper[3]])
    return curve_fit(gline, x, y, p0=p0, bounds=free_bounds, sigma=err)
def extract(im,imerr=None,mcat=None,nobackground=False):
    """ Extract a spectrum.

    Each image column is fit with a Gaussian whose center and sigma are
    fixed by the trace model (`linefit` leaves only the height and a
    constant background free); the fitted height is converted to a flux via
    the Gaussian area.

    Parameters
    ----------
    im : numpy.ndarray
        2-D image (spatial axis 0, spectral axis 1).
    imerr : numpy.ndarray, optional
        Per-pixel uncertainties; if omitted the fits are unweighted.
    mcat : numpy.ndarray, optional
        Per-column trace model with 'y' and 'sigma' fields; derived via
        `trace` when not given (NOTE(review): `trace` appears broken as
        written — see its docstring).
    nobackground : bool, optional
        If True, pin the constant background term near zero.

    Returns
    -------
    numpy.ndarray
        Structured array with fields x, pars, perr, flux, fluxerr.
    """
    ny,nx = im.shape
    x = np.arange(nx)
    y = np.arange(ny)
    # No trace information input, get it
    if mcat is None:
        tcat,ypars,sigpars,mcat=trace(im)
    # Loop over the columns and get the flux using the trace information
    cat = np.zeros(nx,dtype=np.dtype([('x',int),('pars',float,2),('perr',float,2),
                                 ('flux',float),('fluxerr',float)]))
    for i in range(nx):
        line = im[:,i].flatten()
        if imerr is not None:
            lineerr = imerr[:,i].flatten()
        else:
            lineerr = np.ones(len(line)) # unweighted
        # Fit the constant offset and the height of the Gaussian
        # fix the central position and sigma
        ycen = mcat['y'][i]
        ysigma = mcat['sigma'][i]
        # Seed the height from the pixel at the trace center (floor 0.01
        # keeps the 1.5*ht0 upper bound strictly positive).
        ht0 = np.maximum(line[int(np.round(ycen))],0.01)
        initpar = [ht0,ycen,ysigma,np.median(line)]
        if nobackground is True:
            initpar = [ht0,ycen,ysigma,0]
        # Only fit the region right around the peak
        y0 = int(np.maximum(ycen-50,0))
        y1 = int(np.minimum(ycen+50,ny))
        # NOTE(review): if np.median(line) is 0 the background bounds
        # collapse to [0, 0], which curve_fit rejects — TODO confirm.
        bnds = ([0,ycen-1e-4,ysigma-1e-4,0],[1.5*ht0,ycen,ysigma,1.5*initpar[3]])
        if nobackground is True:
            bnds = ([0,ycen-1e-4,ysigma-1e-4,0],[1.5*ht0,ycen,ysigma,0.1])
        pars,cov = linefit(y[y0:y1],line[y0:y1],initpar=initpar,bounds=bnds,err=lineerr[y0:y1])
        perr = np.sqrt(np.diag(cov))
        # Gaussian area = ht*wid*sqrt(2*pi)
        flux = pars[0]*ysigma*np.sqrt(2*np.pi)
        fluxerr = perr[0]*ysigma*np.sqrt(2*np.pi)
        cat['x'][i] = i
        cat['pars'][i] = pars
        cat['perr'][i] = perr
        cat['flux'][i] = flux
        cat['fluxerr'][i] = fluxerr
    return cat
def emissionlines(spec,thresh=None):
    """Measure the emission lines in an arc lamp spectrum.

    Local maxima above `thresh` (default: 5% of the spectrum's range above
    its minimum) are detected and each is fit with a Gaussian; the fit is
    subtracted to build a residual and a summed line model.

    NOTE(review): `dln` is not imported in this module (only
    `from . import utils` is), so the `dln.gaussfit`/`dln.gaussian` calls
    below will raise NameError as written — TODO confirm intended helper.

    Returns
    -------
    gcat : numpy.ndarray
        Structured array of per-line measurements.
    gmodel : numpy.ndarray
        Sum of the fitted Gaussians (background-subtracted).
    """
    nx = len(spec)
    x = np.arange(nx)
    # Default threshold: minimum plus 5% of the spectrum's full range.
    if thresh is None:
        thresh = np.min(spec) + (np.max(spec)-np.min(spec))*0.05
    # Detect the peaks: pixels above both neighbors and above threshold.
    sleft = np.hstack((0,spec[0:-1]))
    sright = np.hstack((spec[1:],0))
    peaks, = np.where((spec>sleft) & (spec>sright) & (spec>thresh))
    npeaks = len(peaks)
    print(str(npeaks)+' peaks found')
    # Loop over the peaks and fit them with Gaussians
    gcat = np.zeros(npeaks,dtype=np.dtype([('x0',int),('x',float),('xerr',float),('pars',float,4),('perr',float,4),
                                      ('flux',float),('fluxerr',float)]))
    resid = spec.copy()
    gmodel = np.zeros(nx)
    for i in range(npeaks):
        x0 = peaks[i]
        # Fit a +/-6-pixel window around each peak, clipped to the spectrum.
        xlo = np.maximum(x0-6,0)
        xhi = np.minimum(x0+6,nx)
        initpar = [spec[x0],x0,1,0]
        bnds = ([0,x0-3,0.1,0],[1.5*initpar[0],x0+3,10,1e4])
        pars,cov = dln.gaussfit(x[xlo:xhi],spec[xlo:xhi],initpar,bounds=bnds,binned=True)
        perr = np.sqrt(np.diag(cov))
        gmodel1 = dln.gaussian(x[xlo:xhi],*pars)
        # Accumulate the background-subtracted Gaussian into the model
        # and remove it from the residual.
        gmodel[xlo:xhi] += (gmodel1-pars[3])
        resid[xlo:xhi] -= (gmodel1-pars[3])
        # Gaussian area = ht*wid*sqrt(2*pi)
        flux = pars[0]*pars[2]*np.sqrt(2*np.pi)
        fluxerr = perr[0]*pars[2]*np.sqrt(2*np.pi)
        gcat['x0'][i] = x0
        gcat['x'][i] = pars[1]
        gcat['xerr'][i] = perr[1]
        gcat['pars'][i] = pars
        gcat['perr'][i] = perr
        gcat['flux'][i] = flux
        gcat['fluxerr'][i] = fluxerr
    return gcat, gmodel
def continuum(spec,bin=50,perc=60,norder=4):
    """Estimate the smooth continuum of a spectrum.

    The spectrum is chopped into equal-width pixel bins; in each bin the
    `perc`-th percentile of the flux is taken as a continuum sample and a
    polynomial of degree `norder` is fit to those samples.  Pixels falling
    well below that first fit (flux/continuum <= 0.8) are then rejected
    and the binned percentile fit is repeated on the surviving pixels.

    Parameters
    ----------
    spec : array_like
        1-D spectrum.
    bin : int, optional
        Bin width in pixels (default 50).
    perc : float, optional
        Percentile used as the per-bin continuum estimate (default 60).
    norder : int, optional
        Polynomial degree (default 4).

    Returns
    -------
    cont : numpy.ndarray
        Continuum evaluated at every pixel.
    coef : numpy.ndarray
        Coefficients of the final polynomial fit (highest power first).
    """
    npix = len(spec)
    pix = np.arange(npix)
    nbins = npix//bin
    slices = [slice(i*bin, i*bin+bin) for i in range(nbins)]
    # First pass: per-bin percentile of the flux, then a polynomial fit.
    xcen1 = np.array([np.mean(pix[s]) for s in slices], float)
    ylev1 = np.array([np.percentile(spec[s], perc) for s in slices], float)
    coef1 = np.polyfit(xcen1, ylev1, norder)
    cont1 = np.poly1d(coef1)(pix)
    # Second pass: drop pixels far below the first fit and refit.
    keep = np.zeros(npix, bool)
    keep[(spec/cont1) > 0.8] = True
    xcen = np.array([np.mean(pix[s][keep[s]]) for s in slices], float)
    ylev = np.array([np.percentile(spec[s][keep[s]], perc) for s in slices], float)
    coef = np.polyfit(xcen, ylev, norder)
    cont = np.poly1d(coef)(pix)
    return cont, coef
| [
"numpy.sum",
"numpy.maximum",
"numpy.argmax",
"numpy.polyfit",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"numpy.diag",
"numpy.round",
"numpy.max",
"numpy.repeat",
"numpy.minimum",
"numpy.median",
"scipy.optimize.curve_fit",
"numpy.hstack",
"numpy.percentile",
"numpy.min",
"numpy.p... | [((639, 674), 'scipy.ndimage.uniform_filter1d', 'ndimage.uniform_filter1d', (['im', '(50)', '(1)'], {}), '(im, 50, 1)\n', (663, 674), False, 'from scipy import ndimage\n'), ((1097, 1146), 'numpy.polyfit', 'np.polyfit', (["tcat['x']", "tcat['pars'][:, 1]", 'yorder'], {}), "(tcat['x'], tcat['pars'][:, 1], yorder)\n", (1107, 1146), True, 'import numpy as np\n'), ((1158, 1209), 'numpy.polyfit', 'np.polyfit', (["tcat['x']", "tcat['pars'][:, 2]", 'sigorder'], {}), "(tcat['x'], tcat['pars'][:, 2], sigorder)\n", (1168, 1209), True, 'import numpy as np\n'), ((1310, 1323), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (1319, 1323), True, 'import numpy as np\n'), ((1548, 1566), 'numpy.sum', 'np.sum', (['im'], {'axis': '(1)'}), '(im, axis=1)\n', (1554, 1566), True, 'import numpy as np\n'), ((1577, 1592), 'numpy.argmax', 'np.argmax', (['ytot'], {}), '(ytot)\n', (1586, 1592), True, 'import numpy as np\n'), ((1706, 1741), 'numpy.median', 'np.median', (['im[yblo:ybhi, :]'], {'axis': '(0)'}), '(im[yblo:ybhi, :], axis=0)\n', (1715, 1741), True, 'import numpy as np\n'), ((1921, 1954), 'numpy.sum', 'np.sum', (['subim[ylo:yhi, :]'], {'axis': '(0)'}), '(subim[ylo:yhi, :], axis=0)\n', (1927, 1954), True, 'import numpy as np\n'), ((2480, 2550), 'scipy.optimize.curve_fit', 'curve_fit', (['gline', 'x', 'y'], {'p0': 'line_initpar', 'bounds': 'line_bounds', 'sigma': 'err'}), '(gline, x, y, p0=line_initpar, bounds=line_bounds, sigma=err)\n', (2489, 2550), False, 'from scipy.optimize import curve_fit, least_squares\n'), ((2669, 2682), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (2678, 2682), True, 'import numpy as np\n'), ((2691, 2704), 'numpy.arange', 'np.arange', (['ny'], {}), '(ny)\n', (2700, 2704), True, 'import numpy as np\n'), ((4486, 4499), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (4495, 4499), True, 'import numpy as np\n'), ((4649, 4675), 'numpy.hstack', 'np.hstack', (['(0, spec[0:-1])'], {}), '((0, spec[0:-1]))\n', (4658, 4675), True, 'import 
numpy as np\n'), ((4688, 4712), 'numpy.hstack', 'np.hstack', (['(spec[1:], 0)'], {}), '((spec[1:], 0))\n', (4697, 4712), True, 'import numpy as np\n'), ((4725, 4785), 'numpy.where', 'np.where', (['((spec > sleft) & (spec > sright) & (spec > thresh))'], {}), '((spec > sleft) & (spec > sright) & (spec > thresh))\n', (4733, 4785), True, 'import numpy as np\n'), ((5133, 5145), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (5141, 5145), True, 'import numpy as np\n'), ((6144, 6157), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (6153, 6157), True, 'import numpy as np\n'), ((6232, 6254), 'numpy.zeros', 'np.zeros', (['nbins', 'float'], {}), '(nbins, float)\n', (6240, 6254), True, 'import numpy as np\n'), ((6266, 6288), 'numpy.zeros', 'np.zeros', (['nbins', 'float'], {}), '(nbins, float)\n', (6274, 6288), True, 'import numpy as np\n'), ((6477, 6509), 'numpy.polyfit', 'np.polyfit', (['xbin1', 'ybin1', 'norder'], {}), '(xbin1, ybin1, norder)\n', (6487, 6509), True, 'import numpy as np\n'), ((6609, 6627), 'numpy.zeros', 'np.zeros', (['nx', 'bool'], {}), '(nx, bool)\n', (6617, 6627), True, 'import numpy as np\n'), ((6674, 6696), 'numpy.zeros', 'np.zeros', (['nbins', 'float'], {}), '(nbins, float)\n', (6682, 6696), True, 'import numpy as np\n'), ((6707, 6729), 'numpy.zeros', 'np.zeros', (['nbins', 'float'], {}), '(nbins, float)\n', (6715, 6729), True, 'import numpy as np\n'), ((6965, 6995), 'numpy.polyfit', 'np.polyfit', (['xbin', 'ybin', 'norder'], {}), '(xbin, ybin, norder)\n', (6975, 6995), True, 'import numpy as np\n'), ((488, 506), 'numpy.sum', 'np.sum', (['im'], {'axis': '(1)'}), '(im, axis=1)\n', (494, 506), True, 'import numpy as np\n'), ((526, 541), 'numpy.argmax', 'np.argmax', (['ytot'], {}), '(ytot)\n', (535, 541), True, 'import numpy as np\n'), ((1359, 1375), 'numpy.poly1d', 'np.poly1d', (['ypars'], {}), '(ypars)\n', (1368, 1375), True, 'import numpy as np\n'), ((1400, 1418), 'numpy.poly1d', 'np.poly1d', (['sigpars'], {}), '(sigpars)\n', (1409, 1418), 
True, 'import numpy as np\n'), ((1634, 1658), 'numpy.maximum', 'np.maximum', (['(yest - 50)', '(0)'], {}), '(yest - 50, 0)\n', (1644, 1658), True, 'import numpy as np\n'), ((1672, 1697), 'numpy.minimum', 'np.minimum', (['(yest + 50)', 'ny'], {}), '(yest + 50, ny)\n', (1682, 1697), True, 'import numpy as np\n'), ((1849, 1873), 'numpy.maximum', 'np.maximum', (['(yest - 20)', '(0)'], {}), '(yest - 20, 0)\n', (1859, 1873), True, 'import numpy as np\n'), ((1886, 1911), 'numpy.minimum', 'np.minimum', (['(yest + 20)', 'ny'], {}), '(yest + 20, ny)\n', (1896, 1911), True, 'import numpy as np\n'), ((5210, 5231), 'numpy.maximum', 'np.maximum', (['(x0 - 6)', '(0)'], {}), '(x0 - 6, 0)\n', (5220, 5231), True, 'import numpy as np\n'), ((5243, 5265), 'numpy.minimum', 'np.minimum', (['(x0 + 6)', 'nx'], {}), '(x0 + 6, nx)\n', (5253, 5265), True, 'import numpy as np\n'), ((6334, 6367), 'numpy.mean', 'np.mean', (['x[i * bin:i * bin + bin]'], {}), '(x[i * bin:i * bin + bin])\n', (6341, 6367), True, 'import numpy as np\n'), ((6381, 6429), 'numpy.percentile', 'np.percentile', (['spec[i * bin:i * bin + bin]', 'perc'], {}), '(spec[i * bin:i * bin + bin], perc)\n', (6394, 6429), True, 'import numpy as np\n'), ((6520, 6536), 'numpy.poly1d', 'np.poly1d', (['coef1'], {}), '(coef1)\n', (6529, 6536), True, 'import numpy as np\n'), ((6774, 6838), 'numpy.mean', 'np.mean', (['x[i * bin:i * bin + bin][gdmask[i * bin:i * bin + bin]]'], {}), '(x[i * bin:i * bin + bin][gdmask[i * bin:i * bin + bin]])\n', (6781, 6838), True, 'import numpy as np\n'), ((6845, 6924), 'numpy.percentile', 'np.percentile', (['spec[i * bin:i * bin + bin][gdmask[i * bin:i * bin + bin]]', 'perc'], {}), '(spec[i * bin:i * bin + bin][gdmask[i * bin:i * bin + bin]], perc)\n', (6858, 6924), True, 'import numpy as np\n'), ((7005, 7020), 'numpy.poly1d', 'np.poly1d', (['coef'], {}), '(coef)\n', (7014, 7020), True, 'import numpy as np\n'), ((783, 827), 'numpy.dtype', 'np.dtype', (["[('x', float), ('pars', float, 4)]"], {}), "([('x', 
float), ('pars', float, 4)])\n", (791, 827), True, 'import numpy as np\n'), ((1248, 1304), 'numpy.dtype', 'np.dtype', (["[('x', float), ('y', float), ('sigma', float)]"], {}), "([('x', float), ('y', float), ('sigma', float)])\n", (1256, 1304), True, 'import numpy as np\n'), ((1752, 1770), 'numpy.repeat', 'np.repeat', (['med', 'ny'], {}), '(med, ny)\n', (1761, 1770), True, 'import numpy as np\n'), ((2910, 3013), 'numpy.dtype', 'np.dtype', (["[('x', int), ('pars', float, 2), ('perr', float, 2), ('flux', float), (\n 'fluxerr', float)]"], {}), "([('x', int), ('pars', float, 2), ('perr', float, 2), ('flux',\n float), ('fluxerr', float)])\n", (2918, 3013), True, 'import numpy as np\n'), ((3504, 3519), 'numpy.median', 'np.median', (['line'], {}), '(line)\n', (3513, 3519), True, 'import numpy as np\n'), ((3665, 3689), 'numpy.maximum', 'np.maximum', (['(ycen - 50)', '(0)'], {}), '(ycen - 50, 0)\n', (3675, 3689), True, 'import numpy as np\n'), ((3705, 3730), 'numpy.minimum', 'np.minimum', (['(ycen + 50)', 'ny'], {}), '(ycen + 50, ny)\n', (3715, 3730), True, 'import numpy as np\n'), ((4038, 4050), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (4045, 4050), True, 'import numpy as np\n'), ((4126, 4144), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (4133, 4144), True, 'import numpy as np\n'), ((4176, 4194), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (4183, 4194), True, 'import numpy as np\n'), ((4561, 4573), 'numpy.min', 'np.min', (['spec'], {}), '(spec)\n', (4567, 4573), True, 'import numpy as np\n'), ((4934, 5070), 'numpy.dtype', 'np.dtype', (["[('x0', int), ('x', float), ('xerr', float), ('pars', float, 4), ('perr',\n float, 4), ('flux', float), ('fluxerr', float)]"], {}), "([('x0', int), ('x', float), ('xerr', float), ('pars', float, 4), (\n 'perr', float, 4), ('flux', float), ('fluxerr', float)])\n", (4942, 5070), True, 'import numpy as np\n'), ((5473, 5485), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (5480, 5485), 
True, 'import numpy as np\n'), ((5700, 5718), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (5707, 5718), True, 'import numpy as np\n'), ((5751, 5769), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (5758, 5769), True, 'import numpy as np\n'), ((2285, 2327), 'numpy.exp', 'np.exp', (['(-(x - cen) ** 2 / (2 * sigma ** 2))'], {}), '(-(x - cen) ** 2 / (2 * sigma ** 2))\n', (2291, 2327), True, 'import numpy as np\n'), ((3446, 3460), 'numpy.round', 'np.round', (['ycen'], {}), '(ycen)\n', (3454, 3460), True, 'import numpy as np\n'), ((4577, 4589), 'numpy.max', 'np.max', (['spec'], {}), '(spec)\n', (4583, 4589), True, 'import numpy as np\n'), ((4590, 4602), 'numpy.min', 'np.min', (['spec'], {}), '(spec)\n', (4596, 4602), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
data
~~~~
Utility functions and classes.
"""
import numpy as np
import pandas as pd
import xspline
import ipopt
from typing import Tuple
import process
class TempData:
    """Container for temperature exposure data, kept sorted by study id.

    All per-observation arrays are reordered so that observations from
    the same study are contiguous; group structure (unique study ids and
    per-study counts) is derived from the sorted data.
    """
    def __init__(
            self,
            mean_temp,
            daily_temp,
            obs_mean,
            obs_std,
            study_id,
            data_id,
            trimming_weights=None):
        # Reorder every per-observation array by study id.
        order = np.argsort(study_id)
        self.mean_temp = mean_temp[order]
        self.daily_temp = daily_temp[order]
        self.obs_mean = obs_mean[order]
        self.obs_std = obs_std[order]
        self.study_id = study_id[order]
        self.data_id = data_id[order] if data_id is not None else None

        self.unique_mean_temp = np.unique(self.mean_temp)

        # Group structure: study_sizes[i] is the number of observations
        # belonging to unique_study_id[i].
        uid, counts = np.unique(self.study_id,
                                 return_counts=True)
        order = np.argsort(uid)
        self.unique_study_id = uid[order]
        self.study_sizes = counts[order]
        self.num_studies = self.study_sizes.size
        self.num_obs = self.obs_mean.size

        # Default to untrimmed data: every observation gets weight one.
        if trimming_weights is None:
            self.trimming_weights = np.ones(self.num_obs)
        else:
            self.trimming_weights = trimming_weights
class TrendResult:
    """Holds the result of the trend fit and smooths beta/gamma over
    mean temperature with splines.

    beta and gamma are given per mean-temperature point (shape
    (num_mean_temp, 2), enforced by asserts below); this class fits one
    spline per column so that beta/gamma can be evaluated at arbitrary
    mean temperatures.
    """
    def __init__(
            self,
            beta,
            beta_var,
            gamma,
            random_effects,
            mean_temp,
            num_beta_spline_knots=6,
            num_gamma_spline_knots=6,
            beta_spline_degree=3,
            gamma_spline_degree=3):
        # pass in the data
        self.num_mean_temp = mean_temp.size
        # Both beta and gamma carry two components per mean-temperature
        # point (presumably intercept and slope -- TODO confirm).
        assert beta.shape == (self.num_mean_temp, 2)
        assert gamma.shape == (self.num_mean_temp, 2)
        self.beta = beta
        self.beta_var = beta_var
        self.gamma = gamma
        self.mean_temp = mean_temp
        self.random_effects = random_effects

        # construct the splines over the observed mean-temperature range,
        # with evenly spaced knots and linear extrapolation at both ends
        self.min_mean_temp = self.mean_temp.min()
        self.max_mean_temp = self.mean_temp.max()
        beta_spline_knots = np.linspace(self.min_mean_temp,
                                        self.max_mean_temp,
                                        num_beta_spline_knots)
        gamma_spline_knots = np.linspace(self.min_mean_temp,
                                         self.max_mean_temp,
                                         num_gamma_spline_knots)
        # gamma_spline_knots = np.array([
        #     self.min_mean_temp,
        #     13.0,
        #     17.0,
        #     22.0,
        #     self.max_mean_temp
        # ])
        self.beta_spline = xspline.XSpline(
            beta_spline_knots, beta_spline_degree,
            l_linear=True, r_linear=True)
        self.gamma_spline = xspline.XSpline(
            gamma_spline_knots, gamma_spline_degree,
            l_linear=True, r_linear=True)

        # compute the spline bases coefficients by (unweighted) least
        # squares against the given beta/gamma values
        X_beta = self.beta_spline.design_mat(self.mean_temp)
        X_gamma = self.gamma_spline.design_mat(self.mean_temp)
        self.c_beta = np.linalg.solve(X_beta.T.dot(X_beta),
                                       X_beta.T.dot(beta))
        self.c_gamma = np.linalg.solve(X_gamma.T.dot(X_gamma),
                                        X_gamma.T.dot(gamma))

    def beta_at_mean_temp(self, mean_temp):
        """Return spline-smoothed beta(s) at the given mean_temp."""
        X = self.beta_spline.design_mat(mean_temp)
        return X.dot(self.c_beta)

    def gamma_at_mean_temp(self, mean_temp):
        """Return spline-smoothed gamma(s) at the given mean_temp."""
        X = self.gamma_spline.design_mat(mean_temp)
        return X.dot(self.c_gamma)

    def sample_random_effects(self, num_samples):
        """Sample the random effects at every stored mean temperature.

        Stores the draws in self.re_samples with shape
        (num_samples, 2, num_mean_temp) via np.dstack.
        """
        re_samples = []
        for mt in self.mean_temp:
            # floor gamma at a small positive value so sqrt is defined
            gamma = np.maximum(1e-6, self.gamma_at_mean_temp(mt))
            beta = self.beta_at_mean_temp(mt)
            # re_samples.append(np.random.randn(num_samples, gamma.size)*
            #                   np.sqrt(gamma))
            # NOTE(review): samples are centered at beta, i.e. these are
            # beta + random-effect draws, not pure random effects.
            re_samples.append(beta +
                              np.random.randn(num_samples, gamma.size)*np.sqrt(gamma))
        self.re_samples = np.dstack(re_samples)
class SurfaceResult:
    """Holds the fitted residual surface over (mean temp, daily temp).

    The surface is a tensor-product spline in mean temperature and a
    scaled daily temperature (see scale_daily_temp); beta are the spline
    coefficients with covariance beta_var.
    """
    def __init__(
            self,
            beta,
            beta_var,
            spline,
            mean_temp,
            daily_temp_range,
            scale_params=[40.0, 1.25]):
        # pass in the data
        self.beta = beta
        self.beta_var = beta_var
        self.spline = spline
        self.scale_params = scale_params
        self.mean_temp = mean_temp
        self.daily_temp_range = daily_temp_range
        # self.tmrl = np.array([
        #     self.tmrl_at_mean_temp(self.mean_temp[i],
        #                            daily_temp_range=self.daily_temp_range[i])
        #     for i in range(self.mean_temp.size)])
        # NOTE(review): the minimum-risk computation above is disabled;
        # tmrl currently just mirrors mean_temp.
        self.tmrl = mean_temp.copy()

    def surface_func(self, mean_temp, daily_temp,
                     beta=None):
        """Evaluate the surface at paired (mean_temp, daily_temp) points.

        mean_temp and daily_temp must have equal size; daily_temp is
        rescaled before evaluating the spline. If beta is None the fitted
        coefficients are used.
        """
        if beta is None:
            beta = self.beta
        # NOTE(review): num_points is computed but unused here.
        num_points = mean_temp.size
        scaled_daily_temp = scale_daily_temp(mean_temp, daily_temp,
                                             self.scale_params)
        X = self.spline.design_mat([mean_temp, scaled_daily_temp],
                                   is_grid=False,
                                   l_extra_list=[True, True],
                                   r_extra_list=[True, True])
        return X.dot(beta)

    def sample_fixed_effects(self, num_samples):
        """Draw multivariate-normal samples of the fixed effects.

        Stores draws of shape (num_samples, beta.size) in
        self.beta_samples.
        """
        beta_samples = np.random.multivariate_normal(
            self.beta, self.beta_var, num_samples)
        self.beta_samples = beta_samples

    def tmrl_at_mean_temp(self, mean_temp, num_points=100,
                          daily_temp_range=None):
        """Return the daily temperature of minimum risk at mean_temp.

        The surface is evaluated on a grid of num_points daily
        temperatures; if daily_temp_range is given, the grid is clipped
        to (a scaled version of) that observed range. The magic numbers
        50.0/44.0/0.7 bound the scaled grid -- presumably chosen to match
        the data support; TODO confirm.
        """
        if daily_temp_range is None:
            lb = (mean_temp - 50.0)/44.0
            scaled_daily_temp = np.linspace(lb, 0.7, num_points)
            daily_temp = unscale_daily_temp(mean_temp,
                                            scaled_daily_temp,
                                            self.scale_params)
        else:
            lb = np.maximum((mean_temp - 50.0)/44.0,
                            scale_daily_temp(mean_temp,
                                             np.array([daily_temp_range[0]]),
                                             self.scale_params)[0]*0.7)
            ub = np.minimum(0.7,
                            scale_daily_temp(mean_temp,
                                             np.array([daily_temp_range[1]]),
                                             self.scale_params)[0]*0.7)
            lb = unscale_daily_temp(mean_temp, np.array([lb]),
                                    self.scale_params)[0]
            ub = unscale_daily_temp(mean_temp, np.array([ub]),
                                    self.scale_params)[0]
            daily_temp = np.linspace(lb, ub, num_points)
        val = self.surface_func(np.repeat(mean_temp, num_points),
                                daily_temp)
        # the grid point with the smallest surface value is the TMRL
        return daily_temp[np.argmin(val)]
def scale_daily_temp(mean_temp, daily_temp, scale_params):
    """Linearly rescale a daily temperature relative to the mean temperature.

    The deviation (daily_temp - mean_temp) is divided by a
    mean-temperature-dependent width,
    scale_params[0] - scale_params[1]*mean_temp.
    """
    width = scale_params[0] - scale_params[1]*mean_temp
    return (daily_temp - mean_temp)/width
def unscale_daily_temp(mean_temp, scaled_daily_temp, scale_params):
    """Invert scale_daily_temp: map a scaled deviation back to a daily temp."""
    width = scale_params[0] - scale_params[1]*mean_temp
    return scaled_daily_temp*width + mean_temp
def sizes_to_slices(sizes):
    """Convert a sequence of group sizes into consecutive slices.

    E.g. [2, 3] -> [slice(0, 2), slice(2, 5)].
    """
    bounds = np.cumsum(np.insert(sizes, 0, 0))
    return [slice(bounds[i], bounds[i + 1]) for i in range(len(sizes))]
def fit_line(obs_mean, obs_std, cov):
    """Weighted least-squares fit of a straight line.

    Parameters
    ----------
    obs_mean : array
        Observed values.
    obs_std : array
        Observation standard deviations; weights are 1/obs_std**2.
    cov : array
        The single covariate.

    Returns
    -------
    beta : array, shape (2,)
        [intercept, slope].
    beta_var : array, shape (2, 2)
        Posterior covariance of beta.
    """
    weights = 1.0/obs_std**2
    design = np.vstack((np.ones(cov.size), cov)).T
    beta_var = np.linalg.inv((design.T*weights).dot(design))
    beta = beta_var.dot((design.T*weights).dot(obs_mean))
    return beta, beta_var
def fit_spline(obs_mean, obs_std, cov, spline):
    """Weighted least-squares fit of spline coefficients.

    Parameters
    ----------
    obs_mean, obs_std, cov : arrays
        Observations, their standard deviations, and the covariate.
    spline : object
        Must provide a design_mat(cov) method returning the basis matrix.

    Returns
    -------
    beta : array
        Fitted spline coefficients.
    """
    design = spline.design_mat(cov)
    weighted = design.T/obs_std**2
    return np.linalg.solve(weighted.dot(design),
                           weighted.dot(obs_mean))
def create_grid_points(mts, ddt,
                       scaled_dt_range=[-1.0, 0.8],
                       scale_params=[40.0, 1.25]):
    """Build a (mean temp, daily temp) grid over all mean temps in mts.

    Each mean temperature contributes a daily-temperature grid from
    create_grid_points_at_mean_temp; the per-temperature pieces are
    concatenated and returned as two flat arrays.
    """
    pairs = [
        create_grid_points_at_mean_temp(
            mt, ddt,
            scaled_dt_range=scaled_dt_range,
            scale_params=scale_params)
        for mt in mts
    ]
    mt_parts = [p[0] for p in pairs]
    dt_parts = [p[1] for p in pairs]
    return np.hstack(mt_parts), np.hstack(dt_parts)
def create_grid_points_at_mean_temp(mt, ddt,
                                    scaled_dt_range=[-1.0, 0.8],
                                    scale_params=[40.0, 1.25]):
    """Build the daily-temperature grid for one mean temperature mt.

    The grid is regular in the *scaled* daily-temperature space (step
    chosen so it corresponds to ddt in unscaled units), then mapped back
    through unscale_daily_temp. Returns (mt repeated, daily temps).
    """
    lo, hi = scaled_dt_range
    step = ddt/(scale_params[0] - scale_params[1]*mt)
    scaled = np.arange(lo, hi, step)
    dt = unscale_daily_temp(mt, scaled, scale_params)
    return np.repeat(mt, dt.size), dt
def create_grid_points_alt(mts, ddt, tdata):
    """Build a (mean temp, daily temp) grid bounded by the observed data.

    For each mean temperature in mts, the daily-temperature grid spans
    the daily temperatures actually observed in tdata at that mean
    temperature. Returns two flat, concatenated arrays.
    """
    mt_parts = []
    dt_parts = []
    for mt in mts:
        subset = process.extract_at_mean_temp(tdata, mt)
        mt_grid, dt_grid = create_grid_points_at_mean_temp_alt(
            mt, ddt,
            dt_lb=subset.daily_temp.min(),
            dt_ub=subset.daily_temp.max()
        )
        mt_parts.append(mt_grid)
        dt_parts.append(dt_grid)
    return np.hstack(mt_parts), np.hstack(dt_parts)
def create_grid_points_at_mean_temp_alt(mt: float, ddt: float,
                                        dt_lb: float, dt_ub: float) -> Tuple[np.ndarray]:
    """Create an evenly spaced daily-temperature grid at one mean temp.

    The grid runs from dt_lb towards dt_ub (inclusive when it lands on a
    step) with spacing ddt. Returns (mt repeated, daily-temp grid).
    """
    daily_grid = np.arange(dt_lb, dt_ub + ddt, ddt)
    mean_grid = np.repeat(mt, daily_grid.size)
    return mean_grid, daily_grid
"numpy.dstack",
"xspline.XSpline",
"numpy.random.randn",
"numpy.ones",
"numpy.argmin",
"numpy.insert",
"numpy.argsort",
"numpy.hstack",
"numpy.arange",
"numpy.random.multivariate_normal",
"numpy.linspace",
"numpy.array",
"process.extract_at_mean_temp",
"numpy.sqrt",
"numpy.unique",
"nu... | [((9713, 9774), 'numpy.arange', 'np.arange', (['scaled_dt_range[0]', 'scaled_dt_range[1]', 'scaled_ddt'], {}), '(scaled_dt_range[0], scaled_dt_range[1], scaled_ddt)\n', (9722, 9774), True, 'import numpy as np\n'), ((10502, 10536), 'numpy.arange', 'np.arange', (['dt_lb', '(dt_ub + ddt)', 'ddt'], {}), '(dt_lb, dt_ub + ddt, ddt)\n', (10511, 10536), True, 'import numpy as np\n'), ((494, 514), 'numpy.argsort', 'np.argsort', (['study_id'], {}), '(study_id)\n', (504, 514), True, 'import numpy as np\n'), ((883, 908), 'numpy.unique', 'np.unique', (['self.mean_temp'], {}), '(self.mean_temp)\n', (892, 908), True, 'import numpy as np\n'), ((979, 1023), 'numpy.unique', 'np.unique', (['self.study_id'], {'return_counts': '(True)'}), '(self.study_id, return_counts=True)\n', (988, 1023), True, 'import numpy as np\n'), ((1091, 1118), 'numpy.argsort', 'np.argsort', (['unique_study_id'], {}), '(unique_study_id)\n', (1101, 1118), True, 'import numpy as np\n'), ((2368, 2442), 'numpy.linspace', 'np.linspace', (['self.min_mean_temp', 'self.max_mean_temp', 'num_beta_spline_knots'], {}), '(self.min_mean_temp, self.max_mean_temp, num_beta_spline_knots)\n', (2379, 2442), True, 'import numpy as np\n'), ((2552, 2627), 'numpy.linspace', 'np.linspace', (['self.min_mean_temp', 'self.max_mean_temp', 'num_gamma_spline_knots'], {}), '(self.min_mean_temp, self.max_mean_temp, num_gamma_spline_knots)\n', (2563, 2627), True, 'import numpy as np\n'), ((2943, 3031), 'xspline.XSpline', 'xspline.XSpline', (['beta_spline_knots', 'beta_spline_degree'], {'l_linear': '(True)', 'r_linear': '(True)'}), '(beta_spline_knots, beta_spline_degree, l_linear=True,\n r_linear=True)\n', (2958, 3031), False, 'import xspline\n'), ((3081, 3171), 'xspline.XSpline', 'xspline.XSpline', (['gamma_spline_knots', 'gamma_spline_degree'], {'l_linear': '(True)', 'r_linear': '(True)'}), '(gamma_spline_knots, gamma_spline_degree, l_linear=True,\n r_linear=True)\n', (3096, 3171), False, 'import xspline\n'), ((4512, 4533), 
'numpy.dstack', 'np.dstack', (['re_samples'], {}), '(re_samples)\n', (4521, 4533), True, 'import numpy as np\n'), ((6009, 6077), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['self.beta', 'self.beta_var', 'num_samples'], {}), '(self.beta, self.beta_var, num_samples)\n', (6038, 6077), True, 'import numpy as np\n'), ((8093, 8115), 'numpy.insert', 'np.insert', (['sizes', '(0)', '(0)'], {}), '(sizes, 0, 0)\n', (8102, 8115), True, 'import numpy as np\n'), ((9422, 9440), 'numpy.hstack', 'np.hstack', (['mt_list'], {}), '(mt_list)\n', (9431, 9440), True, 'import numpy as np\n'), ((9442, 9460), 'numpy.hstack', 'np.hstack', (['dt_list'], {}), '(dt_list)\n', (9451, 9460), True, 'import numpy as np\n'), ((9870, 9892), 'numpy.repeat', 'np.repeat', (['mt', 'dt.size'], {}), '(mt, dt.size)\n', (9879, 9892), True, 'import numpy as np\n'), ((10021, 10060), 'process.extract_at_mean_temp', 'process.extract_at_mean_temp', (['tdata', 'mt'], {}), '(tdata, mt)\n', (10049, 10060), False, 'import process\n'), ((10334, 10352), 'numpy.hstack', 'np.hstack', (['mt_list'], {}), '(mt_list)\n', (10343, 10352), True, 'import numpy as np\n'), ((10354, 10372), 'numpy.hstack', 'np.hstack', (['dt_list'], {}), '(dt_list)\n', (10363, 10372), True, 'import numpy as np\n'), ((10548, 10570), 'numpy.repeat', 'np.repeat', (['mt', 'dt.size'], {}), '(mt, dt.size)\n', (10557, 10570), True, 'import numpy as np\n'), ((1432, 1453), 'numpy.ones', 'np.ones', (['self.num_obs'], {}), '(self.num_obs)\n', (1439, 1453), True, 'import numpy as np\n'), ((6357, 6389), 'numpy.linspace', 'np.linspace', (['lb', '(0.7)', 'num_points'], {}), '(lb, 0.7, num_points)\n', (6368, 6389), True, 'import numpy as np\n'), ((7350, 7381), 'numpy.linspace', 'np.linspace', (['lb', 'ub', 'num_points'], {}), '(lb, ub, num_points)\n', (7361, 7381), True, 'import numpy as np\n'), ((7415, 7447), 'numpy.repeat', 'np.repeat', (['mean_temp', 'num_points'], {}), '(mean_temp, num_points)\n', (7424, 7447), True, 'import numpy as 
np\n'), ((7520, 7534), 'numpy.argmin', 'np.argmin', (['val'], {}), '(val)\n', (7529, 7534), True, 'import numpy as np\n'), ((8482, 8497), 'numpy.ones', 'np.ones', (['x.size'], {}), '(x.size)\n', (8489, 8497), True, 'import numpy as np\n'), ((7130, 7144), 'numpy.array', 'np.array', (['[lb]'], {}), '([lb])\n', (7138, 7144), True, 'import numpy as np\n'), ((7251, 7265), 'numpy.array', 'np.array', (['[ub]'], {}), '([ub])\n', (7259, 7265), True, 'import numpy as np\n'), ((4428, 4468), 'numpy.random.randn', 'np.random.randn', (['num_samples', 'gamma.size'], {}), '(num_samples, gamma.size)\n', (4443, 4468), True, 'import numpy as np\n'), ((4469, 4483), 'numpy.sqrt', 'np.sqrt', (['gamma'], {}), '(gamma)\n', (4476, 4483), True, 'import numpy as np\n'), ((6739, 6770), 'numpy.array', 'np.array', (['[daily_temp_range[0]]'], {}), '([daily_temp_range[0]])\n', (6747, 6770), True, 'import numpy as np\n'), ((6978, 7009), 'numpy.array', 'np.array', (['[daily_temp_range[1]]'], {}), '([daily_temp_range[1]])\n', (6986, 7009), True, 'import numpy as np\n')] |
"""
Tokenizer
---------
Classes with a .text_to_token_list method (and a bit more). Used by other
modules as a means to convert stings to lists of strings.
If you have a function that converts strings to lists of strings, you can
make a tokenizer from it by using MakeTokenizer(my_tokenizing_func).
SparseFormatter
---------------
Classes for converting text to sparse representations (e.g. VW or SVMLight).
SFileFilter
-----------
Classes for filtering words/rows from a sparse formatted file.
"""
from collections import Counter, defaultdict
import hashlib
import random
import re
import nltk
import numpy as np
import pandas as pd
from ..common import smart_open, DocIDError
from ..common_abc import SaveLoad
from . import nlp
class BaseTokenizer(SaveLoad):
    """
    Abstract parent for tokenizers; not meant for direct use.
    Subclasses provide text_to_token_list.
    """
    def text_to_counter(self, text):
        """
        Count the tokens that self.text_to_token_list extracts from text.

        Parameters
        ----------
        text : String

        Returns
        -------
        tokens : Counter
            Maps each token to its number of occurrences in text.
        """
        token_list = self.text_to_token_list(text)
        return Counter(token_list)
class MakeTokenizer(BaseTokenizer):
    """
    Adapter that turns a plain tokenizing function into a subclass of
    BaseTokenizer (so it also gets text_to_counter for free).
    """
    def __init__(self, tokenizer_func):
        """
        Parameters
        ----------
        tokenizer_func : Function
            Takes in strings, spits out lists of strings.
        """
        # Bind the function directly as the instance's text_to_token_list.
        self.text_to_token_list = tokenizer_func
class TokenizerBasic(BaseTokenizer):
    """
    A minimal word tokenizer: keeps lowercased non-stopwords of length
    >= 2, with numeric tokens excluded.
    """
    def text_to_token_list(self, text):
        """
        Tokenize text into a list of lowercase, non-stopword tokens.

        Parameters
        ----------
        text : String

        Returns
        -------
        tokens : List
            Tokenized text, e.g. ['hello', 'my', 'name', 'is', 'ian']
        """
        token_list = []
        for word in nlp.word_tokenize(text, L=2, numeric=False):
            if not nlp.is_stopword(word):
                token_list.append(word.lower())
        return token_list
class TokenizerPOSFilter(BaseTokenizer):
    """
    Tokenizer that keeps only words whose part-of-speech tag is in a
    whitelist.  Text is split into sentences, each sentence is tokenized
    and POS-tagged, and only words carrying an allowed tag survive.
    """
    def __init__(
            self, pos_types=[], sent_tokenizer=nltk.sent_tokenize,
            word_tokenizer=TokenizerBasic(), word_tokenizer_func=None,
            pos_tagger=nltk.pos_tag):
        """
        Parameters
        ----------
        pos_types : List of Strings
            Parts of Speech to keep.
        sent_tokenizer : Function
            Default: nltk.sent_tokenize
            Splits text into a list of sentence strings.
        word_tokenizer : Subclass of BaseTokenizer
            Default: TokenizerBasic()
            Tokenizes each sentence.
        word_tokenizer_func : Function
            String -> list of strings.  Only used (wrapped with
            MakeTokenizer) when word_tokenizer is falsy.
        pos_tagger : Function
            Default: nltk.pos_tag
            Maps a list of words to a list of (word, POS) tuples.
        """
        self.pos_types = set(pos_types)
        self.sent_tokenizer = sent_tokenizer
        self.pos_tagger = pos_tagger

        if word_tokenizer:
            self.word_tokenizer = word_tokenizer
        else:
            self.word_tokenizer = MakeTokenizer(word_tokenizer_func)

    def text_to_token_list(self, text):
        """
        Tokenize text that (possibly) includes multiple sentences,
        keeping only words whose POS tag is in self.pos_types.
        """
        tokenize_words = self.word_tokenizer.text_to_token_list
        token_list = []
        # e.g. text -> ['I am Ian.', 'Who are you?']
        for sentence in self.sent_tokenizer(text):
            # words -> [('I', 'PRP'), ('am', 'VBP'), ...]
            tagged = self.pos_tagger(tokenize_words(sentence))
            token_list.extend(self._sent_filter(tagged))
        return token_list

    def _sent_filter(self, tokenized_sent):
        # Keep only the words whose POS tag is whitelisted.
        return [
            word for (word, pos) in tokenized_sent if pos in self.pos_types]
class SparseFormatter(object):
"""
Base class for sparse formatting, e.g. VW or svmlight.
Not meant to be directly used.
"""
def _parse_feature_str(self, feature_str):
"""
Parses a sparse feature string and returns
feature_values = {feature1: value1, feature2: value2,...}
"""
# We currently don't support namespaces, so feature_str must start
# with a space then feature1[:value1] feature2[:value2] ...
assert feature_str[0] == ' '
feature_str = feature_str[1:]
# The regex splits 'hi:1 bye:' into [('hi', '1'), ('bye', '')]
fv_list = re.findall(r'(\S+):(\S*)', feature_str)
feature_values = {
f: self._string_to_number(v, empty_sub=1) for (f, v) in fv_list}
return feature_values
def sstr_to_dict(self, sstr):
"""
Returns a dict representation of sparse record string.
Parameters
----------
sstr : String
String representation of one record.
Returns
-------
record_dict : Dict
possible keys = 'target', 'importance', 'doc_id', 'feature_values'
Notes
-----
rstrips newline characters from sstr before parsing.
"""
sstr = sstr.rstrip('\n').rstrip('\r')
idx = sstr.index(self.preamble_char)
preamble, feature_str = sstr[:idx], sstr[idx + 1:]
record_dict = self._parse_preamble(preamble)
record_dict['feature_values'] = self._parse_feature_str(feature_str)
return record_dict
def sstr_to_info(self, sstr):
"""
Returns the full info dictionary corresponding to a sparse record
string. This holds "everything."
Parameters
----------
sstr : String
String representation of one record.
Returns
-------
info : Dict
possible keys = 'tokens', 'target', 'importance', 'doc_id',
'feature_values', etc...
"""
info = self.sstr_to_dict(sstr)
info['tokens'] = self._dict_to_tokens(info)
return info
def _dict_to_tokens(self, record_dict):
token_list = []
if 'feature_values' in record_dict:
for feature, value in record_dict['feature_values'].iteritems():
# If the value is a non-integer score (e.g. tfidf), then
# it cannot correspond to a number of tokens
int_value = int(value)
assert int_value == value
token_list += [feature] * int_value
return token_list
def sstr_to_token_list(self, sstr):
"""
Convertes a sparse record string to a list of tokens (with repeats)
corresponding to sstr.
E.g. if sstr represented the dict {'hi': 2, 'bye': 1}, then
token_list = ['hi', 'hi', 'bye'] (up to permutation).
Parameters
----------
sstr : String
Formatted according to self.format_name
Note that the values in sstr must be integers.
Returns
-------
token_list : List of Strings
"""
record_dict = self.sstr_to_dict(sstr)
return self._dict_to_tokens(record_dict)
def sfile_to_token_iter(self, filepath_or_buffer, limit=None):
"""
Return an iterator over filepath_or_buffer that returns, line-by-line,
a token_list.
Parameters
----------
filepath_or_buffer : string or file handle / StringIO.
File should be formatted according to self.format.
Returns
-------
token_iter : Iterator
E.g. token_iter.next() gets the next line as a list of tokens.
"""
with smart_open(filepath_or_buffer) as open_file:
for index, line in enumerate(open_file):
if index == limit:
raise StopIteration
yield self.sstr_to_token_list(line)
def _string_to_number(self, string, empty_sub=None):
"""
Convert a string to either an int or a float, with optional
substitution for empty strings.
"""
try:
return int(string)
except ValueError:
pass # fallback to float
try:
return float(string)
except ValueError:
# See if it is empty and there is an empty_sub value
if (string == '') and (empty_sub is not None):
return empty_sub
else:
raise
class VWFormatter(SparseFormatter):
    """
    Converts in and out of VW format (namespaces currently not supported).
    Many valid VW inputs are possible, we ONLY support

    [target] [Importance [Tag]]| feature1[:value1] feature2[:value2] ...

    Every single whitespace, pipe, colon, and newline is significant.

    See:
    https://github.com/JohnLangford/vowpal_wabbit/wiki/Input-format
    http://hunch.net/~vw/validate.html
    """
    def __init__(self):
        self.format_name = 'vw'
        self.preamble_char = '|'

    def get_sstr(
        self, feature_values=None, target=None, importance=None, doc_id=None):
        """
        Return a string reprsenting one record in sparse VW format:

        Parameters
        ----------
        feature_values : Dict-like
            {feature1: value1,...}
        target : Real number
            The value we are trying to predict.
        importance : Real number
            The importance weight to associate to this example.
        doc_id : Number or string
            A name for this example.

        Returns
        -------
        formatted : String
            Formatted in VW format

        Raises
        ------
        DocIDError
            If doc_id contains a pipe, colon, quote, or whitespace.
        """
        if doc_id:
            if re.search(r"[|\s:']", doc_id):
                msg = (
                    "Malformed VW string %s. Strings cannot have |, :, ', "
                    "or whitespace")
                # Interpolate the offending doc_id (previously the raw
                # template with an unfilled '%s' was raised).
                raise DocIDError(msg % doc_id)
            # If doc_id, then we must have importance.
            # The doc_id sits right against the pipe.
            assert importance is not None
            formatted = " %s|" % doc_id
        # If no doc_id, insert a space to the left of the pipe.
        else:
            formatted = " |"

        # Compare against None rather than truthiness: 0 is a legal VW
        # importance/target and used to be silently dropped.
        if importance is not None:
            # Insert a space to the left of importance.
            formatted = " " + str(importance) + formatted

        if target is not None:
            # target gets stuck on the end
            formatted = str(target) + formatted

        # The feature part must start with a space unless there is a namespace.
        formatted += ' '
        for word, count in feature_values.items():
            formatted += "%s:%s " % (word, count)

        # Remove the trailing space...not required but it's screwy to have a
        # space-delimited file with a trailing space but nothing after it!
        if len(feature_values) > 0:
            formatted = formatted.rstrip()

        return formatted

    def _parse_preamble(self, preamble):
        """
        Parse the VW preamble:  [target] [Importance [Tag]]
        and return a dict with keys 'doc_id', 'target', 'importance' iff
        the corresponding values were found in the preamble.
        """
        # If preamble was butted directly against a pipe, then the right-most
        # part is a doc_id....extract it and continue.
        if preamble[-1] != ' ':
            doc_id_left = preamble.rfind(' ')
            doc_id = preamble[doc_id_left + 1:]
            preamble = preamble[: doc_id_left]
        else:
            doc_id = None

        # Step from left to right through preamble.
        # We are in the target until we encounter the first space...if there
        # is no target, then the first character will be a space.
        in_target = True
        target = ''
        importance = ''
        for char in preamble:
            if char == ' ':
                in_target = False
            elif in_target:
                target += char
            else:
                importance += char

        parsed = {}
        items = (
            ('doc_id', doc_id), ('target', target), ('importance', importance))
        for key, value in items:
            if value:
                if key in ['target', 'importance']:
                    parsed[key] = self._string_to_number(value)
                else:
                    parsed[key] = value

        return parsed
class SVMLightFormatter(SparseFormatter):
    """
    For formatting in/out of SVM-Light format (info not currently supported)
    http://svmlight.joachims.org/

    <line> .=. <target> <feature>:<value> <feature>:<value> ...
    <target> .=. +1 | -1 | 0 | <float>
    <feature> .=. <integer> | "qid"
    <value> .=. <float>
    <info> .=. <string>
    """
    def __init__(self):
        self.format_name = 'svmlight'
        # The preamble (target) is separated from the features by a space.
        self.preamble_char = ' '

    def get_sstr(
        self, feature_values=None, target=1, importance=None, doc_id=None):
        """
        Return a string reprsenting one record in SVM-Light sparse format
        <line> .=. <target> <feature>:<value> <feature>:<value>

        Parameters
        ----------
        feature_values : Dict-like
            {hash1: value1,...}
        target : Real number
            The value we are trying to predict.
        importance, doc_id
            Accepted for interface compatibility; not written by this format.

        Returns
        -------
        formatted : String
            Formatted in SVM-Light
        """
        # For now, just use 0 for <target>
        formatted = str(target) + ' '
        # .items() (not Python-2-only .iteritems()) so this also runs on
        # Python 3.
        for word, count in feature_values.items():
            formatted += " %s:%s" % (word, count)

        return formatted

    def _parse_preamble(self, preamble):
        # The SVM-Light preamble is just the numeric target.
        return {'target': float(preamble)}
class SFileFilter(SaveLoad):
"""
Filters results stored in sfiles (sparsely formattted bag-of-words files).
"""
    def __init__(self, formatter, bit_precision=18, sfile=None, verbose=True):
        """
        Parameters
        ----------
        formatter : Subclass of SparseFormatter
        bit_precision : Integer
            Hashes are taken modulo 2**bit_precision.  Currently must be < 32.
        sfile : filepath or buffer
            Load this sfile during init
        verbose : Boolean
        """
        assert isinstance(bit_precision, int)

        self.formatter = formatter
        self.bit_precision = bit_precision
        self.verbose = verbose

        # Token ids live in the range [0, 2**bit_precision).
        self.precision = 2**bit_precision
        self.sfile_loaded = False
        # Updated later by set_bit_precision_required (after compactify).
        self.bit_precision_required = bit_precision

        if sfile is not None:
            self.load_sfile(sfile)
    def _get_hash_fun(self):
        """
        Return a function mapping a token string to an id in
        [0, self.precision).

        The fastest is the built in function hash.  Quick experimentation
        shows that this function maps similar words to similar values (not
        cryptographic) and therefore increases collisions...no big deal.

        hashlib.sha224 is up to 224 bit.

        NOTE(review): on Python 3, hash() of a str is randomized per
        process (PYTHONHASHSEED), so ids would not be reproducible across
        runs -- confirm before relying on persisted id maps.
        """
        if self.bit_precision <= 64:
            hash_fun = lambda w: hash(w) % self.precision
        elif self.bit_precision <= 224:
            hash_fun = lambda w: (
                int(hashlib.sha224(w).hexdigest(), 16) % self.precision)
        else:
            raise ValueError("Precision above 224 bit not supported")

        return hash_fun
    def load_sfile(self, sfile):
        """
        Load an sfile, building self.token2id

        Parameters
        ----------
        sfile : String or open file
            The sparse formatted file we will load.

        Returns
        -------
        self
        """
        # TODO Allow loading of more than one sfile
        assert not self.sfile_loaded

        # Build token2id and the per-token statistics in one pass.
        token2id, token_score, doc_freq, num_docs = (
            self._load_sfile_fwd(sfile))

        self.token2id = token2id
        self.token_score = token_score
        self.doc_freq = doc_freq
        self.num_docs = num_docs

        self.sfile_loaded = True
        # Collisions (two tokens hashed to the same id) are resolved
        # lazily by set_id2token/_resolve_collisions.
        self.collisions_resolved = False
def _load_sfile_fwd(self, sfile):
"""
Builds the "forward" objects involved in loading an sfile.
"""
token2id = {}
token_score = defaultdict(float)
doc_freq = defaultdict(int)
num_docs = 0
hash_fun = self._get_hash_fun()
with smart_open(sfile) as open_file:
# Each line represents one document
for line in open_file:
num_docs += 1
record_dict = self.formatter.sstr_to_dict(line)
for token, value in record_dict['feature_values'].iteritems():
hash_value = hash_fun(token)
token2id[token] = hash_value
token_score[token] += value
doc_freq[token] += 1
return token2id, token_score, doc_freq, num_docs
def set_id2token(self, seed=None):
"""
Sets self.id2token, resolving collisions as needed (which alters
self.token2id)
"""
self._resolve_collisions(seed=seed)
self.id2token = {v: k for k, v in self.token2id.iteritems()}
    def _resolve_collisions(self, seed=None):
        """
        Alters self.token2id by finding new id values used using a
        "random probe" method.

        Meant to be called by self.set_id2token.  If you call this by itself,
        then self.token2id is altered, but self.id2token is not!!!!

        Raises
        ------
        CollisionError
            If more than half of the vocabulary collides (resolution
            would be inefficient at that point).
        """
        # id_counts[i] = number of tokens currently mapped to id i
        id_counts = Counter(self.token2id.values())
        vocab_size = self.vocab_size

        # Make sure we don't have too many collisions
        num_collisions = vocab_size - len(id_counts)
        self._print(
            "collisions = %d, vocab_size = %d" % (num_collisions, vocab_size))
        if num_collisions > vocab_size / 2.:
            msg = (
                "Too many collisions to be efficient: "
                "num_collisions = %d.  vocab_size = %d.  Try using the "
                "function collision_probability to estimate needed precision"
                % (num_collisions, vocab_size))
            raise CollisionError(msg)

        # Seed for testing
        random.seed(seed)

        # Resolve the collisions in this loop
        # (tokens whose id is shared with at least one other token)
        collisions = (
            tok for tok in self.token2id if id_counts[self.token2id[tok]] > 1)
        for token in collisions:
            old_id = self.token2id[token]
            new_id = old_id
            # If id_counts[old_id] > 1, then the collision still must be
            # resolved.  In that case, change new_id and update id_counts
            if id_counts[old_id] > 1:
                # id_counts is the only dict (at this time) holding every
                # id you have ever seen
                # Randomly probe until a never-used id is found.
                while new_id in id_counts:
                    new_id = random.randint(0, self.precision - 1)
                new_id = new_id % self.precision
                id_counts[old_id] -= 1
                id_counts[new_id] = 1
            # Update dictionaries
            self.token2id[token] = new_id

        self._print("All collisions resolved")
        self.collisions_resolved = True
    def compactify(self):
        """
        Removes "gaps" in the id values in self.token2id.  Every single id
        value will (probably) be altered.

        After compactification the ids are exactly 0..vocab_size-1 (in
        dict iteration order), self.id2token is rebuilt if it existed, and
        self.bit_precision_required is recomputed.

        Raises
        ------
        CollisionError
            If self.bit_precision cannot even represent vocab_size ids.
        """
        # You can't compactify if self.bit_precision is too low
        min_precision = int(np.ceil(np.log2(self.vocab_size)))
        if self.bit_precision < min_precision:
            raise CollisionError(
                "Cannot compactify unless you increase self.bit_precision "
                "to >= %d or remove some tokens" % min_precision)

        # Re-number tokens consecutively starting at 0.
        new_token2id = {}
        for i, tok in enumerate(self.token2id):
            new_token2id[tok] = i
        self.token2id = new_token2id

        if hasattr(self, 'id2token'):
            self.set_id2token()

        self.set_bit_precision_required()
        self._print(
            "Compactification done.  self.bit_precision_required = %d"
            % self.bit_precision_required)
def set_bit_precision_required(self):
"""
Sets self.bit_precision_required to the minimum bit precision b such
that all token id values are less than 2^b.
The idea is that only compactification can change this, so we only
(automatically) call this after compactification.
"""
max_id = np.max(self.token2id.values())
self.bit_precision_required = int(np.ceil(np.log2(max_id)))
def filter_sfile(
self, infile, outfile, doc_id_list=None, enforce_all_doc_id=True):
"""
Alter an sfile by converting tokens to id values, and removing tokens
not in self.token2id. Optionally filters on doc_id.
Parameters
----------
infile : file path or buffer
outfile : file path or buffer
doc_id_list : Iterable over strings
Keep only rows with doc_id in this list
enforce_all_doc_id : Boolean
If True (and doc_id is not None), raise exception unless all doc_id
in doc_id_list are seen.
"""
assert self.sfile_loaded, "Must load an sfile before you can filter"
if not hasattr(self, 'id2token'):
self._print(
"WARNING: Filtering an sfile before setting self.id2token. "
"The resultant outfile will have collisions and you will not "
"be able to convert ids back to tokens.\nIt is recommended to "
"call: self.compactify() then either self.set_id2token() or "
" self.save() before filtering")
extra_filter = self._get_extra_filter(doc_id_list)
with smart_open(infile) as f, smart_open(outfile, 'w') as g:
# Each line represents one document
for line in f:
record_dict = self.formatter.sstr_to_dict(line)
if extra_filter(record_dict):
record_dict['feature_values'] = {
self.token2id[token]: value
for token, value
in record_dict['feature_values'].iteritems()
if token in self.token2id}
new_sstr = self.formatter.get_sstr(**record_dict)
g.write(new_sstr + '\n')
self._done_check(enforce_all_doc_id)
def _get_extra_filter(self, doc_id_list):
self._doc_id_seen = set()
# Possible filters to use
if doc_id_list is not None:
self._doc_id_set = set(doc_id_list)
def doc_id_filter(record_dict):
doc_id = record_dict['doc_id']
self._doc_id_seen.add(doc_id)
return doc_id in self._doc_id_set
else:
self._doc_id_set = set()
doc_id_filter = lambda record_dict: True
# Add together all the filters into one function
return lambda record_dict: doc_id_filter(record_dict)
def _done_check(self, enforce_all_doc_id):
"""
QA check to perform once we're done filtering an sfile.
"""
# Make sure we saw all the doc_id we're supposed to
if enforce_all_doc_id:
assert self._doc_id_set.issubset(self._doc_id_seen), (
"Did not see every doc_id in the passed doc_id_list")
def filter_extremes(
self, doc_freq_min=0, doc_freq_max=np.inf, doc_fraction_min=0,
doc_fraction_max=1, token_score_min=0, token_score_max=np.inf,
token_score_quantile_min=0, token_score_quantile_max=1):
"""
Remove extreme tokens from self (calling self.filter_tokens).
Parameters
----------
doc_freq_min : Integer
Remove tokens that in less than this number of documents
doc_freq_max : Integer
doc_fraction_min : Float in [0, 1]
Remove tokens that are in less than this fraction of documents
doc_fraction_max : Float in [0, 1]
token_score_quantile_min : Float in [0, 1]
Minimum quantile that the token score (usually total token count)
can be in.
token_score_quantile_max : Float in [0, 1]
Maximum quantile that the token score can be in
Returns
-------
self
"""
frame = self.to_frame()
to_remove_mask = (
(frame.doc_freq < doc_freq_min)
| (frame.doc_freq > doc_freq_max)
| (frame.doc_freq < (doc_fraction_min * self.num_docs))
| (frame.doc_freq > (doc_fraction_max * self.num_docs))
| (frame.token_score < token_score_min)
| (frame.token_score > token_score_max)
| (frame.token_score
< frame.token_score.quantile(token_score_quantile_min))
| (frame.token_score
> frame.token_score.quantile(token_score_quantile_max))
)
self._print(
"Removed %d/%d tokens" % (to_remove_mask.sum(), len(frame)))
self.filter_tokens(frame[to_remove_mask].index)
def filter_tokens(self, tokens):
"""
Remove tokens from appropriate attributes.
Parameters
----------
tokens : String or iterable over strings
E.g. a single token or list of tokens
Returns
-------
self
"""
if isinstance(tokens, str):
tokens = [tokens]
for tok in tokens:
id_value = self.token2id[tok]
self.token2id.pop(tok)
self.token_score.pop(tok)
self.doc_freq.pop(tok)
if hasattr(self, 'id2token'):
self.id2token.pop(id_value)
def _print(self, msg):
if self.verbose:
print(msg)
def to_frame(self):
"""
Return a dataframe representation of self.
"""
token2id = self.token2id
token_score = self.token_score
doc_freq = self.doc_freq
frame = pd.DataFrame(
{'token_score': [token_score[tok] for tok in token2id],
'doc_freq': [doc_freq[tok] for tok in token2id]},
index=[tok for tok in token2id])
frame['doc_fraction'] = frame.doc_freq / float(self.num_docs)
frame.index.name = 'token'
return frame
@property
def vocab_size(self):
return len(self.token2id)
def save(self, savepath, protocol=-1, set_id2token=True):
"""
Pickle self to outfile.
Parameters
----------
savefile : filepath or buffer
protocol : 0, 1, 2, -1
0 < 1 < 2 in terms of performance. -1 means use highest available.
set_id2token : Boolean
If True, set self.id2token before saving.
Used to associate tokens with the output of a VW file.
"""
if set_id2token:
self.set_id2token()
SaveLoad.save(self, savepath, protocol=protocol)
def collision_probability(vocab_size, bit_precision):
"""
Approximate probability of at least one collision
(assuming perfect hashing). See the Wikipedia article on
"The birthday problem" for details.
Parameters
----------
vocab_size : Integer
Number of unique words in vocabulary
bit_precision : Integer
Number of bits in space we are hashing to
"""
exponent = - vocab_size * (vocab_size - 1) / 2.**bit_precision
return 1 - np.exp(exponent)
class CollisionError(Exception):
pass
| [
"pandas.DataFrame",
"random.randint",
"numpy.log2",
"collections.defaultdict",
"re.findall",
"random.seed",
"numpy.exp",
"hashlib.sha224",
"re.search"
] | [((5219, 5259), 're.findall', 're.findall', (['"""(\\\\S+):(\\\\S*)"""', 'feature_str'], {}), "('(\\\\S+):(\\\\S*)', feature_str)\n", (5229, 5259), False, 'import re\n'), ((16768, 16786), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (16779, 16786), False, 'from collections import Counter, defaultdict\n'), ((16806, 16822), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (16817, 16822), False, 'from collections import Counter, defaultdict\n'), ((18713, 18730), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (18724, 18730), False, 'import random\n'), ((26557, 26716), 'pandas.DataFrame', 'pd.DataFrame', (["{'token_score': [token_score[tok] for tok in token2id], 'doc_freq': [\n doc_freq[tok] for tok in token2id]}"], {'index': '[tok for tok in token2id]'}), "({'token_score': [token_score[tok] for tok in token2id],\n 'doc_freq': [doc_freq[tok] for tok in token2id]}, index=[tok for tok in\n token2id])\n", (26569, 26716), True, 'import pandas as pd\n'), ((28013, 28029), 'numpy.exp', 'np.exp', (['exponent'], {}), '(exponent)\n', (28019, 28029), True, 'import numpy as np\n'), ((10366, 10395), 're.search', 're.search', (['"""[|\\\\s:\']"""', 'doc_id'], {}), '("[|\\\\s:\']", doc_id)\n', (10375, 10395), False, 'import re\n'), ((19955, 19979), 'numpy.log2', 'np.log2', (['self.vocab_size'], {}), '(self.vocab_size)\n', (19962, 19979), True, 'import numpy as np\n'), ((21030, 21045), 'numpy.log2', 'np.log2', (['max_id'], {}), '(max_id)\n', (21037, 21045), True, 'import numpy as np\n'), ((19355, 19392), 'random.randint', 'random.randint', (['(0)', '(self.precision - 1)'], {}), '(0, self.precision - 1)\n', (19369, 19392), False, 'import random\n'), ((15731, 15748), 'hashlib.sha224', 'hashlib.sha224', (['w'], {}), '(w)\n', (15745, 15748), False, 'import hashlib\n')] |
import numpy as np
from skimage.io import imsave
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.expand_dims(np.max(x, axis=-1), axis=-1))
return e_x / np.expand_dims(e_x.sum(axis=-1), axis=-1) # only difference
def save_samples(np_imgs, img_path):
"""
Args:
np_imgs: [N, H, W, 3] float32
img_path: str
"""
np_imgs = np_imgs.astype(np.uint8)
N, H, W, _ = np_imgs.shape
num = int(N ** 0.5)
merge_img = np.zeros((num * H, num * W, 3), dtype=np.uint8)
for i in range(num):
for j in range(num):
merge_img[i*H:(i+1)*H, j*W:(j+1)*W, :] = np_imgs[i*num+j, :, :, :]
imsave(img_path, merge_img)
def logits_2_pixel_value(logits, mu=1.1):
"""
Args:
logits: [n, 256] float32
mu : float32
Returns:
pixels: [n] float32
"""
rebalance_logits = logits * mu
probs = softmax(rebalance_logits)
pixel_dict = np.arange(0, 256, dtype=np.float32)
pixels = np.sum(probs * pixel_dict, axis=1)
return np.floor(pixels)
| [
"numpy.sum",
"numpy.floor",
"numpy.zeros",
"numpy.max",
"numpy.arange",
"skimage.io.imsave"
] | [((486, 533), 'numpy.zeros', 'np.zeros', (['(num * H, num * W, 3)'], {'dtype': 'np.uint8'}), '((num * H, num * W, 3), dtype=np.uint8)\n', (494, 533), True, 'import numpy as np\n'), ((658, 685), 'skimage.io.imsave', 'imsave', (['img_path', 'merge_img'], {}), '(img_path, merge_img)\n', (664, 685), False, 'from skimage.io import imsave\n'), ((918, 953), 'numpy.arange', 'np.arange', (['(0)', '(256)'], {'dtype': 'np.float32'}), '(0, 256, dtype=np.float32)\n', (927, 953), True, 'import numpy as np\n'), ((965, 999), 'numpy.sum', 'np.sum', (['(probs * pixel_dict)'], {'axis': '(1)'}), '(probs * pixel_dict, axis=1)\n', (971, 999), True, 'import numpy as np\n'), ((1009, 1025), 'numpy.floor', 'np.floor', (['pixels'], {}), '(pixels)\n', (1017, 1025), True, 'import numpy as np\n'), ((166, 184), 'numpy.max', 'np.max', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (172, 184), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# =======
# Imports
# =======
import sys
import os
from os.path import join
import getopt
import re
import numpy
import pickle
import platform
import subprocess
import multiprocessing
from datetime import datetime
from imate import traceinv, logdet
# ===============
# parse arguments
# ===============
def parse_arguments(argv):
"""
Parses the argument.
"""
# -----------
# print usage
# -----------
def print_usage(exec_name):
usage_string = "Usage: " + exec_name + " <arguments>"
options_string = """
Required arguments:
-f --function=string Function can be 'logdet' or 'traceinv' (default).
Required arguments (choose at least one, or more):
-s --32-bit Uses single-precision matrices. Default is not to use.
-d --64-bit Uses double-precision matrices. Default is not to use,
-l --128-bit Uses long-double-precision matrices. Default is not to use.
-a --all Uses all 32-bit, 64-bit, and 128-bit precision matrices.
"""
print(usage_string)
print(options_string)
# -----------------
# Initialize variables (defaults)
arguments = {
'32-bit': False,
'64-bit': False,
'128-bit': False,
'function': 'traceinv'
}
# Get options
try:
opts, args = getopt.getopt(
argv[1:], "sdlaf:", ["32-bit", "64-bit", "128-bit", "all",
"function="])
except getopt.GetoptError:
print_usage(argv[0])
sys.exit(2)
# Assign options
for opt, arg in opts:
if opt in ('-s', '--32-bit'):
arguments['32-bit'] = True
elif opt in ('-d', '--64-bit'):
arguments['64-bit'] = True
elif opt in ('-l', '--128-bit'):
arguments['128-bit'] = True
elif opt in ('-a', '--all'):
arguments['32-bit'] = True
arguments['64-bit'] = True
arguments['128-bit'] = True
elif opt in ('-f', '--function'):
arguments['function'] = arg
if len(argv) < 2:
print_usage(argv[0])
sys.exit()
return arguments
# ==================
# get processor name
# ==================
def get_processor_name():
"""
Gets the name of CPU.
For windows operating system, this function still does not get the full
brand name of the cpu.
"""
if platform.system() == "Windows":
return platform.processor()
elif platform.system() == "Darwin":
os.environ['PATH'] = os.environ['PATH'] + os.pathsep + '/usr/sbin'
command = "sysctl -n machdep.cpu.brand_string"
return subprocess.getoutput(command).strip()
elif platform.system() == "Linux":
command = "cat /proc/cpuinfo"
all_info = subprocess.getoutput(command).strip()
for line in all_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1)[1:]
return ""
# ===============
# compare methods
# ===============
def compare_methods(M, config, matrix, arguments):
"""
Compares speed of slq, hutchinson, and cholesky methods on band matrix.
"""
if arguments['function'] == 'traceinv':
function = traceinv
elif arguments['function'] == 'logdet':
function = logdet
else:
raise ValueError("'function' should be either 'traceinv' or 'logdet'.")
# SLQ method
trace_s = numpy.zeros((config['num_repeats'], ), dtype=float)
absolute_error_s = numpy.zeros((config['num_repeats'], ), dtype=float)
alg_wall_time_s = numpy.zeros((config['num_repeats'], ), dtype=float)
# When computing traceinv on practical matrices, if the matrix is very
# large, reduce the number of repeats.
num_repeats = config['num_repeats']
if arguments['function'] == 'traceinv' and M.shape[0] > 2**17:
num_repeats = 2
for i in range(num_repeats):
print('\tslq, repeat %d ...' % (i+1), end="")
trace_s[i], info_s = function(
M,
method='slq',
exponent=config['exponent'],
gram=config['gram'],
min_num_samples=config['min_num_samples'],
max_num_samples=config['max_num_samples'],
error_rtol=config['error_rtol'],
error_atol=config['error_atol'],
confidence_level=config['confidence_level'],
outlier_significance_level=config[
'outlier_significance_level'],
lanczos_degree=config['lanczos_degree'],
lanczos_tol=config['lanczos_tol'],
orthogonalize=config['orthogonalize'],
num_threads=config['num_threads'],
verbose=config['verbose'],
plot=config['plot'],
gpu=False)
print(' done.')
absolute_error_s[i] = info_s['error']['absolute_error']
alg_wall_time_s[i] = info_s['time']['alg_wall_time']
# Taking average of repeated values
# trace_s = numpy.mean(trace_s)
# trace_s = trace_s[-1]
# absolute_error_s = numpy.mean(absolute_error_s)
# absolute_error_s = absolute_error_s[-1]
# alg_wall_time_s = numpy.mean(alg_wall_time_s)
# Reset values with array of repeated experiment
info_s['error']['absolute_error'] = absolute_error_s
info_s['time']['alg_wall_time'] = alg_wall_time_s
# Hutchinson method (only for traceinv, 32-bit, and 64-bit)
if M.shape[0] <= matrix['max_hutchinson_size'] and \
M.dtype != 'float128' and \
arguments['function'] == 'traceinv':
trace_h = numpy.zeros((config['num_repeats'], ), dtype=float)
absolute_error_h = numpy.zeros((config['num_repeats'], ), dtype=float)
alg_wall_time_h = numpy.zeros((config['num_repeats'], ), dtype=float)
for i in range(num_repeats):
print('\thutchinson, repeat %d ...' % (i+1), end="")
trace_h[i], info_h = function(
M,
method='hutchinson',
exponent=config['exponent'],
assume_matrix='sym',
min_num_samples=config['min_num_samples'],
max_num_samples=config['max_num_samples'],
error_atol=config['error_atol'],
error_rtol=config['error_rtol'],
confidence_level=config['confidence_level'],
outlier_significance_level=config[
'outlier_significance_level'],
solver_tol=config['solver_tol'],
orthogonalize=bool(config['orthogonalize']),
num_threads=config['num_threads'],
verbose=False,
plot=False)
print(' done.')
absolute_error_h[i] = info_h['error']['absolute_error']
alg_wall_time_h[i] = info_h['time']['alg_wall_time']
# Taking average of repeated values
# trace_h = numpy.mean(trace_h)
# trace_h = trace_h[-1]
# absolute_error_h = numpy.mean(absolute_error_h)
# absolute_error_h = absolute_error_h[-1]
# alg_wall_time_h = numpy.mean(alg_wall_time_h)
# Reset values with array of repeated experiment
info_h['error']['absolute_error'] = absolute_error_h
info_h['time']['alg_wall_time'] = alg_wall_time_h
else:
# Takes a long time, do not compute
trace_h = numpy.nan
info_h = {}
# Cholesky method (only for 64-bit)
if M.shape[0] <= matrix['max_cholesky_size'] and M.dtype == 'float64':
print('\tcholesky ...', end="")
if arguments['function'] == 'traceinv':
trace_c, info_c = function(
M,
method='cholesky',
exponent=config['exponent'],
cholmod=None,
invert_cholesky=False)
trace_c2 = numpy.nan
info_c2 = {}
elif arguments['function'] == 'logdet':
# This uses cholmod (if scikit-sparse is installed), otherwise
# it only uses scipy.sparse.cholesky
trace_c, info_c = function(
M,
method='cholesky',
cholmod=None,
exponent=config['exponent'])
# If cholmod is used, also compute once more without cholmod
# if info_c['solver']['cholmod_used'] is True and \
# M.shape[0] <= matrix['max_cholesky_size_2']:
# trace_c2, info_c2 = function(
# M,
# method='cholesky',
# cholmod=False,
# exponent=config['exponent'])
# else:
# trace_c2 = numpy.nan
# info_c2 = {}
trace_c2 = numpy.nan
info_c2 = {}
print(' done.')
else:
# Takes a long time, do not compute
trace_c = numpy.nan
trace_c2 = numpy.nan
info_c = {}
info_c2 = {}
# Save all results in a dictionary
result = {
'trace_s': trace_s,
'trace_h': trace_h,
'trace_c': trace_c,
'trace_c2': trace_c2,
'info_s': info_s,
'info_h': info_h,
'info_c': info_c,
'info_c2': info_c2
}
return result
# ====
# main
# ====
def main(argv):
"""
benchmark test for speed and accuracy of slq, hutchinson, and cholesky
methods.
"""
# Settings
config = {
'num_repeats': 10,
'gram': False,
'exponent': 1,
'min_num_samples': 200,
'max_num_samples': 200,
'lanczos_degree': 100,
'lanczos_tol': None,
'solver_tol': 1e-6,
'orthogonalize': 0,
'error_rtol': 1e-3,
'error_atol': 0,
'confidence_level': 0.95,
'outlier_significance_level': 0.01,
'verbose': False,
'plot': False,
'num_threads': 0
}
matrix = {
'max_hutchinson_size': 2**22,
'max_cholesky_size': 2**16, # for using cholmod
'max_cholesky_size_2': 2**16, # for not using cholmod (logdet only)
'band_alpha': 2.0,
'band_beta': 1.0,
'gram': True,
'format': 'csr',
}
devices = {
'cpu_name': get_processor_name(),
'num_all_cpu_threads': multiprocessing.cpu_count(),
}
benchmark_dir = '..'
directory = join(benchmark_dir, 'matrices')
# data_names = ['Queen_4147', 'G3_circuit', 'Flan_1565', 'Bump_2911',
# 'cvxbqp1', 'StocF-1465', 'G2_circuit', 'gridgena',
# 'parabolic_fem']
data_names = ['nos5', 'mhd4800b', 'bodyy6', 'G2_circuit', 'parabolic_fem',
'StocF-1465', 'Bump_2911', 'Queen_4147']
# data_names = ['nos7', 'nos5', 'plat362', 'bcsstk21', 'mhd4800b', 'aft01',
# 'bodyy6', 'ted_B', 'G2_circuit', 'parabolic_fem',
# 'StocF-1465', 'Bump_2911', 'Queen_4147']
data_types = ['32', '64', '128']
data_results = []
arguments = parse_arguments(argv)
# Computing logdet with cholesky method is very efficient. So, do not limit
# the matrix size for cholesky method of function is logdet.
# Note: in computing logdet with cholesky, matrix of size 2.9e+6 raises
# memory error. scikit-sparse requires more memory than SLQ. So, here, we
# liomit the matrix size for logdet to 2e+6.
if arguments['function'] == 'logdet':
matrix['max_cholesky_size'] = 2e+6
matrix['max_cholesky_size_2'] = 2e+6
# Loop over data filenames
for data_name in data_names:
data_result = {
'data_name': data_name,
'type_results': [],
}
# For each data, loop over float type, such as 32-bit, 64-bit, 128-bit
for data_type in data_types:
filename = data_name + '_float' + data_type + '.pickle'
filepath = join(directory, filename)
with open(filepath, 'rb') as h:
M = pickle.load(h)
print('loaded %s.' % filename)
# Run a benchmark for all algorithms
result = compare_methods(M, config, matrix, arguments)
type_result = {
'data_type': data_type,
'result': result
}
data_result['type_results'].append(type_result)
print('')
data_results.append(data_result)
print('')
now = datetime.now()
# Final object of all results
benchmark_results = {
'config': config,
'matrix': matrix,
'devices': devices,
'data_results': data_results,
'date': now.strftime("%d/%m/%Y %H:%M:%S")
}
# Save to file
benchmark_dir = '..'
pickle_dir = 'pickle_results'
if arguments['function'] == 'traceinv':
output_filename = 'compare_methods_practical_matrix_traceinv'
elif arguments['function'] == 'logdet':
output_filename = 'compare_methods_practical_matrix_logdet'
else:
raise ValueError("'function' should be either 'traceinv' or 'logdet'.")
output_filename += '.pickle'
output_full_filename = join(benchmark_dir, pickle_dir, output_filename)
with open(output_full_filename, 'wb') as file:
pickle.dump(benchmark_results, file, protocol=pickle.HIGHEST_PROTOCOL)
print('Results saved to %s.' % output_full_filename)
# ===========
# script main
# ===========
if __name__ == "__main__":
main(sys.argv)
| [
"platform.processor",
"pickle.dump",
"getopt.getopt",
"numpy.zeros",
"datetime.datetime.now",
"pickle.load",
"re.sub",
"platform.system",
"subprocess.getoutput",
"os.path.join",
"sys.exit",
"multiprocessing.cpu_count"
] | [((3480, 3530), 'numpy.zeros', 'numpy.zeros', (["(config['num_repeats'],)"], {'dtype': 'float'}), "((config['num_repeats'],), dtype=float)\n", (3491, 3530), False, 'import numpy\n'), ((3555, 3605), 'numpy.zeros', 'numpy.zeros', (["(config['num_repeats'],)"], {'dtype': 'float'}), "((config['num_repeats'],), dtype=float)\n", (3566, 3605), False, 'import numpy\n'), ((3629, 3679), 'numpy.zeros', 'numpy.zeros', (["(config['num_repeats'],)"], {'dtype': 'float'}), "((config['num_repeats'],), dtype=float)\n", (3640, 3679), False, 'import numpy\n'), ((10571, 10602), 'os.path.join', 'join', (['benchmark_dir', '"""matrices"""'], {}), "(benchmark_dir, 'matrices')\n", (10575, 10602), False, 'from os.path import join\n'), ((12621, 12635), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (12633, 12635), False, 'from datetime import datetime\n'), ((13327, 13375), 'os.path.join', 'join', (['benchmark_dir', 'pickle_dir', 'output_filename'], {}), '(benchmark_dir, pickle_dir, output_filename)\n', (13331, 13375), False, 'from os.path import join\n'), ((1346, 1436), 'getopt.getopt', 'getopt.getopt', (['argv[1:]', '"""sdlaf:"""', "['32-bit', '64-bit', '128-bit', 'all', 'function=']"], {}), "(argv[1:], 'sdlaf:', ['32-bit', '64-bit', '128-bit', 'all',\n 'function='])\n", (1359, 1436), False, 'import getopt\n'), ((2149, 2159), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2157, 2159), False, 'import sys\n'), ((2428, 2445), 'platform.system', 'platform.system', ([], {}), '()\n', (2443, 2445), False, 'import platform\n'), ((2475, 2495), 'platform.processor', 'platform.processor', ([], {}), '()\n', (2493, 2495), False, 'import platform\n'), ((5690, 5740), 'numpy.zeros', 'numpy.zeros', (["(config['num_repeats'],)"], {'dtype': 'float'}), "((config['num_repeats'],), dtype=float)\n", (5701, 5740), False, 'import numpy\n'), ((5769, 5819), 'numpy.zeros', 'numpy.zeros', (["(config['num_repeats'],)"], {'dtype': 'float'}), "((config['num_repeats'],), dtype=float)\n", (5780, 5819), False, 
'import numpy\n'), ((5847, 5897), 'numpy.zeros', 'numpy.zeros', (["(config['num_repeats'],)"], {'dtype': 'float'}), "((config['num_repeats'],), dtype=float)\n", (5858, 5897), False, 'import numpy\n'), ((10494, 10521), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (10519, 10521), False, 'import multiprocessing\n'), ((13435, 13505), 'pickle.dump', 'pickle.dump', (['benchmark_results', 'file'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(benchmark_results, file, protocol=pickle.HIGHEST_PROTOCOL)\n', (13446, 13505), False, 'import pickle\n'), ((1555, 1566), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (1563, 1566), False, 'import sys\n'), ((2506, 2523), 'platform.system', 'platform.system', ([], {}), '()\n', (2521, 2523), False, 'import platform\n'), ((12086, 12111), 'os.path.join', 'join', (['directory', 'filename'], {}), '(directory, filename)\n', (12090, 12111), False, 'from os.path import join\n'), ((2730, 2747), 'platform.system', 'platform.system', ([], {}), '()\n', (2745, 2747), False, 'import platform\n'), ((12176, 12190), 'pickle.load', 'pickle.load', (['h'], {}), '(h)\n', (12187, 12190), False, 'import pickle\n'), ((2682, 2711), 'subprocess.getoutput', 'subprocess.getoutput', (['command'], {}), '(command)\n', (2702, 2711), False, 'import subprocess\n'), ((2817, 2846), 'subprocess.getoutput', 'subprocess.getoutput', (['command'], {}), '(command)\n', (2837, 2846), False, 'import subprocess\n'), ((2957, 2995), 're.sub', 're.sub', (['""".*model name.*:"""', '""""""', 'line', '(1)'], {}), "('.*model name.*:', '', line, 1)\n", (2963, 2995), False, 'import re\n')] |
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring, redefined-outer-name
"""Tests for estimators working with multiplicities and related routines"""
from collections import Counter
import numpy
import pytest
from make_test_ref import SEED, approx
from ndd import fnsb
from ndd.counters import MultiCounter
from ndd.estimators import Grassberger, MillerMadow, Nsb, Plugin
K = 4
N = 10000
P = 3
@pytest.fixture
def data_1d():
numpy.random.seed(SEED)
return numpy.random.randint(K, size=N)
@pytest.fixture
def data_2d():
numpy.random.seed(SEED)
return numpy.random.randint(K, size=(N, P))
@pytest.fixture
def counts_1d(data_1d):
counter = MultiCounter(data_1d, stat='counts')
return counter.counts()[1]
@pytest.fixture
def multi_1d(data_1d):
counter = MultiCounter(data_1d, stat='multiplicities')
return counter.counts(k=K)
def compute_frequencies(a):
"""Frequencies from 1D array"""
return list(Counter(a).values())
def compute_multiplicities(a):
"""Return a tuple (frequencies, multiplicities) from 1D array"""
freqs = compute_frequencies(a)
counter = Counter(freqs)
freqs, mults = (list(x) for x in zip(*counter.items()))
# add unobserved bins
n_observed_bins = sum(mults)
freqs.append(0)
mults.append(K - n_observed_bins)
return freqs, mults
def identical_sorted(a, b):
return sorted(list(a)) == sorted(list(b))
def test_nsb_from_multiplicities(data_1d):
frequencies = compute_frequencies(data_1d)
hn, hz = compute_multiplicities(data_1d)
estimate_from_counts = fnsb.nsb(frequencies, K)[0]
estimate_from_multiplicities = fnsb.nsb_from_multiplicities(hn, hz, K)[0]
assert estimate_from_multiplicities == approx(estimate_from_counts)
def test_counter_1d_counts(data_1d):
freqs0 = compute_frequencies(data_1d)
counter = MultiCounter(data_1d, stat='counts')
freqs = counter.counts()[1]
assert identical_sorted(freqs, freqs0)
def test_counter_2d(data_2d):
freqs0 = numpy.unique(data_2d, return_counts=1, axis=0)[1]
mult0 = Counter(freqs0)
mult0[0] = 0
counter = MultiCounter(data_2d)
mult = counter.counts()[1]
assert identical_sorted(mult, mult0.values())
def test_counter_2d_columns(data_2d):
ids = (1, 2)
freqs0 = numpy.unique(data_2d[:, list(ids)], return_counts=1, axis=0)[1]
mult0 = Counter(freqs0)
mult0[0] = 0
counter = MultiCounter(data_2d)
mult = counter.counts(ids)[1]
assert identical_sorted(mult, mult0.values())
def test_nsb(counts_1d, multi_1d):
estimate_from_counts = fnsb.nsb(counts_1d, K)[0]
estimate_from_multiplicities = fnsb.nsb_from_multiplicities(
multi_1d[0], multi_1d[1], K)[0]
assert estimate_from_multiplicities == approx(estimate_from_counts)
def test_nsb_estimator(counts_1d, multi_1d):
estimate_from_counts = Nsb()(counts_1d, k=K)
nk, zk = multi_1d
estimate_from_multiplicities = Nsb()(nk, zk=zk, k=K)
assert estimate_from_multiplicities == approx(estimate_from_counts)
def test_ww(counts_1d, multi_1d):
alpha = 0.1
estimate_from_counts = fnsb.ww(counts_1d, K, alpha)[0]
estimate_from_multiplicities = fnsb.ww_from_multiplicities(
multi_1d[0], multi_1d[1], K, alpha)[0]
assert estimate_from_multiplicities == approx(estimate_from_counts)
def test_ww_estimator(counts_1d, multi_1d):
alpha = 0.1
estimate_from_counts = Nsb(alpha=alpha)(counts_1d, k=K)
nk, zk = multi_1d
estimate_from_multiplicities = Nsb(alpha=alpha)(nk, zk=zk, k=K)
assert estimate_from_multiplicities == approx(estimate_from_counts)
def test_grassberger_estimator(counts_1d, multi_1d):
est = Grassberger()
estimate_from_counts = est(counts_1d, k=K)
nk, zk = multi_1d
estimate_from_multiplicities = est(nk, zk=zk, k=K)
assert estimate_from_multiplicities == approx(estimate_from_counts)
def test_plugin_estimator(counts_1d, multi_1d):
est = Plugin()
estimate_from_counts = est(counts_1d, k=K)
nk, zk = multi_1d
estimate_from_multiplicities = est(nk, zk=zk, k=K)
assert estimate_from_multiplicities == approx(estimate_from_counts)
def test_millermadow_estimator(counts_1d, multi_1d):
est = MillerMadow()
estimate_from_counts = est(counts_1d, k=K)
nk, zk = multi_1d
estimate_from_multiplicities = est(nk, zk=zk, k=K)
assert estimate_from_multiplicities == approx(estimate_from_counts)
| [
"ndd.fnsb.nsb_from_multiplicities",
"ndd.estimators.Grassberger",
"numpy.random.seed",
"ndd.fnsb.ww",
"ndd.fnsb.nsb",
"make_test_ref.approx",
"numpy.random.randint",
"ndd.counters.MultiCounter",
"ndd.estimators.Plugin",
"ndd.fnsb.ww_from_multiplicities",
"collections.Counter",
"ndd.estimators.... | [((442, 465), 'numpy.random.seed', 'numpy.random.seed', (['SEED'], {}), '(SEED)\n', (459, 465), False, 'import numpy\n'), ((477, 508), 'numpy.random.randint', 'numpy.random.randint', (['K'], {'size': 'N'}), '(K, size=N)\n', (497, 508), False, 'import numpy\n'), ((546, 569), 'numpy.random.seed', 'numpy.random.seed', (['SEED'], {}), '(SEED)\n', (563, 569), False, 'import numpy\n'), ((581, 617), 'numpy.random.randint', 'numpy.random.randint', (['K'], {'size': '(N, P)'}), '(K, size=(N, P))\n', (601, 617), False, 'import numpy\n'), ((674, 710), 'ndd.counters.MultiCounter', 'MultiCounter', (['data_1d'], {'stat': '"""counts"""'}), "(data_1d, stat='counts')\n", (686, 710), False, 'from ndd.counters import MultiCounter\n'), ((797, 841), 'ndd.counters.MultiCounter', 'MultiCounter', (['data_1d'], {'stat': '"""multiplicities"""'}), "(data_1d, stat='multiplicities')\n", (809, 841), False, 'from ndd.counters import MultiCounter\n'), ((1127, 1141), 'collections.Counter', 'Counter', (['freqs'], {}), '(freqs)\n', (1134, 1141), False, 'from collections import Counter\n'), ((1856, 1892), 'ndd.counters.MultiCounter', 'MultiCounter', (['data_1d'], {'stat': '"""counts"""'}), "(data_1d, stat='counts')\n", (1868, 1892), False, 'from ndd.counters import MultiCounter\n'), ((2075, 2090), 'collections.Counter', 'Counter', (['freqs0'], {}), '(freqs0)\n', (2082, 2090), False, 'from collections import Counter\n'), ((2122, 2143), 'ndd.counters.MultiCounter', 'MultiCounter', (['data_2d'], {}), '(data_2d)\n', (2134, 2143), False, 'from ndd.counters import MultiCounter\n'), ((2371, 2386), 'collections.Counter', 'Counter', (['freqs0'], {}), '(freqs0)\n', (2378, 2386), False, 'from collections import Counter\n'), ((2418, 2439), 'ndd.counters.MultiCounter', 'MultiCounter', (['data_2d'], {}), '(data_2d)\n', (2430, 2439), False, 'from ndd.counters import MultiCounter\n'), ((3681, 3694), 'ndd.estimators.Grassberger', 'Grassberger', ([], {}), '()\n', (3692, 3694), False, 'from 
ndd.estimators import Grassberger, MillerMadow, Nsb, Plugin\n'), ((3951, 3959), 'ndd.estimators.Plugin', 'Plugin', ([], {}), '()\n', (3957, 3959), False, 'from ndd.estimators import Grassberger, MillerMadow, Nsb, Plugin\n'), ((4221, 4234), 'ndd.estimators.MillerMadow', 'MillerMadow', ([], {}), '()\n', (4232, 4234), False, 'from ndd.estimators import Grassberger, MillerMadow, Nsb, Plugin\n'), ((1583, 1607), 'ndd.fnsb.nsb', 'fnsb.nsb', (['frequencies', 'K'], {}), '(frequencies, K)\n', (1591, 1607), False, 'from ndd import fnsb\n'), ((1646, 1685), 'ndd.fnsb.nsb_from_multiplicities', 'fnsb.nsb_from_multiplicities', (['hn', 'hz', 'K'], {}), '(hn, hz, K)\n', (1674, 1685), False, 'from ndd import fnsb\n'), ((1732, 1760), 'make_test_ref.approx', 'approx', (['estimate_from_counts'], {}), '(estimate_from_counts)\n', (1738, 1760), False, 'from make_test_ref import SEED, approx\n'), ((2013, 2059), 'numpy.unique', 'numpy.unique', (['data_2d'], {'return_counts': '(1)', 'axis': '(0)'}), '(data_2d, return_counts=1, axis=0)\n', (2025, 2059), False, 'import numpy\n'), ((2588, 2610), 'ndd.fnsb.nsb', 'fnsb.nsb', (['counts_1d', 'K'], {}), '(counts_1d, K)\n', (2596, 2610), False, 'from ndd import fnsb\n'), ((2649, 2706), 'ndd.fnsb.nsb_from_multiplicities', 'fnsb.nsb_from_multiplicities', (['multi_1d[0]', 'multi_1d[1]', 'K'], {}), '(multi_1d[0], multi_1d[1], K)\n', (2677, 2706), False, 'from ndd import fnsb\n'), ((2762, 2790), 'make_test_ref.approx', 'approx', (['estimate_from_counts'], {}), '(estimate_from_counts)\n', (2768, 2790), False, 'from make_test_ref import SEED, approx\n'), ((2865, 2870), 'ndd.estimators.Nsb', 'Nsb', ([], {}), '()\n', (2868, 2870), False, 'from ndd.estimators import Grassberger, MillerMadow, Nsb, Plugin\n'), ((2944, 2949), 'ndd.estimators.Nsb', 'Nsb', ([], {}), '()\n', (2947, 2949), False, 'from ndd.estimators import Grassberger, MillerMadow, Nsb, Plugin\n'), ((3009, 3037), 'make_test_ref.approx', 'approx', (['estimate_from_counts'], {}), 
'(estimate_from_counts)\n', (3015, 3037), False, 'from make_test_ref import SEED, approx\n'), ((3117, 3145), 'ndd.fnsb.ww', 'fnsb.ww', (['counts_1d', 'K', 'alpha'], {}), '(counts_1d, K, alpha)\n', (3124, 3145), False, 'from ndd import fnsb\n'), ((3184, 3247), 'ndd.fnsb.ww_from_multiplicities', 'fnsb.ww_from_multiplicities', (['multi_1d[0]', 'multi_1d[1]', 'K', 'alpha'], {}), '(multi_1d[0], multi_1d[1], K, alpha)\n', (3211, 3247), False, 'from ndd import fnsb\n'), ((3303, 3331), 'make_test_ref.approx', 'approx', (['estimate_from_counts'], {}), '(estimate_from_counts)\n', (3309, 3331), False, 'from make_test_ref import SEED, approx\n'), ((3421, 3437), 'ndd.estimators.Nsb', 'Nsb', ([], {'alpha': 'alpha'}), '(alpha=alpha)\n', (3424, 3437), False, 'from ndd.estimators import Grassberger, MillerMadow, Nsb, Plugin\n'), ((3511, 3527), 'ndd.estimators.Nsb', 'Nsb', ([], {'alpha': 'alpha'}), '(alpha=alpha)\n', (3514, 3527), False, 'from ndd.estimators import Grassberger, MillerMadow, Nsb, Plugin\n'), ((3587, 3615), 'make_test_ref.approx', 'approx', (['estimate_from_counts'], {}), '(estimate_from_counts)\n', (3593, 3615), False, 'from make_test_ref import SEED, approx\n'), ((3862, 3890), 'make_test_ref.approx', 'approx', (['estimate_from_counts'], {}), '(estimate_from_counts)\n', (3868, 3890), False, 'from make_test_ref import SEED, approx\n'), ((4127, 4155), 'make_test_ref.approx', 'approx', (['estimate_from_counts'], {}), '(estimate_from_counts)\n', (4133, 4155), False, 'from make_test_ref import SEED, approx\n'), ((4402, 4430), 'make_test_ref.approx', 'approx', (['estimate_from_counts'], {}), '(estimate_from_counts)\n', (4408, 4430), False, 'from make_test_ref import SEED, approx\n'), ((955, 965), 'collections.Counter', 'Counter', (['a'], {}), '(a)\n', (962, 965), False, 'from collections import Counter\n')] |
from __future__ import absolute_import, division, print_function
import warnings
from collections import defaultdict
from distutils.version import LooseVersion
from threading import Lock
from timeit import default_timer
import numpy as np
from dask.base import normalize_token
from scipy import sparse
from scipy.stats import rankdata
from sklearn.exceptions import FitFailedWarning
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.utils.validation import check_consistent_length
from toolz import pluck
from .._compat import Mapping
from .utils import _index_param_value, _num_samples, _safe_indexing, copy_estimator
# Copied from scikit-learn/sklearn/utils/fixes.py, can be removed once we drop
# support for scikit-learn < 0.18.1 or numpy < 1.12.0.
if LooseVersion(np.__version__) < "1.12.0":
    # Old numpy: subclass to restore picklability of masked arrays.
    class MaskedArray(np.ma.MaskedArray):
        # Before numpy 1.12, np.ma.MaskedArray object is not picklable
        # This fix is needed to make our model_selection.GridSearchCV
        # picklable as the ``cv_results_`` param uses MaskedArray
        def __getstate__(self):
            """Return the internal state of the masked array, for pickling
            purposes.
            """
            # 'C' or 'F' depending on whether the data is Fortran-contiguous
            # but not C-contiguous (flags.fnc).
            cf = "CF"[self.flags.fnc]
            data_state = super(np.ma.MaskedArray, self).__reduce__()[2]
            # Append the serialized mask and the fill value to the base state.
            return data_state + (
                np.ma.getmaskarray(self).tostring(cf),
                self._fill_value,
            )
else:
    from numpy.ma import MaskedArray # noqa
# A singleton to indicate a missing parameter
# ``__reduce__`` returns the module-level attribute name so that pickling
# round-trips to this exact singleton (identity checks ``is MISSING`` keep
# working after unpickling).
MISSING = type(
    "MissingParameter",
    (object,),
    {
        "__slots__": (),
        "__reduce__": lambda self: "MISSING",
        "__doc__": "A singleton to indicate a missing parameter",
    },
)()
# Give dask a stable tokenization for the sentinel so task hashing is
# deterministic across processes.
normalize_token.register(type(MISSING), lambda x: "MISSING")
# A singleton to indicate a failed estimator fit
# Same pickling trick as MISSING above.
FIT_FAILURE = type(
    "FitFailure",
    (object,),
    {
        "__slots__": (),
        "__reduce__": lambda self: "FIT_FAILURE",
        "__doc__": "A singleton to indicate fit failure",
    },
)()
def warn_fit_failure(error_score, e):
    """Emit a ``FitFailedWarning`` explaining that *error_score* will be used
    in place of the real score because fitting raised exception *e*."""
    message = (
        "Classifier fit failed. The score on this train-test"
        " partition for these parameters will be set to %f. "
        "Details: \n%r"
    ) % (error_score, e)
    warnings.warn(message, FitFailedWarning)
# ----------------------- #
# Functions in the graphs #
# ----------------------- #
class CVCache:
    """Caches per-fold slices of (X, y) and fit parameters for a fixed set of
    cross-validation splits.

    ``splits`` is a list of ``(train_indices, test_indices)`` pairs.  When
    ``cache`` is truthy, extracted slices are memoized keyed on
    ``(fold, what, is_train_folds)``; otherwise ``self.cache`` is None and
    every extraction recomputes.  ``pairwise=True`` means X is a precomputed
    square kernel/affinity matrix and must be sliced on both axes.
    """
    def __init__(self, splits, pairwise=False, cache=True, num_train_samples=None):
        self.splits = splits
        self.pairwise = pairwise
        self.cache = {} if cache else None
        self.num_train_samples = num_train_samples
    def __reduce__(self):
        # Pickle without the memoized slices; only whether caching is enabled.
        return (
            CVCache,
            (
                self.splits,
                self.pairwise,
                self.cache is not None,
                self.num_train_samples,
            ),
        )
    def num_test_samples(self):
        """Return an array with the number of test samples in each fold."""
        # Test indices may be boolean masks (count Trues) or index arrays.
        return np.array(
            [i.sum() if i.dtype == bool else len(i) for i in pluck(1, self.splits)]
        )
    def extract(self, X, y, n, is_x=True, is_train_folds=True):
        """Return the requested slice (X or y; train or test side) of fold *n*."""
        if is_x:
            if self.pairwise:
                return self._extract_pairwise(X, y, n, is_train_folds=is_train_folds)
            return self._extract(X, y, n, is_x=True, is_train_folds=is_train_folds)
        if y is None:
            return None
        return self._extract(X, y, n, is_x=False, is_train_folds=is_train_folds)
    def extract_param(self, key, x, n, is_train_folds=True):
        """Slice fit parameter *x* (identified by token *key*) down to the
        train folds or test fold of split *n*, with memoization.

        Returns the slice of ``x`` matching the selected indices of fold *n*.
        """
        if self.cache is not None and (n, key, is_train_folds) in self.cache:
            return self.cache[n, key, is_train_folds]
        inds = self.splits[n][0] if is_train_folds else self.splits[n][1]
        out = _index_param_value( self.num_train_samples, x, inds)
        if self.cache is not None:
            self.cache[n, key, is_train_folds] = out
        return out
    def _extract(self, X, y, n, is_x=True, is_train_folds=True):
        # Generic (non-pairwise) slice with memoization keyed on
        # (fold, is_x, is_train_folds).
        if self.cache is not None and (n, is_x, is_train_folds) in self.cache:
            return self.cache[n, is_x, is_train_folds]
        inds = self.splits[n][0] if is_train_folds else self.splits[n][1]
        result = _safe_indexing(X if is_x else y, inds)
        if self.cache is not None:
            self.cache[n, is_x, is_train_folds] = result
        return result
    def _extract_pairwise(self, X, y, n, is_train_folds=True):
        # Kernel-matrix slice: rows are the selected fold, columns are always
        # the training fold (kernel is computed against training samples).
        if self.cache is not None and (n, True, is_train_folds) in self.cache:
            return self.cache[n, True, is_train_folds]
        if not hasattr(X, "shape"):
            raise ValueError(
                "Precomputed kernels or affinity matrices have "
                "to be passed as arrays or sparse matrices."
            )
        if X.shape[0] != X.shape[1]:
            raise ValueError("X should be a square kernel matrix")
        train, test = self.splits[n]
        result = X[np.ix_(train if is_train_folds else test, train)]
        if self.cache is not None:
            self.cache[n, True, is_train_folds] = result
        return result
def cv_split(cv, X, y, groups, is_pairwise, cache):
    """Materialize the splitter's folds for (X, y, groups) into a CVCache."""
    check_consistent_length(X, y, groups)
    fold_indices = list(cv.split(X, y, groups))
    return CVCache(fold_indices, is_pairwise, cache, _num_samples(X))
def cv_n_samples(cvs):
    """Return the per-fold test-sample counts from a CVCache."""
    return cvs.num_test_samples()
def cv_extract(cvs, X, y, is_X, is_train_folds, n):
    """Pull the requested slice (X or y; train folds or test fold) of split *n*."""
    return cvs.extract(X, y, n, is_X, is_train_folds)
def cv_extract_params(cvs, keys, vals, n, is_train_folds):
    """Slice every fit parameter down to one side of split *n*.

    Parameters
    ----------
    cvs : CVCache
        CV cache holding the fold indices.
    keys : iterable of (str, str)
        ``(name, token)`` pairs identifying each fit parameter.
    vals : iterable
        The value corresponding to each key.
    n : int
        Fold number.
    is_train_folds : bool
        True to slice for the train folds, False for the test fold.

    Returns
    -------
    dict
        Maps each parameter name to its slice for the selected folds.
    """
    sliced = {}
    for (name, token), value in zip(keys, vals):
        sliced[name] = cvs.extract_param(token, value, n, is_train_folds)
    return sliced
def _maybe_timed(x):
"""Unpack (est, fit_time) tuples if provided"""
return x if isinstance(x, tuple) and len(x) == 2 else (x, 0.0)
def pipeline(names, steps):
    """Rebuild a fitted ``Pipeline`` from step names and (step, fit_time) results.

    Returns ``(pipeline_or_FIT_FAILURE, total_fit_time)``; failure in any step
    propagates as FIT_FAILURE.
    """
    unpacked = [_maybe_timed(step) for step in steps]
    estimators = [est for est, _ in unpacked]
    fit_time = sum(t for _, t in unpacked)
    if any(est is FIT_FAILURE for est in estimators):
        return FIT_FAILURE, fit_time
    return Pipeline(list(zip(names, estimators))), fit_time
def feature_union(names, steps, weights):
    """Rebuild a fitted ``FeatureUnion`` from names, (step, fit_time) results,
    and transformer weights.

    Returns ``(union_or_FIT_FAILURE, total_fit_time)``.
    """
    unpacked = [_maybe_timed(step) for step in steps]
    transformers = [est for est, _ in unpacked]
    fit_time = sum(t for _, t in unpacked)
    if any(est is FIT_FAILURE for est in transformers):
        return FIT_FAILURE, fit_time
    union = FeatureUnion(list(zip(names, transformers)), transformer_weights=weights)
    return union, fit_time
def feature_union_concat(Xs, nsamples, weights):
    """Apply weights and concatenate outputs from a FeatureUnion.

    Propagates FIT_FAILURE; drops None outputs; returns an empty
    ``(nsamples, 0)`` array when nothing remains.  Any sparse part makes the
    result a sparse CSR matrix.
    """
    for X in Xs:
        if X is FIT_FAILURE:
            return FIT_FAILURE
    weighted = []
    for X, w in zip(Xs, weights):
        if X is None:
            continue
        weighted.append(X if w is None else X * w)
    if not weighted:
        return np.zeros((nsamples, 0))
    if any(sparse.issparse(part) for part in weighted):
        return sparse.hstack(weighted).tocsr()
    return np.hstack(weighted)
# Current set_params isn't threadsafe
SET_PARAMS_LOCK = Lock()


def set_params(est, fields=None, params=None, copy=True):
    """Set ``params`` (paired with ``fields``) on *est*.

    When *copy* is True the estimator is cloned first.  Parameters whose
    value is the MISSING sentinel are skipped entirely.
    """
    if copy:
        est = copy_estimator(est)
    if fields is None:
        return est
    filtered = {
        field: value for field, value in zip(fields, params) if value is not MISSING
    }
    # TODO: rewrite set_params to avoid lock for classes that use the standard
    # set_params/get_params methods
    with SET_PARAMS_LOCK:
        return est.set_params(**filtered)
def fit(est, X, y, error_score="raise", fields=None, params=None, fit_params=None):
    """Apply *params* to *est* and fit it on (X, y).

    Returns ``(fitted_estimator_or_FIT_FAILURE, fit_time)``.  When
    ``error_score == "raise"`` fit errors propagate; otherwise a
    FitFailedWarning is emitted and FIT_FAILURE is returned.
    """
    if X is FIT_FAILURE:
        # An upstream step already failed; propagate without fitting.
        return FIT_FAILURE, 0.0
    fit_params = fit_params or {}
    start = default_timer()
    try:
        est = set_params(est, fields, params)
        est.fit(X, y, **fit_params)
    except Exception as e:
        if error_score == "raise":
            raise
        warn_fit_failure(error_score, e)
        est = FIT_FAILURE
    return est, default_timer() - start
def fit_transform(
    est, X, y, error_score="raise", fields=None, params=None, fit_params=None
):
    """Apply *params* to *est*, fit it on (X, y) and transform X.

    Returns ``((estimator, fit_time), Xt)``; on failure (with
    ``error_score != "raise"``) both the estimator and Xt are FIT_FAILURE.
    """
    if X is FIT_FAILURE:
        # Upstream failure: propagate without doing any work.
        est, fit_time, Xt = FIT_FAILURE, 0.0, FIT_FAILURE
    else:
        if not fit_params:
            fit_params = {}
        start_time = default_timer()
        try:
            est = set_params(est, fields, params)
            # Prefer the estimator's fused fit_transform when available.
            if hasattr(est, "fit_transform"):
                Xt = est.fit_transform(X, y, **fit_params)
            else:
                est.fit(X, y, **fit_params)
                Xt = est.transform(X)
        except Exception as e:
            if error_score == "raise":
                raise
            warn_fit_failure(error_score, e)
            est = Xt = FIT_FAILURE
        fit_time = default_timer() - start_time
    return (est, fit_time), Xt
def _apply_scorer(estimator, X, y, scorer, sample_weight):
"""Applies the scorer to the estimator, given the data and sample_weight.
If ``sample_weight`` is None ``sample_weight`` WILL
NOT be passed to ``scorer``; otherwise, it will be passed.
In the event that ``sample_weight`` is provided and used but ``scorer``
doesn't accept a ``sample_weight`` parameter, then a ``TypeError`` should
likely be raised.
Parameters
----------
estimator : estimator object implementing 'fit'
The object that was used to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning. (May be None)
scorer : A single callable.
Should return a single float.
The callable object / fn should have signature
``scorer(estimator, X, y, sample_weight=None)`` if ``sample_weight``.
sample_weight : array-like, shape (y)
sample weights to use during metric calculation. May be None.
Returns
-------
score : float
Score returned by ``scorer`` applied to ``X`` and ``y`` given
``sample_weight``.
"""
if sample_weight is None:
if y is None:
score = scorer(estimator, X)
else:
score = scorer(estimator, X, y)
else:
try:
# Explicitly force the sample_weight parameter so that an error
# will be raised in the event that the scorer doesn't take a
# sample_weight argument. This is preferable to passing it as
# a keyword args dict in the case that it just ignores parameters
# that are not accepted by the scorer.
if y is None:
score = scorer(estimator, X, sample_weight=sample_weight)
else:
score = scorer(estimator, X, y, sample_weight=sample_weight)
except TypeError as e:
if "sample_weight" in str(e):
raise TypeError(
(
"Attempted to use 'sample_weight' for training "
"but supplied a scorer that doesn't accept a "
"'sample_weight' parameter."
),
e,
)
else:
raise e
return score
def _score(est, X, y, scorer, sample_weight):
    """Score *est* on (X, y), propagating FIT_FAILURE.

    When *scorer* is a mapping of name -> scorer, a dict of scores is
    returned; otherwise a single score.
    """
    if est is FIT_FAILURE:
        return FIT_FAILURE
    if not isinstance(scorer, Mapping):
        return _apply_scorer(est, X, y, scorer, sample_weight)
    return {
        name: _apply_scorer(est, X, y, fn, sample_weight)
        for name, fn in scorer.items()
    }
def score(
    est_and_time,
    X_test,
    y_test,
    X_train,
    y_train,
    scorer,
    error_score,
    sample_weight=None,
    eval_sample_weight=None,
):
    """Score a fitted estimator on the test (and optionally train) data.

    Returns ``(fit_time, test_score, score_time)`` when ``X_train`` is None,
    otherwise ``(fit_time, test_score, score_time, train_score)``.  On scoring
    failure with ``error_score != "raise"``, *error_score* substitutes for
    both scores.
    """
    est, fit_time = est_and_time
    start_time = default_timer()
    try:
        test_score = _score(est, X_test, y_test, scorer, eval_sample_weight)
    except Exception:
        if error_score == "raise":
            raise
        else:
            # Substitute error_score for both test and train scores.
            score_time = default_timer() - start_time
            return fit_time, error_score, score_time, error_score
    score_time = default_timer() - start_time
    if X_train is None:
        # Train score was not requested.
        return fit_time, test_score, score_time
    train_score = _score(est, X_train, y_train, scorer, sample_weight)
    return fit_time, test_score, score_time, train_score
def fit_and_score(
    est,
    cv,
    X,
    y,
    n,
    scorer,
    error_score="raise",
    fields=None,
    params=None,
    fit_params=None,
    test_fit_params=None,
    return_train_score=True,
):
    """Fit *est* on the train folds of split *n* and score it on the test fold.

    Returns the tuple produced by :func:`score`:
    ``(fit_time, test_score, score_time[, train_score])``.
    """
    X_train = cv.extract(X, y, n, True, True)
    y_train = cv.extract(X, y, n, False, True)
    X_test = cv.extract(X, y, n, True, False)
    y_test = cv.extract(X, y, n, False, False)
    # Support for lightGBM evaluation data sets within folds, see
    # https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMClassifier.html
    # The eval set is swapped for the slice matching this split's test fold,
    # so the full-train-sized eval set never needs to be passed to fit().
    # BUG FIX: guard test_fit_params against None -- previously this block
    # (and the weight lookup below) raised TypeError when fit_params carried
    # an eval_set but no test_fit_params were supplied.
    if (
        fit_params is not None
        and test_fit_params is not None
        and 'eval_set' in fit_params
    ):
        fit_params['eval_set'] = test_fit_params['eval_set']
        fit_params['eval_names'] = test_fit_params['eval_names']
        fit_params['eval_sample_weight'] = test_fit_params['eval_sample_weight']
    est_and_time = fit(est, X_train, y_train, error_score, fields, params, fit_params)
    if not return_train_score:
        X_train = y_train = None
    eval_sample_weight_train, eval_sample_weight_test = None, None
    if fit_params is not None:
        # NOTE: To be back-compatible with dask-ml defaults a boolean
        # (legacy_mode) could skip the following block when True.
        weight_key = (
            "eval_sample_weight" if "eval_sample_weight" in fit_params else "sample_weight"
        )
        if fit_params.get(weight_key) is not None:
            eval_sample_weight_train = fit_params[weight_key]
        if test_fit_params is not None and test_fit_params.get(weight_key) is not None:
            eval_sample_weight_test = test_fit_params[weight_key]
    return score(est_and_time, X_test, y_test, X_train, y_train, scorer, error_score, sample_weight=eval_sample_weight_train, eval_sample_weight=eval_sample_weight_test)
def _store(
results,
key_name,
array,
n_splits,
n_candidates,
weights=None,
splits=False,
rank=False,
):
"""A small helper to store the scores/times to the cv_results_"""
# When iterated first by n_splits and then by parameters
array = np.array(array, dtype=np.float64).reshape(n_splits, n_candidates).T
if splits:
for split_i in range(n_splits):
results["split%d_%s" % (split_i, key_name)] = array[:, split_i]
array_means = np.average(array, axis=1, weights=weights)
results["mean_%s" % key_name] = array_means
# Weighted std is not directly available in numpy
array_stds = np.sqrt(
np.average((array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights)
)
results["std_%s" % key_name] = array_stds
if rank:
results["rank_%s" % key_name] = np.asarray(
rankdata(-array_means, method="min"), dtype=np.int32
)
def create_cv_results(
    scores, candidate_params, n_splits, error_score, weights, multimetric
):
    """Assemble the ``cv_results_`` dict from per-(split, candidate) scores.

    ``scores`` holds 3-tuples ``(fit_time, test_score, score_time)`` or
    4-tuples that additionally carry a train score; FIT_FAILURE entries are
    replaced by *error_score*.  ``multimetric`` is falsy for a single scorer
    or an iterable of metric names for a dict-of-scorers.
    """
    # A 4th element means train scores were computed for every cell.
    if len(scores[0]) == 4:
        fit_times, test_scores, score_times, train_scores = zip(*scores)
    else:
        fit_times, test_scores, score_times = zip(*scores)
        train_scores = None
    if not multimetric:
        # Single-metric: scores are plain floats (or FIT_FAILURE).
        test_scores = [error_score if s is FIT_FAILURE else s for s in test_scores]
        if train_scores is not None:
            train_scores = [
                error_score if s is FIT_FAILURE else s for s in train_scores
            ]
    else:
        # Multi-metric: each score cell is a dict keyed by metric name.
        test_scores = {
            k: [error_score if x is FIT_FAILURE else x[k] for x in test_scores]
            for k in multimetric
        }
        if train_scores is not None:
            train_scores = {
                k: [error_score if x is FIT_FAILURE else x[k] for x in train_scores]
                for k in multimetric
            }
    # Construct the `cv_results_` dictionary
    results = {"params": candidate_params}
    n_candidates = len(candidate_params)
    if weights is not None:
        # Broadcast the per-split weights to one row per candidate for _store.
        weights = np.broadcast_to(
            weights[None, :], (len(candidate_params), len(weights))
        )
    _store(results, "fit_time", fit_times, n_splits, n_candidates)
    _store(results, "score_time", score_times, n_splits, n_candidates)
    if not multimetric:
        _store(
            results,
            "test_score",
            test_scores,
            n_splits,
            n_candidates,
            splits=True,
            rank=True,
            weights=weights,
        )
        if train_scores is not None:
            _store(
                results,
                "train_score",
                train_scores,
                n_splits,
                n_candidates,
                splits=True,
            )
    else:
        for key in multimetric:
            _store(
                results,
                "test_{}".format(key),
                test_scores[key],
                n_splits,
                n_candidates,
                splits=True,
                rank=True,
                weights=weights,
            )
        if train_scores is not None:
            for key in multimetric:
                _store(
                    results,
                    "train_{}".format(key),
                    train_scores[key],
                    n_splits,
                    n_candidates,
                    splits=True,
                )
    # Use one MaskedArray and mask all the places where the param is not
    # applicable for that candidate. Use defaultdict as each candidate may
    # not contain all the params
    param_results = defaultdict(
        lambda: MaskedArray(np.empty(n_candidates), mask=True, dtype=object)
    )
    for cand_i, params in enumerate(candidate_params):
        for name, value in params.items():
            param_results["param_%s" % name][cand_i] = value
    results.update(param_results)
    return results
def fit_best(estimator, params, X, y, fit_params):
    """Clone *estimator*, apply the winning *params*, and refit on all of (X, y)."""
    best = copy_estimator(estimator)
    # sklearn's set_params returns self, so fitting `best` is equivalent.
    best.set_params(**params)
    best.fit(X, y, **fit_params)
    return best
| [
"numpy.average",
"distutils.version.LooseVersion",
"timeit.default_timer",
"numpy.ix_",
"scipy.sparse.issparse",
"numpy.zeros",
"scipy.stats.rankdata",
"numpy.empty",
"numpy.hstack",
"threading.Lock",
"numpy.ma.getmaskarray",
"sklearn.utils.validation.check_consistent_length",
"numpy.array",... | [((7839, 7845), 'threading.Lock', 'Lock', ([], {}), '()\n', (7843, 7845), False, 'from threading import Lock\n'), ((775, 803), 'distutils.version.LooseVersion', 'LooseVersion', (['np.__version__'], {}), '(np.__version__)\n', (787, 803), False, 'from distutils.version import LooseVersion\n'), ((2127, 2309), 'warnings.warn', 'warnings.warn', (['("""Classifier fit failed. The score on this train-test partition for these parameters will be set to %f. Details: \n%r"""\n % (error_score, e))', 'FitFailedWarning'], {}), '(\n """Classifier fit failed. The score on this train-test partition for these parameters will be set to %f. Details: \n%r"""\n % (error_score, e), FitFailedWarning)\n', (2140, 2309), False, 'import warnings\n'), ((5523, 5560), 'sklearn.utils.validation.check_consistent_length', 'check_consistent_length', (['X', 'y', 'groups'], {}), '(X, y, groups)\n', (5546, 5560), False, 'from sklearn.utils.validation import check_consistent_length\n'), ((7767, 7780), 'numpy.hstack', 'np.hstack', (['Xs'], {}), '(Xs)\n', (7776, 7780), True, 'import numpy as np\n'), ((12575, 12590), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (12588, 12590), False, 'from timeit import default_timer\n'), ((15776, 15818), 'numpy.average', 'np.average', (['array'], {'axis': '(1)', 'weights': 'weights'}), '(array, axis=1, weights=weights)\n', (15786, 15818), True, 'import numpy as np\n'), ((7647, 7670), 'numpy.zeros', 'np.zeros', (['(nsamples, 0)'], {}), '((nsamples, 0))\n', (7655, 7670), True, 'import numpy as np\n'), ((8488, 8503), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (8501, 8503), False, 'from timeit import default_timer\n'), ((9119, 9134), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (9132, 9134), False, 'from timeit import default_timer\n'), ((12903, 12918), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (12916, 12918), False, 'from timeit import default_timer\n'), ((15955, 16033), 'numpy.average', 
'np.average', (['((array - array_means[:, np.newaxis]) ** 2)'], {'axis': '(1)', 'weights': 'weights'}), '((array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights)\n', (15965, 16033), True, 'import numpy as np\n'), ((5300, 5348), 'numpy.ix_', 'np.ix_', (['(train if is_train_folds else test)', 'train'], {}), '(train if is_train_folds else test, train)\n', (5306, 5348), True, 'import numpy as np\n'), ((7682, 7700), 'scipy.sparse.issparse', 'sparse.issparse', (['f'], {}), '(f)\n', (7697, 7700), False, 'from scipy import sparse\n'), ((8793, 8808), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (8806, 8808), False, 'from timeit import default_timer\n'), ((9594, 9609), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (9607, 9609), False, 'from timeit import default_timer\n'), ((16164, 16200), 'scipy.stats.rankdata', 'rankdata', (['(-array_means)'], {'method': '"""min"""'}), "(-array_means, method='min')\n", (16172, 16200), False, 'from scipy.stats import rankdata\n'), ((7730, 7747), 'scipy.sparse.hstack', 'sparse.hstack', (['Xs'], {}), '(Xs)\n', (7743, 7747), False, 'from scipy import sparse\n'), ((15558, 15591), 'numpy.array', 'np.array', (['array'], {'dtype': 'np.float64'}), '(array, dtype=np.float64)\n', (15566, 15591), True, 'import numpy as np\n'), ((18939, 18961), 'numpy.empty', 'np.empty', (['n_candidates'], {}), '(n_candidates)\n', (18947, 18961), True, 'import numpy as np\n'), ((3048, 3069), 'toolz.pluck', 'pluck', (['(1)', 'self.splits'], {}), '(1, self.splits)\n', (3053, 3069), False, 'from toolz import pluck\n'), ((12791, 12806), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (12804, 12806), False, 'from timeit import default_timer\n'), ((1372, 1396), 'numpy.ma.getmaskarray', 'np.ma.getmaskarray', (['self'], {}), '(self)\n', (1390, 1396), True, 'import numpy as np\n')] |
# BSD 3-Clause License
#
# Copyright (c) 2021, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from Bezier import Bezier
import pydirectinput
import time
class Trajectory:
    """ Encapsulates bezier trajectory """
    def __init__(self, start_point, end_point, short_traj_length, move_per_step,
                 steps_per_cycle, time_until_outdated, short_traj_smooth_fac):
        # start_point/end_point are (x, y) screen coordinates.
        self._start = start_point
        self._end = end_point
        self._init_time = time.time()
        self._time_until_outdated = time_until_outdated
        self._short_traj_smooth_fac = short_traj_smooth_fac
        self._steps_per_cycle = steps_per_cycle
        # Two kinds of trajectories:
        # - Bezier curves used to move to targets that are rather far away
        # - Short trajectories, linear movement is used (use case: stay locked on to target)
        length = np.linalg.norm((end_point[0] - start_point[0], end_point[1] - start_point[1]))
        if length <= short_traj_length:
            # Short trajectory: a single smoothed linear step (see move_along).
            self._short_traj = True
            self._current_idx = 0
            self._max_idx = 1
        else:
            self._short_traj = False
            # Some heuristic to get a nice looking curve
            arc_point_x = start_point[0] * 0.75 + end_point[0] * 0.25
            arc_point = (arc_point_x, end_point[1])
            num_segments = int(length / move_per_step) # underestimated since we are moving along a Bezier curve!
            step_size = 1./num_segments
            t_points = np.arange(0, 1.0, step_size)
            # Quadratic Bezier through start, the arc control point, and end.
            points = np.array([start_point, arc_point, end_point])
            self._curve = Bezier.Curve(t_points, points)
            self._max_idx = np.shape(self._curve)[0] - 1
            self._current_idx = 0
    def is_outdated(self):
        """ Trajectory can be outdated due to:
        - Time
        - Length exceeded
        """
        current_time = time.time()
        outdated_time = (current_time - self._init_time) > self._time_until_outdated
        outdated_length = self._current_idx >= self._max_idx
        return outdated_time or outdated_length
    def move_along(self):
        """ Move next step along trajectory
        Note: For short trajectories there is only one step!
        """
        if self._short_traj:
            # Single damped step: move a 1/smooth_fac fraction toward the end.
            move_x = (self._end[0] - self._start[0]) / self._short_traj_smooth_fac
            move_y = (self._end[1] - self._start[1]) / self._short_traj_smooth_fac
            pydirectinput.moveTo(int(self._start[0] + move_x),
                                 int(self._start[1] + move_y))
            self._current_idx += 1
        else:
            # Advance up to steps_per_cycle points along the Bezier curve.
            for step in range(self._steps_per_cycle):
                if not self.is_outdated():
                    pydirectinput.moveTo(int(self._curve[self._current_idx, 0]),
                                         int(self._curve[self._current_idx, 1]))
                    self._current_idx += 1
class Mouse:
    """ Abstracts mouse input """
    def __init__(self, config=None):
        # config is expected to expose the mouse_* tuning attributes used in
        # move_rel -- TODO confirm against the caller.
        self._config = config
        self._current_trajectory = None
        pydirectinput.PAUSE = 0.1 # Timeout for input
    def set_timeout(self, timeout):
        """Set the pause pydirectinput inserts after every input call."""
        pydirectinput.PAUSE = timeout
    def move_rel(self, x, y):
        """Move the cursor by (x, y) relative to its current position.

        Reuses the active trajectory while it is still valid; otherwise
        starts a new one from the current cursor position.  Returns the
        trajectory being followed.
        """
        if self._current_trajectory and not self._current_trajectory.is_outdated():
            self._current_trajectory.move_along()
        else:
            current_pos = pydirectinput.position()
            move_to_pos = (current_pos[0] + x, current_pos[1] + y)
            self._current_trajectory = Trajectory(current_pos, move_to_pos,
                                                   self._config.mouse_short_traj_max_length,
                                                   self._config.mouse_move_per_step,
                                                   self._config.mouse_steps_per_cycle,
                                                   self._config.mouse_time_until_traj_outdated,
                                                   self._config.mouse_short_traj_smooth_fac)
            self._current_trajectory.move_along()
        return self._current_trajectory
    def left_mouse_down(self):
        pydirectinput.mouseDown()
    def left_mouse_up(self):
        pydirectinput.mouseUp()
| [
"Bezier.Bezier.Curve",
"time.time",
"numpy.shape",
"pydirectinput.position",
"numpy.linalg.norm",
"numpy.arange",
"numpy.array",
"pydirectinput.mouseUp",
"pydirectinput.mouseDown"
] | [((1959, 1970), 'time.time', 'time.time', ([], {}), '()\n', (1968, 1970), False, 'import time\n'), ((2358, 2436), 'numpy.linalg.norm', 'np.linalg.norm', (['(end_point[0] - start_point[0], end_point[1] - start_point[1])'], {}), '((end_point[0] - start_point[0], end_point[1] - start_point[1]))\n', (2372, 2436), True, 'import numpy as np\n'), ((3391, 3402), 'time.time', 'time.time', ([], {}), '()\n', (3400, 3402), False, 'import time\n'), ((5659, 5684), 'pydirectinput.mouseDown', 'pydirectinput.mouseDown', ([], {}), '()\n', (5682, 5684), False, 'import pydirectinput\n'), ((5724, 5747), 'pydirectinput.mouseUp', 'pydirectinput.mouseUp', ([], {}), '()\n', (5745, 5747), False, 'import pydirectinput\n'), ((2987, 3015), 'numpy.arange', 'np.arange', (['(0)', '(1.0)', 'step_size'], {}), '(0, 1.0, step_size)\n', (2996, 3015), True, 'import numpy as np\n'), ((3037, 3082), 'numpy.array', 'np.array', (['[start_point, arc_point, end_point]'], {}), '([start_point, arc_point, end_point])\n', (3045, 3082), True, 'import numpy as np\n'), ((3109, 3139), 'Bezier.Bezier.Curve', 'Bezier.Curve', (['t_points', 'points'], {}), '(t_points, points)\n', (3121, 3139), False, 'from Bezier import Bezier\n'), ((4910, 4934), 'pydirectinput.position', 'pydirectinput.position', ([], {}), '()\n', (4932, 4934), False, 'import pydirectinput\n'), ((3168, 3189), 'numpy.shape', 'np.shape', (['self._curve'], {}), '(self._curve)\n', (3176, 3189), True, 'import numpy as np\n')] |
import os
import sys
from PIL import Image
import glob
import numpy as np
import h5py
import csv
import time
import zipfile
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
def reporthook(count, block_size, total_size):
    """Taken from https://blog.shichao.io/2012/10/04/progress_speed_indicator_for_urlretrieve_in_python.html
    A simple reporthook() function for urllib.urlretrieve()'s reporthook argument that shows a progressbar
    while downloading the data

    Parameters
    ----------
    count : int
        Number of blocks transferred so far (0 on the first call).
    block_size : int
        Size of each block in bytes.
    total_size : int
        Total size of the download in bytes; may be <= 0 when unknown.
    """
    global start_time
    if count == 0:
        start_time = time.time()
        return
    duration = time.time() - start_time
    progress_size = int(count * block_size)
    # BUG FIX: guard against ZeroDivisionError when the first block arrives
    # within the clock resolution (duration == 0).
    speed = int(progress_size / (1024 * duration)) if duration > 0 else 0
    # BUG FIX: servers without Content-Length make urlretrieve pass
    # total_size <= 0; report 0% instead of dividing by zero.
    percent = int(count * block_size * 100 / total_size) if total_size > 0 else 0
    sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
                     (percent, progress_size / (1024 * 1024), speed, duration))
    sys.stdout.flush()
def download_data():
    """Downloads and Extracts tiny-imagenet Dataset
    """
    # Skip entirely if the extracted directory already exists in the CWD.
    if not os.path.exists(os.path.join(os.getcwd(), "tiny-imagenet-200")):
        # Reuse a previously downloaded archive if present.
        if not os.path.exists(os.path.join(os.getcwd(), "tiny-imagenet-200.zip")):
            # NOTE(review): message says "Flowers" but the URL is tiny-imagenet.
            print ('Downloading Flowers data from http://cs231n.stanford.edu/tiny-imagenet-200.zip ...')
            urlretrieve ('http://cs231n.stanford.edu/tiny-imagenet-200.zip', 'tiny-imagenet-200.zip', reporthook)
        print ('\nExtracting tiny-imagenet-200.zip ...', end='', flush=True)
        zfile = zipfile.ZipFile (os.path.join(os.getcwd(), 'tiny-imagenet-200.zip'), 'r')
        zfile.extractall ('.')
        zfile.close()
        print ('Done')
def get_word_labels():
    """Get the wnids and label names from the words.txt file.

    Returns
    -------
    dict
        Maps each wnid to its human-readable label name.
    """
    word_labels = {}
    # words.txt is tab separated: "<wnid>\t<label name>\n".  A context manager
    # guarantees the handle is closed even if a line fails to parse (the
    # original left the file open on exceptions).
    with open('tiny-imagenet-200/words.txt', 'r') as file:
        for line in file:
            fields = line.split('\t')
            word_labels[fields[0]] = fields[1].replace('\n', '')
    return word_labels
def get_train_wnid():
    """Extracts the wnids from the subdirectories for every image in the train folder

    Returns
    -------
    dict
        Maps each image file name to its wnid (the class directory name).
    """
    wnid_labels = {}
    for subdir, dirs, files in os.walk('tiny-imagenet-200/train'):
        # The class wnid is the name of the directory holding the *_boxes.txt
        # file.  os.path.basename/join replace the original '/'-splitting,
        # which broke on Windows path separators.
        wnid = os.path.basename(subdir)
        for filename in files:
            if not filename.endswith('.txt'):
                continue
            # Context manager closes the handle even on parse errors
            # (the original leaked it on exceptions).
            with open(os.path.join(subdir, filename), 'r') as file:
                for line in file:
                    # Each line is "<image name>\t<bbox coords...>".
                    wnid_labels[line.split('\t')[0]] = wnid
    return wnid_labels
def get_val_wnid():
    """Extracts the wnids from the val_annotations.txt file for every image in the val folder

    Returns
    -------
    dict
        Maps each validation image file name to its wnid.
    """
    wnid_labels = {}
    # val_annotations.txt is tab separated:
    # "<image name>\t<wnid>\t<bbox coords...>".  Context manager guarantees
    # the handle is closed even on parse errors (previously leaked).
    with open('tiny-imagenet-200/val/val_annotations.txt', 'r') as file:
        for line in file:
            fields = line.split('\t')
            wnid_labels[fields[0]] = fields[1]
    return wnid_labels
def load_labels():
    """Gets wnids for every image and convert them to categorical

    Returns
    -------
    train_wnid : dict
        Maps training image names to wnids.
    val_wnid : dict
        Maps validation image names to wnids.
    uniq_wnids : list
        Sorted list of all distinct wnids; an image's label is its wnid's
        index in this list.
    """
    train_wnid = get_train_wnid()
    val_wnid = get_val_wnid()
    # BUG FIX: the original used list(set(...)), whose ordering varies between
    # runs, making the integer labels nondeterministic.  Sorting fixes the
    # label <-> wnid mapping across runs.
    uniq_wnids = sorted(set(train_wnid.values()) | set(val_wnid.values()))
    return train_wnid, val_wnid, uniq_wnids
def load_images(folder, wnid_labels, uniq_wnids, train_val):
    """loads the images from a given folder

    # Arguments
        folder: directory where the images are stored
        wnid_labels: A dictionary where keys are the image names and values are the wnids
        uniq_wnids: A list of all the wnids
        train_val: Name of the split, used only for progress output

    # Returns
        images: A numpy array of the images
        image_names: A numpy array of the image names
        labels: A numpy array of the labels
        wnids: A numpy array of the wnids
        label_names: A numpy array of the label names
    """
    print('Loading {} images ... '.format(train_val), end='', flush=True)
    word_labels = get_word_labels()
    # O(1) label lookup instead of list.index (O(n) per image).
    wnid_to_label = {wnid: i for i, wnid in enumerate(uniq_wnids)}
    images, labels, wnids, label_names, image_names = [], [], [], [], []
    image_exts = ('.JPEG', '.jpeg', '.JPG', '.jpg', '.PNG', '.png')
    for subdir, dirs, files in os.walk(folder):
        for filename in files:
            if not filename.endswith(image_exts):
                continue
            # Context manager releases the file handle even if decoding fails.
            with Image.open(os.path.join(subdir, filename)) as img:
                np_img = np.array(img)
            if np_img.ndim == 2:
                # Grayscale: replicate the channel to get an RGB array.
                np_img = np.dstack([np_img] * 3)
            images.append(np_img)
            filename = filename.split("/")[-1]
            labels.append(wnid_to_label[wnid_labels[filename]])
            # np.bytes_ replaces np.string_, which was removed in NumPy 2.0
            # (they are aliases of the same scalar type in NumPy 1.x).
            image_names.append(np.bytes_(filename))
            wnids.append(np.bytes_(wnid_labels[filename]))
            label_names.append(np.bytes_(word_labels[wnid_labels[filename]]))
    images = np.array(images)
    labels = np.array(labels)
    wnids = np.array(wnids)
    image_names = np.array(image_names)
    label_names = np.array(label_names)
    print('Done')
    return images, image_names, labels, wnids, label_names
def h5_creator(filename, x, y, image_names=np.array([]), wnids=np.array([]), label_names=np.array([])):
    """Creates a H5 file and datasets with all the arguments.

    # Arguments
        filename: name of the h5 file
        x: A numpy array of the images
        y: A numpy array of the labels
        image_names: A numpy array of the image names
        wnids: A numpy array of the wnids
        label_names: A numpy array of the label names
    """
    print('Creating {} ... '.format(filename), end='', flush=True)
    datasets = (
        ('x', x),
        ('y', y),
        ('image_names', image_names),
        ('label_names', label_names),
        ('wnids', wnids),
    )
    with h5py.File(filename, 'w') as hf:
        for name, data in datasets:
            hf.create_dataset(name, compression="gzip", data=data)
        hf.close()
    print('Done')
def load_data(expanded=False):
    """Downloads the data and loads all the images and the labels.

    # Arguments
        expanded: Boolean, whether to also return the expanded entities
            (image names, wnids and label names).
    # Returns
        Tuple of Numpy arrays
        if expanded is True: (x_train, y_train, train_image_names, train_wnids, train_label_names),
                             (x_val, y_val, val_image_names, val_wnids, val_label_names)
        if expanded is False: (x_train, y_train), (x_val, y_val)
    """
    download_data()
    train_wnid_labels, val_wnid_labels, uniq_wnids = load_labels()
    x_val, val_image_names, y_val, val_wnids, val_label_names = load_images (
        'tiny-imagenet-200/val', val_wnid_labels, uniq_wnids, 'Validation')
    x_train, train_image_names, y_train, train_wnids, train_label_names = load_images (
        'tiny-imagenet-200/train', train_wnid_labels, uniq_wnids, 'Training')
    # Idiomatic truthiness test instead of "expanded == False".
    if not expanded:
        return (x_train, y_train), (x_val, y_val)
    return (x_train, y_train, train_image_names, train_wnids, train_label_names), \
           (x_val, y_val, val_image_names, val_wnids, val_label_names)
def create_h5(expanded=True):
    """Builds val.h5 and train.h5 from the Tiny ImageNet data.

    # Arguments
        expanded: Boolean, whether to also store the expanded entities
            (image names, wnids and label names) in the h5 files.
    """
    # Idiomatic truthiness test instead of "expanded == False".
    if not expanded:
        (x_train, y_train), (x_val, y_val) = load_data(expanded=False)
        h5_creator ('val.h5', x_val, y_val)
        h5_creator ('train.h5', x_train, y_train)
    else:
        (x_train, y_train, train_image_names, train_wnids, train_label_names), \
            (x_val, y_val, val_image_names, val_wnids, val_label_names) = load_data(expanded=True)
        h5_creator ('val.h5', x_val, y_val, val_image_names, val_wnids, val_label_names)
        h5_creator ('train.h5', x_train, y_train, train_image_names, train_wnids, train_label_names)
if __name__ == '__main__':
create_h5() | [
"sys.stdout.write",
"numpy.dstack",
"h5py.File",
"os.getcwd",
"os.walk",
"time.time",
"PIL.Image.open",
"sys.stdout.flush",
"numpy.array",
"urllib.urlretrieve",
"numpy.string_"
] | [((896, 1024), 'sys.stdout.write', 'sys.stdout.write', (["('\\r...%d%%, %d MB, %d KB/s, %d seconds passed' % (percent, progress_size /\n (1024 * 1024), speed, duration))"], {}), "('\\r...%d%%, %d MB, %d KB/s, %d seconds passed' % (percent,\n progress_size / (1024 * 1024), speed, duration))\n", (912, 1024), False, 'import sys\n'), ((1045, 1063), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1061, 1063), False, 'import sys\n'), ((2467, 2501), 'os.walk', 'os.walk', (['"""tiny-imagenet-200/train"""'], {}), "('tiny-imagenet-200/train')\n", (2474, 2501), False, 'import os\n'), ((4597, 4612), 'os.walk', 'os.walk', (['folder'], {}), '(folder)\n', (4604, 4612), False, 'import os\n'), ((5418, 5434), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (5426, 5434), True, 'import numpy as np\n'), ((5448, 5464), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (5456, 5464), True, 'import numpy as np\n'), ((5477, 5492), 'numpy.array', 'np.array', (['wnids'], {}), '(wnids)\n', (5485, 5492), True, 'import numpy as np\n'), ((5511, 5532), 'numpy.array', 'np.array', (['image_names'], {}), '(image_names)\n', (5519, 5532), True, 'import numpy as np\n'), ((5551, 5572), 'numpy.array', 'np.array', (['label_names'], {}), '(label_names)\n', (5559, 5572), True, 'import numpy as np\n'), ((5738, 5750), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5746, 5750), True, 'import numpy as np\n'), ((5758, 5770), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5766, 5770), True, 'import numpy as np\n'), ((5784, 5796), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5792, 5796), True, 'import numpy as np\n'), ((673, 684), 'time.time', 'time.time', ([], {}), '()\n', (682, 684), False, 'import time\n'), ((715, 726), 'time.time', 'time.time', ([], {}), '()\n', (724, 726), False, 'import time\n'), ((6240, 6264), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (6249, 6264), False, 'import h5py\n'), ((1422, 1526), 
'urllib.urlretrieve', 'urlretrieve', (['"""http://cs231n.stanford.edu/tiny-imagenet-200.zip"""', '"""tiny-imagenet-200.zip"""', 'reporthook'], {}), "('http://cs231n.stanford.edu/tiny-imagenet-200.zip',\n 'tiny-imagenet-200.zip', reporthook)\n", (1433, 1526), False, 'from urllib import urlretrieve\n'), ((1185, 1196), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1194, 1196), False, 'import os\n'), ((1648, 1659), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1657, 1659), False, 'import os\n'), ((4753, 4788), 'PIL.Image.open', 'Image.open', (["(subdir + '/' + filename)"], {}), "(subdir + '/' + filename)\n", (4763, 4788), False, 'from PIL import Image\n'), ((4814, 4827), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (4822, 4827), True, 'import numpy as np\n'), ((1264, 1275), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1273, 1275), False, 'import os\n'), ((4894, 4917), 'numpy.dstack', 'np.dstack', (['([np_img] * 3)'], {}), '([np_img] * 3)\n', (4903, 4917), True, 'import numpy as np\n'), ((5111, 5131), 'numpy.string_', 'np.string_', (['filename'], {}), '(filename)\n', (5121, 5131), True, 'import numpy as np\n'), ((5162, 5195), 'numpy.string_', 'np.string_', (['wnid_labels[filename]'], {}), '(wnid_labels[filename])\n', (5172, 5195), True, 'import numpy as np\n'), ((5233, 5279), 'numpy.string_', 'np.string_', (['word_labels[wnid_labels[filename]]'], {}), '(word_labels[wnid_labels[filename]])\n', (5243, 5279), True, 'import numpy as np\n')] |
import json
from pathlib import Path
import warnings
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras import backend as K
from algorithms.agent_interface import SpiceAIAgent
from algorithms.vpg.memory import Memory
from exception import InvalidDataShapeException
def build_networks(state_shape, action_size, learning_rate, hidden_neurons):
    """Build the Policy Gradient policy/prediction network pair.

    Constructs a two-hidden-layer network whose training loss is the
    log-likelihood of the chosen actions weighted by the discounted
    reward input, gamma.

    Args:
        state_shape: a tuple of ints representing the observation space.
        action_size (int): the number of possible actions.
        learning_rate (float): the neural network's learning rate.
        hidden_neurons (int): the number of neurons to use per hidden
            layer.

    Returns:
        A (policy, predict) pair of Keras models sharing weights:
        ``policy`` is compiled for training, ``predict`` only emits the
        action probability distribution.
    """
    observations = layers.Input(state_shape, name="state")
    discounted = layers.Input((1,), name="gamma")

    net = layers.Dense(hidden_neurons, activation="relu")(observations)
    net = layers.Dense(hidden_neurons, activation="relu")(net)
    action_probs = layers.Dense(action_size, activation="softmax")(net)

    def custom_loss(y_true, y_pred):
        # Clip predictions away from 0 and 1 so log() stays finite.
        clip_edge = 1e-8
        clipped = K.clip(y_pred, clip_edge, 1 - clip_edge)
        log_likelihood = y_true * K.log(clipped)
        return K.sum(-log_likelihood * discounted)

    policy = models.Model(inputs=[observations, discounted], outputs=[action_probs])
    policy.compile(
        loss=custom_loss,
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
    )

    predict = models.Model(inputs=[observations], outputs=[action_probs])
    # Useful for visualizing the neural network graph
    # tf.keras.utils.plot_model(predict, "predict_model.png", show_shapes=True)
    return policy, predict
class VanillaPolicyGradientAgent(SpiceAIAgent):
    """A reinforcement learning agent trained with vanilla policy gradients."""

    def __init__(
        self, state_shape, action_size, gamma=0.9, learning_rate=0.02, hidden_neurons=10
    ):
        """Initializes the agent with Policy Gradient networks
        and memory sub-classes.

        Args:
            state_shape: The shape of the observation state
            action_size: How many actions our agent is able to take.
            gamma: The discount factor for rewards that occur earlier on.
            learning_rate: The policy network's learning rate.
            hidden_neurons: Number of neurons per hidden layer.
        """
        super().__init__(state_shape, action_size)

        policy, predict = build_networks(
            state_shape, action_size, learning_rate, hidden_neurons
        )
        self.policy = policy
        self.predict = predict
        self.action_size = action_size
        self.gamma = gamma
        self.memory = Memory()
        warnings.simplefilter(action="ignore", category=Warning)

    def add_experience(self, state, action, reward, _):
        """Stores one (state, action, reward) transition in memory."""
        self.memory.add((state, action, reward))

    def act(self, state):
        """Selects an action for the agent to take given a game state.

        Args:
            state (list of numbers): The state of the environment to act on.

        Returns:
            (int, array): The index of the sampled action and the action
            probability distribution it was drawn from.
        """
        # Sample an action from the policy's predicted distribution.
        state_batch = np.expand_dims(state, axis=0)
        try:
            probabilities = self.predict.predict(state_batch, verbose=0)[0]
        except ValueError as ex:
            if "expected state to have shape" in str(ex):
                raise InvalidDataShapeException(str(ex)) from ex
            raise ex
        action = np.random.choice(len(probabilities), p=probabilities)
        return action, probabilities

    @staticmethod
    def discount_episode(rewards, gamma):
        """Computes the discounted cumulative reward for each episode step.

        Args:
            rewards: a sequence of per-step rewards.
            gamma: the discount factor.

        Returns:
            A float numpy array of the same length as ``rewards``.
        """
        # Force a float dtype: np.zeros_like(rewards) would inherit an
        # integer dtype from integer rewards and silently truncate the
        # discounted values on assignment.
        discounted_rewards = np.zeros_like(rewards, dtype=np.float64)
        total_rewards = 0.0
        for step in reversed(range(len(rewards))):
            total_rewards = rewards[step] + total_rewards * gamma
            discounted_rewards[step] = total_rewards
        return discounted_rewards

    def learn(self):
        """Trains a Policy Gradient policy network based on stored experiences."""
        state_mb, action_mb, reward_mb = self.memory.sample()
        # One hot encode actions
        actions = np.zeros([len(action_mb), self.action_size])
        actions[np.arange(len(action_mb)), action_mb] = 1
        # Apply TD(1), then normalize the returns; compute the standard
        # deviation once and guard against dividing by zero.
        discount_mb = self.discount_episode(reward_mb, self.gamma)
        std_dev = np.std(discount_mb)
        if std_dev == 0:
            std_dev = 1
        discount_mb = (discount_mb - np.mean(discount_mb)) / std_dev
        return self.policy.train_on_batch([state_mb, discount_mb], actions)

    def save(self, path: Path):
        """Persists the prediction model and its metadata under ``path``."""
        model_name = "model.pb"
        model_path = path / model_name
        with open(path / "meta.json", "w", encoding="utf-8") as meta_file:
            meta_file.write(json.dumps({"algorithm": "vpg", "model_name": model_name}))
        self.predict.save(model_path)

    def load(self, path: Path) -> bool:
        """Restores the prediction model saved under ``path``.

        Returns:
            True if a saved model was found and loaded, False otherwise.
        """
        if (path / "meta.json").exists():
            with open(path / "meta.json", "r", encoding="utf-8") as meta_file:
                meta_info = json.loads(meta_file.read())
            self.predict = models.load_model(str(path / meta_info["model_name"]))
            return True
        print(f"Model {path} doesn't exist")
        return False
| [
"tensorflow.keras.backend.sum",
"numpy.zeros_like",
"warnings.simplefilter",
"tensorflow.keras.layers.Dense",
"numpy.std",
"numpy.expand_dims",
"tensorflow.keras.backend.clip",
"json.dumps",
"tensorflow.keras.models.Model",
"tensorflow.keras.backend.log",
"numpy.mean",
"tensorflow.keras.optimi... | [((953, 992), 'tensorflow.keras.layers.Input', 'layers.Input', (['state_shape'], {'name': '"""state"""'}), "(state_shape, name='state')\n", (965, 992), False, 'from tensorflow.keras import layers, models\n'), ((1005, 1037), 'tensorflow.keras.layers.Input', 'layers.Input', (['(1,)'], {'name': '"""gamma"""'}), "((1,), name='gamma')\n", (1017, 1037), False, 'from tensorflow.keras import layers, models\n'), ((1497, 1563), 'tensorflow.keras.models.Model', 'models.Model', ([], {'inputs': '[state_input, gamma]', 'outputs': '[probabilities]'}), '(inputs=[state_input, gamma], outputs=[probabilities])\n', (1509, 1563), False, 'from tensorflow.keras import layers, models\n'), ((1580, 1633), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (1604, 1633), True, 'import tensorflow as tf\n'), ((1707, 1766), 'tensorflow.keras.models.Model', 'models.Model', ([], {'inputs': '[state_input]', 'outputs': '[probabilities]'}), '(inputs=[state_input], outputs=[probabilities])\n', (1719, 1766), False, 'from tensorflow.keras import layers, models\n'), ((1054, 1101), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['hidden_neurons'], {'activation': '"""relu"""'}), "(hidden_neurons, activation='relu')\n", (1066, 1101), False, 'from tensorflow.keras import layers, models\n'), ((1130, 1177), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['hidden_neurons'], {'activation': '"""relu"""'}), "(hidden_neurons, activation='relu')\n", (1142, 1177), False, 'from tensorflow.keras import layers, models\n'), ((1208, 1255), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['action_size'], {'activation': '"""softmax"""'}), "(action_size, activation='softmax')\n", (1220, 1255), False, 'from tensorflow.keras import layers, models\n'), ((1354, 1394), 'tensorflow.keras.backend.clip', 'K.clip', (['y_pred', 'clip_edge', '(1 - clip_edge)'], {}), '(y_pred, clip_edge, 1 - clip_edge)\n', 
(1360, 1394), True, 'from tensorflow.keras import backend as K\n'), ((1459, 1482), 'tensorflow.keras.backend.sum', 'K.sum', (['(-log_lik * gamma)'], {}), '(-log_lik * gamma)\n', (1464, 1482), True, 'from tensorflow.keras import backend as K\n'), ((2793, 2801), 'algorithms.vpg.memory.Memory', 'Memory', ([], {}), '()\n', (2799, 2801), False, 'from algorithms.vpg.memory import Memory\n'), ((2811, 2867), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'Warning'}), "(action='ignore', category=Warning)\n", (2832, 2867), False, 'import warnings\n'), ((3343, 3372), 'numpy.expand_dims', 'np.expand_dims', (['state'], {'axis': '(0)'}), '(state, axis=0)\n', (3357, 3372), True, 'import numpy as np\n'), ((3869, 3891), 'numpy.zeros_like', 'np.zeros_like', (['rewards'], {}), '(rewards)\n', (3882, 3891), True, 'import numpy as np\n'), ((1422, 1443), 'tensorflow.keras.backend.log', 'K.log', (['y_pred_clipped'], {}), '(y_pred_clipped)\n', (1427, 1443), True, 'from tensorflow.keras import backend as K\n'), ((4586, 4605), 'numpy.std', 'np.std', (['discount_mb'], {}), '(discount_mb)\n', (4592, 4605), True, 'import numpy as np\n'), ((4556, 4575), 'numpy.std', 'np.std', (['discount_mb'], {}), '(discount_mb)\n', (4562, 4575), True, 'import numpy as np\n'), ((4644, 4664), 'numpy.mean', 'np.mean', (['discount_mb'], {}), '(discount_mb)\n', (4651, 4664), True, 'import numpy as np\n'), ((4959, 5017), 'json.dumps', 'json.dumps', (["{'algorithm': 'vpg', 'model_name': model_name}"], {}), "({'algorithm': 'vpg', 'model_name': model_name})\n", (4969, 5017), False, 'import json\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.