code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""High level add faults function."""
import logging
log = logging.getLogger(__name__)
import os
import numpy as np
import resqpy.crs as rqc
import resqpy.grid as grr
import resqpy.lines as rql
import resqpy.model as rq
import resqpy.olio.grid_functions as gf
import resqpy.olio.simple_lines as sl
import resqpy.olio.vector_utilities as vec
import resqpy.olio.xml_et as rqet
from resqpy.derived_model._common import _prepare_simple_inheritance, _write_grid, _establish_model_and_source_grid
from resqpy.derived_model._copy_grid import copy_grid
def add_faults(epc_file,
               source_grid,
               polylines = None,
               lines_file_list = None,
               lines_crs_uuid = None,
               full_pillar_list_dict = None,
               left_right_throw_dict = None,
               create_gcs = True,
               inherit_properties = False,
               inherit_realization = None,
               inherit_all_realizations = False,
               new_grid_title = None,
               new_epc_file = None):
    """Extends epc file with a new grid which is a version of the source grid with new curtain fault(s) added.

    arguments:
       epc_file (string): file name to rewrite the model's xml to; if source grid is None, model is loaded from this file
       source_grid (grid.Grid object, optional): if None, the epc_file is loaded and it should contain one ijk grid object
          (or one 'ROOT' grid) which is used as the source grid
       polylines (lines.PolylineSet or list of lines.Polyline, optional): list of poly lines for which curtain faults
          are to be added; either this or lines_file_list or full_pillar_list_dict must be present
       lines_file_list (list of str, optional): a list of file paths, each containing one or more poly lines in simple
          ascii format; see notes; either this or polylines or full_pillar_list_dict must be present
       lines_crs_uuid (uuid, optional): if present, the uuid of a coordinate reference system with which to interpret
          the contents of the lines files; if None, the crs used by the grid will be assumed
       full_pillar_list_dict (dict mapping str to list of pairs of ints, optional): dictionary mapping from a fault name
          to a list of pairs of ints being the ordered neighbouring primary pillar (j0, i0) defining the curtain fault;
          either this or polylines or lines_file_list must be present
       left_right_throw_dict (dict mapping str to pair of floats, optional): dictionary mapping from a fault name to a
          pair of floats being the semi-throw adjustment on the left and the right of the fault (see notes); semi-throw
          values default to (+0.5, -0.5)
       create_gcs (boolean, default True): if True, and faults are being defined by lines, a grid connection set is
          created with one feature per new fault and associated organisational objects are also created; ignored if
          lines_file_list is None
       inherit_properties (boolean, default False): if True, the new grid will have a copy of any properties associated
          with the source grid
       inherit_realization (int, optional): realization number for which properties will be inherited; ignored if
          inherit_properties is False
       inherit_all_realizations (boolean, default False): if True (and inherit_realization is None), properties for all
          realizations will be inherited; if False, only properties with a realization of None are inherited; ignored if
          inherit_properties is False or inherit_realization is not None
       new_grid_title (string): used as the citation title text for the new grid object
       new_epc_file (string, optional): if None, the source epc_file is extended with the new grid object; if present,
          a new epc file (& associated h5 file) is created to contain the unsplit grid (& crs)

    returns:
       a new grid (grid.Grid object) which is a copy of the source grid with the structure modified to incorporate
       the new faults

    notes:
       full_pillar_list_dict is typically generated by Grid.make_face_sets_from_pillar_lists();
       pillars will be split as needed to model the new faults, though existing splits will be used as appropriate, so
       this function may also be used to add a constant to the throw of existing faults;
       the left_right_throw_dict contains a pair of floats for each fault name (as found in keys of full_pillar_list_dict);
       these throw values are lengths in the uom of the crs used by the grid (which must have the same xy units as z units);
       this function does not add a GridConnectionSet to the model - calling code may wish to do that
    """

    log.info('adding faults')

    assert epc_file or new_epc_file, 'epc file name not specified'
    assert epc_file or source_grid is not None, 'neither epc file name nor source grid supplied'
    # if the 'new' file is actually the same file, treat this as an in-place extension of epc_file
    if new_epc_file and epc_file and (
        (new_epc_file == epc_file) or
        (os.path.exists(new_epc_file) and os.path.exists(epc_file) and os.path.samefile(new_epc_file, epc_file))):
        new_epc_file = None
    model, source_grid = _establish_model_and_source_grid(epc_file, source_grid)
    assert source_grid.grid_representation in ['IjkGrid', 'IjkBlockGrid']  # unstructured grids not catered for
    assert model is not None
    # exactly one of the three fault-defining arguments must have been supplied
    assert len([arg for arg in (polylines, lines_file_list, full_pillar_list_dict) if arg is not None]) == 1

    # take a copy of the resqpy grid object, without writing to hdf5 or creating xml
    # the copy will be a Grid, even if the source is a RegularGrid
    grid = copy_grid(source_grid, model)
    grid.crs_uuid = source_grid.crs_uuid
    if source_grid.model is not model:
        # ensure the source grid's crs exists as a part in the target model
        model.duplicate_node(source_grid.model.root_for_uuid(grid.crs_uuid), add_as_part = True)
    grid.crs = rqc.Crs(model, uuid = grid.crs_uuid)
    if isinstance(polylines, rql.PolylineSet):
        polylines = polylines.convert_to_polylines()

    composite_face_set_dict = {}

    # build pillar list dict for polylines if necessary
    if full_pillar_list_dict is None:
        full_pillar_list_dict = {}
        _populate_composite_face_sets_for_polylines(model, grid, polylines, lines_crs_uuid, grid.crs, lines_file_list,
                                                    full_pillar_list_dict, composite_face_set_dict)
    else:  # populate composite face set dictionary from full pillar list
        _populate_composite_face_sets_for_pillar_lists(source_grid, full_pillar_list_dict, composite_face_set_dict)

    # log.debug(f'full_pillar_list_dict:\n{full_pillar_list_dict}')

    # modify the copied grid geometry in memory, splitting pillars as needed
    _process_full_pillar_list_dict(grid, full_pillar_list_dict, left_right_throw_dict)

    collection = _prepare_simple_inheritance(grid, source_grid, inherit_properties, inherit_realization,
                                            inherit_all_realizations)
    # todo: recompute depth properties (and volumes, cell lengths etc. if being strict)

    if new_grid_title is None or len(new_grid_title) == 0:
        new_grid_title = 'copy of ' + str(rqet.citation_title_for_node(source_grid.root)) + ' with added faults'

    # write model
    if new_epc_file:
        _write_grid(new_epc_file, grid, property_collection = collection, grid_title = new_grid_title, mode = 'w')
    else:
        ext_uuid = model.h5_uuid()
        _write_grid(epc_file,
                    grid,
                    ext_uuid = ext_uuid,
                    property_collection = collection,
                    grid_title = new_grid_title,
                    mode = 'a')

    # create grid connection set if requested
    _create_gcs_if_requested(create_gcs, composite_face_set_dict, new_epc_file, grid)

    return grid
def _make_face_sets_for_new_lines(new_lines, face_set_id, grid, full_pillar_list_dict, composite_face_set_dict):
    """Derives face sets from new_lines and merges them into full_pillar_list_dict and composite_face_set_dict."""
    pillar_lists = sl.nearest_pillars(new_lines, grid)
    face_set_dict, full_pll_dict = grid.make_face_sets_from_pillar_lists(pillar_lists, face_set_id)
    full_pillar_list_dict.update(full_pll_dict)
    composite_face_set_dict.update(face_set_dict)
def _populate_composite_face_sets_for_pillar_lists(grid, full_pillar_list_dict, composite_face_set_dict):
for key, pillar_list in full_pillar_list_dict.items():
face_set_dict, _ = grid.make_face_sets_from_pillar_lists([pillar_list], key)
for k, fs_info in face_set_dict.items():
composite_face_set_dict[k] = fs_info
def _fault_from_pillar_list(grid, full_pillar_list, delta_throw_left, delta_throw_right):
    """Creates and/or adjusts throw on a single fault defined by a full pillar list, in memory.

    arguments:
       grid (grid.Grid): the grid object to be adjusted in memory (should have originally been copied
          without the hdf5 arrays having been written yet, nor xml created)
       full_pillar_list (list of pairs of ints (j0, i0)): the full list of primary pillars defining
          the fault; neighbouring pairs must differ by exactly one in either j0 or i0 but not both
       delta_throw_left (float): the amount to add to the 'depth' of points to the left of the line
          when viewed from above, looking along the line in the direction of the pillar list entries;
          units are implicitly the length units of the crs used by the grid; see notes about 'depth'
       delta_throw_right (float): as for delta_throw_left but applied to points to the right of the
          line
    """

    # this function introduces new data into the RESQML arrays representing split pillars
    # familiarity with those array representations is needed if working on this function
    if full_pillar_list is None or len(full_pillar_list) < 3:
        return
    assert grid.z_units() == grid.xy_units()
    grid.cache_all_geometry_arrays()
    assert hasattr(grid, 'points_cached')
    # make grid into a faulted grid if hitherto unfaulted
    if not grid.has_split_coordinate_lines:
        grid.points_cached = grid.points_cached.reshape((grid.nk_plus_k_gaps + 1, (grid.nj + 1) * (grid.ni + 1), 3))
        grid.split_pillar_indices_cached = np.array([], dtype = int)
        grid.cols_for_split_pillars = np.array([], dtype = int)
        grid.cols_for_split_pillars_cl = np.array([], dtype = int)
        grid.has_split_coordinate_lines = True
    assert grid.points_cached.ndim == 3
    # cl is the running cumulative length over the cols_for_split_pillars list
    if len(grid.cols_for_split_pillars_cl) == 0:
        cl = 0
    else:
        cl = grid.cols_for_split_pillars_cl[-1]
    original_p = np.zeros((grid.nk_plus_k_gaps + 1, 3), dtype = float)
    n_primaries = (grid.nj + 1) * (grid.ni + 1)
    # the first and last pillars in the list are fault tips and are not split
    for p_index in range(1, len(full_pillar_list) - 1):
        primary_ji0 = full_pillar_list[p_index]
        primary = primary_ji0[0] * (grid.ni + 1) + primary_ji0[1]
        # bug fix: test the helper's result for None before converting to a numpy array;
        # previously the result was converted first, so np.array(None, dtype = float) yielded
        # nan (or raised) and the None check could never fire
        p_vector = _pillar_vector(grid, primary)
        if p_vector is None:
            continue
        p_vector = np.array(p_vector, dtype = float)
        throw_left_vector = np.expand_dims(delta_throw_left * p_vector, axis = 0)
        throw_right_vector = np.expand_dims(delta_throw_right * p_vector, axis = 0)
        # log.debug(f'T: p ji0: {primary_ji0}; p vec: {p_vector}; left v: {throw_left_vector}; right v: {throw_right_vector}')
        existing_foursome = grid.pillar_foursome(primary_ji0, none_if_unsplit = False)
        lr_foursome = gf.left_right_foursome(full_pillar_list, p_index)
        cl = _processs_foursome(grid, n_primaries, primary, original_p, existing_foursome, lr_foursome, primary_ji0,
                                throw_right_vector, throw_left_vector, cl)
def _pillar_vector(grid, p_index):
# return a unit vector for direction of pillar, in direction of increasing k
if np.all(np.isnan(grid.points_cached[:, p_index])):
return None
k_top = 0
while np.any(np.isnan(grid.points_cached[k_top, p_index])):
k_top += 1
k_bot = grid.nk_plus_k_gaps - 1
while np.any(np.isnan(grid.points_cached[k_bot, p_index])):
k_bot -= 1
if k_bot == k_top: # following coded to treat None directions as downwards
if grid.k_direction_is_down is False:
if grid.z_inc_down() is False:
return (0.0, 0.0, 1.0)
else:
return (0.0, 0.0, -1.0)
else:
if grid.z_inc_down() is False:
return (0.0, 0.0, -1.0)
else:
return (0.0, 0.0, 1.0)
else:
return vec.unit_vector(grid.points_cached[k_bot, p_index] - grid.points_cached[k_top, p_index])
def _extend_points_cached(grid, exist_p):
s = grid.points_cached.shape
e = np.empty((s[0], s[1] + 1, s[2]), dtype = float)
e[:, :-1, :] = grid.points_cached
e[:, -1, :] = grid.points_cached[:, exist_p, :]
grid.points_cached = e
def _np_int_extended(a, i):
e = np.empty(a.size + 1, dtype = int)
e[:-1] = a
e[-1] = i
return e
def _create_gcs_if_requested(create_gcs, composite_face_set_dict, new_epc_file, grid):
    """Creates, writes and stores a single combined grid connection set for the new faults, if requested.

    Does nothing unless create_gcs is True and composite_face_set_dict is non-empty.
    """
    if create_gcs and len(composite_face_set_dict) > 0:
        if new_epc_file is not None:
            # the grid has been written to a new epc file; reload it from there so that
            # the gcs parts are created in (and stored to) the new model
            grid_uuid = grid.uuid
            model = rq.Model(new_epc_file)
            grid = grr.Grid(model, root = model.root(uuid = grid_uuid), find_properties = False)
        grid.set_face_set_gcs_list_from_dict(composite_face_set_dict, create_organizing_objects_where_needed = True)
        # merge the per-fault grid connection sets into one combined gcs
        combined_gcs = grid.face_set_gcs_list[0]
        for gcs in grid.face_set_gcs_list[1:]:
            combined_gcs.append(gcs)
        combined_gcs.write_hdf5()
        combined_gcs.create_xml(title = 'faults added from lines')
        grid.clear_face_sets()
        grid.model.store_epc()
def _processs_foursome(grid, n_primaries, primary, original_p, existing_foursome, lr_foursome, primary_ji0,
                       throw_right_vector, throw_left_vector, cl):
    """Splits (or re-splits) one primary pillar so cells on opposite sides of the fault get separate pillars.

    arguments correspond to the state built up in _fault_from_pillar_list; existing_foursome holds the
    pillar index currently used by each of the (up to) four columns around the primary pillar, and
    lr_foursome holds True/False flags for whether each column lies to the right of the fault line;
    original_p is a scratch array reused across calls; cl is the running cumulative length of
    grid.cols_for_split_pillars_cl and the updated value is returned.
    """
    p_j, p_i = primary_ji0
    # log.debug(f'P: p ji0: {primary_ji0}; e foursome:\n{existing_foursome}; lr foursome:\n{lr_foursome}')
    for exist_p in np.unique(existing_foursome):
        exist_lr = None  # left/right flag claimed by the first column found still using exist_p
        new_p_made = False  # whether a new split pillar has been created for the opposite side
        for jp in range(2):
            # skip column positions that fall outside the grid in the j direction
            if (p_j == 0 and jp == 0) or (p_j == grid.nj and jp == 1):
                continue
            for ip in range(2):
                # skip column positions that fall outside the grid in the i direction
                if (p_i == 0 and ip == 0) or (p_i == grid.ni and ip == 1):
                    continue
                if existing_foursome[jp, ip] != exist_p:
                    continue
                if exist_lr is None:
                    # first column using exist_p: it keeps the existing pillar, which is shifted by its side's throw
                    original_p[:] = grid.points_cached[:, exist_p, :]
                    exist_lr = lr_foursome[jp, ip]
                    # log.debug(f'A: p ji0: {primary_ji0}; exist_p: {exist_p}; jp,ip: {(jp,ip)}; exist_lr: {exist_lr}')
                    grid.points_cached[:, exist_p, :] += throw_right_vector if exist_lr else throw_left_vector
                    continue
                if lr_foursome[jp, ip] == exist_lr:
                    # same side of the fault as the claiming column: no further split needed
                    continue
                natural_col = (p_j + jp - 1) * grid.ni + p_i + ip - 1
                if exist_p != primary:  # remove one of the columns currently assigned to exist_p
                    extra_p = exist_p - n_primaries  # index into the extra (split) pillar arrays
                    # log.debug(f're-split: primary: {primary}; exist: {exist_p}; col: {natural_col}; extra: {extra_p}')
                    assert grid.split_pillar_indices_cached[extra_p] == primary
                    if extra_p == 0:
                        start = 0
                    else:
                        start = grid.cols_for_split_pillars_cl[extra_p - 1]
                    found = False
                    # NOTE(review): cols_for_split_pillars_cl appears to be cumulative, so the range end
                    # looks like it should be grid.cols_for_split_pillars_cl[extra_p] rather than
                    # start + that value — as written the scan may run past this pillar's segment before
                    # matching natural_col; confirm against resqpy upstream before changing
                    for cols_index in range(start, start + grid.cols_for_split_pillars_cl[extra_p]):
                        if grid.cols_for_split_pillars[cols_index] == natural_col:
                            grid.cols_for_split_pillars = np.concatenate((grid.cols_for_split_pillars[:cols_index],
                                                                          grid.cols_for_split_pillars[cols_index + 1:]))
                            found = True
                            break
                    assert found
                    grid.cols_for_split_pillars_cl[extra_p:] -= 1
                    cl -= 1
                    assert grid.cols_for_split_pillars_cl[extra_p] > 0
                if not new_p_made:  # create a new split of pillar
                    _extend_points_cached(grid, exist_p)
                    # log.debug(f'B: p ji0: {primary_ji0}; exist_p: {exist_p}; jp,ip: {(jp,ip)}; lr: {lr_foursome[jp, ip]}; c ji0: {natural_col}')
                    # new pillar starts from the pre-shift geometry and gets this side's throw applied
                    grid.points_cached[:, -1, :] = original_p + (throw_right_vector
                                                                 if lr_foursome[jp, ip] else throw_left_vector)
                    grid.split_pillar_indices_cached = _np_int_extended(grid.split_pillar_indices_cached, primary)
                    if grid.split_pillars_count is None:
                        grid.split_pillars_count = 0
                    grid.split_pillars_count += 1
                    grid.cols_for_split_pillars = _np_int_extended(grid.cols_for_split_pillars, natural_col)
                    cl += 1
                    grid.cols_for_split_pillars_cl = _np_int_extended(grid.cols_for_split_pillars_cl, cl)
                    new_p_made = True
                else:  # include this column in newly split version of pillar
                    # log.debug(f'C: p ji0: {primary_ji0}; exist_p: {exist_p}; jp,ip: {(jp,ip)}; lr: {lr_foursome[jp, ip]}; c ji0: {natural_col}')
                    grid.cols_for_split_pillars = _np_int_extended(grid.cols_for_split_pillars, natural_col)
                    cl += 1
                    grid.cols_for_split_pillars_cl[-1] = cl
    return cl
def _process_full_pillar_list_dict(grid, full_pillar_list_dict, left_right_throw_dict):
    """Applies each named fault in full_pillar_list_dict to the grid, using per-fault throws where supplied."""
    for fault_key, full_pillar_list in full_pillar_list_dict.items():
        left_right_throw = None
        if left_right_throw_dict is not None:
            left_right_throw = left_right_throw_dict.get(fault_key)
        if left_right_throw is None:
            left_right_throw = (+0.5, -0.5)  # default semi-throw adjustments either side of the fault
        log.debug(
            f'generating fault {fault_key} pillar count {len(full_pillar_list)}; left, right throw {left_right_throw}')
        _fault_from_pillar_list(grid, full_pillar_list, left_right_throw[0], left_right_throw[1])
def _populate_composite_face_sets_for_polylines(model, grid, polylines, lines_crs_uuid, grid_crs, lines_file_list,
                                                full_pillar_list_dict, composite_face_set_dict):
    """Populates the pillar list and face set dictionaries from polylines or from ascii lines files.

    If polylines is non-empty, one face set is generated per polyline (converting coordinates to the grid's
    crs where the polyline carries its own crs); otherwise each file in lines_file_list is read and
    contributes one face set named after the file.
    """
    lines_crs = None if lines_crs_uuid is None else rqc.Crs(model, uuid = lines_crs_uuid)
    if polylines:
        for i, polyline in enumerate(polylines):
            new_line = polyline.coordinates.copy()
            if polyline.crs_uuid is not None and polyline.crs_uuid != lines_crs_uuid:
                # switch to this polyline's own crs for coordinate conversion
                lines_crs_uuid = polyline.crs_uuid
                lines_crs = rqc.Crs(model, uuid = lines_crs_uuid)
            if lines_crs:
                lines_crs.convert_array_to(grid_crs, new_line)
            title = polyline.title if polyline.title else 'fault_' + str(i)
            _make_face_sets_for_new_lines([new_line], title, grid, full_pillar_list_dict, composite_face_set_dict)
    else:
        for filename in lines_file_list:
            new_lines = sl.read_lines(filename)
            if lines_crs is not None:
                for a in new_lines:
                    lines_crs.convert_array_to(grid_crs, a)
            # face set id is the file's base name, stripped of any .dat extension
            _, f_name = os.path.split(filename)
            if f_name.lower().endswith('.dat'):
                face_set_id = f_name[:-4]
            else:
                face_set_id = f_name
            _make_face_sets_for_new_lines(new_lines, face_set_id, grid, full_pillar_list_dict, composite_face_set_dict)
| [
"logging.getLogger",
"resqpy.model.Model",
"resqpy.olio.simple_lines.nearest_pillars",
"resqpy.olio.vector_utilities.unit_vector",
"numpy.array",
"resqpy.olio.xml_et.citation_title_for_node",
"resqpy.derived_model._common._write_grid",
"resqpy.derived_model._common._prepare_simple_inheritance",
"os.... | [((61, 88), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (78, 88), False, 'import logging\n'), ((5177, 5232), 'resqpy.derived_model._common._establish_model_and_source_grid', '_establish_model_and_source_grid', (['epc_file', 'source_grid'], {}), '(epc_file, source_grid)\n', (5209, 5232), False, 'from resqpy.derived_model._common import _prepare_simple_inheritance, _write_grid, _establish_model_and_source_grid\n'), ((5647, 5676), 'resqpy.derived_model._copy_grid.copy_grid', 'copy_grid', (['source_grid', 'model'], {}), '(source_grid, model)\n', (5656, 5676), False, 'from resqpy.derived_model._copy_grid import copy_grid\n'), ((5869, 5903), 'resqpy.crs.Crs', 'rqc.Crs', (['model'], {'uuid': 'grid.crs_uuid'}), '(model, uuid=grid.crs_uuid)\n', (5876, 5903), True, 'import resqpy.crs as rqc\n'), ((6756, 6873), 'resqpy.derived_model._common._prepare_simple_inheritance', '_prepare_simple_inheritance', (['grid', 'source_grid', 'inherit_properties', 'inherit_realization', 'inherit_all_realizations'], {}), '(grid, source_grid, inherit_properties,\n inherit_realization, inherit_all_realizations)\n', (6783, 6873), False, 'from resqpy.derived_model._common import _prepare_simple_inheritance, _write_grid, _establish_model_and_source_grid\n'), ((7986, 8021), 'resqpy.olio.simple_lines.nearest_pillars', 'sl.nearest_pillars', (['new_lines', 'grid'], {}), '(new_lines, grid)\n', (8004, 8021), True, 'import resqpy.olio.simple_lines as sl\n'), ((10688, 10739), 'numpy.zeros', 'np.zeros', (['(grid.nk_plus_k_gaps + 1, 3)'], {'dtype': 'float'}), '((grid.nk_plus_k_gaps + 1, 3), dtype=float)\n', (10696, 10739), True, 'import numpy as np\n'), ((12758, 12803), 'numpy.empty', 'np.empty', (['(s[0], s[1] + 1, s[2])'], {'dtype': 'float'}), '((s[0], s[1] + 1, s[2]), dtype=float)\n', (12766, 12803), True, 'import numpy as np\n'), ((12961, 12992), 'numpy.empty', 'np.empty', (['(a.size + 1)'], {'dtype': 'int'}), '(a.size + 1, dtype=int)\n', (12969, 12992), True, 
'import numpy as np\n'), ((14136, 14164), 'numpy.unique', 'np.unique', (['existing_foursome'], {}), '(existing_foursome)\n', (14145, 14164), True, 'import numpy as np\n'), ((7224, 7329), 'resqpy.derived_model._common._write_grid', '_write_grid', (['new_epc_file', 'grid'], {'property_collection': 'collection', 'grid_title': 'new_grid_title', 'mode': '"""w"""'}), "(new_epc_file, grid, property_collection=collection, grid_title=\n new_grid_title, mode='w')\n", (7235, 7329), False, 'from resqpy.derived_model._common import _prepare_simple_inheritance, _write_grid, _establish_model_and_source_grid\n'), ((7385, 7505), 'resqpy.derived_model._common._write_grid', '_write_grid', (['epc_file', 'grid'], {'ext_uuid': 'ext_uuid', 'property_collection': 'collection', 'grid_title': 'new_grid_title', 'mode': '"""a"""'}), "(epc_file, grid, ext_uuid=ext_uuid, property_collection=\n collection, grid_title=new_grid_title, mode='a')\n", (7396, 7505), False, 'from resqpy.derived_model._common import _prepare_simple_inheritance, _write_grid, _establish_model_and_source_grid\n'), ((10305, 10328), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (10313, 10328), True, 'import numpy as np\n'), ((10369, 10392), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (10377, 10392), True, 'import numpy as np\n'), ((10436, 10459), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (10444, 10459), True, 'import numpy as np\n'), ((11112, 11163), 'numpy.expand_dims', 'np.expand_dims', (['(delta_throw_left * p_vector)'], {'axis': '(0)'}), '(delta_throw_left * p_vector, axis=0)\n', (11126, 11163), True, 'import numpy as np\n'), ((11195, 11247), 'numpy.expand_dims', 'np.expand_dims', (['(delta_throw_right * p_vector)'], {'axis': '(0)'}), '(delta_throw_right * p_vector, axis=0)\n', (11209, 11247), True, 'import numpy as np\n'), ((11486, 11535), 'resqpy.olio.grid_functions.left_right_foursome', 'gf.left_right_foursome', 
(['full_pillar_list', 'p_index'], {}), '(full_pillar_list, p_index)\n', (11508, 11535), True, 'import resqpy.olio.grid_functions as gf\n'), ((11860, 11900), 'numpy.isnan', 'np.isnan', (['grid.points_cached[:, p_index]'], {}), '(grid.points_cached[:, p_index])\n', (11868, 11900), True, 'import numpy as np\n'), ((11954, 11998), 'numpy.isnan', 'np.isnan', (['grid.points_cached[k_top, p_index]'], {}), '(grid.points_cached[k_top, p_index])\n', (11962, 11998), True, 'import numpy as np\n'), ((12073, 12117), 'numpy.isnan', 'np.isnan', (['grid.points_cached[k_bot, p_index]'], {}), '(grid.points_cached[k_bot, p_index])\n', (12081, 12117), True, 'import numpy as np\n'), ((12584, 12677), 'resqpy.olio.vector_utilities.unit_vector', 'vec.unit_vector', (['(grid.points_cached[k_bot, p_index] - grid.points_cached[k_top, p_index])'], {}), '(grid.points_cached[k_bot, p_index] - grid.points_cached[\n k_top, p_index])\n', (12599, 12677), True, 'import resqpy.olio.vector_utilities as vec\n'), ((19111, 19146), 'resqpy.crs.Crs', 'rqc.Crs', (['model'], {'uuid': 'lines_crs_uuid'}), '(model, uuid=lines_crs_uuid)\n', (19118, 19146), True, 'import resqpy.crs as rqc\n'), ((13273, 13295), 'resqpy.model.Model', 'rq.Model', (['new_epc_file'], {}), '(new_epc_file)\n', (13281, 13295), True, 'import resqpy.model as rq\n'), ((19825, 19848), 'resqpy.olio.simple_lines.read_lines', 'sl.read_lines', (['filename'], {}), '(filename)\n', (19838, 19848), True, 'import resqpy.olio.simple_lines as sl\n'), ((20007, 20030), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (20020, 20030), False, 'import os\n'), ((5018, 5046), 'os.path.exists', 'os.path.exists', (['new_epc_file'], {}), '(new_epc_file)\n', (5032, 5046), False, 'import os\n'), ((5051, 5075), 'os.path.exists', 'os.path.exists', (['epc_file'], {}), '(epc_file)\n', (5065, 5075), False, 'import os\n'), ((5080, 5120), 'os.path.samefile', 'os.path.samefile', (['new_epc_file', 'epc_file'], {}), '(new_epc_file, epc_file)\n', (5096, 
5120), False, 'import os\n'), ((19432, 19467), 'resqpy.crs.Crs', 'rqc.Crs', (['model'], {'uuid': 'lines_crs_uuid'}), '(model, uuid=lines_crs_uuid)\n', (19439, 19467), True, 'import resqpy.crs as rqc\n'), ((7105, 7151), 'resqpy.olio.xml_et.citation_title_for_node', 'rqet.citation_title_for_node', (['source_grid.root'], {}), '(source_grid.root)\n', (7133, 7151), True, 'import resqpy.olio.xml_et as rqet\n'), ((16107, 16216), 'numpy.concatenate', 'np.concatenate', (['(grid.cols_for_split_pillars[:cols_index], grid.cols_for_split_pillars[\n cols_index + 1:])'], {}), '((grid.cols_for_split_pillars[:cols_index], grid.\n cols_for_split_pillars[cols_index + 1:]))\n', (16121, 16216), True, 'import numpy as np\n')] |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import datetime
import os
from ocw import dataset_processor as dp
from ocw import dataset as ds
from ocw.data_source import local
import numpy as np
import numpy.ma as ma
import logging
logging.basicConfig(level=logging.CRITICAL)
class TestEnsemble(unittest.TestCase):
    """Tests for dataset_processor.ensemble."""

    def test_unequal_dataset_shapes(self):
        """ensemble should raise ValueError when the dataset shapes differ."""
        self.ten_year_dataset = ten_year_monthly_dataset()
        self.two_year_dataset = two_year_daily_dataset()
        with self.assertRaises(ValueError):
            self.ensemble_dataset = dp.ensemble([self.ten_year_dataset, self.two_year_dataset])

    def test_ensemble_logic(self):
        """The mean of constant datasets 1..5 should equal the middle (value 3) dataset."""
        self.datasets = []
        self.datasets.append(build_ten_cube_dataset(1))
        self.datasets.append(build_ten_cube_dataset(2))
        self.three = build_ten_cube_dataset(3)
        self.datasets.append(self.three)
        self.datasets.append(build_ten_cube_dataset(4))
        self.datasets.append(build_ten_cube_dataset(5))
        self.ensemble = dp.ensemble(self.datasets)
        self.ensemble_flat = self.ensemble.values.flatten()
        self.three_flat = self.three.values.flatten()
        np.testing.assert_array_equal(self.ensemble_flat, self.three_flat)

    def test_ensemble_name(self):
        """The ensemble dataset should always be named 'Dataset Ensemble'."""
        self.ensemble_dataset_name = "Dataset Ensemble"
        self.datasets = []
        self.datasets.append(build_ten_cube_dataset(1))
        self.datasets.append(build_ten_cube_dataset(2))
        self.ensemble = dp.ensemble(self.datasets)
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12
        self.assertEqual(self.ensemble.name, self.ensemble_dataset_name)
class TestTemporalRebin(unittest.TestCase):
    """Tests for dataset_processor.temporal_rebin."""

    def setUp(self):
        self.ten_year_monthly_dataset = ten_year_monthly_dataset()
        self.ten_year_annual_times = np.array([datetime.datetime(year, 7, 2) for year in range(2000, 2010)])
        self.two_years_daily_dataset = two_year_daily_dataset()

    def test_monthly_to_annual_rebin(self):
        annual_dataset = dp.temporal_rebin(self.ten_year_monthly_dataset, "annual")
        np.testing.assert_array_equal(annual_dataset.times, self.ten_year_annual_times)

    def test_monthly_to_full_rebin(self):
        full_dataset = dp.temporal_rebin(self.ten_year_monthly_dataset, "full")
        full_times = [datetime.datetime(2005, 1, 1)]
        self.assertEqual(full_dataset.times, full_times)

    def test_daily_to_monthly_rebin(self):
        """This test takes a really long time to run. TODO: Figure out where the performance drag is"""
        monthly_dataset = dp.temporal_rebin(self.two_years_daily_dataset, "monthly")
        bins = list(set([datetime.datetime(time_reading.year, time_reading.month, 15) for time_reading in self.two_years_daily_dataset.times]))
        bins = np.array(bins)
        bins.sort()
        np.testing.assert_array_equal(monthly_dataset.times, bins)

    def test_daily_to_annual_rebin(self):
        annual_dataset = dp.temporal_rebin(self.two_years_daily_dataset, "annual")
        bins = list(set([datetime.datetime(time_reading.year, 7, 2) for time_reading in self.two_years_daily_dataset.times]))
        bins = np.array(bins)
        bins.sort()
        np.testing.assert_array_equal(annual_dataset.times, bins)

    def test_non_rebin(self):
        """This will take a monthly dataset and ask for a monthly rebin of 28 days. The resulting
        dataset should have the same time values"""
        monthly_dataset = dp.temporal_rebin(self.ten_year_monthly_dataset, "monthly")
        bins = list(set([datetime.datetime(time_reading.year, time_reading.month, 15) for time_reading in self.ten_year_monthly_dataset.times]))
        bins = np.array(bins)
        bins.sort()
        np.testing.assert_array_equal(monthly_dataset.times, bins)

    def test_variable_propagation(self):
        annual_dataset = dp.temporal_rebin(self.ten_year_monthly_dataset,
                                           "annual")
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12
        self.assertEqual(annual_dataset.name,
                         self.ten_year_monthly_dataset.name)
        self.assertEqual(annual_dataset.variable,
                         self.ten_year_monthly_dataset.variable)
class TestRcmesSpatialRegrid(unittest.TestCase):
    """Tests for the low-level _rcmes_spatial_regrid helper."""

    def test_return_array_shape(self):
        """The regridded values should take the shape of the target lat/lon grids."""
        source_values = ma.array(np.ones([90, 180]))

        # source grid at 2 degree spacing
        source_lat_range = ma.array(range(-89, 90, 2))
        source_lon_range = ma.array(range(-179, 180, 2))
        source_lons, source_lats = np.meshgrid(source_lon_range, source_lat_range)
        source_lats = ma.array(source_lats)
        source_lons = ma.array(source_lons)

        # target grid at 4 degree spacing
        target_lat_range = np.array(range(-89, 90, 4))
        target_lon_range = np.array(range(-179, 180, 4))
        target_lons, target_lats = np.meshgrid(target_lon_range, target_lat_range)
        target_lats = ma.array(target_lats)
        target_lons = ma.array(target_lons)

        regridded_values = dp._rcmes_spatial_regrid(source_values, source_lats, source_lons,
                                                    target_lats, target_lons)
        self.assertEqual(regridded_values.shape, target_lats.shape)
        self.assertEqual(regridded_values.shape, target_lons.shape)
class TestSpatialRegrid(unittest.TestCase):
    """Tests for dataset_processor.spatial_regrid."""

    def setUp(self):
        self.input_dataset = ten_year_monthly_dataset()
        self.new_lats = np.array(range(-89, 90, 4))
        self.new_lons = np.array(range(-179, 180, 4))
        self.regridded_dataset = dp.spatial_regrid(self.input_dataset, self.new_lats, self.new_lons)

    def test_returned_lats(self):
        np.testing.assert_array_equal(self.regridded_dataset.lats, self.new_lats)

    def test_returned_lons(self):
        np.testing.assert_array_equal(self.regridded_dataset.lons, self.new_lons)

    def test_shape_of_values(self):
        regridded_data_shape = self.regridded_dataset.values.shape
        expected_data_shape = (len(self.input_dataset.times), len(self.new_lats), len(self.new_lons))
        self.assertSequenceEqual(regridded_data_shape, expected_data_shape)

    def test_variable_propagation(self):
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12
        self.assertEqual(self.input_dataset.name, self.regridded_dataset.name)
        self.assertEqual(self.input_dataset.variable, self.regridded_dataset.variable)
class TestNormalizeDatasetDatetimes(unittest.TestCase):
    """Tests for dataset_processor.normalize_dataset_datetimes."""

    def setUp(self):
        self.monthly_dataset = ten_year_monthly_15th_dataset()
        self.daily_dataset = two_year_daily_2hr_dataset()

    def test_daily(self):
        new_ds = dp.normalize_dataset_datetimes(self.monthly_dataset, 'daily')
        # Check that the hour component has been zeroed on every time value
        # (previous comment incorrectly described the monthly behaviour)
        self.assertTrue(all(x.hour == 0 for x in new_ds.times))

    def test_monthly(self):
        # method renamed from 'test_montly' (typo); still discovered by unittest
        new_ds = dp.normalize_dataset_datetimes(self.monthly_dataset, 'monthly')
        # Check that all the days have been shifted to the first of the month
        self.assertTrue(all(x.day == 1 for x in new_ds.times))
class TestSubset(unittest.TestCase):
    """Tests for dataset_processor.subset and its subregion slice-index helper."""

    def setUp(self):
        self.target_dataset = ten_year_monthly_dataset()
        self.name = 'foo'
        # bounds aligned exactly with the dataset's lat/lon/time values
        self.subregion = ds.Bounds(
            -81, 81,
            -161, 161,
            datetime.datetime(2001, 1, 1),
            datetime.datetime(2004, 1, 1)
        )
        # spatially offset bounds that fall between grid points
        self.non_exact_spatial_subregion = ds.Bounds(
            -80.25, 80.5,
            -160.25, 160.5,
            datetime.datetime(2001, 1, 1),
            datetime.datetime(2004, 1, 1)
        )
        # temporally offset bounds that fall between time readings
        self.non_exact_temporal_subregion = ds.Bounds(
            -80.25, 80.5,
            -160.25, 160.5,
            datetime.datetime(2001, 1, 15),
            datetime.datetime(2004, 2, 15)
        )

    def test_subset(self):
        subset = dp.subset(self.subregion, self.target_dataset)
        self.assertEqual(subset.lats.shape[0], 82)
        self.assertSequenceEqual(list(np.array(range(-81, 82, 2))), list(subset.lats))
        self.assertEqual(subset.lons.shape[0], 162)
        self.assertEqual(subset.times.shape[0], 37)
        self.assertEqual(subset.values.shape, (37, 82, 162))

    def test_subset_name(self):
        subset = dp.subset(self.subregion, self.target_dataset)
        self.assertEqual(subset.name, self.name)

    def test_subset_name_propagation(self):
        subset_name = 'foo_subset_name'
        subset = dp.subset(self.subregion, self.target_dataset, subset_name)
        self.assertEqual(subset.name, subset_name)

    def test_subset_using_non_exact_spatial_bounds(self):
        index_slices = dp._get_subregion_slice_indices(self.non_exact_spatial_subregion, self.target_dataset)
        expected_slices = {
            "lat_start": 5,
            "lat_end": 84,
            "lon_start": 10,
            "lon_end": 169,
            "time_start": 12,
            "time_end": 48,
        }
        self.assertDictEqual(index_slices, expected_slices)

    def test_subset_using_non_exact_temporal_bounds(self):
        index_slices = dp._get_subregion_slice_indices(self.non_exact_temporal_subregion, self.target_dataset)
        expected_slices = {
            "lat_start": 5,
            "lat_end": 84,
            "lon_start": 10,
            "lon_end": 169,
            "time_start": 13,
            "time_end": 49,
        }
        self.assertDictEqual(index_slices, expected_slices)
class TestSafeSubset(unittest.TestCase):
    """Tests for dp.safe_subset, which clips out-of-range bounds to the dataset."""

    def setUp(self):
        lats = np.array(range(-60, 61, 1))
        lons = np.array(range(-170, 171, 1))
        times = np.array([datetime.datetime(year, month, 1)
                          for year in range(2000, 2010)
                          for month in range(1, 13)])
        values = np.ones([len(times), len(lats), len(lons)])
        self.target_dataset = ds.Dataset(lats, lons, times, values,
                                         variable="test variable name",
                                         units='test variable units',
                                         name='foo')
        self.spatial_out_of_bounds = ds.Bounds(
            -165, 165, -180, 180,
            datetime.datetime(2001, 1, 1), datetime.datetime(2004, 1, 1))
        self.temporal_out_of_bounds = ds.Bounds(
            -40, 40, -160.25, 160.5,
            datetime.datetime(1999, 1, 15), datetime.datetime(2222, 2, 15))
        self.everything_out_of_bounds = ds.Bounds(
            -165, 165, -180, 180,
            datetime.datetime(1999, 1, 15), datetime.datetime(2222, 2, 15))

    def test_partial_spatial_overlap(self):
        '''Ensure that safe_subset can handle out of bounds spatial values'''
        # Fix: the result was previously bound to `ds`, shadowing the dataset
        # module alias used in setUp; assertEquals is also a deprecated alias.
        result = dp.safe_subset(self.spatial_out_of_bounds, self.target_dataset)
        spatial_bounds = result.spatial_boundaries()
        self.assertEqual(spatial_bounds[0], -60)
        self.assertEqual(spatial_bounds[1], 60)
        self.assertEqual(spatial_bounds[2], -170)
        self.assertEqual(spatial_bounds[3], 170)

    def test_partial_temporal_overlap(self):
        '''Ensure that safe_subset can handle out of bounds temporal values'''
        result = dp.safe_subset(self.temporal_out_of_bounds, self.target_dataset)
        temporal_bounds = result.time_range()
        start = datetime.datetime(2000, 1, 1)
        end = datetime.datetime(2009, 12, 1)
        self.assertEqual(temporal_bounds[0], start)
        self.assertEqual(temporal_bounds[1], end)

    def test_entire_bounds_overlap(self):
        # Both spatial and temporal bounds exceed the dataset; the result
        # should be clipped to the dataset's full extent in every dimension.
        result = dp.safe_subset(self.everything_out_of_bounds, self.target_dataset)
        spatial_bounds = result.spatial_boundaries()
        temporal_bounds = result.time_range()
        start = datetime.datetime(2000, 1, 1)
        end = datetime.datetime(2009, 12, 1)
        self.assertEqual(spatial_bounds[0], -60)
        self.assertEqual(spatial_bounds[1], 60)
        self.assertEqual(spatial_bounds[2], -170)
        self.assertEqual(spatial_bounds[3], 170)
        self.assertEqual(temporal_bounds[0], start)
        self.assertEqual(temporal_bounds[1], end)
class TestFailingSubset(unittest.TestCase):
    """dp.subset must raise ValueError when the subregion exceeds the dataset."""

    def setUp(self):
        self.target_dataset = ten_year_monthly_dataset()
        # Shrink the grid slightly so each test below can push one bound
        # just past the dataset's edge.
        self.target_dataset.lats = np.array(range(-89, 88, 2))
        self.target_dataset.lons = np.array(range(-179, 178, 2))
        self.subregion = ds.Bounds(
            -81, 81, -161, 161,
            datetime.datetime(2001, 1, 1), datetime.datetime(2004, 1, 1))

    def _assert_subset_raises(self):
        # Shared check: subsetting with the (now invalid) subregion must fail.
        with self.assertRaises(ValueError):
            dp.subset(self.subregion, self.target_dataset)

    def test_out_of_dataset_bounds_lat_min(self):
        self.subregion.lat_min = -90
        self._assert_subset_raises()

    def test_out_of_dataset_bounds_lat_max(self):
        self.subregion.lat_max = 90
        self._assert_subset_raises()

    def test_out_of_dataset_bounds_lon_min(self):
        self.subregion.lon_min = -180
        self._assert_subset_raises()

    def test_out_of_dataset_bounds_lon_max(self):
        self.subregion.lon_max = 180
        self._assert_subset_raises()

    def test_out_of_dataset_bounds_start(self):
        self.subregion.start = datetime.datetime(1999, 1, 1)
        self._assert_subset_raises()

    def test_out_of_dataset_bounds_end(self):
        self.subregion.end = datetime.datetime(2011, 1, 1)
        self._assert_subset_raises()
class TestNetCDFWrite(unittest.TestCase):
    """Round-trip a dataset through dp.write_netcdf and local.load_file."""

    def setUp(self):
        self.ds = ten_year_monthly_dataset()
        self.file_name = 'test.nc'

    def tearDown(self):
        # Remove the temporary NetCDF file if the test produced one.
        if os.path.isfile(self.file_name):
            os.remove(self.file_name)

    def test_file_write(self):
        dp.write_netcdf(self.ds, self.file_name)
        self.assertTrue(os.path.isfile(self.file_name))

    def test_that_file_contents_are_valid(self):
        dp.write_netcdf(self.ds, self.file_name)
        reloaded = local.load_file(self.file_name, self.ds.variable)
        self.assertEqual(self.ds.variable, reloaded.variable)
        # Every coordinate axis and the value grid must survive the round trip.
        for attr in ('lats', 'lons', 'times', 'values'):
            np.testing.assert_array_equal(getattr(self.ds, attr),
                                          getattr(reloaded, attr))
def ten_year_monthly_dataset():
    """Build a named monthly test dataset spanning 2000-2009 (120 time steps)."""
    lats = np.array(range(-89, 90, 2))
    lons = np.array(range(-179, 180, 2))
    # One timestamp on the first of every month for ten years.
    times = np.array([datetime.datetime(yr, mo, 1)
                      for yr in range(2000, 2010)
                      for mo in range(1, 13)])
    values = np.ones([len(times), len(lats), len(lons)])
    return ds.Dataset(lats, lons, times, values,
                      variable="test variable name",
                      units='test variable units',
                      name='foo')
def ten_year_monthly_15th_dataset():
    """Build a monthly test dataset for 2000-2009 stamped on the 15th of each month.

    Bug fix: the timestamps previously used day 1, contradicting the function
    name and making TestNormalizeDatasetDatetimes.test_monthly vacuous (the
    days were already 1 before normalization shifted them).
    """
    lats = np.array(range(-89, 90, 2))
    lons = np.array(range(-179, 180, 2))
    # Ten years of monthly data, dated mid-month as the name promises.
    times = np.array([datetime.datetime(year, month, 15)
                      for year in range(2000, 2010)
                      for month in range(1, 13)])
    values = np.ones([len(times), len(lats), len(lons)])
    return ds.Dataset(lats, lons, times, values,
                      variable="test variable name",
                      units='test variable units')
def two_year_daily_dataset():
    """Build a 730-day daily test dataset starting 2001-01-01 (midnight stamps)."""
    lats = np.array(range(-89, 90, 2))
    lons = np.array(range(-179, 180, 2))
    base = datetime.datetime(2001, 1, 1)
    times = np.array([base + datetime.timedelta(days=offset)
                      for offset in range(730)])
    values = np.ones([len(times), len(lats), len(lons)])
    return ds.Dataset(lats, lons, times, values,
                      variable='random data', units='test variable units')
def two_year_daily_2hr_dataset():
    """Build a 730-day daily test dataset whose stamps are offset to 02:00."""
    lats = np.array(range(-89, 90, 2))
    lons = np.array(range(-179, 180, 2))
    base = datetime.datetime(2001, 1, 1)
    # The 2-hour offset gives normalization code something to truncate.
    times = np.array([base + datetime.timedelta(days=offset, hours=2)
                      for offset in range(730)])
    values = np.ones([len(times), len(lats), len(lons)])
    return ds.Dataset(lats, lons, times, values,
                      variable='random data', units='test variable units')
def build_ten_cube_dataset(value):
    """Build a coarse 10x10x10 annual dataset uniformly filled with `value`.

    Args:
        value: the scalar every grid cell is set to.

    Returns:
        a ds.Dataset with one annual time step per year for 2000-2009.
    """
    lats = np.array(range(-89, 90, 18))
    lons = np.array(range(-179, 180, 36))
    times = np.array([datetime.datetime(year, 1, 1) for year in range(2000, 2010)])
    # np.full replaces the ones-then-multiply idiom in a single allocation;
    # dtype=float preserves the float array np.ones * value produced.
    values = np.full((len(times), len(lats), len(lons)), value, dtype=float)
    return ds.Dataset(lats, lons, times, values)
# Run the whole test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.array",
"ocw.dataset.Dataset",
"ocw.dataset_processor.spatial_regrid",
"unittest.main",
"datetime.timedelta",
"os.remove",
"datetime.datetime",
"ocw.data_source.local.load_file",
"numpy.meshgrid",
"ocw.dataset_processor.safe_subset",
"numpy.testing.assert_array_equal",
"ocw.dataset_proc... | [((1003, 1046), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.CRITICAL'}), '(level=logging.CRITICAL)\n', (1022, 1046), False, 'import logging\n'), ((15929, 16043), 'ocw.dataset.Dataset', 'ds.Dataset', (['lats', 'lons', 'times', 'values'], {'variable': '"""test variable name"""', 'units': '"""test variable units"""', 'name': '"""foo"""'}), "(lats, lons, times, values, variable='test variable name', units=\n 'test variable units', name='foo')\n", (15939, 16043), True, 'from ocw import dataset as ds\n'), ((16405, 16507), 'ocw.dataset.Dataset', 'ds.Dataset', (['lats', 'lons', 'times', 'values'], {'variable': '"""test variable name"""', 'units': '"""test variable units"""'}), "(lats, lons, times, values, variable='test variable name', units=\n 'test variable units')\n", (16415, 16507), True, 'from ocw import dataset as ds\n'), ((16813, 16908), 'ocw.dataset.Dataset', 'ds.Dataset', (['lats', 'lons', 'times', 'values'], {'variable': '"""random data"""', 'units': '"""test variable units"""'}), "(lats, lons, times, values, variable='random data', units=\n 'test variable units')\n", (16823, 16908), True, 'from ocw import dataset as ds\n'), ((17224, 17319), 'ocw.dataset.Dataset', 'ds.Dataset', (['lats', 'lons', 'times', 'values'], {'variable': '"""random data"""', 'units': '"""test variable units"""'}), "(lats, lons, times, values, variable='random data', units=\n 'test variable units')\n", (17234, 17319), True, 'from ocw import dataset as ds\n'), ((17639, 17676), 'ocw.dataset.Dataset', 'ds.Dataset', (['lats', 'lons', 'times', 'values'], {}), '(lats, lons, times, values)\n', (17649, 17676), True, 'from ocw import dataset as ds\n'), ((17728, 17743), 'unittest.main', 'unittest.main', ([], {}), '()\n', (17741, 17743), False, 'import unittest\n'), ((1790, 1816), 'ocw.dataset_processor.ensemble', 'dp.ensemble', (['self.datasets'], {}), '(self.datasets)\n', (1801, 1816), True, 'from ocw import dataset_processor as dp\n'), ((1939, 2005), 
'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.ensemble_flat', 'self.three_flat'], {}), '(self.ensemble_flat, self.three_flat)\n', (1968, 2005), True, 'import numpy as np\n'), ((2264, 2290), 'ocw.dataset_processor.ensemble', 'dp.ensemble', (['self.datasets'], {}), '(self.datasets)\n', (2275, 2290), True, 'from ocw import dataset_processor as dp\n'), ((2759, 2817), 'ocw.dataset_processor.temporal_rebin', 'dp.temporal_rebin', (['self.ten_year_monthly_dataset', '"""annual"""'], {}), "(self.ten_year_monthly_dataset, 'annual')\n", (2776, 2817), True, 'from ocw import dataset_processor as dp\n'), ((2826, 2905), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['annual_dataset.times', 'self.ten_year_annual_times'], {}), '(annual_dataset.times, self.ten_year_annual_times)\n', (2855, 2905), True, 'import numpy as np\n'), ((2976, 3032), 'ocw.dataset_processor.temporal_rebin', 'dp.temporal_rebin', (['self.ten_year_monthly_dataset', '"""full"""'], {}), "(self.ten_year_monthly_dataset, 'full')\n", (2993, 3032), True, 'from ocw import dataset_processor as dp\n'), ((3322, 3380), 'ocw.dataset_processor.temporal_rebin', 'dp.temporal_rebin', (['self.two_years_daily_dataset', '"""monthly"""'], {}), "(self.two_years_daily_dataset, 'monthly')\n", (3339, 3380), True, 'from ocw import dataset_processor as dp\n'), ((3540, 3554), 'numpy.array', 'np.array', (['bins'], {}), '(bins)\n', (3548, 3554), True, 'import numpy as np\n'), ((3583, 3641), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['monthly_dataset.times', 'bins'], {}), '(monthly_dataset.times, bins)\n', (3612, 3641), True, 'import numpy as np\n'), ((3714, 3771), 'ocw.dataset_processor.temporal_rebin', 'dp.temporal_rebin', (['self.two_years_daily_dataset', '"""annual"""'], {}), "(self.two_years_daily_dataset, 'annual')\n", (3731, 3771), True, 'from ocw import dataset_processor as dp\n'), ((3913, 3927), 'numpy.array', 'np.array', (['bins'], {}), '(bins)\n', 
(3921, 3927), True, 'import numpy as np\n'), ((3956, 4013), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['annual_dataset.times', 'bins'], {}), '(annual_dataset.times, bins)\n', (3985, 4013), True, 'import numpy as np\n'), ((4225, 4284), 'ocw.dataset_processor.temporal_rebin', 'dp.temporal_rebin', (['self.ten_year_monthly_dataset', '"""monthly"""'], {}), "(self.ten_year_monthly_dataset, 'monthly')\n", (4242, 4284), True, 'from ocw import dataset_processor as dp\n'), ((4445, 4459), 'numpy.array', 'np.array', (['bins'], {}), '(bins)\n', (4453, 4459), True, 'import numpy as np\n'), ((4488, 4546), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['monthly_dataset.times', 'bins'], {}), '(monthly_dataset.times, bins)\n', (4517, 4546), True, 'import numpy as np\n'), ((4618, 4676), 'ocw.dataset_processor.temporal_rebin', 'dp.temporal_rebin', (['self.ten_year_monthly_dataset', '"""annual"""'], {}), "(self.ten_year_monthly_dataset, 'annual')\n", (4635, 4676), True, 'from ocw import dataset_processor as dp\n'), ((5062, 5080), 'numpy.ones', 'np.ones', (['[90, 180]'], {}), '([90, 180])\n', (5069, 5080), True, 'import numpy as np\n'), ((5105, 5129), 'numpy.ma.array', 'ma.array', (['spatial_values'], {}), '(spatial_values)\n', (5113, 5129), True, 'import numpy.ma as ma\n'), ((5267, 5300), 'numpy.meshgrid', 'np.meshgrid', (['lon_range', 'lat_range'], {}), '(lon_range, lat_range)\n', (5278, 5300), True, 'import numpy as np\n'), ((5357, 5371), 'numpy.ma.array', 'ma.array', (['lats'], {}), '(lats)\n', (5365, 5371), True, 'import numpy.ma as ma\n'), ((5387, 5401), 'numpy.ma.array', 'ma.array', (['lons'], {}), '(lons)\n', (5395, 5401), True, 'import numpy.ma as ma\n'), ((5543, 5578), 'numpy.meshgrid', 'np.meshgrid', (['lon2_range', 'lat2_range'], {}), '(lon2_range, lat2_range)\n', (5554, 5578), True, 'import numpy as np\n'), ((5630, 5645), 'numpy.ma.array', 'ma.array', (['lats2'], {}), '(lats2)\n', (5638, 5645), True, 'import numpy.ma as 
ma\n'), ((5662, 5677), 'numpy.ma.array', 'ma.array', (['lons2'], {}), '(lons2)\n', (5670, 5677), True, 'import numpy.ma as ma\n'), ((5706, 5772), 'ocw.dataset_processor._rcmes_spatial_regrid', 'dp._rcmes_spatial_regrid', (['spatial_values', 'lats', 'lons', 'lats2', 'lons2'], {}), '(spatial_values, lats, lons, lats2, lons2)\n', (5730, 5772), True, 'from ocw import dataset_processor as dp\n'), ((6163, 6230), 'ocw.dataset_processor.spatial_regrid', 'dp.spatial_regrid', (['self.input_dataset', 'self.new_lats', 'self.new_lons'], {}), '(self.input_dataset, self.new_lats, self.new_lons)\n', (6180, 6230), True, 'from ocw import dataset_processor as dp\n'), ((6275, 6348), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.regridded_dataset.lats', 'self.new_lats'], {}), '(self.regridded_dataset.lats, self.new_lats)\n', (6304, 6348), True, 'import numpy as np\n'), ((6392, 6465), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.regridded_dataset.lons', 'self.new_lons'], {}), '(self.regridded_dataset.lons, self.new_lons)\n', (6421, 6465), True, 'import numpy as np\n'), ((7201, 7262), 'ocw.dataset_processor.normalize_dataset_datetimes', 'dp.normalize_dataset_datetimes', (['self.monthly_dataset', '"""daily"""'], {}), "(self.monthly_dataset, 'daily')\n", (7231, 7262), True, 'from ocw import dataset_processor as dp\n'), ((7451, 7514), 'ocw.dataset_processor.normalize_dataset_datetimes', 'dp.normalize_dataset_datetimes', (['self.monthly_dataset', '"""monthly"""'], {}), "(self.monthly_dataset, 'monthly')\n", (7481, 7514), True, 'from ocw import dataset_processor as dp\n'), ((8437, 8483), 'ocw.dataset_processor.subset', 'dp.subset', (['self.subregion', 'self.target_dataset'], {}), '(self.subregion, self.target_dataset)\n', (8446, 8483), True, 'from ocw import dataset_processor as dp\n'), ((8854, 8900), 'ocw.dataset_processor.subset', 'dp.subset', (['self.subregion', 'self.target_dataset'], {}), '(self.subregion, 
self.target_dataset)\n', (8863, 8900), True, 'from ocw import dataset_processor as dp\n'), ((9052, 9111), 'ocw.dataset_processor.subset', 'dp.subset', (['self.subregion', 'self.target_dataset', 'subset_name'], {}), '(self.subregion, self.target_dataset, subset_name)\n', (9061, 9111), True, 'from ocw import dataset_processor as dp\n'), ((9244, 9335), 'ocw.dataset_processor._get_subregion_slice_indices', 'dp._get_subregion_slice_indices', (['self.non_exact_spatial_subregion', 'self.target_dataset'], {}), '(self.non_exact_spatial_subregion, self.\n target_dataset)\n', (9275, 9335), True, 'from ocw import dataset_processor as dp\n'), ((9787, 9879), 'ocw.dataset_processor._get_subregion_slice_indices', 'dp._get_subregion_slice_indices', (['self.non_exact_temporal_subregion', 'self.target_dataset'], {}), '(self.non_exact_temporal_subregion, self.\n target_dataset)\n', (9818, 9879), True, 'from ocw import dataset_processor as dp\n'), ((10660, 10774), 'ocw.dataset.Dataset', 'ds.Dataset', (['lats', 'lons', 'times', 'values'], {'variable': '"""test variable name"""', 'units': '"""test variable units"""', 'name': '"""foo"""'}), "(lats, lons, times, values, variable='test variable name', units=\n 'test variable units', name='foo')\n", (10670, 10774), True, 'from ocw import dataset as ds\n'), ((11733, 11796), 'ocw.dataset_processor.safe_subset', 'dp.safe_subset', (['self.spatial_out_of_bounds', 'self.target_dataset'], {}), '(self.spatial_out_of_bounds, self.target_dataset)\n', (11747, 11796), True, 'from ocw import dataset_processor as dp\n'), ((11822, 11845), 'ocw.dataset.spatial_boundaries', 'ds.spatial_boundaries', ([], {}), '()\n', (11843, 11845), True, 'from ocw import dataset as ds\n'), ((12184, 12248), 'ocw.dataset_processor.safe_subset', 'dp.safe_subset', (['self.temporal_out_of_bounds', 'self.target_dataset'], {}), '(self.temporal_out_of_bounds, self.target_dataset)\n', (12198, 12248), True, 'from ocw import dataset_processor as dp\n'), ((12275, 12290), 
'ocw.dataset.time_range', 'ds.time_range', ([], {}), '()\n', (12288, 12290), True, 'from ocw import dataset as ds\n'), ((12307, 12336), 'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (12324, 12336), False, 'import datetime\n'), ((12351, 12381), 'datetime.datetime', 'datetime.datetime', (['(2009)', '(12)', '(1)'], {}), '(2009, 12, 1)\n', (12368, 12381), False, 'import datetime\n'), ((12543, 12609), 'ocw.dataset_processor.safe_subset', 'dp.safe_subset', (['self.everything_out_of_bounds', 'self.target_dataset'], {}), '(self.everything_out_of_bounds, self.target_dataset)\n', (12557, 12609), True, 'from ocw import dataset_processor as dp\n'), ((12635, 12658), 'ocw.dataset.spatial_boundaries', 'ds.spatial_boundaries', ([], {}), '()\n', (12656, 12658), True, 'from ocw import dataset as ds\n'), ((12685, 12700), 'ocw.dataset.time_range', 'ds.time_range', ([], {}), '()\n', (12698, 12700), True, 'from ocw import dataset as ds\n'), ((12717, 12746), 'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (12734, 12746), False, 'import datetime\n'), ((12761, 12791), 'datetime.datetime', 'datetime.datetime', (['(2009)', '(12)', '(1)'], {}), '(2009, 12, 1)\n', (12778, 12791), False, 'import datetime\n'), ((14371, 14400), 'datetime.datetime', 'datetime.datetime', (['(1999)', '(1)', '(1)'], {}), '(1999, 1, 1)\n', (14388, 14400), False, 'import datetime\n'), ((14580, 14609), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(1)', '(1)'], {}), '(2011, 1, 1)\n', (14597, 14609), False, 'import datetime\n'), ((14893, 14923), 'os.path.isfile', 'os.path.isfile', (['self.file_name'], {}), '(self.file_name)\n', (14907, 14923), False, 'import os\n'), ((15003, 15043), 'ocw.dataset_processor.write_netcdf', 'dp.write_netcdf', (['self.ds', 'self.file_name'], {}), '(self.ds, self.file_name)\n', (15018, 15043), True, 'from ocw import dataset_processor as dp\n'), ((15158, 15198), 
'ocw.dataset_processor.write_netcdf', 'dp.write_netcdf', (['self.ds', 'self.file_name'], {}), '(self.ds, self.file_name)\n', (15173, 15198), True, 'from ocw import dataset_processor as dp\n'), ((15216, 15265), 'ocw.data_source.local.load_file', 'local.load_file', (['self.file_name', 'self.ds.variable'], {}), '(self.file_name, self.ds.variable)\n', (15231, 15265), False, 'from ocw.data_source import local\n'), ((15335, 15391), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.ds.lats', 'new_ds.lats'], {}), '(self.ds.lats, new_ds.lats)\n', (15364, 15391), True, 'import numpy as np\n'), ((15400, 15456), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.ds.lons', 'new_ds.lons'], {}), '(self.ds.lons, new_ds.lons)\n', (15429, 15456), True, 'import numpy as np\n'), ((15465, 15523), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.ds.times', 'new_ds.times'], {}), '(self.ds.times, new_ds.times)\n', (15494, 15523), True, 'import numpy as np\n'), ((15532, 15592), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.ds.values', 'new_ds.values'], {}), '(self.ds.values, new_ds.values)\n', (15561, 15592), True, 'import numpy as np\n'), ((1327, 1386), 'ocw.dataset_processor.ensemble', 'dp.ensemble', (['[self.ten_year_dataset, self.two_year_dataset]'], {}), '([self.ten_year_dataset, self.two_year_dataset])\n', (1338, 1386), True, 'from ocw import dataset_processor as dp\n'), ((3055, 3084), 'datetime.datetime', 'datetime.datetime', (['(2005)', '(1)', '(1)'], {}), '(2005, 1, 1)\n', (3072, 3084), False, 'import datetime\n'), ((7894, 7923), 'datetime.datetime', 'datetime.datetime', (['(2001)', '(1)', '(1)'], {}), '(2001, 1, 1)\n', (7911, 7923), False, 'import datetime\n'), ((7938, 7967), 'datetime.datetime', 'datetime.datetime', (['(2004)', '(1)', '(1)'], {}), '(2004, 1, 1)\n', (7955, 7967), False, 'import datetime\n'), ((8100, 8129), 'datetime.datetime', 'datetime.datetime', 
(['(2001)', '(1)', '(1)'], {}), '(2001, 1, 1)\n', (8117, 8129), False, 'import datetime\n'), ((8144, 8173), 'datetime.datetime', 'datetime.datetime', (['(2004)', '(1)', '(1)'], {}), '(2004, 1, 1)\n', (8161, 8173), False, 'import datetime\n'), ((8306, 8336), 'datetime.datetime', 'datetime.datetime', (['(2001)', '(1)', '(15)'], {}), '(2001, 1, 15)\n', (8323, 8336), False, 'import datetime\n'), ((8351, 8381), 'datetime.datetime', 'datetime.datetime', (['(2004)', '(2)', '(15)'], {}), '(2004, 2, 15)\n', (8368, 8381), False, 'import datetime\n'), ((11123, 11152), 'datetime.datetime', 'datetime.datetime', (['(2001)', '(1)', '(1)'], {}), '(2001, 1, 1)\n', (11140, 11152), False, 'import datetime\n'), ((11166, 11195), 'datetime.datetime', 'datetime.datetime', (['(2004)', '(1)', '(1)'], {}), '(2004, 1, 1)\n', (11183, 11195), False, 'import datetime\n'), ((11317, 11347), 'datetime.datetime', 'datetime.datetime', (['(1999)', '(1)', '(15)'], {}), '(1999, 1, 15)\n', (11334, 11347), False, 'import datetime\n'), ((11361, 11391), 'datetime.datetime', 'datetime.datetime', (['(2222)', '(2)', '(15)'], {}), '(2222, 2, 15)\n', (11378, 11391), False, 'import datetime\n'), ((11512, 11542), 'datetime.datetime', 'datetime.datetime', (['(1999)', '(1)', '(15)'], {}), '(1999, 1, 15)\n', (11529, 11542), False, 'import datetime\n'), ((11556, 11586), 'datetime.datetime', 'datetime.datetime', (['(2222)', '(2)', '(15)'], {}), '(2222, 2, 15)\n', (11573, 11586), False, 'import datetime\n'), ((13443, 13472), 'datetime.datetime', 'datetime.datetime', (['(2001)', '(1)', '(1)'], {}), '(2001, 1, 1)\n', (13460, 13472), False, 'import datetime\n'), ((13487, 13516), 'datetime.datetime', 'datetime.datetime', (['(2004)', '(1)', '(1)'], {}), '(2004, 1, 1)\n', (13504, 13516), False, 'import datetime\n'), ((13671, 13717), 'ocw.dataset_processor.subset', 'dp.subset', (['self.subregion', 'self.target_dataset'], {}), '(self.subregion, self.target_dataset)\n', (13680, 13717), True, 'from ocw import dataset_processor 
as dp\n'), ((13861, 13907), 'ocw.dataset_processor.subset', 'dp.subset', (['self.subregion', 'self.target_dataset'], {}), '(self.subregion, self.target_dataset)\n', (13870, 13907), True, 'from ocw import dataset_processor as dp\n'), ((14053, 14099), 'ocw.dataset_processor.subset', 'dp.subset', (['self.subregion', 'self.target_dataset'], {}), '(self.subregion, self.target_dataset)\n', (14062, 14099), True, 'from ocw import dataset_processor as dp\n'), ((14244, 14290), 'ocw.dataset_processor.subset', 'dp.subset', (['self.subregion', 'self.target_dataset'], {}), '(self.subregion, self.target_dataset)\n', (14253, 14290), True, 'from ocw import dataset_processor as dp\n'), ((14457, 14503), 'ocw.dataset_processor.subset', 'dp.subset', (['self.subregion', 'self.target_dataset'], {}), '(self.subregion, self.target_dataset)\n', (14466, 14503), True, 'from ocw import dataset_processor as dp\n'), ((14666, 14712), 'ocw.dataset_processor.subset', 'dp.subset', (['self.subregion', 'self.target_dataset'], {}), '(self.subregion, self.target_dataset)\n', (14675, 14712), True, 'from ocw import dataset_processor as dp\n'), ((14937, 14962), 'os.remove', 'os.remove', (['self.file_name'], {}), '(self.file_name)\n', (14946, 14962), False, 'import os\n'), ((15068, 15098), 'os.path.isfile', 'os.path.isfile', (['self.file_name'], {}), '(self.file_name)\n', (15082, 15098), False, 'import os\n'), ((15760, 15793), 'datetime.datetime', 'datetime.datetime', (['year', 'month', '(1)'], {}), '(year, month, 1)\n', (15777, 15793), False, 'import datetime\n'), ((16236, 16269), 'datetime.datetime', 'datetime.datetime', (['year', 'month', '(1)'], {}), '(year, month, 1)\n', (16253, 16269), False, 'import datetime\n'), ((17478, 17507), 'datetime.datetime', 'datetime.datetime', (['year', '(1)', '(1)'], {}), '(year, 1, 1)\n', (17495, 17507), False, 'import datetime\n'), ((2559, 2588), 'datetime.datetime', 'datetime.datetime', (['year', '(7)', '(2)'], {}), '(year, 7, 2)\n', (2576, 2588), False, 'import 
datetime\n'), ((10425, 10458), 'datetime.datetime', 'datetime.datetime', (['year', 'month', '(1)'], {}), '(year, month, 1)\n', (10442, 10458), False, 'import datetime\n'), ((16661, 16690), 'datetime.datetime', 'datetime.datetime', (['(2001)', '(1)', '(1)'], {}), '(2001, 1, 1)\n', (16678, 16690), False, 'import datetime\n'), ((16693, 16719), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'd'}), '(days=d)\n', (16711, 16719), False, 'import datetime\n'), ((17063, 17092), 'datetime.datetime', 'datetime.datetime', (['(2001)', '(1)', '(1)'], {}), '(2001, 1, 1)\n', (17080, 17092), False, 'import datetime\n'), ((17095, 17130), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'd', 'hours': '(2)'}), '(days=d, hours=2)\n', (17113, 17130), False, 'import datetime\n'), ((3406, 3466), 'datetime.datetime', 'datetime.datetime', (['time_reading.year', 'time_reading.month', '(15)'], {}), '(time_reading.year, time_reading.month, 15)\n', (3423, 3466), False, 'import datetime\n'), ((3797, 3839), 'datetime.datetime', 'datetime.datetime', (['time_reading.year', '(7)', '(2)'], {}), '(time_reading.year, 7, 2)\n', (3814, 3839), False, 'import datetime\n'), ((4310, 4370), 'datetime.datetime', 'datetime.datetime', (['time_reading.year', 'time_reading.month', '(15)'], {}), '(time_reading.year, time_reading.month, 15)\n', (4327, 4370), False, 'import datetime\n')] |
import tensorflow as tf
import flask
from flask import request
import base64
import cv2
import numpy as np
from PIL import Image
from io import BytesIO
import transform
# print('Downloading CUM...')
# url = 'https://face-off-ai.s3.amazonaws.com/CUMv6.h5'
# r = requests.get(url)
# with open('CUMv6.h5', 'wb') as mod:
# mod.write(r.content)
# r = request.get('https://face-off-ai.s3.amazonaws.com/shape_predictor_68_face_landmarks.dat')
# with open('shape_predictor_68_face_landmarks.dat') as pred:
# pred.write(r.content)
# print('Downloads Complete')
# Flask application serving emotion predictions over HTTP.
app = flask.Flask(__name__)
print('Server running...')
# Maps the model's class indices to human-readable emotion labels
# (must match the output order the model was trained with).
emotions = {
    0: 'Fear',
    1: 'Happy',
    2: 'Neutral',
    3: 'Sad'
}
print('loading model...')
# Load the Keras model once at startup; assumes CUMv6.h5 exists locally
# (presumably fetched by the commented-out S3 download above -- TODO confirm).
model = tf.keras.models.load_model('CUMv6.h5')
print('model loaded, good to go!')
@app.route("/predict", methods=["GET","POST"])
def predict():
data = {'success': 'false'}
req = request.get_json()
image = req['image']
print(image)
if (image != None):
transformed = transform.transformIndividual(image)
if (type(transformed) != None):
transformed = cv2.cvtColor(transformed, cv2.COLOR_GRAY2RGB)
transformed = transformed/255.
transformed = np.expand_dims(transformed, axis=0)
prob = model.predict(transformed)
label = prob.argmax(axis=1)
data['prediction'] = str(emotions[label[0]])
data['success'] = 'true'
data['confidence'] = str(prob[0][label[0]])
else:
print("No faces detected")
data['success'] = False
data['reason'] = 'No faces detected'
return flask.jsonify(data)
return flask.jsonify(data)
if __name__ == "__main__":
app.run(host='0.0.0.0')
| [
"flask.Flask",
"transform.transformIndividual",
"flask.request.get_json",
"tensorflow.keras.models.load_model",
"cv2.cvtColor",
"numpy.expand_dims",
"flask.jsonify"
] | [((570, 591), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (581, 591), False, 'import flask\n'), ((747, 785), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""CUMv6.h5"""'], {}), "('CUMv6.h5')\n", (773, 785), True, 'import tensorflow as tf\n'), ((926, 944), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (942, 944), False, 'from flask import request\n'), ((1720, 1739), 'flask.jsonify', 'flask.jsonify', (['data'], {}), '(data)\n', (1733, 1739), False, 'import flask\n'), ((1033, 1069), 'transform.transformIndividual', 'transform.transformIndividual', (['image'], {}), '(image)\n', (1062, 1069), False, 'import transform\n'), ((1136, 1181), 'cv2.cvtColor', 'cv2.cvtColor', (['transformed', 'cv2.COLOR_GRAY2RGB'], {}), '(transformed, cv2.COLOR_GRAY2RGB)\n', (1148, 1181), False, 'import cv2\n'), ((1251, 1286), 'numpy.expand_dims', 'np.expand_dims', (['transformed'], {'axis': '(0)'}), '(transformed, axis=0)\n', (1265, 1286), True, 'import numpy as np\n'), ((1680, 1699), 'flask.jsonify', 'flask.jsonify', (['data'], {}), '(data)\n', (1693, 1699), False, 'import flask\n')] |
from pliers.extractors import (GoogleVisionAPIFaceExtractor,
GoogleVisionAPILabelExtractor,
GoogleVisionAPIPropertyExtractor,
GoogleVisionAPISafeSearchExtractor)
from pliers.extractors.google import GoogleVisionAPIExtractor
from pliers.stimuli import ImageStim
import pytest
import json
from os.path import join
from .utils import get_test_data_path
import numpy as np
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_extractor_inits():
    """Constructor arguments and defaults are stored on the base extractor."""
    extractor = GoogleVisionAPIExtractor(num_retries=5)
    assert extractor.num_retries == 5
    assert extractor.max_results == 100
    assert extractor.service is not None
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_face_extractor_inits():
    # Constructor arguments and defaults are stored on the extractor.
    ext = GoogleVisionAPIFaceExtractor(num_retries=5)
    assert ext.num_retries == 5
    assert ext.max_results == 100
    assert ext.service is not None

    # Test parsing of individual response
    # Parse a canned API payload offline (no network call needed).
    filename = join(
        get_test_data_path(), 'payloads', 'google_vision_api_face_payload.json')
    response = json.load(open(filename, 'r'))
    features, data = ext._parse_annotations(response['faceAnnotations'])
    # Feature names and values come back as parallel sequences.
    assert len(features) == len(data)
    assert data[features.index('angerLikelihood')] == 'VERY_UNLIKELY'
    assert data[
        features.index('landmark_LEFT_EYE_BOTTOM_BOUNDARY_y')] == 257.023
    # A vertex absent from the payload is reported as NaN, not dropped.
    assert np.isnan(data[features.index('boundingPoly_vertex2_y')])
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_face_extractor():
    """A single-face portrait yields joy likelihood and a confident detection."""
    extractor = GoogleVisionAPIFaceExtractor(num_retries=5)
    stim = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
    df = extractor.transform(stim).to_df()
    assert 'joyLikelihood' in df.columns
    assert df['joyLikelihood'][0] == 'VERY_LIKELY'
    assert df['face_detectionConfidence'][0] > 0.7
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_multiple_face_extraction():
    """handle_annotations='first' keeps one face; 'prefix' keeps them all."""
    stim = ImageStim(join(get_test_data_path(), 'image', 'thai_people.jpg'))

    # First record only.
    first_extractor = GoogleVisionAPIFaceExtractor(handle_annotations='first')
    first_only = first_extractor.transform(stim).to_df()
    assert 'joyLikelihood' in first_only.columns

    # All records, with per-face column prefixes.
    prefix_extractor = GoogleVisionAPIFaceExtractor(handle_annotations='prefix')
    all_faces = prefix_extractor.transform(stim).to_df()
    assert 'face2_joyLikelihood' in all_faces.columns
    assert all_faces.shape[1] > first_only.shape[1]
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_label_extractor():
    """Label detection on an apple photo should report 'apple' confidently."""
    extractor = GoogleVisionAPILabelExtractor(num_retries=5)
    stim = ImageStim(join(get_test_data_path(), 'image', 'apple.jpg'))
    df = extractor.transform(stim).to_df()
    assert 'apple' in df.columns
    assert df['apple'][0] > 0.75
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_properties_extractor():
ext = GoogleVisionAPIPropertyExtractor(num_retries=5)
filename = join(get_test_data_path(), 'image', 'apple.jpg')
stim = ImageStim(filename)
result = ext.transform(stim).to_df()
assert (158, 13, 29) in result.columns
assert np.isfinite(result[(158, 13, 29)][0])
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_safe_search():
ext = GoogleVisionAPISafeSearchExtractor(num_retries=5)
filename = join(get_test_data_path(), 'image', 'obama.jpg')
stim = ImageStim(filename)
result = ext.transform(stim).to_df()
assert 'adult' in result.columns
assert result['violence'][0] == 'VERY_UNLIKELY'
| [
"pliers.extractors.GoogleVisionAPIFaceExtractor",
"pliers.extractors.GoogleVisionAPIPropertyExtractor",
"pliers.extractors.google.GoogleVisionAPIExtractor",
"numpy.isfinite",
"pliers.stimuli.ImageStim",
"pytest.mark.skipif",
"pliers.extractors.GoogleVisionAPILabelExtractor",
"pliers.extractors.GoogleV... | [((465, 537), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""\'GOOGLE_APPLICATION_CREDENTIALS\' not in os.environ"""'], {}), '("\'GOOGLE_APPLICATION_CREDENTIALS\' not in os.environ")\n', (483, 537), False, 'import pytest\n'), ((738, 810), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""\'GOOGLE_APPLICATION_CREDENTIALS\' not in os.environ"""'], {}), '("\'GOOGLE_APPLICATION_CREDENTIALS\' not in os.environ")\n', (756, 810), False, 'import pytest\n'), ((1551, 1623), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""\'GOOGLE_APPLICATION_CREDENTIALS\' not in os.environ"""'], {}), '("\'GOOGLE_APPLICATION_CREDENTIALS\' not in os.environ")\n', (1569, 1623), False, 'import pytest\n'), ((2017, 2089), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""\'GOOGLE_APPLICATION_CREDENTIALS\' not in os.environ"""'], {}), '("\'GOOGLE_APPLICATION_CREDENTIALS\' not in os.environ")\n', (2035, 2089), False, 'import pytest\n'), ((2651, 2723), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""\'GOOGLE_APPLICATION_CREDENTIALS\' not in os.environ"""'], {}), '("\'GOOGLE_APPLICATION_CREDENTIALS\' not in os.environ")\n', (2669, 2723), False, 'import pytest\n'), ((3038, 3110), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""\'GOOGLE_APPLICATION_CREDENTIALS\' not in os.environ"""'], {}), '("\'GOOGLE_APPLICATION_CREDENTIALS\' not in os.environ")\n', (3056, 3110), False, 'import pytest\n'), ((3451, 3523), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""\'GOOGLE_APPLICATION_CREDENTIALS\' not in os.environ"""'], {}), '("\'GOOGLE_APPLICATION_CREDENTIALS\' not in os.environ")\n', (3469, 3523), False, 'import pytest\n'), ((594, 633), 'pliers.extractors.google.GoogleVisionAPIExtractor', 'GoogleVisionAPIExtractor', ([], {'num_retries': '(5)'}), '(num_retries=5)\n', (618, 633), False, 'from pliers.extractors.google import GoogleVisionAPIExtractor\n'), ((872, 915), 'pliers.extractors.GoogleVisionAPIFaceExtractor', 'GoogleVisionAPIFaceExtractor', ([], {'num_retries': 
'(5)'}), '(num_retries=5)\n', (900, 915), False, 'from pliers.extractors import GoogleVisionAPIFaceExtractor, GoogleVisionAPILabelExtractor, GoogleVisionAPIPropertyExtractor, GoogleVisionAPISafeSearchExtractor\n'), ((1679, 1722), 'pliers.extractors.GoogleVisionAPIFaceExtractor', 'GoogleVisionAPIFaceExtractor', ([], {'num_retries': '(5)'}), '(num_retries=5)\n', (1707, 1722), False, 'from pliers.extractors import GoogleVisionAPIFaceExtractor, GoogleVisionAPILabelExtractor, GoogleVisionAPIPropertyExtractor, GoogleVisionAPISafeSearchExtractor\n'), ((1798, 1817), 'pliers.stimuli.ImageStim', 'ImageStim', (['filename'], {}), '(filename)\n', (1807, 1817), False, 'from pliers.stimuli import ImageStim\n'), ((2222, 2241), 'pliers.stimuli.ImageStim', 'ImageStim', (['filename'], {}), '(filename)\n', (2231, 2241), False, 'from pliers.stimuli import ImageStim\n'), ((2276, 2332), 'pliers.extractors.GoogleVisionAPIFaceExtractor', 'GoogleVisionAPIFaceExtractor', ([], {'handle_annotations': '"""first"""'}), "(handle_annotations='first')\n", (2304, 2332), False, 'from pliers.extractors import GoogleVisionAPIFaceExtractor, GoogleVisionAPILabelExtractor, GoogleVisionAPIPropertyExtractor, GoogleVisionAPISafeSearchExtractor\n'), ((2449, 2506), 'pliers.extractors.GoogleVisionAPIFaceExtractor', 'GoogleVisionAPIFaceExtractor', ([], {'handle_annotations': '"""prefix"""'}), "(handle_annotations='prefix')\n", (2477, 2506), False, 'from pliers.extractors import GoogleVisionAPIFaceExtractor, GoogleVisionAPILabelExtractor, GoogleVisionAPIPropertyExtractor, GoogleVisionAPISafeSearchExtractor\n'), ((2780, 2824), 'pliers.extractors.GoogleVisionAPILabelExtractor', 'GoogleVisionAPILabelExtractor', ([], {'num_retries': '(5)'}), '(num_retries=5)\n', (2809, 2824), False, 'from pliers.extractors import GoogleVisionAPIFaceExtractor, GoogleVisionAPILabelExtractor, GoogleVisionAPIPropertyExtractor, GoogleVisionAPISafeSearchExtractor\n'), ((2900, 2919), 'pliers.stimuli.ImageStim', 'ImageStim', (['filename'], 
{}), '(filename)\n', (2909, 2919), False, 'from pliers.stimuli import ImageStim\n'), ((3172, 3219), 'pliers.extractors.GoogleVisionAPIPropertyExtractor', 'GoogleVisionAPIPropertyExtractor', ([], {'num_retries': '(5)'}), '(num_retries=5)\n', (3204, 3219), False, 'from pliers.extractors import GoogleVisionAPIFaceExtractor, GoogleVisionAPILabelExtractor, GoogleVisionAPIPropertyExtractor, GoogleVisionAPISafeSearchExtractor\n'), ((3295, 3314), 'pliers.stimuli.ImageStim', 'ImageStim', (['filename'], {}), '(filename)\n', (3304, 3314), False, 'from pliers.stimuli import ImageStim\n'), ((3410, 3445), 'numpy.isfinite', 'np.isfinite', (['result[158, 13, 29][0]'], {}), '(result[158, 13, 29][0])\n', (3421, 3445), True, 'import numpy as np\n'), ((3576, 3625), 'pliers.extractors.GoogleVisionAPISafeSearchExtractor', 'GoogleVisionAPISafeSearchExtractor', ([], {'num_retries': '(5)'}), '(num_retries=5)\n', (3610, 3625), False, 'from pliers.extractors import GoogleVisionAPIFaceExtractor, GoogleVisionAPILabelExtractor, GoogleVisionAPIPropertyExtractor, GoogleVisionAPISafeSearchExtractor\n'), ((3701, 3720), 'pliers.stimuli.ImageStim', 'ImageStim', (['filename'], {}), '(filename)\n', (3710, 3720), False, 'from pliers.stimuli import ImageStim\n')] |
# coding=utf-8
import argparse
import os
import time
from math import ceil
import caffe
import cv2
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--caffe_prototxt_path', default="model/RFB-320/RFB-320.prototxt", type=str, help='caffe_prototxt_path')
parser.add_argument('--caffe_model_path', default="model/RFB-320/RFB-320.caffemodel", type=str, help='caffe_model_path')
parser.add_argument('--input_size', default="320,240", type=str, help='define network input size,format: width,height')
parser.add_argument('--threshold', default=0.7, type=float, help='score threshold')
parser.add_argument('--imgs_path', default="../MNN/imgs", type=str, help='imgs dir')
parser.add_argument('--results_path', default="results", type=str, help='results dir')
parser.add_argument('--mode', default="cpu", type=str, help='cpu or gpu')
args = parser.parse_args()
if args.mode == "cpu":
caffe.set_mode_cpu()
elif args.mode == "gpu":
caffe.set_mode_gpu()
image_mean = np.array([127, 127, 127])
image_std = 128.0
iou_threshold = 0.3
center_variance = 0.1
size_variance = 0.2
min_boxes = [[10.0, 16.0, 24.0], [32.0, 48.0], [64.0, 96.0], [128.0, 192.0, 256.0]]
strides = [8.0, 16.0, 32.0, 64.0]
def define_img_size(image_size):
shrinkage_list = []
feature_map_w_h_list = []
for size in image_size:
feature_map = [int(ceil(size / stride)) for stride in strides]
feature_map_w_h_list.append(feature_map)
for i in range(0, len(image_size)):
shrinkage_list.append(strides)
priors = generate_priors(feature_map_w_h_list, shrinkage_list, image_size, min_boxes)
return priors
def generate_priors(feature_map_list, shrinkage_list, image_size, min_boxes):
priors = []
for index in range(0, len(feature_map_list[0])):
scale_w = image_size[0] / shrinkage_list[0][index]
scale_h = image_size[1] / shrinkage_list[1][index]
for j in range(0, feature_map_list[1][index]):
for i in range(0, feature_map_list[0][index]):
x_center = (i + 0.5) / scale_w
y_center = (j + 0.5) / scale_h
for min_box in min_boxes[index]:
w = min_box / image_size[0]
h = min_box / image_size[1]
priors.append([
x_center,
y_center,
w,
h
])
print("priors nums:{}".format(len(priors)))
return np.clip(priors, 0.0, 1.0)
def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):
scores = box_scores[:, -1]
boxes = box_scores[:, :-1]
picked = []
indexes = np.argsort(scores)
indexes = indexes[-candidate_size:]
while len(indexes) > 0:
current = indexes[-1]
picked.append(current)
if 0 < top_k == len(picked) or len(indexes) == 1:
break
current_box = boxes[current, :]
indexes = indexes[:-1]
rest_boxes = boxes[indexes, :]
iou = iou_of(
rest_boxes,
np.expand_dims(current_box, axis=0),
)
indexes = indexes[iou <= iou_threshold]
return box_scores[picked, :]
def area_of(left_top, right_bottom):
hw = np.clip(right_bottom - left_top, 0.0, None)
return hw[..., 0] * hw[..., 1]
def iou_of(boxes0, boxes1, eps=1e-5):
overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])
overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])
overlap_area = area_of(overlap_left_top, overlap_right_bottom)
area0 = area_of(boxes0[..., :2], boxes0[..., 2:])
area1 = area_of(boxes1[..., :2], boxes1[..., 2:])
return overlap_area / (area0 + area1 - overlap_area + eps)
def predict(width, height, confidences, boxes, prob_threshold, iou_threshold=0.3, top_k=-1):
boxes = boxes[0]
confidences = confidences[0]
picked_box_probs = []
picked_labels = []
for class_index in range(1, confidences.shape[1]):
probs = confidences[:, class_index]
mask = probs > prob_threshold
probs = probs[mask]
if probs.shape[0] == 0:
continue
subset_boxes = boxes[mask, :]
box_probs = np.concatenate([subset_boxes, probs.reshape(-1, 1)], axis=1)
box_probs = hard_nms(box_probs,
iou_threshold=iou_threshold,
top_k=top_k,
)
picked_box_probs.append(box_probs)
picked_labels.extend([class_index] * box_probs.shape[0])
if not picked_box_probs:
return np.array([]), np.array([]), np.array([])
picked_box_probs = np.concatenate(picked_box_probs)
picked_box_probs[:, 0] *= width
picked_box_probs[:, 1] *= height
picked_box_probs[:, 2] *= width
picked_box_probs[:, 3] *= height
return picked_box_probs[:, :4].astype(np.int32), np.array(picked_labels), picked_box_probs[:, 4]
def convert_locations_to_boxes(locations, priors, center_variance,
size_variance):
if len(priors.shape) + 1 == len(locations.shape):
priors = np.expand_dims(priors, 0)
return np.concatenate([
locations[..., :2] * center_variance * priors[..., 2:] + priors[..., :2],
np.exp(locations[..., 2:] * size_variance) * priors[..., 2:]
], axis=len(locations.shape) - 1)
def center_form_to_corner_form(locations):
return np.concatenate([locations[..., :2] - locations[..., 2:] / 2,
locations[..., :2] + locations[..., 2:] / 2], len(locations.shape) - 1)
def inference():
net = caffe.Net(args.caffe_prototxt_path, args.caffe_model_path, caffe.TEST)
input_size = [int(v.strip()) for v in args.input_size.split(",")]
witdh = input_size[0]
height = input_size[1]
priors = define_img_size(input_size)
net.blobs['input'].reshape(1, 3, height, witdh)
result_path = args.results_path
imgs_path = args.imgs_path
if not os.path.exists(result_path):
os.makedirs(result_path)
listdir = os.listdir(imgs_path)
for file_path in listdir:
img_path = os.path.join(imgs_path, file_path)
img_ori = cv2.imread(img_path)
tmp_batch = np.zeros([1, 3, height, witdh], dtype=np.float32)
rect = cv2.resize(img_ori, (witdh, height))
rect = cv2.cvtColor(rect, cv2.COLOR_BGR2RGB)
image = (rect - image_mean) / image_std
tmp_batch[0, :, :, :] = image.transpose(2, 0, 1)
net.blobs['input'].data[...] = tmp_batch
time_time = time.time()
scores = net.forward()['scores'][0]
boxes = net.forward()['boxes'][0]
print("inference time: {} s".format(round(time.time() - time_time, 4)))
boxes = np.expand_dims(np.reshape(boxes, (-1, 4)), axis=0)
scores = np.expand_dims(np.reshape(scores, (-1, 2)), axis=0)
boxes = convert_locations_to_boxes(boxes, priors, center_variance, size_variance)
boxes = center_form_to_corner_form(boxes)
boxes, labels, probs = predict(img_ori.shape[1], img_ori.shape[0], scores, boxes, args.threshold)
for i in range(boxes.shape[0]):
box = boxes[i, :]
cv2.rectangle(img_ori, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
cv2.imwrite(os.path.join(result_path, file_path), img_ori)
print("result_pic is written to {}".format(os.path.join(result_path, file_path)))
cv2.imshow("ultraFace_caffe_py", img_ori)
cv2.waitKey(-1)
cv2.destroyAllWindows()
if __name__ == '__main__':
inference()
| [
"numpy.clip",
"cv2.rectangle",
"cv2.imshow",
"numpy.argsort",
"numpy.array",
"cv2.destroyAllWindows",
"caffe.set_mode_cpu",
"os.path.exists",
"os.listdir",
"numpy.reshape",
"argparse.ArgumentParser",
"numpy.exp",
"numpy.concatenate",
"numpy.maximum",
"cv2.waitKey",
"cv2.cvtColor",
"c... | [((129, 154), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (152, 154), False, 'import argparse\n'), ((990, 1015), 'numpy.array', 'np.array', (['[127, 127, 127]'], {}), '([127, 127, 127])\n', (998, 1015), True, 'import numpy as np\n'), ((906, 926), 'caffe.set_mode_cpu', 'caffe.set_mode_cpu', ([], {}), '()\n', (924, 926), False, 'import caffe\n'), ((2499, 2524), 'numpy.clip', 'np.clip', (['priors', '(0.0)', '(1.0)'], {}), '(priors, 0.0, 1.0)\n', (2506, 2524), True, 'import numpy as np\n'), ((2690, 2708), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (2700, 2708), True, 'import numpy as np\n'), ((3258, 3301), 'numpy.clip', 'np.clip', (['(right_bottom - left_top)', '(0.0)', 'None'], {}), '(right_bottom - left_top, 0.0, None)\n', (3265, 3301), True, 'import numpy as np\n'), ((3400, 3444), 'numpy.maximum', 'np.maximum', (['boxes0[..., :2]', 'boxes1[..., :2]'], {}), '(boxes0[..., :2], boxes1[..., :2])\n', (3410, 3444), True, 'import numpy as np\n'), ((3472, 3516), 'numpy.minimum', 'np.minimum', (['boxes0[..., 2:]', 'boxes1[..., 2:]'], {}), '(boxes0[..., 2:], boxes1[..., 2:])\n', (3482, 3516), True, 'import numpy as np\n'), ((4678, 4710), 'numpy.concatenate', 'np.concatenate', (['picked_box_probs'], {}), '(picked_box_probs)\n', (4692, 4710), True, 'import numpy as np\n'), ((5633, 5703), 'caffe.Net', 'caffe.Net', (['args.caffe_prototxt_path', 'args.caffe_model_path', 'caffe.TEST'], {}), '(args.caffe_prototxt_path, args.caffe_model_path, caffe.TEST)\n', (5642, 5703), False, 'import caffe\n'), ((6074, 6095), 'os.listdir', 'os.listdir', (['imgs_path'], {}), '(imgs_path)\n', (6084, 6095), False, 'import os\n'), ((7520, 7543), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7541, 7543), False, 'import cv2\n'), ((956, 976), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ([], {}), '()\n', (974, 976), False, 'import caffe\n'), ((4910, 4933), 'numpy.array', 'np.array', (['picked_labels'], {}), '(picked_labels)\n', 
(4918, 4933), True, 'import numpy as np\n'), ((5145, 5170), 'numpy.expand_dims', 'np.expand_dims', (['priors', '(0)'], {}), '(priors, 0)\n', (5159, 5170), True, 'import numpy as np\n'), ((5998, 6025), 'os.path.exists', 'os.path.exists', (['result_path'], {}), '(result_path)\n', (6012, 6025), False, 'import os\n'), ((6035, 6059), 'os.makedirs', 'os.makedirs', (['result_path'], {}), '(result_path)\n', (6046, 6059), False, 'import os\n'), ((6145, 6179), 'os.path.join', 'os.path.join', (['imgs_path', 'file_path'], {}), '(imgs_path, file_path)\n', (6157, 6179), False, 'import os\n'), ((6198, 6218), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (6208, 6218), False, 'import cv2\n'), ((6239, 6288), 'numpy.zeros', 'np.zeros', (['[1, 3, height, witdh]'], {'dtype': 'np.float32'}), '([1, 3, height, witdh], dtype=np.float32)\n', (6247, 6288), True, 'import numpy as np\n'), ((6304, 6340), 'cv2.resize', 'cv2.resize', (['img_ori', '(witdh, height)'], {}), '(img_ori, (witdh, height))\n', (6314, 6340), False, 'import cv2\n'), ((6356, 6393), 'cv2.cvtColor', 'cv2.cvtColor', (['rect', 'cv2.COLOR_BGR2RGB'], {}), '(rect, cv2.COLOR_BGR2RGB)\n', (6368, 6393), False, 'import cv2\n'), ((6568, 6579), 'time.time', 'time.time', ([], {}), '()\n', (6577, 6579), False, 'import time\n'), ((7450, 7491), 'cv2.imshow', 'cv2.imshow', (['"""ultraFace_caffe_py"""', 'img_ori'], {}), "('ultraFace_caffe_py', img_ori)\n", (7460, 7491), False, 'import cv2\n'), ((7500, 7515), 'cv2.waitKey', 'cv2.waitKey', (['(-1)'], {}), '(-1)\n', (7511, 7515), False, 'import cv2\n'), ((3082, 3117), 'numpy.expand_dims', 'np.expand_dims', (['current_box'], {'axis': '(0)'}), '(current_box, axis=0)\n', (3096, 3117), True, 'import numpy as np\n'), ((4614, 4626), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4622, 4626), True, 'import numpy as np\n'), ((4628, 4640), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4636, 4640), True, 'import numpy as np\n'), ((4642, 4654), 'numpy.array', 'np.array', 
(['[]'], {}), '([])\n', (4650, 4654), True, 'import numpy as np\n'), ((6777, 6803), 'numpy.reshape', 'np.reshape', (['boxes', '(-1, 4)'], {}), '(boxes, (-1, 4))\n', (6787, 6803), True, 'import numpy as np\n'), ((6845, 6872), 'numpy.reshape', 'np.reshape', (['scores', '(-1, 2)'], {}), '(scores, (-1, 2))\n', (6855, 6872), True, 'import numpy as np\n'), ((7210, 7284), 'cv2.rectangle', 'cv2.rectangle', (['img_ori', '(box[0], box[1])', '(box[2], box[3])', '(0, 255, 0)', '(2)'], {}), '(img_ori, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)\n', (7223, 7284), False, 'import cv2\n'), ((7305, 7341), 'os.path.join', 'os.path.join', (['result_path', 'file_path'], {}), '(result_path, file_path)\n', (7317, 7341), False, 'import os\n'), ((1358, 1377), 'math.ceil', 'ceil', (['(size / stride)'], {}), '(size / stride)\n', (1362, 1377), False, 'from math import ceil\n'), ((5289, 5331), 'numpy.exp', 'np.exp', (['(locations[..., 2:] * size_variance)'], {}), '(locations[..., 2:] * size_variance)\n', (5295, 5331), True, 'import numpy as np\n'), ((7403, 7439), 'os.path.join', 'os.path.join', (['result_path', 'file_path'], {}), '(result_path, file_path)\n', (7415, 7439), False, 'import os\n'), ((6716, 6727), 'time.time', 'time.time', ([], {}), '()\n', (6725, 6727), False, 'import time\n')] |
# Name: <NAME>
# Date: 2 March 2020
# Program: biot_helix.py
import numpy as np
import matplotlib.pyplot as plt
import time as time
from matplotlib.patches import Circle
def biot(Rvec, wire, I):
mu_4pi = 10
dB = np.zeros((len(wire), 3))
R = Rvec - wire
Rsqr = np.sum( R**2, axis = 1 )
dL = (np.roll(wire, -1, axis = 0) - np.roll(wire, +1, axis = 0))/2
cr = np.cross(dL, R, axis = 1 )
dB = mu_4pi * I * cr/Rsqr[:,None]**(3/2)
dB = np.concatenate((dB, [dB[0,:]]), axis = 0)
Btot = np.array([simpson(dB[:,0], 1), simpson(dB[:,1], 1), simpson(dB[:,2], 1)])
return Btot
def simpson(f, dr):
total = dr/3*(np.sum(f[1:] + f[:-1]) + 2*np.sum(f[1::2]))
return total
def trapz(f, dL):
return dL/2*np.sum(f[1:] + f[:-1])
# setting up the square loop of wire
I = 0.01 # current, in amperes
L = 0.20 # length of helix in m
N = 500 # number of segments per side of square
segL = L/N # segment length in m
turns = 20 # number of loops in helix
radius = 0.03
# some useful segments to build a position array of the segments in helical wire
helix_x = np.arange(-L/2, L/2, segL)
helix_y, helix_z = np.zeros(helix_x.size), np.zeros(helix_x.size)
theta_steps = N/turns
dtheta = 2*np.pi/theta_steps
for i in range (N):
helix_y[i] = -np.sin(i*dtheta)*radius
helix_z[i] = np.cos(i*dtheta)*radius
helix = np.column_stack((helix_x, helix_y, helix_z))
# test the biot function for a single point (the origin)
pointCalc = True
if pointCalc:
# choose a point in space at which to calculate B
point = np.array([0.1,0.0,0.0])
Ti = time.time()
# call the biot function to calculate B
B = biot(point, helix,I)
print(point)
print(B)
print("duration: %5f" % (time.time()-Ti) )
# Create a 2D grid of x, y points using numpy's meshgrid function
gridstep=50
nx, ny, nz = gridstep,gridstep,gridstep
x = np.linspace(-0.2, 0.2, nx)
y = np.linspace(-0.2, 0.2, ny)
z = np.linspace(-0.2, 0.2, nz)
fig = plt.figure(figsize = (18,6))
ax1 = fig.add_subplot(2, 3, 1)
ax2 = fig.add_subplot(2, 3, 2)
ax3 = fig.add_subplot(2, 3, 3)
# Set up meshgrid as needed for the particular 2D streamplot
X, Z = np.meshgrid(x,z)
# Set up 3D array, Bgrid, for x,y,z-components of B at points in space
Bgrid = np.zeros([nx,nz,3])
Ti = time.time()
# Use for loops to populate Bgrid array with relevant B-field values
# XZ Plane
for i in range(nx):
for k in range(nz):
Bgrid[k,i, :] = biot(np.array([x[i],0.,z[k]]),helix,I)
# XY Plane
for i in range(nx):
for k in range(nz):
Bgrid[k,i, :] = biot(np.array([x[i],y[k],0.0]),helix,I)
print("duration: %5f" % (time.time()-Ti) )
# Use streamplot to show B-field
# XY plane
ax1.streamplot(X,Z,Bgrid[:,:,0],Bgrid[:,:,1], color = '0.50')
ax1.set_aspect('equal')
ax1.set_xlim((-0.2,0.2))
ax1.set_ylim((-0.2,0.2))
# add circles to plot to show where wire cross sections
ax1.add_artist(Circle((L/2,0),0.005,color='#aa0000'))
ax1.add_artist(Circle((-L/2,0),0.005,color='#0000aa'))
ax1.set_ylabel('y-direction', fontsize = '10')
ax1.set_xlabel('x-direction', fontsize = '10')
ax1.set_title('Magnetic Field due to a Helix (XY Plane)', fontweight = 'bold', fontsize = '13')
# XZ plane
ax2.streamplot(X,Z,Bgrid[:,:,0],Bgrid[:,:,2], color = '0.50')
ax2.set_aspect('equal')
ax2.set_xlim((-0.2,0.2))
ax2.set_ylim((-0.2,0.2))
# add circles to plot to show where wire cross sections
ax2.add_artist(Circle((L/2,0),0.005,color='#aa0000'))
ax2.add_artist(Circle((-L/2,0),0.005,color='#0000aa'))
ax2.set_ylabel('y-direction', fontsize = '10')
ax2.set_xlabel('x-direction', fontsize = '10')
ax2.set_title('Magnetic Field due to a Helix (XZ Plane)', fontweight = 'bold', fontsize = '13')
# YZ plane
ax3.streamplot(X,Z,Bgrid[:,:,1],Bgrid[:,:,2], color = '0.50')
ax3.set_aspect('equal')
ax3.set_xlim((-0.2,0.2))
ax3.set_ylim((-0.2,0.2))
# add circles to plot to show where wire cross sections
ax3.add_artist(Circle((L/2,0),0.005,color='#aa0000'))
ax3.add_artist(Circle((-L/2,0),0.005,color='#0000aa'))
ax3.set_ylabel('y-direction', fontsize = '10')
ax3.set_xlabel('x-direction', fontsize = '10')
ax3.set_title('Magnetic Field due to a Helix (YZ Plane)', fontweight = 'bold', fontsize = '13')
fig.tight_layout() | [
"numpy.roll",
"numpy.cross",
"numpy.column_stack",
"numpy.sum",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.array",
"numpy.concatenate",
"numpy.cos",
"numpy.sin",
"numpy.meshgrid",
"time.time",
"matplotlib.patches.Circle",
"numpy.arange"
] | [((1150, 1180), 'numpy.arange', 'np.arange', (['(-L / 2)', '(L / 2)', 'segL'], {}), '(-L / 2, L / 2, segL)\n', (1159, 1180), True, 'import numpy as np\n'), ((1410, 1454), 'numpy.column_stack', 'np.column_stack', (['(helix_x, helix_y, helix_z)'], {}), '((helix_x, helix_y, helix_z))\n', (1425, 1454), True, 'import numpy as np\n'), ((1950, 1976), 'numpy.linspace', 'np.linspace', (['(-0.2)', '(0.2)', 'nx'], {}), '(-0.2, 0.2, nx)\n', (1961, 1976), True, 'import numpy as np\n'), ((1981, 2007), 'numpy.linspace', 'np.linspace', (['(-0.2)', '(0.2)', 'ny'], {}), '(-0.2, 0.2, ny)\n', (1992, 2007), True, 'import numpy as np\n'), ((2012, 2038), 'numpy.linspace', 'np.linspace', (['(-0.2)', '(0.2)', 'nz'], {}), '(-0.2, 0.2, nz)\n', (2023, 2038), True, 'import numpy as np\n'), ((2046, 2073), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 6)'}), '(figsize=(18, 6))\n', (2056, 2073), True, 'import matplotlib.pyplot as plt\n'), ((2237, 2254), 'numpy.meshgrid', 'np.meshgrid', (['x', 'z'], {}), '(x, z)\n', (2248, 2254), True, 'import numpy as np\n'), ((2333, 2354), 'numpy.zeros', 'np.zeros', (['[nx, nz, 3]'], {}), '([nx, nz, 3])\n', (2341, 2354), True, 'import numpy as np\n'), ((2359, 2370), 'time.time', 'time.time', ([], {}), '()\n', (2368, 2370), True, 'import time as time\n'), ((285, 307), 'numpy.sum', 'np.sum', (['(R ** 2)'], {'axis': '(1)'}), '(R ** 2, axis=1)\n', (291, 307), True, 'import numpy as np\n'), ((400, 423), 'numpy.cross', 'np.cross', (['dL', 'R'], {'axis': '(1)'}), '(dL, R, axis=1)\n', (408, 423), True, 'import numpy as np\n'), ((486, 526), 'numpy.concatenate', 'np.concatenate', (['(dB, [dB[0, :]])'], {'axis': '(0)'}), '((dB, [dB[0, :]]), axis=0)\n', (500, 526), True, 'import numpy as np\n'), ((1197, 1219), 'numpy.zeros', 'np.zeros', (['helix_x.size'], {}), '(helix_x.size)\n', (1205, 1219), True, 'import numpy as np\n'), ((1221, 1243), 'numpy.zeros', 'np.zeros', (['helix_x.size'], {}), '(helix_x.size)\n', (1229, 1243), True, 'import numpy as 
np\n'), ((1612, 1637), 'numpy.array', 'np.array', (['[0.1, 0.0, 0.0]'], {}), '([0.1, 0.0, 0.0])\n', (1620, 1637), True, 'import numpy as np\n'), ((1645, 1656), 'time.time', 'time.time', ([], {}), '()\n', (1654, 1656), True, 'import time as time\n'), ((2979, 3021), 'matplotlib.patches.Circle', 'Circle', (['(L / 2, 0)', '(0.005)'], {'color': '"""#aa0000"""'}), "((L / 2, 0), 0.005, color='#aa0000')\n", (2985, 3021), False, 'from matplotlib.patches import Circle\n'), ((3033, 3076), 'matplotlib.patches.Circle', 'Circle', (['(-L / 2, 0)', '(0.005)'], {'color': '"""#0000aa"""'}), "((-L / 2, 0), 0.005, color='#0000aa')\n", (3039, 3076), False, 'from matplotlib.patches import Circle\n'), ((3485, 3527), 'matplotlib.patches.Circle', 'Circle', (['(L / 2, 0)', '(0.005)'], {'color': '"""#aa0000"""'}), "((L / 2, 0), 0.005, color='#aa0000')\n", (3491, 3527), False, 'from matplotlib.patches import Circle\n'), ((3539, 3582), 'matplotlib.patches.Circle', 'Circle', (['(-L / 2, 0)', '(0.005)'], {'color': '"""#0000aa"""'}), "((-L / 2, 0), 0.005, color='#0000aa')\n", (3545, 3582), False, 'from matplotlib.patches import Circle\n'), ((3991, 4033), 'matplotlib.patches.Circle', 'Circle', (['(L / 2, 0)', '(0.005)'], {'color': '"""#aa0000"""'}), "((L / 2, 0), 0.005, color='#aa0000')\n", (3997, 4033), False, 'from matplotlib.patches import Circle\n'), ((4045, 4088), 'matplotlib.patches.Circle', 'Circle', (['(-L / 2, 0)', '(0.005)'], {'color': '"""#0000aa"""'}), "((-L / 2, 0), 0.005, color='#0000aa')\n", (4051, 4088), False, 'from matplotlib.patches import Circle\n'), ((780, 802), 'numpy.sum', 'np.sum', (['(f[1:] + f[:-1])'], {}), '(f[1:] + f[:-1])\n', (786, 802), True, 'import numpy as np\n'), ((1377, 1395), 'numpy.cos', 'np.cos', (['(i * dtheta)'], {}), '(i * dtheta)\n', (1383, 1395), True, 'import numpy as np\n'), ((325, 350), 'numpy.roll', 'np.roll', (['wire', '(-1)'], {'axis': '(0)'}), '(wire, -1, axis=0)\n', (332, 350), True, 'import numpy as np\n'), ((355, 380), 'numpy.roll', 'np.roll', 
(['wire', '(+1)'], {'axis': '(0)'}), '(wire, +1, axis=0)\n', (362, 380), True, 'import numpy as np\n'), ((679, 701), 'numpy.sum', 'np.sum', (['(f[1:] + f[:-1])'], {}), '(f[1:] + f[:-1])\n', (685, 701), True, 'import numpy as np\n'), ((1336, 1354), 'numpy.sin', 'np.sin', (['(i * dtheta)'], {}), '(i * dtheta)\n', (1342, 1354), True, 'import numpy as np\n'), ((2525, 2552), 'numpy.array', 'np.array', (['[x[i], 0.0, z[k]]'], {}), '([x[i], 0.0, z[k]])\n', (2533, 2552), True, 'import numpy as np\n'), ((2644, 2671), 'numpy.array', 'np.array', (['[x[i], y[k], 0.0]'], {}), '([x[i], y[k], 0.0])\n', (2652, 2671), True, 'import numpy as np\n'), ((2705, 2716), 'time.time', 'time.time', ([], {}), '()\n', (2714, 2716), True, 'import time as time\n'), ((706, 721), 'numpy.sum', 'np.sum', (['f[1::2]'], {}), '(f[1::2])\n', (712, 721), True, 'import numpy as np\n'), ((1795, 1806), 'time.time', 'time.time', ([], {}), '()\n', (1804, 1806), True, 'import time as time\n')] |
from chainer import functions as F
from scipy import special
import numpy as np
from scipy import ndimage
def log_beta_distribution(x, a, b):
eps = 1e-5
lnp = ((a - 1) * F.log(x + eps)
+ (b - 1) * F.log(1 - x + eps)
- float(special.beta(a, b)))
return lnp
def make_laplacian_of_gaussian_filter(sigma, ksize, angle):
# Make laplacian of gaussian filter
f = np.zeros([101, 101])
x = np.arange(-50, 51)
f[50, :] = (x**2 - sigma**2) / sigma**4 * np.exp(- x**2 / 2 / sigma**2)
# Rotate, note that angle is in degree
f = ndimage.rotate(f, angle, reshape=False, order=1)
# Crop by ksize
f = f[50 - int(ksize / 2):50 + int(ksize / 2) + 1,
50 - int(ksize / 2):50 + int(ksize / 2) + 1]
return f
| [
"chainer.functions.log",
"scipy.special.beta",
"numpy.exp",
"numpy.zeros",
"scipy.ndimage.rotate",
"numpy.arange"
] | [((403, 423), 'numpy.zeros', 'np.zeros', (['[101, 101]'], {}), '([101, 101])\n', (411, 423), True, 'import numpy as np\n'), ((432, 450), 'numpy.arange', 'np.arange', (['(-50)', '(51)'], {}), '(-50, 51)\n', (441, 450), True, 'import numpy as np\n'), ((579, 627), 'scipy.ndimage.rotate', 'ndimage.rotate', (['f', 'angle'], {'reshape': '(False)', 'order': '(1)'}), '(f, angle, reshape=False, order=1)\n', (593, 627), False, 'from scipy import ndimage\n'), ((497, 529), 'numpy.exp', 'np.exp', (['(-x ** 2 / 2 / sigma ** 2)'], {}), '(-x ** 2 / 2 / sigma ** 2)\n', (503, 529), True, 'import numpy as np\n'), ((256, 274), 'scipy.special.beta', 'special.beta', (['a', 'b'], {}), '(a, b)\n', (268, 274), False, 'from scipy import special\n'), ((180, 194), 'chainer.functions.log', 'F.log', (['(x + eps)'], {}), '(x + eps)\n', (185, 194), True, 'from chainer import functions as F\n'), ((218, 236), 'chainer.functions.log', 'F.log', (['(1 - x + eps)'], {}), '(1 - x + eps)\n', (223, 236), True, 'from chainer import functions as F\n')] |
import os
import re
import base64
import webbrowser
import time
import tempfile
import numpy as np
import matplotlib
from numpy.testing import assert_warns, assert_no_warnings
try:
from lxml import etree
LXML_INSTALLED = True
except ImportError:
LXML_INSTALLED = False
from nilearn.plotting import js_plotting_utils
from nilearn import surface
from nilearn.datasets import fetch_surf_fsaverage
# Note: html output by nilearn view_* functions
# should validate as html5 using https://validator.w3.org/nu/ with no
# warnings
def _normalize_ws(text):
return re.sub(r'\s+', ' ', text)
def test_add_js_lib():
    """JS dependencies are added either as CDN links or embedded verbatim."""
    template = js_plotting_utils.get_html_template('surface_plot_template.html')
    # CDN mode: script tags should point at remote jquery / plotly copies.
    cdn_html = js_plotting_utils.add_js_lib(template, embed_js=False)
    assert "decodeBase64" in cdn_html
    cdn_scripts = _normalize_ws("""<script
    src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js">
    </script>
    <script src="https://cdn.plot.ly/plotly-gl3d-latest.min.js"></script>
    """)
    assert cdn_scripts in _normalize_ws(cdn_html)
    # Embedded mode: the library sources themselves appear inline.
    inline_html = _normalize_ws(js_plotting_utils.add_js_lib(template, embed_js=True))
    jquery_banner = _normalize_ws("""/*! jQuery v3.3.1 | (c) JS Foundation and other
    contributors | jquery.org/license */""")
    assert jquery_banner in inline_html
    plotly_banner = _normalize_ws("""**
    * plotly.js (gl3d - minified) v1.38.3
    * Copyright 2012-2018, Plotly, Inc.
    * All rights reserved.
    * Licensed under the MIT license
    */ """)
    assert plotly_banner in inline_html
    assert "decodeBase64" in inline_html
def check_colors(colors):
    """Sanity-check a 100-entry plotly colorscale.

    Asserts that the anchor values span [0, 1] (approximately evenly) and
    that every color is an ``rgb(r, g, b)`` string, then returns the
    ``(values, color_strings)`` tuples for further checks by the caller.
    """
    assert len(colors) == 100
    anchors, rgb_strings = zip(*colors)
    expected = np.linspace(0, 1, 100)
    assert np.allclose(expected, anchors, atol=1e-3)
    assert anchors[0] == 0
    assert anchors[-1] == 1
    rgb_pattern = re.compile(r'rgb\(\d+, \d+, \d+\)')
    for color in rgb_strings:
        assert rgb_pattern.match(color)
    return anchors, rgb_strings
def test_colorscale_no_threshold():
    """Without a threshold the scale is symmetric and nothing is masked."""
    values = np.linspace(-13, -1.5, 20)
    colors = js_plotting_utils.colorscale('jet', values, None)
    check_colors(colors['colors'])
    # Range is symmetrized around zero from the data extrema.
    assert colors['vmin'] == -13
    assert colors['vmax'] == 13
    assert colors['cmap'].N == 256
    assert colors['norm'].vmin == -13
    assert colors['norm'].vmax == 13
    assert colors['abs_threshold'] is None
def test_colorscale_threshold_0():
    """A '0%' threshold resolves to the smallest absolute value."""
    values = np.linspace(-13, -1.5, 20)
    colors = js_plotting_utils.colorscale('jet', values, '0%')
    check_colors(colors['colors'])
    assert colors['vmin'] == -13
    assert colors['vmax'] == 13
    assert colors['cmap'].N == 256
    assert colors['norm'].vmin == -13
    assert colors['norm'].vmax == 13
    # 0th percentile of |values| is min(|values|) = 1.5.
    assert colors['abs_threshold'] == 1.5
    assert colors['symmetric_cmap']
def test_colorscale_threshold_99():
    """A '99%' threshold pushes the cutoff to the largest absolute value."""
    colors = js_plotting_utils.colorscale(
        'jet', np.linspace(-13, -1.5, 20), '99%')
    check_colors(colors['colors'])
    assert colors['vmin'] == -13
    assert colors['vmax'] == 13
    assert colors['cmap'].N == 256
    assert colors['norm'].vmax == 13
    assert colors['norm'].vmin == -13
    # 13 is the largest absolute value in the data
    assert colors['abs_threshold'] == 13
    assert colors['symmetric_cmap']
def test_colorscale_threshold_50():
    """A '50%' threshold masks the middle half of the absolute value range."""
    cmap = 'jet'
    values = np.linspace(-13, -1.5, 20)
    threshold = '50%'
    colors = js_plotting_utils.colorscale(cmap, values, threshold)
    val, cstring = check_colors(colors['colors'])
    # the thresholded mid-range is rendered grey
    assert cstring[50] == 'rgb(127, 127, 127)'
    assert (colors['vmin'], colors['vmax']) == (-13, 13)
    assert colors['cmap'].N == 256
    assert (colors['norm'].vmax, colors['norm'].vmin) == (13, -13)
    # BUG FIX: the original passed 2 as np.allclose's third positional
    # argument, i.e. rtol=2 -- a 200% relative tolerance that made the
    # assertion pass for almost any value.  Use an explicit absolute
    # tolerance so the check is meaningful.
    assert np.allclose(colors['abs_threshold'], 7.55, atol=0.5)
    assert colors['symmetric_cmap']
def test_colorscale_absolute_threshold():
    """A numeric threshold is applied as an absolute cutoff."""
    colors = js_plotting_utils.colorscale(
        'jet', np.linspace(-13, -1.5, 20), 7.25)
    val, cstring = check_colors(colors['colors'])
    # the thresholded mid-range is rendered grey
    assert cstring[50] == 'rgb(127, 127, 127)'
    assert colors['vmin'] == -13
    assert colors['vmax'] == 13
    assert colors['cmap'].N == 256
    assert colors['norm'].vmax == 13
    assert colors['norm'].vmin == -13
    assert np.allclose(colors['abs_threshold'], 7.25)
    assert colors['symmetric_cmap']
def test_colorscale_asymmetric_cmap():
    """With symmetric_cmap=False the scale spans exactly the data's range."""
    cmap = 'jet'
    first = js_plotting_utils.colorscale(cmap, np.arange(15),
                                          symmetric_cmap=False)
    assert (first['vmin'], first['vmax']) == (0, 14)
    assert first['cmap'].N == 256
    assert (first['norm'].vmax, first['norm'].vmin) == (14, 0)
    assert not first['symmetric_cmap']
    # shifting the data shifts the scale accordingly
    shifted = js_plotting_utils.colorscale(cmap, np.arange(15) + 3,
                                            symmetric_cmap=False)
    assert (shifted['vmin'], shifted['vmax']) == (3, 17)
    assert (shifted['norm'].vmax, shifted['norm'].vmin) == (17, 3)
def test_colorscale_vmin_vmax():
    """An explicit vmax keeps the scale symmetric; vmin is overridden."""
    cmap = 'jet'
    values = np.arange(15)
    # the second call adds vmin=-5, which a symmetric cmap ignores
    for kwargs in ({'vmax': 7}, {'vmax': 7, 'vmin': -5}):
        colors = js_plotting_utils.colorscale(cmap, values, **kwargs)
        assert (colors['vmin'], colors['vmax']) == (-7, 7)
        assert colors['cmap'].N == 256
        assert (colors['norm'].vmax, colors['norm'].vmin) == (7, -7)
        assert colors['symmetric_cmap']
def test_colorscale_asymmetric_cmap_vmax():
    """vmax/vmin interact with symmetric_cmap=False and with thresholds."""
    cmap = 'jet'
    values = np.arange(15)
    colors = js_plotting_utils.colorscale(cmap, values, vmax=7,
                                          symmetric_cmap=False)
    assert (colors['vmin'], colors['vmax']) == (0, 7)
    assert colors['cmap'].N == 256
    assert (colors['norm'].vmax, colors['norm'].vmin) == (7, 0)
    assert not colors['symmetric_cmap']
    # data not containing 0: vmin falls back to the data minimum
    values = np.arange(15) + 3
    colors = js_plotting_utils.colorscale(cmap, values, vmax=7,
                                          symmetric_cmap=False)
    assert (colors['vmin'], colors['vmax']) == (3, 7)
    assert (colors['norm'].vmax, colors['norm'].vmin) == (7, 3)
    # an explicit vmin wins over the data minimum
    colors = js_plotting_utils.colorscale(
        cmap, values, vmax=7, symmetric_cmap=False, vmin=1)
    assert (colors['vmin'], colors['vmax']) == (1, 7)
    assert (colors['norm'].vmax, colors['norm'].vmin) == (7, 1)
    # a threshold forces vmin back to 0, whatever vmin was requested
    for vmin in (6, None):
        colors = js_plotting_utils.colorscale(
            cmap, values, vmax=10, symmetric_cmap=False, vmin=vmin,
            threshold=5)
        assert (colors['vmin'], colors['vmax']) == (0, 10)
        assert (colors['norm'].vmax, colors['norm'].vmin) == (10, 0)
def test_colorscale_asymmetric_cmap_negative_values():
    """Negative data warns about symmetric_cmap=False and stays symmetric."""
    cmap = 'jet'
    values = np.linspace(-15, 4)
    # requesting an asymmetric cmap on mixed-sign data triggers a warning
    assert_warns(UserWarning, js_plotting_utils.colorscale, cmap,
                 values, symmetric_cmap=False)
    colors = js_plotting_utils.colorscale(cmap, values, vmax=7,
                                          symmetric_cmap=False)
    assert (colors['vmin'], colors['vmax']) == (-7, 7)
    assert colors['cmap'].N == 256
    assert (colors['norm'].vmax, colors['norm'].vmin) == (7, -7)
    assert colors['symmetric_cmap']
def test_encode():
    """encode/decode round-trip arrays of both endiannesses losslessly."""
    for dtype in ['<f4', '<i4', '>f4', '>i4']:
        original = np.arange(10, dtype=dtype)
        encoded = js_plotting_utils.encode(original)
        # decode by hand via base64 and compare with the library decoder
        raw = base64.b64decode(encoded.encode('utf-8'))
        restored = np.frombuffer(raw, dtype=dtype)
        assert np.allclose(
            js_plotting_utils.decode(encoded, dtype=dtype), restored)
        assert np.allclose(original, restored)
def test_mesh_to_plotly():
    """mesh_to_plotly encodes mesh coordinates and triangles faithfully."""
    fsaverage = fetch_surf_fsaverage()
    coord, triangles = surface.load_surf_mesh(fsaverage['pial_left'])
    payload = js_plotting_utils.mesh_to_plotly(fsaverage['pial_left'])
    # vertex coordinates are float32-encoded per axis
    for axis, key in enumerate(['_x', '_y', '_z']):
        assert np.allclose(
            js_plotting_utils.decode(payload[key], '<f4'), coord[:, axis])
    # triangle indices are int32-encoded per corner
    for axis, key in enumerate(['_i', '_j', '_k']):
        assert np.allclose(
            js_plotting_utils.decode(payload[key], '<i4'), triangles[:, axis])
def check_html(html, check_selects=True, plot_div_id='surface-plot'):
    """Shared sanity checks for an HTMLDocument produced by the viewers.

    Verifies saving, standalone/iframe rendering, browser opening and
    resizing; when lxml is available it additionally inspects the DOM.

    Parameters
    ----------
    html : the HTMLDocument under test.
    check_selects : if True, also check the hemisphere/kind/view dropdowns.
    plot_div_id : expected ``id`` attribute of the plot container <div>.
    """
    # Saving to disk must reproduce exactly the standalone page.
    fd, tmpfile = tempfile.mkstemp()
    try:
        os.close(fd)
        html.save_as_html(tmpfile)
        with open(tmpfile) as f:
            saved = f.read()
        assert saved == html.get_standalone()
    finally:
        os.remove(tmpfile)
    # All template placeholders must have been substituted.
    assert "INSERT" not in html.html
    assert html.get_standalone() == html.html
    assert html._repr_html_() == html.get_iframe()
    assert str(html) == html.get_standalone()
    assert '<meta charset="UTF-8" />' in str(html)
    _check_open_in_browser(html)
    # resize() mutates in place and returns the same object (fluent API).
    resized = html.resize(3, 17)
    assert resized is html
    assert (html.width, html.height) == (3, 17)
    assert 'width="3" height="17"' in html.get_iframe()
    assert 'width="33" height="37"' in html.get_iframe(33, 37)
    if not LXML_INSTALLED:
        return
    # DOM-level checks below require lxml.
    root = etree.HTML(html.html.encode('utf-8'),
                      parser=etree.HTMLParser(huge_tree=True))
    head = root.find('head')
    assert len(head.findall('script')) == 5
    body = root.find('body')
    div = body.find('div')
    assert ('id', plot_div_id) in div.items()
    if not check_selects:
        return
    # Three dropdowns: hemisphere (2 options), kind (2) and view (7).
    selects = body.findall('select')
    assert len(selects) == 3
    hemi = selects[0]
    assert ('id', 'select-hemisphere') in hemi.items()
    assert len(hemi.findall('option')) == 2
    kind = selects[1]
    assert ('id', 'select-kind') in kind.items()
    assert len(kind.findall('option')) == 2
    view = selects[2]
    assert ('id', 'select-view') in view.items()
    assert len(view.findall('option')) == 7
def _open_mock(f):
print('opened {}'.format(f))
def _check_open_in_browser(html):
    """Exercise open_in_browser() / remove_temp_file() without a browser.

    webbrowser.open is monkeypatched with _open_mock so no real browser
    window is spawned; the original hook is restored in the finally block.
    """
    wb_open = webbrowser.open
    webbrowser.open = _open_mock
    try:
        html.open_in_browser(temp_file_lifetime=None)
        temp_file = html._temp_file
        assert html._temp_file is not None
        assert os.path.isfile(temp_file)
        html.remove_temp_file()
        # removal clears the attribute and deletes the file on disk
        assert html._temp_file is None
        assert not os.path.isfile(temp_file)
        # removing again -- or with a bogus path -- must not raise
        html.remove_temp_file()
        html._temp_file = 'aaaaaaaaaaaaaaaaaaaaaa'
        html.remove_temp_file()
    finally:
        webbrowser.open = wb_open
        try:
            os.remove(temp_file)
        except Exception:
            pass
def test_temp_file_removing():
    """Temp files expire after temp_file_lifetime but persist when it is None."""
    doc = js_plotting_utils.HTMLDocument('hello')
    original_open = webbrowser.open
    webbrowser.open = _open_mock
    try:
        # finite lifetime: the temp file disappears after the timeout
        doc.open_in_browser(temp_file_lifetime=.5)
        assert os.path.isfile(doc._temp_file)
        time.sleep(1.5)
        assert not os.path.isfile(doc._temp_file)
        # lifetime=None: the temp file survives
        doc.open_in_browser(temp_file_lifetime=None)
        assert os.path.isfile(doc._temp_file)
        time.sleep(1.5)
        assert os.path.isfile(doc._temp_file)
    finally:
        webbrowser.open = original_open
        try:
            os.remove(doc._temp_file)
        except Exception:
            pass
def _open_views():
    """Create 12 HTMLDocument instances that all stay alive at once."""
    return [js_plotting_utils.HTMLDocument('') for _ in range(12)]
def _open_one_view():
    """Create 12 documents one after another, keeping only the last alive."""
    view = None
    for _ in range(12):
        view = js_plotting_utils.HTMLDocument('')
    return view
def test_open_view_warning():
    """Many live documents warn about memory; one at a time does not."""
    # opening many views without deleting the objects should raise a
    # warning about memory usage
    assert_warns(UserWarning, _open_views)
    # sequential creation keeps only one alive, so no warning
    assert_no_warnings(_open_one_view)
def test_to_color_strings():
    """to_color_strings normalizes RGB triples, RGBA, hex and named colors.

    Every input format should map to lowercase 6-digit hex strings; any
    alpha channel is dropped.
    """
    colors = [[0, 0, 1], [1, 0, 0], [.5, .5, .5]]
    as_str = js_plotting_utils.to_color_strings(colors)
    assert as_str == ['#0000ff', '#ff0000', '#7f7f7f']
    # RGBA input: the alpha channel is ignored
    colors = [[0, 0, 1, 1], [1, 0, 0, 1], [.5, .5, .5, 0]]
    as_str = js_plotting_utils.to_color_strings(colors)
    assert as_str == ['#0000ff', '#ff0000', '#7f7f7f']
    # hex strings pass through unchanged
    colors = ['#0000ff', '#ff0000', '#7f7f7f']
    as_str = js_plotting_utils.to_color_strings(colors)
    assert as_str == ['#0000ff', '#ff0000', '#7f7f7f']
    # (an exact duplicate of the RGBA check used to sit here; removed)
    colors = ['r', 'green', 'black', 'white']
    as_str = js_plotting_utils.to_color_strings(colors)
    assert as_str == ['#ff0000', '#008000', '#000000', '#ffffff']
    if matplotlib.__version__ < '2':
        return
    # 8-digit hex (with alpha) requires matplotlib >= 2
    colors = ['#0000ffff', '#ff0000ab', '#7f7f7f00']
    as_str = js_plotting_utils.to_color_strings(colors)
    assert as_str == ['#0000ff', '#ff0000', '#7f7f7f']
| [
"nilearn.datasets.fetch_surf_fsaverage",
"time.sleep",
"numpy.arange",
"os.remove",
"numpy.testing.assert_warns",
"lxml.etree.HTMLParser",
"nilearn.surface.load_surf_mesh",
"nilearn.plotting.js_plotting_utils.to_color_strings",
"nilearn.plotting.js_plotting_utils.get_html_template",
"nilearn.plott... | [((577, 602), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'text'], {}), "('\\\\s+', ' ', text)\n", (583, 602), False, 'import re\n'), ((639, 704), 'nilearn.plotting.js_plotting_utils.get_html_template', 'js_plotting_utils.get_html_template', (['"""surface_plot_template.html"""'], {}), "('surface_plot_template.html')\n", (674, 704), False, 'from nilearn.plotting import js_plotting_utils\n'), ((715, 765), 'nilearn.plotting.js_plotting_utils.add_js_lib', 'js_plotting_utils.add_js_lib', (['html'], {'embed_js': '(False)'}), '(html, embed_js=False)\n', (743, 765), False, 'from nilearn.plotting import js_plotting_utils\n'), ((1986, 2012), 'numpy.linspace', 'np.linspace', (['(-13)', '(-1.5)', '(20)'], {}), '(-13, -1.5, 20)\n', (1997, 2012), True, 'import numpy as np\n'), ((2047, 2100), 'nilearn.plotting.js_plotting_utils.colorscale', 'js_plotting_utils.colorscale', (['cmap', 'values', 'threshold'], {}), '(cmap, values, threshold)\n', (2075, 2100), False, 'from nilearn.plotting import js_plotting_utils\n'), ((2405, 2431), 'numpy.linspace', 'np.linspace', (['(-13)', '(-1.5)', '(20)'], {}), '(-13, -1.5, 20)\n', (2416, 2431), True, 'import numpy as np\n'), ((2466, 2519), 'nilearn.plotting.js_plotting_utils.colorscale', 'js_plotting_utils.colorscale', (['cmap', 'values', 'threshold'], {}), '(cmap, values, threshold)\n', (2494, 2519), False, 'from nilearn.plotting import js_plotting_utils\n'), ((2860, 2886), 'numpy.linspace', 'np.linspace', (['(-13)', '(-1.5)', '(20)'], {}), '(-13, -1.5, 20)\n', (2871, 2886), True, 'import numpy as np\n'), ((2922, 2975), 'nilearn.plotting.js_plotting_utils.colorscale', 'js_plotting_utils.colorscale', (['cmap', 'values', 'threshold'], {}), '(cmap, values, threshold)\n', (2950, 2975), False, 'from nilearn.plotting import js_plotting_utils\n'), ((3315, 3341), 'numpy.linspace', 'np.linspace', (['(-13)', '(-1.5)', '(20)'], {}), '(-13, -1.5, 20)\n', (3326, 3341), True, 'import numpy as np\n'), ((3377, 3430), 
'nilearn.plotting.js_plotting_utils.colorscale', 'js_plotting_utils.colorscale', (['cmap', 'values', 'threshold'], {}), '(cmap, values, threshold)\n', (3405, 3430), False, 'from nilearn.plotting import js_plotting_utils\n'), ((3698, 3743), 'numpy.allclose', 'np.allclose', (["colors['abs_threshold']", '(7.55)', '(2)'], {}), "(colors['abs_threshold'], 7.55, 2)\n", (3709, 3743), True, 'import numpy as np\n'), ((3854, 3880), 'numpy.linspace', 'np.linspace', (['(-13)', '(-1.5)', '(20)'], {}), '(-13, -1.5, 20)\n', (3865, 3880), True, 'import numpy as np\n'), ((3915, 3968), 'nilearn.plotting.js_plotting_utils.colorscale', 'js_plotting_utils.colorscale', (['cmap', 'values', 'threshold'], {}), '(cmap, values, threshold)\n', (3943, 3968), False, 'from nilearn.plotting import js_plotting_utils\n'), ((4236, 4278), 'numpy.allclose', 'np.allclose', (["colors['abs_threshold']", '(7.25)'], {}), "(colors['abs_threshold'], 7.25)\n", (4247, 4278), True, 'import numpy as np\n'), ((4386, 4399), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (4395, 4399), True, 'import numpy as np\n'), ((4413, 4477), 'nilearn.plotting.js_plotting_utils.colorscale', 'js_plotting_utils.colorscale', (['cmap', 'values'], {'symmetric_cmap': '(False)'}), '(cmap, values, symmetric_cmap=False)\n', (4441, 4477), False, 'from nilearn.plotting import js_plotting_utils\n'), ((4717, 4781), 'nilearn.plotting.js_plotting_utils.colorscale', 'js_plotting_utils.colorscale', (['cmap', 'values'], {'symmetric_cmap': '(False)'}), '(cmap, values, symmetric_cmap=False)\n', (4745, 4781), False, 'from nilearn.plotting import js_plotting_utils\n'), ((4967, 4980), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (4976, 4980), True, 'import numpy as np\n'), ((4994, 5044), 'nilearn.plotting.js_plotting_utils.colorscale', 'js_plotting_utils.colorscale', (['cmap', 'values'], {'vmax': '(7)'}), '(cmap, values, vmax=7)\n', (5022, 5044), False, 'from nilearn.plotting import js_plotting_utils\n'), ((5249, 5308), 
'nilearn.plotting.js_plotting_utils.colorscale', 'js_plotting_utils.colorscale', (['cmap', 'values'], {'vmax': '(7)', 'vmin': '(-5)'}), '(cmap, values, vmax=7, vmin=-5)\n', (5277, 5308), False, 'from nilearn.plotting import js_plotting_utils\n'), ((5585, 5598), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (5594, 5598), True, 'import numpy as np\n'), ((5612, 5684), 'nilearn.plotting.js_plotting_utils.colorscale', 'js_plotting_utils.colorscale', (['cmap', 'values'], {'vmax': '(7)', 'symmetric_cmap': '(False)'}), '(cmap, values, vmax=7, symmetric_cmap=False)\n', (5640, 5684), False, 'from nilearn.plotting import js_plotting_utils\n'), ((5964, 6036), 'nilearn.plotting.js_plotting_utils.colorscale', 'js_plotting_utils.colorscale', (['cmap', 'values'], {'vmax': '(7)', 'symmetric_cmap': '(False)'}), '(cmap, values, vmax=7, symmetric_cmap=False)\n', (5992, 6036), False, 'from nilearn.plotting import js_plotting_utils\n'), ((6210, 6295), 'nilearn.plotting.js_plotting_utils.colorscale', 'js_plotting_utils.colorscale', (['cmap', 'values'], {'vmax': '(7)', 'symmetric_cmap': '(False)', 'vmin': '(1)'}), '(cmap, values, vmax=7, symmetric_cmap=False, vmin=1\n )\n', (6238, 6295), False, 'from nilearn.plotting import js_plotting_utils\n'), ((6431, 6529), 'nilearn.plotting.js_plotting_utils.colorscale', 'js_plotting_utils.colorscale', (['cmap', 'values'], {'vmax': '(10)', 'symmetric_cmap': '(False)', 'vmin': '(6)', 'threshold': '(5)'}), '(cmap, values, vmax=10, symmetric_cmap=False,\n vmin=6, threshold=5)\n', (6459, 6529), False, 'from nilearn.plotting import js_plotting_utils\n'), ((6668, 6769), 'nilearn.plotting.js_plotting_utils.colorscale', 'js_plotting_utils.colorscale', (['cmap', 'values'], {'vmax': '(10)', 'symmetric_cmap': '(False)', 'vmin': 'None', 'threshold': '(5)'}), '(cmap, values, vmax=10, symmetric_cmap=False,\n vmin=None, threshold=5)\n', (6696, 6769), False, 'from nilearn.plotting import js_plotting_utils\n'), ((6982, 7001), 'numpy.linspace', 'np.linspace', 
(['(-15)', '(4)'], {}), '(-15, 4)\n', (6993, 7001), True, 'import numpy as np\n'), ((7006, 7101), 'numpy.testing.assert_warns', 'assert_warns', (['UserWarning', 'js_plotting_utils.colorscale', 'cmap', 'values'], {'symmetric_cmap': '(False)'}), '(UserWarning, js_plotting_utils.colorscale, cmap, values,\n symmetric_cmap=False)\n', (7018, 7101), False, 'from numpy.testing import assert_warns, assert_no_warnings\n'), ((7129, 7201), 'nilearn.plotting.js_plotting_utils.colorscale', 'js_plotting_utils.colorscale', (['cmap', 'values'], {'vmax': '(7)', 'symmetric_cmap': '(False)'}), '(cmap, values, vmax=7, symmetric_cmap=False)\n', (7157, 7201), False, 'from nilearn.plotting import js_plotting_utils\n'), ((7852, 7874), 'nilearn.datasets.fetch_surf_fsaverage', 'fetch_surf_fsaverage', ([], {}), '()\n', (7872, 7874), False, 'from nilearn.datasets import fetch_surf_fsaverage\n'), ((7898, 7944), 'nilearn.surface.load_surf_mesh', 'surface.load_surf_mesh', (["fsaverage['pial_left']"], {}), "(fsaverage['pial_left'])\n", (7920, 7944), False, 'from nilearn import surface\n'), ((7958, 8014), 'nilearn.plotting.js_plotting_utils.mesh_to_plotly', 'js_plotting_utils.mesh_to_plotly', (["fsaverage['pial_left']"], {}), "(fsaverage['pial_left'])\n", (7990, 8014), False, 'from nilearn.plotting import js_plotting_utils\n'), ((8405, 8423), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (8421, 8423), False, 'import tempfile\n'), ((10662, 10701), 'nilearn.plotting.js_plotting_utils.HTMLDocument', 'js_plotting_utils.HTMLDocument', (['"""hello"""'], {}), "('hello')\n", (10692, 10701), False, 'from nilearn.plotting import js_plotting_utils\n'), ((11610, 11648), 'numpy.testing.assert_warns', 'assert_warns', (['UserWarning', '_open_views'], {}), '(UserWarning, _open_views)\n', (11622, 11648), False, 'from numpy.testing import assert_warns, assert_no_warnings\n'), ((11653, 11687), 'numpy.testing.assert_no_warnings', 'assert_no_warnings', (['_open_one_view'], {}), '(_open_one_view)\n', (11671, 
11687), False, 'from numpy.testing import assert_warns, assert_no_warnings\n'), ((11782, 11824), 'nilearn.plotting.js_plotting_utils.to_color_strings', 'js_plotting_utils.to_color_strings', (['colors'], {}), '(colors)\n', (11816, 11824), False, 'from nilearn.plotting import js_plotting_utils\n'), ((11953, 11995), 'nilearn.plotting.js_plotting_utils.to_color_strings', 'js_plotting_utils.to_color_strings', (['colors'], {}), '(colors)\n', (11987, 11995), False, 'from nilearn.plotting import js_plotting_utils\n'), ((12112, 12154), 'nilearn.plotting.js_plotting_utils.to_color_strings', 'js_plotting_utils.to_color_strings', (['colors'], {}), '(colors)\n', (12146, 12154), False, 'from nilearn.plotting import js_plotting_utils\n'), ((12283, 12325), 'nilearn.plotting.js_plotting_utils.to_color_strings', 'js_plotting_utils.to_color_strings', (['colors'], {}), '(colors)\n', (12317, 12325), False, 'from nilearn.plotting import js_plotting_utils\n'), ((12441, 12483), 'nilearn.plotting.js_plotting_utils.to_color_strings', 'js_plotting_utils.to_color_strings', (['colors'], {}), '(colors)\n', (12475, 12483), False, 'from nilearn.plotting import js_plotting_utils\n'), ((12670, 12712), 'nilearn.plotting.js_plotting_utils.to_color_strings', 'js_plotting_utils.to_color_strings', (['colors'], {}), '(colors)\n', (12704, 12712), False, 'from nilearn.plotting import js_plotting_utils\n'), ((1057, 1106), 'nilearn.plotting.js_plotting_utils.add_js_lib', 'js_plotting_utils.add_js_lib', (['html'], {'embed_js': '(True)'}), '(html, embed_js=True)\n', (1085, 1106), False, 'from nilearn.plotting import js_plotting_utils\n'), ((1731, 1753), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (1742, 1753), True, 'import numpy as np\n'), ((1856, 1897), 're.match', 're.match', (['"""rgb\\\\(\\\\d+, \\\\d+, \\\\d+\\\\)"""', 'cs'], {}), "('rgb\\\\(\\\\d+, \\\\d+, \\\\d+\\\\)', cs)\n", (1864, 1897), False, 'import re\n'), ((4686, 4699), 'numpy.arange', 'np.arange', 
(['(15)'], {}), '(15)\n', (4695, 4699), True, 'import numpy as np\n'), ((5933, 5946), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (5942, 5946), True, 'import numpy as np\n'), ((7515, 7541), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'dtype'}), '(10, dtype=dtype)\n', (7524, 7541), True, 'import numpy as np\n'), ((7560, 7587), 'nilearn.plotting.js_plotting_utils.encode', 'js_plotting_utils.encode', (['a'], {}), '(a)\n', (7584, 7587), False, 'from nilearn.plotting import js_plotting_utils\n'), ((7660, 7695), 'numpy.frombuffer', 'np.frombuffer', (['decoded'], {'dtype': 'dtype'}), '(decoded, dtype=dtype)\n', (7673, 7695), True, 'import numpy as np\n'), ((7789, 7806), 'numpy.allclose', 'np.allclose', (['a', 'b'], {}), '(a, b)\n', (7800, 7806), True, 'import numpy as np\n'), ((8441, 8453), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (8449, 8453), False, 'import os\n'), ((8618, 8636), 'os.remove', 'os.remove', (['tmpfile'], {}), '(tmpfile)\n', (8627, 8636), False, 'import os\n'), ((10225, 10250), 'os.path.isfile', 'os.path.isfile', (['temp_file'], {}), '(temp_file)\n', (10239, 10250), False, 'import os\n'), ((10841, 10872), 'os.path.isfile', 'os.path.isfile', (['html._temp_file'], {}), '(html._temp_file)\n', (10855, 10872), False, 'import os\n'), ((10881, 10896), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (10891, 10896), False, 'import time\n'), ((11017, 11048), 'os.path.isfile', 'os.path.isfile', (['html._temp_file'], {}), '(html._temp_file)\n', (11031, 11048), False, 'import os\n'), ((11057, 11072), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (11067, 11072), False, 'import time\n'), ((11088, 11119), 'os.path.isfile', 'os.path.isfile', (['html._temp_file'], {}), '(html._temp_file)\n', (11102, 11119), False, 'import os\n'), ((11295, 11329), 'nilearn.plotting.js_plotting_utils.HTMLDocument', 'js_plotting_utils.HTMLDocument', (['""""""'], {}), "('')\n", (11325, 11329), False, 'from nilearn.plotting import js_plotting_utils\n'), 
((11410, 11444), 'nilearn.plotting.js_plotting_utils.HTMLDocument', 'js_plotting_utils.HTMLDocument', (['""""""'], {}), "('')\n", (11440, 11444), False, 'from nilearn.plotting import js_plotting_utils\n'), ((7723, 7769), 'nilearn.plotting.js_plotting_utils.decode', 'js_plotting_utils.decode', (['encoded'], {'dtype': 'dtype'}), '(encoded, dtype=dtype)\n', (7747, 7769), False, 'from nilearn.plotting import js_plotting_utils\n'), ((8104, 8148), 'nilearn.plotting.js_plotting_utils.decode', 'js_plotting_utils.decode', (['plotly[key]', '"""<f4"""'], {}), "(plotly[key], '<f4')\n", (8128, 8148), False, 'from nilearn.plotting import js_plotting_utils\n'), ((8252, 8296), 'nilearn.plotting.js_plotting_utils.decode', 'js_plotting_utils.decode', (['plotly[key]', '"""<i4"""'], {}), "(plotly[key], '<i4')\n", (8276, 8296), False, 'from nilearn.plotting import js_plotting_utils\n'), ((9248, 9280), 'lxml.etree.HTMLParser', 'etree.HTMLParser', ([], {'huge_tree': '(True)'}), '(huge_tree=True)\n', (9264, 9280), False, 'from lxml import etree\n'), ((10341, 10366), 'os.path.isfile', 'os.path.isfile', (['temp_file'], {}), '(temp_file)\n', (10355, 10366), False, 'import os\n'), ((10554, 10574), 'os.remove', 'os.remove', (['temp_file'], {}), '(temp_file)\n', (10563, 10574), False, 'import os\n'), ((10916, 10947), 'os.path.isfile', 'os.path.isfile', (['html._temp_file'], {}), '(html._temp_file)\n', (10930, 10947), False, 'import os\n'), ((11192, 11218), 'os.remove', 'os.remove', (['html._temp_file'], {}), '(html._temp_file)\n', (11201, 11218), False, 'import os\n')] |
import random
import numpy as np
def read_data(pairs_file):
    """Read tab-separated (tcr, peptide, cd) triples and split train/test.

    Lines whose TCR or peptide contains '*', or whose peptide contains
    '/', are treated as malformed and skipped.

    Returns
    -------
    (all_pairs, train_pairs, test_pairs) : three lists of triples; the
    last two form a random ~80/20 partition of the first.
    """
    # The original also accumulated `tcrs` and `peps` sets that were
    # never used; that dead code has been removed.
    all_pairs = []
    with open(pairs_file, 'r') as file:
        for line in file:
            tcr, pep, cd = line.strip().split('\t')
            # skip sequences with ambiguous or composite entries
            if '*' in tcr or '*' in pep:
                continue
            if '/' in pep:
                continue
            all_pairs.append((tcr, pep, cd))
    train_pairs, test_pairs = train_test_split(all_pairs)
    return all_pairs, train_pairs, test_pairs
def train_test_split(all_pairs):
    """Randomly partition pairs into ~80% train and ~20% test lists."""
    train_pairs, test_pairs = [], []
    for pair in all_pairs:
        # Bernoulli draw: 1 -> train (p=0.8), 0 -> test
        if np.random.binomial(1, 0.8):
            train_pairs.append(pair)
        else:
            test_pairs.append(pair)
    return train_pairs, test_pairs
def positive_examples(pairs):
    """Tag each (tcr, pep, cd) pair as a positive example with weight 1."""
    return [(tcr, pep, cd, 'p', 1) for (tcr, pep, cd) in pairs]
def negative_examples(pairs, all_pairs, size):
    """Sample `size` negative (tcr, pep) examples by random recombination.

    TCRs and peptides from `pairs` are paired at random; a candidate is
    rejected when the combination appears in `all_pairs` (under any CD
    label) or was already generated.

    Notes on fixes relative to the original:
    - membership tests now use sets (O(1)) instead of scanning the
      growing `examples` list and `all_pairs` for every candidate;
    - the loop stops exactly at `size` instead of overshooting by up to
      four extra examples per batch of five attempts.

    Warning: if fewer than `size` unattached combinations exist, this
    loops forever (a limitation inherited from the original design).
    """
    examples = []
    seen = set()
    # known pairings under any CD label, for O(1) rejection
    known = {(tcr, pep) for (tcr, pep, cd) in all_pairs}
    tcrs = [tcr for (tcr, pep, cd) in pairs]
    peps = [pep for (tcr, pep, cd) in pairs]
    count = 0
    while count < size:
        pep = random.choice(peps)
        # try a handful of random TCRs against the chosen peptide
        for _ in range(5):
            tcr = random.choice(tcrs)
            if (tcr, pep) not in known and (tcr, pep) not in seen:
                seen.add((tcr, pep))
                examples.append((tcr, pep, 'NEG', 'n', 1))
                count += 1
                if count == size:
                    break
    return examples
def get_examples(pairs_file):
    """Build positive and size-matched negative examples for both splits."""
    all_pairs, train_pairs, test_pairs = read_data(pairs_file)
    train_pos = positive_examples(train_pairs)
    # negatives are sampled to match the number of positives
    train_neg = negative_examples(train_pairs, all_pairs, len(train_pos))
    test_pos = positive_examples(test_pairs)
    test_neg = negative_examples(test_pairs, all_pairs, len(test_pos))
    return train_pos, train_neg, test_pos, test_neg
def load_data(pairs_file):
    """Return shuffled train and test example lists built from `pairs_file`."""
    train_pos, train_neg, test_pos, test_neg = get_examples(pairs_file)
    # interleave positives and negatives by shuffling each split in place
    train_set = train_pos + train_neg
    random.shuffle(train_set)
    test_set = test_pos + test_neg
    random.shuffle(test_set)
    return train_set, test_set
def check():
    """Smoke test: load the McPAS dataset and print the split contents/sizes."""
    train_set, test_set = load_data('McPAS-with_CD')
    print(train_set)
    print(test_set)
    print(len(train_set))
    print(len(test_set))
# check()
| [
"random.choice",
"random.shuffle",
"numpy.random.binomial"
] | [((2380, 2401), 'random.shuffle', 'random.shuffle', (['train'], {}), '(train)\n', (2394, 2401), False, 'import random\n'), ((2437, 2457), 'random.shuffle', 'random.shuffle', (['test'], {}), '(test)\n', (2451, 2457), False, 'import random\n'), ((777, 803), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.8)'], {}), '(1, 0.8)\n', (795, 803), True, 'import numpy as np\n'), ((1368, 1387), 'random.choice', 'random.choice', (['peps'], {}), '(peps)\n', (1381, 1387), False, 'import random\n'), ((1433, 1452), 'random.choice', 'random.choice', (['tcrs'], {}), '(tcrs)\n', (1446, 1452), False, 'import random\n')] |
"""
Tests functions in the spiketools module
"""
import numpy as np
import pyret.spiketools as spk
def test_binspikes():
    """binspikes places each spike in the right bin and handles no spikes."""
    spike_times = [1.0, 2.0, 2.5, 3.0]
    dt = 0.01
    bin_edges = np.arange(0, 3, dt)
    binned = spk.binspikes(spike_times, bin_edges)
    # spikes land at the expected bin indices
    assert np.allclose(np.where(binned)[0], [100, 200, 250, 299])
    # binning error is bounded by the bin width
    recovered = bin_edges[np.where(binned)]
    assert np.all(np.abs(recovered - spike_times) <= dt)
    # an empty spike train yields all-zero counts
    assert np.allclose(spk.binspikes([], bin_edges), np.zeros_like(bin_edges))
def test_estfr():
    """estfr gives a zero rate for no spikes and conserves spike count."""
    T = 100
    dt = 1e-2
    time = np.arange(0, 1, dt)
    counts = np.zeros(T,)
    # no spikes -> the estimated rate is identically zero
    assert np.allclose(spk.estfr(counts, time, sigma=0.01), counts)
    # a single spike: the rate integrates back to one spike
    counts[T // 2] = 1.
    rate = spk.estfr(counts, time, sigma=0.01)
    assert np.isclose(rate.sum() * dt, counts.sum())
def test_spiking_events():
    """detectevents recovers jittered events and SpikingEvent statistics."""
    np.random.seed(1234)
    # one spike per trial around each of these event times
    event_times = np.array([0.1, 0.25, 0.5, 0.75, 0.9])
    n_events = len(event_times)
    n_trials = 50
    jitter = 0.01
    per_trial = []
    for trial in range(n_trials):
        jittered = event_times + jitter * np.random.randn(n_events,)
        per_trial.append(np.stack((jittered, trial * np.ones(n_events,))))
    spikes = np.hstack(per_trial).T
    t, psth, bspk, events = spk.detectevents(spikes)
    # one detected event per true event
    assert len(events) == n_events
    ev = events[0]
    assert isinstance(ev, spk.SpikingEvent)
    # recovered jitter should be close to the injected jitter
    assert np.allclose(np.mean([e.jitter() for e in events]), jitter,
                       atol=1e-3)
    # time to first spike matches the only spike in each trial
    assert np.allclose(ev.spikes[:, 0], ev.ttfs())
    # exactly one spike per trial, with no variance
    mu, sigma = ev.stats()
    assert mu == 1
    assert sigma == 0
    # sorting by trial index yields strictly increasing spike times? No:
    # the sorted spike times must be strictly increasing across trials
    sorted_spks = ev.sort()
    sorted_spks = sorted_spks[np.argsort(sorted_spks[:, 1]), 0]
    assert np.all(np.diff(sorted_spks) > 0)
def test_peakdet():
    """peakdet locates extrema of a Gaussian bump and of its derivative."""
    u = np.linspace(-5, 5, 1001)
    bump = np.exp(-u ** 2)
    slope = np.gradient(bump, 1e-2)
    # a single maximum at the centre for any delta below the peak height
    for delta in (0.5, 0.1):
        maxtab, mintab = spk.peakdet(bump, delta=delta)
        assert len(mintab) == 0
        assert len(maxtab) == 1
        assert np.allclose(maxtab, np.array([[500, 1]]))
    # a delta larger than the peak height finds nothing
    maxtab, mintab = spk.peakdet(bump, delta=1.)
    assert len(mintab) == 0
    assert len(maxtab) == 0
    # the derivative has one peak and one (symmetric) valley
    maxtab, mintab = spk.peakdet(slope, delta=0.2)
    assert np.allclose(maxtab, np.array([[429, 0.8576926]]))
    assert np.allclose(mintab, np.array([[571, -0.8576926]]))
| [
"numpy.hstack",
"pyret.spiketools.estfr",
"numpy.argsort",
"numpy.array",
"numpy.gradient",
"numpy.arange",
"numpy.where",
"numpy.diff",
"numpy.exp",
"numpy.linspace",
"numpy.random.seed",
"numpy.abs",
"numpy.allclose",
"numpy.ones",
"pyret.spiketools.binspikes",
"pyret.spiketools.dete... | [((238, 257), 'numpy.arange', 'np.arange', (['(0)', '(3)', 'dt'], {}), '(0, 3, dt)\n', (247, 257), True, 'import numpy as np\n'), ((269, 306), 'pyret.spiketools.binspikes', 'spk.binspikes', (['spike_times', 'bin_edges'], {}), '(spike_times, bin_edges)\n', (282, 306), True, 'import pyret.spiketools as spk\n'), ((702, 713), 'numpy.zeros', 'np.zeros', (['T'], {}), '(T)\n', (710, 713), True, 'import numpy as np\n'), ((726, 745), 'numpy.arange', 'np.arange', (['(0)', '(1)', 'dt'], {}), '(0, 1, dt)\n', (735, 745), True, 'import numpy as np\n'), ((755, 788), 'pyret.spiketools.estfr', 'spk.estfr', (['bspk', 'time'], {'sigma': '(0.01)'}), '(bspk, time, sigma=0.01)\n', (764, 788), True, 'import pyret.spiketools as spk\n'), ((800, 821), 'numpy.allclose', 'np.allclose', (['fr', 'bspk'], {}), '(fr, bspk)\n', (811, 821), True, 'import numpy as np\n'), ((880, 913), 'pyret.spiketools.estfr', 'spk.estfr', (['bspk', 'time'], {'sigma': '(0.01)'}), '(bspk, time, sigma=0.01)\n', (889, 913), True, 'import pyret.spiketools as spk\n'), ((998, 1018), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (1012, 1018), True, 'import numpy as np\n'), ((1064, 1101), 'numpy.array', 'np.array', (['[0.1, 0.25, 0.5, 0.75, 0.9]'], {}), '([0.1, 0.25, 0.5, 0.75, 0.9])\n', (1072, 1101), True, 'import numpy as np\n'), ((1404, 1428), 'pyret.spiketools.detectevents', 'spk.detectevents', (['spikes'], {}), '(spikes)\n', (1420, 1428), True, 'import pyret.spiketools as spk\n'), ((1718, 1762), 'numpy.allclose', 'np.allclose', (['mean_jitter', 'jitter'], {'atol': '(0.001)'}), '(mean_jitter, jitter, atol=0.001)\n', (1729, 1762), True, 'import numpy as np\n'), ((2197, 2221), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(1001)'], {}), '(-5, 5, 1001)\n', (2208, 2221), True, 'import numpy as np\n'), ((2230, 2245), 'numpy.exp', 'np.exp', (['(-u ** 2)'], {}), '(-u ** 2)\n', (2236, 2245), True, 'import numpy as np\n'), ((2255, 2275), 'numpy.gradient', 'np.gradient', 
(['x', '(0.01)'], {}), '(x, 0.01)\n', (2266, 2275), True, 'import numpy as np\n'), ((2330, 2355), 'pyret.spiketools.peakdet', 'spk.peakdet', (['x'], {'delta': '(0.5)'}), '(x, delta=0.5)\n', (2341, 2355), True, 'import pyret.spiketools as spk\n'), ((2519, 2544), 'pyret.spiketools.peakdet', 'spk.peakdet', (['x'], {'delta': '(0.1)'}), '(x, delta=0.1)\n', (2530, 2544), True, 'import pyret.spiketools as spk\n'), ((2708, 2733), 'pyret.spiketools.peakdet', 'spk.peakdet', (['x'], {'delta': '(1.0)'}), '(x, delta=1.0)\n', (2719, 2733), True, 'import pyret.spiketools as spk\n'), ((2847, 2873), 'pyret.spiketools.peakdet', 'spk.peakdet', (['dx'], {'delta': '(0.2)'}), '(dx, delta=0.2)\n', (2858, 2873), True, 'import pyret.spiketools as spk\n'), ((436, 450), 'numpy.where', 'np.where', (['bspk'], {}), '(bspk)\n', (444, 450), True, 'import numpy as np\n'), ((561, 589), 'pyret.spiketools.binspikes', 'spk.binspikes', (['[]', 'bin_edges'], {}), '([], bin_edges)\n', (574, 589), True, 'import pyret.spiketools as spk\n'), ((591, 615), 'numpy.zeros_like', 'np.zeros_like', (['bin_edges'], {}), '(bin_edges)\n', (604, 615), True, 'import numpy as np\n'), ((1335, 1352), 'numpy.hstack', 'np.hstack', (['spikes'], {}), '(spikes)\n', (1344, 1352), True, 'import numpy as np\n'), ((2443, 2463), 'numpy.array', 'np.array', (['[[500, 1]]'], {}), '([[500, 1]])\n', (2451, 2463), True, 'import numpy as np\n'), ((2632, 2652), 'numpy.array', 'np.array', (['[[500, 1]]'], {}), '([[500, 1]])\n', (2640, 2652), True, 'import numpy as np\n'), ((2905, 2933), 'numpy.array', 'np.array', (['[[429, 0.8576926]]'], {}), '([[429, 0.8576926]])\n', (2913, 2933), True, 'import numpy as np\n'), ((2966, 2995), 'numpy.array', 'np.array', (['[[571, -0.8576926]]'], {}), '([[571, -0.8576926]])\n', (2974, 2995), True, 'import numpy as np\n'), ((330, 344), 'numpy.where', 'np.where', (['bspk'], {}), '(bspk)\n', (338, 344), True, 'import numpy as np\n'), ((470, 504), 'numpy.abs', 'np.abs', (['(binned_times - spike_times)'], {}), 
'(binned_times - spike_times)\n', (476, 504), True, 'import numpy as np\n'), ((2062, 2091), 'numpy.argsort', 'np.argsort', (['sorted_spks[:, 1]'], {}), '(sorted_spks[:, 1])\n', (2072, 2091), True, 'import numpy as np\n'), ((2114, 2134), 'numpy.diff', 'np.diff', (['sorted_spks'], {}), '(sorted_spks)\n', (2121, 2134), True, 'import numpy as np\n'), ((1238, 1256), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (1253, 1256), True, 'import numpy as np\n'), ((1307, 1317), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (1314, 1317), True, 'import numpy as np\n')] |
# Least Square Sample
# ========================================
# [] File Name : ls_sample.py
#
# [] Creation Date : December 2017
#
# [] Created By : <NAME> (<EMAIL>)
# ========================================
#
import matplotlib.pyplot as plt
import numpy as numpy

# Sample (x, y) observations to score candidate lines against
dataset = numpy.array([
    [3, 5], [5, 3], [8, 4], [3, 1],
    [6, 4], [5, 4], [7, 5], [8, 3],
])
# Candidate hypotheses y = m*x + b: slopes and intercepts, index-aligned
slope_list = [5, 3, 6, 6, 3, 4]
constant_list = [6, 1, 4, 8, 4, 7]
# Subplot titles derived directly from the coefficient lists
plot_titles = ['y = {}x + {}'.format(m, b)
               for m, b in zip(slope_list, constant_list)]
# ======================================== #
# ========== Least Square Error ========== #
# ======================================== #
def computeErrorForLineGivenPoints(b, m, coordinates):
    """Mean squared error of the line y = m*x + b over (x, y) points.

    :param b: intercept of the candidate line
    :param m: slope of the candidate line
    :param coordinates: iterable of (x, y) pairs
    :return: average of (y - (m*x + b))**2 over all points
    """
    squared_residuals = [(y - (m * x + b)) ** 2 for x, y in coordinates]
    return sum(squared_residuals) / float(len(squared_residuals))
# ======================================== #
# ============ Test with data ============ #
# ======================================== #
errorlist = []
for i in range(0, 6):
    # Bug fix: the function signature is (b, m, coordinates) for the line
    # y = m*x + b, so the intercept must come first and the slope second.
    # The original call passed (slope, constant), scoring the wrong line.
    errorlist.append(computeErrorForLineGivenPoints(constant_list[i], slope_list[i], dataset))
    print("Hypothesis " + plot_titles[i] + " error: ")
    print(errorlist[i])
# ======================================== #
# ============ Plot the result =========== #
# ======================================== #
fig = plt.figure()
fig.suptitle('Least Square Errors', fontsize=10, fontweight='bold')
for i in range(1, 7):
    ax = fig.add_subplot(3, 2, i)
    ax.title.set_text(plot_titles[i-1])
    # Scatter the raw observations
    ax.scatter(dataset[:,0],dataset[:,1])
    errorLabel = "Error = "
    ax.text(0.95, 0.01, errorLabel + str(errorlist[i-1]),
        verticalalignment='bottom', horizontalalignment='right',
        transform=ax.transAxes,
        color='green', fontsize=12)
    # Bug fix: draw the hypothesis line y = m*x + b over the x range of the
    # data. The original plotted dataset/m + b against the full 2-column
    # array, which is neither the titled hypothesis nor a valid x axis.
    x_line = dataset[:, 0]
    ax.plot(x_line, slope_list[i-1] * x_line + constant_list[i-1])
plt.show()
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
] | [((279, 356), 'numpy.array', 'numpy.array', (['[[3, 5], [5, 3], [8, 4], [3, 1], [6, 4], [5, 4], [7, 5], [8, 3]]'], {}), '([[3, 5], [5, 3], [8, 4], [3, 1], [6, 4], [5, 4], [7, 5], [8, 3]])\n', (290, 356), True, 'import numpy as numpy\n'), ((1466, 1478), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1476, 1478), True, 'import matplotlib.pyplot as plt\n'), ((1989, 1999), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1997, 1999), True, 'import matplotlib.pyplot as plt\n'), ((1924, 1993), 'matplotlib.pyplot.plot', 'plt.plot', (['dataset', '(dataset / slope_list[i - 1] + constant_list[i - 1])'], {}), '(dataset, dataset / slope_list[i - 1] + constant_list[i - 1])\n', (1932, 1993), True, 'import matplotlib.pyplot as plt\n')] |
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 <NAME>. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
"""
A framebuffer is a collection of buffers that can be used as the destination
for rendering. OpenGL has two kinds of framebuffers: the default framebuffer,
which is provided by the OpenGL Context; and user-created framebuffers called
framebuffer objects (FBOs). The buffers for default framebuffers are part of
the context and usually represent a window or display device. The buffers for
FBOs reference images from either textures or render buffers; they are never
directly visible.
Read more on framebuffers on `OpenGL Wiki
<https://www.opengl.org/wiki/Framebuffer>`_
**Example usage**
.. code:: python
...
texture = np.zeros((512,512,4),np.float32).view(gloo.TextureFloat2D)
framebuffer = gloo.FrameBuffer(color=[texture])
...
@window.event
def on_draw(dt):
framebuffer.activate()
window.clear()
quad.draw(gl.GL_TRIANGLE_STRIP)
framebuffer.deactivate()
"""
import numpy as np
from glumpy import gl
from glumpy.log import log
from glumpy.gloo.globject import GLObject
from glumpy.gloo.texture import Texture2D
class RenderBuffer(GLObject):
    """ Base class for render buffer object.

    Storage allocation is deferred: the buffer is (re)allocated on the GPU
    the next time it is activated.

    :param GLEnum format: Buffer format
    :param int width: Buffer width (pixels)
    :param int height: Buffer height (pixel)
    """

    def __init__(self, width, height, format):
        GLObject.__init__(self)
        self._width = width
        self._height = height
        self._target = gl.GL_RENDERBUFFER
        self._format = format
        # Force storage allocation on first activation
        self._need_resize = True

    @property
    def width(self):
        """ Buffer width (read-only). """
        return self._width

    @property
    def height(self):
        """ Buffer height (read-only). """
        return self._height

    def resize(self, width, height):
        """ Resize the buffer (deferred operation).

        The GPU storage is only reallocated on the next activation.

        :param int width: New buffer width (pixels)
        :param int height: New buffer height (pixels)
        """
        if width != self._width or height != self._height:
            self._need_resize = True
            self._width = width
            self._height = height

    def _create(self):
        """ Create buffer on GPU """
        log.debug("GPU: Create render buffer")
        self._handle = gl.glGenRenderbuffers(1)

    def _delete(self):
        """ Delete buffer from GPU """
        log.debug("GPU: Deleting render buffer")
        # Bug fix: the GL entry point is plural (glDeleteRenderbuffers) and
        # takes a count plus an array of handles — mirrors the call already
        # used in FrameBuffer._delete below.
        gl.glDeleteRenderbuffers(1, np.array([self._handle]))

    def _activate(self):
        """ Activate buffer on GPU """
        log.debug("GPU: Activate render buffer")
        gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self._handle)
        # Perform any deferred storage reallocation while the buffer is bound
        if self._need_resize:
            self._resize()
            self._need_resize = False

    def _deactivate(self):
        """ Deactivate buffer on GPU """
        log.debug("GPU: Deactivate render buffer")
        gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, 0)

    def _resize(self):
        """ Buffer resize on GPU """
        # WARNING: width/height should be checked against maximum size
        # maxsize = gl.glGetParameter(gl.GL_MAX_RENDERBUFFER_SIZE)
        log.debug("GPU: Resize render buffer")
        gl.glRenderbufferStorage(self._target, self._format,
                                 self._width, self._height)
class ColorBuffer(RenderBuffer):
    """ Render buffer holding color data.

    :param int width: Buffer width (pixels)
    :param int height: Buffer height (pixel)
    :param GLEnum format: Buffer format (default is gl.GL_RGBA)
    """

    def __init__(self, width, height, format=gl.GL_RGBA):
        # Some GL profiles restrict color formats (e.g. GL_RGB565,
        # GL_RGBA4, GL_RGB5_A1); no validation is performed here.
        RenderBuffer.__init__(self, width=width, height=height, format=format)
class DepthBuffer(RenderBuffer):
    """ Render buffer holding depth data.

    :param int width: Buffer width (pixels)
    :param int height: Buffer height (pixel)
    :param GLEnum format: Buffer format (default is gl.GL_DEPTH_COMPONENT)
    """

    def __init__(self, width, height, format=gl.GL_DEPTH_COMPONENT):
        # Some GL profiles restrict depth formats
        # (e.g. GL_DEPTH_COMPONENT16); no validation is performed here.
        RenderBuffer.__init__(self, width=width, height=height, format=format)
class StencilBuffer(RenderBuffer):
    """ Render buffer holding stencil data.

    :param int width: Buffer width (pixels)
    :param int height: Buffer height (pixel)
    :param GLEnum format: Buffer format (default is gl.GL_STENCIL_INDEX8)
    """

    def __init__(self, width, height, format=gl.GL_STENCIL_INDEX8):
        # Some GL profiles restrict stencil formats (e.g. GL_STENCIL_INDEX);
        # no validation is performed here.
        RenderBuffer.__init__(self, width=width, height=height, format=format)
class FrameBuffer(GLObject):
    """ Framebuffer object.
    :param ColorBuffer color: One or several color buffers or None
    :param DepthBuffer depth: A depth buffer or None
    :param StencilBuffer stencil: A stencil buffer or None
    """
    def __init__(self, color=None, depth=None, stencil=None):
        """ Record requested attachments.
        GPU attachment is deferred: buffers are kept in a pending list and
        only attached when the framebuffer is first activated.
        """
        GLObject.__init__(self)
        self._width = None
        self._height = None
        self._color = None
        self._depth = None
        self._stencil = None
        self._need_attach = True
        # (attachment target, buffer) pairs to attach on next activation
        self._pending_attachments = []
        if color is not None:
            self.color = color
        if depth is not None:
            self.depth = depth
        if stencil is not None:
            self.stencil = stencil
    @property
    def color(self):
        """ Color buffer attachment(s) (read/write) """
        return self._color
    @color.setter
    def color(self, buffers):
        """ Color buffer attachment(s) (read/write) """
        # Accept a single buffer or a list of buffers
        if not isinstance(buffers,list):
            buffers = [buffers]
        self._color = []
        for i,buffer in enumerate(buffers):
            # All attachments must share the same size as the framebuffer
            if self.width is not None and self.width != buffer.width:
                raise ValueError("Buffer width does not match")
            elif self.height is not None and self.height != buffer.height:
                raise ValueError("Buffer height does not match")
            self._width = buffer.width
            self._height = buffer.height
            # i-th color buffer maps to GL_COLOR_ATTACHMENT0 + i
            target = gl.GL_COLOR_ATTACHMENT0+i
            self._color.append(buffer)
            if isinstance(buffer, (ColorBuffer, Texture2D)) or buffer is None:
                self._pending_attachments.append((target, buffer))
            else:
                raise ValueError(
                    "Buffer must be a ColorBuffer, Texture2D or None")
        self._need_attach = True
    @property
    def depth(self):
        """ Depth buffer attachment (read/write) """
        return self._depth
    @depth.setter
    def depth(self, buffer):
        """ Depth buffer attachment (read/write) """
        if self.width is not None and self.width != buffer.width:
            raise ValueError("Buffer width does not match")
        elif self.height is not None and self.height != buffer.height:
            raise ValueError("Buffer height does not match")
        self._width = buffer.width
        self._height = buffer.height
        target = gl.GL_DEPTH_ATTACHMENT
        self._depth = buffer
        if isinstance(buffer, (DepthBuffer, Texture2D)) or buffer is None:
            self._pending_attachments.append((target, buffer))
        else:
            raise ValueError(
                "Buffer must be a DepthBuffer, Texture2D or None")
        self._need_attach = True
    @property
    def stencil(self):
        """ Stencil buffer attachment (read/write) """
        return self._stencil
    @stencil.setter
    def stencil(self, buffer):
        """ Stencil buffer attachment (read/write) """
        if self.width is not None and self.width != buffer.width:
            raise ValueError("Buffer width does not match")
        elif self.height is not None and self.height != buffer.height:
            raise ValueError("Buffer height does not match")
        self._width = buffer.width
        self._height = buffer.height
        target = gl.GL_STENCIL_ATTACHMENT
        self._stencil = buffer
        if isinstance(buffer, StencilBuffer) or buffer is None:
            self._pending_attachments.append((target, buffer))
        else:
            raise ValueError(
                "Buffer must be a StencilBuffer, Texture2D or None")
        self._need_attach = True
    @property
    def width(self):
        """ Buffer width (read only, pixels) """
        return self._width
    @property
    def height(self):
        """ Buffer height (read only, pixels) """
        return self._height
    def resize(self, width, height):
        """ Resize the buffer (deferred operation).
        This method will also resize any attached buffers.
        :param int width: New buffer width (pixels)
        :param int height: New buffer height (pixels)
        """
        # NOTE(review): assumes color attachments have been set;
        # self._color is still None on a framebuffer created without any.
        self._width = width
        self._height = height
        for i, buffer in enumerate(self.color):
            if isinstance(buffer, ColorBuffer):
                buffer.resize(width, height)
            elif isinstance(buffer, Texture2D):
                # Textures cannot be resized in place: build a replacement
                # of the same class, delete the old one, and re-attach.
                newbuffer = np.resize(buffer, (height,width,buffer.shape[2]))
                newbuffer = newbuffer.view(buffer.__class__)
                self.color[i] = newbuffer
                buffer.delete()
                target = gl.GL_COLOR_ATTACHMENT0+i
                self._pending_attachments.append((target, self.color[i]))
        self._need_attach = True
        if isinstance(self.depth, DepthBuffer):
            self.depth.resize(width, height)
        elif isinstance(self.depth, Texture2D):
            depth = np.resize(self.depth, (height,width, self.depth.shape[2]))
            depth = depth.view(self.depth.__class__)
            self.depth.delete()
            self.depth = depth
            target = gl.GL_DEPTH_ATTACHMENT
            self._pending_attachments.append((target, self.depth))
        self._need_attach = True
        if isinstance(self.stencil, StencilBuffer):
            self.stencil.resize(width, height)
        elif isinstance(self.stencil, Texture2D):
            stencil = np.resize(self.stencil, (height,width, self.stencil.shape[2]))
            stencil = stencil.view(self.stencil.__class__)
            self.stencil.delete()
            self.stencil = stencil
            target = gl.GL_STENCIL_ATTACHMENT
            self._pending_attachments.append((target, self.stencil))
        self._need_attach = True
    def _create(self):
        """ Create framebuffer on GPU """
        log.debug("GPU: Create framebuffer")
        self._handle = gl.glGenFramebuffers(1)
    def _delete(self):
        """ Delete buffer from GPU """
        log.debug("GPU: Delete framebuffer")
        gl.glDeleteFramebuffers(1, np.array([self._handle]))
    def _activate(self):
        """ Activate framebuffer on GPU """
        log.debug("GPU: Activate render framebuffer")
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self._handle)
        if self._need_attach:
            self._attach()
            self._need_attach = False
        # Enable drawing into every color attachment
        attachments = [gl.GL_COLOR_ATTACHMENT0+i for i in range(len(self.color))]
        gl.glDrawBuffers(np.array(attachments,dtype=np.uint32))
    def _deactivate(self):
        """ Deactivate framebuffer on GPU """
        log.debug("GPU: Deactivate render framebuffer")
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        # gl.glDrawBuffers([gl.GL_COLOR_ATTACHMENT0])
    def _attach(self):
        """ Attach render buffers to framebuffer """
        log.debug("GPU: Attach render buffers")
        # Drain the pending list in FIFO order
        while self._pending_attachments:
            attachment, buffer = self._pending_attachments.pop(0)
            if buffer is None:
                # Detach: bind renderbuffer 0 to the attachment point
                gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, attachment,
                                             gl.GL_RENDERBUFFER, 0)
            elif isinstance(buffer, RenderBuffer):
                buffer.activate()
                gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, attachment,
                                             gl.GL_RENDERBUFFER, buffer.handle)
                buffer.deactivate()
            elif isinstance(buffer, Texture2D):
                buffer.activate()
                # INFO: 0 is for mipmap level 0 (default) of the texture
                gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, attachment,
                                          buffer.target, buffer.handle, 0)
                buffer.deactivate()
            else:
                raise ValueError("Invalid attachment")
        # Verify completeness and translate GL status codes into exceptions
        res = gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER)
        if res == gl.GL_FRAMEBUFFER_COMPLETE:
            pass
        elif res == 0:
            raise RuntimeError('Target not equal to GL_FRAMEBUFFER')
        elif res == gl.GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT:
            raise RuntimeError(
                'FrameBuffer attachments are incomplete.')
        elif res == gl.GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:
            raise RuntimeError(
                'No valid attachments in the FrameBuffer.')
        elif res == gl.GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS:
            raise RuntimeError(
                'attachments do not have the same width and height.')
        elif res == gl.GL_FRAMEBUFFER_INCOMPLETE_FORMATS:
            raise RuntimeError('Internal format of attachment '
                               'is not renderable.')
        elif res == gl.GL_FRAMEBUFFER_UNSUPPORTED:
            raise RuntimeError('Combination of internal formats used '
                               'by attachments is not supported.')
| [
"glumpy.gl.glCheckFramebufferStatus",
"glumpy.gl.glDeleteRenderbuffer",
"glumpy.gl.glFramebufferTexture2D",
"numpy.array",
"glumpy.gl.glGenFramebuffers",
"numpy.resize",
"glumpy.gl.glRenderbufferStorage",
"glumpy.gl.glFramebufferRenderbuffer",
"glumpy.gl.glGenRenderbuffers",
"glumpy.gloo.globject.... | [((1624, 1647), 'glumpy.gloo.globject.GLObject.__init__', 'GLObject.__init__', (['self'], {}), '(self)\n', (1641, 1647), False, 'from glumpy.gloo.globject import GLObject\n'), ((2471, 2509), 'glumpy.log.log.debug', 'log.debug', (['"""GPU: Create render buffer"""'], {}), "('GPU: Create render buffer')\n", (2480, 2509), False, 'from glumpy.log import log\n'), ((2533, 2557), 'glumpy.gl.glGenRenderbuffers', 'gl.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (2554, 2557), False, 'from glumpy import gl\n'), ((2631, 2671), 'glumpy.log.log.debug', 'log.debug', (['"""GPU: Deleting render buffer"""'], {}), "('GPU: Deleting render buffer')\n", (2640, 2671), False, 'from glumpy.log import log\n'), ((2680, 2717), 'glumpy.gl.glDeleteRenderbuffer', 'gl.glDeleteRenderbuffer', (['self._handle'], {}), '(self._handle)\n', (2703, 2717), False, 'from glumpy import gl\n'), ((2793, 2833), 'glumpy.log.log.debug', 'log.debug', (['"""GPU: Activate render buffer"""'], {}), "('GPU: Activate render buffer')\n", (2802, 2833), False, 'from glumpy.log import log\n'), ((2842, 2897), 'glumpy.gl.glBindRenderbuffer', 'gl.glBindRenderbuffer', (['gl.GL_RENDERBUFFER', 'self._handle'], {}), '(gl.GL_RENDERBUFFER, self._handle)\n', (2863, 2897), False, 'from glumpy import gl\n'), ((3072, 3114), 'glumpy.log.log.debug', 'log.debug', (['"""GPU: Deactivate render buffer"""'], {}), "('GPU: Deactivate render buffer')\n", (3081, 3114), False, 'from glumpy.log import log\n'), ((3123, 3167), 'glumpy.gl.glBindRenderbuffer', 'gl.glBindRenderbuffer', (['gl.GL_RENDERBUFFER', '(0)'], {}), '(gl.GL_RENDERBUFFER, 0)\n', (3144, 3167), False, 'from glumpy import gl\n'), ((3377, 3415), 'glumpy.log.log.debug', 'log.debug', (['"""GPU: Resize render buffer"""'], {}), "('GPU: Resize render buffer')\n", (3386, 3415), False, 'from glumpy.log import log\n'), ((3424, 3503), 'glumpy.gl.glRenderbufferStorage', 'gl.glRenderbufferStorage', (['self._target', 'self._format', 'self._width', 'self._height'], {}), 
'(self._target, self._format, self._width, self._height)\n', (3448, 3503), False, 'from glumpy import gl\n'), ((5379, 5402), 'glumpy.gloo.globject.GLObject.__init__', 'GLObject.__init__', (['self'], {}), '(self)\n', (5396, 5402), False, 'from glumpy.gloo.globject import GLObject\n'), ((10941, 10977), 'glumpy.log.log.debug', 'log.debug', (['"""GPU: Create framebuffer"""'], {}), "('GPU: Create framebuffer')\n", (10950, 10977), False, 'from glumpy.log import log\n'), ((11001, 11024), 'glumpy.gl.glGenFramebuffers', 'gl.glGenFramebuffers', (['(1)'], {}), '(1)\n', (11021, 11024), False, 'from glumpy import gl\n'), ((11098, 11134), 'glumpy.log.log.debug', 'log.debug', (['"""GPU: Delete framebuffer"""'], {}), "('GPU: Delete framebuffer')\n", (11107, 11134), False, 'from glumpy.log import log\n'), ((11276, 11321), 'glumpy.log.log.debug', 'log.debug', (['"""GPU: Activate render framebuffer"""'], {}), "('GPU: Activate render framebuffer')\n", (11285, 11321), False, 'from glumpy.log import log\n'), ((11330, 11383), 'glumpy.gl.glBindFramebuffer', 'gl.glBindFramebuffer', (['gl.GL_FRAMEBUFFER', 'self._handle'], {}), '(gl.GL_FRAMEBUFFER, self._handle)\n', (11350, 11383), False, 'from glumpy import gl\n'), ((11710, 11757), 'glumpy.log.log.debug', 'log.debug', (['"""GPU: Deactivate render framebuffer"""'], {}), "('GPU: Deactivate render framebuffer')\n", (11719, 11757), False, 'from glumpy.log import log\n'), ((11766, 11808), 'glumpy.gl.glBindFramebuffer', 'gl.glBindFramebuffer', (['gl.GL_FRAMEBUFFER', '(0)'], {}), '(gl.GL_FRAMEBUFFER, 0)\n', (11786, 11808), False, 'from glumpy import gl\n'), ((11950, 11989), 'glumpy.log.log.debug', 'log.debug', (['"""GPU: Attach render buffers"""'], {}), "('GPU: Attach render buffers')\n", (11959, 11989), False, 'from glumpy.log import log\n'), ((12977, 13023), 'glumpy.gl.glCheckFramebufferStatus', 'gl.glCheckFramebufferStatus', (['gl.GL_FRAMEBUFFER'], {}), '(gl.GL_FRAMEBUFFER)\n', (13004, 13023), False, 'from glumpy import gl\n'), ((11170, 11194), 
'numpy.array', 'np.array', (['[self._handle]'], {}), '([self._handle])\n', (11178, 11194), True, 'import numpy as np\n'), ((11587, 11625), 'numpy.array', 'np.array', (['attachments'], {'dtype': 'np.uint32'}), '(attachments, dtype=np.uint32)\n', (11595, 11625), True, 'import numpy as np\n'), ((10025, 10084), 'numpy.resize', 'np.resize', (['self.depth', '(height, width, self.depth.shape[2])'], {}), '(self.depth, (height, width, self.depth.shape[2]))\n', (10034, 10084), True, 'import numpy as np\n'), ((10521, 10584), 'numpy.resize', 'np.resize', (['self.stencil', '(height, width, self.stencil.shape[2])'], {}), '(self.stencil, (height, width, self.stencil.shape[2]))\n', (10530, 10584), True, 'import numpy as np\n'), ((12144, 12231), 'glumpy.gl.glFramebufferRenderbuffer', 'gl.glFramebufferRenderbuffer', (['gl.GL_FRAMEBUFFER', 'attachment', 'gl.GL_RENDERBUFFER', '(0)'], {}), '(gl.GL_FRAMEBUFFER, attachment, gl.\n GL_RENDERBUFFER, 0)\n', (12172, 12231), False, 'from glumpy import gl\n'), ((9511, 9562), 'numpy.resize', 'np.resize', (['buffer', '(height, width, buffer.shape[2])'], {}), '(buffer, (height, width, buffer.shape[2]))\n', (9520, 9562), True, 'import numpy as np\n'), ((12373, 12472), 'glumpy.gl.glFramebufferRenderbuffer', 'gl.glFramebufferRenderbuffer', (['gl.GL_FRAMEBUFFER', 'attachment', 'gl.GL_RENDERBUFFER', 'buffer.handle'], {}), '(gl.GL_FRAMEBUFFER, attachment, gl.\n GL_RENDERBUFFER, buffer.handle)\n', (12401, 12472), False, 'from glumpy import gl\n'), ((12720, 12813), 'glumpy.gl.glFramebufferTexture2D', 'gl.glFramebufferTexture2D', (['gl.GL_FRAMEBUFFER', 'attachment', 'buffer.target', 'buffer.handle', '(0)'], {}), '(gl.GL_FRAMEBUFFER, attachment, buffer.target,\n buffer.handle, 0)\n', (12745, 12813), False, 'from glumpy import gl\n')] |
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import albumentations
import albumentations.pytorch
import numpy as np
import math
import pandas as pd
import random
import os
import matplotlib
import argparse
import wandb
from EnD import *
from configs import *
from collections import defaultdict
import colour_mnist
import models
from tqdm import tqdm
device = torch.device('cpu')  # module-level default; reassigned from config.device when run as a script
def num_correct(outputs, labels):
    """Return the number of rows in `outputs` whose argmax matches `labels`.

    :param outputs: tensor of per-class scores, shape (batch, classes)
    :param labels: tensor of target class indices, shape (batch,)
    :return: 0-dim tensor holding the count of correct predictions
    """
    predictions = outputs.argmax(dim=1)
    return predictions.eq(labels).sum()
def train(model, dataloader, criterion, weights, optimizer, scheduler):
    """Run one training epoch.

    :param model: network to optimize (switched to train mode here)
    :param dataloader: yields (data, labels, color_labels) batches
    :param criterion: callable returning a (task_loss, regularizer) pair
    :param weights: extra argument forwarded to `criterion`
    :param optimizer: torch optimizer
    :param scheduler: optional LR scheduler, stepped once per epoch
    :return: (avg accuracy, avg total loss, avg task loss, avg regularizer)
    """
    num_samples = 0
    tot_correct = 0
    tot_loss = 0
    tot_bce = 0.
    tot_abs = 0.
    model.train()
    for data, labels, color_labels in tqdm(dataloader, leave=False):
        data, labels, color_labels = data.to(device), labels.to(device), color_labels.to(device)
        optimizer.zero_grad()
        with torch.enable_grad():
            outputs = model(data)
            # Renamed from `abs` to avoid shadowing the builtin abs()
            bce_loss, abs_loss = criterion(outputs, labels, color_labels, weights)
            loss = bce_loss + abs_loss
            loss.backward()
            optimizer.step()
        batch_size = data.shape[0]
        tot_correct += num_correct(outputs, labels).item()
        num_samples += batch_size
        # Weight running sums by batch size so the final averages are per-sample
        tot_loss += loss.item() * batch_size
        tot_bce += bce_loss.item() * batch_size
        tot_abs += abs_loss.item() * batch_size
    if scheduler is not None:
        scheduler.step()
    avg_accuracy = tot_correct / num_samples
    avg_loss = tot_loss / num_samples
    return avg_accuracy, avg_loss, tot_bce / num_samples, tot_abs / num_samples
def test(model, dataloader, criterion, weights):
    """Evaluate `model` over `dataloader`; return (avg accuracy, avg loss)."""
    model.eval()
    seen = 0
    correct = 0
    loss_sum = 0
    for data, labels, color_labels in tqdm(dataloader, leave=False):
        data = data.to(device)
        labels = labels.to(device)
        color_labels = color_labels.to(device)
        with torch.no_grad():
            outputs = model(data)
            loss = criterion(outputs, labels, color_labels, weights)
        n = data.shape[0]
        correct += num_correct(outputs, labels).item()
        seen += n
        # Weight by batch size so the result is a per-sample average
        loss_sum += loss.item() * n
    return correct / seen, loss_sum / seen
def main(config):
    """Train a convnet on colour-MNIST with the EnD regularizer.

    Seeds all RNGs, builds the biased train/valid loaders plus a fully
    biased and an unbiased test loader, then trains for config.epochs
    epochs, tracking the best validation accuracy and (unless
    config.local) logging metrics and checkpoints to wandb.
    """
    seed = 42
    random.seed(seed)
    # NOTE(review): setting PYTHONHASHSEED inside a running interpreter does
    # not change its hash randomization; affects subprocesses only.
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(seed)
    # config.rho controls the colour/label correlation of the training set
    train_loader, valid_loader = colour_mnist.get_biased_mnist_dataloader(
        f'{os.path.expanduser("~")}/data',
        config.batch_size,
        config.rho,
        train=True
    )
    # Fully biased test set (rho = 1.0)
    biased_test_loader = colour_mnist.get_biased_mnist_dataloader(
        f'{os.path.expanduser("~")}/data',
        config.batch_size,
        1.0,
        train=False
    )
    # Nearly unbiased test set (rho = 0.1)
    unbiased_test_loader = colour_mnist.get_biased_mnist_dataloader(
        f'{os.path.expanduser("~")}/data',
        config.batch_size,
        0.1,
        train=False
    )
    print('Training debiased model')
    print('Config:', config)
    model = models.simple_convnet()
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1, verbose=True)
    # Forward hook on avgpool: its activations feed the EnD regularizer
    hook = Hook(model.avgpool, backward=False)
    def ce(outputs, labels, color_labels, weights):
        # Plain cross-entropy; extra arguments kept for a uniform signature
        return F.cross_entropy(outputs, labels)
    def ce_abs(outputs, labels, color_labels, weights):
        # Task loss plus the EnD term computed on the hooked features
        loss = ce(outputs, labels, color_labels, weights)
        abs = abs_regu(hook, labels, color_labels, config.alpha, config.beta)
        return loss, abs
    best = defaultdict(float)
    for i in range(config.epochs):
        # Scheduler is stepped here, not inside train(), hence scheduler=None
        train_acc, train_loss, train_bce, train_abs = train(model, train_loader, ce_abs, None, optimizer, scheduler=None)
        scheduler.step()
        valid_acc, valid_loss = test(model, valid_loader, ce, None)
        biased_test_acc, biased_test_loss = test(model, biased_test_loader, ce, None)
        unbiased_test_acc, unbiased_test_loss = test(model, unbiased_test_loader, ce, None)
        print(f'Epoch {i} - Train acc: {train_acc:.4f}, train_loss: {train_loss:.4f} (bce: {train_bce:.4f} abs: {train_abs:.4f});')
        print(f'Valid acc {valid_acc:.4f}, loss: {valid_loss:.4f}')
        print(f'Biased test acc: {biased_test_acc:.4f}, loss: {biased_test_loss:.4f}')
        print(f'Unbiased test acc: {unbiased_test_acc:.4f}, loss: {unbiased_test_loss:.4f}')
        # Model selection on validation accuracy
        if valid_acc > best['valid_acc']:
            best = dict(
                valid_acc = valid_acc,
                biased_test_acc = biased_test_acc,
                unbiased_test_acc = unbiased_test_acc
            )
        if not config.local:
            metrics = {
                'train_acc': train_acc,
                'train_loss': train_loss,
                'train_bce': train_bce,
                'train_abs': train_abs,
                'valid_acc': valid_acc,
                'valid_loss': valid_loss,
                'biased_test_acc': biased_test_acc,
                'biased_test_loss': biased_test_loss,
                'unbiased_test_acc': unbiased_test_acc,
                'unbiased_test_loss': unbiased_test_loss,
                'best': best
            }
            wandb.log(metrics)
            torch.save({'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'config': config}, os.path.join(wandb.run.dir, 'model.pt'))
if __name__ == '__main__':
    if not config.local:
        # Seed wandb's config with the explicit hyperparameters, then let
        # the full config object override/extend them.
        defaults = dict(
            lr=config.lr,
            alpha=config.alpha,
            beta=config.beta,
            weight_decay=config.weight_decay,
            batch_size=config.batch_size,
            epochs=config.epochs,
            rho=config.rho
        )
        defaults.update(vars(config))
        # alpha == beta == 0 disables the regularizer -> baseline run
        tags = ['baseline'] if (config.alpha == 0 and config.beta == 0) else ['abs']
        tags.append(str(config.rho))
        wandb.init(
            config=defaults,
            project='EnD-cvpr21',
            anonymous='allow',
            name=f'biased-mnist-rho{str(config.rho)}-{tags[0]}-valid',
            tags=tags,
            group=tags[0]
        )
    device = torch.device(config.device)
    main(config)
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"wandb.log",
"torch.enable_grad",
"torch.max",
"tqdm.tqdm",
"torch.optim.lr_scheduler.StepLR",
"random.seed",
"os.path.join",
"collections.defaultdict",
"numpy.random.seed",
"torch.nn.functional.cross_entropy",
"models.simple_convnet",
"to... | [((403, 422), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (415, 422), False, 'import torch\n'), ((472, 497), 'torch.max', 'torch.max', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (481, 497), False, 'import torch\n'), ((775, 804), 'tqdm.tqdm', 'tqdm', (['dataloader'], {'leave': '(False)'}), '(dataloader, leave=False)\n', (779, 804), False, 'from tqdm import tqdm\n'), ((1783, 1812), 'tqdm.tqdm', 'tqdm', (['dataloader'], {'leave': '(False)'}), '(dataloader, leave=False)\n', (1787, 1812), False, 'from tqdm import tqdm\n'), ((2371, 2388), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2382, 2388), False, 'import random\n'), ((2438, 2458), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2452, 2458), True, 'import numpy as np\n'), ((2463, 2491), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (2485, 2491), False, 'import torch\n'), ((2496, 2528), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (2522, 2528), False, 'import torch\n'), ((2622, 2645), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2639, 2645), False, 'import torch\n'), ((3273, 3296), 'models.simple_convnet', 'models.simple_convnet', ([], {}), '()\n', (3294, 3296), False, 'import models\n'), ((3444, 3530), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(20)', 'gamma': '(0.1)', 'verbose': '(True)'}), '(optimizer, step_size=20, gamma=0.1, verbose\n =True)\n', (3475, 3530), False, 'import torch\n'), ((3904, 3922), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (3915, 3922), False, 'from collections import defaultdict\n'), ((6519, 6546), 'torch.device', 'torch.device', (['config.device'], {}), '(config.device)\n', (6531, 6546), False, 'import torch\n'), ((3641, 3673), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['outputs', 'labels'], {}), '(outputs, 
labels)\n', (3656, 3673), True, 'import torch.nn.functional as F\n'), ((947, 966), 'torch.enable_grad', 'torch.enable_grad', ([], {}), '()\n', (964, 966), False, 'import torch\n'), ((1925, 1940), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1938, 1940), False, 'import torch\n'), ((5536, 5554), 'wandb.log', 'wandb.log', (['metrics'], {}), '(metrics)\n', (5545, 5554), False, 'import wandb\n'), ((2733, 2756), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (2751, 2756), False, 'import os\n'), ((2916, 2939), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (2934, 2939), False, 'import os\n'), ((3095, 3118), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (3113, 3118), False, 'import os\n'), ((5664, 5703), 'os.path.join', 'os.path.join', (['wandb.run.dir', '"""model.pt"""'], {}), "(wandb.run.dir, 'model.pt')\n", (5676, 5703), False, 'import os\n')] |
# coding: utf-8
# Gather breast cancer data
from sklearn.datasets import load_breast_cancer
breast_cancer = load_breast_cancer()
breast_cancer_data = breast_cancer.data
breast_cancer_labels = breast_cancer.target
# Prepare data as pandas dataframe
import numpy as np
labels = np.reshape(breast_cancer_labels, (569, 1))
final_breast_cancer_data = np.concatenate([breast_cancer_data, labels], axis=1)
import pandas as pd
breast_cancer_dataset = pd.DataFrame(final_breast_cancer_data)
features = breast_cancer.feature_names
features_labels = np.append(features, 'label')
breast_cancer_dataset.columns = features_labels
# Optional: replace the numeric labels with medical terminology.
# Bug fix in the commented snippet: sklearn encodes the target as
# 0 = malignant, 1 = benign (see load_breast_cancer docs); the original
# mapping was reversed.
# breast_cancer_dataset['label'].replace(0, 'Malignant', inplace=True)
# breast_cancer_dataset['label'].replace(1, 'Benign', inplace=True)
# For simplicity reduce dataset to a few relevant features only
from sklearn.preprocessing import StandardScaler
X, Y = breast_cancer_dataset.drop(columns='label'), breast_cancer_dataset['label']
X_norm = StandardScaler().fit_transform(X)
# Keep the top 10% of features ranked by univariate F-regression score
from sklearn.feature_selection import SelectPercentile, f_regression
selector = SelectPercentile(f_regression, percentile=10)
X_new = selector.fit_transform(X_norm, Y)
feature_support = selector.get_support()
selected_features = X.loc[:, feature_support].columns
breast_cancer_dataset = breast_cancer_dataset[np.append(selected_features, 'label')]
# Split data for training and testing
from sklearn.model_selection import train_test_split
split = 0.3
breast_cancer_dataset_train, breast_cancer_dataset_test = train_test_split(breast_cancer_dataset, test_size=split)
X_train, Y_train = breast_cancer_dataset_train.drop(columns='label'), breast_cancer_dataset_train['label']
X_test, Y_test = breast_cancer_dataset_test.drop(columns='label'), breast_cancer_dataset_test['label']
# Train a simple decision tree model and then save it using pickle
from sklearn import tree
model = tree.DecisionTreeClassifier(criterion="entropy", max_depth=5).fit(X_train, Y_train)
import pickle
# Bug fix: use a context manager so the file handle is closed; the
# original passed an open() result straight to pickle.dump and leaked it.
with open('./myModel.pkl', 'wb') as model_file:
    pickle.dump(model, model_file)
"numpy.reshape",
"sklearn.model_selection.train_test_split",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.datasets.load_breast_cancer",
"numpy.append",
"sklearn.preprocessing.StandardScaler",
"numpy.concatenate",
"sklearn.feature_selection.SelectPercentile",
"pandas.DataFrame"
] | [((110, 130), 'sklearn.datasets.load_breast_cancer', 'load_breast_cancer', ([], {}), '()\n', (128, 130), False, 'from sklearn.datasets import load_breast_cancer\n'), ((282, 324), 'numpy.reshape', 'np.reshape', (['breast_cancer_labels', '(569, 1)'], {}), '(breast_cancer_labels, (569, 1))\n', (292, 324), True, 'import numpy as np\n'), ((350, 402), 'numpy.concatenate', 'np.concatenate', (['[breast_cancer_data, labels]'], {'axis': '(1)'}), '([breast_cancer_data, labels], axis=1)\n', (364, 402), True, 'import numpy as np\n'), ((446, 484), 'pandas.DataFrame', 'pd.DataFrame', (['final_breast_cancer_data'], {}), '(final_breast_cancer_data)\n', (458, 484), True, 'import pandas as pd\n'), ((542, 570), 'numpy.append', 'np.append', (['features', '"""label"""'], {}), "(features, 'label')\n", (551, 570), True, 'import numpy as np\n'), ((1175, 1220), 'sklearn.feature_selection.SelectPercentile', 'SelectPercentile', (['f_regression'], {'percentile': '(10)'}), '(f_regression, percentile=10)\n', (1191, 1220), False, 'from sklearn.feature_selection import SelectPercentile, f_regression\n'), ((1607, 1663), 'sklearn.model_selection.train_test_split', 'train_test_split', (['breast_cancer_dataset'], {'test_size': 'split'}), '(breast_cancer_dataset, test_size=split)\n', (1623, 1663), False, 'from sklearn.model_selection import train_test_split\n'), ((1405, 1442), 'numpy.append', 'np.append', (['selected_features', '"""label"""'], {}), "(selected_features, 'label')\n", (1414, 1442), True, 'import numpy as np\n'), ((1059, 1075), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1073, 1075), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1978, 2039), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {'criterion': '"""entropy"""', 'max_depth': '(5)'}), "(criterion='entropy', max_depth=5)\n", (2005, 2039), False, 'from sklearn import tree\n')] |
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
from torch import save as tsave
from .general_functions import create_dir
class Logger:
    """Track per-episode rewards/losses during training, periodically print
    summary statistics, checkpoint models, and plot learning curves."""

    def __init__(
        self,
        save_path="",
        save_every=100,
        save_best=False,
        log_every=50,
        log_style="block",
        **training_kwargs
    ):
        # Directory that receives checkpoints, pickles and figures.
        self.save_path = save_path
        create_dir(save_path)
        self._save_every = save_every
        self._save_best = save_best
        self.best_reward = -np.inf
        self._rewards = []
        self._losses = []
        self._block_rewards = []
        self._block_losses = []
        self.random_rewards = []
        self.episode = 0
        self.log_every = log_every
        self.log_style = log_style
        # Extra metadata stored alongside the best-model checkpoint.
        self.training_kwargs = training_kwargs

    def clear(self):
        """Reset all accumulated statistics and the episode counter."""
        self._rewards, self._losses = [], []
        self._block_rewards, self._block_losses = [], []
        self.random_rewards = []
        self.episode = 0

    def update(self, reward, loss, model):
        """Record one episode's reward/loss; report and checkpoint when due."""
        self.episode += 1
        self._block_rewards.append(reward)
        self._block_losses.append(loss)
        if self.episode % self.log_every == 0:
            self.report()
        if self._save_best and reward > self.best_reward:
            # New best episode: save the weights together with training metadata.
            self.best_reward = reward
            self.training_kwargs["model_state_dict"] = model.state_dict()
            tsave(self.training_kwargs, os.path.join(self.save_path, "best_model.pth"))
        if self.episode % self._save_every == 0:
            tsave(model, os.path.join(self.save_path, "{}.pth").format(self.episode))

    def report(self):
        """Print mean +/- standard error of loss and reward for the window."""
        if self.log_style == "continuous":
            losses, rewards = self._losses, self._rewards
        elif self.log_style == "block":
            losses, rewards = self._block_losses, self._block_rewards
            # Fold the finished block into the full history and start fresh.
            self._losses.extend(losses)
            self._block_losses = []
            self._rewards.extend(rewards)
            self._block_rewards = []
        else:
            raise RuntimeError("Log style must be 'continuous' or 'block'")
        loss_mean = np.mean(losses)
        loss_se = np.std(losses) / np.sqrt(len(losses))
        reward_mean = np.mean(rewards)
        reward_se = np.std(rewards) / np.sqrt(len(rewards))
        print(f"\nEpisode {self.episode}")
        print("Loss: {:.3f} +/- {:.1f}".format(loss_mean, loss_se))
        print("Reward: {:.3f} +/- {:.1f}".format(reward_mean, reward_se))

    def save_data(self):
        """Pickle the accumulated reward and loss histories to disk."""
        targets = (
            ("temp_rewards.pkl", self._rewards),
            ("temp_losses.pkl", self._losses),
        )
        for fname, payload in targets:
            with open(os.path.join(self.save_path, fname), "wb") as fh:
                pickle.dump(payload, fh)

    def save_model(self, model, name):
        """Save *model* under *name*, appending '.pth' when missing."""
        if not name.endswith(".pth"):
            name = name + ".pth"
        tsave(model, os.path.join(self.save_path, name))

    def plot_reward(self, sliding_window=50, show=False, save=False):
        """Plot smoothed per-episode rewards (plus random baseline, if any)."""
        if self.random_rewards:
            baseline = self._moving_average(self.random_rewards, sliding_window)
            plt.plot(range(len(baseline)), baseline, label="Random actions")
        smoothed = self._moving_average(self._rewards, sliding_window)
        plt.plot(range(len(smoothed)), smoothed, label="DQN")
        plt.xlabel("Episode")
        plt.ylabel("Total episode reward")
        plt.legend()
        if save:
            plt.savefig(os.path.join(self.save_path, "rewards.png"))
        if show:
            plt.show()

    def plot_reward_continuous(self, show=False, save=False):
        """Plot cumulative reward over episodes (plus random baseline, if any)."""
        if self.random_rewards:
            plt.plot(
                range(len(self.random_rewards)),
                np.cumsum(self.random_rewards),
                label="Random actions",
            )
        plt.plot(range(len(self._rewards)), np.cumsum(self._rewards), label="DQN")
        plt.xlabel("Episode")
        plt.ylabel("Cumulative reward")
        plt.legend()
        if save:
            plt.savefig(os.path.join(self.save_path, "cumulative_rewards.png"))
        if show:
            plt.show()

    @staticmethod
    def _moving_average(interval, window_size):
        """Centred moving average of *interval* over *window_size* samples."""
        kernel = np.full(int(window_size), 1.0 / float(window_size))
        return np.convolve(interval, kernel, "same")
| [
"numpy.mean",
"numpy.convolve",
"pickle.dump",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"numpy.std",
"numpy.cumsum",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((2202, 2217), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (2209, 2217), True, 'import numpy as np\n'), ((2297, 2313), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (2304, 2313), True, 'import numpy as np\n'), ((3417, 3438), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode"""'], {}), "('Episode')\n", (3427, 3438), True, 'import matplotlib.pyplot as plt\n'), ((3447, 3481), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total episode reward"""'], {}), "('Total episode reward')\n", (3457, 3481), True, 'import matplotlib.pyplot as plt\n'), ((3490, 3502), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3500, 3502), True, 'import matplotlib.pyplot as plt\n'), ((3990, 4011), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode"""'], {}), "('Episode')\n", (4000, 4011), True, 'import matplotlib.pyplot as plt\n'), ((4020, 4051), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative reward"""'], {}), "('Cumulative reward')\n", (4030, 4051), True, 'import matplotlib.pyplot as plt\n'), ((4060, 4072), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4070, 4072), True, 'import matplotlib.pyplot as plt\n'), ((4358, 4395), 'numpy.convolve', 'np.convolve', (['interval', 'window', '"""same"""'], {}), "(interval, window, 'same')\n", (4369, 4395), True, 'import numpy as np\n'), ((2236, 2250), 'numpy.std', 'np.std', (['losses'], {}), '(losses)\n', (2242, 2250), True, 'import numpy as np\n'), ((2334, 2349), 'numpy.std', 'np.std', (['rewards'], {}), '(rewards)\n', (2340, 2349), True, 'import numpy as np\n'), ((2686, 2715), 'pickle.dump', 'pickle.dump', (['self._rewards', 'f'], {}), '(self._rewards, f)\n', (2697, 2715), False, 'import pickle\n'), ((2807, 2835), 'pickle.dump', 'pickle.dump', (['self._losses', 'f'], {}), '(self._losses, f)\n', (2818, 2835), False, 'import pickle\n'), ((2962, 2996), 'os.path.join', 'os.path.join', (['self.save_path', 'name'], {}), '(self.save_path, name)\n', (2974, 2996), False, 'import 
os\n'), ((3620, 3630), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3628, 3630), True, 'import matplotlib.pyplot as plt\n'), ((3943, 3967), 'numpy.cumsum', 'np.cumsum', (['self._rewards'], {}), '(self._rewards)\n', (3952, 3967), True, 'import numpy as np\n'), ((4201, 4211), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4209, 4211), True, 'import matplotlib.pyplot as plt\n'), ((1469, 1515), 'os.path.join', 'os.path.join', (['self.save_path', '"""best_model.pth"""'], {}), "(self.save_path, 'best_model.pth')\n", (1481, 1515), False, 'import os\n'), ((2612, 2660), 'os.path.join', 'os.path.join', (['self.save_path', '"""temp_rewards.pkl"""'], {}), "(self.save_path, 'temp_rewards.pkl')\n", (2624, 2660), False, 'import os\n'), ((2734, 2781), 'os.path.join', 'os.path.join', (['self.save_path', '"""temp_losses.pkl"""'], {}), "(self.save_path, 'temp_losses.pkl')\n", (2746, 2781), False, 'import os\n'), ((3545, 3588), 'os.path.join', 'os.path.join', (['self.save_path', '"""rewards.png"""'], {}), "(self.save_path, 'rewards.png')\n", (3557, 3588), False, 'import os\n'), ((3813, 3843), 'numpy.cumsum', 'np.cumsum', (['self.random_rewards'], {}), '(self.random_rewards)\n', (3822, 3843), True, 'import numpy as np\n'), ((4115, 4169), 'os.path.join', 'os.path.join', (['self.save_path', '"""cumulative_rewards.png"""'], {}), "(self.save_path, 'cumulative_rewards.png')\n", (4127, 4169), False, 'import os\n'), ((1592, 1630), 'os.path.join', 'os.path.join', (['self.save_path', '"""{}.pth"""'], {}), "(self.save_path, '{}.pth')\n", (1604, 1630), False, 'import os\n')] |
"""
h2o2_mk2012_ad.py
Hydrogen peroxide, H2O2, ground state surface from
Ref [1]_. The coefficients are available from the references
supplementary information as the 'adiabatic PES', which
corresponds to the "V+C+R+H+D" results.
The surface is implemented in internal coordinates.
X1 ... O1 -- H1 bond length (Angstroms)
X2 ... O2 -- H2 bond length ( " " )
X3 ... O1 -- O2 bond length ( " " )
X4 ... O2-O1-H1 bond angle (degrees)
X5 ... O1-O2-H2 bond angle ( " " )
X6 ... dihedral angle ( " " )
References
----------
.. [1] <NAME> and <NAME>. J. Comp. Chem. 34, 337-344 (2013).
https://doi.org/10.1002/jcc.23137
"""
import nitrogen as n2
import nitrogen.autodiff.forward as adf
import numpy as np
def Vfun(X, deriv = 0, out = None, var = None):
    """Evaluate the adiabatic H2O2 potential energy surface.

    Coordinate order: r1, r2, R, a1, a2, tau — the two O-H bond lengths
    and the O-O bond length in Angstroms, the two bond angles and the
    dihedral in degrees. The energy is returned via ``n2.dfun.adf2array``.
    """
    r1, r2, R, a1, a2, tau = n2.dfun.X2adf(X, deriv, var)
    # Equilibrium reference geometry (Angstroms / degrees).
    Re = 1.45538654
    re = 0.96257063
    ae = 101.08307909
    # Displacement coordinates fed to the polynomial expansion:
    # Simons-Parr-Finlan stretches, then bends/dihedral in radians.
    coords = [
        (r1 - re) / r1,
        (r2 - re) / r2,
        (R - Re) / R,
        (a1 - ae) * np.pi / 180.0,
        (a2 - ae) * np.pi / 180.0,
        tau * np.pi / 180.0,
    ]
    # Expansion is tabulated in hartree; convert via n2.constants.Eh.
    energy = calcsurf(coords) * n2.constants.Eh
    return n2.dfun.adf2array([energy], out)
######################################
#
# Module-scope PES DFun object:
# nf = 1 output (the energy), nx = 6 input coordinates
# (r1, r2, R, a1, a2, tau — see Vfun).
#
PES = n2.dfun.DFun(Vfun, nf = 1, nx = 6)
#
#
######################################
def calcsurf(q):
    """Evaluate the polynomial/Fourier expansion of the surface.

    ``q`` holds the five stretch/bend displacement coordinates followed
    by the dihedral angle in radians; the energy (hartree) is the sum
    over the tabulated ``powers``/``coeffs`` terms.
    """
    max_pow = [5, 5, 5, 6, 6, 6]  # last entry: highest cosine frequency
    basis = []
    # Powers 0 .. max_pow[i] of each of the five displacement coordinates.
    for i in range(5):
        col = [adf.const_like(1.0, q[i]), q[i]]
        for p in range(2, max_pow[i] + 1):
            col.append(col[1] * col[p - 1])  # q[i] ** p
        basis.append(col)
    # Cosine (Fourier) basis in the dihedral angle: cos(n * tau).
    basis.append([adf.cos(n * q[5]) for n in range(max_pow[5] + 1)])
    v = 0.0
    # Same left-to-right product order as the tabulation expects.
    for term, c in zip(powers, coeffs):
        prod = c
        for j in range(6):
            prod = prod * basis[j][term[j]]
        v += prod
    return v
powers = np.array([
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 2],
[0, 0, 0, 0, 0, 3],
[0, 0, 0, 0, 0, 4],
[0, 0, 0, 0, 0, 5],
[0, 0, 0, 0, 0, 6],
[0, 0, 2, 0, 0, 0],
[2, 0, 0, 0, 0, 0],
[0, 2, 0, 0, 0, 0],
[0, 0, 0, 2, 0, 0],
[0, 0, 0, 0, 2, 0],
[1, 0, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 1, 0, 1, 0],
[1, 1, 0, 0, 0, 0],
[1, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0],
[0, 0, 3, 0, 0, 0],
[3, 0, 0, 0, 0, 0],
[0, 3, 0, 0, 0, 0],
[0, 0, 0, 3, 0, 0],
[0, 0, 0, 0, 3, 0],
[1, 0, 2, 0, 0, 0],
[0, 1, 2, 0, 0, 0],
[0, 0, 2, 1, 0, 0],
[0, 0, 2, 0, 1, 0],
[2, 0, 1, 0, 0, 0],
[0, 2, 1, 0, 0, 0],
[0, 0, 1, 2, 0, 0],
[0, 0, 1, 0, 2, 0],
[1, 2, 0, 0, 0, 0],
[2, 1, 0, 0, 0, 0],
[1, 0, 0, 2, 0, 0],
[0, 1, 0, 0, 2, 0],
[2, 0, 0, 1, 0, 0],
[0, 2, 0, 0, 1, 0],
[1, 0, 0, 0, 2, 0],
[0, 1, 0, 2, 0, 0],
[2, 0, 0, 0, 1, 0],
[0, 2, 0, 1, 0, 0],
[0, 0, 0, 1, 2, 0],
[0, 0, 0, 2, 1, 0],
[1, 1, 1, 0, 0, 0],
[1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 1, 0],
[1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0],
[1, 1, 0, 1, 0, 0],
[1, 1, 0, 0, 1, 0],
[1, 0, 0, 1, 1, 0],
[0, 1, 0, 1, 1, 0],
[0, 0, 4, 0, 0, 0],
[4, 0, 0, 0, 0, 0],
[0, 4, 0, 0, 0, 0],
[0, 0, 0, 4, 0, 0],
[0, 0, 0, 0, 4, 0],
[2, 0, 2, 0, 0, 0],
[0, 2, 2, 0, 0, 0],
[0, 0, 2, 2, 0, 0],
[0, 0, 2, 0, 2, 0],
[2, 2, 0, 0, 0, 0],
[2, 0, 0, 2, 0, 0],
[0, 2, 0, 0, 2, 0],
[0, 0, 0, 2, 2, 0],
[1, 0, 3, 0, 0, 0],
[0, 1, 3, 0, 0, 0],
[0, 0, 3, 1, 0, 0],
[0, 0, 3, 0, 1, 0],
[3, 0, 0, 1, 0, 0],
[0, 3, 0, 0, 1, 0],
[3, 0, 1, 0, 0, 0],
[0, 3, 1, 0, 0, 0],
[0, 0, 1, 3, 0, 0],
[0, 0, 1, 0, 3, 0],
[1, 3, 0, 0, 0, 0],
[3, 1, 0, 0, 0, 0],
[1, 0, 0, 3, 0, 0],
[0, 1, 0, 0, 3, 0],
[1, 0, 0, 0, 3, 0],
[0, 1, 0, 3, 0, 0],
[0, 0, 0, 1, 3, 0],
[0, 0, 0, 3, 1, 0],
[1, 1, 2, 0, 0, 0],
[1, 0, 2, 1, 0, 0],
[0, 1, 2, 0, 1, 0],
[1, 0, 2, 0, 1, 0],
[0, 1, 2, 1, 0, 0],
[0, 0, 2, 1, 1, 0],
[2, 0, 0, 1, 1, 0],
[0, 2, 0, 1, 1, 0],
[1, 0, 1, 2, 0, 0],
[0, 1, 1, 0, 2, 0],
[1, 0, 0, 1, 2, 0],
[0, 1, 0, 2, 1, 0],
[1, 0, 0, 2, 1, 0],
[0, 1, 0, 1, 2, 0],
[0, 0, 5, 0, 0, 0],
[5, 0, 0, 0, 0, 0],
[0, 5, 0, 0, 0, 0],
[0, 0, 0, 5, 0, 0],
[0, 0, 0, 0, 5, 0],
[0, 0, 0, 6, 0, 0],
[0, 0, 0, 0, 6, 0],
[0, 0, 0, 4, 1, 0],
[0, 0, 0, 1, 4, 0],
[0, 0, 0, 3, 2, 0],
[0, 0, 0, 2, 3, 0],
[0, 0, 1, 4, 0, 0],
[0, 0, 1, 0, 4, 0],
[0, 0, 2, 3, 0, 0],
[0, 0, 2, 0, 3, 0],
[1, 0, 0, 4, 0, 0],
[0, 1, 0, 0, 4, 0],
[2, 0, 0, 3, 0, 0],
[0, 2, 0, 0, 3, 0],
[0, 0, 1, 0, 0, 1],
[1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 1, 1],
[0, 0, 2, 0, 0, 1],
[2, 0, 0, 0, 0, 1],
[0, 2, 0, 0, 0, 1],
[0, 0, 0, 2, 0, 1],
[0, 0, 0, 0, 2, 1],
[1, 0, 1, 0, 0, 1],
[0, 1, 1, 0, 0, 1],
[0, 0, 1, 1, 0, 1],
[0, 0, 1, 0, 1, 1],
[1, 1, 0, 0, 0, 1],
[1, 0, 0, 1, 0, 1],
[0, 1, 0, 0, 1, 1],
[1, 0, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 3, 0, 0, 1],
[3, 0, 0, 0, 0, 1],
[0, 3, 0, 0, 0, 1],
[0, 0, 0, 3, 0, 1],
[0, 0, 0, 0, 3, 1],
[1, 0, 2, 0, 0, 1],
[0, 1, 2, 0, 0, 1],
[0, 0, 2, 1, 0, 1],
[0, 0, 2, 0, 1, 1],
[0, 0, 1, 2, 0, 1],
[0, 0, 1, 0, 2, 1],
[1, 2, 0, 0, 0, 1],
[2, 1, 0, 0, 0, 1],
[1, 0, 0, 2, 0, 1],
[0, 1, 0, 0, 2, 1],
[1, 0, 0, 0, 2, 1],
[0, 1, 0, 2, 0, 1],
[0, 0, 0, 1, 2, 1],
[0, 0, 0, 2, 1, 1],
[1, 1, 1, 0, 0, 1],
[1, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 1, 1],
[0, 0, 0, 4, 0, 1],
[0, 0, 0, 0, 4, 1],
[0, 0, 0, 5, 0, 1],
[0, 0, 0, 0, 5, 1],
[0, 0, 1, 3, 0, 1],
[0, 0, 1, 0, 3, 1],
[0, 0, 2, 2, 0, 1],
[0, 0, 2, 0, 2, 1],
[0, 0, 0, 1, 3, 1],
[0, 0, 0, 3, 1, 1],
[0, 0, 0, 2, 2, 1],
[1, 0, 0, 3, 0, 1],
[0, 1, 0, 0, 3, 1],
[1, 0, 0, 0, 3, 1],
[0, 1, 0, 3, 0, 1],
[2, 0, 0, 2, 0, 1],
[0, 2, 0, 0, 2, 1],
[2, 0, 0, 0, 2, 1],
[0, 2, 0, 2, 0, 1],
[1, 0, 2, 1, 0, 1],
[0, 1, 2, 0, 1, 1],
[2, 0, 1, 1, 0, 1],
[0, 2, 1, 0, 1, 1],
[1, 0, 1, 2, 0, 1],
[0, 1, 1, 0, 2, 1],
[0, 0, 1, 0, 0, 2],
[1, 0, 0, 0, 0, 2],
[0, 1, 0, 0, 0, 2],
[0, 0, 0, 1, 0, 2],
[0, 0, 0, 0, 1, 2],
[0, 0, 2, 0, 0, 2],
[2, 0, 0, 0, 0, 2],
[0, 2, 0, 0, 0, 2],
[0, 0, 0, 2, 0, 2],
[0, 0, 0, 0, 2, 2],
[1, 0, 1, 0, 0, 2],
[0, 1, 1, 0, 0, 2],
[0, 0, 1, 1, 0, 2],
[0, 0, 1, 0, 1, 2],
[1, 1, 0, 0, 0, 2],
[1, 0, 0, 1, 0, 2],
[0, 1, 0, 0, 1, 2],
[1, 0, 0, 0, 1, 2],
[0, 1, 0, 1, 0, 2],
[0, 0, 0, 1, 1, 2],
[0, 0, 3, 0, 0, 2],
[3, 0, 0, 0, 0, 2],
[0, 3, 0, 0, 0, 2],
[0, 0, 0, 3, 0, 2],
[0, 0, 0, 0, 3, 2],
[0, 0, 0, 2, 1, 2],
[0, 0, 0, 1, 2, 2],
[0, 0, 1, 2, 0, 2],
[0, 0, 1, 0, 2, 2],
[1, 0, 2, 0, 0, 2],
[0, 1, 2, 0, 0, 2],
[2, 0, 1, 0, 0, 2],
[0, 2, 1, 0, 0, 2],
[0, 0, 0, 4, 0, 2],
[0, 0, 0, 0, 4, 2],
[0, 0, 0, 1, 3, 2],
[0, 0, 0, 3, 1, 2],
[0, 0, 0, 2, 2, 2],
[2, 0, 0, 1, 0, 2],
[0, 2, 0, 0, 1, 2],
[1, 0, 0, 2, 0, 2],
[0, 1, 0, 0, 2, 2],
[1, 0, 0, 0, 2, 2],
[0, 1, 0, 2, 0, 2],
[1, 0, 1, 1, 0, 2],
[0, 1, 1, 0, 1, 2],
[1, 0, 1, 0, 1, 2],
[0, 1, 1, 1, 0, 2],
[0, 0, 1, 3, 0, 2],
[0, 0, 1, 0, 3, 2],
[0, 0, 1, 0, 0, 3],
[1, 0, 0, 0, 0, 3],
[0, 1, 0, 0, 0, 3],
[0, 0, 0, 1, 0, 3],
[0, 0, 0, 0, 1, 3],
[0, 0, 2, 0, 0, 3],
[2, 0, 0, 0, 0, 3],
[0, 2, 0, 0, 0, 3],
[0, 0, 0, 2, 0, 3],
[0, 0, 0, 0, 2, 3],
[0, 0, 0, 1, 1, 3],
[0, 0, 3, 0, 0, 3],
[0, 0, 0, 3, 0, 3],
[0, 0, 0, 0, 3, 3],
[0, 0, 0, 1, 2, 3],
[0, 0, 0, 2, 1, 3],
[0, 0, 1, 1, 0, 3],
[0, 0, 1, 0, 1, 3],
[1, 0, 0, 1, 0, 3],
[0, 1, 0, 0, 1, 3],
[1, 0, 0, 0, 1, 3],
[0, 1, 0, 1, 0, 3],
[0, 0, 2, 1, 0, 3],
[0, 0, 2, 0, 1, 3],
[0, 0, 1, 0, 0, 4],
[1, 0, 0, 0, 0, 4],
[0, 1, 0, 0, 0, 4],
[0, 0, 0, 1, 0, 4],
[0, 0, 0, 0, 1, 4],
[0, 0, 2, 0, 0, 4],
[0, 0, 0, 2, 0, 4],
[0, 0, 0, 0, 2, 4],
[0, 0, 0, 1, 1, 4],
[0, 0, 1, 1, 0, 4],
[0, 0, 1, 0, 1, 4],
[0, 0, 1, 0, 0, 5],
[1, 0, 0, 0, 0, 5],
[0, 1, 0, 0, 0, 5],
[0, 0, 0, 1, 0, 5],
[0, 0, 0, 0, 1, 5]
])
coeffs = np.array([
0.00396159 ,
0.00481490 ,
0.00318934 ,
0.00027018 ,
0.00005307 ,
0.00001047 ,
0.00000198 ,
1.07103383 ,
0.85671785 ,
0.85671785 ,
0.11105339 ,
0.11105339 ,
-0.03876908 ,
-0.03876908 ,
0.18430247 ,
0.18430247 ,
0.00036727 ,
-0.00663756 ,
-0.00663756 ,
-0.00196944 ,
-0.00196944 ,
0.01747081 ,
-1.18343510 ,
-0.23735539 ,
-0.23735539 ,
-0.02611900 ,
-0.02611900 ,
-0.15438002 ,
-0.15438002 ,
-0.35516368 ,
-0.35516368 ,
0.07899067 ,
0.07899067 ,
-0.26776532 ,
-0.26776532 ,
-0.00406083 ,
-0.00406083 ,
-0.01925971 ,
-0.01925971 ,
-0.01107079 ,
-0.01107079 ,
-0.00816282 ,
-0.00816282 ,
0.00337183 ,
0.00337183 ,
-0.01352772 ,
-0.01352772 ,
0.01289325 ,
-0.07449808 ,
-0.07449808 ,
-0.03379136 ,
-0.03379136 ,
-0.01672271 ,
-0.00495469 ,
-0.00495469 ,
-0.00453600 ,
-0.00453600 ,
-0.91033894 ,
-0.38779590 ,
-0.38779590 ,
-0.00503640 ,
-0.00503640 ,
-0.46416302 ,
-0.46416302 ,
0.07527264 ,
0.07527264 ,
-0.00799835 ,
-0.04029912 ,
-0.04029912 ,
0.00364088 ,
0.47561739 ,
0.47561739 ,
-0.41647359 ,
-0.41647359 ,
-0.06425296 ,
-0.06425296 ,
0.26125142 ,
0.26125142 ,
0.10336257 ,
0.10336257 ,
-0.01680055 ,
-0.01680055 ,
0.04984239 ,
0.04984239 ,
0.00354416 ,
0.00354416 ,
0.00452574 ,
0.00452574 ,
-0.05423804 ,
0.06564708 ,
0.06564708 ,
0.03801095 ,
0.03801095 ,
-0.09161667 ,
-0.01589965 ,
-0.01589965 ,
0.01341203 ,
0.01341203 ,
-0.01342635 ,
-0.01342635 ,
-0.00671149 ,
-0.00671149 ,
-0.73562441 ,
-0.30455894 ,
-0.30455894 ,
0.00582616 ,
0.00582616 ,
-0.00547701 ,
-0.00547701 ,
0.00280896 ,
0.00280896 ,
0.00674263 ,
0.00674263 ,
0.06845098 ,
0.06845098 ,
0.04193747 ,
0.04193747 ,
-0.05190213 ,
-0.05190213 ,
0.04168912 ,
0.04168912 ,
-0.01682379 ,
-0.00098759 ,
-0.00098759 ,
-0.01176361 ,
-0.01176361 ,
0.01742527 ,
-0.00533832 ,
-0.00533832 ,
0.00542779 ,
0.00542779 ,
0.00263732 ,
0.00263732 ,
0.01859551 ,
0.01859551 ,
0.00511361 ,
-0.00973834 ,
-0.00973834 ,
-0.00511467 ,
-0.00511467 ,
-0.01356281 ,
0.00352911 ,
-0.00964293 ,
-0.00964293 ,
-0.00113452 ,
-0.00113452 ,
0.01028106 ,
0.01028106 ,
-0.03748145 ,
-0.03748145 ,
-0.00708628 ,
-0.00708628 ,
0.00742831 ,
0.00742831 ,
0.00419281 ,
0.00419281 ,
-0.00555253 ,
-0.00555253 ,
-0.02044897 ,
-0.02044897 ,
-0.02429936 ,
0.00148383 ,
0.00148383 ,
0.00050075 ,
0.00050075 ,
0.00149142 ,
0.00149142 ,
0.02232416 ,
0.02232416 ,
0.07164353 ,
0.07164353 ,
0.01644870 ,
0.01644870 ,
0.01815537 ,
0.01605919 ,
0.01605919 ,
0.00735028 ,
0.00735028 ,
0.02670612 ,
0.02670612 ,
0.01548269 ,
0.01548269 ,
-0.13042235 ,
-0.13042235 ,
0.07364926 ,
0.07364926 ,
-0.08874645 ,
-0.08874645 ,
-0.01177248 ,
0.00172223 ,
0.00172223 ,
-0.00154074 ,
-0.00154074 ,
0.01965194 ,
0.00409752 ,
0.00409752 ,
0.00301573 ,
0.00301573 ,
-0.00734859 ,
-0.00734859 ,
0.00350247 ,
0.00350247 ,
-0.00037121 ,
0.00249543 ,
0.00249543 ,
-0.00168725 ,
-0.00168725 ,
0.00914785 ,
-0.02015559 ,
0.00925238 ,
0.00925238 ,
-0.00593037 ,
-0.00593037 ,
-0.01230679 ,
-0.01230679 ,
0.00829575 ,
0.00829575 ,
0.03735453 ,
0.03735453 ,
-0.04328977 ,
-0.04328977 ,
0.00458548 ,
0.00458548 ,
0.00364501 ,
0.00364501 ,
0.00986809 ,
0.01437361 ,
0.01437361 ,
0.00072674 ,
0.00072674 ,
-0.00158409 ,
-0.00158409 ,
-0.03961996 ,
-0.03961996 ,
-0.01732246 ,
-0.01732246 ,
0.02668498 ,
0.02668498 ,
-0.00188286 ,
0.00052265 ,
0.00052265 ,
-0.00089442 ,
-0.00089442 ,
0.00481644 ,
0.00031496 ,
0.00031496 ,
0.00103249 ,
0.00103249 ,
0.00224998 ,
-0.00366693 ,
-0.00033429 ,
-0.00033429 ,
-0.00319598 ,
-0.00319598 ,
0.00447145 ,
0.00447145 ,
-0.00147544 ,
-0.00147544 ,
-0.00085521 ,
-0.00085521 ,
-0.01099915 ,
-0.01099915 ,
-0.00042972 ,
0.00013538 ,
0.00013538 ,
-0.00019221 ,
-0.00019221 ,
0.00121114 ,
0.00026755 ,
0.00026755 ,
0.00054596 ,
0.00057513 ,
0.00057513 ,
-0.00009041 ,
0.00002274 ,
0.00002274 ,
-0.00004075 ,
-0.00004075
]) | [
"nitrogen.autodiff.forward.cos",
"nitrogen.dfun.DFun",
"numpy.array",
"nitrogen.dfun.X2adf",
"nitrogen.dfun.adf2array",
"nitrogen.autodiff.forward.const_like"
] | [((1555, 1585), 'nitrogen.dfun.DFun', 'n2.dfun.DFun', (['Vfun'], {'nf': '(1)', 'nx': '(6)'}), '(Vfun, nf=1, nx=6)\n', (1567, 1585), True, 'import nitrogen as n2\n'), ((2432, 8340), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 2], [0, 0, 0, 0, 0,\n 3], [0, 0, 0, 0, 0, 4], [0, 0, 0, 0, 0, 5], [0, 0, 0, 0, 0, 6], [0, 0, \n 2, 0, 0, 0], [2, 0, 0, 0, 0, 0], [0, 2, 0, 0, 0, 0], [0, 0, 0, 2, 0, 0],\n [0, 0, 0, 0, 2, 0], [1, 0, 1, 0, 0, 0], [0, 1, 1, 0, 0, 0], [0, 0, 1, 1,\n 0, 0], [0, 0, 1, 0, 1, 0], [1, 1, 0, 0, 0, 0], [1, 0, 0, 1, 0, 0], [0, \n 1, 0, 0, 1, 0], [1, 0, 0, 0, 1, 0], [0, 1, 0, 1, 0, 0], [0, 0, 0, 1, 1,\n 0], [0, 0, 3, 0, 0, 0], [3, 0, 0, 0, 0, 0], [0, 3, 0, 0, 0, 0], [0, 0, \n 0, 3, 0, 0], [0, 0, 0, 0, 3, 0], [1, 0, 2, 0, 0, 0], [0, 1, 2, 0, 0, 0],\n [0, 0, 2, 1, 0, 0], [0, 0, 2, 0, 1, 0], [2, 0, 1, 0, 0, 0], [0, 2, 1, 0,\n 0, 0], [0, 0, 1, 2, 0, 0], [0, 0, 1, 0, 2, 0], [1, 2, 0, 0, 0, 0], [2, \n 1, 0, 0, 0, 0], [1, 0, 0, 2, 0, 0], [0, 1, 0, 0, 2, 0], [2, 0, 0, 1, 0,\n 0], [0, 2, 0, 0, 1, 0], [1, 0, 0, 0, 2, 0], [0, 1, 0, 2, 0, 0], [2, 0, \n 0, 0, 1, 0], [0, 2, 0, 1, 0, 0], [0, 0, 0, 1, 2, 0], [0, 0, 0, 2, 1, 0],\n [1, 1, 1, 0, 0, 0], [1, 0, 1, 1, 0, 0], [0, 1, 1, 0, 1, 0], [1, 0, 1, 0,\n 1, 0], [0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0], [1, 1, 0, 1, 0, 0], [1, \n 1, 0, 0, 1, 0], [1, 0, 0, 1, 1, 0], [0, 1, 0, 1, 1, 0], [0, 0, 4, 0, 0,\n 0], [4, 0, 0, 0, 0, 0], [0, 4, 0, 0, 0, 0], [0, 0, 0, 4, 0, 0], [0, 0, \n 0, 0, 4, 0], [2, 0, 2, 0, 0, 0], [0, 2, 2, 0, 0, 0], [0, 0, 2, 2, 0, 0],\n [0, 0, 2, 0, 2, 0], [2, 2, 0, 0, 0, 0], [2, 0, 0, 2, 0, 0], [0, 2, 0, 0,\n 2, 0], [0, 0, 0, 2, 2, 0], [1, 0, 3, 0, 0, 0], [0, 1, 3, 0, 0, 0], [0, \n 0, 3, 1, 0, 0], [0, 0, 3, 0, 1, 0], [3, 0, 0, 1, 0, 0], [0, 3, 0, 0, 1,\n 0], [3, 0, 1, 0, 0, 0], [0, 3, 1, 0, 0, 0], [0, 0, 1, 3, 0, 0], [0, 0, \n 1, 0, 3, 0], [1, 3, 0, 0, 0, 0], [3, 1, 0, 0, 0, 0], [1, 0, 0, 3, 0, 0],\n [0, 1, 0, 0, 3, 0], [1, 0, 0, 0, 3, 0], [0, 1, 0, 3, 0, 0], [0, 0, 
0, 1,\n 3, 0], [0, 0, 0, 3, 1, 0], [1, 1, 2, 0, 0, 0], [1, 0, 2, 1, 0, 0], [0, \n 1, 2, 0, 1, 0], [1, 0, 2, 0, 1, 0], [0, 1, 2, 1, 0, 0], [0, 0, 2, 1, 1,\n 0], [2, 0, 0, 1, 1, 0], [0, 2, 0, 1, 1, 0], [1, 0, 1, 2, 0, 0], [0, 1, \n 1, 0, 2, 0], [1, 0, 0, 1, 2, 0], [0, 1, 0, 2, 1, 0], [1, 0, 0, 2, 1, 0],\n [0, 1, 0, 1, 2, 0], [0, 0, 5, 0, 0, 0], [5, 0, 0, 0, 0, 0], [0, 5, 0, 0,\n 0, 0], [0, 0, 0, 5, 0, 0], [0, 0, 0, 0, 5, 0], [0, 0, 0, 6, 0, 0], [0, \n 0, 0, 0, 6, 0], [0, 0, 0, 4, 1, 0], [0, 0, 0, 1, 4, 0], [0, 0, 0, 3, 2,\n 0], [0, 0, 0, 2, 3, 0], [0, 0, 1, 4, 0, 0], [0, 0, 1, 0, 4, 0], [0, 0, \n 2, 3, 0, 0], [0, 0, 2, 0, 3, 0], [1, 0, 0, 4, 0, 0], [0, 1, 0, 0, 4, 0],\n [2, 0, 0, 3, 0, 0], [0, 2, 0, 0, 3, 0], [0, 0, 1, 0, 0, 1], [1, 0, 0, 0,\n 0, 1], [0, 1, 0, 0, 0, 1], [0, 0, 0, 1, 0, 1], [0, 0, 0, 0, 1, 1], [0, \n 0, 2, 0, 0, 1], [2, 0, 0, 0, 0, 1], [0, 2, 0, 0, 0, 1], [0, 0, 0, 2, 0,\n 1], [0, 0, 0, 0, 2, 1], [1, 0, 1, 0, 0, 1], [0, 1, 1, 0, 0, 1], [0, 0, \n 1, 1, 0, 1], [0, 0, 1, 0, 1, 1], [1, 1, 0, 0, 0, 1], [1, 0, 0, 1, 0, 1],\n [0, 1, 0, 0, 1, 1], [1, 0, 0, 0, 1, 1], [0, 1, 0, 1, 0, 1], [0, 0, 0, 1,\n 1, 1], [0, 0, 3, 0, 0, 1], [3, 0, 0, 0, 0, 1], [0, 3, 0, 0, 0, 1], [0, \n 0, 0, 3, 0, 1], [0, 0, 0, 0, 3, 1], [1, 0, 2, 0, 0, 1], [0, 1, 2, 0, 0,\n 1], [0, 0, 2, 1, 0, 1], [0, 0, 2, 0, 1, 1], [0, 0, 1, 2, 0, 1], [0, 0, \n 1, 0, 2, 1], [1, 2, 0, 0, 0, 1], [2, 1, 0, 0, 0, 1], [1, 0, 0, 2, 0, 1],\n [0, 1, 0, 0, 2, 1], [1, 0, 0, 0, 2, 1], [0, 1, 0, 2, 0, 1], [0, 0, 0, 1,\n 2, 1], [0, 0, 0, 2, 1, 1], [1, 1, 1, 0, 0, 1], [1, 0, 0, 1, 1, 1], [0, \n 1, 0, 1, 1, 1], [0, 0, 0, 4, 0, 1], [0, 0, 0, 0, 4, 1], [0, 0, 0, 5, 0,\n 1], [0, 0, 0, 0, 5, 1], [0, 0, 1, 3, 0, 1], [0, 0, 1, 0, 3, 1], [0, 0, \n 2, 2, 0, 1], [0, 0, 2, 0, 2, 1], [0, 0, 0, 1, 3, 1], [0, 0, 0, 3, 1, 1],\n [0, 0, 0, 2, 2, 1], [1, 0, 0, 3, 0, 1], [0, 1, 0, 0, 3, 1], [1, 0, 0, 0,\n 3, 1], [0, 1, 0, 3, 0, 1], [2, 0, 0, 2, 0, 1], [0, 2, 0, 0, 2, 1], [2, \n 0, 0, 0, 2, 1], [0, 2, 0, 2, 0, 1], [1, 0, 2, 1, 0, 1], 
[0, 1, 2, 0, 1,\n 1], [2, 0, 1, 1, 0, 1], [0, 2, 1, 0, 1, 1], [1, 0, 1, 2, 0, 1], [0, 1, \n 1, 0, 2, 1], [0, 0, 1, 0, 0, 2], [1, 0, 0, 0, 0, 2], [0, 1, 0, 0, 0, 2],\n [0, 0, 0, 1, 0, 2], [0, 0, 0, 0, 1, 2], [0, 0, 2, 0, 0, 2], [2, 0, 0, 0,\n 0, 2], [0, 2, 0, 0, 0, 2], [0, 0, 0, 2, 0, 2], [0, 0, 0, 0, 2, 2], [1, \n 0, 1, 0, 0, 2], [0, 1, 1, 0, 0, 2], [0, 0, 1, 1, 0, 2], [0, 0, 1, 0, 1,\n 2], [1, 1, 0, 0, 0, 2], [1, 0, 0, 1, 0, 2], [0, 1, 0, 0, 1, 2], [1, 0, \n 0, 0, 1, 2], [0, 1, 0, 1, 0, 2], [0, 0, 0, 1, 1, 2], [0, 0, 3, 0, 0, 2],\n [3, 0, 0, 0, 0, 2], [0, 3, 0, 0, 0, 2], [0, 0, 0, 3, 0, 2], [0, 0, 0, 0,\n 3, 2], [0, 0, 0, 2, 1, 2], [0, 0, 0, 1, 2, 2], [0, 0, 1, 2, 0, 2], [0, \n 0, 1, 0, 2, 2], [1, 0, 2, 0, 0, 2], [0, 1, 2, 0, 0, 2], [2, 0, 1, 0, 0,\n 2], [0, 2, 1, 0, 0, 2], [0, 0, 0, 4, 0, 2], [0, 0, 0, 0, 4, 2], [0, 0, \n 0, 1, 3, 2], [0, 0, 0, 3, 1, 2], [0, 0, 0, 2, 2, 2], [2, 0, 0, 1, 0, 2],\n [0, 2, 0, 0, 1, 2], [1, 0, 0, 2, 0, 2], [0, 1, 0, 0, 2, 2], [1, 0, 0, 0,\n 2, 2], [0, 1, 0, 2, 0, 2], [1, 0, 1, 1, 0, 2], [0, 1, 1, 0, 1, 2], [1, \n 0, 1, 0, 1, 2], [0, 1, 1, 1, 0, 2], [0, 0, 1, 3, 0, 2], [0, 0, 1, 0, 3,\n 2], [0, 0, 1, 0, 0, 3], [1, 0, 0, 0, 0, 3], [0, 1, 0, 0, 0, 3], [0, 0, \n 0, 1, 0, 3], [0, 0, 0, 0, 1, 3], [0, 0, 2, 0, 0, 3], [2, 0, 0, 0, 0, 3],\n [0, 2, 0, 0, 0, 3], [0, 0, 0, 2, 0, 3], [0, 0, 0, 0, 2, 3], [0, 0, 0, 1,\n 1, 3], [0, 0, 3, 0, 0, 3], [0, 0, 0, 3, 0, 3], [0, 0, 0, 0, 3, 3], [0, \n 0, 0, 1, 2, 3], [0, 0, 0, 2, 1, 3], [0, 0, 1, 1, 0, 3], [0, 0, 1, 0, 1,\n 3], [1, 0, 0, 1, 0, 3], [0, 1, 0, 0, 1, 3], [1, 0, 0, 0, 1, 3], [0, 1, \n 0, 1, 0, 3], [0, 0, 2, 1, 0, 3], [0, 0, 2, 0, 1, 3], [0, 0, 1, 0, 0, 4],\n [1, 0, 0, 0, 0, 4], [0, 1, 0, 0, 0, 4], [0, 0, 0, 1, 0, 4], [0, 0, 0, 0,\n 1, 4], [0, 0, 2, 0, 0, 4], [0, 0, 0, 2, 0, 4], [0, 0, 0, 0, 2, 4], [0, \n 0, 0, 1, 1, 4], [0, 0, 1, 1, 0, 4], [0, 0, 1, 0, 1, 4], [0, 0, 1, 0, 0,\n 5], [1, 0, 0, 0, 0, 5], [0, 1, 0, 0, 0, 5], [0, 0, 0, 1, 0, 5], [0, 0, \n 0, 0, 1, 5]]'], {}), '([[0, 0, 0, 0, 0, 0], [0, 
0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 2], [0, 0,\n 0, 0, 0, 3], [0, 0, 0, 0, 0, 4], [0, 0, 0, 0, 0, 5], [0, 0, 0, 0, 0, 6],\n [0, 0, 2, 0, 0, 0], [2, 0, 0, 0, 0, 0], [0, 2, 0, 0, 0, 0], [0, 0, 0, 2,\n 0, 0], [0, 0, 0, 0, 2, 0], [1, 0, 1, 0, 0, 0], [0, 1, 1, 0, 0, 0], [0, \n 0, 1, 1, 0, 0], [0, 0, 1, 0, 1, 0], [1, 1, 0, 0, 0, 0], [1, 0, 0, 1, 0,\n 0], [0, 1, 0, 0, 1, 0], [1, 0, 0, 0, 1, 0], [0, 1, 0, 1, 0, 0], [0, 0, \n 0, 1, 1, 0], [0, 0, 3, 0, 0, 0], [3, 0, 0, 0, 0, 0], [0, 3, 0, 0, 0, 0],\n [0, 0, 0, 3, 0, 0], [0, 0, 0, 0, 3, 0], [1, 0, 2, 0, 0, 0], [0, 1, 2, 0,\n 0, 0], [0, 0, 2, 1, 0, 0], [0, 0, 2, 0, 1, 0], [2, 0, 1, 0, 0, 0], [0, \n 2, 1, 0, 0, 0], [0, 0, 1, 2, 0, 0], [0, 0, 1, 0, 2, 0], [1, 2, 0, 0, 0,\n 0], [2, 1, 0, 0, 0, 0], [1, 0, 0, 2, 0, 0], [0, 1, 0, 0, 2, 0], [2, 0, \n 0, 1, 0, 0], [0, 2, 0, 0, 1, 0], [1, 0, 0, 0, 2, 0], [0, 1, 0, 2, 0, 0],\n [2, 0, 0, 0, 1, 0], [0, 2, 0, 1, 0, 0], [0, 0, 0, 1, 2, 0], [0, 0, 0, 2,\n 1, 0], [1, 1, 1, 0, 0, 0], [1, 0, 1, 1, 0, 0], [0, 1, 1, 0, 1, 0], [1, \n 0, 1, 0, 1, 0], [0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0], [1, 1, 0, 1, 0,\n 0], [1, 1, 0, 0, 1, 0], [1, 0, 0, 1, 1, 0], [0, 1, 0, 1, 1, 0], [0, 0, \n 4, 0, 0, 0], [4, 0, 0, 0, 0, 0], [0, 4, 0, 0, 0, 0], [0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 4, 0], [2, 0, 2, 0, 0, 0], [0, 2, 2, 0, 0, 0], [0, 0, 2, 2,\n 0, 0], [0, 0, 2, 0, 2, 0], [2, 2, 0, 0, 0, 0], [2, 0, 0, 2, 0, 0], [0, \n 2, 0, 0, 2, 0], [0, 0, 0, 2, 2, 0], [1, 0, 3, 0, 0, 0], [0, 1, 3, 0, 0,\n 0], [0, 0, 3, 1, 0, 0], [0, 0, 3, 0, 1, 0], [3, 0, 0, 1, 0, 0], [0, 3, \n 0, 0, 1, 0], [3, 0, 1, 0, 0, 0], [0, 3, 1, 0, 0, 0], [0, 0, 1, 3, 0, 0],\n [0, 0, 1, 0, 3, 0], [1, 3, 0, 0, 0, 0], [3, 1, 0, 0, 0, 0], [1, 0, 0, 3,\n 0, 0], [0, 1, 0, 0, 3, 0], [1, 0, 0, 0, 3, 0], [0, 1, 0, 3, 0, 0], [0, \n 0, 0, 1, 3, 0], [0, 0, 0, 3, 1, 0], [1, 1, 2, 0, 0, 0], [1, 0, 2, 1, 0,\n 0], [0, 1, 2, 0, 1, 0], [1, 0, 2, 0, 1, 0], [0, 1, 2, 1, 0, 0], [0, 0, \n 2, 1, 1, 0], [2, 0, 0, 1, 1, 0], [0, 2, 0, 1, 1, 0], [1, 0, 1, 2, 0, 0],\n [0, 1, 1, 0, 2, 0], 
[1, 0, 0, 1, 2, 0], [0, 1, 0, 2, 1, 0], [1, 0, 0, 2,\n 1, 0], [0, 1, 0, 1, 2, 0], [0, 0, 5, 0, 0, 0], [5, 0, 0, 0, 0, 0], [0, \n 5, 0, 0, 0, 0], [0, 0, 0, 5, 0, 0], [0, 0, 0, 0, 5, 0], [0, 0, 0, 6, 0,\n 0], [0, 0, 0, 0, 6, 0], [0, 0, 0, 4, 1, 0], [0, 0, 0, 1, 4, 0], [0, 0, \n 0, 3, 2, 0], [0, 0, 0, 2, 3, 0], [0, 0, 1, 4, 0, 0], [0, 0, 1, 0, 4, 0],\n [0, 0, 2, 3, 0, 0], [0, 0, 2, 0, 3, 0], [1, 0, 0, 4, 0, 0], [0, 1, 0, 0,\n 4, 0], [2, 0, 0, 3, 0, 0], [0, 2, 0, 0, 3, 0], [0, 0, 1, 0, 0, 1], [1, \n 0, 0, 0, 0, 1], [0, 1, 0, 0, 0, 1], [0, 0, 0, 1, 0, 1], [0, 0, 0, 0, 1,\n 1], [0, 0, 2, 0, 0, 1], [2, 0, 0, 0, 0, 1], [0, 2, 0, 0, 0, 1], [0, 0, \n 0, 2, 0, 1], [0, 0, 0, 0, 2, 1], [1, 0, 1, 0, 0, 1], [0, 1, 1, 0, 0, 1],\n [0, 0, 1, 1, 0, 1], [0, 0, 1, 0, 1, 1], [1, 1, 0, 0, 0, 1], [1, 0, 0, 1,\n 0, 1], [0, 1, 0, 0, 1, 1], [1, 0, 0, 0, 1, 1], [0, 1, 0, 1, 0, 1], [0, \n 0, 0, 1, 1, 1], [0, 0, 3, 0, 0, 1], [3, 0, 0, 0, 0, 1], [0, 3, 0, 0, 0,\n 1], [0, 0, 0, 3, 0, 1], [0, 0, 0, 0, 3, 1], [1, 0, 2, 0, 0, 1], [0, 1, \n 2, 0, 0, 1], [0, 0, 2, 1, 0, 1], [0, 0, 2, 0, 1, 1], [0, 0, 1, 2, 0, 1],\n [0, 0, 1, 0, 2, 1], [1, 2, 0, 0, 0, 1], [2, 1, 0, 0, 0, 1], [1, 0, 0, 2,\n 0, 1], [0, 1, 0, 0, 2, 1], [1, 0, 0, 0, 2, 1], [0, 1, 0, 2, 0, 1], [0, \n 0, 0, 1, 2, 1], [0, 0, 0, 2, 1, 1], [1, 1, 1, 0, 0, 1], [1, 0, 0, 1, 1,\n 1], [0, 1, 0, 1, 1, 1], [0, 0, 0, 4, 0, 1], [0, 0, 0, 0, 4, 1], [0, 0, \n 0, 5, 0, 1], [0, 0, 0, 0, 5, 1], [0, 0, 1, 3, 0, 1], [0, 0, 1, 0, 3, 1],\n [0, 0, 2, 2, 0, 1], [0, 0, 2, 0, 2, 1], [0, 0, 0, 1, 3, 1], [0, 0, 0, 3,\n 1, 1], [0, 0, 0, 2, 2, 1], [1, 0, 0, 3, 0, 1], [0, 1, 0, 0, 3, 1], [1, \n 0, 0, 0, 3, 1], [0, 1, 0, 3, 0, 1], [2, 0, 0, 2, 0, 1], [0, 2, 0, 0, 2,\n 1], [2, 0, 0, 0, 2, 1], [0, 2, 0, 2, 0, 1], [1, 0, 2, 1, 0, 1], [0, 1, \n 2, 0, 1, 1], [2, 0, 1, 1, 0, 1], [0, 2, 1, 0, 1, 1], [1, 0, 1, 2, 0, 1],\n [0, 1, 1, 0, 2, 1], [0, 0, 1, 0, 0, 2], [1, 0, 0, 0, 0, 2], [0, 1, 0, 0,\n 0, 2], [0, 0, 0, 1, 0, 2], [0, 0, 0, 0, 1, 2], [0, 0, 2, 0, 0, 2], [2, \n 0, 0, 0, 
0, 2], [0, 2, 0, 0, 0, 2], [0, 0, 0, 2, 0, 2], [0, 0, 0, 0, 2,\n 2], [1, 0, 1, 0, 0, 2], [0, 1, 1, 0, 0, 2], [0, 0, 1, 1, 0, 2], [0, 0, \n 1, 0, 1, 2], [1, 1, 0, 0, 0, 2], [1, 0, 0, 1, 0, 2], [0, 1, 0, 0, 1, 2],\n [1, 0, 0, 0, 1, 2], [0, 1, 0, 1, 0, 2], [0, 0, 0, 1, 1, 2], [0, 0, 3, 0,\n 0, 2], [3, 0, 0, 0, 0, 2], [0, 3, 0, 0, 0, 2], [0, 0, 0, 3, 0, 2], [0, \n 0, 0, 0, 3, 2], [0, 0, 0, 2, 1, 2], [0, 0, 0, 1, 2, 2], [0, 0, 1, 2, 0,\n 2], [0, 0, 1, 0, 2, 2], [1, 0, 2, 0, 0, 2], [0, 1, 2, 0, 0, 2], [2, 0, \n 1, 0, 0, 2], [0, 2, 1, 0, 0, 2], [0, 0, 0, 4, 0, 2], [0, 0, 0, 0, 4, 2],\n [0, 0, 0, 1, 3, 2], [0, 0, 0, 3, 1, 2], [0, 0, 0, 2, 2, 2], [2, 0, 0, 1,\n 0, 2], [0, 2, 0, 0, 1, 2], [1, 0, 0, 2, 0, 2], [0, 1, 0, 0, 2, 2], [1, \n 0, 0, 0, 2, 2], [0, 1, 0, 2, 0, 2], [1, 0, 1, 1, 0, 2], [0, 1, 1, 0, 1,\n 2], [1, 0, 1, 0, 1, 2], [0, 1, 1, 1, 0, 2], [0, 0, 1, 3, 0, 2], [0, 0, \n 1, 0, 3, 2], [0, 0, 1, 0, 0, 3], [1, 0, 0, 0, 0, 3], [0, 1, 0, 0, 0, 3],\n [0, 0, 0, 1, 0, 3], [0, 0, 0, 0, 1, 3], [0, 0, 2, 0, 0, 3], [2, 0, 0, 0,\n 0, 3], [0, 2, 0, 0, 0, 3], [0, 0, 0, 2, 0, 3], [0, 0, 0, 0, 2, 3], [0, \n 0, 0, 1, 1, 3], [0, 0, 3, 0, 0, 3], [0, 0, 0, 3, 0, 3], [0, 0, 0, 0, 3,\n 3], [0, 0, 0, 1, 2, 3], [0, 0, 0, 2, 1, 3], [0, 0, 1, 1, 0, 3], [0, 0, \n 1, 0, 1, 3], [1, 0, 0, 1, 0, 3], [0, 1, 0, 0, 1, 3], [1, 0, 0, 0, 1, 3],\n [0, 1, 0, 1, 0, 3], [0, 0, 2, 1, 0, 3], [0, 0, 2, 0, 1, 3], [0, 0, 1, 0,\n 0, 4], [1, 0, 0, 0, 0, 4], [0, 1, 0, 0, 0, 4], [0, 0, 0, 1, 0, 4], [0, \n 0, 0, 0, 1, 4], [0, 0, 2, 0, 0, 4], [0, 0, 0, 2, 0, 4], [0, 0, 0, 0, 2,\n 4], [0, 0, 0, 1, 1, 4], [0, 0, 1, 1, 0, 4], [0, 0, 1, 0, 1, 4], [0, 0, \n 1, 0, 0, 5], [1, 0, 0, 0, 0, 5], [0, 1, 0, 0, 0, 5], [0, 0, 0, 1, 0, 5],\n [0, 0, 0, 0, 1, 5]])\n', (2440, 8340), True, 'import numpy as np\n'), ((8024, 11731), 'numpy.array', 'np.array', (['[0.00396159, 0.0048149, 0.00318934, 0.00027018, 5.307e-05, 1.047e-05, \n 1.98e-06, 1.07103383, 0.85671785, 0.85671785, 0.11105339, 0.11105339, -\n 0.03876908, -0.03876908, 
0.18430247, 0.18430247, 0.00036727, -\n 0.00663756, -0.00663756, -0.00196944, -0.00196944, 0.01747081, -\n 1.1834351, -0.23735539, -0.23735539, -0.026119, -0.026119, -0.15438002,\n -0.15438002, -0.35516368, -0.35516368, 0.07899067, 0.07899067, -\n 0.26776532, -0.26776532, -0.00406083, -0.00406083, -0.01925971, -\n 0.01925971, -0.01107079, -0.01107079, -0.00816282, -0.00816282, \n 0.00337183, 0.00337183, -0.01352772, -0.01352772, 0.01289325, -\n 0.07449808, -0.07449808, -0.03379136, -0.03379136, -0.01672271, -\n 0.00495469, -0.00495469, -0.004536, -0.004536, -0.91033894, -0.3877959,\n -0.3877959, -0.0050364, -0.0050364, -0.46416302, -0.46416302, \n 0.07527264, 0.07527264, -0.00799835, -0.04029912, -0.04029912, \n 0.00364088, 0.47561739, 0.47561739, -0.41647359, -0.41647359, -\n 0.06425296, -0.06425296, 0.26125142, 0.26125142, 0.10336257, 0.10336257,\n -0.01680055, -0.01680055, 0.04984239, 0.04984239, 0.00354416, \n 0.00354416, 0.00452574, 0.00452574, -0.05423804, 0.06564708, 0.06564708,\n 0.03801095, 0.03801095, -0.09161667, -0.01589965, -0.01589965, \n 0.01341203, 0.01341203, -0.01342635, -0.01342635, -0.00671149, -\n 0.00671149, -0.73562441, -0.30455894, -0.30455894, 0.00582616, \n 0.00582616, -0.00547701, -0.00547701, 0.00280896, 0.00280896, \n 0.00674263, 0.00674263, 0.06845098, 0.06845098, 0.04193747, 0.04193747,\n -0.05190213, -0.05190213, 0.04168912, 0.04168912, -0.01682379, -\n 0.00098759, -0.00098759, -0.01176361, -0.01176361, 0.01742527, -\n 0.00533832, -0.00533832, 0.00542779, 0.00542779, 0.00263732, 0.00263732,\n 0.01859551, 0.01859551, 0.00511361, -0.00973834, -0.00973834, -\n 0.00511467, -0.00511467, -0.01356281, 0.00352911, -0.00964293, -\n 0.00964293, -0.00113452, -0.00113452, 0.01028106, 0.01028106, -\n 0.03748145, -0.03748145, -0.00708628, -0.00708628, 0.00742831, \n 0.00742831, 0.00419281, 0.00419281, -0.00555253, -0.00555253, -\n 0.02044897, -0.02044897, -0.02429936, 0.00148383, 0.00148383, \n 0.00050075, 0.00050075, 0.00149142, 0.00149142, 
0.02232416, 0.02232416,\n 0.07164353, 0.07164353, 0.0164487, 0.0164487, 0.01815537, 0.01605919, \n 0.01605919, 0.00735028, 0.00735028, 0.02670612, 0.02670612, 0.01548269,\n 0.01548269, -0.13042235, -0.13042235, 0.07364926, 0.07364926, -\n 0.08874645, -0.08874645, -0.01177248, 0.00172223, 0.00172223, -\n 0.00154074, -0.00154074, 0.01965194, 0.00409752, 0.00409752, 0.00301573,\n 0.00301573, -0.00734859, -0.00734859, 0.00350247, 0.00350247, -\n 0.00037121, 0.00249543, 0.00249543, -0.00168725, -0.00168725, \n 0.00914785, -0.02015559, 0.00925238, 0.00925238, -0.00593037, -\n 0.00593037, -0.01230679, -0.01230679, 0.00829575, 0.00829575, \n 0.03735453, 0.03735453, -0.04328977, -0.04328977, 0.00458548, \n 0.00458548, 0.00364501, 0.00364501, 0.00986809, 0.01437361, 0.01437361,\n 0.00072674, 0.00072674, -0.00158409, -0.00158409, -0.03961996, -\n 0.03961996, -0.01732246, -0.01732246, 0.02668498, 0.02668498, -\n 0.00188286, 0.00052265, 0.00052265, -0.00089442, -0.00089442, \n 0.00481644, 0.00031496, 0.00031496, 0.00103249, 0.00103249, 0.00224998,\n -0.00366693, -0.00033429, -0.00033429, -0.00319598, -0.00319598, \n 0.00447145, 0.00447145, -0.00147544, -0.00147544, -0.00085521, -\n 0.00085521, -0.01099915, -0.01099915, -0.00042972, 0.00013538, \n 0.00013538, -0.00019221, -0.00019221, 0.00121114, 0.00026755, \n 0.00026755, 0.00054596, 0.00057513, 0.00057513, -9.041e-05, 2.274e-05, \n 2.274e-05, -4.075e-05, -4.075e-05]'], {}), '([0.00396159, 0.0048149, 0.00318934, 0.00027018, 5.307e-05, \n 1.047e-05, 1.98e-06, 1.07103383, 0.85671785, 0.85671785, 0.11105339, \n 0.11105339, -0.03876908, -0.03876908, 0.18430247, 0.18430247, \n 0.00036727, -0.00663756, -0.00663756, -0.00196944, -0.00196944, \n 0.01747081, -1.1834351, -0.23735539, -0.23735539, -0.026119, -0.026119,\n -0.15438002, -0.15438002, -0.35516368, -0.35516368, 0.07899067, \n 0.07899067, -0.26776532, -0.26776532, -0.00406083, -0.00406083, -\n 0.01925971, -0.01925971, -0.01107079, -0.01107079, -0.00816282, -\n 0.00816282, 
0.00337183, 0.00337183, -0.01352772, -0.01352772, \n 0.01289325, -0.07449808, -0.07449808, -0.03379136, -0.03379136, -\n 0.01672271, -0.00495469, -0.00495469, -0.004536, -0.004536, -0.91033894,\n -0.3877959, -0.3877959, -0.0050364, -0.0050364, -0.46416302, -\n 0.46416302, 0.07527264, 0.07527264, -0.00799835, -0.04029912, -\n 0.04029912, 0.00364088, 0.47561739, 0.47561739, -0.41647359, -\n 0.41647359, -0.06425296, -0.06425296, 0.26125142, 0.26125142, \n 0.10336257, 0.10336257, -0.01680055, -0.01680055, 0.04984239, \n 0.04984239, 0.00354416, 0.00354416, 0.00452574, 0.00452574, -0.05423804,\n 0.06564708, 0.06564708, 0.03801095, 0.03801095, -0.09161667, -\n 0.01589965, -0.01589965, 0.01341203, 0.01341203, -0.01342635, -\n 0.01342635, -0.00671149, -0.00671149, -0.73562441, -0.30455894, -\n 0.30455894, 0.00582616, 0.00582616, -0.00547701, -0.00547701, \n 0.00280896, 0.00280896, 0.00674263, 0.00674263, 0.06845098, 0.06845098,\n 0.04193747, 0.04193747, -0.05190213, -0.05190213, 0.04168912, \n 0.04168912, -0.01682379, -0.00098759, -0.00098759, -0.01176361, -\n 0.01176361, 0.01742527, -0.00533832, -0.00533832, 0.00542779, \n 0.00542779, 0.00263732, 0.00263732, 0.01859551, 0.01859551, 0.00511361,\n -0.00973834, -0.00973834, -0.00511467, -0.00511467, -0.01356281, \n 0.00352911, -0.00964293, -0.00964293, -0.00113452, -0.00113452, \n 0.01028106, 0.01028106, -0.03748145, -0.03748145, -0.00708628, -\n 0.00708628, 0.00742831, 0.00742831, 0.00419281, 0.00419281, -0.00555253,\n -0.00555253, -0.02044897, -0.02044897, -0.02429936, 0.00148383, \n 0.00148383, 0.00050075, 0.00050075, 0.00149142, 0.00149142, 0.02232416,\n 0.02232416, 0.07164353, 0.07164353, 0.0164487, 0.0164487, 0.01815537, \n 0.01605919, 0.01605919, 0.00735028, 0.00735028, 0.02670612, 0.02670612,\n 0.01548269, 0.01548269, -0.13042235, -0.13042235, 0.07364926, \n 0.07364926, -0.08874645, -0.08874645, -0.01177248, 0.00172223, \n 0.00172223, -0.00154074, -0.00154074, 0.01965194, 0.00409752, \n 0.00409752, 0.00301573, 
0.00301573, -0.00734859, -0.00734859, \n 0.00350247, 0.00350247, -0.00037121, 0.00249543, 0.00249543, -\n 0.00168725, -0.00168725, 0.00914785, -0.02015559, 0.00925238, \n 0.00925238, -0.00593037, -0.00593037, -0.01230679, -0.01230679, \n 0.00829575, 0.00829575, 0.03735453, 0.03735453, -0.04328977, -\n 0.04328977, 0.00458548, 0.00458548, 0.00364501, 0.00364501, 0.00986809,\n 0.01437361, 0.01437361, 0.00072674, 0.00072674, -0.00158409, -\n 0.00158409, -0.03961996, -0.03961996, -0.01732246, -0.01732246, \n 0.02668498, 0.02668498, -0.00188286, 0.00052265, 0.00052265, -\n 0.00089442, -0.00089442, 0.00481644, 0.00031496, 0.00031496, 0.00103249,\n 0.00103249, 0.00224998, -0.00366693, -0.00033429, -0.00033429, -\n 0.00319598, -0.00319598, 0.00447145, 0.00447145, -0.00147544, -\n 0.00147544, -0.00085521, -0.00085521, -0.01099915, -0.01099915, -\n 0.00042972, 0.00013538, 0.00013538, -0.00019221, -0.00019221, \n 0.00121114, 0.00026755, 0.00026755, 0.00054596, 0.00057513, 0.00057513,\n -9.041e-05, 2.274e-05, 2.274e-05, -4.075e-05, -4.075e-05])\n', (8032, 11731), True, 'import numpy as np\n'), ((845, 873), 'nitrogen.dfun.X2adf', 'n2.dfun.X2adf', (['X', 'deriv', 'var'], {}), '(X, deriv, var)\n', (858, 873), True, 'import nitrogen as n2\n'), ((1438, 1465), 'nitrogen.dfun.adf2array', 'n2.dfun.adf2array', (['[v]', 'out'], {}), '([v], out)\n', (1455, 1465), True, 'import nitrogen as n2\n'), ((1980, 1997), 'nitrogen.autodiff.forward.cos', 'adf.cos', (['(n * q[5])'], {}), '(n * q[5])\n', (1987, 1997), True, 'import nitrogen.autodiff.forward as adf\n'), ((1789, 1814), 'nitrogen.autodiff.forward.const_like', 'adf.const_like', (['(1.0)', 'q[i]'], {}), '(1.0, q[i])\n', (1803, 1814), True, 'import nitrogen.autodiff.forward as adf\n')] |
import os
import re
import json
import time
import numpy as np
import pandas as pd
from plotnine import *
# Config
# Resolve the project root in an OS-portable way: split the cwd on either
# path separator, drop the first component, then rebuild the path.
PATH = os.getcwd()
path_n = re.split(pattern=r"/|\\", string=PATH)[1:]
if os.name == "posix":
    path_n = "/" + os.path.join(*path_n)
else:
    # On Windows keep the drive prefix (e.g. "C:\") from the original cwd.
    drive = PATH[0:3]
    path_n = drive + os.path.join(*path_n)
# Number of repetitions per timing scenario.
RUNS = 100
def infer_column_cats(dir: str) -> tuple:
    """Infer dataset categories from benchmark data file names.

    Scans ``<dir>/data`` for files named ``sim_data_<cols>_<rows>.csv`` and
    returns ``(row_categories, column_categories)`` as two sets of strings
    (the first set holds the trailing row-count token, the second the
    column-count token).

    Files that do not match the naming pattern are ignored instead of
    raising an AttributeError (the original crashed on them).
    """
    files = os.listdir(os.path.join(dir, "data"))
    cats, cols = set(), set()
    for name in files:
        # One combined pattern: greedy prefix, then the last two '_'-separated
        # tokens before a literal '.csv' extension.
        match = re.match(pattern=r".*_(.*)_(.*)\.csv$", string=name)
        if match:
            cols.add(match.group(1))
            cats.add(match.group(2))
    return cats, cols
def time_function(func: str) -> float:
    """Execute *func* (a statement given as source code) and return its
    wall-clock duration in seconds.

    Uses ``time.perf_counter()`` instead of ``time.time()``: perf_counter is
    monotonic and high-resolution, so timings cannot go negative or jump if
    the system clock is adjusted mid-benchmark.

    Note: the statement is executed with this module's namespace, so it may
    reference module-level names (the benchmark loop relies on ``temp``).
    """
    start = time.perf_counter()
    exec(func)
    return time.perf_counter() - start
def create_stats(measures: "List of function timings.",
                 col: "Current Column.", row: "Current Row",
                 scenario: "Current Scenario.") -> dict:
    """Summarize one scenario's timing samples into a flat result record."""
    record = {
        "scenario": scenario,
        "no_column": col,
        "data_length": row,
    }
    # Attach the distribution summary of the raw timings.
    record["min"] = np.min(measures)
    record["max"] = np.max(measures)
    record["avg"] = np.mean(measures)
    record["q50"] = np.median(measures)
    return record
# Load the per-column-count scenario definitions and enumerate the datasets.
scenarios = json.load(open(os.path.join(path_n, "output", "mutate.JSON")))
nrows, ncols = infer_column_cats(path_n)
timings, results = [], []
for col in ncols:
    print(f"-Column: {col}--")
    for row in nrows:
        print(f"--Row: {row}")
        data = pd.read_csv(os.path.join(path_n, "data", f"sim_data_{col}_{row}.csv"))
        for i, scenario in enumerate(scenarios[col]["mutate"]):
            print(f"---Scenario {i+1}: {scenario}---")
            # The scenario's 3-letter type code (INT/DBL/STR/LGL) selects
            # which mutate expression is benchmarked.
            sel = re.search(pattern=r'([A-Z]{3})', string=scenario).group(1)
            print(sel)
            if sel == "INT":
                func = f"temp['result'] = temp['{scenario}'] + 1"
            elif sel == "DBL":
                func = f"temp['result'] = temp['{scenario}'] * 2"
            elif sel == "STR":
                func = f"temp['result'] = temp['{scenario}'] + 'a'"
            elif sel == "LGL":
                func = f"temp['result'] = ~temp['{scenario}']"
            for j in range(RUNS):
                # NOTE(review): ``temp = data`` is an alias, not a copy, so
                # exec() adds/overwrites the 'result' column on the shared
                # DataFrame across runs — confirm a .copy() wasn't intended.
                temp = data
                timings.append(time_function(func=func))
                temp = None
            results.append(create_stats(measures=timings, col=col, row=row, scenario=sel))
            print(results[-1])
            timings = []
# Collate the per-scenario summaries and normalize the numeric columns.
results_df = pd.DataFrame(results)
results_df[["data_length", "no_column"]] = results_df[["data_length", "no_column"]].apply(pd.to_numeric,
                                                                                          axis=1,
                                                                                          downcast="integer")
# NOTE(review): sort_values is not in-place and its return value is
# discarded here, so this line currently has no effect — verify intent.
results_df.sort_values(["data_length", "no_column"])
# Convert seconds to milliseconds, rounded to 2 decimals.
results_df[["min", "max", "q50", "avg"]] = round(results_df[["min", "max", "q50", "avg"]] * 1000, 2)
# results_df["sel_col"] = results_df["scenario"].apply(lambda x: re.search(pattern="([13])", string=x).group(1))
# results_df["pos_col"] = results_df["scenario"].apply(lambda x: re.search(pattern="[13](.*)$", string=x).group(1))
results_df.to_csv(os.path.join(path_n, "output", "mutate_results_pandas.csv"), index=False) | [
"re.split",
"numpy.mean",
"numpy.median",
"os.path.join",
"re.match",
"os.getcwd",
"numpy.max",
"numpy.min",
"pandas.DataFrame",
"time.time",
"re.search"
] | [((123, 134), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (132, 134), False, 'import os\n'), ((2648, 2669), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (2660, 2669), True, 'import pandas as pd\n'), ((144, 183), 're.split', 're.split', ([], {'pattern': '"""/|\\\\\\\\"""', 'string': 'PATH'}), "(pattern='/|\\\\\\\\', string=PATH)\n", (152, 183), False, 'import re\n'), ((868, 879), 'time.time', 'time.time', ([], {}), '()\n', (877, 879), False, 'import time\n'), ((3384, 3443), 'os.path.join', 'os.path.join', (['path_n', '"""output"""', '"""mutate_results_pandas.csv"""'], {}), "(path_n, 'output', 'mutate_results_pandas.csv')\n", (3396, 3443), False, 'import os\n'), ((229, 250), 'os.path.join', 'os.path.join', (['*path_n'], {}), '(*path_n)\n', (241, 250), False, 'import os\n'), ((300, 321), 'os.path.join', 'os.path.join', (['*path_n'], {}), '(*path_n)\n', (312, 321), False, 'import os\n'), ((499, 524), 'os.path.join', 'os.path.join', (['dir', '"""data"""'], {}), "(dir, 'data')\n", (511, 524), False, 'import os\n'), ((906, 917), 'time.time', 'time.time', ([], {}), '()\n', (915, 917), False, 'import time\n'), ((1269, 1285), 'numpy.min', 'np.min', (['measures'], {}), '(measures)\n', (1275, 1285), True, 'import numpy as np\n'), ((1306, 1322), 'numpy.max', 'np.max', (['measures'], {}), '(measures)\n', (1312, 1322), True, 'import numpy as np\n'), ((1343, 1360), 'numpy.mean', 'np.mean', (['measures'], {}), '(measures)\n', (1350, 1360), True, 'import numpy as np\n'), ((1381, 1400), 'numpy.median', 'np.median', (['measures'], {}), '(measures)\n', (1390, 1400), True, 'import numpy as np\n'), ((1431, 1476), 'os.path.join', 'os.path.join', (['path_n', '"""output"""', '"""mutate.JSON"""'], {}), "(path_n, 'output', 'mutate.JSON')\n", (1443, 1476), False, 'import os\n'), ((1676, 1733), 'os.path.join', 'os.path.join', (['path_n', '"""data"""', 'f"""sim_data_{col}_{row}.csv"""'], {}), "(path_n, 'data', f'sim_data_{col}_{row}.csv')\n", (1688, 1733), False, 
'import os\n'), ((542, 587), 're.match', 're.match', ([], {'pattern': '""".*_(.*).csv$"""', 'string': 'file'}), "(pattern='.*_(.*).csv$', string=file)\n", (550, 587), False, 'import re\n'), ((633, 681), 're.match', 're.match', ([], {'pattern': '""".*_(.*)_.*.csv$"""', 'string': 'file'}), "(pattern='.*_(.*)_.*.csv$', string=file)\n", (641, 681), False, 'import re\n'), ((1872, 1920), 're.search', 're.search', ([], {'pattern': '"""([A-Z]{3})"""', 'string': 'scenario'}), "(pattern='([A-Z]{3})', string=scenario)\n", (1881, 1920), False, 'import re\n')] |
import pytest
from numerous.engine.model import Model
from numerous.engine.simulation import Simulation
from numerous.utils.logger_levels import LoggerLevel
from numerous.multiphysics.equation_base import EquationBase
from numerous.multiphysics.equation_decorators import Equation
from numerous.engine.system.item import Item
from numerous.engine.system.subsystem import Subsystem
from numerous.engine.simulation.solvers.base_solver import solver_types
import numpy as np
# Shorthand aliases for the logger verbosity levels used by the test items.
INFO = LoggerLevel.INFO
DEBUG = LoggerLevel.DEBUG
ALL = LoggerLevel.ALL
@pytest.fixture(autouse=True)
def run_before_and_after_tests():
    """Autouse fixture: wipe any stale '../tmp' directory before each test."""
    from shutil import rmtree
    rmtree('../tmp', ignore_errors=True)
    yield
class TestLogItem1(Item, EquationBase):
    """Item with one state logged at INFO ('v'), one at DEBUG ('s'), and a
    parameter logged at ALL ('p'), used to exercise historian logger levels."""
    def __init__(self, tag='testlogitem1'):
        super(TestLogItem1, self).__init__(tag)
        self.t1 = self.create_namespace('t1')
        self.add_state('v', 0, logger_level=INFO)
        self.add_state('s', 0.5, logger_level=DEBUG)
        self.add_parameter('p', 1, logger_level=ALL)
        self.t1.add_equations([self])
        return
    @Equation()
    def eval(self, scope):
        # v grows linearly: v(t) = t (v starts at 0).
        scope.v_dot = 1
        # s_dot = -2/(e^v + e^-v)^2, which with v = t is the derivative of
        # 1/(1 + e^(2t)) — matching sigmoidlike() defined below.
        scope.s_dot = -2 / ((np.exp(scope.v) + np.exp(-scope.v)) ** 2)
class TestLogSubsystem1(Subsystem):
    """Subsystem wrapping a single TestLogItem1 instance."""
    def __init__(self, tag='testlogsubsystem1'):
        super().__init__(tag)
        self.register_items([TestLogItem1()])
def sigmoidlike(t):
    """Analytic solution for the logged state 's': 1 / (1 + exp(2*t))."""
    return 1.0 / (np.exp(2 * t) + 1.0)
@pytest.mark.parametrize("solver", solver_types)
@pytest.mark.parametrize("use_llvm", [True, False])
def test_logger_levels(solver, use_llvm):
    """Solve the test system and verify every logged column against the
    analytic solution, for each solver and with/without LLVM compilation."""
    t_start, t_stop, num = 0, 100, 100
    system = TestLogSubsystem1()
    model = Model(system, logger_level=ALL, use_llvm=use_llvm)
    tvec = np.linspace(t_start, t_stop, num + 1, dtype=np.float64)
    sim = Simulation(model, t_start=t_start, t_stop=t_stop, num=num, num_inner=1,
                     solver_type=solver, rtol=1e-8, atol=1e-8)
    sim.solve()
    df = sim.model.historian_df
    prefix = 'testlogsubsystem1.testlogitem1.t1'
    expected_results = {
        f"{prefix}.v": tvec,                 # v(t) = t
        f"{prefix}.p": np.ones(num + 1),     # constant parameter
        f"{prefix}.s": sigmoidlike(tvec),    # analytic sigmoid-like decay
    }
    for column, values in expected_results.items():
        assert pytest.approx(values, abs=1e-5) == df.get(column), \
            "expected results do not match actual results"
| [
"pytest.approx",
"numerous.multiphysics.equation_decorators.Equation",
"numpy.ones",
"numerous.engine.simulation.Simulation",
"pytest.mark.parametrize",
"numpy.linspace",
"numpy.exp",
"numerous.engine.model.Model",
"shutil.rmtree",
"pytest.fixture"
] | [((548, 576), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (562, 576), False, 'import pytest\n'), ((1459, 1506), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""solver"""', 'solver_types'], {}), "('solver', solver_types)\n", (1482, 1506), False, 'import pytest\n'), ((1508, 1558), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_llvm"""', '[True, False]'], {}), "('use_llvm', [True, False])\n", (1531, 1558), False, 'import pytest\n'), ((633, 676), 'shutil.rmtree', 'shutil.rmtree', (['"""../tmp"""'], {'ignore_errors': '(True)'}), "('../tmp', ignore_errors=True)\n", (646, 676), False, 'import shutil\n'), ((1083, 1093), 'numerous.multiphysics.equation_decorators.Equation', 'Equation', ([], {}), '()\n', (1091, 1093), False, 'from numerous.multiphysics.equation_decorators import Equation\n'), ((1690, 1737), 'numerous.engine.model.Model', 'Model', (['sys'], {'logger_level': 'ALL', 'use_llvm': 'use_llvm'}), '(sys, logger_level=ALL, use_llvm=use_llvm)\n', (1695, 1737), False, 'from numerous.engine.model import Model\n'), ((1749, 1804), 'numpy.linspace', 'np.linspace', (['t_start', 't_stop', '(num + 1)'], {'dtype': 'np.float64'}), '(t_start, t_stop, num + 1, dtype=np.float64)\n', (1760, 1804), True, 'import numpy as np\n'), ((1815, 1934), 'numerous.engine.simulation.Simulation', 'Simulation', (['model'], {'t_start': 't_start', 't_stop': 't_stop', 'num': 'num', 'num_inner': '(1)', 'solver_type': 'solver', 'rtol': '(1e-08)', 'atol': '(1e-08)'}), '(model, t_start=t_start, t_stop=t_stop, num=num, num_inner=1,\n solver_type=solver, rtol=1e-08, atol=1e-08)\n', (1825, 1934), False, 'from numerous.engine.simulation import Simulation\n'), ((2188, 2204), 'numpy.ones', 'np.ones', (['(num + 1)'], {}), '(num + 1)\n', (2195, 2204), True, 'import numpy as np\n'), ((1441, 1454), 'numpy.exp', 'np.exp', (['(2 * t)'], {}), '(2 * t)\n', (1447, 1454), True, 'import numpy as np\n'), ((2279, 2306), 'pytest.approx', 
'pytest.approx', (['v'], {'abs': '(1e-05)'}), '(v, abs=1e-05)\n', (2292, 2306), False, 'import pytest\n'), ((1174, 1189), 'numpy.exp', 'np.exp', (['scope.v'], {}), '(scope.v)\n', (1180, 1189), True, 'import numpy as np\n'), ((1192, 1208), 'numpy.exp', 'np.exp', (['(-scope.v)'], {}), '(-scope.v)\n', (1198, 1208), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Scheduling tactician.
"""
import os
import logging
import numpy as np
import ephem
from collections import OrderedDict as odict
from obztak import get_survey
from obztak.utils.projector import angsep
from obztak.utils import projector as proj
from obztak.ctio import CTIO
from obztak.utils import constants
from obztak.utils.date import datestring
# Map from an observing-condition label to the [min, max] airmass window it
# permits; None is the default (unrestricted) entry used when no mode is set.
CONDITIONS = odict([
    (None,      [0.0, 2.0]), #default
    ('great',   [1.6, 2.0]),
    ('good',    [0.0, 2.0]),
    ('complete',[0.0, 2.0]),
    ('maglites',[0.0, 2.0]),
    ('fine',    [0.0, 1.9]),
    ('ok',      [0.0, 1.6]),
    ('poor',    [0.0, 1.5]),
    ('bad',     [0.0, 1.4]),
])
class Tactician(object):
    """Base class for survey scheduling tacticians.

    Holds the target fields, the already-completed fields, the observatory,
    and ephem Moon/Sun objects, and exposes derived per-field quantities
    (airmass, hour angle, slew, ...) as properties evaluated at the current
    date.  Subclasses override ``weight`` to implement a scheduling policy.
    """
    name = 'tactician'
    def __init__(self, fields=None, observatory=None, **kwargs):
        """ Initialize the survey scheduling tactician.
        Parameters:
        -----------
        fields : The available fields.
        observatory : The observatory (defaults to CTIO)
        Returns:
        --------
        Tactician : The Tactician object
        """
        if not observatory: observatory = CTIO()
        self.observatory = observatory
        self.moon = ephem.Moon()
        self.sun = ephem.Sun()
        self.set_target_fields(fields)
        self.set_completed_fields(None)
        self.set_date(None)
    def set_date(self,date):
        """Set the observatory date and recompute Moon/Sun ephemerides.
        A None date leaves the current observatory date untouched."""
        if date is not None:
            self.observatory.date = ephem.Date(date)
            self.moon.compute(self.observatory)
            self.sun.compute(self.observatory)
    def set_target_fields(self,fields):
        """Store a copy of the candidate target fields (or None)."""
        if fields is not None:
            self.fields = fields.copy()
        else:
            self.fields = None
    def set_completed_fields(self,fields):
        """Store a copy of the already-observed fields (or None)."""
        if fields is not None:
            self.completed_fields = fields.copy()
        else:
            self.completed_fields = None
    def set_previous_field(self,field):
        # Intentionally a no-op: the previous field is derived from
        # completed_fields instead (see the ``slew`` property).
        #if field is not None:
        #    self.previous_field = field.copy()
        #else:
        #    self.previous_field = None
        pass
    @property
    def date(self):
        """Current ephem date of the observatory."""
        return self.observatory.date
    @property
    def hour_angle_limit(self):
        """Per-field hour angle limit from the observatory model."""
        return self.observatory.hour_angle_limit(self.fields['DEC'])
    @property
    def airmass_limit(self):
        """Per-field airmass limit from the observatory model."""
        return self.observatory.airmass_limit(self.fields['DEC'])
    @property
    def zenith_angle(self):
        # RA and Dec of zenith
        return np.degrees(self.observatory.radec_of(0,'90'))
    @property
    def airmass(self):
        """ Calculate the airmass of each field. """
        ra_zenith,dec_zenith = self.zenith_angle
        return proj.airmass(ra_zenith, dec_zenith,
                            self.fields['RA'], self.fields['DEC'])
    @property
    def moon_angle(self):
        """Angular separation (deg) of each field from the Moon."""
        # Include moon angle
        # See here for ra,dec details: http://rhodesmill.org/pyephem/radec
        ra_moon,dec_moon = np.degrees([self.moon.ra,self.moon.dec])
        return proj.angsep(ra_moon, dec_moon,
                           self.fields['RA'], self.fields['DEC'])
    @property
    def moon_phase(self):
        """Moon illumination fraction (percent) from ephem."""
        return self.moon.phase
    @property
    def slew(self):
        """Angular separation to previous field."""
        # Set previous field as last completed field
        previous_field = None
        if (self.completed_fields is not None) and len(self.completed_fields):
            previous_field = self.completed_fields[-1]
            # Ignore if more than 30 minutes has elapsed
            if (self.date-ephem.Date(previous_field['DATE'])) > 30*ephem.minute:
                previous_field = None
        if previous_field:
            return angsep(previous_field['RA'],previous_field['DEC'],
                          self.fields['RA'], self.fields['DEC'])
        else:
            # No recent previous field: zero slew for every candidate.
            return np.zeros(len(self.fields))
    @property
    def slew_time(self):
        """Estimate of the slew time (Alt/Az telescope)."""
        # Set previous field as last completed field
        previous_field = None
        if (self.completed_fields is not None) and len(self.completed_fields):
            previous_field = self.completed_fields[-1]
            # Ignore if more than 30 minutes has elapsed
            if (self.date-ephem.Date(previous_field['DATE'])) > 30*ephem.minute:
                previous_field = None
        if previous_field:
            # Euclidean distance in (RA, DEC) as a slew-time proxy.
            return np.sqrt((previous_field['RA']-self.fields['RA'])**2 +
                           (previous_field['DEC']-self.fields['DEC'])**2)
        else:
            return np.zeros(len(self.fields))
    @property
    def hour_angle(self):
        """Hour angle of each field (deg), wrapped into [-180, 180]."""
        ra_zenith,dec_zenith = self.zenith_angle
        # np.copy ensures a fresh array, so callers may mutate the result.
        hour_angle = np.copy(self.fields['RA']) - ra_zenith
        hour_angle[hour_angle < -180.] += 360.
        hour_angle[hour_angle > 180.] -= 360.
        return hour_angle
    @property
    def viable_fields(self):
        """Boolean mask of fields currently observable under all cuts."""
        # Check the hour angle restrictions at south pole
        sel_hour_angle = np.fabs(self.hour_angle) < self.hour_angle_limit
        # Blanco airmass restrictions
        sel_airmass = self.airmass < self.airmass_limit
        # Declination restrictions
        sel_declination = self.fields['DEC'] > constants.SOUTHERN_REACH
        # Exclude special fields (unless using special tacticians)
        sel_special = self.fields['PRIORITY'] < 90
        viable = sel_hour_angle & sel_airmass & sel_declination & sel_special
        return viable
    @property
    def weight(self):
        """Default per-field weight (lower is better); np.inf excludes."""
        # hour_angle returns a fresh array, so in-place edits below are safe.
        weight = self.hour_angle
        sel = self.viable_fields
        weight[~sel] = np.inf
        weight += 6. * 360. * self.fields['TILING'] # Was 6, 60
        weight += self.slew**3 # slew**2
        weight += 100. * (self.airmass - 1.)**3
        return weight
    def select_index(self):
        """Return indices of all exposures sharing the minimum-weight field."""
        index_select = np.argmin(self.weight)
        # Search for other exposures in the same field
        field_id = self.fields['HEX'][index_select]
        tiling = self.fields['TILING'][index_select]
        index = np.nonzero( (self.fields['HEX'] == field_id) &
                            (self.fields['TILING'] == tiling))[0]
        return index
    def select_fields(self):
        """Return the selected field records annotated with scheduling info."""
        index = self.select_index()
        timedelta = constants.FIELDTIME*np.arange(len(index))
        if np.any(self.slew[index] > 5.):
            # Apply a 30 second penalty for slews over 5 deg.
            # This is not completely realistic, but better than nothing
            # WARNING: This is broken when selecting two fields at once
            timedelta += 30*ephem.second
        fields = self.fields[index]
        fields['AIRMASS'] = self.airmass[index]
        # NOTE(review): under Python 3 ``map`` returns a lazy iterator;
        # confirm this assignment still materializes one datestring per
        # field (list(map(...)) may be required).
        fields['DATE'] = map(datestring,self.date+timedelta)
        fields['SLEW'] = self.slew[index]
        fields['MOONANGLE'] = self.moon_angle[index]
        fields['HOURANGLE'] = self.hour_angle[index]
        return fields
class CoverageTactician(Tactician):
    """Coverage-first tactician (same weighting scheme as the base class)."""
    name = 'coverage'
    mode = None
    @property
    def weight(self):
        """Per-field weight (lower is better); np.inf excludes a field."""
        viable = self.viable_fields
        w = self.hour_angle
        w[~viable] = np.inf
        w += 6. * 360. * self.fields['TILING']  # strongly prefer earlier tilings
        w += self.slew**3                       # penalize long slews
        w += 100. * (self.airmass - 1.)**3      # penalize high airmass
        return w
class ConditionTactician(Tactician):
    """Tactician that folds a mode-dependent airmass window (from the
    module-level CONDITIONS table) into the field weighting."""
    name = 'condition'
    def __init__(self, *args, **kwargs):
        super(ConditionTactician, self).__init__(*args, **kwargs)
        self.mode = kwargs.get('mode', None)
    @property
    def weight(self):
        """Per-field weight (lower is better); np.inf excludes a field."""
        am = self.airmass
        viable = self.viable_fields
        w = 2.0 * self.hour_angle
        w[~viable] = np.inf
        w += 3. * 360. * self.fields['TILING']
        if self.mode == 'complete':
            # Near survey completion, push remaining tilings much harder.
            w += 100. * 360. * self.fields['TILING']
        w += self.slew**3
        lo, hi = CONDITIONS[self.mode]
        outside_window = ((am < lo) | (am > hi))
        # ADW: This should probably also be in there
        w += 100. * (am - 1.)**3
        w += 5000. * outside_window
        if self.mode == 'great':
            # In great conditions, avoid the far-southern fields.
            w += 5000. * (self.fields['DEC'] > -80)
        return w
class SMCNODTactician(Tactician):
    """Tactician that strongly prioritizes the SMC-NOD hexes.

    Bug fixes relative to the original:
    - ``obztak.utils.constants`` was a NameError (this module imports
      ``from obztak.utils import constants``, which does not bind
      ``obztak``); use the bound ``constants`` name instead.
    - bare ``slew`` was undefined; the slew property is ``self.slew``.
    """
    @property
    def weight(self):
        """Per-field weight (lower is better); np.inf excludes a field."""
        sel = self.viable_fields
        # Huge penalty for any field outside the SMC-NOD hex list.
        weight = 10000. * np.logical_not(np.in1d(self.fields['HEX'], constants.HEX_SMCNOD)).astype(float)
        weight[~sel] = np.inf
        weight += 360. * self.fields['TILING']
        weight += self.slew
        return weight
class BlissTactician(Tactician):
    """Tactician for the BLISS survey: weights fields using the Moon phase
    and separation, filter band, an airmass window, hour angle, slew
    distance, and tiling order."""
    # Allowed [min, max] airmass window per observing mode.
    CONDITIONS = odict([
        (None,    [1.0, 1.4]),
        ('bliss', [1.0, 1.4]),
        #('good',  [1.0, 1.4]),
        #('poor',  [1.0, 1.2]),
    ])
    def __init__(self, *args, **kwargs):
        super(BlissTactician,self).__init__(*args,**kwargs)
        self.mode = kwargs.get('mode',None)
    @property
    def weight(self):
        """Per-field weight (lower is better); np.inf excludes a field."""
        airmass = self.airmass
        moon_angle = self.moon_angle
        sel = self.viable_fields
        weight = np.zeros(len(sel))
        # Moon angle constraints
        moon_limit = 30.
        sel &= (moon_angle > moon_limit)
        # Moon band constraints
        if (self.moon.phase >= 80) and (self.moon.alt > -0.04):
            # Moon is very bright; only do z
            sel &= (np.char.count('z',self.fields['FILTER']) > 0)
            # Allow i,z but prefer z
            #sel &= (np.char.count('iz',self.fields['FILTER']) > 0)
            #weight += 1e2 * (np.char.count('i',self.fields['FILTER']) > 0)
        elif (self.moon.phase >= 45) and (self.moon.alt > -0.04):
            # Moon is more than half full; do i,z
            sel &= (np.char.count('iz',self.fields['FILTER']) > 0)
        else:
            # Moon is faint or down; do g,r (unless none available)
            sel &= (np.char.count('gr',self.fields['FILTER']) > 0)
            #weight += 1e8 * (np.char.count('iz',self.fields['FILTER']) > 0)
        if (self.sun.alt > -0.28):
            # No g-band if Sun altitude > -16 deg
            sel &= ~(np.char.count('g',self.fields['FILTER']) > 0)
        # Airmass cut
        airmass_min, airmass_max = self.CONDITIONS[self.mode]
        sel &= ((airmass > airmass_min) & (airmass < airmass_max))
        # Don't allow the same field to be scheduled in different bands
        # less than 10 hours apart
        if len(self.completed_fields):
            # list(...) required: in Python 3 np.array(map(...)) would wrap
            # the lazy map object instead of building a date array.
            dates = np.array(list(map(ephem.Date,self.completed_fields['DATE'])))
            recent = self.completed_fields[(self.date - dates) < 10*ephem.hour]
            # Don't allow the same fields twice on one night
            sel &= ~np.in1d(self.fields.field_id,recent.field_id)
            # Higher weight for duplicate HEXs
            weight += 500.0 * np.in1d(self.fields['HEX'],recent['HEX'])
            #weight += 1e9 * np.in1d(self.fields.field_id,recent.field_id)
        # Set the weights for each field. Lower weight means more favorable.
        # Higher weight for rising fields (higher hour angle)
        # HA [min,max] = [-53,54] (for airmass 1.4)
        #weight += 5.0 * self.hour_angle
        weight += 1.0 * self.hour_angle
        #weight += 0.1 * self.hour_angle
        # Higher weight for larger slews
        # slew = 10 deg -> weight = 1e2
        #weight += self.slew**2
        #weight += self.slew
        weight += 1e3 * self.slew
        # Higher weight for higher airmass
        # airmass = 1.4 -> weight = 6.4
        weight += 100. * (airmass - 1.)**3
        # Higher weight for fields close to the moon (when up)
        # angle = 50 -> weight = 6.4
        if (self.moon.alt > -0.04):
            #weight += 100 * (35./moon_angle)**3
            #weight += 10 * (35./moon_angle)**3
            weight += 1 * (35./moon_angle)**3
        # Try hard to do the first tiling
        weight += 1e6 * (self.fields['TILING'] - 1)
        # Prioritize Planet 9 Region late in the survey/night
        #ra_zenith, dec_zenith = np.degrees(self.observatory.radec_of(0,'90'))
        #if ra_zenith > 270:
        #    weight += 1e6 * (self.fields['PRIORITY'] - 1)
        #    # Allow i,z exposures at high penalty
        #    #sel &= (np.char.count('iz',self.fields['FILTER']) > 0)
        #    #weight += 1e8 * (np.char.count('iz',self.fields['FILTER']) > 0)
        # Set infinite weight to all disallowed fields
        weight[~sel] = np.inf
        return weight
    def select_index(self):
        """Return a length-1 index array for the minimum-weight field.

        Raises
        ------
        ValueError : if only infinite-weight (unobservable) fields remain.
        """
        weight = self.weight
        index = np.array([np.argmin(weight)],dtype=int)
        if np.any(~np.isfinite(weight[index])):
            msg = "Infinite weight selected"
            # Log the failure and produce a diagnostic plot.  The leftover
            # interactive debugging (print + pdb.set_trace) was removed so
            # automated scheduling fails fast instead of hanging.
            logging.error(msg)
            import obztak.utils.ortho
            airmass_min, airmass_max = self.CONDITIONS[self.mode]
            bmap = obztak.utils.ortho.plotFields(self.completed_fields[-1],self.fields,self.completed_fields,options_basemap=dict(airmass=airmass_max))
            raise ValueError(msg)
        return index
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description=__doc__)
args = parser.parse_args()
# if mode == 'airmass':
# airmass_effective = copy.copy(airmass)
# # Do not observe fields that are unavailable
# airmass_effective[np.logical_not(cut)] = np.inf
# # Priorize coverage over multiple tilings
# airmass_effective += self.target_fields['TILING']
# index_select = np.argmin(airmass_effective)
# elif mode == 'ra':
# # Different selection
# #ra_effective = copy.copy(self.target_fields['RA'])
# ra_effective = copy.copy(self.target_fields['RA']) - ra_zenith
# ra_effective[ra_effective > 180.] = ra_effective[ra_effective > 180.] - 360.
# ra_effective[np.logical_not(cut)] = np.inf
# ra_effective += 360. * self.target_fields['TILING']
# index_select = np.argmin(ra_effective)
# elif mode == 'slew':
# #ra_effective = copy.copy(self.target_fields['RA'])
# ra_effective = copy.copy(self.target_fields['RA']) - ra_zenith
# ra_effective[ra_effective > 180.] = ra_effective[ra_effective > 180.] - 360.
# ra_effective[np.logical_not(cut)] = np.inf
# ra_effective += 360. * self.target_fields['TILING']
# ra_effective += slew**2
# #ra_effective += 2. * slew
# index_select = np.argmin(ra_effective)
# elif mode == 'balance':
# """
# ra_effective = copy.copy(self.target_fields['RA']) - ra_zenith
# ra_effective[ra_effective > 180.] = ra_effective[ra_effective > 180.] - 360.
# ra_effective[np.logical_not(cut)] = np.inf
# ra_effective += 360. * self.target_fields['TILING']
# #ra_effective += 720. * self.target_fields['TILING']
# ra_effective += slew**2
# ra_effective += 100. * (airmass - 1.)**3
# weight = ra_effective
# index_select = np.argmin(weight)
# weight = hour_angle_degree
# """
# weight = copy.copy(hour_angle_degree)
# weight[np.logical_not(cut)] = np.inf
# weight += 3. * 360. * self.target_fields['TILING']
# weight += slew**3 # slew**2
# weight += 100. * (airmass - 1.)**3
# index_select = np.argmin(weight)
# elif mode == 'balance2':
# weight = copy.copy(hour_angle_degree)
# weight[np.logical_not(cut)] = np.inf
# weight += 360. * self.target_fields['TILING']
# weight += slew_ra**2
# weight += slew_dec
# weight += 100. * (airmass - 1.)**3
# index_select = np.argmin(weight)
# elif mode == 'balance3':
# logging.debug("Slew: %s"%slew)
# weight = copy.copy(hour_angle_degree)
# weight[np.logical_not(cut)] = np.inf
# weight += 3. * 360. * self.target_fields['TILING']
# """
# x_slew, y_slew = zip(*[[0., 0.],
# [2.5, 10.],
# [5., 30.],
# [10., 150.],
# [20., 250.],
# [50., 500.],
# [180., 5000.]])
# """
# x_slew, y_slew = zip(*[[0., 0.],
# [2.5, 10.],
# [5., 30.],
# [10., 500.], #
# [20., 1000.], # 500
# [50., 5000.], # 1000
# [180., 5000.]])
# weight += np.interp(slew, x_slew, y_slew, left=np.inf, right=np.inf)
# weight += 100. * (airmass - 1.)**3
# index_select = np.argmin(weight)
# elif mode == 'airmass2':
# weight = 200. * (airmass - airmass_next)
# weight[np.logical_not(cut)] = np.inf
# weight += 360. * self.target_fields['TILING']
# weight += 100. * (airmass - 1.)**3
# weight += slew**2
# index_select = np.argmin(weight)
# elif mode in ('coverage','good'):
# weight = copy.copy(hour_angle_degree)
# #weight[np.logical_not(cut)] = 9999.
# weight[np.logical_not(cut)] = np.inf
# weight += 6. * 360. * self.target_fields['TILING'] # Was 6, 60
# weight += slew**3 # slew**2
# weight += 100. * (airmass - 1.)**3
# index_select = np.argmin(weight)
# elif mode == 'coverage2':
# weight = copy.copy(hour_angle_degree)
# weight *= 2.
# weight[np.logical_not(cut)] = np.inf
# weight += 6. * 360. * self.target_fields['TILING']
# weight += slew**3 # slew**2
# weight += 100. * (airmass - 1.)**3
# index_select = np.argmin(weight)
# elif mode == 'coverage3':
# weight = copy.copy(hour_angle_degree)
# weight *= 0.5
# weight[np.logical_not(cut)] = np.inf
# weight += 6. * 360. * self.target_fields['TILING']
# weight += slew**3 # slew**2
# weight += 100. * (airmass - 1.)**3
# index_select = np.argmin(weight)
# elif mode == 'lowairmass':
# weight = 2.0 * copy.copy(hour_angle_degree)
# #if len(self.scheduled_fields) == 0:
# # weight += 200. * obztak.utils.projector.angsep(self.target_fields['RA'],
# # self.target_fields['DEC'],
# # 90., -70.)
# weight[np.logical_not(cut)] = np.inf
# weight += 3. * 360. * self.target_fields['TILING']
# weight += slew**3 # slew**2
# #weight += 2000. * (airmass - 1.)**3 # 200
# weight += 5000. * (airmass > 1.5)
# index_select = np.argmin(weight)
#
# """
# weight = copy.copy(hour_angle_degree)
# weight[np.logical_not(cut)] = np.inf
# weight += 3. * 360. * self.target_fields['TILING']
# weight += slew**3 # slew**2
# weight += 1000. * (airmass - 1.)**3
# index_select = np.argmin(weight)
# """
# elif mode in CONDITIONS.keys():
# weight = 2.0 * copy.copy(hour_angle_degree)
# weight[np.logical_not(cut)] = np.inf
# weight += 3. * 360. * self.target_fields['TILING']
# weight += slew**3
# airmass_min, airmass_max = CONDITIONS[mode]
# airmass_sel = ((airmass < airmass_min) | (airmass > airmass_max))
# # ADW: This should probably also be in there
# weight += 100. * (airmass - 1.)**3
# weight += 5000. * airmass_sel
# index_select = np.argmin(weight)
# elif mode == 'smcnod':
# weight = 10000. * np.logical_not(np.in1d(self.target_fields['HEX'], obztak.utils.constants.HEX_SMCNOD)).astype(float)
# weight[np.logical_not(cut)] = np.inf
# weight += 360. * self.target_fields['TILING']
# weight += slew
# index_select = np.argmin(weight)
# else:
# msg = "Unrecognized mode: %s"%mode
# raise Exception(msg)
| [
"numpy.sqrt",
"ephem.Sun",
"obztak.ctio.CTIO",
"numpy.isfinite",
"ephem.Date",
"argparse.ArgumentParser",
"obztak.utils.projector.angsep",
"numpy.argmin",
"numpy.degrees",
"ephem.Moon",
"collections.OrderedDict",
"numpy.in1d",
"numpy.any",
"numpy.nonzero",
"numpy.char.count",
"obztak.u... | [((391, 609), 'collections.OrderedDict', 'odict', (["[(None, [0.0, 2.0]), ('great', [1.6, 2.0]), ('good', [0.0, 2.0]), (\n 'complete', [0.0, 2.0]), ('maglites', [0.0, 2.0]), ('fine', [0.0, 1.9]),\n ('ok', [0.0, 1.6]), ('poor', [0.0, 1.5]), ('bad', [0.0, 1.4])]"], {}), "([(None, [0.0, 2.0]), ('great', [1.6, 2.0]), ('good', [0.0, 2.0]), (\n 'complete', [0.0, 2.0]), ('maglites', [0.0, 2.0]), ('fine', [0.0, 1.9]),\n ('ok', [0.0, 1.6]), ('poor', [0.0, 1.5]), ('bad', [0.0, 1.4])])\n", (396, 609), True, 'from collections import OrderedDict as odict\n'), ((8644, 8694), 'collections.OrderedDict', 'odict', (["[(None, [1.0, 1.4]), ('bliss', [1.0, 1.4])]"], {}), "([(None, [1.0, 1.4]), ('bliss', [1.0, 1.4])])\n", (8649, 8694), True, 'from collections import OrderedDict as odict\n'), ((13141, 13185), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (13164, 13185), False, 'import argparse\n'), ((1191, 1203), 'ephem.Moon', 'ephem.Moon', ([], {}), '()\n', (1201, 1203), False, 'import ephem\n'), ((1223, 1234), 'ephem.Sun', 'ephem.Sun', ([], {}), '()\n', (1232, 1234), False, 'import ephem\n'), ((2664, 2738), 'obztak.utils.projector.airmass', 'proj.airmass', (['ra_zenith', 'dec_zenith', "self.fields['RA']", "self.fields['DEC']"], {}), "(ra_zenith, dec_zenith, self.fields['RA'], self.fields['DEC'])\n", (2676, 2738), True, 'from obztak.utils import projector as proj\n'), ((2939, 2980), 'numpy.degrees', 'np.degrees', (['[self.moon.ra, self.moon.dec]'], {}), '([self.moon.ra, self.moon.dec])\n', (2949, 2980), True, 'import numpy as np\n'), ((2995, 3064), 'obztak.utils.projector.angsep', 'proj.angsep', (['ra_moon', 'dec_moon', "self.fields['RA']", "self.fields['DEC']"], {}), "(ra_moon, dec_moon, self.fields['RA'], self.fields['DEC'])\n", (3006, 3064), True, 'from obztak.utils import projector as proj\n'), ((5827, 5849), 'numpy.argmin', 'np.argmin', (['self.weight'], {}), '(self.weight)\n', (5836, 5849), True, 
'import numpy as np\n'), ((6305, 6335), 'numpy.any', 'np.any', (['(self.slew[index] > 5.0)'], {}), '(self.slew[index] > 5.0)\n', (6311, 6335), True, 'import numpy as np\n'), ((1125, 1131), 'obztak.ctio.CTIO', 'CTIO', ([], {}), '()\n', (1129, 1131), False, 'from obztak.ctio import CTIO\n'), ((1438, 1454), 'ephem.Date', 'ephem.Date', (['date'], {}), '(date)\n', (1448, 1454), False, 'import ephem\n'), ((3692, 3787), 'obztak.utils.projector.angsep', 'angsep', (["previous_field['RA']", "previous_field['DEC']", "self.fields['RA']", "self.fields['DEC']"], {}), "(previous_field['RA'], previous_field['DEC'], self.fields['RA'], self\n .fields['DEC'])\n", (3698, 3787), False, 'from obztak.utils.projector import angsep\n'), ((4409, 4522), 'numpy.sqrt', 'np.sqrt', (["((previous_field['RA'] - self.fields['RA']) ** 2 + (previous_field['DEC'] -\n self.fields['DEC']) ** 2)"], {}), "((previous_field['RA'] - self.fields['RA']) ** 2 + (previous_field[\n 'DEC'] - self.fields['DEC']) ** 2)\n", (4416, 4522), True, 'import numpy as np\n'), ((4710, 4736), 'numpy.copy', 'np.copy', (["self.fields['RA']"], {}), "(self.fields['RA'])\n", (4717, 4736), True, 'import numpy as np\n'), ((4995, 5019), 'numpy.fabs', 'np.fabs', (['self.hour_angle'], {}), '(self.hour_angle)\n', (5002, 5019), True, 'import numpy as np\n'), ((6030, 6115), 'numpy.nonzero', 'np.nonzero', (["((self.fields['HEX'] == field_id) & (self.fields['TILING'] == tiling))"], {}), "((self.fields['HEX'] == field_id) & (self.fields['TILING'] == tiling)\n )\n", (6040, 6115), True, 'import numpy as np\n'), ((13008, 13023), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (13021, 13023), False, 'import pdb\n'), ((9368, 9409), 'numpy.char.count', 'np.char.count', (['"""z"""', "self.fields['FILTER']"], {}), "('z', self.fields['FILTER'])\n", (9381, 9409), True, 'import numpy as np\n'), ((10694, 10740), 'numpy.in1d', 'np.in1d', (['self.fields.field_id', 'recent.field_id'], {}), '(self.fields.field_id, recent.field_id)\n', (10701, 10740), 
True, 'import numpy as np\n'), ((10818, 10860), 'numpy.in1d', 'np.in1d', (["self.fields['HEX']", "recent['HEX']"], {}), "(self.fields['HEX'], recent['HEX'])\n", (10825, 10860), True, 'import numpy as np\n'), ((12550, 12567), 'numpy.argmin', 'np.argmin', (['weight'], {}), '(weight)\n', (12559, 12567), True, 'import numpy as np\n'), ((12599, 12625), 'numpy.isfinite', 'np.isfinite', (['weight[index]'], {}), '(weight[index])\n', (12610, 12625), True, 'import numpy as np\n'), ((3552, 3586), 'ephem.Date', 'ephem.Date', (["previous_field['DATE']"], {}), "(previous_field['DATE'])\n", (3562, 3586), False, 'import ephem\n'), ((4269, 4303), 'ephem.Date', 'ephem.Date', (["previous_field['DATE']"], {}), "(previous_field['DATE'])\n", (4279, 4303), False, 'import ephem\n'), ((9731, 9773), 'numpy.char.count', 'np.char.count', (['"""iz"""', "self.fields['FILTER']"], {}), "('iz', self.fields['FILTER'])\n", (9744, 9773), True, 'import numpy as np\n'), ((9880, 9922), 'numpy.char.count', 'np.char.count', (['"""gr"""', "self.fields['FILTER']"], {}), "('gr', self.fields['FILTER'])\n", (9893, 9922), True, 'import numpy as np\n'), ((10110, 10151), 'numpy.char.count', 'np.char.count', (['"""g"""', "self.fields['FILTER']"], {}), "('g', self.fields['FILTER'])\n", (10123, 10151), True, 'import numpy as np\n'), ((8392, 8454), 'numpy.in1d', 'np.in1d', (["self.fields['HEX']", 'obztak.utils.constants.HEX_SMCNOD'], {}), "(self.fields['HEX'], obztak.utils.constants.HEX_SMCNOD)\n", (8399, 8454), True, 'import numpy as np\n')] |
""" A PointSampleCam emulates a camera which has been calibrated to associate real-world coordinates (xr, yr) with each pixel position (xp, yp). A calibration data file is consulted which provides these associations. """
import pymunk
import numpy as np
from math import atan2, sqrt, fabs
from common import *
from pymunk import ShapeFilter
from configsingleton import ConfigSingleton
# Our mechanism for selectively importing pyglet/GUI-related stuff.
import gui_setting
if gui_setting.use:
import pyglet
class PointSampleImage:
    """Per-frame result of a point-sample camera sweep.

    Stores one category mask per calibration row; a mask of 0 means nothing
    was sensed at that point.  The calibration and neighbour arrays are
    shared by reference with the owning camera, not copied.
    """

    def __init__(self, calib_array, neighbour_array):
        # Shared references; the camera retains ownership of these arrays.
        self.calib_array = calib_array
        self.neighbour_array = neighbour_array
        self.n_rows = self.calib_array.shape[0]
        # One mask slot per calibration row, initialised to "nothing sensed".
        self.masks = [0 for _ in range(self.n_rows)]
class PointSampleCam:
    """Emulates a calibrated camera that samples the world at fixed points.

    Each row of the calibration file maps a pixel position (xp, yp) to a
    real-world offset (xr, yr) in the robot reference frame.  compute()
    queries the pymunk space at every calibrated point and records the
    category mask of whatever shape lies there.
    """

    def __init__(self, calib_filename, detection_mask, acceptance_mask, frontal_only):
        """
        calib_filename -- CSV calibration file; columns 2 and 3 hold the
            real-world offsets (in cm; converted to pixel units here).
        detection_mask -- all object categories the sensor reacts to.
        acceptance_mask -- detected objects not matching this mask are
            reported as walls.
        frontal_only -- if True, drop calibration points outside the
            configured distance/angle window.
        """
        config = ConfigSingleton.get_instance()
        self.min_distance = config.getfloat("PointSampleCam", "min_distance")
        self.max_distance = config.getfloat("PointSampleCam", "max_distance")
        self.max_abs_angle = config.getfloat("PointSampleCam", "max_abs_angle")

        # The detection mask is used to indicate all types of objects that
        # the sensor should be sensitive to.  However, if a detected object
        # doesn't also match the acceptance mask then it will be treated as
        # a wall.
        self.detection_mask = detection_mask
        self.acceptance_mask = acceptance_mask

        self.calib_array = np.loadtxt(calib_filename, delimiter=',')
        self.calib_array[:,2] *= CM_TO_PIXELS
        self.calib_array[:,3] *= CM_TO_PIXELS

        # Store two derived quantities alongside (xr, yr) so that they need
        # not be recomputed later:
        #   column 4: angle of (xr, yr) w.r.t. the X_R axis --- atan2(yr, xr)
        #   column 5: length of (xr, yr) --- sqrt(xr*xr + yr*yr)
        n_rows = self.calib_array.shape[0]
        self.calib_array = np.append(self.calib_array, np.zeros((n_rows, 2)), axis=1)
        for i in range(n_rows):
            (xr, yr) = self.calib_array[i,2], self.calib_array[i,3]
            self.calib_array[i,4] = atan2(yr, xr)
            self.calib_array[i,5] = sqrt(xr*xr + yr*yr)

        if frontal_only:
            # Delete all rows with distance outside [min_distance,
            # max_distance] or angle outside [-max_abs_angle, max_abs_angle].
            # (A single combined test also avoids appending the same index
            # twice, which the previous version could do.)
            delete_indices = []
            for i in range(n_rows):
                angle = self.calib_array[i,4]
                dist = self.calib_array[i,5]
                if (fabs(dist) < self.min_distance
                        or fabs(dist) > self.max_distance
                        or fabs(angle) > self.max_abs_angle):
                    delete_indices.append(i)
            self.calib_array = np.delete(self.calib_array, delete_indices, axis=0)

        # Pre-compute, for each calibration point, the indices of all *later*
        # points lying within a threshold real-world distance (50 pixel units).
        self.neighbour_array = []
        n_rows = self.calib_array.shape[0]
        for i in range(n_rows):
            (ixr, iyr) = self.calib_array[i,2], self.calib_array[i,3]
            nghbrs = []
            for j in range(i+1, n_rows):
                (jxr, jyr) = self.calib_array[j,2], self.calib_array[j,3]
                dx = ixr - jxr
                dy = iyr - jyr
                ij_dist = sqrt(dx*dx + dy*dy)
                if ij_dist <= 50:
                    nghbrs.append(j)
            self.neighbour_array.append(nghbrs)

        self.shape_filter = ShapeFilter(mask=self.detection_mask)

    def compute(self, env, robot):
        """Sample the space 'env' at every calibrated point around 'robot'
        and return a PointSampleImage holding the sensed category masks."""
        image = PointSampleImage(self.calib_array, self.neighbour_array)

        n_rows = self.calib_array.shape[0]
        for i in range(n_rows):
            # Coordinates of sensed point in robot ref. frame.
            (xr, yr) = self.calib_array[i,2], self.calib_array[i,3]
            # Coordinates in world coordinates.
            (xw, yw) = robot.body.local_to_world((xr, yr))

            query_info = env.point_query_nearest((xw, yw), 0, self.shape_filter)
            # BUG FIX: compare against None with 'is not', not '!='.
            if query_info is not None:
                object_mask = query_info.shape.filter.categories
                if object_mask & self.acceptance_mask == 0:
                    # The detected shape is not accepted; treat it as a wall.
                    object_mask = WALL_MASK
                image.masks[i] = object_mask
        return image

    def visualize(self, robot, image):
        """Draw every sensed point in world coordinates, colour-coded by its
        mask.  Only usable when pyglet was imported (gui_setting.use)."""
        n_rows = self.calib_array.shape[0]
        for i in range(n_rows):
            # Coordinates of sensed point in robot ref. frame.
            (xr, yr) = self.calib_array[i,2], self.calib_array[i,3]
            # Coordinates in world coordinates.
            (xw, yw) = robot.body.local_to_world((xr, yr))

            pyglet.gl.glPointSize(3)
            if image.masks[i] == 0:
                color = (255, 255, 255)
            elif image.masks[i] == WALL_MASK:
                color = (255, 255, 0)
            elif image.masks[i] == ROBOT_MASK:
                color = (0, 255, 255)
            elif image.masks[i] == BLAST_LANDMARK_MASK:
                color = (0, 0, 255)
            elif image.masks[i] == POLE_LANDMARK_MASK:
                color = (255, 0, 0)
            elif image.masks[i] == ARC_LANDMARK_MASK:
                color = (0, 255, 0)
            elif image.masks[i] == RED_PUCK_MASK:
                color = (255, 0, 255)
            elif image.masks[i] == GREEN_PUCK_MASK:
                color = (0, 255, 255)
            elif image.masks[i] == BLUE_PUCK_MASK:
                color = (255, 255, 0)
            else:
                # BUG FIX: previously this branch fell through to the draw
                # call with 'color' potentially unbound (NameError on first
                # iteration) or stale from a previous point; skip unknown
                # masks instead of drawing them.
                print("Unknown mask: {}".format(image.masks[i]))
                continue
            pyglet.graphics.draw(1, pyglet.gl.GL_POINTS,
                                 ('v2f', (xw, yw)), ('c3B', color))
            pyglet.gl.glPointSize(1)
"""
# make module runnable from command line
if __name__ == '__main__':
print "RUN"
sampler = PointSampler("../data/phase1_160x120.csv", 0, 0)
sampler.compute(None, None)
"""
| [
"numpy.delete",
"math.sqrt",
"pyglet.gl.glPointSize",
"numpy.zeros",
"pyglet.graphics.draw",
"math.fabs",
"math.atan2",
"pymunk.ShapeFilter",
"numpy.loadtxt",
"configsingleton.ConfigSingleton.get_instance"
] | [((1019, 1049), 'configsingleton.ConfigSingleton.get_instance', 'ConfigSingleton.get_instance', ([], {}), '()\n', (1047, 1049), False, 'from configsingleton import ConfigSingleton\n'), ((1652, 1693), 'numpy.loadtxt', 'np.loadtxt', (['calib_filename'], {'delimiter': '""","""'}), "(calib_filename, delimiter=',')\n", (1662, 1693), True, 'import numpy as np\n'), ((4354, 4391), 'pymunk.ShapeFilter', 'ShapeFilter', ([], {'mask': 'self.detection_mask'}), '(mask=self.detection_mask)\n', (4365, 4391), False, 'from pymunk import ShapeFilter\n'), ((6653, 6677), 'pyglet.gl.glPointSize', 'pyglet.gl.glPointSize', (['(1)'], {}), '(1)\n', (6674, 6677), False, 'import pyglet\n'), ((2233, 2254), 'numpy.zeros', 'np.zeros', (['(n_rows, 2)'], {}), '((n_rows, 2))\n', (2241, 2254), True, 'import numpy as np\n'), ((2400, 2413), 'math.atan2', 'atan2', (['yr', 'xr'], {}), '(yr, xr)\n', (2405, 2413), False, 'from math import atan2, sqrt, fabs\n'), ((2450, 2473), 'math.sqrt', 'sqrt', (['(xr * xr + yr * yr)'], {}), '(xr * xr + yr * yr)\n', (2454, 2473), False, 'from math import atan2, sqrt, fabs\n'), ((3130, 3181), 'numpy.delete', 'np.delete', (['self.calib_array', 'delete_indices'], {'axis': '(0)'}), '(self.calib_array, delete_indices, axis=0)\n', (3139, 3181), True, 'import numpy as np\n'), ((5642, 5666), 'pyglet.gl.glPointSize', 'pyglet.gl.glPointSize', (['(3)'], {}), '(3)\n', (5663, 5666), False, 'import pyglet\n'), ((6548, 6627), 'pyglet.graphics.draw', 'pyglet.graphics.draw', (['(1)', 'pyglet.gl.GL_POINTS', "('v2f', (xw, yw))", "('c3B', color)"], {}), "(1, pyglet.gl.GL_POINTS, ('v2f', (xw, yw)), ('c3B', color))\n", (6568, 6627), False, 'import pyglet\n'), ((4173, 4196), 'math.sqrt', 'sqrt', (['(dx * dx + dy * dy)'], {}), '(dx * dx + dy * dy)\n', (4177, 4196), False, 'from math import atan2, sqrt, fabs\n'), ((2825, 2835), 'math.fabs', 'fabs', (['dist'], {}), '(dist)\n', (2829, 2835), False, 'from math import atan2, sqrt, fabs\n'), ((2922, 2932), 'math.fabs', 'fabs', (['dist'], {}), 
'(dist)\n', (2926, 2932), False, 'from math import atan2, sqrt, fabs\n'), ((3019, 3030), 'math.fabs', 'fabs', (['angle'], {}), '(angle)\n', (3023, 3030), False, 'from math import atan2, sqrt, fabs\n')] |
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
pgf_with_custom_preamble = {
"font.family": "serif", # use serif/main font for text elements
"text.usetex": True, # use inline math for ticks
# "pgf.rcfonts": False, # don't setup fonts from rc parameters
"pgf.preamble": [
"\\usepackage{unicode-math,amsmath,amssymb,amsthm}", # unicode math setup
]
}
mpl.rcParams.update(pgf_with_custom_preamble)
import matplotlib.pyplot as plt
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
def makeup_for_plot(fig1):
    """Apply the house plotting style to a matplotlib Axes.

    Hides the top/right spines, keeps ticks on the bottom/left only and
    enables a light dashed grid.  Returns the same Axes for chaining.
    """
    fig1.spines["top"].set_visible(False)
    fig1.spines["bottom"].set_visible(True)
    fig1.spines["right"].set_visible(False)
    fig1.spines["left"].set_visible(True)
    fig1.get_xaxis().tick_bottom()
    fig1.get_yaxis().tick_left()
    # BUG FIX: tick_params boolean options must be real booleans; the string
    # "off" is truthy and is rejected outright by matplotlib >= 3.0.
    fig1.tick_params(axis="both", which="both", bottom=False, top=False,
                     labelbottom=True, left=False, right=False, labelleft=True,
                     labelsize=12)
    grid_color = '#e3e3e3'
    grid_line_style = '--'
    fig1.grid(linestyle=grid_line_style, color=grid_color)
    return fig1
def do_tight_layout_for_fig(fig):
    """Run tight_layout on *fig* and hand the figure back for chaining."""
    fig.tight_layout()
    return fig
lr_vals = [0.1]  # NOTE(review): appears unused in the visible script
# One colour per plotted curve, consumed in plot order via color_count.
colors = ['red','green','c','m','y','orange','green','c','m','y','black','brown','orange','blue', 'black','blue','brown','red','orange','green','c','m','y','orange','green','c','m','y']
import argparse
# --fun_num selects which dataset option's result files are plotted (0, 1 or 2).
parser = argparse.ArgumentParser(description='Plot Experiments')
parser.add_argument('--fun_num', '--fun_num', default=0,type=int, dest='fun_num')
args = parser.parse_args()
fun_num = args.fun_num
# Empty marker strings: curves are drawn as plain lines.
my_markers = ['','','','','','','']
# Result files keyed by method id; labels_dict (below) gives the mapping
# from id to method name.
if fun_num == 0:
    files = {
        1: 'results/cocain_mf_movielens_fun_name_1_dataset_option_1_abs_fun_num_1_breg_num_1_lam_val_0.1.txt',
        2: 'results/bpg_mf_movielens_fun_name_1_dataset_option_1_abs_fun_num_1_breg_num_1_lam_val_0.1.txt',
        3: 'results/palm_mf_movielens_fun_name_1_dataset_option_1_abs_fun_num_1_breg_num_1_beta_0.0_lam_val_0.1.txt',
        4: 'results/palm_mf_movielens_fun_name_1_dataset_option_1_abs_fun_num_1_breg_num_1_beta_0.2_lam_val_0.1.txt',
        5: 'results/palm_mf_movielens_fun_name_1_dataset_option_1_abs_fun_num_1_breg_num_1_beta_0.4_lam_val_0.1.txt',
        6: 'results/bpg_mf_wb_movielens_fun_name_1_dataset_option_1_abs_fun_num_1_breg_num_1_lam_val_0.1.txt',
    }
if fun_num == 1:
    files = {
        1: 'results/cocain_mf_movielens_fun_name_1_dataset_option_2_abs_fun_num_1_breg_num_1_lam_val_0.1.txt',
        2: 'results/bpg_mf_movielens_fun_name_1_dataset_option_2_abs_fun_num_1_breg_num_1_lam_val_0.1.txt',
        3: 'results/palm_mf_movielens_fun_name_1_dataset_option_2_abs_fun_num_1_breg_num_1_beta_0.0_lam_val_0.1.txt',
        4: 'results/palm_mf_movielens_fun_name_1_dataset_option_2_abs_fun_num_1_breg_num_1_beta_0.2_lam_val_0.1.txt',
        5: 'results/palm_mf_movielens_fun_name_1_dataset_option_2_abs_fun_num_1_breg_num_1_beta_0.4_lam_val_0.1.txt',
        6: 'results/bpg_mf_wb_movielens_fun_name_1_dataset_option_2_abs_fun_num_1_breg_num_1_lam_val_0.1.txt'
    }
if fun_num == 2:
    files = {
        1: 'results/cocain_mf_movielens_fun_name_1_dataset_option_3_abs_fun_num_1_breg_num_1_lam_val_0.1.txt',
        2: 'results/bpg_mf_movielens_fun_name_1_dataset_option_3_abs_fun_num_1_breg_num_1_lam_val_0.1.txt',
        3: 'results/palm_mf_movielens_fun_name_1_dataset_option_3_abs_fun_num_1_breg_num_1_beta_0.0_lam_val_0.1.txt',
        4: 'results/palm_mf_movielens_fun_name_1_dataset_option_3_abs_fun_num_1_breg_num_1_beta_0.2_lam_val_0.1.txt',
        5: 'results/palm_mf_movielens_fun_name_1_dataset_option_3_abs_fun_num_1_breg_num_1_beta_0.4_lam_val_0.1.txt',
        6: 'results/bpg_mf_wb_movielens_fun_name_1_dataset_option_3_abs_fun_num_1_breg_num_1_lam_val_0.1.txt'
    }
# One figure/axes pair per view of the results (see axis labels below for
# what each figure shows).
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1 = makeup_for_plot(ax1)
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2 = makeup_for_plot(ax2)
fig3 = plt.figure()
ax3 = fig3.add_subplot(111)
ax3 = makeup_for_plot(ax3)
fig4 = plt.figure()
ax4 = fig4.add_subplot(111)
ax4 = makeup_for_plot(ax4)
fig5 = plt.figure()
ax5 = fig5.add_subplot(111)
ax5 = makeup_for_plot(ax5)
label_font_size = 13
legend_font_size = 17
my_line_width = 2
# Legend text per method id (keys match the `files` dict above).
labels_dict = {
    1: r"CoCaIn BPG-MF",
    6: r"BPG-MF-WB",
    2: r"BPG-MF",
    3: r"PALM",
    4: r"iPALM ($\beta = 0.2$)",
    5: r"iPALM ($\beta = 0.4$)",
}
nb_epoch = 1000
# Plot order of the method ids (also fixes colour assignment).
opt_vals= np.array([1,6,2,3,4,5])
color_count = 0
f_opt = 0  # NOTE(review): never used below
min_fun_val = np.inf
# First pass: the smallest objective value seen across all methods is used
# as the reference value subtracted off in the suboptimality plots.
for i in opt_vals:
    file_name = files[i]
    try:
        best_train_objective_vals = np.loadtxt(file_name)[:,0]
        min_fun_val = np.nanmin([min_fun_val,np.min(best_train_objective_vals)])
        print(min_fun_val)
    except:
        # NOTE(review): bare except silently skips unreadable/missing result
        # files; consider narrowing to (OSError, IndexError).
        pass
# Second pass: draw each method's curves on all five axes.
for i in opt_vals:
    file_name = files[i]
    print(file_name)
    try:
        # Column 0: per-epoch objective value; column 1: per-epoch time.
        best_train_objective_vals = np.loadtxt(file_name)[:,0]
        best_time_vals = np.loadtxt(file_name)[:,1]
    except:
        # NOTE(review): if this branch is taken, best_time_vals is left over
        # from a previous iteration (NameError on the first), so the time
        # plot below can fail or use the wrong data.
        best_train_objective_vals = np.loadtxt(file_name)
    ax4.loglog((np.arange(nb_epoch)+1),(best_train_objective_vals[:nb_epoch] - min_fun_val),\
        label=labels_dict[i],color=colors[color_count], linewidth=my_line_width,marker=my_markers[i-1])
    ax3.loglog((np.arange(nb_epoch)+1),(best_train_objective_vals[:nb_epoch] - min_fun_val)/(best_train_objective_vals[0] - min_fun_val),\
        label=labels_dict[i],color=colors[color_count], linewidth=my_line_width,marker=my_markers[i-1])
    ax5.loglog((np.arange(nb_epoch)+1),(best_train_objective_vals[:nb_epoch] - min_fun_val)/(best_train_objective_vals[0]),\
        label=labels_dict[i],color=colors[color_count], linewidth=my_line_width,marker=my_markers[i-1])
    ax1.plot((np.arange(nb_epoch)+1),(best_train_objective_vals[:nb_epoch]),\
        label=labels_dict[i],color=colors[color_count], linewidth=my_line_width,marker=my_markers[i-1])
    # Shift times to start at a small positive epsilon (log x-axis), then
    # accumulate per-epoch times into elapsed time.
    best_time_vals = best_time_vals - best_time_vals[0]
    best_time_vals[0]=1e-2
    temp_time_vals = np.cumsum(best_time_vals[:nb_epoch])
    ax2.loglog(temp_time_vals, (best_train_objective_vals[:nb_epoch]),\
        label=labels_dict[i],color=colors[color_count], linewidth=my_line_width,marker=my_markers[i-1])
    color_count +=1
figure_name1 = 'figures/'+'func_vals_fun_num_'+str(fun_num)
# legends
ax1.legend(loc='upper right', fontsize=label_font_size)
ax2.legend(loc='upper right', fontsize=label_font_size)
ax3.legend(loc='lower left', fontsize=label_font_size)
ax4.legend(loc='lower left', fontsize=label_font_size)
ax5.legend(loc='lower left', fontsize=label_font_size)
# fig1: raw function values vs iterations.
ax1.set_xlabel('Iterations (log scale)',fontsize=legend_font_size)
ax1.set_ylabel('Function value (log scale)',fontsize=legend_font_size)
do_tight_layout_for_fig(fig1)
fig1.savefig(figure_name1+'.png', dpi=fig1.dpi)
fig1.savefig(figure_name1+'.pdf', dpi=fig1.dpi)
# fig2: raw function values vs elapsed time.
ax2.set_xlabel('Time (log scale)',fontsize=legend_font_size)
ax2.set_ylabel('Function value (log scale)',fontsize=legend_font_size)
do_tight_layout_for_fig(fig2)
fig2.savefig(figure_name1+'_time_.png', dpi=fig2.dpi)
fig2.savefig(figure_name1+'_time_.pdf', dpi=fig2.dpi)
# fig3: suboptimality normalised by the initial suboptimality.
ax3.set_xlabel('Iterations (log scale)',fontsize=legend_font_size)
ax3.set_ylabel(r'$\frac{\Psi({\bf U^k},{\bf Z^k}) - v({\mathcal P})}{\Psi({\bf U^1},{\bf Z^1}) -v({\mathcal P})}$ (log scale)',fontsize=legend_font_size)
do_tight_layout_for_fig(fig3)
fig3.savefig(figure_name1+'_compare_optval1_.png', dpi=fig3.dpi)
fig3.savefig(figure_name1+'_compare_optval1_.pdf', dpi=fig3.dpi)
# fig4: absolute suboptimality.
ax4.set_xlabel('Iterations (log scale)',fontsize=legend_font_size)
ax4.set_ylabel(r'$\Psi({\bf U^k},{\bf Z^k}) -v({\mathcal P})$ (log scale)',fontsize=legend_font_size)
do_tight_layout_for_fig(fig4)
fig4.savefig(figure_name1+'_compare_optval2_.png', dpi=fig4.dpi)
fig4.savefig(figure_name1+'_compare_optval2_.pdf', dpi=fig4.dpi)
# fig5: suboptimality normalised by the initial function value.
ax5.set_xlabel('Iterations (log scale)',fontsize=legend_font_size)
ax5.set_ylabel(r'$\frac{\Psi({\bf U^k},{\bf Z^k}) -v({\mathcal P})}{\Psi({\bf U^1},{\bf Z^1})}$ (log scale)',fontsize=legend_font_size)
do_tight_layout_for_fig(fig5)
fig5.savefig(figure_name1+'_compare_optval3_.png', dpi=fig5.dpi)
fig5.savefig(figure_name1+'_compare_optval3_.pdf', dpi=fig5.dpi)
| [
"argparse.ArgumentParser",
"matplotlib.rcParams.update",
"matplotlib.use",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.min",
"numpy.cumsum",
"numpy.loadtxt",
"numpy.arange"
] | [((44, 58), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (51, 58), True, 'import matplotlib as mpl\n'), ((401, 446), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['pgf_with_custom_preamble'], {}), '(pgf_with_custom_preamble)\n', (420, 446), True, 'import matplotlib as mpl\n'), ((1377, 1432), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot Experiments"""'}), "(description='Plot Experiments')\n", (1400, 1432), False, 'import argparse\n'), ((3653, 3665), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3663, 3665), True, 'import matplotlib.pyplot as plt\n'), ((3728, 3740), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3738, 3740), True, 'import matplotlib.pyplot as plt\n'), ((3803, 3815), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3813, 3815), True, 'import matplotlib.pyplot as plt\n'), ((3878, 3890), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3888, 3890), True, 'import matplotlib.pyplot as plt\n'), ((3953, 3965), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3963, 3965), True, 'import matplotlib.pyplot as plt\n'), ((4260, 4288), 'numpy.array', 'np.array', (['[1, 6, 2, 3, 4, 5]'], {}), '([1, 6, 2, 3, 4, 5])\n', (4268, 4288), True, 'import numpy as np\n'), ((5704, 5740), 'numpy.cumsum', 'np.cumsum', (['best_time_vals[:nb_epoch]'], {}), '(best_time_vals[:nb_epoch])\n', (5713, 5740), True, 'import numpy as np\n'), ((4413, 4434), 'numpy.loadtxt', 'np.loadtxt', (['file_name'], {}), '(file_name)\n', (4423, 4434), True, 'import numpy as np\n'), ((4649, 4670), 'numpy.loadtxt', 'np.loadtxt', (['file_name'], {}), '(file_name)\n', (4659, 4670), True, 'import numpy as np\n'), ((4695, 4716), 'numpy.loadtxt', 'np.loadtxt', (['file_name'], {}), '(file_name)\n', (4705, 4716), True, 'import numpy as np\n'), ((4761, 4782), 'numpy.loadtxt', 'np.loadtxt', (['file_name'], {}), '(file_name)\n', (4771, 4782), True, 'import numpy as np\n'), ((4797, 
4816), 'numpy.arange', 'np.arange', (['nb_epoch'], {}), '(nb_epoch)\n', (4806, 4816), True, 'import numpy as np\n'), ((4988, 5007), 'numpy.arange', 'np.arange', (['nb_epoch'], {}), '(nb_epoch)\n', (4997, 5007), True, 'import numpy as np\n'), ((5224, 5243), 'numpy.arange', 'np.arange', (['nb_epoch'], {}), '(nb_epoch)\n', (5233, 5243), True, 'import numpy as np\n'), ((5445, 5464), 'numpy.arange', 'np.arange', (['nb_epoch'], {}), '(nb_epoch)\n', (5454, 5464), True, 'import numpy as np\n'), ((4479, 4512), 'numpy.min', 'np.min', (['best_train_objective_vals'], {}), '(best_train_objective_vals)\n', (4485, 4512), True, 'import numpy as np\n')] |
'''
Copyright 2017 <NAME>, <NAME>, <NAME> and the Max Planck Gesellschaft. All rights reserved.
This software is provided for research purposes only.
By using this software you agree to the terms of the MANO/SMPL+H Model license here http://mano.is.tue.mpg.de/license
More information about MANO/SMPL+H is available at http://mano.is.tue.mpg.de.
For comments or questions, please email us at: <EMAIL>
Acknowledgements:
The code file is based on the release code of http://smpl.is.tue.mpg.de with adaptations.
Therefore, we would like to kindly thank <NAME> and <NAME>.
Please Note:
============
This is a demo version of the script for driving the SMPL+H model with python.
We would be happy to receive comments, help and suggestions on improving this code
and in making it available on more platforms.
System Requirements:
====================
Operating system: OSX, Linux
Python Dependencies:
- Numpy & Scipy [http://www.scipy.org/scipylib/download.html]
- Chumpy [https://github.com/mattloper/chumpy]
- OpenCV [http://opencv.org/downloads.html]
--> (alternatively: matplotlib [http://matplotlib.org/downloads.html])
About the Script:
=================
This script demonstrates loading the SMPL+H model and rendering it using OpenDR
to render and OpenCV to display (or alternatively matplotlib can also be used
for display, as shown in commented code below).
This code shows how to:
- Load the SMPL+H model
- Edit pose & shape parameters of the model to create a new body in a new pose
- Create an OpenDR scene (with a basic renderer, camera & light)
- Render the scene using OpenCV / matplotlib
Running the Hello World code:
=============================
Inside Terminal, navigate to the mano/webuser/hello_world directory. You can run
the hello world script now by typing the following:
> python SMPL+H___render.py
'''
import numpy as np
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
from opendr.camera import ProjectPoints
from webuser.smpl_handpca_wrapper import load_model
# Load SMPL+H model (here we load the female model)
# ncomps=12 selects the number of hand-pose PCA components;
# flat_hand_mean=False keeps the model's default mean hand pose.
m = load_model('../../models/SMPLH_female.pkl', ncomps=12, flat_hand_mean=False)
# Assign random pose and shape parameters
m.betas[:] = np.random.rand(m.betas.size) * .03
#m.pose[:] = np.random.rand(m.pose.size) * .2
# Hand-picked pose vector.  NOTE(review): presumably body axis-angle
# parameters followed by the 2x12 hand PCA coefficients -- confirm against
# the SMPL+H model layout.
m.pose[:] = [-0.17192541, +0.36310464, +0.05572387, -0.42836206, -0.00707548, +0.03556427,
             +0.18696896, -0.22704364, -0.39019834, +0.20273526, +0.07125099, +0.07105988,
             +0.71328310, -0.29426986, -0.18284189, +0.72134655, +0.07865227, +0.08342645,
             +0.00934835, +0.12881420, -0.02610217, -0.15579594, +0.25352553, -0.26097519,
             -0.04529948, -0.14718626, +0.52724564, -0.07638319, +0.03324086, +0.05886086,
             -0.05683995, -0.04069042, +0.68593617, -0.75870686, -0.08579930, -0.55086359,
             -0.02401033, -0.46217096, -0.03665799, +0.12397343, +0.10974685, -0.41607569,
             -0.26874970, +0.40249335, +0.21223768, +0.03365140, -0.05243080, +0.16074013,
             +0.13433811, +0.10414972, -0.98688595, -0.17270103, +0.29374368, +0.61868383,
             +0.00458329, -0.15357027, +0.09531648, -0.10624117, +0.94679869, -0.26851003,
             +0.58547889, -0.13735695, -0.39952280, -0.16598853, -0.14982575, -0.27937399,
             +0.12354536, -0.55101035, -0.41938681, +0.52238684, -0.23376718, -0.29814804,
             -0.42671473, -0.85829819, -0.50662164, +1.97374622, -0.84298473, -1.29958491]
# Overwrite the first global-rotation component with pi (presumably so the
# body renders upright for the camera below -- verify visually).
m.pose[0] = np.pi
# Create OpenDR renderer
rn = ColoredRenderer()
# Assign attributes to renderer
w, h = (640, 480)
# Camera: no rotation (rt=0), translated to (0, 0.15, 1.8); f/c/k are the
# focal lengths, principal point and distortion per OpenDR's ProjectPoints.
rn.camera = ProjectPoints(v=m, rt=np.zeros(3), t=np.array([0, 0.15, 1.8]), f=np.array([w,w])/2., c=np.array([w,h])/2., k=np.zeros(5))
rn.frustum = {'near': 0.5, 'far': 10., 'width': w, 'height': h}
rn.set(v=m, f=m.f, bgcolor=np.zeros(3))
# Construct point light source
# Two opposing point lights (summed vertex colours) for even illumination.
rn.vc = LambertianPointLight( f=m.f,
                        v=rn.v,
                        num_verts=len(m),
                        light_pos=np.array([-1000,-1000,-2000]),
                        vc=np.ones_like(m)*.9,
                        light_color=np.array([1., 1., 1.]))
rn.vc += LambertianPointLight( f=m.f,
                        v=rn.v,
                        num_verts=len(m),
                        light_pos=np.array([+2000,+2000,+2000]),
                        vc=np.ones_like(m)*.9,
                        light_color=np.array([1., 1., 1.]))
# Show it using OpenCV
import cv2
cv2.imshow('render_SMPL+H', rn.r)
cv2.imwrite('./SMPL+H___hello_world___opencv.png', rn.r * 255)
print ('..Print any key while on the display window')
cv2.waitKey(0)
cv2.destroyAllWindows()
from psbody.mesh import Mesh
from psbody.mesh import MeshViewers
from psbody.mesh.sphere import Sphere
radius = .01  # joint-marker sphere radius (model units)
# Skinned mesh plus one small sphere per joint; np.eye(3)[...] gives the
# root joint a different colour from the rest.
model_Mesh = Mesh(v=m.r, f=m.f)
model_Joints = [Sphere(np.array(jointPos), radius).to_mesh(np.eye(3)[0 if jointID == 0 else 1]) for jointID, jointPos in enumerate(m.J_transformed)]
# Three viewer panels: mesh+joints, mesh only, vertices-as-points+joints.
mvs = MeshViewers(window_width=2000, window_height=800, shape=[1, 3])
mvs[0][0].set_static_meshes([model_Mesh] + model_Joints, blocking=True)
mvs[0][1].set_static_meshes([model_Mesh], blocking=True)
model_Mesh = Mesh(v=m.r, f=[])  # same vertices without faces (point cloud)
mvs[0][2].set_static_meshes([model_Mesh] + model_Joints, blocking=True)
# NOTE(review): raw_input is Python 2 only; this script predates Python 3
# (use input() if porting).
raw_input('Rotate the 3D viewer and press Enter to store a screenshot...')
mvs[0][0].save_snapshot('./SMPL+H___hello_world___3D_viewer.png')
raw_input('Press any Enter...')
# # Could also use matplotlib to display
# import matplotlib
# import platform
# if 'Linux' in platform.system():
# pass # do not need to do anything
# elif 'Darwin' in platform.system():
# matplotlib.use("MacOSX")
# else:
# pass # unhandled # 'Windows' etc
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# from os import popen
# plt.ion()
# plt.imshow(rn.r)
# pathOUT = './SMPL+H___hello_world___matplotlib.png'
# plt.savefig(pathOUT)
# popen('open ' + pathOUT) # OSX
# raw_input('Press any key to exit')
# # matplotlib to display vertices and joints
# import matplotlib
# import platform
# if 'Linux' in platform.system():
# pass # do not need to do anything
# elif 'Darwin' in platform.system():
# matplotlib.use("MacOSX")
# else:
# pass # unhandled # 'Windows' etc
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# vertices = m.r
# joints3D = np.array(m.J_transformed).reshape((-1, 3))
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter(vertices[:, 0], vertices[:, 1], vertices[:, 2], color='r')
# ax.scatter(joints3D[:, 0], joints3D[:, 1], joints3D[:, 2], color='b')
# plt.show()
| [
"cv2.imwrite",
"numpy.ones_like",
"numpy.eye",
"numpy.random.rand",
"cv2.imshow",
"opendr.renderer.ColoredRenderer",
"numpy.zeros",
"numpy.array",
"cv2.destroyAllWindows",
"psbody.mesh.Mesh",
"psbody.mesh.MeshViewers",
"cv2.waitKey",
"webuser.smpl_handpca_wrapper.load_model"
] | [((2118, 2194), 'webuser.smpl_handpca_wrapper.load_model', 'load_model', (['"""../../models/SMPLH_female.pkl"""'], {'ncomps': '(12)', 'flat_hand_mean': '(False)'}), "('../../models/SMPLH_female.pkl', ncomps=12, flat_hand_mean=False)\n", (2128, 2194), False, 'from webuser.smpl_handpca_wrapper import load_model\n'), ((3564, 3581), 'opendr.renderer.ColoredRenderer', 'ColoredRenderer', ([], {}), '()\n', (3579, 3581), False, 'from opendr.renderer import ColoredRenderer\n'), ((4590, 4623), 'cv2.imshow', 'cv2.imshow', (['"""render_SMPL+H"""', 'rn.r'], {}), "('render_SMPL+H', rn.r)\n", (4600, 4623), False, 'import cv2\n'), ((4624, 4686), 'cv2.imwrite', 'cv2.imwrite', (['"""./SMPL+H___hello_world___opencv.png"""', '(rn.r * 255)'], {}), "('./SMPL+H___hello_world___opencv.png', rn.r * 255)\n", (4635, 4686), False, 'import cv2\n'), ((4741, 4755), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4752, 4755), False, 'import cv2\n'), ((4756, 4779), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4777, 4779), False, 'import cv2\n'), ((4910, 4928), 'psbody.mesh.Mesh', 'Mesh', ([], {'v': 'm.r', 'f': 'm.f'}), '(v=m.r, f=m.f)\n', (4914, 4928), False, 'from psbody.mesh import Mesh\n'), ((5084, 5147), 'psbody.mesh.MeshViewers', 'MeshViewers', ([], {'window_width': '(2000)', 'window_height': '(800)', 'shape': '[1, 3]'}), '(window_width=2000, window_height=800, shape=[1, 3])\n', (5095, 5147), False, 'from psbody.mesh import MeshViewers\n'), ((5290, 5307), 'psbody.mesh.Mesh', 'Mesh', ([], {'v': 'm.r', 'f': '[]'}), '(v=m.r, f=[])\n', (5294, 5307), False, 'from psbody.mesh import Mesh\n'), ((2251, 2279), 'numpy.random.rand', 'np.random.rand', (['m.betas.size'], {}), '(m.betas.size)\n', (2265, 2279), True, 'import numpy as np\n'), ((3668, 3679), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3676, 3679), True, 'import numpy as np\n'), ((3683, 3707), 'numpy.array', 'np.array', (['[0, 0.15, 1.8]'], {}), '([0, 0.15, 1.8])\n', (3691, 3707), True, 'import numpy 
as np\n'), ((3755, 3766), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (3763, 3766), True, 'import numpy as np\n'), ((3859, 3870), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3867, 3870), True, 'import numpy as np\n'), ((4075, 4106), 'numpy.array', 'np.array', (['[-1000, -1000, -2000]'], {}), '([-1000, -1000, -2000])\n', (4083, 4106), True, 'import numpy as np\n'), ((4205, 4230), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (4213, 4230), True, 'import numpy as np\n'), ((4400, 4431), 'numpy.array', 'np.array', (['[+2000, +2000, +2000]'], {}), '([+2000, +2000, +2000])\n', (4408, 4431), True, 'import numpy as np\n'), ((4530, 4555), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (4538, 4555), True, 'import numpy as np\n'), ((3711, 3727), 'numpy.array', 'np.array', (['[w, w]'], {}), '([w, w])\n', (3719, 3727), True, 'import numpy as np\n'), ((3733, 3749), 'numpy.array', 'np.array', (['[w, h]'], {}), '([w, h])\n', (3741, 3749), True, 'import numpy as np\n'), ((4141, 4156), 'numpy.ones_like', 'np.ones_like', (['m'], {}), '(m)\n', (4153, 4156), True, 'import numpy as np\n'), ((4466, 4481), 'numpy.ones_like', 'np.ones_like', (['m'], {}), '(m)\n', (4478, 4481), True, 'import numpy as np\n'), ((4988, 4997), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4994, 4997), True, 'import numpy as np\n'), ((4952, 4970), 'numpy.array', 'np.array', (['jointPos'], {}), '(jointPos)\n', (4960, 4970), True, 'import numpy as np\n')] |
from typing import List, Optional, Iterable
import numpy as np
import pandas as pd
from fire import Fire
from icecream import ic
from sacrebleu import sentence_bleu, corpus_bleu
from torchmetrics import ROUGEScore
class OracleReranker:
    """Measure the reranking headroom of beam-search outputs.

    Reads a reference file and a prediction file that holds ``num_beams``
    consecutive candidates per reference sentence, then prints three
    corpus-level scores:

    * random   -- pooled over *all* candidates (approximates picking a
      random beam candidate per example),
    * original -- always the first candidate per example (the model's
      default output),
    * oracle   -- the per-example candidate with the best sentence-level
      score against the reference, i.e. the upper bound any reranker
      could reach.
    """
    def __init__(self,
                 target_path: Optional[str] = None,
                 prediction_path: Optional[str] = None,
                 smooth_bleu: bool = True,
                 metric: str = "sacrebleu"):
        # Only these two metrics are supported by the scoring helpers below.
        assert metric in ("sacrebleu", "rougeL")
        self.target_path = target_path
        self.prediction_path = prediction_path
        # sacrebleu smoothing method: "exp" when smoothing is on, else "none".
        self.smooth_method_bleu = "exp" if smooth_bleu else "none"
        self.metric = metric
    def __call__(self):
        """Run the full evaluation and print random/original/oracle scores."""
        targets = self._read_sentence_file(self.target_path)
        predictions = self._read_predictions_file(self.prediction_path)
        self._infer_num_beams(targets, predictions)
        bleu_df = self._score_single_predictions(targets, predictions)
        # Scoring the whole pool of candidates approximates a random pick.
        random_bleu = self._compute_corpus_score(bleu_df)
        original_model_bleu = self._compute_corpus_score(self._get_first_predictions(bleu_df))
        oracle_bleu = self._compute_corpus_score(self._get_best_predictions(bleu_df))
        self._print_scores(random_bleu, original_model_bleu, oracle_bleu)
    def _read_predictions_file(self, prediction_path: str) -> List[str]:
        """Load predictions, auto-detecting the file format.

        A file whose first bytes start with 'S-' is treated as a
        fairseq-generate log; anything else as one sentence per line.
        """
        with open(prediction_path, 'r') as f:
            start_of_file = f.read(100)
        if start_of_file.startswith('S-'):
            return self._read_fairseq_generation_file(prediction_path)
        else:
            return self._read_sentence_file(prediction_path)
    @staticmethod
    def _read_fairseq_generation_file(path: str) -> List[str]:
        """Extract detokenized hypotheses ('D-' lines) from a fairseq log.

        Returns the hypotheses sorted by example index (ascending) and,
        within an example, by model confidence (descending), so the first
        candidate of each example is the model's preferred one.
        """
        with open(path, 'r') as f:
            content = f.read()
        lines = content.split('\n')
        rows = []
        for line in lines:
            # fairseq 'D-<idx>\t<score>\t<hypothesis>' lines carry the output.
            if line.startswith("D-"):
                example_index, confidence, prediction = line.split('\t')
                confidence = float(confidence)
                example_index = int(example_index.replace("D-", ''))
                rows.append((example_index, confidence, prediction))
        preds_df = pd.DataFrame(rows, columns=["example_index", "confidence", "prediction"])
        preds_df = preds_df.sort_values(by=["example_index", "confidence"], ascending=[True, False])
        preds = preds_df["prediction"].tolist()
        return preds
    def _read_sentence_file(self, path: str) -> List[str]:
        """Read one sentence per line, detokenizing punctuation; drops a
        single trailing empty line caused by a final newline."""
        with open(path, 'r') as f:
            content = f.read()
        content = self._detokenize_punctuation(content)
        lines = content.split('\n')
        if lines[-1] == '':
            lines = lines[:-1]
        return lines
    @staticmethod
    def _detokenize_punctuation(text: str) -> str:
        """Remove the space left before punctuation by tokenization."""
        for punct in [',', '.', ':', ';', '?', '!']:
            text = text.replace(' ' + punct, punct)
        return text
    def _score_single_predictions(self, targets: List[str], predictions: List[str]) -> pd.DataFrame:
        """Score every candidate against its reference.

        Each target is repeated ``num_beams`` times so it lines up with
        its block of consecutive candidates in ``predictions``.
        """
        example_index = np.repeat(range(len(targets)), self.num_beams)
        targets = np.repeat(targets, self.num_beams)
        bleu_df = pd.DataFrame({"example_index": example_index,
                                "target": targets,
                                "prediction": predictions})
        bleu_df["score"] = bleu_df.apply(self._compute_sentence_score, axis=1)
        return bleu_df
    def _compute_corpus_score(self, bleu_df: pd.DataFrame) -> float:
        """Corpus-level score (rounded to 2 decimals) of the frame's rows."""
        preds = bleu_df["prediction"].values
        targets = bleu_df["target"].values
        if self.metric == "sacrebleu":
            score = self._compute_corpus_bleu(preds, targets)
        else:  # self.metric == "rougeL":
            score = self._compute_corpus_rougeL(preds, targets)
        return round(score, 2)
    def _compute_corpus_bleu(self, preds: Iterable[str], targets: Iterable[str]) -> float:
        """Corpus BLEU via sacrebleu (single reference stream)."""
        return corpus_bleu(sys_stream=preds,
                           ref_streams=[targets],
                           smooth_method=self.smooth_method_bleu).score
    def _compute_corpus_rougeL(self, preds: Iterable[str], targets: Iterable[str]) -> float:
        """Corpus ROUGE-L F-measure via torchmetrics, scaled to 0-100."""
        metric = ROUGEScore(rouge_keys="rougeL")
        score_dict = metric(preds=preds, targets=targets)
        score = 100 * score_dict["rougeL_fmeasure"].item()
        return score
    def _compute_sentence_score(self, row: pd.Series) -> float:
        """Sentence-level score for one (prediction, target) row."""
        pred = row["prediction"]
        target = row["target"]
        if self.metric == "sacrebleu":
            return self._compute_sentence_bleu(pred, target)
        else:  # self.metric == "rougeL"
            return self._compute_sentence_rougeL(pred, target)
    def _compute_sentence_bleu(self, pred: str, target: str) -> float:
        """Sentence BLEU via sacrebleu."""
        return sentence_bleu(hypothesis=pred,
                             references=[target],
                             smooth_method=self.smooth_method_bleu).score
    def _compute_sentence_rougeL(self, pred: str, target: str) -> float:
        # ROUGE-L on a single pair is just the corpus metric of one item.
        return self._compute_corpus_rougeL(preds=[pred], targets=[target])
    def _get_first_predictions(self, bleu_df: pd.DataFrame) -> pd.DataFrame:
        """Keep only each example's first (highest-confidence) candidate."""
        return bleu_df.groupby("example_index").apply(lambda mini_df: mini_df.iloc[0])
    def _get_best_predictions(self, bleu_df: pd.DataFrame) -> pd.DataFrame:
        """Keep only each example's best-scoring candidate (the oracle pick)."""
        return bleu_df.groupby("example_index").apply(
            lambda mini_df: mini_df.iloc[mini_df["score"].astype(float).argmax()])
    def _print_scores(self, random_score: float, original_model_score: float, oracle_score: float) -> None:
        """Print all scores plus the oracle's absolute and relative gain."""
        oracle_abs_gain = round(oracle_score - original_model_score, 2)
        oracle_percent_gain = round(oracle_abs_gain / original_model_score * 100, 1)
        ic(self.metric)
        ic(random_score)
        ic(original_model_score)
        ic(oracle_score)
        ic(oracle_abs_gain)
        ic(oracle_percent_gain)
    def _infer_num_beams(self, targets: List[str], predictions: List[str]) -> None:
        """Derive beam width from the file sizes; predictions must be an
        exact multiple of the number of targets."""
        assert len(predictions) % len(targets) == 0
        self.num_beams = int(len(predictions) / len(targets))
if __name__ == '__main__':
    # Fire exposes OracleReranker's constructor arguments as CLI flags
    # (usage examples live in the module-level string further down).
    Fire(OracleReranker)
"""
# half_iwslt14
python oracle.py \
--target-path="/home/olab/tomerronen1/deploys/fairseq-mcrerank/data/first-half.iwslt14.tokenized.de-en/detok.valid.en" \
--prediction-path="/home/olab/tomerronen1/deploys/fairseq-mcrerank/data/first-half.iwslt14.tokenized.de-en/generations/detok.clean_generations_valid.en" \
# half_iwslt14 with full generations file
python oracle.py \
--target-path="/home/olab/tomerronen1/deploys/fairseq-mcrerank/data/first-half.iwslt14.tokenized.de-en/valid.en" \
--prediction-path="/home/olab/tomerronen1/deploys/fairseq-mcrerank/data/first-half.iwslt14.tokenized.de-en/generations/full_generations_valid.en"
# full_iwslt14
python oracle.py \
--target-path="/home/olab/tomerronen1/deploys/fairseq-mcrerank/data/full.iwslt14.tokenized.de-en/detok.valid.en" \
--prediction-path="/home/olab/tomerronen1/deploys/fairseq-mcrerank/data/full.iwslt14.tokenized.de-en/generations/detok.clean_generations_valid.en"
# trained on full_iwslt14, generated on wmt19
DATA_DIR="${TOMER}/deploys/fairseq-mcrerank/data/OLD_dr_nmt_paper"
python oracle.py \
--target-path="${DATA_DIR}/detok.valid.en" \
--prediction-path="${DATA_DIR}/generations/detok.clean_generations_valid.en"
ic| random_bleu: 16.4
ic| original_model_bleu: 17.2
ic| oracle_bleu: 20.25
ic| oracle_abs_gain: 3.05
ic| oracle_percent_gain: 17.7
# wmt19
( DATA_DIR="${TOMER}/data/fairseq-mcrerank/dr_nmt_paper/parallel"
DETOK_OR_TOK="detok"
BEAM=10
python oracle.py \
--target-path="${DATA_DIR}/text_data/clean_${DETOK_OR_TOK}/valid.en" \
--prediction-path="${DATA_DIR}/generations/beam_${BEAM}/${DETOK_OR_TOK}_generations_valid.en" )
# beam 5 detok:
ic| random_bleu: 24.04
ic| original_model_bleu: 24.74
ic| oracle_bleu: 27.97
ic| oracle_abs_gain: 3.23
ic| oracle_percent_gain: 13.1
# beam 10 detok:
ic| random_bleu: 23.82
ic| original_model_bleu: 24.74
ic| oracle_bleu: 29.63
ic| oracle_abs_gain: 4.89
ic| oracle_percent_gain: 19.8
# xsum
( BEAM=5
DATA_DIR="$TOMER/data/fairseq-mcrerank/huggingface_models/xsum/sshleifer--distilbart-xsum-12-3/beam_${BEAM}"
python oracle.py \
--target-path="${DATA_DIR}/text_data/clean_detok/valid.tgt" \
--prediction-path="${DATA_DIR}/generations/beam_${BEAM}/detok_generations_valid.tgt" \
--metric="rougeL" )
ic| self.metric: 'rougeL'
ic| random_score: 36.02
ic| original_model_score: 36.58 ## can also calculate by running: sed -n '1~5p' valid.rougeL | awk '{ total += $1; count++ } END { print total/count }'
ic| oracle_score: 41.85
ic| oracle_abs_gain: 5.27
ic| oracle_percent_gain: 14.4
"""
| [
"icecream.ic",
"numpy.repeat",
"fire.Fire",
"sacrebleu.sentence_bleu",
"torchmetrics.ROUGEScore",
"pandas.DataFrame",
"sacrebleu.corpus_bleu"
] | [((6121, 6141), 'fire.Fire', 'Fire', (['OracleReranker'], {}), '(OracleReranker)\n', (6125, 6141), False, 'from fire import Fire\n'), ((2176, 2249), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {'columns': "['example_index', 'confidence', 'prediction']"}), "(rows, columns=['example_index', 'confidence', 'prediction'])\n", (2188, 2249), True, 'import pandas as pd\n'), ((3104, 3138), 'numpy.repeat', 'np.repeat', (['targets', 'self.num_beams'], {}), '(targets, self.num_beams)\n', (3113, 3138), True, 'import numpy as np\n'), ((3157, 3253), 'pandas.DataFrame', 'pd.DataFrame', (["{'example_index': example_index, 'target': targets, 'prediction': predictions}"], {}), "({'example_index': example_index, 'target': targets,\n 'prediction': predictions})\n", (3169, 3253), True, 'import pandas as pd\n'), ((4182, 4213), 'torchmetrics.ROUGEScore', 'ROUGEScore', ([], {'rouge_keys': '"""rougeL"""'}), "(rouge_keys='rougeL')\n", (4192, 4213), False, 'from torchmetrics import ROUGEScore\n'), ((5730, 5745), 'icecream.ic', 'ic', (['self.metric'], {}), '(self.metric)\n', (5732, 5745), False, 'from icecream import ic\n'), ((5754, 5770), 'icecream.ic', 'ic', (['random_score'], {}), '(random_score)\n', (5756, 5770), False, 'from icecream import ic\n'), ((5779, 5803), 'icecream.ic', 'ic', (['original_model_score'], {}), '(original_model_score)\n', (5781, 5803), False, 'from icecream import ic\n'), ((5812, 5828), 'icecream.ic', 'ic', (['oracle_score'], {}), '(oracle_score)\n', (5814, 5828), False, 'from icecream import ic\n'), ((5837, 5856), 'icecream.ic', 'ic', (['oracle_abs_gain'], {}), '(oracle_abs_gain)\n', (5839, 5856), False, 'from icecream import ic\n'), ((5865, 5888), 'icecream.ic', 'ic', (['oracle_percent_gain'], {}), '(oracle_percent_gain)\n', (5867, 5888), False, 'from icecream import ic\n'), ((3919, 4015), 'sacrebleu.corpus_bleu', 'corpus_bleu', ([], {'sys_stream': 'preds', 'ref_streams': '[targets]', 'smooth_method': 'self.smooth_method_bleu'}), '(sys_stream=preds, 
ref_streams=[targets], smooth_method=self.\n smooth_method_bleu)\n', (3930, 4015), False, 'from sacrebleu import sentence_bleu, corpus_bleu\n'), ((4772, 4867), 'sacrebleu.sentence_bleu', 'sentence_bleu', ([], {'hypothesis': 'pred', 'references': '[target]', 'smooth_method': 'self.smooth_method_bleu'}), '(hypothesis=pred, references=[target], smooth_method=self.\n smooth_method_bleu)\n', (4785, 4867), False, 'from sacrebleu import sentence_bleu, corpus_bleu\n')] |
'''This is a module that tries to use emcee to solve SNooPy models.
The SN object should have all the necessary ingredients. All that is
left is to define prior probabilities.'''
import emcee
import numpy as np
from scipy.optimize import minimize
import types,os
gconst = -0.5*np.log(2*np.pi)  # log(1/sqrt(2*pi)): constant term of a unit-sigma Gaussian log-density
def builtin_priors(x, st):
    """Evaluate a log-prior encoded as a comma-separated code string.

    Supported codes:
      'U,a,b'      -- uniform on the open interval (a, b)
      'G,mu,sigma' -- Gaussian with mean mu and standard deviation sigma
      'E,tau'      -- exponential with scale tau

    Returns the log-probability of ``x`` (``-np.inf`` outside a uniform's
    support).  Raises ValueError for an unrecognized prior code.
    """
    code = st[0]
    if code not in ('U', 'G', 'E'):
        raise ValueError("I don't understand the prior code %s" % st)
    # All three codes store their numeric parameters after the first comma.
    params = [float(tok) for tok in st.split(',')[1:]]
    if code == 'U':
        lower, upper = params
        return 0 if lower < x < upper else -np.inf
    if code == 'G':
        mu, sigma = params
        return gconst - 0.5*np.power(x - mu, 2)/sigma**2 - np.log(sigma)
    # code == 'E'
    tau = params[0]
    return -np.log(tau) - x/tau
def vecgauss(x, mu, sig):
    """Sum of elementwise Gaussian log-densities N(x; mu, sig).

    Arguments may be scalars or broadcastable arrays; the individual
    log-density terms are summed into a single scalar.
    """
    sq = np.power(x - mu, 2)
    var = np.power(sig, 2)
    terms = gconst - 0.5*sq/var - np.log(sig)
    return np.sum(terms)
def guess(varinfo, snobj):
    """Build initial parameter and scatter vectors for the MCMC walkers.

    For every free variable the starting value comes from the model's
    nuisance parameters (with their quoted errors as scatter) or from the
    fitted model parameters (with a tiny fixed scatter of 1e-4).

    Raises ValueError when a model parameter is still unset, i.e. no
    initial fit has been run yet.
    """
    model = snobj.model
    nvar = varinfo['Nvar']
    start = np.zeros((nvar,))
    scatter = np.zeros((nvar,))
    for name in varinfo['free']:
        idx = varinfo[name]['index']
        if name in model.nparameters:
            start[idx] = model.nparameters[name]
            scatter[idx] = model.enparameters[name]
        else:
            value = model.parameters[name]
            if value is None:
                raise ValueError("model parameters not set, run initial fit() first")
            start[idx] = value
            scatter[idx] = 1e-4
    return start, scatter
def setup_varinfo(snobj, args):
    """Given a sn object and its associated model, set up the varinfo dict.

    ``varinfo`` maps each model (and nuisance) parameter name to a small
    dict describing how the sampler treats it:

    * a string in ``args`` (e.g. ``Tmax='G,1000,10'``) -> free variable
      with a built-in prior parsed by :func:`builtin_priors`;
    * a numeric scalar in ``args`` -> parameter held fixed at that value;
    * a callable in ``args`` -> free variable with a user-supplied
      log-prior function;
    * absent from ``args`` -> free variable using the model's own prior
      (``prior_type='model'``), or, for nuisance parameters with quoted
      errors, a Gaussian prior built from the model's value/error.

    Returns the varinfo dict; ``varinfo['Nvar']`` is the total number of
    free scalar components and ``varinfo['free']`` the free names.
    """
    varinfo = {}
    varinfo['varlist'] = list(snobj.model.parameters.keys())
    # Nuissance parameters
    varinfo['varlist'] = varinfo['varlist'] + list(snobj.model.nparameters.keys())
    varinfo['fitflux'] = args.get('fitflux', True)
    i = 0
    varinfo['free'] = []
    for var in varinfo['varlist']:
        varinfo[var] = {}
        if var in args:
            # BUG FIX: this used to test ``type(args[var]) is bytes`` (a
            # py2->py3 leftover), so str-valued priors such as 'U,0,1'
            # fell through and were silently treated as *fixed* values.
            if isinstance(args[var], (str, bytes)):
                varinfo[var]['fixed'] = False
                varinfo[var]['index'] = i
                varinfo['free'].append(var)
                i += 1
                varinfo[var]['prior'] = args[var]
                varinfo[var]['prior_type'] = 'builtin'
            # np.ScalarType was removed in NumPy 2.0; test for plain and
            # numpy numeric scalars explicitly (bool is a subclass of int).
            elif isinstance(args[var], (int, float, complex, np.generic)):
                varinfo[var]['value'] = args[var]
                varinfo[var]['fixed'] = True
            # Accept any callable as a user prior (was FunctionType only).
            elif callable(args[var]):
                varinfo[var]['fixed'] = False
                varinfo[var]['index'] = i
                varinfo['free'].append(var)
                i += 1
                varinfo[var]['prior'] = args[var]
                varinfo[var]['prior_type'] = 'function'
        else:
            if var in snobj.model.nparameters:
                if snobj.model.enparameters[var] is not None:
                    varinfo[var]['fixed'] = False
                    varinfo[var]['prior_type'] = 'nuissance'
                    varinfo[var]['value'] = snobj.model.nparameters[var]
                    varinfo[var]['std'] = snobj.model.enparameters[var]
                    # Vector-valued nuisance parameters occupy a slice of
                    # the parameter vector rather than a single index.
                    if len(np.shape(varinfo[var]['value'])) == 1:
                        varinfo[var]['index'] = slice(i, i + varinfo[var]['value'].shape[0])
                        i += varinfo[var]['value'].shape[0]
                    else:
                        varinfo[var]['index'] = i
                        i += 1
                    varinfo['free'].append(var)
                else:
                    # No quoted error: hold the nuisance parameter fixed.
                    varinfo[var]['fixed'] = True
                    varinfo[var]['value'] = snobj.model.nparameters[var]
            else:
                # Free model parameter using the model's own prior.
                varinfo[var]['fixed'] = False
                varinfo[var]['index'] = i
                varinfo['free'].append(var)
                i += 1
                varinfo[var]['prior_type'] = 'model'
    varinfo['Nvar'] = i
    return varinfo
def lnprior(p, varinfo, snobj):
    """Total log-prior of parameter vector ``p`` over all free variables."""
    total = 0
    for name in varinfo['free']:
        info = varinfo[name]
        idx = info['index']
        value = p[idx]
        kind = info['prior_type']
        if kind == 'function':
            # User-supplied callable returning a log-probability.
            total += info['prior'](value)
        elif kind == 'builtin':
            # String-coded prior, parsed by builtin_priors.
            total += builtin_priors(value, info['prior'])
        elif kind == 'model':
            # Delegate to the model's own prior for this parameter.
            total += snobj.model.prior(name, value)
        elif kind == 'nuissance':
            # Gaussian prior around the model's quoted value/error.
            total += vecgauss(p[idx], info['value'], info['std'])
    return total
def lnlike(p, varinfo, snobj, bands):
    """Log-likelihood of the photometry given parameter vector ``p``.

    The free entries of ``p`` are copied into the model's parameters
    (fixed ones come from ``varinfo``); the model is then evaluated at
    each band's observed epochs and an independent-Gaussian
    log-likelihood is accumulated, comparing in flux or magnitude space
    according to ``varinfo['fitflux']``.

    Returns ``-np.inf`` when the model covers none of a band's data.
    """
    # First, push all variables (free and fixed) into the model.
    for i_var, var in enumerate(varinfo['varlist']):
        if varinfo[var]['fixed']:
            if var in snobj.model.parameters:
                snobj.model.parameters[var] = varinfo[var]['value']
            else:
                snobj.model.nparameters[var] = varinfo[var]['value']
        else:
            val = p[varinfo[var]['index']]
            if var in snobj.model.parameters:
                snobj.model.parameters[var] = val
            else:
                snobj.model.nparameters[var] = p[varinfo[var]['index']]
    lp = 0
    for band in bands:
        mod, err, mask = snobj.model.__call__(band, snobj.data[band].MJD)
        fitflux = varinfo['fitflux']
        # Convert model output and its error into the comparison space
        # (flux or magnitude); 1.0857 = 2.5/ln(10) converts between
        # magnitude errors and relative flux errors.
        if fitflux:
            if snobj.model.model_in_mags:
                f = np.power(10, -0.4*(mod - snobj.data[band].filter.zp))
                cov_f = np.power(f*err/1.0857, 2)
            else:
                f = mod
                cov_f = np.power(err, 2)
        else:
            if snobj.model.model_in_mags:
                f = mod
                cov_f = np.power(err, 2)
            else:
                # BUG FIX: this line previously read bare ``log10`` (a
                # NameError at runtime); use np.log10.
                f = -2.5*np.log10(mod) + snobj.data[band].filter.zp
                cov_f = np.power(err/mod*1.0857, 2)
        m = mask*snobj.data[band].mask
        if not np.any(m):  # np.sometrue was removed in NumPy 2.0
            # We're outside the support of the data
            return -np.inf
        X = snobj.data[band].flux[m] - f[m]
        # Independent Gaussian errors: model variance plus data variance.
        # (A full-covariance version existed here once; see VCS history.)
        denom = cov_f[m] + np.power(snobj.data[band].e_flux[m], 2)
        lp = lp - 0.5*np.sum(np.power(X, 2)/denom + \
                np.log(denom) + np.log(2*np.pi))
    return lp
def lnprob(p, varinfo, snobj, bands):
    """Log-posterior: log-prior plus log-likelihood.

    Skips the (expensive) likelihood evaluation entirely when the prior
    already rules the point out.
    """
    prior_term = lnprior(p, varinfo, snobj)
    if np.isfinite(prior_term):
        return prior_term + lnlike(p, varinfo, snobj, bands)
    return -np.inf
#raise RuntimeError, "Model must be in mags"
#raise RuntimeError, "Model must be in mags"
def generateSampler(snobj, bands, nwalkers, threads=1, tracefile=None, **args):
    '''Generate an emcee sampler from the sn object [snobj] and its
    associated model (chosen with snobj.choose_model). You must set the
    number of walkers (see emcee documentation). You can control
    the priors of the model by passing them as arguments. For example,
    using Tmax='G,1000,10' would use a Gaussian prior with mean 1000
    and standard deviation 10. You can also set any parameter to a
    constant value. Lastly, you can set a parameter equal to a function
    that takes a single argument and returns the log-probability as
    a prior.

    This function returns: sampler,vinfo,p0
    where sampler is an emcee sampler, vinfo describes the variables,
    and p0 is [nwalkers] starting points.'''
    # Optionally resume: read the last position of each walker from a
    # previously written trace file.
    tp0 = None
    if tracefile is not None:
        if os.path.isfile(tracefile):
            tpars = []
            Nwalkers = 50  # default if the header lacks an Nwalkers line
            with open(tracefile) as f:
                line = f.readline()
                # Header lines start with '#': 'Col' lines name the traced
                # parameters, an 'Nwalkers' line gives the walker count.
                # (startswith avoids an IndexError on a blank/EOF line,
                # which the old ``line[0] == '#'`` test hit.)
                while line.startswith('#'):
                    if line.find('Col') > 0:
                        tpars.append(line.split()[-1])
                    elif line.find('Nwalkers') >= 0:
                        Nwalkers = int(line.split()[-1])
                    line = f.readline()
            data = np.loadtxt(tracefile)
            # BUG FIX: integer division.  Under python 3, '/' returns a
            # float, and the float indices in endids crash the numpy
            # fancy indexing below.
            Niter = data.shape[0] // Nwalkers
            endids = [(i + 1)*Niter - 1 for i in range(Nwalkers)]
            tp0 = [data[ids, :] for ids in endids]
    if not snobj.model._fbands:
        raise ValueError("You need to do an initial fit to the SN first")
    vinfo = setup_varinfo(snobj, args)
    p, ep = guess(vinfo, snobj)
    ndim = p.shape[0]
    # Scatter the walkers around the initial guess, rejecting draws that
    # land outside the prior support; give up after 1000 rejections.
    p0 = []
    fail = 0
    while len(p0) < nwalkers and fail < 1000:
        pp = p + ep*np.random.randn(ndim)
        if not np.isinf(lnprior(pp, vinfo, snobj)):
            p0.append(pp)
        else:
            fail += 1
    if len(p0) < nwalkers:
        raise RuntimeError("Could not establish an initial set of MCMC walkers.\n" +\
              "Make sure your priors are consistent with your intial fit solution")
    # Overwrite walker positions for any parameter present in the trace.
    if tp0 is not None:
        for i in range(len(p0)):
            for ii, par in enumerate(tpars):
                j = vinfo[par]['index']
                p0[i][j] = tp0[i][ii]
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(vinfo, snobj, bands),
                                    threads=threads)
    return sampler, vinfo, p0
| [
"numpy.shape",
"numpy.sometrue",
"numpy.power",
"numpy.log",
"emcee.EnsembleSampler",
"os.path.isfile",
"numpy.zeros",
"numpy.isfinite",
"numpy.loadtxt",
"numpy.random.randn"
] | [((279, 296), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (285, 296), True, 'import numpy as np\n'), ((1293, 1321), 'numpy.zeros', 'np.zeros', (["(varinfo['Nvar'],)"], {}), "((varinfo['Nvar'],))\n", (1301, 1321), True, 'import numpy as np\n'), ((1330, 1358), 'numpy.zeros', 'np.zeros', (["(varinfo['Nvar'],)"], {}), "((varinfo['Nvar'],))\n", (1338, 1358), True, 'import numpy as np\n'), ((9229, 9323), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['nwalkers', 'ndim', 'lnprob'], {'args': '(vinfo, snobj, bands)', 'threads': 'threads'}), '(nwalkers, ndim, lnprob, args=(vinfo, snobj, bands),\n threads=threads)\n', (9250, 9323), False, 'import emcee\n'), ((6728, 6747), 'numpy.isfinite', 'np.isfinite', (['lprior'], {}), '(lprior)\n', (6739, 6747), True, 'import numpy as np\n'), ((7753, 7778), 'os.path.isfile', 'os.path.isfile', (['tracefile'], {}), '(tracefile)\n', (7767, 7778), False, 'import types, os\n'), ((1199, 1210), 'numpy.log', 'np.log', (['sig'], {}), '(sig)\n', (1205, 1210), True, 'import numpy as np\n'), ((5912, 5926), 'numpy.sometrue', 'np.sometrue', (['m'], {}), '(m)\n', (5923, 5926), True, 'import numpy as np\n'), ((6491, 6530), 'numpy.power', 'np.power', (['snobj.data[band].e_flux[m]', '(2)'], {}), '(snobj.data[band].e_flux[m], 2)\n', (6499, 6530), True, 'import numpy as np\n'), ((8155, 8176), 'numpy.loadtxt', 'np.loadtxt', (['tracefile'], {}), '(tracefile)\n', (8165, 8176), True, 'import numpy as np\n'), ((875, 888), 'numpy.log', 'np.log', (['sigma'], {}), '(sigma)\n', (881, 888), True, 'import numpy as np\n'), ((5459, 5514), 'numpy.power', 'np.power', (['(10)', '(-0.4 * (mod - snobj.data[band].filter.zp))'], {}), '(10, -0.4 * (mod - snobj.data[band].filter.zp))\n', (5467, 5514), True, 'import numpy as np\n'), ((5533, 5562), 'numpy.power', 'np.power', (['(f * err / 1.0857)', '(2)'], {}), '(f * err / 1.0857, 2)\n', (5541, 5562), True, 'import numpy as np\n'), ((5613, 5629), 'numpy.power', 'np.power', (['err', '(2)'], {}), '(err, 
2)\n', (5621, 5629), True, 'import numpy as np\n'), ((5721, 5737), 'numpy.power', 'np.power', (['err', '(2)'], {}), '(err, 2)\n', (5729, 5737), True, 'import numpy as np\n'), ((5834, 5865), 'numpy.power', 'np.power', (['(err / mod * 1.0857)', '(2)'], {}), '(err / mod * 1.0857, 2)\n', (5842, 5865), True, 'import numpy as np\n'), ((8733, 8754), 'numpy.random.randn', 'np.random.randn', (['ndim'], {}), '(ndim)\n', (8748, 8754), True, 'import numpy as np\n'), ((1181, 1197), 'numpy.power', 'np.power', (['sig', '(2)'], {}), '(sig, 2)\n', (1189, 1197), True, 'import numpy as np\n'), ((1022, 1033), 'numpy.log', 'np.log', (['tau'], {}), '(tau)\n', (1028, 1033), True, 'import numpy as np\n'), ((1164, 1183), 'numpy.power', 'np.power', (['(x - mu)', '(2)'], {}), '(x - mu, 2)\n', (1172, 1183), True, 'import numpy as np\n'), ((6609, 6626), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (6615, 6626), True, 'import numpy as np\n'), ((845, 864), 'numpy.power', 'np.power', (['(x - mu)', '(2)'], {}), '(x - mu, 2)\n', (853, 864), True, 'import numpy as np\n'), ((3425, 3456), 'numpy.shape', 'np.shape', (["varinfo[var]['value']"], {}), "(varinfo[var]['value'])\n", (3433, 3456), True, 'import numpy as np\n'), ((6593, 6606), 'numpy.log', 'np.log', (['denom'], {}), '(denom)\n', (6599, 6606), True, 'import numpy as np\n'), ((6557, 6571), 'numpy.power', 'np.power', (['X', '(2)'], {}), '(X, 2)\n', (6565, 6571), True, 'import numpy as np\n')] |
from reprojection import runSuperGlueSinglePair,image_pair_candidates, runSIFTSinglePair
from ray_dist_loss import preprocess_match, proj_ray_dist_loss_single
import torch
import numpy as np
import os
from random import random
import numpy as np
import torch
import torchvision.transforms as TF
import matplotlib.pyplot as plt
tol=1e-4
match_num = 4
def run_unit_test(args, kwargs, test_name):
    """Invoke ``test_name(**kwargs)`` only when ``args.debug`` is set.

    Returns whatever the test returns, or ``None`` when debugging is off.
    """
    if args.debug:
        return test_name(**kwargs)
    return None
def unit_test_matches(**kwargs):
    """Debug helper: draw a handful of keypoint matches and save the plot.

    Expects in ``kwargs``: args (with .datadir), result (a pair of
    matched keypoint tensors), img_i/img_j (image tensors) and their
    indices.  Writes one PNG per image pair under
    ``_unit_test_matches_result/<scene>/``.
    """
    msg = "Failed to pass the unit test named matches"
    print("Starting Unit Test : matches")
    dirname = "_unit_test_matches_result"
    # Check whether argument is currently provided.
    assert "args" in kwargs.keys(), msg
    assert "result" in kwargs.keys(), msg
    assert "img_i" in kwargs.keys(), msg
    assert "img_j" in kwargs.keys(), msg
    assert "img_i_idx" in kwargs.keys(), msg
    assert "img_j_idx" in kwargs.keys(), msg
    args= kwargs["args"]
    result = kwargs["result"]
    img_i, img_j = kwargs["img_i"], kwargs["img_j"]
    img_i_idx, img_j_idx = kwargs["img_i_idx"], kwargs["img_j_idx"]
    kps1, kps2 = result
    # Width of the left image: offsets the right image's x coordinates
    # after the two images are concatenated side by side.
    # (assumes img_i is an HxWxC tensor -- TODO confirm against callers)
    W = img_i.shape[1]
    # Draw matches and save them
    assert hasattr(args, "datadir"), msg
    scene_name = args.datadir.split("/")[-1]
    scene_path = os.path.join(dirname, scene_name)
    os.makedirs(scene_path, exist_ok=True)
    img_name = "{}_{}.png".format(img_i_idx, img_j_idx)
    img_path = os.path.join(scene_path, img_name)
    # Concatenate the two images horizontally for a side-by-side view.
    img_cat = torch.cat([img_i, img_j], dim=1)
    img_cat_pil = TF.ToPILImage()(img_cat.permute(2, 0, 1))
    plt.imshow(img_cat_pil)
    # Visualize `match_num` matches (sampled with replacement), each in a
    # random color.
    i_visualize = np.random.choice(range(len(kps1)), match_num)
    for i in i_visualize:
        kp1, kp2 = kps1[i].cpu().numpy(), kps2[i].cpu().numpy()
        color = (random(), random(), random())
        plt.plot([kp1[0], kp2[0]+W], [kp1[1], kp2[1]], c=color, lw=2)
    plt.savefig(img_path)
    plt.close()
def projected_ray_distance_evaluation(
    images,
    index_list,
    args,
    ray_fun,
    ray_fun_gt,
    H,
    W,
    mode,
    matcher,
    gt_intrinsic,
    gt_extrinsic,
    method,
    device,
    intrinsic=None,
    extrinsic=None,
    camera_model=None,
    i_map=None,
    ):
    """Average projected-ray-distance (PRD) over all feasible image pairs.

    For each candidate image pair, keypoint matches are computed
    (SuperGlue or SIFT, per ``args.matcher``), rays are cast through the
    matched keypoints, and ``proj_ray_dist_loss_single`` is evaluated.
    Outside of training mode, matches are first filtered against the
    ground-truth cameras via ``filter_matches_with_gt``.

    Rays come either from a focal-length model (``camera_model is None``,
    using ``intrinsic``/``extrinsic``) or from a learned camera model
    (``camera_model`` plus ``i_map`` to map image indices to camera
    parameter slots in training mode).

    Returns the mean PRD over all pairs with a finite (non-NaN) value.
    """
    prd_list = []
    match_fun = runSuperGlueSinglePair if args.matcher == "superglue" else \
        runSIFTSinglePair
    extrinsic_gt_numpy = gt_extrinsic[index_list].cpu().numpy()
    with torch.no_grad():
        # Candidate pairs are chosen from the ground-truth poses.
        feasible_image_pairs = image_pair_candidates(
            extrinsic_gt_numpy, args, index_list
        )
        for img_i in feasible_image_pairs.keys():
            for img_j in feasible_image_pairs[img_i]:
                # Process each unordered pair once.
                if img_i >= img_j:
                    continue
                result = match_fun(
                    matcher,
                    images[img_i],
                    images[img_j],
                    0,
                    args
                )
                kps0_list, kps1_list = preprocess_match(result)
                if kps0_list is None and kps1_list is None:
                    continue
                result = kps0_list, kps1_list
                kwargs_unit_test = {
                    "args": args,
                    "result": result,
                    "img_i": images[img_i],
                    "img_j": images[img_j],
                    "img_i_idx": img_i,
                    "img_j_idx": img_j
                }
                # Only draws/saves the matches when args.debug is set.
                run_unit_test(
                    args, kwargs_unit_test, unit_test_matches
                )
                if mode != "train":
                    # Acquiring correct matches using the ground truth camera info.
                    # In the training mode, we don't use the ground truth information.
                    rays_i_gt = ray_fun_gt(
                        H=H, W=W,focal=gt_intrinsic[0][0],
                        extrinsic=gt_extrinsic[img_i], kps_list=kps0_list
                    )
                    rays_j_gt = ray_fun_gt(
                        H=H, W=W,focal=gt_intrinsic[0][0],
                        extrinsic=gt_extrinsic[img_j], kps_list=kps1_list
                    )
                    filter_idx = filter_matches_with_gt(
                        kps0_list=kps0_list,
                        kps1_list=kps1_list,
                        H=H,
                        W=W,
                        gt_intrinsic=gt_intrinsic,
                        gt_extrinsic=gt_extrinsic[[img_i, img_j]],
                        rays0=rays_i_gt,
                        rays1=rays_j_gt,
                        args=args,
                        device=device,
                        method=method
                    )
                    kps0_list = kps0_list[filter_idx]
                    kps1_list = kps1_list[filter_idx]
                if camera_model is None:
                    # Focal-length camera: evaluate with gt_extrinsic for
                    # val/test, and with the (noisy) extrinsic for train.
                    extrinsic_evaluate = gt_extrinsic if mode != "train" else \
                        extrinsic
                    rays_i = ray_fun(
                        H=H, W=W, focal=intrinsic[0][0],
                        extrinsic=extrinsic_evaluate[img_i], kps_list=kps0_list
                    )
                    rays_j = ray_fun(
                        H=H, W=W, focal=intrinsic[0][0],
                        extrinsic=extrinsic_evaluate[img_j], kps_list=kps1_list
                    )
                    projected_ray_dist, _ = proj_ray_dist_loss_single(
                        kps0_list=kps0_list, kps1_list=kps1_list, img_idx0=img_i,
                        img_idx1=img_j, rays0=rays_i, rays1=rays_j, mode=mode,
                        device=device, H=H, W=W, args=args,
                        intrinsic=gt_intrinsic, extrinsic=extrinsic_evaluate
                    )
                else:
                    # Learned camera model: in train mode extrinsics come
                    # from the camera model itself (passed as None here),
                    # and i_map translates image index -> camera slot.
                    extrinsic_evaluate = gt_extrinsic if mode != "train" else \
                        None
                    extrinsic_evaluate_i = gt_extrinsic[img_i] if mode != "train" \
                        else None
                    extrinsic_evaluate_j = gt_extrinsic[img_j] if mode != "train" \
                        else None
                    camera_idx_i = np.where(i_map == img_i)[0][0] \
                        if mode == "train" else None
                    camera_idx_j = np.where(i_map == img_j)[0][0] \
                        if mode == "train" else None
                    rays_i = ray_fun(
                        H=H, W=W, camera_model=camera_model,
                        extrinsic=extrinsic_evaluate_i, kps_list=kps0_list,
                        idx_in_camera_param=camera_idx_i
                    )
                    rays_j = ray_fun(
                        H=H, W=W, camera_model=camera_model,
                        extrinsic=extrinsic_evaluate_j, kps_list=kps1_list,
                        idx_in_camera_param=camera_idx_j
                    )
                    projected_ray_dist, _ = proj_ray_dist_loss_single(
                        kps0_list=kps0_list, kps1_list=kps1_list, img_idx0=img_i,
                        img_idx1=img_j, rays0=rays_i, rays1=rays_j, mode=mode,
                        device=device, H=H, W=W, args=args, i_map=i_map,
                        camera_model=camera_model, extrinsic=extrinsic_evaluate
                    )
                # NaN distances (e.g. degenerate pairs) are excluded from
                # the average.
                if not torch.isnan(projected_ray_dist):
                    prd_list.append(projected_ray_dist.item())
    prd_list = torch.tensor(prd_list)
    return prd_list.mean()
# Since SuperGlue sometimes fail to acquire reliable matches,
# we filter matches using the ground truth information only when
# evaluating PRD on val/test.
def filter_matches_with_gt(
    kps0_list,
    kps1_list,
    W,
    H,
    gt_intrinsic,
    gt_extrinsic,
    rays0,
    rays1,
    args,
    method,
    device,
    eps=1e-6
    ):
    """Boolean mask of matches consistent with the ground-truth cameras.

    Triangulates each correspondence from its two ground-truth rays
    (closest points of the two lines), reprojects the triangulated point
    into the *other* image, and keeps a match only when both reprojection
    errors are below 1 pixel (squared error < 1.0) and both ray depths
    are positive (chirality check).

    ``rays0``/``rays1`` are (origin, direction) pairs, one per keypoint;
    ``gt_extrinsic`` holds the two cameras of the pair.
    """
    assert method in ["NeRF", "NeRF++"]
    assert kps0_list.dim() == 2 and kps1_list.dim() == 2
    # Work on a detached copy: the sign flip below must not leak out.
    gt_intrinsic=gt_intrinsic.clone().detach()
    # NeRF is using an opposite coordinate.
    if method == "NeRF":
        gt_intrinsic[0][0] = -gt_intrinsic[0][0]
    rays0_o, rays0_d = rays0
    rays1_o, rays1_d = rays1
    # Add a leading batch axis of size 1 for the einsum contractions below.
    rays0_o, rays0_d = rays0_o.unsqueeze(0), rays0_d.unsqueeze(0)
    rays1_o, rays1_d = rays1_o.unsqueeze(0), rays1_d.unsqueeze(0)
    # Invert on CPU, then move back to the working device.
    gt_extrinsic_inv = torch.inverse(gt_extrinsic.cpu())
    gt_extrinsic_inv = gt_extrinsic_inv.to(device)
    # Normalize ray directions to unit length (eps guards against /0).
    rays0_d = rays0_d / (rays0_d.norm(p=2, dim=-1)[:, :, None] + eps)
    rays1_d = rays1_d / (rays1_d.norm(p=2, dim=-1)[:, :, None] + eps)
    # Homogenize the ray origins, then drop back to 3D world coordinates.
    rays0_o_world = torch.cat(
        [
            rays0_o,
            torch.ones((rays0_o.shape[:2]), device=device)[:, :, None]
        ],
        dim=-1
    )[:, :, :3]
    rays1_o_world = torch.cat(
        [
            rays1_o,
            torch.ones((rays1_o.shape[:2]), device=device)[:, :, None]
        ],
        dim=-1
    )[:, :, :3]
    rays0_d_world = rays0_d[:, :, :3]
    rays1_d_world = rays1_d[:, :, :3]
    # Closest-point parameters t0, t1 of the two (nearly) skew lines:
    # r0_r1 is the per-match dot product of the two unit directions.
    r0_r1 = torch.einsum("ijk, ijk -> ij", rays0_d_world, rays1_d_world)
    t0 = (
        torch.einsum(
            "ijk, ijk -> ij", rays0_d_world, rays0_o_world - rays1_o_world
        ) - r0_r1
        * torch.einsum(
            "ijk, ijk -> ij", rays1_d_world, rays0_o_world - rays1_o_world
        )
    ) / (r0_r1 ** 2 - 1 + eps)
    t1 = (
        torch.einsum(
            "ijk, ijk -> ij", rays1_d_world, rays1_o_world - rays0_o_world
        ) - r0_r1
        * torch.einsum(
            "ijk, ijk -> ij", rays0_d_world, rays1_o_world - rays0_o_world
        )
    ) / (r0_r1 ** 2 - 1 + eps)
    # p0/p1: the closest point on each ray (the triangulated 3D points).
    p0 = t0[:, :, None] * rays0_d_world + rays0_o_world
    p1 = t1[:, :, None] * rays1_d_world + rays1_o_world
    # Homogenize and project each point into the *other* camera.
    p0_4d = torch.cat(
        [p0, torch.ones((p0.shape[:2]), device=device)[:, :, None]], dim=-1
    )
    p1_4d = torch.cat(
        [p1, torch.ones((p1.shape[:2]), device=device)[:, :, None]], dim=-1
    )
    p0_proj_to_im1 = torch.einsum(
        "ijk, ipk -> ijp", p0_4d, gt_extrinsic_inv[1:]
    )
    p1_proj_to_im0 = torch.einsum(
        "ijk, ipk -> ijp", p1_4d, gt_extrinsic_inv[:-1]
    )
    p0_norm_im1 = torch.einsum("ijk, pk -> ijp", p0_proj_to_im1, gt_intrinsic)
    p1_norm_im0 = torch.einsum("ijk, pk -> ijp", p1_proj_to_im0, gt_intrinsic)
    # Perspective divide to pixel coordinates (eps guards against /0).
    p0_norm_im1_2d = p0_norm_im1[:, :, :2] / (p0_norm_im1[:, :, 2, None] + eps)
    p1_norm_im0_2d = p1_norm_im0[:, :, :2] / (p1_norm_im0[:, :, 2, None] + eps)
    # Chirality check: remove rays behind cameras
    # First, flatten the correspondences
    # Find indices of valid rays
    valid_t0 = (t0 > 0).flatten()
    valid_t1 = (t1 > 0).flatten()
    valid = torch.logical_and(valid_t0, valid_t1)
    # Second, select losses that are valid
    # When using NeRF++
    # Squared reprojection error of each triangulated point against the
    # keypoint observed in the other image.
    loss0_list = ((p1_norm_im0_2d - kps0_list) ** 2).sum(-1).flatten()
    loss1_list = ((p0_norm_im1_2d - kps1_list) ** 2).sum(-1).flatten()
    # Remove cloned tensor after the computation
    del gt_intrinsic
    # Keep matches with sub-pixel reprojection error in both images that
    # also pass the chirality check.
    return torch.logical_and(
        torch.logical_and(loss0_list < 1.0, loss1_list < 1.0), valid
) | [
"torchvision.transforms.ToPILImage",
"matplotlib.pyplot.imshow",
"reprojection.image_pair_candidates",
"numpy.where",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"ray_dist_loss.proj_ray_dist_loss_single",
"ray_dist_loss.preprocess_match",
"matplotlib.pyplot.savefig",
"torch.einsum",
"to... | [((1325, 1358), 'os.path.join', 'os.path.join', (['dirname', 'scene_name'], {}), '(dirname, scene_name)\n', (1337, 1358), False, 'import os\n'), ((1363, 1401), 'os.makedirs', 'os.makedirs', (['scene_path'], {'exist_ok': '(True)'}), '(scene_path, exist_ok=True)\n', (1374, 1401), False, 'import os\n'), ((1473, 1507), 'os.path.join', 'os.path.join', (['scene_path', 'img_name'], {}), '(scene_path, img_name)\n', (1485, 1507), False, 'import os\n'), ((1527, 1559), 'torch.cat', 'torch.cat', (['[img_i, img_j]'], {'dim': '(1)'}), '([img_i, img_j], dim=1)\n', (1536, 1559), False, 'import torch\n'), ((1624, 1647), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_cat_pil'], {}), '(img_cat_pil)\n', (1634, 1647), True, 'import matplotlib.pyplot as plt\n'), ((1943, 1964), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_path'], {}), '(img_path)\n', (1954, 1964), True, 'import matplotlib.pyplot as plt\n'), ((1973, 1984), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1982, 1984), True, 'import matplotlib.pyplot as plt\n'), ((7463, 7485), 'torch.tensor', 'torch.tensor', (['prd_list'], {}), '(prd_list)\n', (7475, 7485), False, 'import torch\n'), ((9005, 9065), 'torch.einsum', 'torch.einsum', (['"""ijk, ijk -> ij"""', 'rays0_d_world', 'rays1_d_world'], {}), "('ijk, ijk -> ij', rays0_d_world, rays1_d_world)\n", (9017, 9065), False, 'import torch\n'), ((9945, 10005), 'torch.einsum', 'torch.einsum', (['"""ijk, ipk -> ijp"""', 'p0_4d', 'gt_extrinsic_inv[1:]'], {}), "('ijk, ipk -> ijp', p0_4d, gt_extrinsic_inv[1:])\n", (9957, 10005), False, 'import torch\n'), ((10041, 10102), 'torch.einsum', 'torch.einsum', (['"""ijk, ipk -> ijp"""', 'p1_4d', 'gt_extrinsic_inv[:-1]'], {}), "('ijk, ipk -> ijp', p1_4d, gt_extrinsic_inv[:-1])\n", (10053, 10102), False, 'import torch\n'), ((10135, 10195), 'torch.einsum', 'torch.einsum', (['"""ijk, pk -> ijp"""', 'p0_proj_to_im1', 'gt_intrinsic'], {}), "('ijk, pk -> ijp', p0_proj_to_im1, gt_intrinsic)\n", (10147, 10195), False, 'import 
torch\n'), ((10214, 10274), 'torch.einsum', 'torch.einsum', (['"""ijk, pk -> ijp"""', 'p1_proj_to_im0', 'gt_intrinsic'], {}), "('ijk, pk -> ijp', p1_proj_to_im0, gt_intrinsic)\n", (10226, 10274), False, 'import torch\n'), ((10641, 10678), 'torch.logical_and', 'torch.logical_and', (['valid_t0', 'valid_t1'], {}), '(valid_t0, valid_t1)\n', (10658, 10678), False, 'import torch\n'), ((1578, 1593), 'torchvision.transforms.ToPILImage', 'TF.ToPILImage', ([], {}), '()\n', (1591, 1593), True, 'import torchvision.transforms as TF\n'), ((1868, 1931), 'matplotlib.pyplot.plot', 'plt.plot', (['[kp1[0], kp2[0] + W]', '[kp1[1], kp2[1]]'], {'c': 'color', 'lw': '(2)'}), '([kp1[0], kp2[0] + W], [kp1[1], kp2[1]], c=color, lw=2)\n', (1876, 1931), True, 'import matplotlib.pyplot as plt\n'), ((2555, 2570), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2568, 2570), False, 'import torch\n'), ((2603, 2662), 'reprojection.image_pair_candidates', 'image_pair_candidates', (['extrinsic_gt_numpy', 'args', 'index_list'], {}), '(extrinsic_gt_numpy, args, index_list)\n', (2624, 2662), False, 'from reprojection import runSuperGlueSinglePair, image_pair_candidates, runSIFTSinglePair\n'), ((11003, 11056), 'torch.logical_and', 'torch.logical_and', (['(loss0_list < 1.0)', '(loss1_list < 1.0)'], {}), '(loss0_list < 1.0, loss1_list < 1.0)\n', (11020, 11056), False, 'import torch\n'), ((1830, 1838), 'random.random', 'random', ([], {}), '()\n', (1836, 1838), False, 'from random import random\n'), ((1840, 1848), 'random.random', 'random', ([], {}), '()\n', (1846, 1848), False, 'from random import random\n'), ((1850, 1858), 'random.random', 'random', ([], {}), '()\n', (1856, 1858), False, 'from random import random\n'), ((3086, 3110), 'ray_dist_loss.preprocess_match', 'preprocess_match', (['result'], {}), '(result)\n', (3102, 3110), False, 'from ray_dist_loss import preprocess_match, proj_ray_dist_loss_single\n'), ((9085, 9161), 'torch.einsum', 'torch.einsum', (['"""ijk, ijk -> ij"""', 'rays0_d_world', 
'(rays0_o_world - rays1_o_world)'], {}), "('ijk, ijk -> ij', rays0_d_world, rays0_o_world - rays1_o_world)\n", (9097, 9161), False, 'import torch\n'), ((9352, 9428), 'torch.einsum', 'torch.einsum', (['"""ijk, ijk -> ij"""', 'rays1_d_world', '(rays1_o_world - rays0_o_world)'], {}), "('ijk, ijk -> ij', rays1_d_world, rays1_o_world - rays0_o_world)\n", (9364, 9428), False, 'import torch\n'), ((5470, 5711), 'ray_dist_loss.proj_ray_dist_loss_single', 'proj_ray_dist_loss_single', ([], {'kps0_list': 'kps0_list', 'kps1_list': 'kps1_list', 'img_idx0': 'img_i', 'img_idx1': 'img_j', 'rays0': 'rays_i', 'rays1': 'rays_j', 'mode': 'mode', 'device': 'device', 'H': 'H', 'W': 'W', 'args': 'args', 'intrinsic': 'gt_intrinsic', 'extrinsic': 'extrinsic_evaluate'}), '(kps0_list=kps0_list, kps1_list=kps1_list,\n img_idx0=img_i, img_idx1=img_j, rays0=rays_i, rays1=rays_j, mode=mode,\n device=device, H=H, W=W, args=args, intrinsic=gt_intrinsic, extrinsic=\n extrinsic_evaluate)\n', (5495, 5711), False, 'from ray_dist_loss import preprocess_match, proj_ray_dist_loss_single\n'), ((6986, 7243), 'ray_dist_loss.proj_ray_dist_loss_single', 'proj_ray_dist_loss_single', ([], {'kps0_list': 'kps0_list', 'kps1_list': 'kps1_list', 'img_idx0': 'img_i', 'img_idx1': 'img_j', 'rays0': 'rays_i', 'rays1': 'rays_j', 'mode': 'mode', 'device': 'device', 'H': 'H', 'W': 'W', 'args': 'args', 'i_map': 'i_map', 'camera_model': 'camera_model', 'extrinsic': 'extrinsic_evaluate'}), '(kps0_list=kps0_list, kps1_list=kps1_list,\n img_idx0=img_i, img_idx1=img_j, rays0=rays_i, rays1=rays_j, mode=mode,\n device=device, H=H, W=W, args=args, i_map=i_map, camera_model=\n camera_model, extrinsic=extrinsic_evaluate)\n', (7011, 7243), False, 'from ray_dist_loss import preprocess_match, proj_ray_dist_loss_single\n'), ((7350, 7381), 'torch.isnan', 'torch.isnan', (['projected_ray_dist'], {}), '(projected_ray_dist)\n', (7361, 7381), False, 'import torch\n'), ((9202, 9278), 'torch.einsum', 'torch.einsum', (['"""ijk, ijk -> ij"""', 
'rays1_d_world', '(rays0_o_world - rays1_o_world)'], {}), "('ijk, ijk -> ij', rays1_d_world, rays0_o_world - rays1_o_world)\n", (9214, 9278), False, 'import torch\n'), ((9469, 9545), 'torch.einsum', 'torch.einsum', (['"""ijk, ijk -> ij"""', 'rays0_d_world', '(rays1_o_world - rays0_o_world)'], {}), "('ijk, ijk -> ij', rays0_d_world, rays1_o_world - rays0_o_world)\n", (9481, 9545), False, 'import torch\n'), ((9749, 9788), 'torch.ones', 'torch.ones', (['p0.shape[:2]'], {'device': 'device'}), '(p0.shape[:2], device=device)\n', (9759, 9788), False, 'import torch\n'), ((9854, 9893), 'torch.ones', 'torch.ones', (['p1.shape[:2]'], {'device': 'device'}), '(p1.shape[:2], device=device)\n', (9864, 9893), False, 'import torch\n'), ((8636, 8680), 'torch.ones', 'torch.ones', (['rays0_o.shape[:2]'], {'device': 'device'}), '(rays0_o.shape[:2], device=device)\n', (8646, 8680), False, 'import torch\n'), ((8813, 8857), 'torch.ones', 'torch.ones', (['rays1_o.shape[:2]'], {'device': 'device'}), '(rays1_o.shape[:2], device=device)\n', (8823, 8857), False, 'import torch\n'), ((6247, 6271), 'numpy.where', 'np.where', (['(i_map == img_i)'], {}), '(i_map == img_i)\n', (6255, 6271), True, 'import numpy as np\n'), ((6360, 6384), 'numpy.where', 'np.where', (['(i_map == img_j)'], {}), '(i_map == img_j)\n', (6368, 6384), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
import scipy.signal
import os
import pandas as pd
from skimage.transform import resize
def get_mean(signal: np.ndarray, axis=0):
    """Return the arithmetic mean of *signal* along *axis*."""
    return np.mean(signal, axis=axis)
def get_std_dev(signal: np.ndarray, axis=0):
    """Return the (population) standard deviation of *signal* along *axis*."""
    return np.std(signal, axis=axis)
def get_power(signal: np.ndarray, axis=0, fs=1000):
    """Return the total spectral power of *signal* along *axis*.

    Estimates the power spectral density with Welch's method and
    integrates it over frequency (sum of PSD bins times the bin width).

    :param signal: input signal array
    :param axis: axis along which the signal varies in time
    :param fs: sampling frequency in Hz
    :return: total power, reduced along *axis*
    """
    # Bug fix: Welch must run along the *requested* axis (it was hard-coded
    # to axis=0, so the subsequent reduction was wrong for axis != 0).
    f_welch, S_xx_welch = scipy.signal.welch(signal, fs=fs, axis=axis)
    df_welch = f_welch[1] - f_welch[0]
    return np.sum(S_xx_welch, axis=axis) * df_welch
def get_energy(signal: np.ndarray, axis=0):
    """Return the energy of *signal* along *axis* via Parseval's theorem.

    Energy is ``sum(|X_k|^2) / N`` over the FFT taken along *axis*; for a
    real signal this equals the sum of squared samples along that axis.

    :param signal: input signal array
    :param axis: axis along which the signal varies in time
    :return: energy, reduced along *axis*
    """
    # Bug fix: np.fft.fft defaults to the *last* axis while N was taken
    # from axis 0 -- inconsistent for multi-dimensional input.  Take both
    # the FFT and the length from the requested axis.
    N = signal.shape[axis]
    Xk = np.fft.fft(signal, axis=axis)
    return np.sum(np.abs(Xk) ** 2, axis=axis) / N
def pca(data: np.ndarray, labels: np.ndarray, n_components=3):
    """Fit a PCA on ``data``, project it and show a labelled 3-D scatter plot.

    :param data: 2-D feature matrix, one sample per row
    :param labels: integer class label per sample (0..3, names hard-coded below)
    :param n_components: number of principal components to keep (must be >= 3
        for the 3-D plot to index components 0..2)
    """
    X = data.copy()
    y = labels.copy()
    fig = plt.figure(1, figsize=(4, 3))
    plt.clf()
    # auto_add_to_figure=False plus the explicit add_axes call avoids the
    # axes being registered twice on newer matplotlib versions.
    ax = Axes3D(fig=fig,auto_add_to_figure=False, rect=[0, 0, .95, 1], elev=48, azim=134)
    fig.add_axes(ax)
    plt.cla()
    # NOTE(review): the local name 'pca' shadows this function.
    pca = decomposition.PCA(n_components=n_components)
    pca.fit(X)
    X = pca.transform(X)
    # Annotate each class centroid with its (dataset-specific) class name.
    for name, label in [('box', 0), ('pufa', 1), ('profil', 2), ('gasnica',3)]:
        ax.text3D(X[y == label, 0].mean(),
                  X[y == label, 1].mean() + 1.5,
                  X[y == label, 2].mean(), name,
                  horizontalalignment='center',
                  bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.nipy_spectral,
               edgecolor='k')
    # Hide tick labels on all three axes.  NOTE(review): w_*axis accessors
    # are deprecated in recent matplotlib -- confirm target version.
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    plt.show()
def check_pca(path_to_data):
    """Build a per-sample mean-force feature table and visualise it with PCA.

    Walks ``path_to_data``; each sub-directory must contain a ``log.csv``
    whose rows reference per-sample ``forces_path`` CSV files.  The class
    id is simply the index of the sub-directory (one class per directory).

    :param path_to_data: root directory of the recorded data set
    """
    data_dirs = []
    class_names = []
    class_counter = 0
    for subdir, dirs, files in os.walk(path_to_data):
        data_dirs.append(subdir)
    data_dirs.pop(0)  # drop the walk root itself; keep only sub-dirs
    column_names = ["class", "force_x_mean", "force_y_mean", "force_z_mean"]
    df_all = pd.DataFrame(columns=column_names)
    for subdir in data_dirs:
        df_log_file = pd.read_csv(os.path.join(subdir, 'log.csv'))
        print(subdir)
        for i in range(len(df_log_file)):
            # Paths stored in log.csv may be absolute; keep only the file
            # name and resolve it against the current sub-directory.
            forces_path = os.path.join(subdir, os.path.basename(df_log_file.loc[i, 'forces_path']))
            df_forces = pd.read_csv(os.path.join('', forces_path))
            # standardization (disabled):
            # df_forces['0'] = (df_forces['0']-df_forces['0'].mean())/df_forces['0'].std()
            # df_forces['1'] = (df_forces['1'] - df_forces['1'].mean()) / df_forces['1'].std()
            # df_forces['2'] = (df_forces['2'] - df_forces['2'].mean() )/ df_forces['2'].std()
            #
            df_f_x = df_forces['0'].mean()
            df_f_y = df_forces['1'].mean()
            df_f_z = df_forces['2'].mean()
            # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
            # this code assumes an older pandas -- confirm pinned version.
            df_all = df_all.append(
                {"class": int(class_counter), "force_x_mean": float(df_f_x), "force_y_mean": float(df_f_y),
                 "force_z_mean": float(df_f_z)}, ignore_index=True)
        class_counter += 1
    print(df_all)
    labels = df_all['class'].to_numpy()
    data = df_all.loc[:, df_all.columns != 'class'].to_numpy()
    pca(data,labels)
def df_resample(df1, num=1):
    """Resample every column of *df1* to *num* samples.

    Each column is normalised by its absolute maximum, resampled to length
    ``num`` with edge padding (``skimage.transform.resize``), de-normalised
    again and rounded to two decimals.

    :param df1: input DataFrame, one signal per column
    :param num: target number of samples per column
    :return: new DataFrame with ``num`` rows and the same column names
    """
    df2 = pd.DataFrame()
    # Bug fix: DataFrame.iteritems() was deprecated and removed in
    # pandas 2.0 -- items() is the equivalent, version-stable spelling.
    for key, value in df1.items():
        temp = value.to_numpy() / value.abs().max()  # normalize
        resampled = resize(temp, (num, 1), mode='edge') * value.abs().max()  # de-normalize
        df2[key] = resampled.flatten().round(2)
    return df2
def preprocess_data(path_to_data):
    """Load, rename, resample and plot combined force + quaternion signals.

    Walks ``path_to_data``; each sub-directory must contain a ``log.csv``
    whose rows reference per-sample ``forces_path`` / ``quat_path`` CSV
    files.  Both signals are resampled to 3000 points, concatenated
    column-wise and shown in a blocking matplotlib window per sample.

    :param path_to_data: root directory of the recorded data set
    """
    data_dirs = []
    class_names = []
    class_counter = 0
    for subdir, dirs, files in os.walk(path_to_data):
        data_dirs.append(subdir)
    data_dirs.pop(0)  # drop the walk root itself; keep only sub-dirs
    column_names = ["force_x_mean", "force_y_mean", "force_z_mean",
                    "quat_0_x", "quat_0_y", "quat_0_z", "quat_0_w",
                    "quat_1_x", "quat_1_y", "quat_1_z", "quat_1_w",
                    "quat_2_x", "quat_2_y", "quat_2_z", "quat_2_w",
                    "quat_3_x", "quat_3_y", "quat_3_z", "quat_3_w"]
    for subdir in data_dirs:
        df_log_file = pd.read_csv(os.path.join(subdir, 'log.csv'))
        for i in range(len(df_log_file)):
            # Paths stored in log.csv may be absolute; keep only the file
            # name and resolve it against the current sub-directory.
            forces_path = os.path.join(subdir, os.path.basename(df_log_file.loc[i, 'forces_path']))
            df_forces = pd.read_csv(os.path.join('', forces_path))
            quat_path = os.path.join(subdir, os.path.basename(df_log_file.loc[i, 'quat_path']))
            df_quat = pd.read_csv(os.path.join('', quat_path))
            # Map the anonymous CSV column indices to descriptive names.
            df_forces = df_forces.rename(columns={"0": "force_x_mean",
                                                  "1": "force_y_mean",
                                                  "2": "force_z_mean"})
            df_quat = df_quat.rename(columns={"0": "quat_0_x",
                                              "1": "quat_0_y",
                                              "2": "quat_0_z",
                                              "3": "quat_0_w",
                                              "4": "quat_1_x",
                                              "5": "quat_1_y",
                                              "6": "quat_1_z",
                                              "7": "quat_1_w",
                                              "8": "quat_2_x",
                                              "9": "quat_2_y",
                                              "10": "quat_2_z",
                                              "11": "quat_2_w",
                                              "12": "quat_3_x",
                                              "13": "quat_3_y",
                                              "14": "quat_3_z",
                                              "15": "quat_3_w"
                                              })
            # Drop the pandas index column written out by to_csv earlier.
            df_forces = df_forces.drop(['Unnamed: 0'], axis=1)
            df_quat = df_quat.drop(['Unnamed: 0'], axis=1)
            df_forces = df_resample(df_forces, 3000)
            df_quat = df_resample(df_quat, 3000)
            df_combined = pd.concat([df_forces,df_quat], axis=1)
            df_combined.plot()
            plt.show()
def plot_data(path_to_data):
    """Plot resampled force and quaternion signals for every recorded sample.

    Walks ``path_to_data``; each sub-directory must contain a ``log.csv``
    whose rows reference per-sample ``forces_path`` / ``quat_path`` CSV
    files.  Each signal is resampled to 3000 points (see ``df_resample``)
    and shown in a blocking matplotlib window, one figure per sample.

    :param path_to_data: root directory of the recorded data set
    """
    # Cleanup: removed unused locals (class_names, class_counter,
    # column_names and the never-used df_data frame).
    data_dirs = []
    for subdir, dirs, files in os.walk(path_to_data):
        data_dirs.append(subdir)
    data_dirs.pop(0)  # drop the walk root itself; keep only sub-dirs
    for subdir in data_dirs:
        df_log_file = pd.read_csv(os.path.join(subdir, 'log.csv'))
        for i in range(len(df_log_file)):
            # Paths stored in log.csv may be absolute; keep only the file
            # name and resolve it against the current sub-directory.
            forces_path = os.path.join(subdir, os.path.basename(df_log_file.loc[i, 'forces_path']))
            df_forces = pd.read_csv(os.path.join('', forces_path))
            quat_path = os.path.join(subdir, os.path.basename(df_log_file.loc[i, 'quat_path']))
            df_quat = pd.read_csv(os.path.join('', quat_path))
            df_forces = df_resample(df_forces,3000)
            df_quat = df_resample(df_quat,3000)
            plt.plot(df_forces['0'])
            plt.plot(df_quat['0'])
            plt.plot(df_quat['1'])
            plt.plot(df_quat['2'])
            plt.plot(df_quat['3'])
            plt.show()
| [
"numpy.abs",
"sklearn.decomposition.PCA",
"numpy.fft.fft",
"matplotlib.pyplot.clf",
"os.walk",
"os.path.join",
"matplotlib.pyplot.plot",
"numpy.sum",
"matplotlib.pyplot.figure",
"pandas.concat",
"os.path.basename",
"pandas.DataFrame",
"skimage.transform.resize",
"matplotlib.pyplot.cla",
... | [((659, 677), 'numpy.fft.fft', 'np.fft.fft', (['signal'], {}), '(signal)\n', (669, 677), True, 'import numpy as np\n'), ((846, 875), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(4, 3)'}), '(1, figsize=(4, 3))\n', (856, 875), True, 'import matplotlib.pyplot as plt\n'), ((880, 889), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (887, 889), True, 'import matplotlib.pyplot as plt\n'), ((899, 985), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', ([], {'fig': 'fig', 'auto_add_to_figure': '(False)', 'rect': '[0, 0, 0.95, 1]', 'elev': '(48)', 'azim': '(134)'}), '(fig=fig, auto_add_to_figure=False, rect=[0, 0, 0.95, 1], elev=48,\n azim=134)\n', (905, 985), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((1006, 1015), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (1013, 1015), True, 'import matplotlib.pyplot as plt\n'), ((1026, 1070), 'sklearn.decomposition.PCA', 'decomposition.PCA', ([], {'n_components': 'n_components'}), '(n_components=n_components)\n', (1043, 1070), False, 'from sklearn import decomposition\n'), ((1662, 1672), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1670, 1672), True, 'import matplotlib.pyplot as plt\n'), ((1796, 1817), 'os.walk', 'os.walk', (['path_to_data'], {}), '(path_to_data)\n', (1803, 1817), False, 'import os\n'), ((1963, 1997), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'column_names'}), '(columns=column_names)\n', (1975, 1997), True, 'import pandas as pd\n'), ((3203, 3217), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3215, 3217), True, 'import pandas as pd\n'), ((3606, 3627), 'os.walk', 'os.walk', (['path_to_data'], {}), '(path_to_data)\n', (3613, 3627), False, 'import os\n'), ((6232, 6253), 'os.walk', 'os.walk', (['path_to_data'], {}), '(path_to_data)\n', (6239, 6253), False, 'import os\n'), ((6672, 6706), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'column_names'}), '(columns=column_names)\n', (6684, 6706), True, 'import pandas as pd\n'), ((540, 569), 
'numpy.sum', 'np.sum', (['S_xx_welch'], {'axis': 'axis'}), '(S_xx_welch, axis=axis)\n', (546, 569), True, 'import numpy as np\n'), ((2062, 2093), 'os.path.join', 'os.path.join', (['subdir', '"""log.csv"""'], {}), "(subdir, 'log.csv')\n", (2074, 2093), False, 'import os\n'), ((3342, 3377), 'skimage.transform.resize', 'resize', (['temp', '(num, 1)'], {'mode': '"""edge"""'}), "(temp, (num, 1), mode='edge')\n", (3348, 3377), False, 'from skimage.transform import resize\n'), ((4088, 4119), 'os.path.join', 'os.path.join', (['subdir', '"""log.csv"""'], {}), "(subdir, 'log.csv')\n", (4100, 4119), False, 'import os\n'), ((6015, 6054), 'pandas.concat', 'pd.concat', (['[df_forces, df_quat]'], {'axis': '(1)'}), '([df_forces, df_quat], axis=1)\n', (6024, 6054), True, 'import pandas as pd\n'), ((6097, 6107), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6105, 6107), True, 'import matplotlib.pyplot as plt\n'), ((6771, 6802), 'os.path.join', 'os.path.join', (['subdir', '"""log.csv"""'], {}), "(subdir, 'log.csv')\n", (6783, 6802), False, 'import os\n'), ((7286, 7310), 'matplotlib.pyplot.plot', 'plt.plot', (["df_forces['0']"], {}), "(df_forces['0'])\n", (7294, 7310), True, 'import matplotlib.pyplot as plt\n'), ((7323, 7345), 'matplotlib.pyplot.plot', 'plt.plot', (["df_quat['0']"], {}), "(df_quat['0'])\n", (7331, 7345), True, 'import matplotlib.pyplot as plt\n'), ((7358, 7380), 'matplotlib.pyplot.plot', 'plt.plot', (["df_quat['1']"], {}), "(df_quat['1'])\n", (7366, 7380), True, 'import matplotlib.pyplot as plt\n'), ((7393, 7415), 'matplotlib.pyplot.plot', 'plt.plot', (["df_quat['2']"], {}), "(df_quat['2'])\n", (7401, 7415), True, 'import matplotlib.pyplot as plt\n'), ((7428, 7450), 'matplotlib.pyplot.plot', 'plt.plot', (["df_quat['3']"], {}), "(df_quat['3'])\n", (7436, 7450), True, 'import matplotlib.pyplot as plt\n'), ((7463, 7473), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7471, 7473), True, 'import matplotlib.pyplot as plt\n'), ((696, 706), 'numpy.abs', 
'np.abs', (['Xk'], {}), '(Xk)\n', (702, 706), True, 'import numpy as np\n'), ((2206, 2257), 'os.path.basename', 'os.path.basename', (["df_log_file.loc[i, 'forces_path']"], {}), "(df_log_file.loc[i, 'forces_path'])\n", (2222, 2257), False, 'import os\n'), ((2295, 2324), 'os.path.join', 'os.path.join', (['""""""', 'forces_path'], {}), "('', forces_path)\n", (2307, 2324), False, 'import os\n'), ((4210, 4261), 'os.path.basename', 'os.path.basename', (["df_log_file.loc[i, 'forces_path']"], {}), "(df_log_file.loc[i, 'forces_path'])\n", (4226, 4261), False, 'import os\n'), ((4299, 4328), 'os.path.join', 'os.path.join', (['""""""', 'forces_path'], {}), "('', forces_path)\n", (4311, 4328), False, 'import os\n'), ((4375, 4424), 'os.path.basename', 'os.path.basename', (["df_log_file.loc[i, 'quat_path']"], {}), "(df_log_file.loc[i, 'quat_path'])\n", (4391, 4424), False, 'import os\n'), ((4460, 4487), 'os.path.join', 'os.path.join', (['""""""', 'quat_path'], {}), "('', quat_path)\n", (4472, 4487), False, 'import os\n'), ((6893, 6944), 'os.path.basename', 'os.path.basename', (["df_log_file.loc[i, 'forces_path']"], {}), "(df_log_file.loc[i, 'forces_path'])\n", (6909, 6944), False, 'import os\n'), ((6982, 7011), 'os.path.join', 'os.path.join', (['""""""', 'forces_path'], {}), "('', forces_path)\n", (6994, 7011), False, 'import os\n'), ((7058, 7107), 'os.path.basename', 'os.path.basename', (["df_log_file.loc[i, 'quat_path']"], {}), "(df_log_file.loc[i, 'quat_path'])\n", (7074, 7107), False, 'import os\n'), ((7143, 7170), 'os.path.join', 'os.path.join', (['""""""', 'quat_path'], {}), "('', quat_path)\n", (7155, 7170), False, 'import os\n')] |
import librosa
import numpy as np
import matplotlib.pyplot as plt
from sys import argv
# Command-line usage: script.py <content.wav> <style.wav> <output.wav>
script, content_audio_name, style_audio_name, output_audio_name = argv
N_FFT=2048
def read_audio_spectum(filename):
    """Load an audio file and return (log-magnitude STFT, sample rate)."""
    x, fs = librosa.load(filename, duration=58.04)   # fixed duration so all spectrograms get matching sizes
    S = librosa.stft(x, N_FFT)
    p = np.angle(S)  # NOTE(review): phase is computed but never used
    S = np.log1p(np.abs(S))
    return S, fs
style_audio, style_sr = read_audio_spectum(style_audio_name)
content_audio, content_sr = read_audio_spectum(content_audio_name)
output_audio, output_sr = read_audio_spectum(output_audio_name)
print(style_audio.shape)
print(content_audio.shape)
print(output_audio.shape)
# Show the three spectrograms (truncated to 500x500 bins) side by side.
plt.figure(figsize=(15,25))
plt.subplot(1,3,1)
plt.title('Content')
plt.imshow(content_audio[:500,:500])
plt.subplot(1,3,2)
plt.title('Style')
plt.imshow(style_audio[:500,:500])
plt.subplot(1,3,3)
plt.title('Result')
plt.imshow(output_audio[:500,:500])
plt.show()
"matplotlib.pyplot.imshow",
"numpy.abs",
"librosa.load",
"numpy.angle",
"matplotlib.pyplot.figure",
"librosa.stft",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((660, 688), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 25)'}), '(figsize=(15, 25))\n', (670, 688), True, 'import matplotlib.pyplot as plt\n'), ((688, 708), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (699, 708), True, 'import matplotlib.pyplot as plt\n'), ((707, 727), 'matplotlib.pyplot.title', 'plt.title', (['"""Content"""'], {}), "('Content')\n", (716, 727), True, 'import matplotlib.pyplot as plt\n'), ((728, 765), 'matplotlib.pyplot.imshow', 'plt.imshow', (['content_audio[:500, :500]'], {}), '(content_audio[:500, :500])\n', (738, 765), True, 'import matplotlib.pyplot as plt\n'), ((765, 785), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (776, 785), True, 'import matplotlib.pyplot as plt\n'), ((784, 802), 'matplotlib.pyplot.title', 'plt.title', (['"""Style"""'], {}), "('Style')\n", (793, 802), True, 'import matplotlib.pyplot as plt\n'), ((803, 838), 'matplotlib.pyplot.imshow', 'plt.imshow', (['style_audio[:500, :500]'], {}), '(style_audio[:500, :500])\n', (813, 838), True, 'import matplotlib.pyplot as plt\n'), ((838, 858), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (849, 858), True, 'import matplotlib.pyplot as plt\n'), ((857, 876), 'matplotlib.pyplot.title', 'plt.title', (['"""Result"""'], {}), "('Result')\n", (866, 876), True, 'import matplotlib.pyplot as plt\n'), ((877, 913), 'matplotlib.pyplot.imshow', 'plt.imshow', (['output_audio[:500, :500]'], {}), '(output_audio[:500, :500])\n', (887, 913), True, 'import matplotlib.pyplot as plt\n'), ((913, 923), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (921, 923), True, 'import matplotlib.pyplot as plt\n'), ((214, 252), 'librosa.load', 'librosa.load', (['filename'], {'duration': '(58.04)'}), '(filename, duration=58.04)\n', (226, 252), False, 'import librosa\n'), ((306, 328), 'librosa.stft', 'librosa.stft', (['x', 'N_FFT'], {}), '(x, N_FFT)\n', (318, 
328), False, 'import librosa\n'), ((334, 345), 'numpy.angle', 'np.angle', (['S'], {}), '(S)\n', (342, 345), True, 'import numpy as np\n'), ((360, 369), 'numpy.abs', 'np.abs', (['S'], {}), '(S)\n', (366, 369), True, 'import numpy as np\n')] |
import numpy as np
class Ingredients:
    '''
    Constant-time ingredient counting over rectangular areas of a pizza.
    '''

    def __init__(self, pizza_lines):
        self._lines = [list(row) for row in pizza_lines]
        self._unique, self._map = np.unique(self._lines, return_inverse=True)
        self._map = self._map.reshape((len(self._lines), len(self._lines[0])))
        self.shape = self._map.shape
        self.total = self.shape[0] * self.shape[1]
        self.total_unique = np.max(self._map) + 1
        self.initialize()

    def initialize(self):
        '''
        Precompute 2-D inclusive prefix sums of one-hot ingredient counts
        so any rectangular query becomes four array lookups.
        '''
        n_rows, n_cols = self.shape
        one_hot = np.zeros((n_rows, n_cols, self.total_unique))
        row_idx, col_idx = np.indices(self.shape)
        one_hot[row_idx, col_idx, self._map] = 1
        # Cumulative sums along both spatial axes give the inclusive
        # count of each ingredient in the rectangle from the origin.
        self._from_origin = one_hot.cumsum(axis=0).cumsum(axis=1)

    def of(self, slice):
        '''
        Return a 1d array whose i-th entry is the number of cells of
        ingredient id i inside the slice (inclusive corners r0/c0..r1/c1).
        '''
        counts = np.copy(self._from_origin[slice.r1, slice.c1])
        if slice.r0 > 0:
            counts -= self._from_origin[slice.r0 - 1][slice.c1]
        if slice.c0 > 0:
            counts -= self._from_origin[slice.r1][slice.c0 - 1]
        if slice.r0 > 0 and slice.c0 > 0:
            counts += self._from_origin[slice.r0 - 1][slice.c0 - 1]
        return counts
| [
"numpy.copy",
"numpy.zeros",
"numpy.unique",
"numpy.max"
] | [((238, 281), 'numpy.unique', 'np.unique', (['self._lines'], {'return_inverse': '(True)'}), '(self._lines, return_inverse=True)\n', (247, 281), True, 'import numpy as np\n'), ((674, 716), 'numpy.zeros', 'np.zeros', (['(*self.shape, self.total_unique)'], {}), '((*self.shape, self.total_unique))\n', (682, 716), True, 'import numpy as np\n'), ((1437, 1483), 'numpy.copy', 'np.copy', (['self._from_origin[slice.r1, slice.c1]'], {}), '(self._from_origin[slice.r1, slice.c1])\n', (1444, 1483), True, 'import numpy as np\n'), ((475, 492), 'numpy.max', 'np.max', (['self._map'], {}), '(self._map)\n', (481, 492), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import argparse
from PIL import Image
import imutils
import cv2
import os
import pprint
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow INFO/WARNING logs
# Command-line interface: model path, labels file, input image, and the
# confidence threshold used to filter weak detections.
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True,
	help="base path for TFlite detection model")
ap.add_argument("-l", "--labels", required=True,
	help="labels file")
ap.add_argument("-i", "--image", required=True,
	help="path to input image")
ap.add_argument("-c", "--min-confidence", type=float, default=0.5,
	help="minimum probability used to filter weak detections")
ap.add_argument("-D", "--debug", action="store_true",
	help="print debug information [False]")
args = vars(ap.parse_args())
def main():
    """Run a TFLite object detector on one image and display the boxes.

    Loads the labels file (skipping its first line -- presumably a header;
    TODO confirm), runs the interpreter on the resized input image, then
    draws every detection above the confidence threshold on the original
    image with OpenCV.
    """
    with open(args["labels"], "r") as fh:
        labelLst = fh.read().splitlines()[1:]
    print("[INFO] loading the detection model '{}'".format(args["model"]))
    # NOTE(review): tf.contrib was removed in TensorFlow 2.x -- this code
    # assumes TF 1.x; confirm the pinned version.
    interpreter = tf.contrib.lite.Interpreter(model_path=args["model"])
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    # Input tensor shape is [1, H, W, C]; take (W, H) for PIL resize.
    imgShape = input_details[0]["shape"][2:0:-1]
    if args["debug"]:
        print("\n[INFO] input details:")
        pprint.pprint(input_details)
        print("\n[INFO] output details:")
        pprint.pprint(output_details)
    print("[INFO] loading and resizing image to {}".format(imgShape))
    img = Image.open(args["image"])
    img.load()
    img = img.resize(imgShape, Image.ANTIALIAS)
    data = np.asarray(img, dtype="int32")
    # Add the batch dimension and convert to the uint8 input the model expects.
    input_data = np.expand_dims(data,0).astype(np.uint8)
    print("[INFO] running inference ...")
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    # Output tensors: normalized boxes, class indices, confidence scores.
    boxes = interpreter.get_tensor(output_details[0]['index'])
    labels = interpreter.get_tensor(output_details[1]['index'])
    scores = interpreter.get_tensor(output_details[2]['index'])
    boxes = np.squeeze(boxes)
    labels = np.squeeze(labels.astype(np.int32))
    scores = np.squeeze(scores)
    image = cv2.imread(args["image"])
    (H, W) = image.shape[:2]
    dropCount = 0
    for (box, score, label) in zip(boxes, scores, labels):
        if score < args["min_confidence"] or score > 1:
            dropCount += 1
            continue
        # Boxes are [ymin, xmin, ymax, xmax] in normalized coordinates;
        # scale back to the original image size.
        (startY, startX, endY, endX) = box
        startX = int(startX * W)
        startY = int(startY * H)
        endX = int(endX * W)
        endY = int(endY * H)
        label = "{}: {:.2f}".format(labelLst[label], score)
        cv2.rectangle(image, (startX, startY), (endX, endY),
            (255, 255, 255), 2)
        # Put the caption just above the box, or below if near the top edge.
        y = startY - 10 if startY - 10 > 10 else startY + 10
        cv2.putText(image, label, (startX, y),
            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
    print("[INFO] dropped {:d} detections for low score".format(dropCount))
    cv2.imshow("Output", image)
    cv2.waitKey(0)
if __name__ == "__main__":
main()
| [
"cv2.rectangle",
"PIL.Image.open",
"tensorflow.contrib.lite.Interpreter",
"argparse.ArgumentParser",
"numpy.asarray",
"numpy.squeeze",
"cv2.imshow",
"cv2.putText",
"cv2.waitKey",
"numpy.expand_dims",
"cv2.imread",
"pprint.pprint"
] | [((199, 224), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (222, 224), False, 'import argparse\n'), ((985, 1038), 'tensorflow.contrib.lite.Interpreter', 'tf.contrib.lite.Interpreter', ([], {'model_path': "args['model']"}), "(model_path=args['model'])\n", (1012, 1038), True, 'import tensorflow as tf\n'), ((1489, 1514), 'PIL.Image.open', 'Image.open', (["args['image']"], {}), "(args['image'])\n", (1499, 1514), False, 'from PIL import Image\n'), ((1589, 1619), 'numpy.asarray', 'np.asarray', (['img'], {'dtype': '"""int32"""'}), "(img, dtype='int32')\n", (1599, 1619), True, 'import numpy as np\n'), ((2013, 2030), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (2023, 2030), True, 'import numpy as np\n'), ((2093, 2111), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (2103, 2111), True, 'import numpy as np\n'), ((2124, 2149), 'cv2.imread', 'cv2.imread', (["args['image']"], {}), "(args['image'])\n", (2134, 2149), False, 'import cv2\n'), ((2951, 2978), 'cv2.imshow', 'cv2.imshow', (['"""Output"""', 'image'], {}), "('Output', image)\n", (2961, 2978), False, 'import cv2\n'), ((2983, 2997), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2994, 2997), False, 'import cv2\n'), ((1300, 1328), 'pprint.pprint', 'pprint.pprint', (['input_details'], {}), '(input_details)\n', (1313, 1328), False, 'import pprint\n'), ((1379, 1408), 'pprint.pprint', 'pprint.pprint', (['output_details'], {}), '(output_details)\n', (1392, 1408), False, 'import pprint\n'), ((2596, 2668), 'cv2.rectangle', 'cv2.rectangle', (['image', '(startX, startY)', '(endX, endY)', '(255, 255, 255)', '(2)'], {}), '(image, (startX, startY), (endX, endY), (255, 255, 255), 2)\n', (2609, 2668), False, 'import cv2\n'), ((2760, 2854), 'cv2.putText', 'cv2.putText', (['image', 'label', '(startX, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.75)', '(255, 255, 255)', '(2)'], {}), '(image, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,\n 255, 255), 2)\n', (2771, 
2854), False, 'import cv2\n'), ((1637, 1660), 'numpy.expand_dims', 'np.expand_dims', (['data', '(0)'], {}), '(data, 0)\n', (1651, 1660), True, 'import numpy as np\n')] |
"""
Display a plot and an image with minimal setup.
pg.plot() and pg.image() are indended to be used from an interactive prompt
to allow easy data inspection (but note that PySide unfortunately does not
call the Qt event loop while the interactive prompt is running, in this case
it is necessary to call QApplication.exec_() to make the windows appear).
"""
import initExample ## Add path to library (just for examples; you do not need this)
import numpy as np
import pyqtgraph as pg
data = np.random.normal(size=1000)
pg.plot(data, title="Simplest possible plotting example")
data = np.random.normal(size=(500,500))
pg.image(data, title="Simplest possible image example")
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if sys.flags.interactive != 1 or not hasattr(QtCore, 'PYQT_VERSION'):
pg.QtGui.QApplication.exec_()
| [
"numpy.random.normal",
"pyqtgraph.image",
"pyqtgraph.plot",
"pyqtgraph.QtGui.QApplication.exec_"
] | [((510, 537), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1000)'}), '(size=1000)\n', (526, 537), True, 'import numpy as np\n'), ((539, 596), 'pyqtgraph.plot', 'pg.plot', (['data'], {'title': '"""Simplest possible plotting example"""'}), "(data, title='Simplest possible plotting example')\n", (546, 596), True, 'import pyqtgraph as pg\n'), ((607, 640), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(500, 500)'}), '(size=(500, 500))\n', (623, 640), True, 'import numpy as np\n'), ((641, 696), 'pyqtgraph.image', 'pg.image', (['data'], {'title': '"""Simplest possible image example"""'}), "(data, title='Simplest possible image example')\n", (649, 696), True, 'import pyqtgraph as pg\n'), ((905, 934), 'pyqtgraph.QtGui.QApplication.exec_', 'pg.QtGui.QApplication.exec_', ([], {}), '()\n', (932, 934), True, 'import pyqtgraph as pg\n')] |
# -*- coding: utf-8 -*-
import gc
import numpy as np
import pandas as pd
import lightgbm as lgb
from data import *
from feat import *
from resource import *
from utils import (load_dataframe, convert_dtype, CrossValidation, merge_all)
def rank_feat_inside_session(df, cols):
    """Add a per-session rank column for every feature in *cols*.

    For each column ``c`` a new float32 column ``c + '_rank'`` is added,
    holding the rank of the value within its ``session_id`` group (ties
    broken by order of appearance). Mutates and returns *df*.
    """
    for feature in cols:
        ranks = df.groupby(['session_id'])[feature].rank(method='first')
        df['%s_rank' % feature] = ranks.astype('float32')
    return df
def rank_similarity_inside_session(df):
    """Rank the similarity / co-appearance features inside each session.

    Delegates to ``rank_feat_inside_session`` with the fixed set of
    similarity columns used by the ranking models.
    """
    similarity_cols = (
        'item_id_interaction_last_similarity_wv_impression',
        'item_id_impression_prev_item_meta_cos',
        'item_id_impression_prev_co_appearence_impression_count',
        'item_id_impression_first_co_appearence_interaction_count',
        'item_id_impression_first_co_appearence_impression_count',
        'item_id_interaction_last_co_appearence_impression_count',
        'item_id_interaction_last_co_appearence_interaction_count',
    )
    return rank_feat_inside_session(df, similarity_cols)
# Pre-computed feature tables (train/test pairs) produced elsewhere in the
# pipeline; '%s' in the template is filled with the 'tr'/'te' fix.
f_m2_top30 = TrainTestResource(FeatResource, 'm2_%s_top30_fea',
                              fix=['tr', 'te'], fmt='ftr')
f_m3_top30 = TrainTestResource(FeatResource, 'm3_%s_feat_top30',
                              fix=['tr', 'te'], fmt='ftr')
# Model output resources: per-run train/test prediction CSVs.
m_20190622 = TrainTestResource(ModelResource, '%s_m1_20190622',
                              fix=['tr', 'te'], fmt='csv')
m_20190624 = TrainTestResource(ModelResource, '%s_m1_20190624',
                              fix=['tr', 'te'], fmt='csv')
m_20190626 = TrainTestResource(ModelResource, '%s_m1_20190626',
                              fix=['tr', 'te'], fmt='csv')
@register(out=m_20190622, inp=[t_tr_te_classify, f_top100, f_si_sim])
def train_predict_lgb_20190622_2():
    """Train the 20190622 LightGBM binary classifier and write predictions.

    Builds the train/test frames by merging the target table with the
    top100 and similarity features, cross-validates on train, predicts on
    test, and writes both prediction CSVs to the m_20190622 resources.
    """
    from feat_names import names_lgb_20190622_2 as feats
    def load_data(tt):
        # Merge all inputs on (session_id, impressions); feature tables
        # keyed on item_id are renamed to match.
        df = merge_all([
            t_tr_te_classify[tt].load().rename(columns={'item_id': 'impressions'}),
            f_top100[tt].load(),
            f_si_sim[tt].load().rename(columns={'item_id': 'impressions'}),
        ], on=['session_id', 'impressions'], how='left')
        df = rank_similarity_inside_session(df)
        return df
    train = load_data('train')
    cv = CrossValidation()
    # NOTE(review): LightGBM's parameter is 'bagging_freq'; the
    # 'bagging_frequency' keyword here may be silently ignored -- confirm.
    model = lgb.LGBMClassifier(n_estimators=50000, objective="binary", metric='binary_logloss',
                               num_leaves=31, min_child_samples=100, learning_rate=0.1,
                               bagging_fraction=0.7, feature_fraction=0.7, bagging_frequency=5,
                               seed=1, feature_fraction_seed=1, use_best_model=True, n_jobs=16)
    df_train = train[['session_id', 'impressions']]
    df_train['target'] = cv.validate(model, feats, train, train['target'], early_stopping_rounds=100, verbose=100)
    print('Validation Score:', np.mean(cv.scores))
    # Free the training frame before loading test to limit peak memory.
    del train
    gc.collect()
    test = load_data('test')
    df_test = test[['session_id', 'impressions']]
    df_test['target'] = cv.predict_proba(test)
    df_train.to_csv(m_20190622.train.path, index=False, float_format='%.4f')
    df_test.to_csv(m_20190622.test.path, index=False, float_format='%.4f')
@register(out=m_20190624, inp=[t_tr_te_classify, f_top30, f_si_sim,
                               f_m2_top30, f_m3_top30])
def train_predict_lgb_20190624_1():
    """Train the 20190624 LightGBM binary classifier and write predictions.

    Like the 20190622 run but with the m2/m3 top30 feature tables and the
    top30 (instead of top100) feature set; writes both prediction CSVs to
    the m_20190624 resources.
    """
    from feat_names import names_lgb_20190624_1 as feats
    def load_data(tt):
        # Merge all inputs on (session_id, impressions); feature tables
        # keyed on item_id are renamed to match.
        df = merge_all([
            t_tr_te_classify[tt].load().rename(columns={'item_id': 'impressions'}),
            f_m2_top30[tt].load(),
            f_m3_top30[tt].load(),
            f_top30[tt].load(),
            f_si_sim[tt].load().rename(columns={'item_id': 'impressions'}),
        ], on=['session_id', 'impressions'], how='left')
        df = rank_similarity_inside_session(df)
        return df
    train = load_data('train')
    cv = CrossValidation()
    # NOTE(review): LightGBM's parameter is 'bagging_freq'; the
    # 'bagging_frequency' keyword here may be silently ignored -- confirm.
    model = lgb.LGBMClassifier(n_estimators=50000, objective="binary", metric='binary_logloss',
                               num_leaves=31, min_child_samples=100, learning_rate=0.1,
                               bagging_fraction=0.7, feature_fraction=0.7, bagging_frequency=5,
                               seed=1, use_best_model=True, n_jobs=16)
    df_train = train[['session_id', 'impressions']]
    df_train['target'] = cv.validate(model, feats, train, train['target'], early_stopping_rounds=100, verbose=100)
    print('Validation Score:', np.mean(cv.scores))
    # Free the training frame before loading test to limit peak memory.
    del train
    gc.collect()
    test = load_data('test')
    df_test = test[['session_id', 'impressions']]
    df_test['target'] = cv.predict_proba(test)
    df_train.to_csv(m_20190624.train.path, index=False, float_format='%.4f')
    df_test.to_csv(m_20190624.test.path, index=False, float_format='%.4f')
@register(out=m_20190626, inp=[t_tr_te_classify, f_top30, f_si_sim,
f_si_cmp, f_si_win,
f_m2_top30, f_m3_top30])
def train_predict_lgb_20190626_2():
from feat_names import names_lgb_20190626_2 as feats
def load_data(tt):
df = merge_all([
t_tr_te_classify[tt].load().rename(columns={'item_id': 'impressions'}),
f_m2_top30[tt].load(),
f_m3_top30[tt].load(),
f_top30[tt].load(),
f_si_sim[tt].load().rename(columns={'item_id': 'impressions'}),
f_si_cmp[tt].load().rename(columns={'item_id': 'impressions'}),
f_si_win[tt].load().rename(columns={'item_id': 'impressions'}),
], on=['session_id', 'impressions'], how='left')
df = rank_similarity_inside_session(df)
cols_win = [
'item_id_impression_prev_item_win_ratio',
'item_id_impression_first_item_win_ratio',
'item_id_interaction_last_item_win_ratio',
'item_id_interaction_most_item_win_ratio',
]
df = rank_feat_inside_session(df, cols_win)
return df
train = load_data('train')
cv = CrossValidation()
model = lgb.LGBMClassifier(n_estimators=50000, objective="binary", metric='binary_logloss',
num_leaves=31, min_child_samples=100, learning_rate=0.1,
bagging_fraction=0.7, feature_fraction=0.7, bagging_frequency=5,
seed=1, feature_fraction_seed=1, use_best_model=True, n_jobs=16)
df_train = train[['session_id', 'impressions']]
df_train['target'] = cv.validate(model, feats, train, train['target'], early_stopping_rounds=100, verbose=100)
print('Validation Score:', np.mean(cv.scores))
del train
gc.collect()
test = load_data('test')
df_test = test[['session_id', 'impressions']]
df_test['target'] = cv.predict_proba(test)
df_train.to_csv(m_20190626.train.path, index=False, float_format='%.4f')
df_test.to_csv(m_20190626.test.path, index=False, float_format='%.4f')
| [
"utils.CrossValidation",
"numpy.mean",
"lightgbm.LGBMClassifier",
"gc.collect"
] | [((2226, 2243), 'utils.CrossValidation', 'CrossValidation', ([], {}), '()\n', (2241, 2243), False, 'from utils import load_dataframe, convert_dtype, CrossValidation, merge_all\n'), ((2257, 2541), 'lightgbm.LGBMClassifier', 'lgb.LGBMClassifier', ([], {'n_estimators': '(50000)', 'objective': '"""binary"""', 'metric': '"""binary_logloss"""', 'num_leaves': '(31)', 'min_child_samples': '(100)', 'learning_rate': '(0.1)', 'bagging_fraction': '(0.7)', 'feature_fraction': '(0.7)', 'bagging_frequency': '(5)', 'seed': '(1)', 'feature_fraction_seed': '(1)', 'use_best_model': '(True)', 'n_jobs': '(16)'}), "(n_estimators=50000, objective='binary', metric=\n 'binary_logloss', num_leaves=31, min_child_samples=100, learning_rate=\n 0.1, bagging_fraction=0.7, feature_fraction=0.7, bagging_frequency=5,\n seed=1, feature_fraction_seed=1, use_best_model=True, n_jobs=16)\n", (2275, 2541), True, 'import lightgbm as lgb\n'), ((2863, 2875), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2873, 2875), False, 'import gc\n'), ((3860, 3877), 'utils.CrossValidation', 'CrossValidation', ([], {}), '()\n', (3875, 3877), False, 'from utils import load_dataframe, convert_dtype, CrossValidation, merge_all\n'), ((3891, 4150), 'lightgbm.LGBMClassifier', 'lgb.LGBMClassifier', ([], {'n_estimators': '(50000)', 'objective': '"""binary"""', 'metric': '"""binary_logloss"""', 'num_leaves': '(31)', 'min_child_samples': '(100)', 'learning_rate': '(0.1)', 'bagging_fraction': '(0.7)', 'feature_fraction': '(0.7)', 'bagging_frequency': '(5)', 'seed': '(1)', 'use_best_model': '(True)', 'n_jobs': '(16)'}), "(n_estimators=50000, objective='binary', metric=\n 'binary_logloss', num_leaves=31, min_child_samples=100, learning_rate=\n 0.1, bagging_fraction=0.7, feature_fraction=0.7, bagging_frequency=5,\n seed=1, use_best_model=True, n_jobs=16)\n", (3909, 4150), True, 'import lightgbm as lgb\n'), ((4472, 4484), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4482, 4484), False, 'import gc\n'), ((5989, 6006), 
'utils.CrossValidation', 'CrossValidation', ([], {}), '()\n', (6004, 6006), False, 'from utils import load_dataframe, convert_dtype, CrossValidation, merge_all\n'), ((6020, 6304), 'lightgbm.LGBMClassifier', 'lgb.LGBMClassifier', ([], {'n_estimators': '(50000)', 'objective': '"""binary"""', 'metric': '"""binary_logloss"""', 'num_leaves': '(31)', 'min_child_samples': '(100)', 'learning_rate': '(0.1)', 'bagging_fraction': '(0.7)', 'feature_fraction': '(0.7)', 'bagging_frequency': '(5)', 'seed': '(1)', 'feature_fraction_seed': '(1)', 'use_best_model': '(True)', 'n_jobs': '(16)'}), "(n_estimators=50000, objective='binary', metric=\n 'binary_logloss', num_leaves=31, min_child_samples=100, learning_rate=\n 0.1, bagging_fraction=0.7, feature_fraction=0.7, bagging_frequency=5,\n seed=1, feature_fraction_seed=1, use_best_model=True, n_jobs=16)\n", (6038, 6304), True, 'import lightgbm as lgb\n'), ((6626, 6638), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6636, 6638), False, 'import gc\n'), ((2823, 2841), 'numpy.mean', 'np.mean', (['cv.scores'], {}), '(cv.scores)\n', (2830, 2841), True, 'import numpy as np\n'), ((4432, 4450), 'numpy.mean', 'np.mean', (['cv.scores'], {}), '(cv.scores)\n', (4439, 4450), True, 'import numpy as np\n'), ((6586, 6604), 'numpy.mean', 'np.mean', (['cv.scores'], {}), '(cv.scores)\n', (6593, 6604), True, 'import numpy as np\n')] |
from numpy import argsort, diff, max, where
from numpy import abs as np_abs
from numpy import mean as np_mean
from numpy import median as np_median
from numpy import var as np_var
from numpy import min as np_min
from numpy import max as np_max
import numpy as np
from numpy.lib.histograms import _unsigned_subtract
from scipy.signal import convolve, find_peaks
from mne.filter import create_filter
class NoValidTroughException(Exception):
pass
class SharpwaveAnalyzer:
def __init__(self, sw_settings, sfreq) -> None:
"""
Parameters
----------
sw_settings : dict
Sharpwave settings from settings.json
sfreq : float
data sampling frequency
"""
self.sw_settings = sw_settings
self.sfreq = sfreq
self.filter = \
create_filter(None, sfreq,
l_freq=sw_settings['filter_low_cutoff'],
h_freq=sw_settings['filter_high_cutoff'],
fir_design='firwin', l_trans_bandwidth=4,
h_trans_bandwidth=4, filter_length=str(sfreq)+'ms',
verbose=False)
# initialize used features
self.used_features = list()
for feature_name, val in self.sw_settings["sharpwave_features"].items():
if val is True:
self.used_features.append(feature_name)
# initialize attributes
self.initialize_sw_features()
# initializing estimator functions, respecitive for all sharpwave features
fun_names = []
for used_feature in self.used_features:
for estimator, est_features in self.sw_settings["estimator"].items():
if est_features is not None:
for est_feature in est_features:
if used_feature == est_feature:
fun_names.append(estimator)
self.estimator_names = fun_names
self.estimator_functions = [getattr(np, est_name) for est_name in self.estimator_names]
def initialize_sw_features(self) -> None:
"""Resets used attributes to empty lists
"""
for feature_name in self.used_features:
setattr(self, feature_name, list())
if "trough" not in self.used_features:
# trough attribute is still necessary, even if it is not specified in settings
self.trough = list()
self.troughs_idx = list()
def get_peaks_around(self, trough_ind, arr_ind_peaks, filtered_dat):
""" Find the closest peaks to the right and left side a given trough.
Parameters
----------
trough_ind (int): index of trough
arr_ind_peaks (np.ndarray): array of peak indices
filtered_dat (np.ndarray): raw data batch
Raises:
NoValidTroughException: Returned if no adjacent peak can be found
Returns
-------
peak_left_idx (np.ndarray): index of left peak
peak_right_idx (np.ndarray): index of right peak
peak_left_val (np.ndarray): value of left peak
peak_right_val (np.ndarray): value of righ peak
"""
ind_greater = where(arr_ind_peaks > trough_ind)[0]
if ind_greater.shape[0] == 0:
raise NoValidTroughException("No valid trough")
val_ind_greater = arr_ind_peaks[ind_greater]
peak_right_idx = arr_ind_peaks[ind_greater[argsort(val_ind_greater)[0]]]
ind_smaller = where(arr_ind_peaks < trough_ind)[0]
if ind_smaller.shape[0] == 0:
raise NoValidTroughException("No valid trough")
val_ind_smaller = arr_ind_peaks[ind_smaller]
peak_left_idx = \
arr_ind_peaks[ind_smaller[argsort(val_ind_smaller)[-1]]]
return peak_left_idx, peak_right_idx, filtered_dat[peak_left_idx], \
filtered_dat[peak_right_idx]
def get_sharpwave_features(self, features_, data, ch):
""" Given a new data batch, the peaks, troughs and sharpwave features
are estimated. Importantly only new data is being analyzed here. In
steps of 1/settings["sampling_rate_features] are analyzed and returned.
Data is assumed to be notch filtered and bandpass filtered beforehand.
Parameters
----------
features_ (dict): Features.py estimated features
data (np.ndarray): 1d single channel data batch
ch (string): channel name
Returns
-------
features_ (dict): set features for Features.py object
"""
self.filtered_data = convolve(data, self.filter, mode='same')
# check settings if troughs and peaks are analyzed
dict_ch_features = {}
for detect_troughs in [False, True]:
if detect_troughs is False:
if self.sw_settings["detect_peaks"]["estimate"] is False:
continue
key_name_pt = 'Peak'
# the detect_troughs loop start with peaks, s.t. data does not
# need to be flipped
if detect_troughs is True:
if self.sw_settings["detect_troughs"]["estimate"] is False:
continue
key_name_pt = 'Trough'
self.filtered_data = -self.filtered_data
self.initialize_sw_features() # reset sharpwave feature attriubtes to empty lists
self.analyze_waveform()
# this function needs to looks different;
# for each feature take the respective fun.
for feature_idx, feature_name in enumerate(self.used_features):
key_name = '_'.join([ch, 'Sharpwave', self.estimator_names[feature_idx].title(), feature_name])
val = self.estimator_functions[feature_idx](getattr(self, feature_name)) \
if len(getattr(self, feature_name)) != 0 else 0
if key_name not in dict_ch_features:
dict_ch_features[key_name] = {}
dict_ch_features[key_name][key_name_pt] = val
if self.sw_settings["apply_estimator_between_peaks_and_troughs"]:
# apply between 'Trough' and 'Peak' the respective function again
# save only the 'est_fun' (e.g. max) between them
for idx, key_name in enumerate(dict_ch_features):
# the key_name stays, since the estimator function stays between peaks and troughs
features_[key_name] = self.estimator_functions[idx]([list(dict_ch_features[key_name].values())[0],\
list(dict_ch_features[key_name].values())[1]])
else:
# otherwise, save all
# write all "flatted" key value pairs in features_
for key, value in dict_ch_features.items():
for key_sub, value_sub in dict_ch_features[key].items():
features_[key+"_analyze_"+key_sub] = value_sub
return features_
def analyze_waveform(self) -> None:
""" Given the scipy.signal.find_peaks trough/peak distance
settings specified sharpwave features are estimated.
Parameters
----------
Raises:
NoValidTroughException: Return if no adjacent peak can be found
NoValidTroughException: Return if no adjacent peak can be found
"""
peaks = find_peaks(self.filtered_data, distance=self.sw_settings["detect_troughs"]["distance_peaks"])[0]
troughs = find_peaks(-self.filtered_data, distance=self.sw_settings["detect_troughs"]["distance_troughs"])[0]
for trough_idx in troughs:
try:
peak_idx_left, peak_idx_right, peak_left, peak_right = \
self.get_peaks_around(trough_idx, peaks, self.filtered_data)
except NoValidTroughException:
# in this case there are no adjacent two peaks around this trough
# str(e) could print the exception error message
# print(str(e))
continue
trough = self.filtered_data[trough_idx]
self.trough.append(trough)
self.troughs_idx.append(trough_idx)
if self.sw_settings["sharpwave_features"]["interval"] is True:
if len(self.troughs_idx) > 1:
# take the last identified trough idx
# corresponds here to second last trough_idx
interval = (trough_idx - self.troughs_idx[-2]) * \
(1000/self.sfreq)
else:
# set first interval to zero
interval = 0
self.interval.append(interval)
if self.sw_settings["sharpwave_features"]["peak_left"] is True:
self.peak_left.append(peak_left)
if self.sw_settings["sharpwave_features"]["peak_right"] is True:
self.peak_right.append(peak_right)
if self.sw_settings["sharpwave_features"]["sharpness"] is True:
# check if sharpness can be calculated
# trough_idx 5 ms need to be consistent
if (trough_idx - int(5*(1000/self.sfreq)) <= 0) or \
(trough_idx + int(5*(1000/self.sfreq)) >=
self.filtered_data.shape[0]):
continue
sharpness = ((self.filtered_data[trough_idx] -
self.filtered_data[trough_idx-int(5*(1000/self.sfreq))]) +
(self.filtered_data[trough_idx] -
self.filtered_data[trough_idx+int(5*(1000/self.sfreq))])) / 2
self.sharpness.append(sharpness)
if self.sw_settings["sharpwave_features"]["rise_steepness"] is True:
# steepness is calculated as the first derivative
# from peak/trough to trough/peak
# here + 1 due to python syntax, s.t. the last element is included
rise_steepness = max(diff(self.filtered_data[peak_idx_left: trough_idx+1]))
self.rise_steepness.append(rise_steepness)
if self.sw_settings["sharpwave_features"]["decay_steepness"] is True:
decay_steepness = max(diff(self.filtered_data[trough_idx: peak_idx_right+1]))
self.decay_steepness.append(decay_steepness)
if self.sw_settings["sharpwave_features"]["rise_steepness"] is True and \
self.sw_settings["sharpwave_features"]["decay_steepness"] is True and \
self.sw_settings["sharpwave_features"]["slope_ratio"] is True:
self.slope_ratio.append(rise_steepness - decay_steepness)
if self.sw_settings["sharpwave_features"]["prominence"] is True:
self.prominence.append(np_abs(
(peak_right + peak_left) / 2 - self.filtered_data[trough_idx])) # mV
if self.sw_settings["sharpwave_features"]["decay_time"] is True:
self.decay_time.append((peak_idx_left - trough_idx) * (1000/self.sfreq)) # ms
if self.sw_settings["sharpwave_features"]["rise_time"] is True:
self.rise_time.append((peak_idx_right - trough_idx) * (1000/self.sfreq)) # ms
if self.sw_settings["sharpwave_features"]["width"] is True:
self.width.append(peak_idx_right - peak_idx_left) # ms
| [
"numpy.abs",
"scipy.signal.convolve",
"numpy.where",
"numpy.diff",
"numpy.argsort",
"scipy.signal.find_peaks"
] | [((4597, 4637), 'scipy.signal.convolve', 'convolve', (['data', 'self.filter'], {'mode': '"""same"""'}), "(data, self.filter, mode='same')\n", (4605, 4637), False, 'from scipy.signal import convolve, find_peaks\n'), ((3208, 3241), 'numpy.where', 'where', (['(arr_ind_peaks > trough_ind)'], {}), '(arr_ind_peaks > trough_ind)\n', (3213, 3241), False, 'from numpy import argsort, diff, max, where\n'), ((3500, 3533), 'numpy.where', 'where', (['(arr_ind_peaks < trough_ind)'], {}), '(arr_ind_peaks < trough_ind)\n', (3505, 3533), False, 'from numpy import argsort, diff, max, where\n'), ((7400, 7498), 'scipy.signal.find_peaks', 'find_peaks', (['self.filtered_data'], {'distance': "self.sw_settings['detect_troughs']['distance_peaks']"}), "(self.filtered_data, distance=self.sw_settings['detect_troughs'][\n 'distance_peaks'])\n", (7410, 7498), False, 'from scipy.signal import convolve, find_peaks\n'), ((7515, 7616), 'scipy.signal.find_peaks', 'find_peaks', (['(-self.filtered_data)'], {'distance': "self.sw_settings['detect_troughs']['distance_troughs']"}), "(-self.filtered_data, distance=self.sw_settings['detect_troughs']\n ['distance_troughs'])\n", (7525, 7616), False, 'from scipy.signal import convolve, find_peaks\n'), ((3447, 3471), 'numpy.argsort', 'argsort', (['val_ind_greater'], {}), '(val_ind_greater)\n', (3454, 3471), False, 'from numpy import argsort, diff, max, where\n'), ((3753, 3777), 'numpy.argsort', 'argsort', (['val_ind_smaller'], {}), '(val_ind_smaller)\n', (3760, 3777), False, 'from numpy import argsort, diff, max, where\n'), ((10054, 10108), 'numpy.diff', 'diff', (['self.filtered_data[peak_idx_left:trough_idx + 1]'], {}), '(self.filtered_data[peak_idx_left:trough_idx + 1])\n', (10058, 10108), False, 'from numpy import argsort, diff, max, where\n'), ((10289, 10344), 'numpy.diff', 'diff', (['self.filtered_data[trough_idx:peak_idx_right + 1]'], {}), '(self.filtered_data[trough_idx:peak_idx_right + 1])\n', (10293, 10344), False, 'from numpy import argsort, diff, 
max, where\n'), ((10849, 10918), 'numpy.abs', 'np_abs', (['((peak_right + peak_left) / 2 - self.filtered_data[trough_idx])'], {}), '((peak_right + peak_left) / 2 - self.filtered_data[trough_idx])\n', (10855, 10918), True, 'from numpy import abs as np_abs\n')] |
import os
import pickle
import glob
import numpy as np
from tqdm import tqdm
import random
class Create:
"""
Reads, transforms and saves the data in the format your network will use.
Keyword arguments:
raw_data_folder_path -- the Folder Path where all the raw data is saved.
save_data_folder_path -- the Folder Path where the newly created data is stored.
max_number_of_records -- limit the maximum number of record if -1 then all record will
be used. (default -1)
percent_validation_data -- the percent of the data used to create the validation set.
(default 15)
percent_test_data -- the percent of the data used to create the test set. (default 15)
labeled_data_in_separate_file -- if True splits the features and labels into separate files,
otherwise the labels an features are saved into one file. (default True)
shuffle -- if True the data is shuffled before splitting the data out into validation
and test sets (default True)
"""
def __init__(self, raw_data_folder_path, save_data_folder_path, max_number_of_records=-1,
percent_validation_data=15, percent_test_data=15,
labeled_data_in_separate_file=True, shuffle=True):
self.raw_data_folder_path = raw_data_folder_path
self.save_data_folder_path = save_data_folder_path
self.max_number_of_records = max_number_of_records
self.percent_validation_data = percent_validation_data
self.percent_test_data = percent_test_data
self.labeled_data_in_separate_file = labeled_data_in_separate_file
self.shuffle = shuffle
self.path_training_features = '/training_features.npy'
self.path_training_labels = '/training_labels.npy'
self.path_validation_features = '/validation_features.npy'
self.path_validation_labels = '/validation_labels.npy'
self.path_test_features = '/test_features.npy'
self.path_test_labels = '/test_labels.npy'
self.path_training = '/training.npy'
self.path_validation = '/validation.npy'
self.path_test = '/test.npy'
def network_data(self):
"""
Reads the raw data from raw_data_folder_path (uses .pkl only)
and transforms the data ready for saving
returns transformed data
"""
listing = glob.glob(self.raw_data_folder_path + '/*.pkl')
data = []
records_added_count = 0
if self.shuffle:
np.random.shuffle(listing)
prev_steering_feature = 0
prev_throttle_feature = 0
for filename in tqdm(listing):
filename = filename.replace('\\', '/')
with open(filename, 'rb') as file_data:
project_cars_state = pickle.load(file_data)
controller_state = pickle.load(file_data)
car = project_cars_state.mParticipantInfo[0]
# remove all record that are not on a flying lap
position = car.mWorldPosition
#angle = project_cars_state.mOrientation
#velocity = project_cars_state.mLocalVelocity
#speed = project_cars_state.mSpeed
throttle = controller_state['right_trigger'] / 255.0 # 0 - 255
brakes = controller_state['left_trigger'] / 255.0#0 - 255
steering = controller_state['thumb_lx'] /32767#-32768 - 32767
# feature = np.array([position[0], position[1], position[2],
# angle[0], angle[1], angle[2],
# velocity[0], velocity[1], velocity[2]])
# feature = np.array([position[0], position[1], position[2],
# angle[0], angle[1], angle[2]])
#position 2 is up and down
# round(angle[1], 1)]
#feature = np.array([position[0], position[1], position[2], angle[1]])
#rolling_previous_features = np.append(rolling_previous_features, [feature], axis=0)
#rolling_previous_features.append([feature])
#rolling_previous_features = rolling_previous_features[1:]
# if rolling_previous_features[0][0] == 0.0 and rolling_previous_features[0][3] == 0.0:
# continue
#print(rolling_previous_features)
#print(feature.shape())
# label = np.array([throttle, brakes, steering])
#print(project_cars_state.mSteering)
# if project_cars_state.mSteering == 0.0:
# #print(steering)
# if random.randint(0,2) == 0:
# continue
# if throttle > 0.0 and brakes > 0.0:#not many (4)
# print('throttle {}, brakes {}'.format(throttle, brakes))
# speed = throttle / 255.0
# speed = speed + ((brakes / 255.0) * -1)
# label = np.array([speed, steering / 32767.0])
feature = np.array([round(position[0], 2), round(position[2], 2), prev_steering_feature, prev_throttle_feature])
throttle_label = throttle
throttle_label = throttle_label + ((brakes) * -1)
label = np.array([throttle_label, steering])
#label = np.array([speed])
#label = np.array([steering / 32768])
prev_steering_feature = steering
prev_throttle_feature = throttle_label
if car.mCurrentLapDistance == 0.0:
continue
data.append([feature, label])
# new_features = np.array([rolling_previous_features[0][0], rolling_previous_features[0][1], rolling_previous_features[0][2], rolling_previous_features[0][3],
# rolling_previous_features[1][0], rolling_previous_features[1][1], rolling_previous_features[1][2], rolling_previous_features[1][3],
# rolling_previous_features[2][0], rolling_previous_features[2][1], rolling_previous_features[2][2], rolling_previous_features[2][3]])
#data.append([new_features, label])
records_added_count += 1
if records_added_count == self.max_number_of_records:
break
print('Total records found: {}'.format(len(data)))
for data_record in data:
print(data_record)
return data
def save_data(self, data):
"""
Save the data to the save_data_folder_path into train, validation and test sets
Keyword arguments:
data -- contains the transformed data from 'network_data()'
Note this calls remove_existing_files() before saving the data
"""
self.remove_existing_files()
total_number_of_records = len(data)
validation_percentage = int(
(total_number_of_records / 100) * self.percent_validation_data)
test_percentage = int(
(total_number_of_records / 100) * self.percent_test_data)
data_training = np.array(
data[test_percentage + validation_percentage:])
data_validation = np.array(
data[test_percentage: test_percentage + validation_percentage])
data_test = np.array(data[0: test_percentage])
# save data
if self.labeled_data_in_separate_file:
# create variables to store labels and features
data_training_features = []
data_training_labels = []
data_validation_features = []
data_validation_labels = []
data_test_features = []
data_test_labels = []
throttle_count = 0
no_throttle_count = 0
no_steering_count = 0
#np.random.shuffle(data_training)
for record in data_training: # is there a better way?
#print(record[1])
#print(record[1] / 32768)
#temp_record = temp_record + ((record[1][1] / 255.0) * -1)
balance_throttle = False
if balance_throttle:
temp_record = record[1][0]
if temp_record > 0.0:
if no_throttle_count > throttle_count:
data_training_features.append(np.array(record[0]))
data_training_labels.append(record[1])
throttle_count += 1
print(temp_record)
else:
data_training_features.append(np.array(record[0]))
data_training_labels.append(record[1])
no_throttle_count += 1
print(temp_record)
else:
data_training_features.append(np.array(record[0]))
data_training_labels.append(record[1])
#print(record[1])
# if no_throttle_count < throttle_count:
# continue
#if record[1] <= -0.5:
# for record in data_training: # is there a better way?
# # temp_record = record[1][0] / 255.0
# # temp_record = temp_record + ((record[1][1] / 255.0) * -1)
# if record[1] >= 0.5 and throttle_count < no_throttle_count:
# data_training_features.append(np.array(record[0]))
# data_training_labels.append(np.array(record[1]))
# throttle_count += 1
# for record in data_training: # is there a better way?
# # temp_record = record[1][0] / 255.0
# # temp_record = temp_record + ((record[1][1] / 255.0) * -1)
# if record[1] > -0.02 and record[1] < 0.02 and no_steering_count < no_throttle_count:
# data_training_features.append(np.array(record[0]))
# data_training_labels.append(np.array(0.0))
# no_steering_count += 1
for record in data_validation: # is there a better way?
data_validation_features.append(np.array(record[0]))
data_validation_labels.append(np.array(record[1]))
for record in data_test: # is there a better way?
data_test_features.append(np.array(record[0]))
data_test_labels.append(np.array(record[1]))
#print('data {}'.format(len(data_training_features)))
print('throttle : {} brake: {} '.format(throttle_count, no_throttle_count))
np.save(self.save_data_folder_path + self.path_training_features, data_training_features)
np.save(self.save_data_folder_path + self.path_training_labels, data_training_labels)
np.save(self.save_data_folder_path + self.path_validation_features, data_validation_features)
np.save(self.save_data_folder_path + self.path_validation_labels, data_validation_labels)
np.save(self.save_data_folder_path + self.path_test_features, data_test_features)
np.save(self.save_data_folder_path + self.path_test_labels, data_test_labels)
self.save_mean_and_std()
else:
np.save(self.path_training, data_training)
np.save(self.path_validation, data_validation)
np.save(self.path_test, data_test)
print('Completed: Training examples: {}, Validation examples: {}, Test examples: {}'.format(
len(data_training_features), len(data_validation_features), len(data_test_features)))
def remove_existing_files(self):
"""
Checks and delete existing .npy files.
Also creates the folder path if it doesn't exist
"""
# create path if is does not exist
if not os.path.exists(self.save_data_folder_path):
os.makedirs(self.save_data_folder_path)
# as the folder was just create there's no need to check if the files exist
return
# make sure old data is removed
list_of_files_to_check = [self.path_training_features,
self.path_training_labels,
self.path_validation_features,
self.path_validation_labels,
self.path_test_features,
self.path_test_labels,
self.path_training,
self.path_validation,
self.path_test]
for file in list_of_files_to_check:
path_training = self.save_data_folder_path + file
if os.path.exists(path_training):
os.remove(path_training)
def save_mean_and_std(self):
#load training data
training_features = np.load(self.save_data_folder_path + self.path_training_features)
#training_labels = np.load(self.save_data_folder_path + self.path_training_features)
#normilize data
mean = np.mean(training_features, axis=0)
std = np.std(training_features, axis=0)
np.save(self.save_data_folder_path + '/mean.npy', mean)
np.save(self.save_data_folder_path + '/std.npy', std)
| [
"numpy.mean",
"os.path.exists",
"os.makedirs",
"tqdm.tqdm",
"pickle.load",
"os.remove",
"numpy.array",
"numpy.save",
"numpy.std",
"numpy.load",
"glob.glob",
"numpy.random.shuffle"
] | [((2345, 2392), 'glob.glob', 'glob.glob', (["(self.raw_data_folder_path + '/*.pkl')"], {}), "(self.raw_data_folder_path + '/*.pkl')\n", (2354, 2392), False, 'import glob\n'), ((2603, 2616), 'tqdm.tqdm', 'tqdm', (['listing'], {}), '(listing)\n', (2607, 2616), False, 'from tqdm import tqdm\n'), ((6970, 7026), 'numpy.array', 'np.array', (['data[test_percentage + validation_percentage:]'], {}), '(data[test_percentage + validation_percentage:])\n', (6978, 7026), True, 'import numpy as np\n'), ((7066, 7137), 'numpy.array', 'np.array', (['data[test_percentage:test_percentage + validation_percentage]'], {}), '(data[test_percentage:test_percentage + validation_percentage])\n', (7074, 7137), True, 'import numpy as np\n'), ((7172, 7205), 'numpy.array', 'np.array', (['data[0:test_percentage]'], {}), '(data[0:test_percentage])\n', (7180, 7205), True, 'import numpy as np\n'), ((12949, 13014), 'numpy.load', 'np.load', (['(self.save_data_folder_path + self.path_training_features)'], {}), '(self.save_data_folder_path + self.path_training_features)\n', (12956, 13014), True, 'import numpy as np\n'), ((13148, 13182), 'numpy.mean', 'np.mean', (['training_features'], {'axis': '(0)'}), '(training_features, axis=0)\n', (13155, 13182), True, 'import numpy as np\n'), ((13197, 13230), 'numpy.std', 'np.std', (['training_features'], {'axis': '(0)'}), '(training_features, axis=0)\n', (13203, 13230), True, 'import numpy as np\n'), ((13240, 13295), 'numpy.save', 'np.save', (["(self.save_data_folder_path + '/mean.npy')", 'mean'], {}), "(self.save_data_folder_path + '/mean.npy', mean)\n", (13247, 13295), True, 'import numpy as np\n'), ((13304, 13357), 'numpy.save', 'np.save', (["(self.save_data_folder_path + '/std.npy')", 'std'], {}), "(self.save_data_folder_path + '/std.npy', std)\n", (13311, 13357), True, 'import numpy as np\n'), ((2482, 2508), 'numpy.random.shuffle', 'np.random.shuffle', (['listing'], {}), '(listing)\n', (2499, 2508), True, 'import numpy as np\n'), ((5174, 5210), 
'numpy.array', 'np.array', (['[throttle_label, steering]'], {}), '([throttle_label, steering])\n', (5182, 5210), True, 'import numpy as np\n'), ((10672, 10765), 'numpy.save', 'np.save', (['(self.save_data_folder_path + self.path_training_features)', 'data_training_features'], {}), '(self.save_data_folder_path + self.path_training_features,\n data_training_features)\n', (10679, 10765), True, 'import numpy as np\n'), ((10774, 10863), 'numpy.save', 'np.save', (['(self.save_data_folder_path + self.path_training_labels)', 'data_training_labels'], {}), '(self.save_data_folder_path + self.path_training_labels,\n data_training_labels)\n', (10781, 10863), True, 'import numpy as np\n'), ((10873, 10970), 'numpy.save', 'np.save', (['(self.save_data_folder_path + self.path_validation_features)', 'data_validation_features'], {}), '(self.save_data_folder_path + self.path_validation_features,\n data_validation_features)\n', (10880, 10970), True, 'import numpy as np\n'), ((10979, 11072), 'numpy.save', 'np.save', (['(self.save_data_folder_path + self.path_validation_labels)', 'data_validation_labels'], {}), '(self.save_data_folder_path + self.path_validation_labels,\n data_validation_labels)\n', (10986, 11072), True, 'import numpy as np\n'), ((11082, 11167), 'numpy.save', 'np.save', (['(self.save_data_folder_path + self.path_test_features)', 'data_test_features'], {}), '(self.save_data_folder_path + self.path_test_features,\n data_test_features)\n', (11089, 11167), True, 'import numpy as np\n'), ((11176, 11253), 'numpy.save', 'np.save', (['(self.save_data_folder_path + self.path_test_labels)', 'data_test_labels'], {}), '(self.save_data_folder_path + self.path_test_labels, data_test_labels)\n', (11183, 11253), True, 'import numpy as np\n'), ((11318, 11360), 'numpy.save', 'np.save', (['self.path_training', 'data_training'], {}), '(self.path_training, data_training)\n', (11325, 11360), True, 'import numpy as np\n'), ((11373, 11419), 'numpy.save', 'np.save', (['self.path_validation', 
'data_validation'], {}), '(self.path_validation, data_validation)\n', (11380, 11419), True, 'import numpy as np\n'), ((11432, 11466), 'numpy.save', 'np.save', (['self.path_test', 'data_test'], {}), '(self.path_test, data_test)\n', (11439, 11466), True, 'import numpy as np\n'), ((11892, 11934), 'os.path.exists', 'os.path.exists', (['self.save_data_folder_path'], {}), '(self.save_data_folder_path)\n', (11906, 11934), False, 'import os\n'), ((11948, 11987), 'os.makedirs', 'os.makedirs', (['self.save_data_folder_path'], {}), '(self.save_data_folder_path)\n', (11959, 11987), False, 'import os\n'), ((12786, 12815), 'os.path.exists', 'os.path.exists', (['path_training'], {}), '(path_training)\n', (12800, 12815), False, 'import os\n'), ((2760, 2782), 'pickle.load', 'pickle.load', (['file_data'], {}), '(file_data)\n', (2771, 2782), False, 'import pickle\n'), ((2818, 2840), 'pickle.load', 'pickle.load', (['file_data'], {}), '(file_data)\n', (2829, 2840), False, 'import pickle\n'), ((12833, 12857), 'os.remove', 'os.remove', (['path_training'], {}), '(path_training)\n', (12842, 12857), False, 'import os\n'), ((10227, 10246), 'numpy.array', 'np.array', (['record[0]'], {}), '(record[0])\n', (10235, 10246), True, 'import numpy as np\n'), ((10294, 10313), 'numpy.array', 'np.array', (['record[1]'], {}), '(record[1])\n', (10302, 10313), True, 'import numpy as np\n'), ((10421, 10440), 'numpy.array', 'np.array', (['record[0]'], {}), '(record[0])\n', (10429, 10440), True, 'import numpy as np\n'), ((10482, 10501), 'numpy.array', 'np.array', (['record[1]'], {}), '(record[1])\n', (10490, 10501), True, 'import numpy as np\n'), ((8797, 8816), 'numpy.array', 'np.array', (['record[0]'], {}), '(record[0])\n', (8805, 8816), True, 'import numpy as np\n'), ((8551, 8570), 'numpy.array', 'np.array', (['record[0]'], {}), '(record[0])\n', (8559, 8570), True, 'import numpy as np\n'), ((8288, 8307), 'numpy.array', 'np.array', (['record[0]'], {}), '(record[0])\n', (8296, 8307), True, 'import numpy as 
np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
from pandas_datareader import data
import statsmodels.api as sm
from statsmodels.tsa.seasonal import STL
import pandas_datareader.data as DataReader
def get_stock(stock,start,end):
    """Fetch daily closing prices for *stock* from stooq and return them
    oldest-first, clipped to the [start, end] window."""
    closes = data.DataReader(stock, 'stooq', start)["Close"]
    # Reverse so the series runs oldest -> newest (stooq appears to return newest first).
    chronological = closes.iloc[::-1]
    return chronological[start:end]
from numba import jit
@jit(nopython=True)
def EMA3(x, n):
    """Recursive exponential moving average of array x with smoothing
    factor 2/(n+1), compiled with numba for speed."""
    a = 2/(n+1)
    out = np.empty_like(x)
    out[0] = x[0]
    for k in range(1, len(x)):
        # Standard EMA recurrence: blend the new sample with the running average.
        out[k] = a*x[k] + (1-a)*out[k-1]
    return out
def EMA1(x, n):
    """Exponentially weighted moving average of x computed via pandas,
    using alpha = 2/(n+1) (the span-equivalent smoothing factor)."""
    smoothing = 2/(n+1)
    series = pd.Series(x)
    return series.ewm(alpha=smoothing).mean()
# Candidate tickers (translated from Japanese): Sony 6758, JAL 9201, SMFG 8316,
# NTT Docomo 9437, ANA 9202, Nissan 7201, Fast Retailing 9983, Mizuho 8411,
# Sumitomo Chemical 4005, Towa Pharma 4553, NTT 9432, NTT Data 9613, Shimano 7309;
# US symbols: 'GOOG','AAPL','FB','AMZN','AAL','ZM'.
stock0 = ['6758','9201','8316','9437','9202','7201','9983','8411','4005','4553','9432','9613','7309']
#stock0 = ['GOOG','AAPL','FB','AMZN', 'AAL','ZM']
# For each ticker: download closes, smooth with a Hodrick-Prescott filter,
# build MACD-style indicators on the smoothed series, and save a two-panel chart.
for j in stock0:
    stock = j + '.T' #6758.T for yahoo, .JP for stooq
    start = dt.date(2020,1,1)
    end = dt.date(2020,6,11)
    #df = pd.DataFrame(get_stock(stock, start, end))
    df=DataReader.get_data_yahoo("{}".format(stock),start,end)
    date_df=df['Close'].index.tolist()
    series = df['Close'].values.tolist()
    bunseki = "trend" #series" #cycle" #trend
    # HP filter splits the closes into cyclical and trend components (lambda=144).
    cycle, trend = sm.tsa.filters.hpfilter(series, 144)
    df['Close'] = trend
    series2 = df['Close'].values.tolist()
    #print(series2[len(series2)-10:len(series2)])
    df['Close']=series #series" #cycle" #trend
    df['Close2']=series2
    # MACD on the trend series: 12/26-period EMAs with 9-period signal lines.
    df['y12'] = EMA1(df['Close2'], 12)
    df['y26'] = EMA1(df['Close2'], 26)
    df['MACD'] = df['y12'] -df['y26']
    # 'MACD2' substitutes the smoothed close itself for the 12-period EMA.
    df['MACD2'] = df['Close2'] -df['y26']
    df['signal2'] = EMA1(df['MACD2'], 9)
    df['signal'] = EMA1(df['MACD'], 9)
    df['hist_']=df['MACD2']-df['signal2']
    date_df=df['Close'].index.tolist()
    print(df[len(series)-10:len(series)])
    # Top panel: raw + smoothed price and EMAs; bottom panel: MACD2/signal2 + histogram bars.
    fig, (ax1,ax2) = plt.subplots(2,1,figsize=(1.6180 * 8, 4*2),dpi=200)
    ax1.plot(df['Close'],label="series")
    ax1.plot(df['Close2'],label="series2")
    ax1.plot(df['y12'],label="y12")
    ax1.plot(df['y26'],label="y26")
    ax2.plot(df['MACD2'],label="MACD2")
    #ax2.plot(df['MACD'],label="MACD")
    ax2.plot(df['signal2'],label="signal2")
    #ax2.plot(df['signal'],label="signal")
    ax2.bar(date_df,df['hist_'])
    ax1.set_title("{}".format(j))
    ax1.legend()
    ax2.legend()
    ax1.grid()
    ax2.grid()
    #ax2.set_ylim(-5,20)
    # NOTE(review): the folder component is the literal string "stock0", not the
    # ticker j — confirm ./stock/stock0/ exists and that this is intended.
    plt.savefig("./stock/{}/{}_{}_{}_.png".format("stock0",j,bunseki,end))
    plt.pause(1)
plt.close() | [
"pandas.Series",
"pandas_datareader.data.DataReader",
"matplotlib.pyplot.close",
"numba.jit",
"numpy.empty_like",
"datetime.date",
"statsmodels.api.tsa.filters.hpfilter",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.subplots"
] | [((404, 422), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (407, 422), False, 'from numba import jit\n'), ((467, 483), 'numpy.empty_like', 'np.empty_like', (['x'], {}), '(x)\n', (480, 483), True, 'import numpy as np\n'), ((1107, 1126), 'datetime.date', 'dt.date', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (1114, 1126), True, 'import datetime as dt\n'), ((1135, 1155), 'datetime.date', 'dt.date', (['(2020)', '(6)', '(11)'], {}), '(2020, 6, 11)\n', (1142, 1155), True, 'import datetime as dt\n'), ((1416, 1452), 'statsmodels.api.tsa.filters.hpfilter', 'sm.tsa.filters.hpfilter', (['series', '(144)'], {}), '(series, 144)\n', (1439, 1452), True, 'import statsmodels.api as sm\n'), ((2027, 2082), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(1.618 * 8, 4 * 2)', 'dpi': '(200)'}), '(2, 1, figsize=(1.618 * 8, 4 * 2), dpi=200)\n', (2039, 2082), True, 'import matplotlib.pyplot as plt\n'), ((2636, 2648), 'matplotlib.pyplot.pause', 'plt.pause', (['(1)'], {}), '(1)\n', (2645, 2648), True, 'import matplotlib.pyplot as plt\n'), ((2653, 2664), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2662, 2664), True, 'import matplotlib.pyplot as plt\n'), ((285, 323), 'pandas_datareader.data.DataReader', 'data.DataReader', (['stock', '"""stooq"""', 'start'], {}), "(stock, 'stooq', start)\n", (300, 323), False, 'from pandas_datareader import data\n'), ((651, 663), 'pandas.Series', 'pd.Series', (['x'], {}), '(x)\n', (660, 663), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 17 11:02:24 2022
@author: rossgra
"""
import numpy as np
from numpy.core.fromnumeric import std
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import scipy
from scipy.stats import mannwhitneyu
import statistics as stat
# Interactive switch: 'SAE' loads the per-standard-adult-equivalent datasets,
# anything else loads the per-day datasets.
metric = input('SAE or Non - ')
# Bring in the no-hood section first.
# For megajoules (alternative datasets, kept for reference):
#No_hood_MJ_path = "C:/Users/gvros/Box/OSU, CSC, CQC Project files/MJ per SAE - No_Hood.csv" #rossgra or gvros
#Hood_MJ_Path = "C:/Users/gvros/Box/OSU, CSC, CQC Project files/MJ per SAE - Hood.csv"
#### For fuel removed per period.
# NOTE(review): paths are hard-coded to one user's Box folder; confirm before running elsewhere.
if metric== 'SAE':
    No_hood_MJ_path = "C:/Users/gvros/Box/OSU, CSC, CQC Project files/24 Hour Remove SAE - No_Hood.csv" #rossgra or gvros
    Hood_MJ_Path = "C:/Users/gvros/Box/OSU, CSC, CQC Project files/24 Hour Remove SAE - Hood.csv"
    #No_hood_MJ_path = "C:/Users/gvros/Box/OSU, CSC, CQC Project files/MJ per SAE - No_Hood.csv" #rossgra or gvros
    #Hood_MJ_Path = "C:/Users/gvros/Box/OSU, CSC, CQC Project files/MJ per SAE - Hood.csv"
else:
    #No_hood_MJ_path = "C:/Users/gvros/Box/OSU, CSC, CQC Project files/24 Hour Remove - No_Hood.csv" #rossgra or gvros
    #Hood_MJ_Path = "C:/Users/gvros/Box/OSU, CSC, CQC Project files/24 Hour Remove - Hood.csv"
    No_hood_MJ_path = "C:/Users/gvros/Box/OSU, CSC, CQC Project files/MJ per Day - No_Hood.csv" #rossgra or gvros
    Hood_MJ_Path = "C:/Users/gvros/Box/OSU, CSC, CQC Project files/MJ per Day - Hood.csv"
###### For fuel removed per 24 hours per SAE.
# Alpha used later in the ad-hoc "degree" accept/reject threshold.
Level_of_confidence = 0.05
No_hood_MJ = pd.read_csv(No_hood_MJ_path)
Hood_MJ = pd.read_csv(Hood_MJ_Path)
#C:\Users\rossgra\Box\Classes\Software Dev C:\Users\rossgra\Box\OSU, CSC, CQC Project files
def _present(column):
    """Return the column as a plain list with the -1 missing-value sentinels removed."""
    return [value for value in column if value != -1]

# Household IDs per monitoring phase (N = no-hood arm, H = hood arm).
HH_1N = _present(No_hood_MJ.iloc[:, 0])
HH_2N = _present(No_hood_MJ.iloc[:, 11])
HH_3N = _present(No_hood_MJ.iloc[:, 22])
HH_4N = _present(No_hood_MJ.iloc[:, 33])
HH_1H = _present(Hood_MJ.iloc[:, 0])
HH_2H = _present(Hood_MJ.iloc[:, 11])
HH_3H = _present(Hood_MJ.iloc[:, 22])
# Per-phase metric values (unfiltered).
Mj_1N_Phase = _present(No_hood_MJ.iloc[:, 5])
Mj_2N_Phase = _present(No_hood_MJ.iloc[:, 16])
Mj_3N_Phase = _present(No_hood_MJ.iloc[:, 27])
Mj_4N_Phase = _present(No_hood_MJ.iloc[:, 38])
Mj_1H_Phase = _present(Hood_MJ.iloc[:, 5])
Mj_2H_Phase = _present(Hood_MJ.iloc[:, 16])
Mj_3H_Phase = _present(Hood_MJ.iloc[:, 27])
# Per-phase metric values after the outlier/day filter.
Mj_filter_1N_Phase = _present(No_hood_MJ.iloc[:, 6])
Mj_filter_2N_Phase = _present(No_hood_MJ.iloc[:, 17])
Mj_filter_3N_Phase = _present(No_hood_MJ.iloc[:, 28])
Mj_filter_4N_Phase = _present(No_hood_MJ.iloc[:, 39])
Mj_filter_1H_Phase = _present(Hood_MJ.iloc[:, 6])
Mj_filter_2H_Phase = _present(Hood_MJ.iloc[:, 17])
Mj_filter_3H_Phase = _present(Hood_MJ.iloc[:, 28])
# Total fuel removed per phase.
Fuel_1N_Phase = _present(No_hood_MJ.iloc[:, 3])
Fuel_2N_Phase = _present(No_hood_MJ.iloc[:, 14])
Fuel_3N_Phase = _present(No_hood_MJ.iloc[:, 25])
Fuel_4N_Phase = _present(No_hood_MJ.iloc[:, 36])
Fuel_1H_Phase = _present(Hood_MJ.iloc[:, 3])
Fuel_2H_Phase = _present(Hood_MJ.iloc[:, 14])
Fuel_3H_Phase = _present(Hood_MJ.iloc[:, 25])
# Average fuel per phase.
Avg_Fuel_1N = _present(No_hood_MJ.iloc[:, 2])
Avg_Fuel_2N = _present(No_hood_MJ.iloc[:, 13])
Avg_Fuel_3N = _present(No_hood_MJ.iloc[:, 24])
Avg_Fuel_4N = _present(No_hood_MJ.iloc[:, 35])
Avg_Fuel_1H = _present(Hood_MJ.iloc[:, 2])
Avg_Fuel_2H = _present(Hood_MJ.iloc[:, 13])
Avg_Fuel_3H = _present(Hood_MJ.iloc[:, 24])
# Monitoring-day counts per phase (unfiltered).
Phase_1N_day_count = _present(No_hood_MJ.iloc[:, 1])
Phase_2N_day_count = _present(No_hood_MJ.iloc[:, 12])
Phase_3N_day_count = _present(No_hood_MJ.iloc[:, 23])
Phase_4N_day_count = _present(No_hood_MJ.iloc[:, 34])
Phase_1H_day_count = _present(Hood_MJ.iloc[:, 1])
Phase_2H_day_count = _present(Hood_MJ.iloc[:, 12])
Phase_3H_day_count = _present(Hood_MJ.iloc[:, 23])
# Monitoring-day counts per phase after filtering.
Filter_1N_day_count = _present(No_hood_MJ.iloc[:, 7])
Filter_2N_day_count = _present(No_hood_MJ.iloc[:, 18])
Filter_3N_day_count = _present(No_hood_MJ.iloc[:, 29])
Filter_4N_day_count = _present(No_hood_MJ.iloc[:, 40])
Filter_1H_day_count = _present(Hood_MJ.iloc[:, 7])
Filter_2H_day_count = _present(Hood_MJ.iloc[:, 18])
Filter_3H_day_count = _present(Hood_MJ.iloc[:, 29])
# Cooking-event time counts per phase.
cooking_times_1N = _present(No_hood_MJ.iloc[:, 8])
cooking_times_2N = _present(No_hood_MJ.iloc[:, 19])
cooking_times_3N = _present(No_hood_MJ.iloc[:, 30])
cooking_times_4N = _present(No_hood_MJ.iloc[:, 41])
cooking_times_1H = _present(Hood_MJ.iloc[:, 8])
cooking_times_2H = _present(Hood_MJ.iloc[:, 19])
cooking_times_3H = _present(Hood_MJ.iloc[:, 30])
## Per-phase metric collections keyed by phase label, for convenient lookup.
no_hood_df = {'1N': Mj_1N_Phase,'2N':Mj_2N_Phase,'3N':Mj_3N_Phase,'4N':Mj_4N_Phase}
no_hood_filter_df = {'1N':Mj_filter_1N_Phase,'2N':Mj_filter_2N_Phase,'3N':Mj_filter_3N_Phase,'4N':Mj_filter_4N_Phase}
Hood_df = {'1H':Mj_1H_Phase,'2H':Mj_2H_Phase,'3H':Mj_3H_Phase }
# BUG FIX: Hood_Filter_df previously repeated the *unfiltered* hood series; it
# now holds the filtered series, mirroring no_hood_filter_df.
Hood_Filter_df = {'1H':Mj_filter_1H_Phase,'2H':Mj_filter_2H_Phase,'3H':Mj_filter_3H_Phase }
# Graphing: KDE distribution plots for each study arm.
# BUG FIX: the SAE hood-filtered title read 'Fuel/Day/SAE ood - Filtered'
# (missing 'H'). The two nearly identical if/else branches differed only in
# the title prefix, so they are collapsed and each title is spelled once.
title_base = 'Fuel/Day/SAE' if metric == 'SAE' else 'Fuel/Day'
sns.displot((Mj_1N_Phase, Mj_2N_Phase, Mj_3N_Phase,Mj_4N_Phase), kind="kde", common_norm=False)
plt.title('{} No-Hood'.format(title_base))
#plt.legend(labels=['1N', '2N', '3N', '4N'])
plt.show()
sns.displot((Mj_filter_1N_Phase, Mj_filter_2N_Phase, Mj_filter_3N_Phase,Mj_filter_4N_Phase), kind="kde", common_norm=False)
plt.title('{} No-Hood - Filtered'.format(title_base))
#plt.legend(labels=['1N', '2N', '3N', '4N'])
plt.show()
sns.displot((Mj_1H_Phase, Mj_2H_Phase, Mj_3H_Phase), kind="kde", common_norm=False)
plt.title('{} Hood'.format(title_base))
#plt.legend(labels=['1N', '2N', '3N', '4N'])
plt.show()
sns.displot((Mj_filter_1H_Phase, Mj_filter_2H_Phase, Mj_filter_3H_Phase), kind="kde", common_norm=False)
plt.title('{} Hood - Filtered'.format(title_base))
#plt.legend(labels=['1N', '2N', '3N', '4N'])
plt.show()
#1N to 2N
# Pair households appearing in both phase 1N and phase 2N, collecting the
# matched metric values and the combined monitoring-day counts.
# NOTE(review): the HH_* lists were already stripped of -1 sentinels above, and
# the guard compares against str(-1), so the break never fires — dead code.
# for Phase
MJ_Phase_1N_to_2_comon = []
MJ_Phase_2N_to_1_comon = []
Day_count_MJ_Phase_1N_2N = []
count_n = 0
for row_1N, hh_1N in enumerate(HH_1N):
    if hh_1N == str(-1):
        break
    for row_2N, hh_2N in enumerate(HH_2N):
        if hh_1N == hh_2N:
            MJ_Phase_1N_to_2_comon.append(Mj_1N_Phase[row_1N])
            MJ_Phase_2N_to_1_comon.append(Mj_2N_Phase[row_2N])
            Day_count_MJ_Phase_1N_2N.append(Phase_1N_day_count[row_1N] +Phase_2N_day_count[row_2N] )
            count_n = count_n + 1
# NOTE(review): sample size is recorded as matches - 1; confirm the off-by-one is intended.
N_MJ_Phase_1N_2N = count_n -1
#for filter
MJ_filter_1N_to_2_comon = []
MJ_filter_2N_to_1_comon = []
Day_count_MJ_filter_1N_2N = []
count_n = 0
for row_1N, hh_1N in enumerate(HH_1N):
    if hh_1N == str(-1):
        break
    for row_2N, hh_2N in enumerate(HH_2N):
        if hh_1N == hh_2N:
            MJ_filter_1N_to_2_comon.append(Mj_filter_1N_Phase[row_1N])
            MJ_filter_2N_to_1_comon.append(Mj_filter_2N_Phase[row_2N])
            Day_count_MJ_filter_1N_2N.append(Filter_1N_day_count[row_1N] +Filter_2N_day_count[row_2N] )
            count_n = count_n + 1
N_MJ_filter_1N_2N = count_n -1
#1N to 2N
###################____________________HOOOD
# Same pairing for the hood arm: households present in both 1H and 2H.
# for Phase
MJ_Phase_1H_to_2_comon = []
MJ_Phase_2H_to_1_comon = []
Day_count_MJ_Phase_1H_2H = []
count_n = 0
for row_1H, hh_1H in enumerate(HH_1H):
    if hh_1H == str(-1):
        break
    for row_2H, hh_2H in enumerate(HH_2H):
        if hh_1H == hh_2H:
            MJ_Phase_1H_to_2_comon.append(Mj_1H_Phase[row_1H])
            MJ_Phase_2H_to_1_comon.append(Mj_2H_Phase[row_2H])
            Day_count_MJ_Phase_1H_2H.append(Phase_1H_day_count[row_1H] +Phase_2H_day_count[row_2H] )
            count_n = count_n + 1
N_MJ_Phase_1H_2H = count_n -1
#for filter
MJ_filter_1H_to_2_comon = []
MJ_filter_2H_to_1_comon = []
Day_count_MJ_filter_1H_2H = []
count_n = 0
for row_1H, hh_1H in enumerate(HH_1H):
    if hh_1H == str(-1):
        break
    for row_2H, hh_2H in enumerate(HH_2H):
        if hh_1H == hh_2H:
            MJ_filter_1H_to_2_comon.append(Mj_filter_1H_Phase[row_1H])
            MJ_filter_2H_to_1_comon.append(Mj_filter_2H_Phase[row_2H])
            Day_count_MJ_filter_1H_2H.append(Filter_1H_day_count[row_1H] +Filter_2H_day_count[row_2H] )
            count_n = count_n + 1
N_MJ_filter_1H_2H = count_n -1
#1N to 3N
# Pair households appearing in both phase 1 and phase 3, for each arm.
# NOTE(review): here the guard compares hh to the int -1 (elsewhere str(-1));
# either way the HH_* lists no longer contain -1, so the break is dead code.
MJ_Phase_1N_to_3_comon = []
MJ_Phase_3N_to_1_comon = []
Day_count_MJ_Phase_1N_3N = []
count_n = 0
breakme = 0  # unused flag, kept as-is
for row_1N, hh_1N in enumerate(HH_1N):
    if hh_1N == (-1) :
        break
    for row_3N, hh_3N in enumerate(HH_3N):
        if hh_1N == hh_3N:
            MJ_Phase_1N_to_3_comon.append(Mj_1N_Phase[row_1N])
            MJ_Phase_3N_to_1_comon.append(Mj_3N_Phase[row_3N])
            Day_count_MJ_Phase_1N_3N.append(Phase_1N_day_count[row_1N] + Phase_3N_day_count[row_3N])
            count_n = count_n + 1
N_MJ_Phase_1N_3N = count_n -1
#for filter
MJ_filter_1N_to_3_comon = []
MJ_filter_3N_to_1_comon = []
Day_count_MJ_filter_1N_3N = []
count_n = 0
for row_1N, hh_1N in enumerate(HH_1N):
    if hh_1N == str(-1):
        break
    for row_3N, hh_3N in enumerate(HH_3N):
        if hh_1N == hh_3N:
            MJ_filter_1N_to_3_comon.append(Mj_filter_1N_Phase[row_1N])
            MJ_filter_3N_to_1_comon.append(Mj_filter_3N_Phase[row_3N])
            Day_count_MJ_filter_1N_3N.append(Filter_1N_day_count[row_1N] +Filter_3N_day_count[row_3N] )
            count_n = count_n + 1
N_MJ_filter_1N_3N = count_n -1
#1N to 3N
###################____________________HOOOD
# for Phase
MJ_Phase_1H_to_3_comon = []
MJ_Phase_3H_to_1_comon = []
Day_count_MJ_Phase_1H_3H = []
count_n = 0
breakme = 0  # unused flag, kept as-is
for row_1H, hh_1H in enumerate(HH_1H):
    if hh_1H == (-1) :
        break
    for row_3H, hh_3H in enumerate(HH_3H):
        if hh_1H == hh_3H:
            MJ_Phase_1H_to_3_comon.append(Mj_1H_Phase[row_1H])
            MJ_Phase_3H_to_1_comon.append(Mj_3H_Phase[row_3H])
            Day_count_MJ_Phase_1H_3H.append(Phase_1H_day_count[row_1H] + Phase_3H_day_count[row_3H])
            count_n = count_n + 1
N_MJ_Phase_1H_3H = count_n -1
#for filter
MJ_filter_1H_to_3_comon = []
MJ_filter_3H_to_1_comon = []
Day_count_MJ_filter_1H_3H = []
count_n = 0
for row_1H, hh_1H in enumerate(HH_1H):
    if hh_1H == str(-1):
        break
    for row_3H, hh_3H in enumerate(HH_3H):
        if hh_1H == hh_3H:
            MJ_filter_1H_to_3_comon.append(Mj_filter_1H_Phase[row_1H])
            MJ_filter_3H_to_1_comon.append(Mj_filter_3H_Phase[row_3H])
            Day_count_MJ_filter_1H_3H.append(Filter_1H_day_count[row_1H] +Filter_3H_day_count[row_3H] )
            count_n = count_n + 1
N_MJ_filter_1H_3H = count_n -1
#1N to 4N
# Pair households appearing in both phase 1N and phase 4N (no-hood arm only;
# the hood arm has no phase 4).
MJ_Phase_1N_to_4_comon = []
MJ_Phase_4N_to_1_comon = []
Day_count_MJ_Phase_1N_4N = []
count_n = 0
for row_1N, hh_1N in enumerate(HH_1N):
    if hh_1N == str(-1):
        break
    for row_4N, hh_4N in enumerate(HH_4N):
        if hh_1N == hh_4N:
            MJ_Phase_1N_to_4_comon.append(Mj_1N_Phase[row_1N])
            MJ_Phase_4N_to_1_comon.append(Mj_4N_Phase[row_4N])
            Day_count_MJ_Phase_1N_4N.append(Phase_1N_day_count[row_1N] +Phase_4N_day_count[row_4N] )
            count_n = count_n + 1
# Debug check that both matched lists stayed the same length.
print('length of 1n and 4 n:', len(MJ_Phase_1N_to_4_comon), len(MJ_Phase_4N_to_1_comon) )
N_MJ_Phase_1N_4N = count_n -1
#for filter
MJ_filter_1N_to_4_comon = []
MJ_filter_4N_to_1_comon = []
Day_count_MJ_filter_1N_4N = []
count_n = 0
for row_1N, hh_1N in enumerate(HH_1N):
    if hh_1N == str(-1):
        break
    for row_4N, hh_4N in enumerate(HH_4N):
        if hh_1N == hh_4N:
            MJ_filter_1N_to_4_comon.append(Mj_filter_1N_Phase[row_1N])
            MJ_filter_4N_to_1_comon.append(Mj_filter_4N_Phase[row_4N])
            Day_count_MJ_filter_1N_4N.append(Filter_1N_day_count[row_1N] +Filter_4N_day_count[row_4N] )
            count_n = count_n + 1
N_MJ_filter_1N_4N = count_n -1
#2N to 3N
# Pair households appearing in both phase 2 and phase 3, for each arm.
# The print statements inside the match branch echo each matched pair for inspection.
MJ_Phase_2N_to_3_comon = []
MJ_Phase_3N_to_2_comon = []
Day_count_MJ_Phase_2N_3N = []
count_n = 0
for row_2N, hh_2N in enumerate(HH_2N):
    if hh_2N == str(-1):
        break
    for row_3N, hh_3N in enumerate(HH_3N):
        if hh_2N == hh_3N:
            MJ_Phase_2N_to_3_comon.append(Mj_2N_Phase[row_2N])
            MJ_Phase_3N_to_2_comon.append(Mj_3N_Phase[row_3N])
            Day_count_MJ_Phase_2N_3N.append(Phase_2N_day_count[row_2N] +Phase_3N_day_count[row_3N] )
            print(hh_2N,Mj_2N_Phase[row_2N],hh_3N,Mj_3N_Phase[row_3N])
            count_n = count_n + 1
N_MJ_Phase_2N_3N = count_n -1
#for filter
MJ_filter_2N_to_3_comon = []
MJ_filter_3N_to_2_comon = []
Day_count_MJ_filter_2N_3N = []
count_n = 0
for row_2N, hh_2N in enumerate(HH_2N):
    if hh_2N == str(-1):
        break
    for row_3N, hh_3N in enumerate(HH_3N):
        if hh_2N == hh_3N:
            MJ_filter_2N_to_3_comon.append(Mj_filter_2N_Phase[row_2N])
            MJ_filter_3N_to_2_comon.append(Mj_filter_3N_Phase[row_3N])
            print(hh_2N,Mj_filter_2N_Phase[row_2N],hh_3N,Mj_filter_3N_Phase[row_3N])
            Day_count_MJ_filter_2N_3N.append(Filter_2N_day_count[row_2N] +Filter_3N_day_count[row_3N] )
            count_n = count_n + 1
N_MJ_filter_2N_3N = count_n - 1
#2N to 3N
###################____________________HOOOD
# for Phase
MJ_Phase_2H_to_3_comon = []
MJ_Phase_3H_to_2_comon = []
Day_count_MJ_Phase_2H_3H = []
count_n = 0
for row_2H, hh_2H in enumerate(HH_2H):
    if hh_2H == str(-1):
        break
    for row_3H, hh_3H in enumerate(HH_3H):
        if hh_2H == hh_3H:
            MJ_Phase_2H_to_3_comon.append(Mj_2H_Phase[row_2H])
            MJ_Phase_3H_to_2_comon.append(Mj_3H_Phase[row_3H])
            Day_count_MJ_Phase_2H_3H.append(Phase_2H_day_count[row_2H] +Phase_3H_day_count[row_3H] )
            count_n = count_n + 1
N_MJ_Phase_2H_3H = count_n -1
#for filter
MJ_filter_2H_to_3_comon = []
MJ_filter_3H_to_2_comon = []
Day_count_MJ_filter_2H_3H = []
count_n = 0
for row_2H, hh_2H in enumerate(HH_2H):
    if hh_2H == str(-1):
        break
    for row_3H, hh_3H in enumerate(HH_3H):
        if hh_2H == hh_3H:
            MJ_filter_2H_to_3_comon.append(Mj_filter_2H_Phase[row_2H])
            MJ_filter_3H_to_2_comon.append(Mj_filter_3H_Phase[row_3H])
            Day_count_MJ_filter_2H_3H.append(Filter_2H_day_count[row_2H] +Filter_3H_day_count[row_3H] )
            count_n = count_n + 1
N_MJ_filter_2H_3H = count_n - 1
#2N to 4N
# Pair households appearing in both phase 2N and phase 4N (no-hood arm only).
MJ_Phase_2N_to_4_comon = []
MJ_Phase_4N_to_2_comon = []
Day_count_MJ_Phase_2N_4N = []
count_n = 0
for row_2N, hh_2N in enumerate(HH_2N):
    if hh_2N == str(-1):
        break
    for row_4N, hh_4N in enumerate(HH_4N):
        if hh_2N == hh_4N:
            MJ_Phase_2N_to_4_comon.append(Mj_2N_Phase[row_2N])
            MJ_Phase_4N_to_2_comon.append(Mj_4N_Phase[row_4N])
            Day_count_MJ_Phase_2N_4N.append(Phase_2N_day_count[row_2N] +Phase_4N_day_count[row_4N] )
            count_n = count_n + 1
N_MJ_Phase_2N_4N = count_n -1
#for filter
MJ_filter_2N_to_4_comon = []
MJ_filter_4N_to_2_comon = []
Day_count_MJ_filter_2N_4N = []
count_n = 0
for row_2N, hh_2N in enumerate(HH_2N):
    if hh_2N == str(-1):
        break
    for row_4N, hh_4N in enumerate(HH_4N):
        if hh_2N == hh_4N:
            MJ_filter_2N_to_4_comon.append(Mj_filter_2N_Phase[row_2N])
            MJ_filter_4N_to_2_comon.append(Mj_filter_4N_Phase[row_4N])
            Day_count_MJ_filter_2N_4N.append(Filter_2N_day_count[row_2N] +Filter_4N_day_count[row_4N] )
            count_n = count_n + 1
N_MJ_filter_2N_4N = count_n - 1
#3N to 4N
# Pair households appearing in both phase 3N and phase 4N (no-hood arm only).
MJ_Phase_3N_to_4_comon = []
MJ_Phase_4N_to_3_comon = []
Day_count_MJ_Phase_3N_4N = []
count_n = 0
for row_3N, hh_3N in enumerate(HH_3N):
    if hh_3N == str(-1):
        break
    for row_4N, hh_4N in enumerate(HH_4N):
        if hh_3N == hh_4N:
            MJ_Phase_3N_to_4_comon.append(Mj_3N_Phase[row_3N])
            MJ_Phase_4N_to_3_comon.append(Mj_4N_Phase[row_4N])
            Day_count_MJ_Phase_3N_4N.append(Phase_3N_day_count[row_3N] +Phase_4N_day_count[row_4N] )
            count_n = count_n + 1
N_MJ_Phase_3N_4N = count_n -1
#for filter
MJ_filter_3N_to_4_comon = []
MJ_filter_4N_to_3_comon = []
Day_count_MJ_filter_3N_4N = []
count_n = 0
for row_3N, hh_3N in enumerate(HH_3N):
    if hh_3N == str(-1):
        break
    for row_4N, hh_4N in enumerate(HH_4N):
        if hh_3N == hh_4N:
            MJ_filter_3N_to_4_comon.append(Mj_filter_3N_Phase[row_3N])
            MJ_filter_4N_to_3_comon.append(Mj_filter_4N_Phase[row_4N])
            Day_count_MJ_filter_3N_4N.append(Filter_3N_day_count[row_3N] +Filter_4N_day_count[row_4N] )
            count_n = count_n + 1
N_MJ_filter_3N_4N = count_n - 1
# Pairwise comparison statistics for each pair of phases (phase metric and
# filtered metric), printed as accept/reject messages.
# NOTE(review): ttest_ind is an *independent*-samples t-test while wilcoxon is a
# *paired* test; both are applied to the same matched lists — confirm intent.
# NOTE(review): 'degree' = (N-1)*alpha is not a critical t value from a t table,
# so the printed accept/reject decision is not a standard significance test.
T_stat_1N_2N, P_val_1N_2N = scipy.stats.ttest_ind(MJ_Phase_1N_to_2_comon,MJ_Phase_2N_to_1_comon, axis=0, equal_var=True)
degree_1N_2N = (N_MJ_Phase_1N_2N -1) *Level_of_confidence
if degree_1N_2N < abs(T_stat_1N_2N):
    print('1N and 2N Phase rejects the null', T_stat_1N_2N,'P-value', P_val_1N_2N,'Sample size N', N_MJ_Phase_1N_2N)
else:
    print('1N and 2N Phase accepts the null', T_stat_1N_2N,'P-value', P_val_1N_2N,'Sample size N', N_MJ_Phase_1N_2N)
T_sign_1N_2N, P_sign_1N_2N = scipy.stats.wilcoxon(MJ_Phase_1N_to_2_comon, MJ_Phase_2N_to_1_comon)
T_stat_1N_2N_filter, P_val_1N_2N_filter = scipy.stats.ttest_ind(MJ_filter_1N_to_2_comon,MJ_filter_2N_to_1_comon, axis=0, equal_var=True)
degree_1N_2N_filter = (N_MJ_filter_1N_2N -1) *Level_of_confidence
if degree_1N_2N_filter < abs(T_stat_1N_2N_filter):
    print('1N and 2N Filter rejects the null', T_stat_1N_2N_filter,'P-value', P_val_1N_2N_filter,'Sample size N', N_MJ_filter_1N_2N)
else:
    print('1N and 2N Filter accepts the null', T_stat_1N_2N_filter,'P-value', P_val_1N_2N_filter,'Sample size N', N_MJ_filter_1N_2N)
T_sign_1N_2N_filter, P_sign_1N_2N_filter = scipy.stats.wilcoxon(MJ_filter_1N_to_2_comon, MJ_filter_2N_to_1_comon)
# 1n to 2n HOOOOOOD
T_stat_1H_2H, P_val_1H_2H = scipy.stats.ttest_ind(MJ_Phase_1H_to_2_comon,MJ_Phase_2H_to_1_comon, axis=0, equal_var=True)
degree_1H_2H = (N_MJ_Phase_1H_2H -1) *Level_of_confidence
if degree_1H_2H < abs(T_stat_1H_2H):
    print('1H and 2H Phase rejects the null', T_stat_1H_2H,'P-value', P_val_1H_2H,'Sample size N', N_MJ_Phase_1H_2H)
else:
    print('1H and 2H Phase accepts the null', T_stat_1H_2H,'P-value', P_val_1H_2H,'Sample size N', N_MJ_Phase_1H_2H)
T_sign_1H_2H, P_sign_1H_2H = scipy.stats.wilcoxon(MJ_Phase_1H_to_2_comon, MJ_Phase_2H_to_1_comon)
T_stat_1H_2H_filter, P_val_1H_2H_filter = scipy.stats.ttest_ind(MJ_filter_1H_to_2_comon,MJ_filter_2H_to_1_comon, axis=0, equal_var=True)
degree_1H_2H_filter = (N_MJ_filter_1H_2H -1) *Level_of_confidence
if degree_1H_2H_filter < abs(T_stat_1H_2H_filter):
    print('1H and 2H Filter rejects the null', T_stat_1H_2H_filter,'P-value', P_val_1H_2H_filter,'Sample size N', N_MJ_filter_1H_2H)
else:
    print('1H and 2H Filter accepts the null', T_stat_1H_2H_filter,'P-value', P_val_1H_2H_filter,'Sample size N', N_MJ_filter_1H_2H)
T_sign_1H_2H_filter, P_sign_1H_2H_filter = scipy.stats.wilcoxon(MJ_filter_1H_to_2_comon, MJ_filter_2H_to_1_comon)
T_stat_1N_3N, P_val_1N_3N = scipy.stats.ttest_ind(MJ_Phase_1N_to_3_comon,MJ_Phase_3N_to_1_comon, axis=0, equal_var=True)
degree_1N_3N = (N_MJ_Phase_1N_3N -1) *Level_of_confidence
if degree_1N_3N < abs(T_stat_1N_3N):
    print('1N and 3N Phase rejects the null', T_stat_1N_3N,'P-value', P_val_1N_3N,'Sample size N', N_MJ_Phase_1N_3N)
else:
    print('1N and 3N Phase accepts the null', T_stat_1N_3N,'P-value', P_val_1N_3N,'Sample size N', N_MJ_Phase_1N_3N)
T_sign_1N_3N, P_sign_1N_3N = scipy.stats.wilcoxon(MJ_Phase_1N_to_3_comon, MJ_Phase_3N_to_1_comon)
T_stat_1N_3N_filter, P_val_1N_3N_filter = scipy.stats.ttest_ind(MJ_filter_1N_to_3_comon,MJ_filter_3N_to_1_comon, axis=0, equal_var=True)
degree_1N_3N_filter = (N_MJ_filter_1N_3N -1) *Level_of_confidence
if degree_1N_3N_filter < abs(T_stat_1N_3N_filter):
    print('1N and 3N Filter rejects the null', T_stat_1N_3N_filter,'P-value', P_val_1N_3N_filter,'Sample size N', N_MJ_filter_1N_3N)
else:
    print('1N and 3N Filter accepts the null', T_stat_1N_3N_filter,'P-value', P_val_1N_3N_filter,'Sample size N', N_MJ_filter_1N_3N)
T_sign_1N_3N_filter, P_sign_1N_3N_filter = scipy.stats.wilcoxon(MJ_filter_1N_to_3_comon, MJ_filter_3N_to_1_comon)
# 1n to 3n HOOOOOOD
T_stat_1H_3H, P_val_1H_3H = scipy.stats.ttest_ind(MJ_Phase_1H_to_3_comon,MJ_Phase_3H_to_1_comon, axis=0, equal_var=True)
degree_1H_3H = (N_MJ_Phase_1H_3H -1) *Level_of_confidence
if degree_1H_3H < abs(T_stat_1H_3H):
    print('1H and 3H Phase rejects the null', T_stat_1H_3H,'P-value', P_val_1H_3H,'Sample size N', N_MJ_Phase_1H_3H)
else:
    print('1H and 3H Phase accepts the null', T_stat_1H_3H,'P-value', P_val_1H_3H,'Sample size N', N_MJ_Phase_1H_3H)
T_sign_1H_3H, P_sign_1H_3H = scipy.stats.wilcoxon(MJ_Phase_1H_to_3_comon, MJ_Phase_3H_to_1_comon)
T_stat_1H_3H_filter, P_val_1H_3H_filter = scipy.stats.ttest_ind(MJ_filter_1H_to_3_comon,MJ_filter_3H_to_1_comon, axis=0, equal_var=True)
degree_1H_3H_filter = (N_MJ_filter_1H_3H -1) *Level_of_confidence
if degree_1H_3H_filter < abs(T_stat_1H_3H_filter):
    print('1H and 3H Filter rejects the null', T_stat_1H_3H_filter,'P-value', P_val_1H_3H_filter,'Sample size N', N_MJ_filter_1H_3H)
else:
    print('1H and 3H Filter accepts the null', T_stat_1H_3H_filter,'P-value', P_val_1H_3H_filter,'Sample size N', N_MJ_filter_1H_3H)
T_sign_1H_3H_filter, P_sign_1H_3H_filter = scipy.stats.wilcoxon(MJ_filter_1H_to_3_comon, MJ_filter_3H_to_1_comon)
T_stat_1N_4N, P_val_1N_4N = scipy.stats.ttest_ind(MJ_Phase_1N_to_4_comon,MJ_Phase_4N_to_1_comon, axis=0, equal_var=True)
degree_1N_4N = (N_MJ_Phase_1N_4N -1) *Level_of_confidence
if degree_1N_4N < abs(T_stat_1N_4N):
    print('1N and 4N Phase rejects the null', T_stat_1N_4N,'P-value', P_val_1N_4N,'Sample size N', N_MJ_Phase_1N_4N)
else:
    print('1N and 4N Phase accepts the null', T_stat_1N_4N,'P-value', P_val_1N_4N,'Sample size N', N_MJ_Phase_1N_4N)
T_sign_1N_4N, P_sign_1N_4N = scipy.stats.wilcoxon(MJ_Phase_1N_to_4_comon, MJ_Phase_4N_to_1_comon)
T_stat_1N_4N_filter, P_val_1N_4N_filter = scipy.stats.ttest_ind(MJ_filter_1N_to_4_comon,MJ_filter_4N_to_1_comon, axis=0, equal_var=True)
degree_1N_4N_filter = (N_MJ_filter_1N_4N -1) *Level_of_confidence
if degree_1N_4N_filter < abs(T_stat_1N_4N_filter):
    print('1N and 4N Filter rejects the null', T_stat_1N_4N_filter,'P-value', P_val_1N_4N_filter,'Sample size N', N_MJ_filter_1N_4N)
else:
    print('1N and 4N Filter accepts the null', T_stat_1N_4N_filter,'P-value', P_val_1N_4N_filter,'Sample size N', N_MJ_filter_1N_4N)
T_sign_1N_4N_filter, P_sign_1N_4N_filter = scipy.stats.wilcoxon(MJ_filter_1N_to_4_comon, MJ_filter_4N_to_1_comon)
T_stat_2N_3N, P_val_2N_3N = scipy.stats.ttest_ind(MJ_Phase_2N_to_3_comon,MJ_Phase_3N_to_2_comon, axis=0, equal_var=True)
degree_2N_3N = (N_MJ_Phase_2N_3N -1) *Level_of_confidence
if degree_2N_3N < abs(T_stat_2N_3N):
print('2N and 3N Phase rejects the null', T_stat_2N_3N,'P-value', P_val_2N_3N,'Sample size N', N_MJ_Phase_2N_3N)
else:
print('2N and 3N Phase accepts the null', T_stat_2N_3N,'P-value', P_val_2N_3N,'Sample size N', N_MJ_Phase_2N_3N)
T_sign_2N_3N, P_sign_2N_3N = scipy.stats.wilcoxon(MJ_Phase_2N_to_3_comon, MJ_Phase_3N_to_2_comon)
T_stat_2N_3N_filter, P_val_2N_3N_filter = scipy.stats.ttest_ind(MJ_filter_2N_to_3_comon,MJ_filter_3N_to_2_comon, axis=0, equal_var=True)
degree_2N_3N_filter = (N_MJ_filter_2N_3N -1) *Level_of_confidence
if degree_2N_3N_filter < abs(T_stat_2N_3N_filter):
print('2N and 3N Filter rejects the null', T_stat_2N_3N_filter,'P-value', P_val_2N_3N_filter,'Sample size N', N_MJ_filter_2N_3N)
else:
print('2N and 3N Filter accepts the null', T_stat_2N_3N_filter,'P-value', P_val_2N_3N_filter,'Sample size N', N_MJ_filter_2N_3N)
T_sign_2N_3N_filter, P_sign_2N_3N_filter = scipy.stats.wilcoxon(MJ_filter_2N_to_3_comon, MJ_filter_3N_to_2_comon)
# 2n to 3n HOOOOOOD
T_stat_2H_3H, P_val_2H_3H = scipy.stats.ttest_ind(MJ_Phase_2H_to_3_comon,MJ_Phase_3H_to_2_comon, axis=0, equal_var=True)
degree_2H_3H = (N_MJ_Phase_2H_3H -1) *Level_of_confidence
if degree_2H_3H < abs(T_stat_2H_3H):
print('2H and 3H Phase rejects the null', T_stat_2H_3H,'P-value', P_val_2H_3H,'Sample size N', N_MJ_Phase_2H_3H)
else:
print('2H and 3H Phase accepts the null', T_stat_2H_3H,'P-value', P_val_2H_3H,'Sample size N', N_MJ_Phase_2H_3H)
T_sign_2H_3H, P_sign_2H_3H = scipy.stats.wilcoxon(MJ_Phase_2H_to_3_comon, MJ_Phase_3H_to_2_comon)
T_stat_2H_3H_filter, P_val_2H_3H_filter = scipy.stats.ttest_ind(MJ_filter_2H_to_3_comon,MJ_filter_3H_to_2_comon, axis=0, equal_var=True)
degree_2H_3H_filter = (N_MJ_filter_2H_3H -1) *Level_of_confidence
if degree_2H_3H_filter < abs(T_stat_2H_3H_filter):
print('2H and 3H Filter rejects the null', T_stat_2H_3H_filter,'P-value', P_val_2H_3H_filter,'Sample size N', N_MJ_filter_2H_3H)
else:
print('2H and 3H Filter accepts the null', T_stat_2H_3H_filter,'P-value', P_val_2H_3H_filter,'Sample size N', N_MJ_filter_2H_3H)
T_sign_2H_3H_filter, P_sign_2H_3H_filter = scipy.stats.wilcoxon(MJ_filter_2H_to_3_comon, MJ_filter_3H_to_2_comon)
#2N to 4N
T_stat_2N_4N, P_val_2N_4N = scipy.stats.ttest_ind(MJ_Phase_2N_to_4_comon,MJ_Phase_4N_to_2_comon, axis=0, equal_var=True)
degree_2N_4N = (N_MJ_Phase_2N_4N -1) *Level_of_confidence
if degree_2N_4N < abs(T_stat_2N_4N):
print('2N and 4N Phase rejects the null', T_stat_2N_4N,'P-value', P_val_2N_4N,'Sample size N', N_MJ_Phase_2N_4N)
else:
print('2N and 4N Phase accepts the null', T_stat_2N_4N,'P-value', P_val_2N_4N,'Sample size N', N_MJ_Phase_2N_4N)
T_sign_2N_4N, P_sign_2N_4N = scipy.stats.wilcoxon(MJ_Phase_2N_to_4_comon, MJ_Phase_4N_to_2_comon)
T_stat_2N_4N_filter, P_val_2N_4N_filter = scipy.stats.ttest_ind(MJ_filter_2N_to_4_comon,MJ_filter_4N_to_2_comon, axis=0, equal_var=True)
degree_2N_4N_filter = (N_MJ_filter_2N_4N -1) *Level_of_confidence
if degree_2N_4N_filter < abs(T_stat_2N_4N_filter):
print('2N and 4N Filter rejects the null', T_stat_2N_4N_filter,'P-value', P_val_2N_4N_filter,'Sample size N', N_MJ_filter_2N_4N)
else:
print('2N and 4N Filter accepts the null', T_stat_2N_4N_filter,'P-value', P_val_2N_4N_filter,'Sample size N', N_MJ_filter_2N_4N)
T_sign_2N_4N_filter, P_sign_2N_4N_filter = scipy.stats.wilcoxon(MJ_filter_2N_to_4_comon, MJ_filter_4N_to_2_comon)
#3N to 4N
# Unfiltered phase comparison, 3N vs 4N: t-test plus Wilcoxon signed-rank.
T_stat_3N_4N, P_val_3N_4N = scipy.stats.ttest_ind(
    MJ_Phase_3N_to_4_comon, MJ_Phase_4N_to_3_comon, axis=0, equal_var=True
)
degree_3N_4N = (N_MJ_Phase_3N_4N - 1) * Level_of_confidence
if abs(T_stat_3N_4N) > degree_3N_4N:
    print('3N and 4N Phase rejects the null', T_stat_3N_4N, 'P-value',
          P_val_3N_4N, 'Sample size N', N_MJ_Phase_3N_4N)
else:
    print('3N and 4N Phase accepts the null', T_stat_3N_4N, 'P-value',
          P_val_3N_4N, 'Sample size N', N_MJ_Phase_3N_4N)
T_sign_3N_4N, P_sign_3N_4N = scipy.stats.wilcoxon(
    MJ_Phase_3N_to_4_comon, MJ_Phase_4N_to_3_comon
)
# Same 3N-vs-4N comparison restricted to the filtered days.
T_stat_3N_4N_filter, P_val_3N_4N_filter = scipy.stats.ttest_ind(
    MJ_filter_3N_to_4_comon, MJ_filter_4N_to_3_comon, axis=0, equal_var=True
)
degree_3N_4N_filter = (N_MJ_filter_3N_4N - 1) * Level_of_confidence
if abs(T_stat_3N_4N_filter) > degree_3N_4N_filter:
    print('3N and 4N Filter rejects the null', T_stat_3N_4N_filter, 'P-value',
          P_val_3N_4N_filter, 'Sample size N', N_MJ_filter_3N_4N)
else:
    print('3N and 4N Filter accepts the null', T_stat_3N_4N_filter, 'P-value',
          P_val_3N_4N_filter, 'Sample size N', N_MJ_filter_3N_4N)
T_sign_3N_4N_filter, P_sign_3N_4N_filter = scipy.stats.wilcoxon(
    MJ_filter_3N_to_4_comon, MJ_filter_4N_to_3_comon
)
# Unfiltered no-hood pairwise results collected in a fixed pair order:
# 1N-2N, 1N-3N, 1N-4N, 2N-3N, 3N-4N, 2N-4N (matches the table rows below).
whole_t_stat = [T_stat_1N_2N, T_stat_1N_3N, T_stat_1N_4N, T_stat_2N_3N, T_stat_3N_4N, T_stat_2N_4N]
whole_p_test = [P_val_1N_2N, P_val_1N_3N, P_val_1N_4N, P_val_2N_3N, P_val_3N_4N, P_val_2N_4N]
Whole_sample = [N_MJ_Phase_1N_2N, N_MJ_Phase_1N_3N, N_MJ_Phase_1N_4N, N_MJ_Phase_2N_3N, N_MJ_Phase_3N_4N, N_MJ_Phase_2N_4N]
Whole_degree = [degree_1N_2N, degree_1N_3N, degree_1N_4N, degree_2N_3N, degree_3N_4N, degree_2N_4N]
Whole_sighn_t_stat = [T_sign_1N_2N, T_sign_1N_3N, T_sign_1N_4N, T_sign_2N_3N, T_sign_3N_4N, T_sign_2N_4N]
# BUG FIX: the last entry was P_sign_3N_4N twice (copy-paste), so the 2N-4N
# sign-test p-value was never reported; use P_sign_2N_4N per the pair order.
Whole_sighn_p_test = [P_sign_1N_2N, P_sign_1N_3N, P_sign_1N_4N, P_sign_2N_3N, P_sign_3N_4N, P_sign_2N_4N]
STD_1 = [np.std(MJ_Phase_1N_to_2_comon), np.std(MJ_Phase_1N_to_3_comon),np.std(MJ_Phase_1N_to_4_comon),np.std(MJ_Phase_2N_to_3_comon),np.std(MJ_Phase_3N_to_4_comon),np.std(MJ_Phase_2N_to_4_comon)]
Median_1 = [stat.median(MJ_Phase_1N_to_2_comon), stat.median(MJ_Phase_1N_to_3_comon),stat.median(MJ_Phase_1N_to_4_comon),stat.median(MJ_Phase_2N_to_3_comon),stat.median(MJ_Phase_3N_to_4_comon), stat.median(MJ_Phase_2N_to_4_comon)]
Mean_1 = [np.average(MJ_Phase_1N_to_2_comon),np.average(MJ_Phase_1N_to_3_comon),np.average(MJ_Phase_1N_to_4_comon),np.average(MJ_Phase_2N_to_3_comon),np.average(MJ_Phase_3N_to_4_comon),np.average(MJ_Phase_2N_to_4_comon)]
STD_2 = [np.std(MJ_Phase_2N_to_1_comon), np.std(MJ_Phase_3N_to_1_comon),np.std(MJ_Phase_4N_to_1_comon),np.std(MJ_Phase_3N_to_2_comon),np.std(MJ_Phase_4N_to_3_comon),np.std(MJ_Phase_4N_to_2_comon)]
Median_2 = [stat.median(MJ_Phase_2N_to_1_comon), stat.median(MJ_Phase_3N_to_1_comon),stat.median(MJ_Phase_4N_to_1_comon),stat.median(MJ_Phase_3N_to_2_comon),stat.median(MJ_Phase_4N_to_3_comon),stat.median(MJ_Phase_4N_to_2_comon)]
Mean_2 = [np.average(MJ_Phase_2N_to_1_comon),np.average(MJ_Phase_3N_to_1_comon),np.average(MJ_Phase_4N_to_1_comon),np.average(MJ_Phase_3N_to_2_comon),np.average(MJ_Phase_4N_to_3_comon),np.average(MJ_Phase_4N_to_2_comon)]
# Fraction of phase days that survive the filter, per phase.
No_hood_percent_days_Filtered = [
    sum(Filter_1N_day_count) / sum(Phase_1N_day_count),
    sum(Filter_2N_day_count) / sum(Phase_2N_day_count),
    sum(Filter_3N_day_count) / sum(Phase_3N_day_count),
    sum(Filter_4N_day_count) / sum(Phase_4N_day_count),
]
hood_percent_days_Filtered = [
    sum(Filter_1H_day_count) / sum(Phase_1H_day_count),
    sum(Filter_2H_day_count) / sum(Phase_2H_day_count),
    sum(Filter_3H_day_count) / sum(Phase_3H_day_count),
]
# NOTE(review): 'Percentatges' typo kept on purpose — it is a CSV column name.
Hood_percentage = {'Phase': ['1H', '2H', '3H'],
                   'Percentatges of hood filter': hood_percent_days_Filtered}
No_Hood_percentage = {'Phase': ['1N', '2N', '3N', '4N'],
                      'Percentatges of No hood filter': No_hood_percent_days_Filtered}
df_percent_hood = pd.DataFrame(Hood_percentage)
df_percent_No_hood = pd.DataFrame(No_Hood_percentage)
# Hood pairwise results, pair order 1H-2H, 1H-3H, 2H-3H.
whole_t_stat_H = [T_stat_1H_2H, T_stat_1H_3H, T_stat_2H_3H]
whole_p_test_H = [P_val_1H_2H, P_val_1H_3H, P_val_2H_3H]
Whole_sample_H = [N_MJ_Phase_1H_2H, N_MJ_Phase_1H_3H, N_MJ_Phase_2H_3H]
Whole_degree_H = [degree_1H_2H, degree_1H_3H, degree_2H_3H]
Whole_sighn_t_stat_H = [T_sign_1H_2H, T_sign_1H_3H, T_sign_2H_3H]
Whole_sighn_p_test_H = [P_sign_1H_2H, P_sign_1H_3H, P_sign_2H_3H]
# Spread / centre statistics of each hood "common days" series (sides as above).
_h_side1 = [MJ_Phase_1H_to_2_comon, MJ_Phase_1H_to_3_comon, MJ_Phase_2H_to_3_comon]
_h_side2 = [MJ_Phase_2H_to_1_comon, MJ_Phase_3H_to_1_comon, MJ_Phase_3H_to_2_comon]
STD_1_H = [np.std(series) for series in _h_side1]
Median_1_H = [stat.median(series) for series in _h_side1]
Mean_1_H = [np.average(series) for series in _h_side1]
STD_2_H = [np.std(series) for series in _h_side2]
Median_2_H = [stat.median(series) for series in _h_side2]
Mean_2_H = [np.average(series) for series in _h_side2]
# Unfiltered no-hood summary table.
# NOTE(review): 'P Vaue' / 'Deggree' typos kept — they are CSV column names.
Non_filtered_no_hood = {
    'Phase': ['1n-2N', '1n-3n', '1n-4n', '2n-3n', '3n-4n', '2n-4n'],
    'T-statistic': whole_t_stat,
    'P Value': whole_p_test,
    'T-statistic-Sign-Test': Whole_sighn_t_stat,
    'P Vaue-Sign Test': Whole_sighn_p_test,
    'Deggree of Confidence': Whole_degree,
    'Sample Size': Whole_sample,
    'Std _1': STD_1, 'median _1': Median_1, 'mean _1': Mean_1,
    'Std _2': STD_2, 'median _2': Median_2, 'mean _2': Mean_2,
}
# The explicit column list equals the dict's insertion order.
df_Non_filtered_no_hood = pd.DataFrame(Non_filtered_no_hood,
                                       columns=list(Non_filtered_no_hood))
# Unfiltered hood summary table (same layout as the no-hood one).
Non_filtered_hood = {
    'Phase _Hood': ['1H-2H', '1H-3H', '2H-3H'],
    'T-statistic': whole_t_stat_H,
    'P Value': whole_p_test_H,
    'T-statistic-Sign-Test': Whole_sighn_t_stat_H,
    'P Vaue-Sign Test': Whole_sighn_p_test_H,
    'Deggree of Confidence': Whole_degree_H,
    'Sample Size': Whole_sample_H,
    'Std _1': STD_1_H, 'median _1': Median_1_H, 'mean _1': Mean_1_H,
    'Std _2': STD_2_H, 'median _2': Median_2_H, 'mean _2': Mean_2_H,
}
df_Non_filtered_hood = pd.DataFrame(Non_filtered_hood,
                                    columns=list(Non_filtered_hood))
# Filtered no-hood pairwise results, same pair order as the unfiltered table.
whole_t_stat_filter = [T_stat_1N_2N_filter, T_stat_1N_3N_filter, T_stat_1N_4N_filter,
                       T_stat_2N_3N_filter, T_stat_3N_4N_filter, T_stat_2N_4N_filter]
whole_p_test_filter = [P_val_1N_2N_filter, P_val_1N_3N_filter, P_val_1N_4N_filter,
                       P_val_2N_3N_filter, P_val_3N_4N_filter, P_val_2N_4N_filter]
# BUG FIX: the filtered table previously reused the *unfiltered* sample sizes
# (N_MJ_Phase_*) and thresholds (degree_*); the significance checks above used
# the filtered counterparts, so report those here.
Whole_sample_filter = [N_MJ_filter_1N_2N, N_MJ_filter_1N_3N, N_MJ_filter_1N_4N,
                       N_MJ_filter_2N_3N, N_MJ_filter_3N_4N, N_MJ_filter_2N_4N]
Whole_degree_filter = [degree_1N_2N_filter, degree_1N_3N_filter, degree_1N_4N_filter,
                       degree_2N_3N_filter, degree_3N_4N_filter, degree_2N_4N_filter]
Whole_sighn_t_stat_filter = [T_sign_1N_2N_filter, T_sign_1N_3N_filter, T_sign_1N_4N_filter,
                             T_sign_2N_3N_filter, T_sign_3N_4N_filter, T_sign_2N_4N_filter]
Whole_sighn_p_test_filter = [P_sign_1N_2N_filter, P_sign_1N_3N_filter, P_sign_1N_4N_filter,
                             P_sign_2N_3N_filter, P_sign_3N_4N_filter, P_sign_2N_4N_filter]
# Filtered hood pairwise results, pair order 1H-2H, 1H-3H, 2H-3H.
whole_t_stat_filter_H = [T_stat_1H_2H_filter, T_stat_1H_3H_filter, T_stat_2H_3H_filter]
whole_p_test_filter_H = [P_val_1H_2H_filter, P_val_1H_3H_filter, P_val_2H_3H_filter]
# BUG FIX: as with the no-hood lists, the filtered hood table previously reused
# the unfiltered N_MJ_Phase_* / degree_* values; report the filtered ones that
# the filter significance checks actually used.
Whole_sample_filter_H = [N_MJ_filter_1H_2H, N_MJ_filter_1H_3H, N_MJ_filter_2H_3H]
Whole_degree_filter_H = [degree_1H_2H_filter, degree_1H_3H_filter, degree_2H_3H_filter]
Whole_sighn_t_stat_filter_H = [T_sign_1H_2H_filter, T_sign_1H_3H_filter, T_sign_2H_3H_filter]
Whole_sighn_p_test_filter_H = [P_sign_1H_2H_filter, P_sign_1H_3H_filter, P_sign_2H_3H_filter]
# Filtered no-hood summary table (no per-series spread/centre columns).
filtered_No_hood = {
    'Phase Filtered ': ['1n-2N - Filter', '1n-3n - Filter', '1n-4n - Filter',
                        '2n-3n - Filter', '3n-4n - Filter', '2n-4n - Filter'],
    'T-statistic': whole_t_stat_filter,
    'P Value': whole_p_test_filter,
    'T-statistic-Sign-Test': Whole_sighn_t_stat_filter,
    'P Vaue-Sign Test': Whole_sighn_p_test_filter,
    'Deggree of Confidence': Whole_degree_filter,
    'Sample Size': Whole_sample_filter,
}
df_filtered_No_hood = pd.DataFrame(filtered_No_hood,
                                   columns=list(filtered_No_hood))
# Filtered hood summary table.
filtered_hood = {
    'Phase Filtered HOOD': ['1H-2H - Filter', '1H-3H - Filter', '2H-3H - Filter'],
    'T-statistic': whole_t_stat_filter_H,
    'P Value': whole_p_test_filter_H,
    'T-statistic-Sign-Test': Whole_sighn_t_stat_filter_H,
    'P Vaue-Sign Test': Whole_sighn_p_test_filter_H,
    'Deggree of Confidence': Whole_degree_filter_H,
    'Sample Size': Whole_sample_filter_H,
}
df_filtered_hood = pd.DataFrame(filtered_hood, columns=list(filtered_hood))
# Per-phase fuel summaries (no hood): median and mean of the per-phase series,
# both raw and filtered. One small DataFrame per statistic.
Kj_per_sae_no_hood = {
    'median': [np.median(Mj_1N_Phase), np.median(Mj_2N_Phase),
               np.median(Mj_3N_Phase), np.median(Mj_4N_Phase)],
    'Phase': ['1n', '2n', '3n', '4n'],
}
df_Kj_per_sae_no_hood = pd.DataFrame(Kj_per_sae_no_hood)
# BUG FIX: this debug line computed np.mean but labelled the value "median";
# the label now matches the statistic that is actually printed.
print(' this is the mean filter for 1N----=-==-=-=-=-=-=-=-', np.mean(Mj_filter_1N_Phase))
Kj_per_sae_filter_no_hood = {
    'median filter': [np.median(Mj_filter_1N_Phase), np.median(Mj_filter_2N_Phase),
                      np.median(Mj_filter_3N_Phase), np.median(Mj_filter_4N_Phase)],
    'Phase': ['1n', '2n', '3n', '4n'],
}
df_Kj_per_sae_filter_no_hood = pd.DataFrame(Kj_per_sae_filter_no_hood)
Kj_per_sae_mean_filter_no_hood = {
    'Mean filter': [np.mean(Mj_filter_1N_Phase), np.mean(Mj_filter_2N_Phase),
                    np.mean(Mj_filter_3N_Phase), np.mean(Mj_filter_4N_Phase)],
    'Phase': ['1n', '2n', '3n', '4n'],
}
df_Kj_per_sae_mean_filter_no_hood = pd.DataFrame(Kj_per_sae_mean_filter_no_hood)
Kj_per_sae_mean_no_hood = {
    'mean': [np.mean(Mj_1N_Phase), np.mean(Mj_2N_Phase),
             np.mean(Mj_3N_Phase), np.mean(Mj_4N_Phase)],
    'Phase': ['1n', '2n', '3n', '4n'],
}
df_Kj_per_sae_mean_no_hood = pd.DataFrame(Kj_per_sae_mean_no_hood)
###hood
# Per-phase fuel summaries for the hood phases: median and mean, raw and
# filtered, each as its own small DataFrame.
print('Hood section')
Kj_per_sae_Hood = {
    'median': [np.median(Mj_1H_Phase), np.median(Mj_2H_Phase), np.median(Mj_3H_Phase)],
    'Phase': ['1H', '2H', '3H'],
}
df_Kj_per_sae_Hood = pd.DataFrame(Kj_per_sae_Hood)
Kj_per_sae_filter_Hood = {
    'median filter': [np.median(Mj_filter_1H_Phase), np.median(Mj_filter_2H_Phase),
                      np.median(Mj_filter_3H_Phase)],
    'Phase': ['1H', '2H', '3H'],
}
df_Kj_per_sae_filter_Hood = pd.DataFrame(Kj_per_sae_filter_Hood)
Kj_per_sae_mean_filter_Hood = {
    'Mean filter': [np.mean(Mj_filter_1H_Phase), np.mean(Mj_filter_2H_Phase),
                    np.mean(Mj_filter_3H_Phase)],
    'Phase': ['1H', '2H', '3H'],
}
# NOTE(review): name lacks "sae" (df_Kj_per_mean_filter_Hood) — kept, since the
# CSV writer below refers to it by this name.
df_Kj_per_mean_filter_Hood = pd.DataFrame(Kj_per_sae_mean_filter_Hood)
Kj_per_sae_mean_Hood = {
    'mean': [np.mean(Mj_1H_Phase), np.mean(Mj_2H_Phase), np.mean(Mj_3H_Phase)],
    'Phase': ['1H', '2H', '3H'],
}
df_Kj_per_sae_mean_Hood = pd.DataFrame(Kj_per_sae_mean_Hood)
# Append every summary table, in order, to a single CSV report file.
# NOTE(review): hard-coded absolute Windows path, and mode='a' means the file
# grows on every run (headers repeated per table) — confirm that is intended.
pATH = "C:/Users/gvros/Box/OSU, CSC, CQC Project files/P_TEST_NO_HOOD_MJ_DAY_CHECH 2N_3N.csv"
for _table in (
    df_Non_filtered_no_hood,
    df_filtered_No_hood,
    df_Non_filtered_hood,
    df_filtered_hood,
    df_percent_hood,
    df_percent_No_hood,
    df_Kj_per_sae_no_hood,
    df_Kj_per_sae_filter_no_hood,
    df_Kj_per_sae_mean_filter_no_hood,
    df_Kj_per_sae_mean_no_hood,
    df_Kj_per_sae_Hood,
    df_Kj_per_sae_filter_Hood,
    df_Kj_per_sae_mean_Hood,
    df_Kj_per_mean_filter_Hood,
):
    _table.to_csv(pATH, index=False, mode='a')
# NOTE(review): bare expressions with no effect in a script — presumably
# leftovers from a notebook cell that echoed its last expression; safe to drop.
MJ_Phase_1N_to_3_comon
Mj_filter_3N_Phase
| [
"seaborn.displot",
"numpy.mean",
"numpy.median",
"pandas.read_csv",
"numpy.average",
"statistics.median",
"scipy.stats.wilcoxon",
"scipy.stats.ttest_ind",
"numpy.std",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((1581, 1609), 'pandas.read_csv', 'pd.read_csv', (['No_hood_MJ_path'], {}), '(No_hood_MJ_path)\n', (1592, 1609), True, 'import pandas as pd\n'), ((1620, 1645), 'pandas.read_csv', 'pd.read_csv', (['Hood_MJ_Path'], {}), '(Hood_MJ_Path)\n', (1631, 1645), True, 'import pandas as pd\n'), ((18233, 18331), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['MJ_Phase_1N_to_2_comon', 'MJ_Phase_2N_to_1_comon'], {'axis': '(0)', 'equal_var': '(True)'}), '(MJ_Phase_1N_to_2_comon, MJ_Phase_2N_to_1_comon, axis=\n 0, equal_var=True)\n', (18254, 18331), False, 'import scipy\n'), ((18690, 18758), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_Phase_1N_to_2_comon', 'MJ_Phase_2N_to_1_comon'], {}), '(MJ_Phase_1N_to_2_comon, MJ_Phase_2N_to_1_comon)\n', (18710, 18758), False, 'import scipy\n'), ((18802, 18901), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['MJ_filter_1N_to_2_comon', 'MJ_filter_2N_to_1_comon'], {'axis': '(0)', 'equal_var': '(True)'}), '(MJ_filter_1N_to_2_comon, MJ_filter_2N_to_1_comon,\n axis=0, equal_var=True)\n', (18823, 18901), False, 'import scipy\n'), ((19329, 19399), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_filter_1N_to_2_comon', 'MJ_filter_2N_to_1_comon'], {}), '(MJ_filter_1N_to_2_comon, MJ_filter_2N_to_1_comon)\n', (19349, 19399), False, 'import scipy\n'), ((19450, 19548), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['MJ_Phase_1H_to_2_comon', 'MJ_Phase_2H_to_1_comon'], {'axis': '(0)', 'equal_var': '(True)'}), '(MJ_Phase_1H_to_2_comon, MJ_Phase_2H_to_1_comon, axis=\n 0, equal_var=True)\n', (19471, 19548), False, 'import scipy\n'), ((19907, 19975), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_Phase_1H_to_2_comon', 'MJ_Phase_2H_to_1_comon'], {}), '(MJ_Phase_1H_to_2_comon, MJ_Phase_2H_to_1_comon)\n', (19927, 19975), False, 'import scipy\n'), ((20019, 20118), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['MJ_filter_1H_to_2_comon', 'MJ_filter_2H_to_1_comon'], {'axis': '(0)', 'equal_var': '(True)'}), 
'(MJ_filter_1H_to_2_comon, MJ_filter_2H_to_1_comon,\n axis=0, equal_var=True)\n', (20040, 20118), False, 'import scipy\n'), ((20546, 20616), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_filter_1H_to_2_comon', 'MJ_filter_2H_to_1_comon'], {}), '(MJ_filter_1H_to_2_comon, MJ_filter_2H_to_1_comon)\n', (20566, 20616), False, 'import scipy\n'), ((20649, 20747), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['MJ_Phase_1N_to_3_comon', 'MJ_Phase_3N_to_1_comon'], {'axis': '(0)', 'equal_var': '(True)'}), '(MJ_Phase_1N_to_3_comon, MJ_Phase_3N_to_1_comon, axis=\n 0, equal_var=True)\n', (20670, 20747), False, 'import scipy\n'), ((21106, 21174), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_Phase_1N_to_3_comon', 'MJ_Phase_3N_to_1_comon'], {}), '(MJ_Phase_1N_to_3_comon, MJ_Phase_3N_to_1_comon)\n', (21126, 21174), False, 'import scipy\n'), ((21218, 21317), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['MJ_filter_1N_to_3_comon', 'MJ_filter_3N_to_1_comon'], {'axis': '(0)', 'equal_var': '(True)'}), '(MJ_filter_1N_to_3_comon, MJ_filter_3N_to_1_comon,\n axis=0, equal_var=True)\n', (21239, 21317), False, 'import scipy\n'), ((21745, 21815), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_filter_1N_to_3_comon', 'MJ_filter_3N_to_1_comon'], {}), '(MJ_filter_1N_to_3_comon, MJ_filter_3N_to_1_comon)\n', (21765, 21815), False, 'import scipy\n'), ((21866, 21964), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['MJ_Phase_1H_to_3_comon', 'MJ_Phase_3H_to_1_comon'], {'axis': '(0)', 'equal_var': '(True)'}), '(MJ_Phase_1H_to_3_comon, MJ_Phase_3H_to_1_comon, axis=\n 0, equal_var=True)\n', (21887, 21964), False, 'import scipy\n'), ((22323, 22391), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_Phase_1H_to_3_comon', 'MJ_Phase_3H_to_1_comon'], {}), '(MJ_Phase_1H_to_3_comon, MJ_Phase_3H_to_1_comon)\n', (22343, 22391), False, 'import scipy\n'), ((22435, 22534), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['MJ_filter_1H_to_3_comon', 
'MJ_filter_3H_to_1_comon'], {'axis': '(0)', 'equal_var': '(True)'}), '(MJ_filter_1H_to_3_comon, MJ_filter_3H_to_1_comon,\n axis=0, equal_var=True)\n', (22456, 22534), False, 'import scipy\n'), ((22962, 23032), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_filter_1H_to_3_comon', 'MJ_filter_3H_to_1_comon'], {}), '(MJ_filter_1H_to_3_comon, MJ_filter_3H_to_1_comon)\n', (22982, 23032), False, 'import scipy\n'), ((23065, 23163), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['MJ_Phase_1N_to_4_comon', 'MJ_Phase_4N_to_1_comon'], {'axis': '(0)', 'equal_var': '(True)'}), '(MJ_Phase_1N_to_4_comon, MJ_Phase_4N_to_1_comon, axis=\n 0, equal_var=True)\n', (23086, 23163), False, 'import scipy\n'), ((23523, 23591), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_Phase_1N_to_4_comon', 'MJ_Phase_4N_to_1_comon'], {}), '(MJ_Phase_1N_to_4_comon, MJ_Phase_4N_to_1_comon)\n', (23543, 23591), False, 'import scipy\n'), ((23639, 23738), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['MJ_filter_1N_to_4_comon', 'MJ_filter_4N_to_1_comon'], {'axis': '(0)', 'equal_var': '(True)'}), '(MJ_filter_1N_to_4_comon, MJ_filter_4N_to_1_comon,\n axis=0, equal_var=True)\n', (23660, 23738), False, 'import scipy\n'), ((24166, 24236), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_filter_1N_to_4_comon', 'MJ_filter_4N_to_1_comon'], {}), '(MJ_filter_1N_to_4_comon, MJ_filter_4N_to_1_comon)\n', (24186, 24236), False, 'import scipy\n'), ((24272, 24370), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['MJ_Phase_2N_to_3_comon', 'MJ_Phase_3N_to_2_comon'], {'axis': '(0)', 'equal_var': '(True)'}), '(MJ_Phase_2N_to_3_comon, MJ_Phase_3N_to_2_comon, axis=\n 0, equal_var=True)\n', (24293, 24370), False, 'import scipy\n'), ((24729, 24797), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_Phase_2N_to_3_comon', 'MJ_Phase_3N_to_2_comon'], {}), '(MJ_Phase_2N_to_3_comon, MJ_Phase_3N_to_2_comon)\n', (24749, 24797), False, 'import scipy\n'), ((24841, 24940), 'scipy.stats.ttest_ind', 
'scipy.stats.ttest_ind', (['MJ_filter_2N_to_3_comon', 'MJ_filter_3N_to_2_comon'], {'axis': '(0)', 'equal_var': '(True)'}), '(MJ_filter_2N_to_3_comon, MJ_filter_3N_to_2_comon,\n axis=0, equal_var=True)\n', (24862, 24940), False, 'import scipy\n'), ((25368, 25438), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_filter_2N_to_3_comon', 'MJ_filter_3N_to_2_comon'], {}), '(MJ_filter_2N_to_3_comon, MJ_filter_3N_to_2_comon)\n', (25388, 25438), False, 'import scipy\n'), ((25494, 25592), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['MJ_Phase_2H_to_3_comon', 'MJ_Phase_3H_to_2_comon'], {'axis': '(0)', 'equal_var': '(True)'}), '(MJ_Phase_2H_to_3_comon, MJ_Phase_3H_to_2_comon, axis=\n 0, equal_var=True)\n', (25515, 25592), False, 'import scipy\n'), ((25951, 26019), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_Phase_2H_to_3_comon', 'MJ_Phase_3H_to_2_comon'], {}), '(MJ_Phase_2H_to_3_comon, MJ_Phase_3H_to_2_comon)\n', (25971, 26019), False, 'import scipy\n'), ((26063, 26162), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['MJ_filter_2H_to_3_comon', 'MJ_filter_3H_to_2_comon'], {'axis': '(0)', 'equal_var': '(True)'}), '(MJ_filter_2H_to_3_comon, MJ_filter_3H_to_2_comon,\n axis=0, equal_var=True)\n', (26084, 26162), False, 'import scipy\n'), ((26590, 26660), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_filter_2H_to_3_comon', 'MJ_filter_3H_to_2_comon'], {}), '(MJ_filter_2H_to_3_comon, MJ_filter_3H_to_2_comon)\n', (26610, 26660), False, 'import scipy\n'), ((26701, 26799), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['MJ_Phase_2N_to_4_comon', 'MJ_Phase_4N_to_2_comon'], {'axis': '(0)', 'equal_var': '(True)'}), '(MJ_Phase_2N_to_4_comon, MJ_Phase_4N_to_2_comon, axis=\n 0, equal_var=True)\n', (26722, 26799), False, 'import scipy\n'), ((27158, 27226), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_Phase_2N_to_4_comon', 'MJ_Phase_4N_to_2_comon'], {}), '(MJ_Phase_2N_to_4_comon, MJ_Phase_4N_to_2_comon)\n', (27178, 27226), False, 'import scipy\n'), 
((27276, 27375), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['MJ_filter_2N_to_4_comon', 'MJ_filter_4N_to_2_comon'], {'axis': '(0)', 'equal_var': '(True)'}), '(MJ_filter_2N_to_4_comon, MJ_filter_4N_to_2_comon,\n axis=0, equal_var=True)\n', (27297, 27375), False, 'import scipy\n'), ((27803, 27873), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_filter_2N_to_4_comon', 'MJ_filter_4N_to_2_comon'], {}), '(MJ_filter_2N_to_4_comon, MJ_filter_4N_to_2_comon)\n', (27823, 27873), False, 'import scipy\n'), ((27918, 28016), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['MJ_Phase_3N_to_4_comon', 'MJ_Phase_4N_to_3_comon'], {'axis': '(0)', 'equal_var': '(True)'}), '(MJ_Phase_3N_to_4_comon, MJ_Phase_4N_to_3_comon, axis=\n 0, equal_var=True)\n', (27939, 28016), False, 'import scipy\n'), ((28375, 28443), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_Phase_3N_to_4_comon', 'MJ_Phase_4N_to_3_comon'], {}), '(MJ_Phase_3N_to_4_comon, MJ_Phase_4N_to_3_comon)\n', (28395, 28443), False, 'import scipy\n'), ((28493, 28592), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['MJ_filter_3N_to_4_comon', 'MJ_filter_4N_to_3_comon'], {'axis': '(0)', 'equal_var': '(True)'}), '(MJ_filter_3N_to_4_comon, MJ_filter_4N_to_3_comon,\n axis=0, equal_var=True)\n', (28514, 28592), False, 'import scipy\n'), ((29020, 29090), 'scipy.stats.wilcoxon', 'scipy.stats.wilcoxon', (['MJ_filter_3N_to_4_comon', 'MJ_filter_4N_to_3_comon'], {}), '(MJ_filter_3N_to_4_comon, MJ_filter_4N_to_3_comon)\n', (29040, 29090), False, 'import scipy\n'), ((31657, 31686), 'pandas.DataFrame', 'pd.DataFrame', (['Hood_percentage'], {}), '(Hood_percentage)\n', (31669, 31686), True, 'import pandas as pd\n'), ((31708, 31740), 'pandas.DataFrame', 'pd.DataFrame', (['No_Hood_percentage'], {}), '(No_Hood_percentage)\n', (31720, 31740), True, 'import pandas as pd\n'), ((33269, 33513), 'pandas.DataFrame', 'pd.DataFrame', (['Non_filtered_no_hood'], {'columns': "['Phase', 'T-statistic', 'P Value', 'T-statistic-Sign-Test',\n 
'P Vaue-Sign Test', 'Deggree of Confidence', 'Sample Size', 'Std _1',\n 'median _1', 'mean _1', 'Std _2', 'median _2', 'mean _2']"}), "(Non_filtered_no_hood, columns=['Phase', 'T-statistic',\n 'P Value', 'T-statistic-Sign-Test', 'P Vaue-Sign Test',\n 'Deggree of Confidence', 'Sample Size', 'Std _1', 'median _1',\n 'mean _1', 'Std _2', 'median _2', 'mean _2'])\n", (33281, 33513), True, 'import pandas as pd\n'), ((34007, 34254), 'pandas.DataFrame', 'pd.DataFrame', (['Non_filtered_hood'], {'columns': "['Phase _Hood', 'T-statistic', 'P Value', 'T-statistic-Sign-Test',\n 'P Vaue-Sign Test', 'Deggree of Confidence', 'Sample Size', 'Std _1',\n 'median _1', 'mean _1', 'Std _2', 'median _2', 'mean _2']"}), "(Non_filtered_hood, columns=['Phase _Hood', 'T-statistic',\n 'P Value', 'T-statistic-Sign-Test', 'P Vaue-Sign Test',\n 'Deggree of Confidence', 'Sample Size', 'Std _1', 'median _1',\n 'mean _1', 'Std _2', 'median _2', 'mean _2'])\n", (34019, 34254), True, 'import pandas as pd\n'), ((36072, 36250), 'pandas.DataFrame', 'pd.DataFrame', (['filtered_No_hood'], {'columns': "['Phase Filtered ', 'T-statistic', 'P Value', 'T-statistic-Sign-Test',\n 'P Vaue-Sign Test', 'Deggree of Confidence', 'Sample Size']"}), "(filtered_No_hood, columns=['Phase Filtered ', 'T-statistic',\n 'P Value', 'T-statistic-Sign-Test', 'P Vaue-Sign Test',\n 'Deggree of Confidence', 'Sample Size'])\n", (36084, 36250), True, 'import pandas as pd\n'), ((36702, 36881), 'pandas.DataFrame', 'pd.DataFrame', (['filtered_hood'], {'columns': "['Phase Filtered HOOD', 'T-statistic', 'P Value', 'T-statistic-Sign-Test',\n 'P Vaue-Sign Test', 'Deggree of Confidence', 'Sample Size']"}), "(filtered_hood, columns=['Phase Filtered HOOD', 'T-statistic',\n 'P Value', 'T-statistic-Sign-Test', 'P Vaue-Sign Test',\n 'Deggree of Confidence', 'Sample Size'])\n", (36714, 36881), True, 'import pandas as pd\n'), ((37140, 37172), 'pandas.DataFrame', 'pd.DataFrame', (['Kj_per_sae_no_hood'], {}), '(Kj_per_sae_no_hood)\n', (37152, 
37172), True, 'import pandas as pd\n'), ((37521, 37560), 'pandas.DataFrame', 'pd.DataFrame', (['Kj_per_sae_filter_no_hood'], {}), '(Kj_per_sae_filter_no_hood)\n', (37533, 37560), True, 'import pandas as pd\n'), ((37817, 37861), 'pandas.DataFrame', 'pd.DataFrame', (['Kj_per_sae_mean_filter_no_hood'], {}), '(Kj_per_sae_mean_filter_no_hood)\n', (37829, 37861), True, 'import pandas as pd\n'), ((38070, 38107), 'pandas.DataFrame', 'pd.DataFrame', (['Kj_per_sae_mean_no_hood'], {}), '(Kj_per_sae_mean_no_hood)\n', (38082, 38107), True, 'import pandas as pd\n'), ((38311, 38340), 'pandas.DataFrame', 'pd.DataFrame', (['Kj_per_sae_Hood'], {}), '(Kj_per_sae_Hood)\n', (38323, 38340), True, 'import pandas as pd\n'), ((38556, 38592), 'pandas.DataFrame', 'pd.DataFrame', (['Kj_per_sae_filter_Hood'], {}), '(Kj_per_sae_filter_Hood)\n', (38568, 38592), True, 'import pandas as pd\n'), ((38806, 38847), 'pandas.DataFrame', 'pd.DataFrame', (['Kj_per_sae_mean_filter_Hood'], {}), '(Kj_per_sae_mean_filter_Hood)\n', (38818, 38847), True, 'import pandas as pd\n'), ((39023, 39057), 'pandas.DataFrame', 'pd.DataFrame', (['Kj_per_sae_mean_Hood'], {}), '(Kj_per_sae_mean_Hood)\n', (39035, 39057), True, 'import pandas as pd\n'), ((5644, 5745), 'seaborn.displot', 'sns.displot', (['(Mj_1N_Phase, Mj_2N_Phase, Mj_3N_Phase, Mj_4N_Phase)'], {'kind': '"""kde"""', 'common_norm': '(False)'}), "((Mj_1N_Phase, Mj_2N_Phase, Mj_3N_Phase, Mj_4N_Phase), kind=\n 'kde', common_norm=False)\n", (5655, 5745), True, 'import seaborn as sns\n'), ((5744, 5777), 'matplotlib.pyplot.title', 'plt.title', (['"""Fuel/Day/SAE No-Hood"""'], {}), "('Fuel/Day/SAE No-Hood')\n", (5753, 5777), True, 'import matplotlib.pyplot as plt\n'), ((5831, 5841), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5839, 5841), True, 'import matplotlib.pyplot as plt\n'), ((5846, 5974), 'seaborn.displot', 'sns.displot', (['(Mj_filter_1N_Phase, Mj_filter_2N_Phase, Mj_filter_3N_Phase, Mj_filter_4N_Phase\n )'], {'kind': '"""kde"""', 'common_norm': 
'(False)'}), "((Mj_filter_1N_Phase, Mj_filter_2N_Phase, Mj_filter_3N_Phase,\n Mj_filter_4N_Phase), kind='kde', common_norm=False)\n", (5857, 5974), True, 'import seaborn as sns\n'), ((5974, 6018), 'matplotlib.pyplot.title', 'plt.title', (['"""Fuel/Day/SAE No-Hood - Filtered"""'], {}), "('Fuel/Day/SAE No-Hood - Filtered')\n", (5983, 6018), True, 'import matplotlib.pyplot as plt\n'), ((6072, 6082), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6080, 6082), True, 'import matplotlib.pyplot as plt\n'), ((6087, 6174), 'seaborn.displot', 'sns.displot', (['(Mj_1H_Phase, Mj_2H_Phase, Mj_3H_Phase)'], {'kind': '"""kde"""', 'common_norm': '(False)'}), "((Mj_1H_Phase, Mj_2H_Phase, Mj_3H_Phase), kind='kde',\n common_norm=False)\n", (6098, 6174), True, 'import seaborn as sns\n'), ((6175, 6205), 'matplotlib.pyplot.title', 'plt.title', (['"""Fuel/Day/SAE Hood"""'], {}), "('Fuel/Day/SAE Hood')\n", (6184, 6205), True, 'import matplotlib.pyplot as plt\n'), ((6259, 6269), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6267, 6269), True, 'import matplotlib.pyplot as plt\n'), ((6274, 6382), 'seaborn.displot', 'sns.displot', (['(Mj_filter_1H_Phase, Mj_filter_2H_Phase, Mj_filter_3H_Phase)'], {'kind': '"""kde"""', 'common_norm': '(False)'}), "((Mj_filter_1H_Phase, Mj_filter_2H_Phase, Mj_filter_3H_Phase),\n kind='kde', common_norm=False)\n", (6285, 6382), True, 'import seaborn as sns\n'), ((6383, 6423), 'matplotlib.pyplot.title', 'plt.title', (['"""Fuel/Day/SAE ood - Filtered"""'], {}), "('Fuel/Day/SAE ood - Filtered')\n", (6392, 6423), True, 'import matplotlib.pyplot as plt\n'), ((6477, 6487), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6485, 6487), True, 'import matplotlib.pyplot as plt\n'), ((6498, 6599), 'seaborn.displot', 'sns.displot', (['(Mj_1N_Phase, Mj_2N_Phase, Mj_3N_Phase, Mj_4N_Phase)'], {'kind': '"""kde"""', 'common_norm': '(False)'}), "((Mj_1N_Phase, Mj_2N_Phase, Mj_3N_Phase, Mj_4N_Phase), kind=\n 'kde', common_norm=False)\n", (6509, 6599), 
True, 'import seaborn as sns\n'), ((6598, 6627), 'matplotlib.pyplot.title', 'plt.title', (['"""Fuel/Day No-Hood"""'], {}), "('Fuel/Day No-Hood')\n", (6607, 6627), True, 'import matplotlib.pyplot as plt\n'), ((6681, 6691), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6689, 6691), True, 'import matplotlib.pyplot as plt\n'), ((6696, 6824), 'seaborn.displot', 'sns.displot', (['(Mj_filter_1N_Phase, Mj_filter_2N_Phase, Mj_filter_3N_Phase, Mj_filter_4N_Phase\n )'], {'kind': '"""kde"""', 'common_norm': '(False)'}), "((Mj_filter_1N_Phase, Mj_filter_2N_Phase, Mj_filter_3N_Phase,\n Mj_filter_4N_Phase), kind='kde', common_norm=False)\n", (6707, 6824), True, 'import seaborn as sns\n'), ((6824, 6864), 'matplotlib.pyplot.title', 'plt.title', (['"""Fuel/Day No-Hood - Filtered"""'], {}), "('Fuel/Day No-Hood - Filtered')\n", (6833, 6864), True, 'import matplotlib.pyplot as plt\n'), ((6918, 6928), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6926, 6928), True, 'import matplotlib.pyplot as plt\n'), ((6933, 7020), 'seaborn.displot', 'sns.displot', (['(Mj_1H_Phase, Mj_2H_Phase, Mj_3H_Phase)'], {'kind': '"""kde"""', 'common_norm': '(False)'}), "((Mj_1H_Phase, Mj_2H_Phase, Mj_3H_Phase), kind='kde',\n common_norm=False)\n", (6944, 7020), True, 'import seaborn as sns\n'), ((7021, 7047), 'matplotlib.pyplot.title', 'plt.title', (['"""Fuel/Day Hood"""'], {}), "('Fuel/Day Hood')\n", (7030, 7047), True, 'import matplotlib.pyplot as plt\n'), ((7101, 7111), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7109, 7111), True, 'import matplotlib.pyplot as plt\n'), ((7116, 7224), 'seaborn.displot', 'sns.displot', (['(Mj_filter_1H_Phase, Mj_filter_2H_Phase, Mj_filter_3H_Phase)'], {'kind': '"""kde"""', 'common_norm': '(False)'}), "((Mj_filter_1H_Phase, Mj_filter_2H_Phase, Mj_filter_3H_Phase),\n kind='kde', common_norm=False)\n", (7127, 7224), True, 'import seaborn as sns\n'), ((7225, 7262), 'matplotlib.pyplot.title', 'plt.title', (['"""Fuel/Day Hood - Filtered"""'], {}), 
"('Fuel/Day Hood - Filtered')\n", (7234, 7262), True, 'import matplotlib.pyplot as plt\n'), ((7316, 7326), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7324, 7326), True, 'import matplotlib.pyplot as plt\n'), ((29721, 29751), 'numpy.std', 'np.std', (['MJ_Phase_1N_to_2_comon'], {}), '(MJ_Phase_1N_to_2_comon)\n', (29727, 29751), True, 'import numpy as np\n'), ((29753, 29783), 'numpy.std', 'np.std', (['MJ_Phase_1N_to_3_comon'], {}), '(MJ_Phase_1N_to_3_comon)\n', (29759, 29783), True, 'import numpy as np\n'), ((29784, 29814), 'numpy.std', 'np.std', (['MJ_Phase_1N_to_4_comon'], {}), '(MJ_Phase_1N_to_4_comon)\n', (29790, 29814), True, 'import numpy as np\n'), ((29815, 29845), 'numpy.std', 'np.std', (['MJ_Phase_2N_to_3_comon'], {}), '(MJ_Phase_2N_to_3_comon)\n', (29821, 29845), True, 'import numpy as np\n'), ((29846, 29876), 'numpy.std', 'np.std', (['MJ_Phase_3N_to_4_comon'], {}), '(MJ_Phase_3N_to_4_comon)\n', (29852, 29876), True, 'import numpy as np\n'), ((29877, 29907), 'numpy.std', 'np.std', (['MJ_Phase_2N_to_4_comon'], {}), '(MJ_Phase_2N_to_4_comon)\n', (29883, 29907), True, 'import numpy as np\n'), ((29921, 29956), 'statistics.median', 'stat.median', (['MJ_Phase_1N_to_2_comon'], {}), '(MJ_Phase_1N_to_2_comon)\n', (29932, 29956), True, 'import statistics as stat\n'), ((29958, 29993), 'statistics.median', 'stat.median', (['MJ_Phase_1N_to_3_comon'], {}), '(MJ_Phase_1N_to_3_comon)\n', (29969, 29993), True, 'import statistics as stat\n'), ((29994, 30029), 'statistics.median', 'stat.median', (['MJ_Phase_1N_to_4_comon'], {}), '(MJ_Phase_1N_to_4_comon)\n', (30005, 30029), True, 'import statistics as stat\n'), ((30030, 30065), 'statistics.median', 'stat.median', (['MJ_Phase_2N_to_3_comon'], {}), '(MJ_Phase_2N_to_3_comon)\n', (30041, 30065), True, 'import statistics as stat\n'), ((30066, 30101), 'statistics.median', 'stat.median', (['MJ_Phase_3N_to_4_comon'], {}), '(MJ_Phase_3N_to_4_comon)\n', (30077, 30101), True, 'import statistics as stat\n'), ((30103, 30138), 
'statistics.median', 'stat.median', (['MJ_Phase_2N_to_4_comon'], {}), '(MJ_Phase_2N_to_4_comon)\n', (30114, 30138), True, 'import statistics as stat\n'), ((30150, 30184), 'numpy.average', 'np.average', (['MJ_Phase_1N_to_2_comon'], {}), '(MJ_Phase_1N_to_2_comon)\n', (30160, 30184), True, 'import numpy as np\n'), ((30185, 30219), 'numpy.average', 'np.average', (['MJ_Phase_1N_to_3_comon'], {}), '(MJ_Phase_1N_to_3_comon)\n', (30195, 30219), True, 'import numpy as np\n'), ((30220, 30254), 'numpy.average', 'np.average', (['MJ_Phase_1N_to_4_comon'], {}), '(MJ_Phase_1N_to_4_comon)\n', (30230, 30254), True, 'import numpy as np\n'), ((30255, 30289), 'numpy.average', 'np.average', (['MJ_Phase_2N_to_3_comon'], {}), '(MJ_Phase_2N_to_3_comon)\n', (30265, 30289), True, 'import numpy as np\n'), ((30290, 30324), 'numpy.average', 'np.average', (['MJ_Phase_3N_to_4_comon'], {}), '(MJ_Phase_3N_to_4_comon)\n', (30300, 30324), True, 'import numpy as np\n'), ((30325, 30359), 'numpy.average', 'np.average', (['MJ_Phase_2N_to_4_comon'], {}), '(MJ_Phase_2N_to_4_comon)\n', (30335, 30359), True, 'import numpy as np\n'), ((30370, 30400), 'numpy.std', 'np.std', (['MJ_Phase_2N_to_1_comon'], {}), '(MJ_Phase_2N_to_1_comon)\n', (30376, 30400), True, 'import numpy as np\n'), ((30402, 30432), 'numpy.std', 'np.std', (['MJ_Phase_3N_to_1_comon'], {}), '(MJ_Phase_3N_to_1_comon)\n', (30408, 30432), True, 'import numpy as np\n'), ((30433, 30463), 'numpy.std', 'np.std', (['MJ_Phase_4N_to_1_comon'], {}), '(MJ_Phase_4N_to_1_comon)\n', (30439, 30463), True, 'import numpy as np\n'), ((30464, 30494), 'numpy.std', 'np.std', (['MJ_Phase_3N_to_2_comon'], {}), '(MJ_Phase_3N_to_2_comon)\n', (30470, 30494), True, 'import numpy as np\n'), ((30495, 30525), 'numpy.std', 'np.std', (['MJ_Phase_4N_to_3_comon'], {}), '(MJ_Phase_4N_to_3_comon)\n', (30501, 30525), True, 'import numpy as np\n'), ((30526, 30556), 'numpy.std', 'np.std', (['MJ_Phase_4N_to_2_comon'], {}), '(MJ_Phase_4N_to_2_comon)\n', (30532, 30556), True, 'import 
numpy as np\n'), ((30570, 30605), 'statistics.median', 'stat.median', (['MJ_Phase_2N_to_1_comon'], {}), '(MJ_Phase_2N_to_1_comon)\n', (30581, 30605), True, 'import statistics as stat\n'), ((30607, 30642), 'statistics.median', 'stat.median', (['MJ_Phase_3N_to_1_comon'], {}), '(MJ_Phase_3N_to_1_comon)\n', (30618, 30642), True, 'import statistics as stat\n'), ((30643, 30678), 'statistics.median', 'stat.median', (['MJ_Phase_4N_to_1_comon'], {}), '(MJ_Phase_4N_to_1_comon)\n', (30654, 30678), True, 'import statistics as stat\n'), ((30679, 30714), 'statistics.median', 'stat.median', (['MJ_Phase_3N_to_2_comon'], {}), '(MJ_Phase_3N_to_2_comon)\n', (30690, 30714), True, 'import statistics as stat\n'), ((30715, 30750), 'statistics.median', 'stat.median', (['MJ_Phase_4N_to_3_comon'], {}), '(MJ_Phase_4N_to_3_comon)\n', (30726, 30750), True, 'import statistics as stat\n'), ((30751, 30786), 'statistics.median', 'stat.median', (['MJ_Phase_4N_to_2_comon'], {}), '(MJ_Phase_4N_to_2_comon)\n', (30762, 30786), True, 'import statistics as stat\n'), ((30798, 30832), 'numpy.average', 'np.average', (['MJ_Phase_2N_to_1_comon'], {}), '(MJ_Phase_2N_to_1_comon)\n', (30808, 30832), True, 'import numpy as np\n'), ((30833, 30867), 'numpy.average', 'np.average', (['MJ_Phase_3N_to_1_comon'], {}), '(MJ_Phase_3N_to_1_comon)\n', (30843, 30867), True, 'import numpy as np\n'), ((30868, 30902), 'numpy.average', 'np.average', (['MJ_Phase_4N_to_1_comon'], {}), '(MJ_Phase_4N_to_1_comon)\n', (30878, 30902), True, 'import numpy as np\n'), ((30903, 30937), 'numpy.average', 'np.average', (['MJ_Phase_3N_to_2_comon'], {}), '(MJ_Phase_3N_to_2_comon)\n', (30913, 30937), True, 'import numpy as np\n'), ((30938, 30972), 'numpy.average', 'np.average', (['MJ_Phase_4N_to_3_comon'], {}), '(MJ_Phase_4N_to_3_comon)\n', (30948, 30972), True, 'import numpy as np\n'), ((30973, 31007), 'numpy.average', 'np.average', (['MJ_Phase_4N_to_2_comon'], {}), '(MJ_Phase_4N_to_2_comon)\n', (30983, 31007), True, 'import numpy as np\n'), 
((32129, 32159), 'numpy.std', 'np.std', (['MJ_Phase_1H_to_2_comon'], {}), '(MJ_Phase_1H_to_2_comon)\n', (32135, 32159), True, 'import numpy as np\n'), ((32161, 32191), 'numpy.std', 'np.std', (['MJ_Phase_1H_to_3_comon'], {}), '(MJ_Phase_1H_to_3_comon)\n', (32167, 32191), True, 'import numpy as np\n'), ((32192, 32222), 'numpy.std', 'np.std', (['MJ_Phase_2H_to_3_comon'], {}), '(MJ_Phase_2H_to_3_comon)\n', (32198, 32222), True, 'import numpy as np\n'), ((32238, 32273), 'statistics.median', 'stat.median', (['MJ_Phase_1H_to_2_comon'], {}), '(MJ_Phase_1H_to_2_comon)\n', (32249, 32273), True, 'import statistics as stat\n'), ((32275, 32310), 'statistics.median', 'stat.median', (['MJ_Phase_1H_to_3_comon'], {}), '(MJ_Phase_1H_to_3_comon)\n', (32286, 32310), True, 'import statistics as stat\n'), ((32311, 32346), 'statistics.median', 'stat.median', (['MJ_Phase_2H_to_3_comon'], {}), '(MJ_Phase_2H_to_3_comon)\n', (32322, 32346), True, 'import statistics as stat\n'), ((32360, 32394), 'numpy.average', 'np.average', (['MJ_Phase_1H_to_2_comon'], {}), '(MJ_Phase_1H_to_2_comon)\n', (32370, 32394), True, 'import numpy as np\n'), ((32395, 32429), 'numpy.average', 'np.average', (['MJ_Phase_1H_to_3_comon'], {}), '(MJ_Phase_1H_to_3_comon)\n', (32405, 32429), True, 'import numpy as np\n'), ((32430, 32464), 'numpy.average', 'np.average', (['MJ_Phase_2H_to_3_comon'], {}), '(MJ_Phase_2H_to_3_comon)\n', (32440, 32464), True, 'import numpy as np\n'), ((32477, 32507), 'numpy.std', 'np.std', (['MJ_Phase_2H_to_1_comon'], {}), '(MJ_Phase_2H_to_1_comon)\n', (32483, 32507), True, 'import numpy as np\n'), ((32509, 32539), 'numpy.std', 'np.std', (['MJ_Phase_3H_to_1_comon'], {}), '(MJ_Phase_3H_to_1_comon)\n', (32515, 32539), True, 'import numpy as np\n'), ((32540, 32570), 'numpy.std', 'np.std', (['MJ_Phase_3H_to_2_comon'], {}), '(MJ_Phase_3H_to_2_comon)\n', (32546, 32570), True, 'import numpy as np\n'), ((32586, 32621), 'statistics.median', 'stat.median', (['MJ_Phase_2H_to_1_comon'], {}), 
'(MJ_Phase_2H_to_1_comon)\n', (32597, 32621), True, 'import statistics as stat\n'), ((32623, 32658), 'statistics.median', 'stat.median', (['MJ_Phase_3H_to_1_comon'], {}), '(MJ_Phase_3H_to_1_comon)\n', (32634, 32658), True, 'import statistics as stat\n'), ((32659, 32694), 'statistics.median', 'stat.median', (['MJ_Phase_3H_to_2_comon'], {}), '(MJ_Phase_3H_to_2_comon)\n', (32670, 32694), True, 'import statistics as stat\n'), ((32708, 32742), 'numpy.average', 'np.average', (['MJ_Phase_2H_to_1_comon'], {}), '(MJ_Phase_2H_to_1_comon)\n', (32718, 32742), True, 'import numpy as np\n'), ((32743, 32777), 'numpy.average', 'np.average', (['MJ_Phase_3H_to_1_comon'], {}), '(MJ_Phase_3H_to_1_comon)\n', (32753, 32777), True, 'import numpy as np\n'), ((32778, 32812), 'numpy.average', 'np.average', (['MJ_Phase_3H_to_2_comon'], {}), '(MJ_Phase_3H_to_2_comon)\n', (32788, 32812), True, 'import numpy as np\n'), ((37236, 37263), 'numpy.mean', 'np.mean', (['Mj_filter_1N_Phase'], {}), '(Mj_filter_1N_Phase)\n', (37243, 37263), True, 'import numpy as np\n'), ((36966, 36988), 'numpy.median', 'np.median', (['Mj_1N_Phase'], {}), '(Mj_1N_Phase)\n', (36975, 36988), True, 'import numpy as np\n'), ((36989, 37011), 'numpy.median', 'np.median', (['Mj_2N_Phase'], {}), '(Mj_2N_Phase)\n', (36998, 37011), True, 'import numpy as np\n'), ((37012, 37034), 'numpy.median', 'np.median', (['Mj_3N_Phase'], {}), '(Mj_3N_Phase)\n', (37021, 37034), True, 'import numpy as np\n'), ((37035, 37057), 'numpy.median', 'np.median', (['Mj_4N_Phase'], {}), '(Mj_4N_Phase)\n', (37044, 37057), True, 'import numpy as np\n'), ((37312, 37341), 'numpy.median', 'np.median', (['Mj_filter_1N_Phase'], {}), '(Mj_filter_1N_Phase)\n', (37321, 37341), True, 'import numpy as np\n'), ((37342, 37371), 'numpy.median', 'np.median', (['Mj_filter_2N_Phase'], {}), '(Mj_filter_2N_Phase)\n', (37351, 37371), True, 'import numpy as np\n'), ((37372, 37401), 'numpy.median', 'np.median', (['Mj_filter_3N_Phase'], {}), '(Mj_filter_3N_Phase)\n', (37381, 
37401), True, 'import numpy as np\n'), ((37402, 37431), 'numpy.median', 'np.median', (['Mj_filter_4N_Phase'], {}), '(Mj_filter_4N_Phase)\n', (37411, 37431), True, 'import numpy as np\n'), ((37611, 37638), 'numpy.mean', 'np.mean', (['Mj_filter_1N_Phase'], {}), '(Mj_filter_1N_Phase)\n', (37618, 37638), True, 'import numpy as np\n'), ((37639, 37666), 'numpy.mean', 'np.mean', (['Mj_filter_2N_Phase'], {}), '(Mj_filter_2N_Phase)\n', (37646, 37666), True, 'import numpy as np\n'), ((37667, 37694), 'numpy.mean', 'np.mean', (['Mj_filter_3N_Phase'], {}), '(Mj_filter_3N_Phase)\n', (37674, 37694), True, 'import numpy as np\n'), ((37695, 37722), 'numpy.mean', 'np.mean', (['Mj_filter_4N_Phase'], {}), '(Mj_filter_4N_Phase)\n', (37702, 37722), True, 'import numpy as np\n'), ((37899, 37919), 'numpy.mean', 'np.mean', (['Mj_1N_Phase'], {}), '(Mj_1N_Phase)\n', (37906, 37919), True, 'import numpy as np\n'), ((37920, 37940), 'numpy.mean', 'np.mean', (['Mj_2N_Phase'], {}), '(Mj_2N_Phase)\n', (37927, 37940), True, 'import numpy as np\n'), ((37941, 37961), 'numpy.mean', 'np.mean', (['Mj_3N_Phase'], {}), '(Mj_3N_Phase)\n', (37948, 37961), True, 'import numpy as np\n'), ((37962, 37982), 'numpy.mean', 'np.mean', (['Mj_4N_Phase'], {}), '(Mj_4N_Phase)\n', (37969, 37982), True, 'import numpy as np\n'), ((38168, 38190), 'numpy.median', 'np.median', (['Mj_1H_Phase'], {}), '(Mj_1H_Phase)\n', (38177, 38190), True, 'import numpy as np\n'), ((38191, 38213), 'numpy.median', 'np.median', (['Mj_2H_Phase'], {}), '(Mj_2H_Phase)\n', (38200, 38213), True, 'import numpy as np\n'), ((38214, 38236), 'numpy.median', 'np.median', (['Mj_3H_Phase'], {}), '(Mj_3H_Phase)\n', (38223, 38236), True, 'import numpy as np\n'), ((38385, 38414), 'numpy.median', 'np.median', (['Mj_filter_1H_Phase'], {}), '(Mj_filter_1H_Phase)\n', (38394, 38414), True, 'import numpy as np\n'), ((38415, 38444), 'numpy.median', 'np.median', (['Mj_filter_2H_Phase'], {}), '(Mj_filter_2H_Phase)\n', (38424, 38444), True, 'import numpy as np\n'), 
((38445, 38474), 'numpy.median', 'np.median', (['Mj_filter_3H_Phase'], {}), '(Mj_filter_3H_Phase)\n', (38454, 38474), True, 'import numpy as np\n'), ((38640, 38667), 'numpy.mean', 'np.mean', (['Mj_filter_1H_Phase'], {}), '(Mj_filter_1H_Phase)\n', (38647, 38667), True, 'import numpy as np\n'), ((38668, 38695), 'numpy.mean', 'np.mean', (['Mj_filter_2H_Phase'], {}), '(Mj_filter_2H_Phase)\n', (38675, 38695), True, 'import numpy as np\n'), ((38696, 38723), 'numpy.mean', 'np.mean', (['Mj_filter_3H_Phase'], {}), '(Mj_filter_3H_Phase)\n', (38703, 38723), True, 'import numpy as np\n'), ((38882, 38902), 'numpy.mean', 'np.mean', (['Mj_1H_Phase'], {}), '(Mj_1H_Phase)\n', (38889, 38902), True, 'import numpy as np\n'), ((38903, 38923), 'numpy.mean', 'np.mean', (['Mj_2H_Phase'], {}), '(Mj_2H_Phase)\n', (38910, 38923), True, 'import numpy as np\n'), ((38924, 38944), 'numpy.mean', 'np.mean', (['Mj_3H_Phase'], {}), '(Mj_3H_Phase)\n', (38931, 38944), True, 'import numpy as np\n')] |
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import names
from nltk.stem import WordNetLemmatizer
import glob
import os
import numpy as np
# Peek at one ham and one spam message from the Enron corpus to get a feel
# for the raw data before any preprocessing.
file_path = 'enron1/ham/0007.1999-12-14.farmer.ham.txt'
with open(file_path, 'r') as infile:
    ham_sample = infile.read()
print(ham_sample)
file_path = 'enron1/spam/0058.2003-12-21.GP.spam.txt'
with open(file_path, 'r') as infile:
    spam_sample = infile.read()
print(spam_sample)
# Bag-of-words vectorizer restricted to the 500 most frequent non-stop-words.
cv = CountVectorizer(stop_words="english", max_features=500)
emails, labels = [], []
# Load every spam message, labelled 1.
file_path = 'enron1/spam/'
for filename in glob.glob(os.path.join(file_path, '*.txt')):
    with open(filename, 'r', encoding = "ISO-8859-1") as infile:
        emails.append(infile.read())
        labels.append(1)
# Load every ham message, labelled 0.
file_path = 'enron1/ham/'
for filename in glob.glob(os.path.join(file_path, '*.txt')):
    with open(filename, 'r', encoding = "ISO-8859-1") as infile:
        emails.append(infile.read())
        labels.append(0)
def letters_only(astr):
    """Return True if *astr* is non-empty and purely alphabetic."""
    return all(ch.isalpha() for ch in astr) if astr else False
# Proper names from the NLTK corpus, used to drop people's names during
# cleaning, plus a WordNet lemmatizer for normalizing word forms.
all_names = set(names.words())
lemmatizer = WordNetLemmatizer()
def clean_text(docs):
    """Lowercase, lemmatize and filter each document in *docs*.

    Keeps only purely alphabetic tokens that are not proper names, then
    joins the surviving lemmas back into one space-separated string.
    """
    cleaned_docs = []
    for doc in docs:
        kept = []
        for word in doc.split():
            if letters_only(word) and word not in all_names:
                kept.append(lemmatizer.lemmatize(word.lower()))
        cleaned_docs.append(' '.join(kept))
    return cleaned_docs
# Vectorize the cleaned e-mails into a term-document count matrix.
cleaned_emails = clean_text(emails)
term_docs = cv.fit_transform(cleaned_emails)
print(term_docs[0])
# The fitted term-to-column mapping is the trailing-underscore attribute
# `vocabulary_`; `cv.vocabulary` is only the constructor parameter and is
# None unless a vocabulary was supplied explicitly.
feature_mapping = cv.vocabulary_
feature_names = cv.get_feature_names()  # NOTE(review): removed in scikit-learn >= 1.2; use get_feature_names_out() there
def get_label_index(labels):
    """Group sample positions by class label.

    Args:
        labels: iterable of class labels, one per sample.

    Returns:
        defaultdict mapping each label to the list of sample indices
        carrying that label.
    """
    from collections import defaultdict
    index_map = defaultdict(list)
    for position, label in enumerate(labels):
        index_map[label].append(position)
    return index_map
def get_prior(label_index):
    """ Compute prior based on training samples
    Args:
        label_index (grouped sample indices by class)
    Returns:
        dictionary, with class label as key, corresponding prior as the value
    """
    counts = {label: len(indices) for label, indices in label_index.items()}
    total = float(sum(counts.values()))
    return {label: count / total for label, count in counts.items()}
def get_likelihood(term_document_matrix, label_index, smoothing=0):
    """ Compute likelihood based on training samples
    Args:
        term_document_matrix (sparse matrix)
        label_index (grouped sample indices by class)
        smoothing (integer, additive Laplace smoothing parameter)
    Returns:
        dictionary, with class as key, corresponding conditional probability P(feature|class) vector as value
    """
    conditional = {}
    for label, indices in label_index.items():
        # sum term counts over all documents of this class, with Laplace smoothing
        smoothed_counts = term_document_matrix[indices, :].sum(axis=0) + smoothing
        row = np.asarray(smoothed_counts)[0]
        conditional[label] = row / float(row.sum())
    return conditional
# Quick sanity check on the first few learned vocabulary terms
# (a bare expression: only displays in a notebook/REPL session).
feature_names[:5]
def get_posterior(term_document_matrix, prior, likelihood):
    """ Compute posterior of testing samples, based on prior and likelihood
    Args:
        term_document_matrix (sparse matrix)
        prior (dictionary, with class label as key, corresponding prior as the value)
        likelihood (dictionary, with class label as key, corresponding conditional probability vector as value)
    Returns:
        list of dictionaries, with class label as key, corresponding posterior as value
    """
    num_docs = term_document_matrix.shape[0]
    posteriors = []
    for i in range(num_docs):
        # posterior is proportional to prior * likelihood
        # = exp(log(prior * likelihood))
        # = exp(log(prior) + log(likelihood)); work in log space to avoid underflow
        posterior = {key: np.log(prior_label) for key, prior_label in prior.items()}
        # The document's term counts are the same for every class, so extract
        # the sparse row once instead of once per label (loop-invariant hoist).
        term_document_vector = term_document_matrix.getrow(i)
        counts = term_document_vector.data
        indices = term_document_vector.indices
        for label, likelihood_label in likelihood.items():
            for count, index in zip(counts, indices):
                posterior[label] += np.log(likelihood_label[index]) * count
        # exp(-1000):exp(-999) will cause zero division error,
        # however it equates to exp(0):exp(1); shift by the minimum
        # log-posterior so ratios are preserved but exp() stays finite
        min_log_posterior = min(posterior.values())
        for label in posterior:
            try:
                posterior[label] = np.exp(posterior[label] - min_log_posterior)
            except OverflowError:
                # an excessively large log value: treat as certainty for this label
                posterior[label] = float('inf')
        # normalize so that all posteriors sum up to 1
        sum_posterior = sum(posterior.values())
        for label in posterior:
            if posterior[label] == float('inf'):
                posterior[label] = 1.0
            else:
                posterior[label] /= sum_posterior
        posteriors.append(posterior.copy())
    return posteriors
label_index = get_label_index(labels)
prior = get_prior(label_index)
smoothing = 1
likelihood = get_likelihood(term_docs, label_index, smoothing)
emails_test = [
'''Subject: flat screens
hello ,
please call or contact regarding the other flat screens requested .
<NAME> - eb 3132 b
<NAME> - eb 3132 a
also the sun blocker that was taken away from eb 3131 a .
trisha should two monitors also michael .
thanks
<NAME>''',
'''Subject: having problems in bed ? we can help !
cialis allows men to enjoy a fully normal sex life without having to plan the sexual act .
if we let things terrify us , life will not be worth living .
brevity is the soul of lingerie .
suspicion always haunts the guilty mind .''',
]
cleaned_test = clean_text(emails_test)
term_docs_test = cv.transform(cleaned_test)
posterior = get_posterior(term_docs_test, prior, likelihood)
print(posterior)
# Proper held-out evaluation: train on two thirds, test on one third.
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(cleaned_emails, labels, test_size=0.33, random_state=42)
len(X_train), len(Y_train)
len(X_test), len(Y_test)
# Refit the vectorizer on training data only to avoid test-set leakage.
term_docs_train = cv.fit_transform(X_train)
label_index = get_label_index(Y_train)
prior = get_prior(label_index)
likelihood = get_likelihood(term_docs_train, label_index, smoothing)
term_docs_test = cv.transform(X_test)
posterior = get_posterior(term_docs_test, prior, likelihood)
# Accuracy at a 0.5 decision threshold on the per-class posteriors:
# spam (1) counts as correct when P(spam) >= 0.5, ham when P(ham) > 0.5.
correct = 0.0
for pred, actual in zip(posterior, Y_test):
    if actual == 1:
        if pred[1] >= 0.5:
            correct += 1
    elif pred[0] > 0.5:
        correct += 1
print('The accuracy on {0} testing samples is: {1:.1f}%'.format(len(Y_test), correct/len(Y_test)*100))
# Same classification with scikit-learn's built-in multinomial NB,
# using the same Laplace smoothing (alpha=1.0) as the manual version.
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB(alpha=1.0, fit_prior=True)
clf.fit(term_docs_train, Y_train)
prediction_prob = clf.predict_proba(term_docs_test)
prediction_prob[0:10]
prediction = clf.predict(term_docs_test)
prediction[:10]
accuracy = clf.score(term_docs_test, Y_test)
print('The accuracy using MultinomialNB is: {0:.1f}%'.format(accuracy*100))
# Beyond accuracy: confusion matrix, precision/recall and F1 for both classes.
from sklearn.metrics import confusion_matrix
confusion_matrix(Y_test, prediction, labels=[0, 1])
from sklearn.metrics import precision_score, recall_score, f1_score
precision_score(Y_test, prediction, pos_label=1)
recall_score(Y_test, prediction, pos_label=1)
f1_score(Y_test, prediction, pos_label=1)
f1_score(Y_test, prediction, pos_label=0)
from sklearn.metrics import classification_report
report = classification_report(Y_test, prediction)
print(report)
# Build ROC curve points by sweeping a decision threshold over the
# predicted spam probability and tallying true/false positives.
pos_prob = prediction_prob[:, 1]
thresholds = np.arange(0.0, 1.2, 0.1)
true_pos, false_pos = [0]*len(thresholds), [0]*len(thresholds)
for pred, y in zip(pos_prob, Y_test):
    for i, threshold in enumerate(thresholds):
        if pred >= threshold:
            if y == 1:
                true_pos[i] += 1
            else:
                false_pos[i] += 1
        else:
            # thresholds are ascending, so all later ones fail too
            break
# Derive the class totals from the data instead of hard-coding the
# 516/1191 counts of this particular train/test split.
n_pos = sum(1 for y in Y_test if y == 1)
n_neg = len(Y_test) - n_pos
true_pos_rate = [tp / float(n_pos) for tp in true_pos]
false_pos_rate = [fp / float(n_neg) for fp in false_pos]
import matplotlib.pyplot as plt
# Plot the hand-computed ROC curve against the chance diagonal.
plt.figure()
lw = 2
plt.plot(false_pos_rate, true_pos_rate, color='darkorange',
         lw=lw)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic')
plt.legend(loc="lower right")
plt.show()
# Area under the ROC curve computed directly from the probabilities.
from sklearn.metrics import roc_auc_score
roc_auc_score(Y_test, pos_prob)
# 10-fold stratified cross-validation over a small hyper-parameter grid:
# vocabulary size, smoothing strength and whether to learn class priors.
from sklearn.model_selection import StratifiedKFold
k = 10
k_fold = StratifiedKFold(n_splits=k)
# convert to numpy array for more efficient slicing
cleaned_emails_np = np.array(cleaned_emails)
labels_np = np.array(labels)
max_features_option = [2000, 4000, 8000]
smoothing_factor_option = [0.5, 1.0, 1.5, 2.0]
fit_prior_option = [True, False]
auc_record = {}
for train_indices, test_indices in k_fold.split(cleaned_emails, labels):
    X_train, X_test = cleaned_emails_np[train_indices], cleaned_emails_np[test_indices]
    Y_train, Y_test = labels_np[train_indices], labels_np[test_indices]
    for max_features in max_features_option:
        if max_features not in auc_record:
            auc_record[max_features] = {}
        # refit the vectorizer per fold/size so the vocabulary only sees training data
        cv = CountVectorizer(stop_words="english", max_features=max_features)
        term_docs_train = cv.fit_transform(X_train)
        term_docs_test = cv.transform(X_test)
        for smoothing_factor in smoothing_factor_option:
            if smoothing_factor not in auc_record[max_features]:
                auc_record[max_features][smoothing_factor] = {}
            for fit_prior in fit_prior_option:
                clf = MultinomialNB(alpha=smoothing_factor, fit_prior=fit_prior)
                clf.fit(term_docs_train, Y_train)
                prediction_prob = clf.predict_proba(term_docs_test)
                pos_prob = prediction_prob[:, 1]
                auc = roc_auc_score(Y_test, pos_prob)
                # accumulate AUC across folds; divided by k when printed below
                auc_record[max_features][smoothing_factor][fit_prior] \
                    = auc + auc_record[max_features][smoothing_factor].get(fit_prior, 0.0)
print(auc_record)
print('max features  smoothing  fit prior  auc')
for max_features, max_feature_record in auc_record.items():
    for smoothing, smoothing_record in max_feature_record.items():
        for fit_prior, auc in smoothing_record.items():
            print('       {0}      {1}      {2}    {3:.4f}'.format(max_features, smoothing, fit_prior, auc/k))
| [
"matplotlib.pyplot.ylabel",
"sklearn.metrics.classification_report",
"numpy.log",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"sklearn.metrics.roc_auc_score",
"sklearn.model_selection.StratifiedKFold",
"numpy.array",
"numpy.arange",
"sklearn.feature_extraction.text.CountVect... | [((465, 520), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'stop_words': '"""english"""', 'max_features': '(500)'}), "(stop_words='english', max_features=500)\n", (480, 520), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((1076, 1095), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (1093, 1095), False, 'from nltk.stem import WordNetLemmatizer\n'), ((6043, 6116), 'sklearn.model_selection.train_test_split', 'train_test_split', (['cleaned_emails', 'labels'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(cleaned_emails, labels, test_size=0.33, random_state=42)\n', (6059, 6116), False, 'from sklearn.model_selection import train_test_split\n'), ((6790, 6830), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {'alpha': '(1.0)', 'fit_prior': '(True)'}), '(alpha=1.0, fit_prior=True)\n', (6803, 6830), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((7165, 7216), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['Y_test', 'prediction'], {'labels': '[0, 1]'}), '(Y_test, prediction, labels=[0, 1])\n', (7181, 7216), False, 'from sklearn.metrics import confusion_matrix\n'), ((7286, 7334), 'sklearn.metrics.precision_score', 'precision_score', (['Y_test', 'prediction'], {'pos_label': '(1)'}), '(Y_test, prediction, pos_label=1)\n', (7301, 7334), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((7335, 7380), 'sklearn.metrics.recall_score', 'recall_score', (['Y_test', 'prediction'], {'pos_label': '(1)'}), '(Y_test, prediction, pos_label=1)\n', (7347, 7380), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((7381, 7422), 'sklearn.metrics.f1_score', 'f1_score', (['Y_test', 'prediction'], {'pos_label': '(1)'}), '(Y_test, prediction, pos_label=1)\n', (7389, 7422), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((7424, 7465), 
'sklearn.metrics.f1_score', 'f1_score', (['Y_test', 'prediction'], {'pos_label': '(0)'}), '(Y_test, prediction, pos_label=0)\n', (7432, 7465), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((7526, 7567), 'sklearn.metrics.classification_report', 'classification_report', (['Y_test', 'prediction'], {}), '(Y_test, prediction)\n', (7547, 7567), False, 'from sklearn.metrics import classification_report\n'), ((7632, 7656), 'numpy.arange', 'np.arange', (['(0.0)', '(1.2)', '(0.1)'], {}), '(0.0, 1.2, 0.1)\n', (7641, 7656), True, 'import numpy as np\n'), ((8109, 8121), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8119, 8121), True, 'import matplotlib.pyplot as plt\n'), ((8129, 8195), 'matplotlib.pyplot.plot', 'plt.plot', (['false_pos_rate', 'true_pos_rate'], {'color': '"""darkorange"""', 'lw': 'lw'}), "(false_pos_rate, true_pos_rate, color='darkorange', lw=lw)\n", (8137, 8195), True, 'import matplotlib.pyplot as plt\n'), ((8205, 8266), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': 'lw', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n", (8213, 8266), True, 'import matplotlib.pyplot as plt\n'), ((8267, 8287), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (8275, 8287), True, 'import matplotlib.pyplot as plt\n'), ((8288, 8309), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (8296, 8309), True, 'import matplotlib.pyplot as plt\n'), ((8310, 8343), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (8320, 8343), True, 'import matplotlib.pyplot as plt\n'), ((8344, 8376), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (8354, 8376), True, 'import matplotlib.pyplot as plt\n'), ((8377, 8423), 'matplotlib.pyplot.title', 'plt.title', (['"""Receiver Operating Characteristic"""'], 
{}), "('Receiver Operating Characteristic')\n", (8386, 8423), True, 'import matplotlib.pyplot as plt\n'), ((8424, 8453), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (8434, 8453), True, 'import matplotlib.pyplot as plt\n'), ((8454, 8464), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8462, 8464), True, 'import matplotlib.pyplot as plt\n'), ((8511, 8542), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['Y_test', 'pos_prob'], {}), '(Y_test, pos_prob)\n', (8524, 8542), False, 'from sklearn.metrics import roc_auc_score\n'), ((8614, 8641), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'k'}), '(n_splits=k)\n', (8629, 8641), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((8714, 8738), 'numpy.array', 'np.array', (['cleaned_emails'], {}), '(cleaned_emails)\n', (8722, 8738), True, 'import numpy as np\n'), ((8751, 8767), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (8759, 8767), True, 'import numpy as np\n'), ((600, 632), 'os.path.join', 'os.path.join', (['file_path', '"""*.txt"""'], {}), "(file_path, '*.txt')\n", (612, 632), False, 'import os\n'), ((815, 847), 'os.path.join', 'os.path.join', (['file_path', '"""*.txt"""'], {}), "(file_path, '*.txt')\n", (827, 847), False, 'import os\n'), ((1048, 1061), 'nltk.corpus.names.words', 'names.words', ([], {}), '()\n', (1059, 1061), False, 'from nltk.corpus import names\n'), ((1719, 1736), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1730, 1736), False, 'from collections import defaultdict\n'), ((9283, 9347), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'stop_words': '"""english"""', 'max_features': 'max_features'}), "(stop_words='english', max_features=max_features)\n", (9298, 9347), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((2884, 2913), 'numpy.asarray', 'np.asarray', (['likelihood[label]'], {}), 
'(likelihood[label])\n', (2894, 2913), True, 'import numpy as np\n'), ((3828, 3847), 'numpy.log', 'np.log', (['prior_label'], {}), '(prior_label)\n', (3834, 3847), True, 'import numpy as np\n'), ((4485, 4529), 'numpy.exp', 'np.exp', (['(posterior[label] - min_log_posterior)'], {}), '(posterior[label] - min_log_posterior)\n', (4491, 4529), True, 'import numpy as np\n'), ((9701, 9759), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {'alpha': 'smoothing_factor', 'fit_prior': 'fit_prior'}), '(alpha=smoothing_factor, fit_prior=fit_prior)\n', (9714, 9759), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((9949, 9980), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['Y_test', 'pos_prob'], {}), '(Y_test, pos_prob)\n', (9962, 9980), False, 'from sklearn.metrics import roc_auc_score\n'), ((4200, 4231), 'numpy.log', 'np.log', (['likelihood_label[index]'], {}), '(likelihood_label[index])\n', (4206, 4231), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
def get_angles(pos, i, d_model):
    """Sinusoidal-encoding angle: pos / 10000^(2*(i//2)/d_model)."""
    inv_freq = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
    return pos * inv_freq
def positional_encoding(position, d_model):
    """Sinusoidal positional-encoding tensor of shape (1, position, d_model)."""
    positions = np.arange(position)[:, np.newaxis]
    dims = np.arange(d_model)[np.newaxis, :]
    angle_rads = get_angles(positions, dims, d_model)
    # even feature indices (2i) get sine, odd feature indices (2i+1) get cosine
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
    return tf.cast(angle_rads[np.newaxis, ...], dtype=tf.float32)
def draw_xml(imgpath, xmlpath):
    # Draw every VOC-style bounding box from *xmlpath* onto the image at
    # *imgpath*; returns the annotated image, or None if the image is unreadable.
    # NOTE(review): relies on `cv2` and `ET` (xml.etree.ElementTree), neither of
    # which is imported in this file's visible header — confirm they are in scope.
    img = cv2.imread(imgpath)
    if img is None:
        return img
    target = ET.parse(xmlpath).getroot()
    for obj in target.iter('object'):
        # parsed but currently unused: difficult objects are not filtered out
        difficult = int(obj.find('difficult').text) == 1
        name = obj.find('name').text.lower().strip()
        bbox = obj.find('bndbox')
        # get face rect
        pts = ['xmin', 'ymin', 'xmax', 'ymax']
        bndbox = [int(bbox.find(pt).text) for pt in pts]
        # red rectangle around the box, green class label at its top-left corner
        img = cv2.rectangle(img, tuple(bndbox[:2]), tuple(bndbox[2:]), (0,0,255), 2)
        img = cv2.putText(img, name, tuple(bndbox[:2]), 3, 1, (0,255,0), 1)
    return img
def read_jpeg_image(img_path):
    """Load the JPEG file at *img_path* and decode it as a 3-channel tensor."""
    raw = tf.io.read_file(img_path)
    return tf.image.decode_jpeg(raw, channels=3)
def resize(image, min_side=800.0, max_side=1333.0):
    """Rescale *image* so its short side approaches *min_side* while the
    long side never exceeds *max_side*; aspect ratio is preserved because
    a single scale factor is applied to both dimensions."""
    height = tf.cast(tf.shape(image)[0], tf.float32)
    width = tf.cast(tf.shape(image)[1], tf.float32)
    shorter = tf.minimum(width, height)
    longer = tf.maximum(width, height)
    # the long-side cap wins whenever the two constraints conflict
    scale = tf.minimum(max_side / longer, min_side / shorter)
    new_h = tf.cast(scale * height, tf.int32)
    new_w = tf.cast(scale * width, tf.int32)
    return tf.image.resize(image, (new_h, new_w))
def build_mask(image):
    """All-False boolean mask matching the image's leading (H, W) dimensions."""
    spatial_shape = tf.shape(image)[:2]
    return tf.zeros(spatial_shape, dtype=tf.bool)
def cxcywh_to_xyxy(array):
    """
    input: array: numpy array of shape (batch,4) | 4 -> cx,cy,w,h
    return:array: numpy array of shape (batch,4) | 4 -> x1,y1,x2,y2
    """
    cx, cy, w, h = (array[..., k] for k in range(4))
    corners = np.zeros(array.shape)
    corners[..., 0] = cx - 0.5 * w
    corners[..., 1] = cy - 0.5 * h
    corners[..., 2] = cx + 0.5 * w
    corners[..., 3] = cy + 0.5 * h
    return corners
def unnormalize2image(array,width,height):
    """Scale normalized box coordinates into absolute pixel coordinates.

    input: array: numpy array of shape (batch,4) with values in [0,1];
           even columns (0, 2) are horizontal coordinates, odd columns
           (1, 3) vertical
    return: numpy array of the same shape and dtype with x-columns
            multiplied by *width* and y-columns by *height*

    Note: the previous docstring (cx,cy,w,h -> x1,y1,x2,y2) was copied
    from cxcywh_to_xyxy and did not describe this function.
    """
    new_array = np.zeros_like(array)
    new_array[...,[0,2]] = array[...,[0,2]]*width
    new_array[...,[1,3]] = array[...,[1,3]]*height
    return new_array
def absolute2relative(boxes, img_size):
    # NOTE(review): despite the name, this multiplies by the image size, i.e. it
    # converts *relative* [0,1] coordinates to *absolute* pixels. Callers depend
    # on the current behavior, so only this comment flags the mismatch.
    width, height = img_size
    scale = tf.constant([width, height, width, height], dtype=tf.float32)
    # tf.Tensor is immutable, so `*=` rebinds the local name; the caller's
    # tensor is not modified in place
    boxes *= scale
    return boxes
def xyxy2xywh(boxes):
    """Convert (xmin, ymin, xmax, ymax) boxes to (xmin, ymin, width, height)."""
    xmin = boxes[..., 0]
    ymin = boxes[..., 1]
    xmax = boxes[..., 2]
    ymax = boxes[..., 3]
    return tf.stack([xmin, ymin, xmax - xmin, ymax - ymin], axis=-1)
def preprocess_input(image):
    """Scale pixel values to [0, 1] and normalize per channel.

    The constants are the widely-used ImageNet channel mean and std.
    """
    mean = tf.constant([0.485, 0.456, 0.406])
    std = tf.constant([0.229, 0.224, 0.225])
    scaled = image / 255.0
    return (scaled - mean) / std
def preprocess_image(image):
image = resize(image, min_side=800.0, max_side=1333.0)
return image, build_mask(image) | [
"tensorflow.shape",
"numpy.float32",
"tensorflow.image.resize",
"numpy.arange",
"tensorflow.io.read_file",
"numpy.zeros",
"tensorflow.constant",
"numpy.cos",
"tensorflow.maximum",
"numpy.sin",
"tensorflow.cast",
"numpy.zeros_like",
"tensorflow.minimum",
"tensorflow.stack",
"tensorflow.im... | [((461, 488), 'numpy.sin', 'np.sin', (['angle_rads[:, 0::2]'], {}), '(angle_rads[:, 0::2])\n', (467, 488), True, 'import numpy as np\n'), ((566, 593), 'numpy.cos', 'np.cos', (['angle_rads[:, 1::2]'], {}), '(angle_rads[:, 1::2])\n', (572, 593), True, 'import numpy as np\n'), ((654, 693), 'tensorflow.cast', 'tf.cast', (['pos_encoding'], {'dtype': 'tf.float32'}), '(pos_encoding, dtype=tf.float32)\n', (661, 693), True, 'import tensorflow as tf\n'), ((1372, 1397), 'tensorflow.io.read_file', 'tf.io.read_file', (['img_path'], {}), '(img_path)\n', (1387, 1397), True, 'import tensorflow as tf\n'), ((1410, 1449), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image'], {'channels': '(3)'}), '(image, channels=3)\n', (1430, 1449), True, 'import tensorflow as tf\n'), ((1636, 1652), 'tensorflow.minimum', 'tf.minimum', (['w', 'h'], {}), '(w, h)\n', (1646, 1652), True, 'import tensorflow as tf\n'), ((1672, 1688), 'tensorflow.maximum', 'tf.maximum', (['w', 'h'], {}), '(w, h)\n', (1682, 1688), True, 'import tensorflow as tf\n'), ((1702, 1762), 'tensorflow.minimum', 'tf.minimum', (['(max_side / cur_max_side)', '(min_side / cur_min_side)'], {}), '(max_side / cur_max_side, min_side / cur_min_side)\n', (1712, 1762), True, 'import tensorflow as tf\n'), ((1795, 1823), 'tensorflow.cast', 'tf.cast', (['(scale * h)', 'tf.int32'], {}), '(scale * h, tf.int32)\n', (1802, 1823), True, 'import tensorflow as tf\n'), ((1833, 1861), 'tensorflow.cast', 'tf.cast', (['(scale * w)', 'tf.int32'], {}), '(scale * w, tf.int32)\n', (1840, 1861), True, 'import tensorflow as tf\n'), ((1875, 1907), 'tensorflow.image.resize', 'tf.image.resize', (['image', '(nh, nw)'], {}), '(image, (nh, nw))\n', (1890, 1907), True, 'import tensorflow as tf\n'), ((2208, 2229), 'numpy.zeros', 'np.zeros', (['array.shape'], {}), '(array.shape)\n', (2216, 2229), True, 'import numpy as np\n'), ((2697, 2717), 'numpy.zeros_like', 'np.zeros_like', (['array'], {}), '(array)\n', (2710, 2717), True, 'import 
numpy as np\n'), ((2922, 2983), 'tensorflow.constant', 'tf.constant', (['[width, height, width, height]'], {'dtype': 'tf.float32'}), '([width, height, width, height], dtype=tf.float32)\n', (2933, 2983), True, 'import tensorflow as tf\n'), ((3117, 3174), 'tensorflow.stack', 'tf.stack', (['[xmin, ymin, xmax - xmin, ymax - ymin]'], {'axis': '(-1)'}), '([xmin, ymin, xmax - xmin, ymax - ymin], axis=-1)\n', (3125, 3174), True, 'import tensorflow as tf\n'), ((3223, 3257), 'tensorflow.constant', 'tf.constant', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (3234, 3257), True, 'import tensorflow as tf\n'), ((3276, 3310), 'tensorflow.constant', 'tf.constant', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (3287, 3310), True, 'import tensorflow as tf\n'), ((253, 272), 'numpy.arange', 'np.arange', (['position'], {}), '(position)\n', (262, 272), True, 'import numpy as np\n'), ((315, 333), 'numpy.arange', 'np.arange', (['d_model'], {}), '(d_model)\n', (324, 333), True, 'import numpy as np\n'), ((1537, 1552), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (1545, 1552), True, 'import tensorflow as tf\n'), ((1585, 1600), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (1593, 1600), True, 'import tensorflow as tf\n'), ((1970, 1985), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (1978, 1985), True, 'import tensorflow as tf\n'), ((130, 149), 'numpy.float32', 'np.float32', (['d_model'], {}), '(d_model)\n', (140, 149), True, 'import numpy as np\n')] |
####################################### MALETESS ##################################
################################################################################
#
# Copyright (C) 2019 <NAME>
# <EMAIL>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License GPLv3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# <NAME>
# Instituto de Astrofisica de Canarias
# C/ Via Lactea s/n
# 38200 La Laguna
# <NAME>
# SPAIN
#
#
##################################################################################
import numpy as np
from scipy.fftpack import fft, fftfreq
from scipy import ndimage
from scipy.signal import find_peaks
from sklearn.preprocessing import normalize, StandardScaler
import pickle
import statsmodels.api as stm
import argparse as argp
import sys, json, time
from astropy.io import fits
# Command-line interface: a pickled classifier, a light curve and a
# reference Target Pixel File are all required positional arguments.
parser = argp.ArgumentParser(prog='maletess.py',description='This is a Python3 algorithm to make predictions for planets candidates using Machine Learning ', usage='%(prog)s')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
parser.add_argument('mlf', type=str, help='File trained for ML (pickle file)')
parser.add_argument('lcf', type=str, help='Light curve file (numpy file .npz) ')
parser.add_argument('tpf', type=str, help='Target Pixel File (reference file .fits) ')
# Parsed at import time: this file is intended to be run as a script.
args = parser.parse_args()
def preplanet(con, onsp, dtime, perd, sigma, FF, LL):
    """Preprocess a light curve into a normalized FFT feature vector.

    Parameters
    ----------
    con : mapping with 'time_flat' and 'flux_flat' arrays (flattened light curve)
    onsp : number of FFT samples (should be a power of 2)
    dtime : resampling interval in seconds
    perd : sampled period range in seconds (kept for interface compatibility)
    sigma : Gaussian smoothing width applied to the flattened spectrum
    FF, LL : FFT frequency grid and boolean mask selecting the period range

    Returns
    -------
    (flx, per) : standardized spectrum and the dominant period in days as a
    string ('0.00' when no significant peak is found).

    NOTE: relies on the module-level ``scal`` StandardScaler defined below.
    """
    # Interpolate the flux onto a uniform time grid (itime in days).
    itime = dtime/(3600*24)
    atime = np.arange(con['time_flat'][0], con['time_flat'][-1], itime)
    flx = np.interp(atime, con['time_flat'], con['flux_flat'])
    # Zero-pad (or truncate) to exactly onsp samples before the FFT.
    flx = np.pad(flx, pad_width = (0, max(0, onsp - len(flx))), mode = 'constant', constant_values = 0)
    flx = flx[:onsp]
    flx = fft(flx)
    flx = np.abs(flx)[LL]  # keep only frequencies within the requested period range
    # Lightly smooth the raw spectrum before peak finding.
    # Fix: scipy.ndimage.filters is a long-deprecated alias (removed in recent
    # SciPy); call scipy.ndimage.gaussian_filter1d directly.
    gfl = ndimage.gaussian_filter1d(flx, sigma=1)
    indk = find_peaks(gfl, distance=1)[0]
    # A peak is significant when it exceeds mean + 1 standard deviation.
    ll = gfl[indk] >= np.mean(gfl) + 1*np.std(gfl)
    if sum(ll) > 0:
        lm = np.argmax(gfl[indk[ll]])
        per = 1/FF[LL][indk[ll][lm]]/(3600*24)  # frequency -> period in days
        per = '{:4.2f}'.format(per)
    else:
        per = '0.00'
    # Remove the smooth spectral trend with a LOWESS fit, then smooth again.
    kk = stm.nonparametric.lowess(flx, FF[LL], frac=0.95)
    flx = flx - kk[:,1]
    flx = ndimage.gaussian_filter1d(flx, sigma=sigma)
    # L2-normalize, then standardize (zero mean / unit variance) for the classifier.
    flx = normalize(flx.reshape(1,-1))[0]
    flx = scal.fit_transform(flx.reshape(-1,1)).reshape(1,-1)[0]
    return flx, per
start_time = time.time()
# Scaler shared with preplanet() via the module namespace.
scal = StandardScaler()
# LARGEST SAMPLE SIZE
sigma = 1
xsp = int(1024) # FOR TESS ML-GABLAB
# Round the sample count down to a power of 2 for the FFT.
nsp = 2**int(np.floor(np.log2(xsp)))
perd = [0.3*24*3600, 21*24*3600] # PERIOD RANGE TO BE SAMPLED
dtime = 1800 # DELTA TIME FOR INTERPOLATE
# # CONVERT PERIODS TO FREQUENCIES
frqr = [1/perd[1], 1/perd[0]]
# FFT frequency grid and boolean mask selecting the period range.
FF = fftfreq(nsp,dtime)
LL = (FF >= frqr[0]) & (FF <= frqr[1])
# LOADING MODEL...
# NOTE: pickle.load on an external file — only run on trusted model files.
savfil = open(args.mlf, 'rb')
model = pickle.load(savfil)
savfil.close()
# LOADING LIGHT CURVE
con = np.load(args.lcf)
# PREPROCESING LIGHT CURVE
flx, per = preplanet(con, nsp, dtime, perd, sigma, FF, LL)
# PREDICTION: NO_PLANET, PLANET
flxprb = model.predict_proba(flx.reshape(1,-1))[0]
# If "no planet" is the more likely class, report a null period.
if flxprb[0] > 0.5:
    per = '0.00'
perV = [float(per)]
# RESULTS
# Pull target metadata out of the TPF primary header for the report.
hdu = fits.open(args.tpf)
X = hdu[0].header['SECTOR']
TICID = hdu[0].header['TICID']
Camera = hdu[0].header['CAMERA']
CCD = hdu[0].header['CCD']
RA = hdu[0].header['RA_OBJ']
DEC = hdu[0].header['DEC_OBJ']
MAG = hdu[0].header['TESSMAG']
hdu.close()
print('TICID = {}'.format(str(TICID)))
print('Sector = {}'.format(str(X)))
print('Camera = {}'.format(str(Camera)))
print('CCD = {}'.format(str(CCD)))
print('RA = {}'.format(str(RA)))
print('DEC = {}'.format(str(DEC)))
print('Magnitude = {}'.format(str(MAG)))
print("Frequencies = {}".format(perV))
print("Is not a planet = {}".format(flxprb[0]))
print("Is a planet = {}".format(flxprb[1]))
# Write the prediction next to the input light curve as a JSON report.
resultfilename = args.lcf.replace("_data.npz", "_data.result")
data = {"ticid": str(TICID), "sector": X, "camera": Camera, "ccd": CCD, "ra": RA, "dec": DEC, "tmag": MAG, "lc": args.lcf, "frequencies": perV, "isnotplanet": flxprb[0], "isplanet": flxprb[1] }
json_data = json.dumps(data)
rf = open(resultfilename, "w")
rf.write(json_data)
rf.close()
print("Total time: {} sec".format(time.strftime("%H:%M:%S", time.gmtime(time.time()-start_time)))) | [
"scipy.fftpack.fftfreq",
"scipy.fftpack.fft",
"astropy.io.fits.open",
"numpy.arange",
"numpy.mean",
"argparse.ArgumentParser",
"json.dumps",
"scipy.signal.find_peaks",
"numpy.abs",
"pickle.load",
"numpy.argmax",
"scipy.ndimage.filters.gaussian_filter1d",
"numpy.interp",
"numpy.std",
"num... | [((1412, 1589), 'argparse.ArgumentParser', 'argp.ArgumentParser', ([], {'prog': '"""maletess.py"""', 'description': '"""This is a Python3 algorithm to make predictions for planets candidates using Machine Learning """', 'usage': '"""%(prog)s"""'}), "(prog='maletess.py', description=\n 'This is a Python3 algorithm to make predictions for planets candidates using Machine Learning '\n , usage='%(prog)s')\n", (1431, 1589), True, 'import argparse as argp\n'), ((3619, 3630), 'time.time', 'time.time', ([], {}), '()\n', (3628, 3630), False, 'import sys, json, time\n'), ((3639, 3655), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3653, 3655), False, 'from sklearn.preprocessing import normalize, StandardScaler\n'), ((3997, 4016), 'scipy.fftpack.fftfreq', 'fftfreq', (['nsp', 'dtime'], {}), '(nsp, dtime)\n', (4004, 4016), False, 'from scipy.fftpack import fft, fftfreq\n'), ((4114, 4133), 'pickle.load', 'pickle.load', (['savfil'], {}), '(savfil)\n', (4125, 4133), False, 'import pickle\n'), ((4178, 4195), 'numpy.load', 'np.load', (['args.lcf'], {}), '(args.lcf)\n', (4185, 4195), True, 'import numpy as np\n'), ((4452, 4471), 'astropy.io.fits.open', 'fits.open', (['args.tpf'], {}), '(args.tpf)\n', (4461, 4471), False, 'from astropy.io import fits\n'), ((5355, 5371), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (5365, 5371), False, 'import sys, json, time\n'), ((2239, 2298), 'numpy.arange', 'np.arange', (["con['time_flat'][0]", "con['time_flat'][-1]", 'itime'], {}), "(con['time_flat'][0], con['time_flat'][-1], itime)\n", (2248, 2298), True, 'import numpy as np\n'), ((2310, 2362), 'numpy.interp', 'np.interp', (['atime', "con['time_flat']", "con['flux_flat']"], {}), "(atime, con['time_flat'], con['flux_flat'])\n", (2319, 2362), True, 'import numpy as np\n'), ((2645, 2653), 'scipy.fftpack.fft', 'fft', (['flx'], {}), '(flx)\n', (2648, 2653), False, 'from scipy.fftpack import fft, fftfreq\n'), ((2882, 2929), 
'scipy.ndimage.filters.gaussian_filter1d', 'ndimage.filters.gaussian_filter1d', (['flx'], {'sigma': '(1)'}), '(flx, sigma=1)\n', (2915, 2929), False, 'from scipy import ndimage\n'), ((3258, 3306), 'statsmodels.api.nonparametric.lowess', 'stm.nonparametric.lowess', (['flx', 'FF[LL]'], {'frac': '(0.95)'}), '(flx, FF[LL], frac=0.95)\n', (3282, 3306), True, 'import statsmodels.api as stm\n'), ((3341, 3392), 'scipy.ndimage.filters.gaussian_filter1d', 'ndimage.filters.gaussian_filter1d', (['flx'], {'sigma': 'sigma'}), '(flx, sigma=sigma)\n', (3374, 3392), False, 'from scipy import ndimage\n'), ((2709, 2720), 'numpy.abs', 'np.abs', (['flx'], {}), '(flx)\n', (2715, 2720), True, 'import numpy as np\n'), ((2944, 2971), 'scipy.signal.find_peaks', 'find_peaks', (['gfl'], {'distance': '(1)'}), '(gfl, distance=1)\n', (2954, 2971), False, 'from scipy.signal import find_peaks\n'), ((3059, 3083), 'numpy.argmax', 'np.argmax', (['gfl[indk[ll]]'], {}), '(gfl[indk[ll]])\n', (3068, 3083), True, 'import numpy as np\n'), ((2997, 3009), 'numpy.mean', 'np.mean', (['gfl'], {}), '(gfl)\n', (3004, 3009), True, 'import numpy as np\n'), ((3764, 3776), 'numpy.log2', 'np.log2', (['xsp'], {}), '(xsp)\n', (3771, 3776), True, 'import numpy as np\n'), ((3014, 3025), 'numpy.std', 'np.std', (['gfl'], {}), '(gfl)\n', (3020, 3025), True, 'import numpy as np\n'), ((5507, 5518), 'time.time', 'time.time', ([], {}), '()\n', (5516, 5518), False, 'import sys, json, time\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 09:22:42 2021
@author: luyao.li
"""
import numpy as np
import os
from functools import partial
from collections import defaultdict
import matplotlib.pyplot as plt
def hash_fun(a, b, n_buckets, x, p=123457):
    """Map ``x`` to a bucket index with the universal hash
    ((a * (x mod p) + b) mod p) mod n_buckets, where p is prime."""
    residue = x % p
    bucket = ((a * residue + b) % p) % n_buckets
    return bucket
if __name__ == '__main__':
    # Work relative to this script's directory so the data files resolve.
    os.chdir(os.path.dirname(os.path.abspath(__file__ )) )
    counts_dir= 'counts_tiny.txt'
    words_dir ='words_stream.txt'
    hash_params = np.loadtxt('hash_params.txt',delimiter='\t')
    # Count-min sketch parameters: n_hash rows of n_buckets counters,
    # sized from the failure probability delta and error bound eps.
    delta = np.exp(-5)
    eps= np.exp(1)*pow(10,-4)
    n_hash= int( np.ceil( np.log(1/delta) ))
    n_buckets= int(np.ceil( np.exp(1) /eps ) )
    # One hash function per sketch row, parameters (a, b) read from file.
    hash_list = [ partial(hash_fun,a=int(hash_params[i,0]),b=int(hash_params[i,1]),n_buckets =n_buckets ) for i in range( n_hash ) ]
    # for x in data:
    counts_dict =[ defaultdict(int) for i in range(len(hash_list))]
    t=0
    # Stream the words once, incrementing each row's bucket counter.
    with open(words_dir) as f:
        for x in f:
            x= int(x.strip())
            t+=1
            if not t%1000000:
                print(t ,'element')
            for idx,h in enumerate(hash_list):
                h_value = h(x=x)
                counts_dict[idx][h_value] +=1
    Er =[]
    wordFreq= []
    # Compare the sketch estimate (min over rows) with the exact counts.
    with open(counts_dir) as f:
        for l in f:
            items= l.strip().split('\t')
            word= int( items[0])
            count =int(items[1])
            Fhat= np.min( [ counts_dict[idx][ h(x=word)] for idx,h in enumerate(hash_list)] )
            # Count-min only over-estimates, so this relative error is >= 0.
            Er.append( (Fhat-count)/ count )
            wordFreq.append( count/t)
    plt.figure(figsize=(20,10))
    plt.loglog(wordFreq,Er,"+")
    plt.title('Relative Error(log) vs Word Frequency(log)')
    plt.xlabel('Word Frequency(log)')
    plt.ylabel('Relative Error(log)')
    plt.grid()
    plt.show()
plt.show()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.log",
"numpy.exp",
"matplotlib.pyplot.figure",
"collections.defaultdict",
"os.path.abspath",
"matplotlib.pyplot.title",
"numpy.loadtxt",
"matplotlib.pyplot.show"
] | [((526, 571), 'numpy.loadtxt', 'np.loadtxt', (['"""hash_params.txt"""'], {'delimiter': '"""\t"""'}), "('hash_params.txt', delimiter='\\t')\n", (536, 571), True, 'import numpy as np\n'), ((583, 593), 'numpy.exp', 'np.exp', (['(-5)'], {}), '(-5)\n', (589, 593), True, 'import numpy as np\n'), ((1662, 1690), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (1672, 1690), True, 'import matplotlib.pyplot as plt\n'), ((1694, 1723), 'matplotlib.pyplot.loglog', 'plt.loglog', (['wordFreq', 'Er', '"""+"""'], {}), "(wordFreq, Er, '+')\n", (1704, 1723), True, 'import matplotlib.pyplot as plt\n'), ((1727, 1782), 'matplotlib.pyplot.title', 'plt.title', (['"""Relative Error(log) vs Word Frequency(log)"""'], {}), "('Relative Error(log) vs Word Frequency(log)')\n", (1736, 1782), True, 'import matplotlib.pyplot as plt\n'), ((1787, 1820), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Word Frequency(log)"""'], {}), "('Word Frequency(log)')\n", (1797, 1820), True, 'import matplotlib.pyplot as plt\n'), ((1825, 1858), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative Error(log)"""'], {}), "('Relative Error(log)')\n", (1835, 1858), True, 'import matplotlib.pyplot as plt\n'), ((1863, 1873), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1871, 1873), True, 'import matplotlib.pyplot as plt\n'), ((1878, 1888), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1886, 1888), True, 'import matplotlib.pyplot as plt\n'), ((604, 613), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (610, 613), True, 'import numpy as np\n'), ((914, 930), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (925, 930), False, 'from collections import defaultdict\n'), ((405, 430), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (420, 430), False, 'import os\n'), ((652, 669), 'numpy.log', 'np.log', (['(1 / delta)'], {}), '(1 / delta)\n', (658, 669), True, 'import numpy as np\n'), ((700, 709), 'numpy.exp', 
'np.exp', (['(1)'], {}), '(1)\n', (706, 709), True, 'import numpy as np\n')] |
import numpy as np
from .qnumber import is_qsparse
__all__ = ['retained_bond_indices', 'split_matrix_svd', 'qr']
def retained_bond_indices(s, tol):
    """
    Indices of retained singular values based on given tolerance.
    """
    total = np.linalg.norm(s)
    if total == 0:
        return np.array([], dtype=int)
    # Squared, normalized singular values.
    squared = (s / total)**2
    # Replace each value by the running sum taken from smallest to largest,
    # so an entry survives when the cumulative weight up to it exceeds tol.
    order = np.argsort(squared)
    squared[order] = np.cumsum(squared[order])
    return np.where(squared > tol)[0]
def split_matrix_svd(A, q0, q1, tol):
    """
    Split a matrix by singular value decomposition,
    taking block sparsity structure dictated by quantum numbers into account,
    and truncate small singular values based on tolerance.

    `q0` and `q1` are the quantum numbers attached to the rows and columns of
    `A`; `A[i, j]` can only be non-zero when `q0[i] == q1[j]`.  Returns
    `(u, s, v, q)` with `A ~= u @ diag(s) @ v` and `q` the quantum numbers of
    the intermediate (bond) dimension.
    """
    assert A.ndim == 2
    assert len(q0) == A.shape[0]
    assert len(q1) == A.shape[1]
    assert is_qsparse(A, [q0, -q1])
    # find common quantum numbers
    qis = np.intersect1d(q0, q1)
    if len(qis) == 0:
        assert np.linalg.norm(A) == 0
        # special case: no common quantum numbers;
        # use dummy intermediate dimension 1
        u = np.zeros((A.shape[0], 1), dtype=A.dtype)
        v = np.zeros((1, A.shape[1]), dtype=A.dtype)
        s = np.zeros(1)
        # single column of 'u' should have norm 1
        if A.shape[0] > 0:
            u[0, 0] = 1
        # ensure non-zero entry in 'u' formally matches quantum numbers
        q = q0[:1]
        # 'v' must remain zero matrix to satisfy quantum number constraints
        return (u, s, v, q)
    # require NumPy arrays for indexing
    q0 = np.array(q0)
    q1 = np.array(q1)
    # sort quantum numbers and arrange entries in A accordingly;
    # using mergesort to avoid permutations of identical quantum numbers
    idx0 = np.argsort(q0, kind='mergesort')
    idx1 = np.argsort(q1, kind='mergesort')
    if np.any(idx0 - np.arange(len(idx0))):
        # if not sorted yet...
        q0 = q0[idx0]
        A = A[idx0, :]
    if np.any(idx1 - np.arange(len(idx1))):
        # if not sorted yet...
        q1 = q1[idx1]
        A = A[:, idx1]
    # maximum intermediate dimension
    max_interm_dim = min(A.shape)
    # keep track of intermediate dimension
    D = 0
    # allocate memory for U and V matrices, singular values and
    # corresponding intermediate quantum numbers
    u = np.zeros((A.shape[0], max_interm_dim), dtype=A.dtype)
    v = np.zeros((max_interm_dim, A.shape[1]), dtype=A.dtype)
    s = np.zeros(max_interm_dim)
    q = np.zeros(max_interm_dim, dtype=q0.dtype)
    # for each shared quantum number...
    for qn in qis:
        # indices of current quantum number
        # (after sorting, rows/columns of one quantum number are contiguous)
        iqn = np.where(q0 == qn)[0]; i0 = iqn[0]; i1 = iqn[-1] + 1
        iqn = np.where(q1 == qn)[0]; j0 = iqn[0]; j1 = iqn[-1] + 1
        # perform SVD decomposition of current block
        usub, ssub, vsub = np.linalg.svd(A[i0:i1, j0:j1], full_matrices=False)
        # update intermediate dimension
        Dprev = D
        D += len(ssub)
        u[i0:i1, Dprev:D] = usub
        v[Dprev:D, j0:j1] = vsub
        s[Dprev:D] = ssub
        q[Dprev:D] = qn
    assert D <= max_interm_dim
    # use actual intermediate dimensions
    u = u[:, :D]
    v = v[:D, :]
    s = s[:D]
    q = q[:D]
    # truncate small singular values
    idx = retained_bond_indices(s, tol)
    u = u[:, idx]
    v = v[idx, :]
    s = s[idx]
    q = q[idx]
    # undo sorting of quantum numbers
    if np.any(idx0 - np.arange(len(idx0))):
        u = u[np.argsort(idx0), :]
    if np.any(idx1 - np.arange(len(idx1))):
        v = v[:, np.argsort(idx1)]
    return (u, s, v, q)
def qr(A, q0, q1):
    """
    Compute the block-wise QR decompositions of a matrix, taking block sparsity
    structure dictated by quantum numbers into account (that is, `A[i, j]` can
    only be non-zero if `q0[i] == q1[j]`).

    The resulting R matrix is not necessarily upper triangular due to
    reordering of entries.

    Returns `(Q, R, qinterm)` with `A == Q @ R` and `qinterm` the quantum
    numbers attached to the intermediate dimension.
    """
    assert A.ndim == 2
    assert len(q0) == A.shape[0]
    assert len(q1) == A.shape[1]
    assert is_qsparse(A, [q0, -q1])
    # find common quantum numbers
    qis = np.intersect1d(q0, q1)
    if len(qis) == 0:
        assert np.linalg.norm(A) == 0
        # special case: no common quantum numbers;
        # use dummy intermediate dimension 1 with all entries in 'R' set to zero
        Q = np.zeros((A.shape[0], 1), dtype=A.dtype)
        R = np.zeros((1, A.shape[1]), dtype=A.dtype)
        # single column of 'Q' should have norm 1
        Q[0, 0] = 1
        # ensure non-zero entry in 'Q' formally matches quantum numbers
        qinterm = q0[:1]
        return (Q, R, qinterm)
    # require NumPy arrays for indexing
    q0 = np.array(q0)
    q1 = np.array(q1)
    # sort quantum numbers and arrange entries in A accordingly;
    # using mergesort to avoid permutations of identical quantum numbers
    idx0 = np.argsort(q0, kind='mergesort')
    idx1 = np.argsort(q1, kind='mergesort')
    if np.any(idx0 - np.arange(len(idx0))):
        # if not sorted yet...
        q0 = q0[idx0]
        A = A[idx0, :]
    if np.any(idx1 - np.arange(len(idx1))):
        # if not sorted yet...
        q1 = q1[idx1]
        A = A[:, idx1]
    # maximum intermediate dimension
    max_interm_dim = min(A.shape)
    # keep track of intermediate dimension
    D = 0
    Q = np.zeros((A.shape[0], max_interm_dim), dtype=A.dtype)
    R = np.zeros((max_interm_dim, A.shape[1]), dtype=A.dtype)
    # corresponding intermediate quantum numbers
    qinterm = np.zeros(max_interm_dim, dtype=q0.dtype)
    # for each shared quantum number...
    for qn in qis:
        # indices of current quantum number
        # (after sorting, rows/columns of one quantum number are contiguous)
        iqn = np.where(q0 == qn)[0]; i0 = iqn[0]; i1 = iqn[-1] + 1
        iqn = np.where(q1 == qn)[0]; j0 = iqn[0]; j1 = iqn[-1] + 1
        # perform QR decomposition of current block
        Qsub, Rsub = np.linalg.qr(A[i0:i1, j0:j1], mode='reduced')
        # update intermediate dimension
        Dprev = D
        D += Qsub.shape[1]
        Q[i0:i1, Dprev:D] = Qsub
        R[Dprev:D, j0:j1] = Rsub
        qinterm[Dprev:D] = qn
    assert D <= max_interm_dim
    # use actual intermediate dimensions
    Q = Q[:, :D]
    R = R[:D, :]
    qinterm = qinterm[:D]
    # undo sorting of quantum numbers
    if np.any(idx0 - np.arange(len(idx0))):
        Q = Q[np.argsort(idx0), :]
    if np.any(idx1 - np.arange(len(idx1))):
        R = R[:, np.argsort(idx1)]
    return (Q, R, qinterm)
| [
"numpy.intersect1d",
"numpy.linalg.qr",
"numpy.where",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.linalg.norm",
"numpy.linalg.svd",
"numpy.cumsum"
] | [((241, 258), 'numpy.linalg.norm', 'np.linalg.norm', (['s'], {}), '(s)\n', (255, 258), True, 'import numpy as np\n'), ((423, 436), 'numpy.argsort', 'np.argsort', (['s'], {}), '(s)\n', (433, 436), True, 'import numpy as np\n'), ((455, 477), 'numpy.cumsum', 'np.cumsum', (['s[sort_idx]'], {}), '(s[sort_idx])\n', (464, 477), True, 'import numpy as np\n'), ((926, 948), 'numpy.intersect1d', 'np.intersect1d', (['q0', 'q1'], {}), '(q0, q1)\n', (940, 948), True, 'import numpy as np\n'), ((1582, 1594), 'numpy.array', 'np.array', (['q0'], {}), '(q0)\n', (1590, 1594), True, 'import numpy as np\n'), ((1604, 1616), 'numpy.array', 'np.array', (['q1'], {}), '(q1)\n', (1612, 1616), True, 'import numpy as np\n'), ((1767, 1799), 'numpy.argsort', 'np.argsort', (['q0'], {'kind': '"""mergesort"""'}), "(q0, kind='mergesort')\n", (1777, 1799), True, 'import numpy as np\n'), ((1811, 1843), 'numpy.argsort', 'np.argsort', (['q1'], {'kind': '"""mergesort"""'}), "(q1, kind='mergesort')\n", (1821, 1843), True, 'import numpy as np\n'), ((2332, 2385), 'numpy.zeros', 'np.zeros', (['(A.shape[0], max_interm_dim)'], {'dtype': 'A.dtype'}), '((A.shape[0], max_interm_dim), dtype=A.dtype)\n', (2340, 2385), True, 'import numpy as np\n'), ((2394, 2447), 'numpy.zeros', 'np.zeros', (['(max_interm_dim, A.shape[1])'], {'dtype': 'A.dtype'}), '((max_interm_dim, A.shape[1]), dtype=A.dtype)\n', (2402, 2447), True, 'import numpy as np\n'), ((2456, 2480), 'numpy.zeros', 'np.zeros', (['max_interm_dim'], {}), '(max_interm_dim)\n', (2464, 2480), True, 'import numpy as np\n'), ((2489, 2529), 'numpy.zeros', 'np.zeros', (['max_interm_dim'], {'dtype': 'q0.dtype'}), '(max_interm_dim, dtype=q0.dtype)\n', (2497, 2529), True, 'import numpy as np\n'), ((4109, 4131), 'numpy.intersect1d', 'np.intersect1d', (['q0', 'q1'], {}), '(q0, q1)\n', (4123, 4131), True, 'import numpy as np\n'), ((4679, 4691), 'numpy.array', 'np.array', (['q0'], {}), '(q0)\n', (4687, 4691), True, 'import numpy as np\n'), ((4701, 4713), 'numpy.array', 
'np.array', (['q1'], {}), '(q1)\n', (4709, 4713), True, 'import numpy as np\n'), ((4864, 4896), 'numpy.argsort', 'np.argsort', (['q0'], {'kind': '"""mergesort"""'}), "(q0, kind='mergesort')\n", (4874, 4896), True, 'import numpy as np\n'), ((4908, 4940), 'numpy.argsort', 'np.argsort', (['q1'], {'kind': '"""mergesort"""'}), "(q1, kind='mergesort')\n", (4918, 4940), True, 'import numpy as np\n'), ((5316, 5369), 'numpy.zeros', 'np.zeros', (['(A.shape[0], max_interm_dim)'], {'dtype': 'A.dtype'}), '((A.shape[0], max_interm_dim), dtype=A.dtype)\n', (5324, 5369), True, 'import numpy as np\n'), ((5378, 5431), 'numpy.zeros', 'np.zeros', (['(max_interm_dim, A.shape[1])'], {'dtype': 'A.dtype'}), '((max_interm_dim, A.shape[1]), dtype=A.dtype)\n', (5386, 5431), True, 'import numpy as np\n'), ((5496, 5536), 'numpy.zeros', 'np.zeros', (['max_interm_dim'], {'dtype': 'q0.dtype'}), '(max_interm_dim, dtype=q0.dtype)\n', (5504, 5536), True, 'import numpy as np\n'), ((289, 312), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (297, 312), True, 'import numpy as np\n'), ((490, 507), 'numpy.where', 'np.where', (['(s > tol)'], {}), '(s > tol)\n', (498, 507), True, 'import numpy as np\n'), ((1118, 1158), 'numpy.zeros', 'np.zeros', (['(A.shape[0], 1)'], {'dtype': 'A.dtype'}), '((A.shape[0], 1), dtype=A.dtype)\n', (1126, 1158), True, 'import numpy as np\n'), ((1171, 1211), 'numpy.zeros', 'np.zeros', (['(1, A.shape[1])'], {'dtype': 'A.dtype'}), '((1, A.shape[1]), dtype=A.dtype)\n', (1179, 1211), True, 'import numpy as np\n'), ((1224, 1235), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (1232, 1235), True, 'import numpy as np\n'), ((2849, 2900), 'numpy.linalg.svd', 'np.linalg.svd', (['A[i0:i1, j0:j1]'], {'full_matrices': '(False)'}), '(A[i0:i1, j0:j1], full_matrices=False)\n', (2862, 2900), True, 'import numpy as np\n'), ((4337, 4377), 'numpy.zeros', 'np.zeros', (['(A.shape[0], 1)'], {'dtype': 'A.dtype'}), '((A.shape[0], 1), dtype=A.dtype)\n', (4345, 4377), True, 
'import numpy as np\n'), ((4390, 4430), 'numpy.zeros', 'np.zeros', (['(1, A.shape[1])'], {'dtype': 'A.dtype'}), '((1, A.shape[1]), dtype=A.dtype)\n', (4398, 4430), True, 'import numpy as np\n'), ((5849, 5894), 'numpy.linalg.qr', 'np.linalg.qr', (['A[i0:i1, j0:j1]'], {'mode': '"""reduced"""'}), "(A[i0:i1, j0:j1], mode='reduced')\n", (5861, 5894), True, 'import numpy as np\n'), ((987, 1004), 'numpy.linalg.norm', 'np.linalg.norm', (['A'], {}), '(A)\n', (1001, 1004), True, 'import numpy as np\n'), ((2648, 2666), 'numpy.where', 'np.where', (['(q0 == qn)'], {}), '(q0 == qn)\n', (2656, 2666), True, 'import numpy as np\n'), ((2715, 2733), 'numpy.where', 'np.where', (['(q1 == qn)'], {}), '(q1 == qn)\n', (2723, 2733), True, 'import numpy as np\n'), ((4170, 4187), 'numpy.linalg.norm', 'np.linalg.norm', (['A'], {}), '(A)\n', (4184, 4187), True, 'import numpy as np\n'), ((5655, 5673), 'numpy.where', 'np.where', (['(q0 == qn)'], {}), '(q0 == qn)\n', (5663, 5673), True, 'import numpy as np\n'), ((5722, 5740), 'numpy.where', 'np.where', (['(q1 == qn)'], {}), '(q1 == qn)\n', (5730, 5740), True, 'import numpy as np\n'), ((3477, 3493), 'numpy.argsort', 'np.argsort', (['idx0'], {}), '(idx0)\n', (3487, 3493), True, 'import numpy as np\n'), ((3559, 3575), 'numpy.argsort', 'np.argsort', (['idx1'], {}), '(idx1)\n', (3569, 3575), True, 'import numpy as np\n'), ((6309, 6325), 'numpy.argsort', 'np.argsort', (['idx0'], {}), '(idx0)\n', (6319, 6325), True, 'import numpy as np\n'), ((6391, 6407), 'numpy.argsort', 'np.argsort', (['idx1'], {}), '(idx1)\n', (6401, 6407), True, 'import numpy as np\n')] |
import copy
import inspect
import itertools
import types
import warnings
from typing import Any, Dict
import numpy as np
from axelrod import _module_random
from axelrod.action import Action
from axelrod.game import DefaultGame
from axelrod.history import History
from axelrod.random_ import RandomGenerator
C, D = Action.C, Action.D
class PostInitCaller(type):
    """Metaclass that runs post-``__init__`` hooks on every new instance.

    After the normal construction sequence — ``__new__`` followed by the full
    chain of ``__init__`` calls, including any subclass overrides — completes,
    the metaclass invokes ``_post_init`` and then ``_post_transform`` on the
    freshly built object.  For a hierarchy such as::

        class Player(object, metaclass=PostInitCaller): ...
        class DerivedPlayer(Player): ...

    instantiating ``DerivedPlayer()`` triggers, in order:

    * ``PostInitCaller.__call__``
    * ``Player.__new__``
    * ``DerivedPlayer.__init__`` (which calls ``Player.__init__``)
    * ``DerivedPlayer._post_init`` (which calls ``Player._post_init``)
    * ``Player._post_transform``

    See https://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/
    to learn more about metaclass ``__call__``.
    """

    def __call__(cls, *args, **kwargs):
        # type.__call__ performs the usual cls.__new__ + cls.__init__ steps.
        instance = type.__call__(cls, *args, **kwargs)
        # Run the hooks only after *every* __init__ in the hierarchy has
        # finished, so post-transform reclassification sees the final state.
        instance._post_init()
        instance._post_transform()
        return instance
class Player(object, metaclass=PostInitCaller):
"""A class for a player in the tournament.
This is an abstract base class, not intended to be used directly.
"""
name = "Player"
classifier = {} # type: Dict[str, Any]
_reclassifiers = []
    def __new__(cls, *args, **kwargs):
        """Caches arguments for Player cloning."""
        obj = super().__new__(cls)
        # Record the fully-resolved __init__ arguments so clone() can later
        # rebuild an equivalent player (see init_params and clone).
        obj.init_kwargs = cls.init_params(*args, **kwargs)
        return obj
@classmethod
def init_params(cls, *args, **kwargs):
"""
Return a dictionary containing the init parameters of a strategy
(without 'self').
Use *args and **kwargs as value if specified
and complete the rest with the default values.
"""
sig = inspect.signature(cls.__init__)
# The 'self' parameter needs to be removed or the first *args will be
# assigned to it
self_param = sig.parameters.get("self")
new_params = list(sig.parameters.values())
new_params.remove(self_param)
sig = sig.replace(parameters=new_params)
boundargs = sig.bind_partial(*args, **kwargs)
boundargs.apply_defaults()
return boundargs.arguments
    def __init__(self):
        """Initial class setup."""
        # Fresh, empty play history for this player.
        self._history = History()
        # Deep copy so per-instance reclassification cannot mutate the
        # class-level classifier dict shared by all instances.
        self.classifier = copy.deepcopy(self.classifier)
        self.set_match_attributes()
    def _post_init(self):
        """Post initialization tasks such as reclassifying the strategy."""
        # Hook invoked by the PostInitCaller metaclass after all __init__
        # calls complete; subclasses override as needed.
        pass
def _post_transform(self):
"""Handles post transform tasks such as further reclassifying."""
# Reclassify strategy post __init__, if needed.
for (reclassifier, args, kwargs) in self._reclassifiers:
self.classifier = reclassifier(self.classifier, *args, **kwargs)
    def __eq__(self, other):
        """
        Test if two players are equal, ignoring random seed and RNG state.

        Compares reprs first, then every instance attribute of either
        player.  Generator/cycle attributes are compared element-wise (up
        to 200 elements) via itertools.tee, and the originals are restored
        so comparison does not consume them.
        """
        if self.__repr__() != other.__repr__():
            return False
        for attribute in set(
            list(self.__dict__.keys()) + list(other.__dict__.keys())
        ):
            value = getattr(self, attribute, None)
            other_value = getattr(other, attribute, None)
            if attribute in ["_random", "_seed"]:
                # Don't compare the random generators.
                continue
            if isinstance(value, np.ndarray):
                # Arrays need element-wise comparison, not == (ambiguous).
                if not (np.array_equal(value, other_value)):
                    return False
            elif isinstance(value, types.GeneratorType) or isinstance(
                value, itertools.cycle
            ):
                # Split the original generator so it is not touched
                generator, original_value = itertools.tee(value)
                other_generator, original_other_value = itertools.tee(
                    other_value
                )
                # Put an equivalent, unconsumed iterator back on each player.
                if isinstance(value, types.GeneratorType):
                    setattr(self, attribute, (ele for ele in original_value))
                    setattr(
                        other, attribute, (ele for ele in original_other_value)
                    )
                else:
                    setattr(self, attribute, itertools.cycle(original_value))
                    setattr(
                        other, attribute, itertools.cycle(original_other_value)
                    )
                # Compare at most 200 elements of the two iterators.
                for _ in range(200):
                    try:
                        if next(generator) != next(other_generator):
                            return False
                    except StopIteration:
                        break
            # Code for a strange edge case where each strategy points at each
            # other
            elif value is other and other_value is self:
                pass
            else:
                if value != other_value:
                    return False
        return True
    def receive_match_attributes(self):
        """Hook called after ``set_match_attributes``; no-op by default."""
        # Overwrite this function if your strategy needs
        # to make use of match_attributes such as
        # the game matrix, the number of rounds or the noise
        pass
def set_match_attributes(self, length=-1, game=None, noise=0):
if not game:
game = DefaultGame
self.match_attributes = {"length": length, "game": game, "noise": noise}
self.receive_match_attributes()
    def set_seed(self, seed):
        """Set a random seed for the player's random number generator.

        If ``seed`` is None a seed is drawn from the module-level RNG and
        a warning is emitted, since results are then not reproducible.
        """
        if seed is None:
            warnings.warn(
                "Initializing player with seed from Axelrod module random number generator. "
                "Results may not be seed reproducible."
            )
            self._seed = _module_random.random_seed_int()
        else:
            self._seed = seed
        self._random = RandomGenerator(seed=self._seed)
def __repr__(self):
"""The string method for the strategy.
Appends the `__init__` parameters to the strategy's name."""
name = self.name
prefix = ": "
gen = (
value for value in self.init_kwargs.values() if value is not None
)
for value in gen:
try:
if issubclass(value, Player):
value = value.name
except TypeError:
pass
name = "".join([name, prefix, str(value)])
prefix = ", "
return name
    def __getstate__(self):
        """Used for pickling. Override if Player contains unpickleable attributes."""
        return self.__dict__
    def strategy(self, opponent):
        """This is a placeholder strategy.

        Subclasses must override this with their actual decision rule.
        """
        raise NotImplementedError()
    def clone(self):
        """Clones the player without history, reapplying configuration
        parameters as necessary."""
        # You may be tempted to re-implement using the `copy` module
        # Note that this would require a deepcopy in some cases and there may
        # be significant changes required throughout the library.
        # Consider overriding in special cases only if necessary
        cls = self.__class__
        # Re-construct from the recorded __init__ kwargs, then carry over
        # the current match attributes (a shallow copy suffices here).
        new_player = cls(**self.init_kwargs)
        new_player.match_attributes = copy.copy(self.match_attributes)
        return new_player
    def reset(self):
        """Resets a player to its initial state
        This method is called at the beginning of each match (between a pair
        of players) to reset a player's state to its initial starting point.
        It ensures that no 'memory' of previous matches is carried forward.
        """
        # This also resets the history.
        self.__init__(**self.init_kwargs)
    def update_history(self, play, coplay):
        """Append one round (own play, opponent's play) to the history."""
        self.history.append(play, coplay)
    @property
    def history(self):
        """The play history of this player for the current match."""
        return self._history
    # Properties maintained for legacy API, can refactor to self.history.X
    # in 5.0.0 to reduce function call overhead.
    @property
    def cooperations(self):
        """Number of cooperations so far, delegated to the history."""
        return self._history.cooperations
    @property
    def defections(self):
        """Number of defections so far, delegated to the history."""
        return self._history.defections
    @property
    def state_distribution(self):
        """Distribution of (own, opponent) action pairs, from the history."""
        return self._history.state_distribution
| [
"axelrod.random_.RandomGenerator",
"itertools.cycle",
"axelrod._module_random.random_seed_int",
"itertools.tee",
"inspect.signature",
"numpy.array_equal",
"axelrod.history.History",
"copy.deepcopy",
"warnings.warn",
"copy.copy"
] | [((2878, 2909), 'inspect.signature', 'inspect.signature', (['cls.__init__'], {}), '(cls.__init__)\n', (2895, 2909), False, 'import inspect\n'), ((3407, 3416), 'axelrod.history.History', 'History', ([], {}), '()\n', (3414, 3416), False, 'from axelrod.history import History\n'), ((3443, 3473), 'copy.deepcopy', 'copy.deepcopy', (['self.classifier'], {}), '(self.classifier)\n', (3456, 3473), False, 'import copy\n'), ((6948, 6980), 'axelrod.random_.RandomGenerator', 'RandomGenerator', ([], {'seed': 'self._seed'}), '(seed=self._seed)\n', (6963, 6980), False, 'from axelrod.random_ import RandomGenerator\n'), ((8334, 8366), 'copy.copy', 'copy.copy', (['self.match_attributes'], {}), '(self.match_attributes)\n', (8343, 8366), False, 'import copy\n'), ((6644, 6783), 'warnings.warn', 'warnings.warn', (['"""Initializing player with seed from Axelrod module random number generator. Results may not be seed reproducible."""'], {}), "(\n 'Initializing player with seed from Axelrod module random number generator. Results may not be seed reproducible.'\n )\n", (6657, 6783), False, 'import warnings\n'), ((6848, 6880), 'axelrod._module_random.random_seed_int', '_module_random.random_seed_int', ([], {}), '()\n', (6878, 6880), False, 'from axelrod import _module_random\n'), ((4555, 4589), 'numpy.array_equal', 'np.array_equal', (['value', 'other_value'], {}), '(value, other_value)\n', (4569, 4589), True, 'import numpy as np\n'), ((4863, 4883), 'itertools.tee', 'itertools.tee', (['value'], {}), '(value)\n', (4876, 4883), False, 'import itertools\n'), ((4940, 4966), 'itertools.tee', 'itertools.tee', (['other_value'], {}), '(other_value)\n', (4953, 4966), False, 'import itertools\n'), ((5341, 5372), 'itertools.cycle', 'itertools.cycle', (['original_value'], {}), '(original_value)\n', (5356, 5372), False, 'import itertools\n'), ((5445, 5482), 'itertools.cycle', 'itertools.cycle', (['original_other_value'], {}), '(original_other_value)\n', (5460, 5482), False, 'import itertools\n')] |
from typing import List
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from chemcharts.core.container.chemdata import ChemData
from chemcharts.core.plots.base_plot import BasePlot, _check_value_input
from chemcharts.core.utils.value_functions import generate_value
from chemcharts.core.utils.enums import PlottingEnum
from chemcharts.core.utils.enums import PlotLabellingEnum
_PE = PlottingEnum
_PLE = PlotLabellingEnum
class HistogramPlot(BasePlot):
    """Histogram plot of the value column of one or more ChemData objects."""
    def __init__(self):
        super().__init__()
    def plot(self, chemdata_list: List[ChemData], parameters: dict, settings: dict):
        """Generate one histogram per ChemData object and merge them into one figure.

        Parameters
        ----------
        chemdata_list : List[ChemData]
            Input data sets; one temporary plot is produced per entry.
        parameters : dict
            Plot parameters (bins, color, titles, value selection, ...).
        settings : dict
            Output settings (path, figure size, format, DPI, ...).
        """
        # no warning message for multiple chemdata object inputs since normalisation
        # for xlim and ylim is here anyways applied
        # checks whether there is a value input
        value_input_result = _check_value_input(chemdata_list, "Histogram")
        # checks whether there are multiple input objects
        if value_input_result:  # checks whether _check_value_input function returns 'True'
            # lim setting
            xlim, ylim, valuelim = self._get_lims(chemdata_list=chemdata_list,
                                                  parameters=parameters)
            # final path setting
            final_path = settings.get(_PE.SETTINGS_PATH, None)
            self._prepare_folder(path=final_path)
            # temp path setting
            temp_folder_path, temp_plots_path_list = self._generate_temp_paths(number_paths=len(chemdata_list))
            max_columns = 3
            # loop over ChemData objects and generate plots
            for idx in range(len(chemdata_list)):
                fig, axs = plt.subplots()
                value_column, value_name = generate_value(chemdata_list=chemdata_list,
                                                           parameters=parameters,
                                                           idx=idx)
                # TODO fix tanimoto
                """
                # include tanimoto_similarity
                if selection == "tanimoto_similarity":
                    value_input = chemdata.get_tanimoto_similarity()
                    value_name = "Tanimoto Similarity"
                elif selection == "value":
                    value_input = chemdata.get_values()
                    value_name = parameters.get(_PE.V
                else:
                    raise ValueError(f"Selection input: {selection} is not as expected.")
                """
                # generate data frame
                scatter_df = pd.DataFrame({_PLE.UMAP_1: chemdata_list[idx].get_embedding().np_array[:, 0],
                                           _PLE.UMAP_2: chemdata_list[idx].get_embedding().np_array[:, 1],
                                           value_name: value_column})
                sns.set_context("talk",
                                font_scale=0.5)
                # deal with axs issue (array if multiple input, otherwise not)
                if isinstance(axs, np.ndarray):
                    row_pos = int(idx / max_columns)
                    col_pos = idx % max_columns
                    # makes sure that array is 2D, even if only one row
                    axs = np.atleast_2d(axs)
                    selected_axis = axs[row_pos, col_pos]
                else:
                    selected_axis = axs
                # generate seaborn histplot
                sns.histplot(scatter_df[value_name],
                             element="step",
                             bins=parameters.get(_PE.PARAMETERS_BINS, 20),
                             stat="proportion",
                             kde=True,
                             color=parameters.get(_PE.PARAMETERS_PLOT_COLOR, "#d11d80"),
                             ax=selected_axis)
                # Setting axs ranges (for this plot only x and y axis ranges from 0 to 1 make sense)
                if xlim is not None or ylim is not None:
                    print("Histogram plot does not support setting arbitrary axis limits.")
                plt.xlim(0, 1)
                plt.ylim(0, 1)
                plt.gcf().set_size_inches(settings.get(_PE.SETTINGS_FIG_SIZE, (7, 7)))
                plt.subplots_adjust(top=parameters.get(_PE.PARAMETERS_PLOT_ADJUST_TOP, 0.9))
                plt.xlabel(parameters.get(_PE.PARAMETERS_VALUENAME, "Value"), fontsize=10)
                # Fall back to a positional name when the data set is unnamed.
                name = f"Dataset_{idx}" if chemdata_list[idx].get_name() == "" else chemdata_list[idx].get_name()
                plt.suptitle(name,
                             fontsize=parameters.get(_PE.PARAMETERS_PLOT_TITLE_FONTSIZE, 14))
                plt.savefig(temp_plots_path_list[idx],
                            format=settings.get(_PE.SETTINGS_FIG_FORMAT, 'png'),
                            dpi=settings.get(_PE.SETTINGS_FIG_DPI, _PE.SETTINGS_FIG_DPI_DEFAULT))
                plt.close("all")
            # Stitch the per-dataset plots into one figure, then clean up.
            self._merge_multiple_plots(subplot_paths=temp_plots_path_list,
                                       merged_path=final_path,
                                       title=parameters.get(_PE.PARAMETERS_PLOT_TITLE, "Histogram ChemCharts Plot"))
            self._clear_temp_dir(path=temp_folder_path)
| [
"numpy.atleast_2d",
"matplotlib.pyplot.gcf",
"seaborn.set_context",
"matplotlib.pyplot.close",
"chemcharts.core.utils.value_functions.generate_value",
"chemcharts.core.plots.base_plot._check_value_input",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplots"
] | [((854, 900), 'chemcharts.core.plots.base_plot._check_value_input', '_check_value_input', (['chemdata_list', '"""Histogram"""'], {}), "(chemdata_list, 'Histogram')\n", (872, 900), False, 'from chemcharts.core.plots.base_plot import BasePlot, _check_value_input\n'), ((1693, 1707), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1705, 1707), True, 'import matplotlib.pyplot as plt\n'), ((1752, 1827), 'chemcharts.core.utils.value_functions.generate_value', 'generate_value', ([], {'chemdata_list': 'chemdata_list', 'parameters': 'parameters', 'idx': 'idx'}), '(chemdata_list=chemdata_list, parameters=parameters, idx=idx)\n', (1766, 1827), False, 'from chemcharts.core.utils.value_functions import generate_value\n'), ((2857, 2896), 'seaborn.set_context', 'sns.set_context', (['"""talk"""'], {'font_scale': '(0.5)'}), "('talk', font_scale=0.5)\n", (2872, 2896), True, 'import seaborn as sns\n'), ((4104, 4118), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (4112, 4118), True, 'import matplotlib.pyplot as plt\n'), ((4135, 4149), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (4143, 4149), True, 'import matplotlib.pyplot as plt\n'), ((4918, 4934), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4927, 4934), True, 'import matplotlib.pyplot as plt\n'), ((3257, 3275), 'numpy.atleast_2d', 'np.atleast_2d', (['axs'], {}), '(axs)\n', (3270, 3275), True, 'import numpy as np\n'), ((4167, 4176), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4174, 4176), True, 'import matplotlib.pyplot as plt\n')] |
from scipy.stats import multivariate_normal as normal
import numpy as np
from time import time
from experiments.lnpdfs.create_target_lnpfs import build_target_likelihood_planar_n_link
from sampler.SliceSampling.slice_sampler import slice_sample
num_dimensions = 10
# Configuration-space likelihood variances; the first joint gets a much
# larger variance than the remaining ones.
conf_likelihood_var = 4e-2 * np.ones(num_dimensions)
conf_likelihood_var[0] = 1
# Variance of the Cartesian end-effector likelihood (x, y).
cart_likelihood_var = np.array([1e-4, 1e-4])
# Target log-density of the planar n-link experiment (first return value).
lnpdf = build_target_likelihood_planar_n_link(num_dimensions, conf_likelihood_var, cart_likelihood_var)[0]
# Zero-mean Gaussian prior; the chain is initialised with one draw from it.
prior = normal(np.zeros((num_dimensions)), conf_likelihood_var * np.eye((num_dimensions)))
initial = prior.rvs(1)
def sample(n_samps, sigma, path):
    """Run slice sampling on ``lnpdf`` and save the results.

    n_samps: number of samples to draw.
    sigma: step width, applied uniformly to all dimensions.
    path: output prefix; results go to ``<path>processed_data.npz``.
    """
    start = time()
    [samples, fevals, timestamps] = slice_sample(lnpdf, initial, n_samps, sigma * np.ones(num_dimensions))
    # Make timestamps relative to the start of sampling.
    timestamps -= start
    # Reshape to (n_timestamps, -1, num_dimensions); copy for contiguity.
    samples = samples.transpose().reshape(len(timestamps),-1,num_dimensions).copy()
    np.savez(path + 'processed_data', samples=samples, fevals=fevals, timestamps = timestamps)
sample(100, 0.1, "slice_test")
print("done")
| [
"numpy.savez",
"numpy.eye",
"numpy.ones",
"experiments.lnpdfs.create_target_lnpfs.build_target_likelihood_planar_n_link",
"numpy.array",
"numpy.zeros",
"time.time"
] | [((368, 394), 'numpy.array', 'np.array', (['[0.0001, 0.0001]'], {}), '([0.0001, 0.0001])\n', (376, 394), True, 'import numpy as np\n'), ((295, 318), 'numpy.ones', 'np.ones', (['num_dimensions'], {}), '(num_dimensions)\n', (302, 318), True, 'import numpy as np\n'), ((400, 499), 'experiments.lnpdfs.create_target_lnpfs.build_target_likelihood_planar_n_link', 'build_target_likelihood_planar_n_link', (['num_dimensions', 'conf_likelihood_var', 'cart_likelihood_var'], {}), '(num_dimensions, conf_likelihood_var,\n cart_likelihood_var)\n', (437, 499), False, 'from experiments.lnpdfs.create_target_lnpfs import build_target_likelihood_planar_n_link\n'), ((515, 539), 'numpy.zeros', 'np.zeros', (['num_dimensions'], {}), '(num_dimensions)\n', (523, 539), True, 'import numpy as np\n'), ((661, 667), 'time.time', 'time', ([], {}), '()\n', (665, 667), False, 'from time import time\n'), ((887, 979), 'numpy.savez', 'np.savez', (["(path + 'processed_data')"], {'samples': 'samples', 'fevals': 'fevals', 'timestamps': 'timestamps'}), "(path + 'processed_data', samples=samples, fevals=fevals,\n timestamps=timestamps)\n", (895, 979), True, 'import numpy as np\n'), ((565, 587), 'numpy.eye', 'np.eye', (['num_dimensions'], {}), '(num_dimensions)\n', (571, 587), True, 'import numpy as np\n'), ((750, 773), 'numpy.ones', 'np.ones', (['num_dimensions'], {}), '(num_dimensions)\n', (757, 773), True, 'import numpy as np\n')] |
import xlrd
import numpy as np
import xlwt
from tempfile import TemporaryFile
# Output workbook: one row per (sorted) distance entry of the input matrix.
book = xlwt.Workbook()
sheet1 = book.add_sheet('sheet1')
# NOTE(review): hard-coded absolute input path — consider parameterizing.
data=xlrd.open_workbook(r'C:\Users\Desktop\teamE\D1_route.xlsx')
table=data.sheets()[0]
all_data=[]
row_num=table.nrows
col_num=table.ncols
all_loc=[]  # collected positions; currently unused
# Read the whole sheet into a 2-D list, row by row.
for i in range(table.nrows):
    every_row=table.row_values(i)
    all_data.append(every_row)
new_all_data=np.array(all_data)
# All matrix entries, sorted ascending; each one seeds a greedy pass below.
data_try=new_all_data.flatten()
data_try1=sorted(data_try)
for l in range(len(data_try1)):
    j = 1
    order_min=data_try1[l]
    # Position(s) of the seed value; only the first occurrence is used.
    loca=np.where(new_all_data==order_min)
    all_data_for_choose=new_all_data
    sheet1.write(l, 0, order_min)
    # Greedy pass: repeatedly remove the chosen row/column and take the
    # minimum of the remainder, writing 11 follow-up values per seed.
    # NOTE(review): presumably this traces a nearest-neighbour style route
    # through the distance matrix — confirm against the model write-up.
    while j<12:
        #all_loc.append([loca[0][0],loca[1][0]])
        change1=np.delete(all_data_for_choose,[loca[0][0],loca[1][0]],0)
        change2=np.delete(change1,[loca[0][0],loca[1][0]],1)
        dis = np.min(change2)
        all_data_for_choose = change2
        loca = np.where(all_data_for_choose == dis)
        sheet1.write(l, j, dis)
        j+=1
name = "find_route_sensitivity_D1.xls"
book.save(name)
# Second save to a temporary file (pattern from the xlwt documentation).
book.save(TemporaryFile())
| [
"numpy.where",
"numpy.delete",
"xlrd.open_workbook",
"numpy.array",
"numpy.min",
"tempfile.TemporaryFile",
"xlwt.Workbook"
] | [((93, 108), 'xlwt.Workbook', 'xlwt.Workbook', ([], {}), '()\n', (106, 108), False, 'import xlwt\n'), ((152, 214), 'xlrd.open_workbook', 'xlrd.open_workbook', (['"""C:\\\\Users\\\\Desktop\\\\teamE\\\\D1_route.xlsx"""'], {}), "('C:\\\\Users\\\\Desktop\\\\teamE\\\\D1_route.xlsx')\n", (170, 214), False, 'import xlrd\n'), ((418, 436), 'numpy.array', 'np.array', (['all_data'], {}), '(all_data)\n', (426, 436), True, 'import numpy as np\n'), ((584, 619), 'numpy.where', 'np.where', (['(new_all_data == order_min)'], {}), '(new_all_data == order_min)\n', (592, 619), True, 'import numpy as np\n'), ((1140, 1155), 'tempfile.TemporaryFile', 'TemporaryFile', ([], {}), '()\n', (1153, 1155), False, 'from tempfile import TemporaryFile\n'), ((777, 836), 'numpy.delete', 'np.delete', (['all_data_for_choose', '[loca[0][0], loca[1][0]]', '(0)'], {}), '(all_data_for_choose, [loca[0][0], loca[1][0]], 0)\n', (786, 836), True, 'import numpy as np\n'), ((851, 898), 'numpy.delete', 'np.delete', (['change1', '[loca[0][0], loca[1][0]]', '(1)'], {}), '(change1, [loca[0][0], loca[1][0]], 1)\n', (860, 898), True, 'import numpy as np\n'), ((911, 926), 'numpy.min', 'np.min', (['change2'], {}), '(change2)\n', (917, 926), True, 'import numpy as np\n'), ((982, 1018), 'numpy.where', 'np.where', (['(all_data_for_choose == dis)'], {}), '(all_data_for_choose == dis)\n', (990, 1018), True, 'import numpy as np\n')] |
import pandas as pd
# data from https://archive.ics.uci.edu/ml/datasets/Computer+Hardware
df = pd.read_csv('../data/machine.data', header=None)
# Column names from the UCI data-set description; PRP is the published
# relative performance and ERP the estimated relative performance.
df.columns = [
    'VENDOR', 'MODEL', 'MYCT', 'MMIN', 'MMAX',
    'CACH', 'CHMIN', 'CHMAX', 'PRP', 'ERP'
]
# print(df.head())
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='whitegrid', context="notebook")
cols = ['CACH', 'CHMIN', 'CHMAX', 'PRP', 'ERP']
# sns.pairplot(df[cols], size=2.0)
# plt.show()
# Restore the default matplotlib style settings
# sns.reset_orig()
import numpy as np
# Pearson correlation matrix of the selected columns, shown as a heatmap.
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.5)
hm = sns.heatmap(
    cm, cbar=True, annot=True, square=True, fmt=".2f",
    annot_kws={'size':15}, yticklabels=cols, xticklabels=cols
)
plt.show()
class LinearRegressionGD(object):
    """Least-squares linear regression fitted by batch gradient descent.

    Parameters
    ----------
    eta : float
        Learning rate.
    n_iter : int
        Number of passes over the training set.

    Attributes
    ----------
    w_ : 1d-array
        Weights after fitting; ``w_[0]`` is the bias term.
    cost_ : list
        Sum-of-squared-errors cost value in each epoch.
    """

    def __init__(self, eta=0.001, n_iter=20):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        """Fit the model.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors.
        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : object
        """
        self.w_ = np.zeros(1 + X.shape[1])
        self.cost_ = []
        for i in range(self.n_iter):
            output = self.net_input(X)
            errors = (y - output)
            # Batch gradient descent: w <- w + eta * X^T (y - yhat),
            # with the bias updated by the summed error.
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            cost = (errors ** 2).sum() / 2.0
            self.cost_.append(cost)
        return self

    def net_input(self, X):
        """Return the linear combination of the inputs plus the bias.

        Bug fix: the original computed ``np.dot(X, self.w_[1:] + self.w_[0])``,
        which adds the bias to every weight *before* the dot product; the
        bias must be added to the dot-product result instead.
        """
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        """Return the predicted (continuous) target values."""
        return self.net_input(X)
X = df[['PRP']].values
y = df['ERP'].values
from sklearn.preprocessing import StandardScaler
sc_x = StandardScaler()
sc_y = StandardScaler()
X_std = sc_x.fit_transform(X)
# NOTE(review): passing a 1-D y to fit_transform only works in old
# scikit-learn versions; newer ones require y.reshape(-1, 1) — confirm
# the pinned version.
y_std = sc_y.fit_transform(y)
lr = LinearRegressionGD()
lr.fit(X_std, y_std)
# Cost per epoch — should decrease if the learning rate is well chosen.
plt.plot(range(1, lr.n_iter+1), lr.cost_)
plt.ylabel('SSE')
plt.xlabel('Epoch')
plt.show()
def lin_regplot(X, y, model):
    """Scatter the data and overlay the model's regression line."""
    plt.scatter(X, y, c='blue')
    plt.plot(X, model.predict(X), color='red')
lin_regplot(X_std, y_std, lr)
plt.xlabel('PRP (standardized)')
plt.ylabel('ERP (standardized)')
plt.show()
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RANSACRegressor
# Robust fit: RANSAC repeatedly fits on random subsets and keeps the model
# with the most inliers (absolute residual <= 50).
# NOTE(review): ``residual_metric`` was deprecated/removed in later
# scikit-learn versions (use ``loss``) — confirm the pinned version.
ransac = RANSACRegressor(
    LinearRegression(), max_trials=100, min_samples=50, random_state=0,
    residual_metric=lambda x: np.sum(np.abs(x), axis=1), residual_threshold=50.0
)
ransac.fit(X, y)
inlier_mask = ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
line_X = np.arange(0,1200,50)
line_y_ransac = ransac.predict(line_X[:, np.newaxis])
# Visualise inliers vs outliers together with the robust regression line.
plt.scatter(X[inlier_mask], y[inlier_mask], c='blue', marker='o', label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask], c='lightgreen', marker='s', label='Outliers')
plt.plot(line_X, line_y_ransac, color='red')
plt.xlabel('Average PRP')
plt.ylabel('ERP')
plt.legend(loc='upper left')
plt.show()
print('Slope: %.3f' % ransac.estimator_.coef_[0])
print('Intercept: %.3f' % ransac.estimator_.intercept_)
from sklearn.model_selection import train_test_split
# Multivariate model: predict ERP from four hardware features.
X = df[['CACH', 'CHMIN', 'CHMAX', 'PRP']].values
y = df['ERP'].values
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=0
)
slr = LinearRegression()
slr.fit(X_train, y_train)
y_train_pred = slr.predict(X_train)
y_test_pred = slr.predict(X_test)
# Residual plot: points should scatter randomly around zero for a good fit.
plt.scatter(
    y_train_pred, y_train_pred - y_train,
    c='blue', marker='o', label='Training data'
)
plt.scatter(
    y_test_pred, y_test_pred - y_test,
    c='lightgreen', marker='s', label='Test data'
)
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=0, xmax=1000, lw=2, color='red')
plt.xlim([0, 1000])
plt.show()
from sklearn.metrics import mean_squared_error
# Quantitative evaluation on train vs test split.
print('MSE train: %.3f, test: %.3f' % (
    mean_squared_error(y_train, y_train_pred),
    mean_squared_error(y_test, y_test_pred)
))
from sklearn.metrics import r2_score
print('R^2 train: %.3f, test: %.3f' % (
    r2_score(y_train, y_train_pred),
    r2_score(y_test, y_test_pred)
)) | [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.logical_not",
"sklearn.metrics.r2_score",
"numpy.arange",
"seaborn.set",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.dot",
"matplotlib.pyplot.scatter",
"numpy.abs",
"numpy.corrcoef",
"sklearn.model_selection.train_test_sp... | [((97, 145), 'pandas.read_csv', 'pd.read_csv', (['"""../data/machine.data"""'], {'header': 'None'}), "('../data/machine.data', header=None)\n", (108, 145), True, 'import pandas as pd\n'), ((328, 374), 'seaborn.set', 'sns.set', ([], {'style': '"""whitegrid"""', 'context': '"""notebook"""'}), "(style='whitegrid', context='notebook')\n", (335, 374), True, 'import seaborn as sns\n'), ((540, 570), 'numpy.corrcoef', 'np.corrcoef', (['df[cols].values.T'], {}), '(df[cols].values.T)\n', (551, 570), True, 'import numpy as np\n'), ((571, 594), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.5)'}), '(font_scale=1.5)\n', (578, 594), True, 'import seaborn as sns\n'), ((600, 727), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'cbar': '(True)', 'annot': '(True)', 'square': '(True)', 'fmt': '""".2f"""', 'annot_kws': "{'size': 15}", 'yticklabels': 'cols', 'xticklabels': 'cols'}), "(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={\n 'size': 15}, yticklabels=cols, xticklabels=cols)\n", (611, 727), True, 'import seaborn as sns\n'), ((732, 742), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (740, 742), True, 'import matplotlib.pyplot as plt\n'), ((1525, 1541), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1539, 1541), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1549, 1565), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1563, 1565), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1717, 1734), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""SSE"""'], {}), "('SSE')\n", (1727, 1734), True, 'import matplotlib.pyplot as plt\n'), ((1735, 1754), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (1745, 1754), True, 'import matplotlib.pyplot as plt\n'), ((1755, 1765), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1763, 1765), True, 'import matplotlib.pyplot as plt\n'), ((1907, 1939), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""PRP (standardized)"""'], {}), "('PRP (standardized)')\n", (1917, 1939), True, 'import matplotlib.pyplot as plt\n'), ((1940, 1972), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ERP (standardized)"""'], {}), "('ERP (standardized)')\n", (1950, 1972), True, 'import matplotlib.pyplot as plt\n'), ((1973, 1983), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1981, 1983), True, 'import matplotlib.pyplot as plt\n'), ((2332, 2359), 'numpy.logical_not', 'np.logical_not', (['inlier_mask'], {}), '(inlier_mask)\n', (2346, 2359), True, 'import numpy as np\n'), ((2369, 2391), 'numpy.arange', 'np.arange', (['(0)', '(1200)', '(50)'], {}), '(0, 1200, 50)\n', (2378, 2391), True, 'import numpy as np\n'), ((2444, 2531), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[inlier_mask]', 'y[inlier_mask]'], {'c': '"""blue"""', 'marker': '"""o"""', 'label': '"""Inliers"""'}), "(X[inlier_mask], y[inlier_mask], c='blue', marker='o', label=\n 'Inliers')\n", (2455, 2531), True, 'import matplotlib.pyplot as plt\n'), ((2527, 2622), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[outlier_mask]', 'y[outlier_mask]'], {'c': '"""lightgreen"""', 'marker': '"""s"""', 'label': '"""Outliers"""'}), "(X[outlier_mask], y[outlier_mask], c='lightgreen', marker='s',\n label='Outliers')\n", (2538, 2622), True, 'import matplotlib.pyplot as plt\n'), ((2619, 2663), 'matplotlib.pyplot.plot', 'plt.plot', (['line_X', 'line_y_ransac'], {'color': '"""red"""'}), "(line_X, line_y_ransac, color='red')\n", (2627, 2663), True, 'import matplotlib.pyplot as plt\n'), ((2664, 2689), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Average PRP"""'], {}), "('Average PRP')\n", (2674, 2689), True, 'import matplotlib.pyplot as plt\n'), ((2690, 2707), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ERP"""'], {}), "('ERP')\n", (2700, 2707), True, 'import matplotlib.pyplot as plt\n'), ((2708, 2736), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), 
"(loc='upper left')\n", (2718, 2736), True, 'import matplotlib.pyplot as plt\n'), ((2737, 2747), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2745, 2747), True, 'import matplotlib.pyplot as plt\n'), ((3014, 3067), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, y, test_size=0.3, random_state=0)\n', (3030, 3067), False, 'from sklearn.model_selection import train_test_split\n'), ((3080, 3098), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3096, 3098), False, 'from sklearn.linear_model import LinearRegression\n'), ((3196, 3294), 'matplotlib.pyplot.scatter', 'plt.scatter', (['y_train_pred', '(y_train_pred - y_train)'], {'c': '"""blue"""', 'marker': '"""o"""', 'label': '"""Training data"""'}), "(y_train_pred, y_train_pred - y_train, c='blue', marker='o',\n label='Training data')\n", (3207, 3294), True, 'import matplotlib.pyplot as plt\n'), ((3301, 3398), 'matplotlib.pyplot.scatter', 'plt.scatter', (['y_test_pred', '(y_test_pred - y_test)'], {'c': '"""lightgreen"""', 'marker': '"""s"""', 'label': '"""Test data"""'}), "(y_test_pred, y_test_pred - y_test, c='lightgreen', marker='s',\n label='Test data')\n", (3312, 3398), True, 'import matplotlib.pyplot as plt\n'), ((3405, 3435), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted values"""'], {}), "('Predicted values')\n", (3415, 3435), True, 'import matplotlib.pyplot as plt\n'), ((3436, 3459), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Residuals"""'], {}), "('Residuals')\n", (3446, 3459), True, 'import matplotlib.pyplot as plt\n'), ((3460, 3488), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (3470, 3488), True, 'import matplotlib.pyplot as plt\n'), ((3489, 3542), 'matplotlib.pyplot.hlines', 'plt.hlines', ([], {'y': '(0)', 'xmin': '(0)', 'xmax': '(1000)', 'lw': '(2)', 'color': '"""red"""'}), "(y=0, xmin=0, xmax=1000, lw=2, 
color='red')\n", (3499, 3542), True, 'import matplotlib.pyplot as plt\n'), ((3543, 3562), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1000]'], {}), '([0, 1000])\n', (3551, 3562), True, 'import matplotlib.pyplot as plt\n'), ((3563, 3573), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3571, 3573), True, 'import matplotlib.pyplot as plt\n'), ((1801, 1828), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'y'], {'c': '"""blue"""'}), "(X, y, c='blue')\n", (1812, 1828), True, 'import matplotlib.pyplot as plt\n'), ((2114, 2132), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2130, 2132), False, 'from sklearn.linear_model import LinearRegression\n'), ((921, 945), 'numpy.zeros', 'np.zeros', (['(1 + X.shape[1])'], {}), '(1 + X.shape[1])\n', (929, 945), True, 'import numpy as np\n'), ((1327, 1362), 'numpy.dot', 'np.dot', (['X', '(self.w_[1:] + self.w_[0])'], {}), '(X, self.w_[1:] + self.w_[0])\n', (1333, 1362), True, 'import numpy as np\n'), ((3666, 3707), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_train', 'y_train_pred'], {}), '(y_train, y_train_pred)\n', (3684, 3707), False, 'from sklearn.metrics import mean_squared_error\n'), ((3713, 3752), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_test_pred'], {}), '(y_test, y_test_pred)\n', (3731, 3752), False, 'from sklearn.metrics import mean_squared_error\n'), ((3838, 3869), 'sklearn.metrics.r2_score', 'r2_score', (['y_train', 'y_train_pred'], {}), '(y_train, y_train_pred)\n', (3846, 3869), False, 'from sklearn.metrics import r2_score\n'), ((3875, 3904), 'sklearn.metrics.r2_score', 'r2_score', (['y_test', 'y_test_pred'], {}), '(y_test, y_test_pred)\n', (3883, 3904), False, 'from sklearn.metrics import r2_score\n'), ((2219, 2228), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (2225, 2228), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import collections
import pytest
import numpy as np
from skmpe import mpe, parameters, OdeSolverMethod, EndPointNotReachedError
# Absolute tolerance (in travel-time units) used when comparing the
# extracted path's travel times against the grid values at the end points.
TRAVEL_TIME_ABS_TOL = 100
# Shared parametrization: run each test with first- and second-order
# travel-time computation.
travel_time_order_param = pytest.mark.parametrize('travel_time_order', [
    pytest.param(1),
    pytest.param(2),
])
@pytest.mark.parametrize('ode_method, start_point, end_point', [
    (OdeSolverMethod.RK23, (37, 255), (172, 112)),
    (OdeSolverMethod.RK45, (37, 255), (172, 112)),
    (OdeSolverMethod.DOP853, (37, 255), (172, 112)),
    (OdeSolverMethod.Radau, (37, 255), (172, 112)),
    (OdeSolverMethod.BDF, (37, 255), (172, 112)),
    (OdeSolverMethod.LSODA, (37, 255), (172, 112)),
    (OdeSolverMethod.RK45, (37, 255), (484, 300)),
])
@travel_time_order_param
def test_extract_path_without_waypoints(retina_speed_image, travel_time_order, ode_method, start_point, end_point):
    """Every supported ODE solver must extract a non-empty, consistent path."""
    with parameters(ode_solver_method=ode_method, travel_time_order=travel_time_order):
        path_info = mpe(retina_speed_image, start_point, end_point)
    assert path_info.point_count > 0
    # Without waypoints the path consists of a single piece.
    path_piece_info = path_info.pieces[0]
    # Travel times at the path's first/last points must match the grid
    # travel-time values at the start/end pixels (within tolerance).
    start_travel_time = path_piece_info.travel_time[start_point[0], start_point[1]]
    end_travel_time = path_piece_info.travel_time[end_point[0], end_point[1]]
    path_start_travel_time = path_piece_info.extraction_result.path_travel_times[0]
    path_end_travel_time = path_piece_info.extraction_result.path_travel_times[-1]
    assert path_start_travel_time == pytest.approx(start_travel_time, abs=TRAVEL_TIME_ABS_TOL)
    assert path_end_travel_time == pytest.approx(end_travel_time, abs=TRAVEL_TIME_ABS_TOL)
@pytest.mark.parametrize('start_point, end_point, way_points, ttime_cache, ttime_count, reversed_count', [
    ((37, 255), (484, 300), ((172, 112), (236, 98), (420, 153)), True, 2, 2),
    ((37, 255), (484, 300), ((172, 112), (236, 98), (420, 153)), False, 4, 0),
])
@travel_time_order_param
def test_extract_path_with_waypoints(retina_speed_image, travel_time_order,
                                     start_point, end_point, way_points, ttime_cache,
                                     ttime_count, reversed_count):
    """Waypoint extraction: each piece's travel times must be consistent and
    caching must control how many distinct travel-time arrays are used."""
    with parameters(travel_time_order=travel_time_order, travel_time_cache=ttime_cache):
        path_info = mpe(retina_speed_image, start_point, end_point, way_points)
    assert path_info.point_count > 0
    # Every piece's first/last travel times must match the grid values at
    # the piece's own start/end points (within tolerance).
    for path_piece_info in path_info.pieces:
        start_pt = path_piece_info.start_point
        end_pt = path_piece_info.end_point
        start_travel_time = path_piece_info.travel_time[start_pt[0], start_pt[1]]
        end_travel_time = path_piece_info.travel_time[end_pt[0], end_pt[1]]
        path_start_travel_time = path_piece_info.extraction_result.path_travel_times[0]
        path_end_travel_time = path_piece_info.extraction_result.path_travel_times[-1]
        assert path_start_travel_time == pytest.approx(start_travel_time, abs=TRAVEL_TIME_ABS_TOL)
        assert path_end_travel_time == pytest.approx(end_travel_time, abs=TRAVEL_TIME_ABS_TOL)
    # With caching enabled, travel-time arrays are shared between pieces
    # (fewer distinct object ids) and some pieces are extracted reversed.
    ttime_counter = collections.Counter(id(piece.travel_time) for piece in path_info.pieces)
    assert len(ttime_counter) == ttime_count
    assert list(piece.reversed for piece in path_info.pieces).count(True) == reversed_count
@pytest.mark.parametrize('start_point, end_point, time_bound', [
    ((37, 255), (484, 300), 500),
])
def test_end_point_not_reached(retina_speed_image, start_point, end_point, time_bound):
    """A too-small integration time bound must raise EndPointNotReachedError."""
    with pytest.raises(EndPointNotReachedError), parameters(integrate_time_bound=time_bound):
        mpe(retina_speed_image, start_point, end_point)
@pytest.mark.parametrize('start_point, end_point, time_bound, wall', [
    ((37, 255), (484, 300), 2000.0, (slice(245, 247), slice(0, None))),
])
def test_unreachable_end_point(retina_speed_image, start_point, end_point, time_bound, wall):
    """A masked wall across the image must make the end point unreachable."""
    # Mask out a horizontal band so no path can cross from start to end.
    mask = np.zeros_like(retina_speed_image, dtype=np.bool_)
    mask[wall] = True
    retina_speed_image = np.ma.masked_array(retina_speed_image, mask=mask)
    with pytest.raises(EndPointNotReachedError):
        with parameters(integrate_time_bound=time_bound):
            mpe(retina_speed_image, start_point, end_point)
| [
"pytest.approx",
"pytest.param",
"pytest.mark.parametrize",
"skmpe.mpe",
"pytest.raises",
"numpy.ma.masked_array",
"skmpe.parameters",
"numpy.zeros_like"
] | [((305, 723), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ode_method, start_point, end_point"""', '[(OdeSolverMethod.RK23, (37, 255), (172, 112)), (OdeSolverMethod.RK45, (37,\n 255), (172, 112)), (OdeSolverMethod.DOP853, (37, 255), (172, 112)), (\n OdeSolverMethod.Radau, (37, 255), (172, 112)), (OdeSolverMethod.BDF, (\n 37, 255), (172, 112)), (OdeSolverMethod.LSODA, (37, 255), (172, 112)),\n (OdeSolverMethod.RK45, (37, 255), (484, 300))]'], {}), "('ode_method, start_point, end_point', [(\n OdeSolverMethod.RK23, (37, 255), (172, 112)), (OdeSolverMethod.RK45, (\n 37, 255), (172, 112)), (OdeSolverMethod.DOP853, (37, 255), (172, 112)),\n (OdeSolverMethod.Radau, (37, 255), (172, 112)), (OdeSolverMethod.BDF, (\n 37, 255), (172, 112)), (OdeSolverMethod.LSODA, (37, 255), (172, 112)),\n (OdeSolverMethod.RK45, (37, 255), (484, 300))])\n", (328, 723), False, 'import pytest\n'), ((1630, 1902), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""start_point, end_point, way_points, ttime_cache, ttime_count, reversed_count"""', '[((37, 255), (484, 300), ((172, 112), (236, 98), (420, 153)), True, 2, 2),\n ((37, 255), (484, 300), ((172, 112), (236, 98), (420, 153)), False, 4, 0)]'], {}), "(\n 'start_point, end_point, way_points, ttime_cache, ttime_count, reversed_count'\n , [((37, 255), (484, 300), ((172, 112), (236, 98), (420, 153)), True, 2,\n 2), ((37, 255), (484, 300), ((172, 112), (236, 98), (420, 153)), False,\n 4, 0)])\n", (1653, 1902), False, 'import pytest\n'), ((3257, 3354), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""start_point, end_point, time_bound"""', '[((37, 255), (484, 300), 500)]'], {}), "('start_point, end_point, time_bound', [((37, 255),\n (484, 300), 500)])\n", (3280, 3354), False, 'import pytest\n'), ((3866, 3915), 'numpy.zeros_like', 'np.zeros_like', (['retina_speed_image'], {'dtype': 'np.bool_'}), '(retina_speed_image, dtype=np.bool_)\n', (3879, 3915), True, 'import numpy as np\n'), ((3963, 4012), 
'numpy.ma.masked_array', 'np.ma.masked_array', (['retina_speed_image'], {'mask': 'mask'}), '(retina_speed_image, mask=mask)\n', (3981, 4012), True, 'import numpy as np\n'), ((261, 276), 'pytest.param', 'pytest.param', (['(1)'], {}), '(1)\n', (273, 276), False, 'import pytest\n'), ((282, 297), 'pytest.param', 'pytest.param', (['(2)'], {}), '(2)\n', (294, 297), False, 'import pytest\n'), ((883, 960), 'skmpe.parameters', 'parameters', ([], {'ode_solver_method': 'ode_method', 'travel_time_order': 'travel_time_order'}), '(ode_solver_method=ode_method, travel_time_order=travel_time_order)\n', (893, 960), False, 'from skmpe import mpe, parameters, OdeSolverMethod, EndPointNotReachedError\n'), ((982, 1029), 'skmpe.mpe', 'mpe', (['retina_speed_image', 'start_point', 'end_point'], {}), '(retina_speed_image, start_point, end_point)\n', (985, 1029), False, 'from skmpe import mpe, parameters, OdeSolverMethod, EndPointNotReachedError\n'), ((1478, 1535), 'pytest.approx', 'pytest.approx', (['start_travel_time'], {'abs': 'TRAVEL_TIME_ABS_TOL'}), '(start_travel_time, abs=TRAVEL_TIME_ABS_TOL)\n', (1491, 1535), False, 'import pytest\n'), ((1571, 1626), 'pytest.approx', 'pytest.approx', (['end_travel_time'], {'abs': 'TRAVEL_TIME_ABS_TOL'}), '(end_travel_time, abs=TRAVEL_TIME_ABS_TOL)\n', (1584, 1626), False, 'import pytest\n'), ((2159, 2237), 'skmpe.parameters', 'parameters', ([], {'travel_time_order': 'travel_time_order', 'travel_time_cache': 'ttime_cache'}), '(travel_time_order=travel_time_order, travel_time_cache=ttime_cache)\n', (2169, 2237), False, 'from skmpe import mpe, parameters, OdeSolverMethod, EndPointNotReachedError\n'), ((2259, 2318), 'skmpe.mpe', 'mpe', (['retina_speed_image', 'start_point', 'end_point', 'way_points'], {}), '(retina_speed_image, start_point, end_point, way_points)\n', (2262, 2318), False, 'from skmpe import mpe, parameters, OdeSolverMethod, EndPointNotReachedError\n'), ((3455, 3493), 'pytest.raises', 'pytest.raises', (['EndPointNotReachedError'], {}), 
'(EndPointNotReachedError)\n', (3468, 3493), False, 'import pytest\n'), ((4023, 4061), 'pytest.raises', 'pytest.raises', (['EndPointNotReachedError'], {}), '(EndPointNotReachedError)\n', (4036, 4061), False, 'import pytest\n'), ((2869, 2926), 'pytest.approx', 'pytest.approx', (['start_travel_time'], {'abs': 'TRAVEL_TIME_ABS_TOL'}), '(start_travel_time, abs=TRAVEL_TIME_ABS_TOL)\n', (2882, 2926), False, 'import pytest\n'), ((2966, 3021), 'pytest.approx', 'pytest.approx', (['end_travel_time'], {'abs': 'TRAVEL_TIME_ABS_TOL'}), '(end_travel_time, abs=TRAVEL_TIME_ABS_TOL)\n', (2979, 3021), False, 'import pytest\n'), ((3508, 3551), 'skmpe.parameters', 'parameters', ([], {'integrate_time_bound': 'time_bound'}), '(integrate_time_bound=time_bound)\n', (3518, 3551), False, 'from skmpe import mpe, parameters, OdeSolverMethod, EndPointNotReachedError\n'), ((3565, 3612), 'skmpe.mpe', 'mpe', (['retina_speed_image', 'start_point', 'end_point'], {}), '(retina_speed_image, start_point, end_point)\n', (3568, 3612), False, 'from skmpe import mpe, parameters, OdeSolverMethod, EndPointNotReachedError\n'), ((4076, 4119), 'skmpe.parameters', 'parameters', ([], {'integrate_time_bound': 'time_bound'}), '(integrate_time_bound=time_bound)\n', (4086, 4119), False, 'from skmpe import mpe, parameters, OdeSolverMethod, EndPointNotReachedError\n'), ((4133, 4180), 'skmpe.mpe', 'mpe', (['retina_speed_image', 'start_point', 'end_point'], {}), '(retina_speed_image, start_point, end_point)\n', (4136, 4180), False, 'from skmpe import mpe, parameters, OdeSolverMethod, EndPointNotReachedError\n')] |
import numpy as np
import os.path
import time
import matplotlib._pylab_helpers
from matplotlib.backends.backend_pdf import PdfPages
# import plotly.plotly as py
# import plotly.tools as tls
def return_length_of_nonzero_array(X):
    """
    Takes in a numpy.ndarray X of shape (m,n) and returns the length of the
    array once trailing zero columns are discarded.

    The first column is never inspected. If no element of the remaining
    columns is zero, the full width n is returned; otherwise the result is
    one plus the index (within X[:,1:]) of the first column whose entries
    are all zero.
    """
    assert str(type(X))=="<class 'numpy.ndarray'>", "X should be a numpy array"
    assert np.shape(X)[1]!=1, "X should be a wide rectangular array. (m,1) is a column, therefore a nonzero X of this shape will return 1 (trivial solution). Transpose X to properly identify nonzero array length."
    assert np.shape(X)!=(1,1), "Check input. Should not be of shape (1,1) (trivial solution)."
    tail = X[:, 1:]
    tail_shape = np.shape(tail)
    if (tail != np.zeros(tail_shape)).all():
        # Not a single zero past the first column: nothing trails.
        return np.shape(X)[1]
    # A column is "blank" when its zero-count equals the number of rows;
    # argmax picks the first blank column, +1 converts index to length.
    blank_columns = (tail == np.zeros(tail_shape)).sum(axis=0) == tail_shape[0]
    return np.argmax(blank_columns) + 1
def save_figures(Destination,BaseFileName,**kwargs):
    """
    Save matplotlib figures as sequentially numbered .jpg files under
    Destination + SubFolder, optionally bundling them into one PDF.

    Parameters
    ----------
    Destination : str
        Directory path; must end in '/'.
    BaseFileName : str
        Stem used for the generated file names.

    Keyword Arguments
    -----------------
    SubFolder : str
        Subdirectory (must end in '/') inside Destination; defaults to a
        timestamped folder name.
    figs : list
        Figures to save; defaults to every figure currently open in pyplot.
    SaveAsPDF : bool
        When True, also writes all figures into a single PDF (default False).
    """
    SubFolder = kwargs.get("SubFolder",time.strftime("%Y_%m_%d_%H%M%S")+"/")
    FilePath = Destination + SubFolder
    assert type(Destination) == str and Destination[-1] == "/", \
        "Destination must be a string ending is '/'. Currently Destination = " + str(Destination)
    assert type(SubFolder) == str and SubFolder[-1] == "/", \
        "SubFolder must be a string ending is '/'. Currently SubFolder = " + str(SubFolder)
    if not os.path.exists(FilePath):
        os.makedirs(FilePath)
    # Default to every figure tracked by pyplot's global figure manager.
    figs = kwargs.get("figs",
        [manager.canvas.figure for manager in matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
        )
    SaveAsPDF = kwargs.get("SaveAsPDF",False)
    assert type(SaveAsPDF)==bool, "SaveAsPDF must be either True or False."
    # Find the first trial number i whose "<Base>_<i>-01.jpg" does not exist
    # yet, so repeated calls never overwrite files from earlier runs.
    i = 1
    FileName = BaseFileName + "_" + "{:0>2d}".format(i) + "-01.jpg"
    if os.path.exists(FilePath + FileName) == True:
        while os.path.exists(FilePath + FileName) == True:
            i += 1
            FileName = BaseFileName + "_" + "{:0>2d}".format(i) + "-01.jpg"
    # NOTE(review): the collision counter i is intentionally shadowed by the
    # figure index below; FileName[:-6] strips the trailing "01.jpg" so each
    # figure gets its own two-digit suffix within the same trial.
    for i in range(len(figs)):
        figs[i].savefig(FilePath + FileName[:-6] + "{:0>2d}".format(i+1) + ".jpg")
    if SaveAsPDF == True:
        # The PDF shares the trial stem chosen above, so it cannot collide.
        PDFFileName = FileName[:-7] + ".pdf"
        assert not os.path.exists(FilePath + PDFFileName), \
            ("Error with naming file. "
            + PDFFileName
            + " should not already exist as "
            + FileName
            + " does not exist. Try renaming or deleting "
            + PDFFileName
            )
        PDFFile = PdfPages(FilePath + PDFFileName)
        if len(figs)==1:
            PDFFile.savefig(figs[0])
        else:
            [PDFFile.savefig(fig) for fig in figs]
        PDFFile.close()
#
# def save_figures_to_plotly(FileName,**kwargs):
# """
#
# """
# figs = kwargs.get("figs",
# [manager.canvas.figure for manager in matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
# )
#
# FileTime = time.strftime("%Y_%m_%d_%H%M%S")
# for i in range(len(figs)):
# plotly_fig = tls.mpl_to_plotly(figs[i])
# py.plot(plotly_fig,filename=(FileName + "-" + FileTime + "-" + "{:0>2d}".format(i+1)))
| [
"numpy.shape",
"time.strftime",
"matplotlib.backends.backend_pdf.PdfPages"
] | [((648, 659), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (656, 659), True, 'import numpy as np\n'), ((2359, 2391), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['(FilePath + PDFFileName)'], {}), '(FilePath + PDFFileName)\n', (2367, 2391), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((437, 448), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (445, 448), True, 'import numpy as np\n'), ((791, 802), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (799, 802), True, 'import numpy as np\n'), ((1016, 1048), 'time.strftime', 'time.strftime', (['"""%Y_%m_%d_%H%M%S"""'], {}), "('%Y_%m_%d_%H%M%S')\n", (1029, 1048), False, 'import time\n'), ((755, 773), 'numpy.shape', 'np.shape', (['X[:, 1:]'], {}), '(X[:, 1:])\n', (763, 773), True, 'import numpy as np\n'), ((889, 907), 'numpy.shape', 'np.shape', (['X[:, 1:]'], {}), '(X[:, 1:])\n', (897, 907), True, 'import numpy as np\n'), ((854, 872), 'numpy.shape', 'np.shape', (['X[:, 1:]'], {}), '(X[:, 1:])\n', (862, 872), True, 'import numpy as np\n')] |
import pickle
from collections import Counter
from math import log
from typing import List, Dict, Tuple
import numpy as np
from scipy.sparse import csr_matrix
from scipy.spatial.distance import cosine
from common import check_data_set, flatten_nested_iterables
from preprocessors.configs import PreProcessingConfigs
from utils.file_ops import create_dir, check_paths
def extract_word_to_doc_ids(docs_of_words: List[List[str]]) -> Dict[str, List[int]]:
    """Map each distinct word to the ordered list of document ids it occurs in."""
    word_to_doc_ids: Dict[str, List[int]] = {}
    for doc_id, words in enumerate(docs_of_words):
        seen_in_doc = set()
        for word in words:
            if word in seen_in_doc:
                continue  # Record each document at most once per word.
            seen_in_doc.add(word)
            word_to_doc_ids.setdefault(word, []).append(doc_id)
    return word_to_doc_ids
def extract_word_to_doc_counts(word_to_doc_ids: Dict[str, List[int]]) -> Dict[str, int]:
    """Map each word to its document frequency (number of documents containing it)."""
    doc_counts = {}
    for word, doc_ids in word_to_doc_ids.items():
        doc_counts[word] = len(doc_ids)
    return doc_counts
def extract_windows(docs_of_words: List[List[str]], window_size: int) -> List[List[str]]:
    """Build word co-occurrence context windows by sliding over each document."""
    windows: List[List[str]] = []
    for doc_words in docs_of_words:
        num_words = len(doc_words)
        if num_words <= window_size:
            # Document no longer than one window: it is its own window.
            windows.append(doc_words)
            continue
        # Slide a fixed-size window one word at a time across the document.
        windows.extend(doc_words[start: start + window_size]
                       for start in range(num_words - window_size + 1))
    return windows
def extract_word_counts_in_windows(windows_of_words: List[List[str]]) -> Dict[str, int]:
    """Count, for each word, the number of windows containing it (window = bag of words)."""
    # Deduplicate within each window first, then tally across windows.
    return Counter(word for window in windows_of_words for word in set(window))
def extract_word_ids_pair_to_counts(windows_of_words: List[List[str]], word_to_id: Dict[str, int]) -> Dict[str, int]:
    """Count co-occurring word-id pairs per window; both orientations 'i,j' and 'j,i' are counted."""
    pair_counts = Counter()
    for window in windows_of_words:
        window_ids = [word_to_id[word] for word in window]
        for i in range(1, len(window_ids)):
            id_i = window_ids[i]
            for id_j in window_ids[:i]:
                if id_i == id_j:
                    continue  # A word does not co-occur with itself.
                pair_counts['{},{}'.format(id_i, id_j)] += 1
                pair_counts['{},{}'.format(id_j, id_i)] += 1
    return dict(pair_counts)
def extract_pmi_word_weights(windows_of_words: List[List[str]], word_to_id: Dict[str, int], vocab: List[str],
                             train_size: int) -> Tuple[List[int], List[int], List[float]]:
    """Calculate word-word edge weights as pointwise mutual information (PMI).

    Returns parallel (row, col, weight) lists for a sparse adjacency matrix.
    Word-node indices are offset by train_size so they sit after the
    training-document nodes in the combined doc+word graph. Only pairs with
    strictly positive PMI produce an edge.
    """
    weight_rows = []  # type: List[int]
    weight_cols = []  # type: List[int]
    pmi_weights = []  # type: List[float]
    num_windows = len(windows_of_words)
    # Per-word window frequencies and per-pair co-occurrence counts.
    word_counts_in_windows = extract_word_counts_in_windows(windows_of_words=windows_of_words)
    word_ids_pair_to_counts = extract_word_ids_pair_to_counts(windows_of_words, word_to_id)
    for word_id_pair, count in word_ids_pair_to_counts.items():
        # Keys have the form "<id_i>,<id_j>" (both orientations are present).
        word_ids_in_str = word_id_pair.split(',')
        word_id_i, word_id_j = int(word_ids_in_str[0]), int(word_ids_in_str[1])
        word_i, word_j = vocab[word_id_i], vocab[word_id_j]
        word_freq_i, word_freq_j = word_counts_in_windows[word_i], word_counts_in_windows[word_j]
        # PMI = log( P(i,j) / (P(i) * P(j)) ) with probabilities estimated
        # from window counts.
        pmi_score = log((1.0 * count / num_windows) / (1.0 * word_freq_i * word_freq_j / (num_windows * num_windows)))
        if pmi_score > 0.0:
            weight_rows.append(train_size + word_id_i)
            weight_cols.append(train_size + word_id_j)
            pmi_weights.append(pmi_score)
    return weight_rows, weight_cols, pmi_weights
def extract_cosine_similarity_word_weights(vocab: List[str], train_size: int,
                                           word_vec_path: str) -> Tuple[List[int], List[int], List[float]]:
    """Calculate word-word edge weights as cosine similarity of word vectors.

    Loads a pickled ``{word: vector}`` mapping from ``word_vec_path`` and, for
    every ordered pair of vocabulary words that both have vectors, emits an
    edge when cosine similarity exceeds 0.9. Word-node indices are offset by
    ``train_size`` to match the doc+word graph layout.

    Returns parallel (row, col, weight) lists for a sparse adjacency matrix.
    """
    # Fix: close the file handle promptly (the original leaked it), and drop
    # the leftover debug print of high-similarity pairs.
    with open(word_vec_path, 'rb') as word_vec_file:
        word_vectors = pickle.load(word_vec_file)  # type: Dict[str, List[float]]

    weight_rows = []  # type: List[int]
    weight_cols = []  # type: List[int]
    cos_sim_weights = []  # type: List[float]
    for i, word_i in enumerate(vocab):
        if word_i not in word_vectors:
            continue
        # Hoisted out of the inner loop: vector_i does not depend on j.
        vector_i = np.array(word_vectors[word_i])
        for j, word_j in enumerate(vocab):
            if word_j not in word_vectors:
                continue
            vector_j = np.array(word_vectors[word_j])
            # scipy's cosine() is a distance; similarity = 1 - distance.
            similarity = 1.0 - cosine(vector_i, vector_j)
            if similarity > 0.9:
                weight_rows.append(train_size + i)
                weight_cols.append(train_size + j)
                cos_sim_weights.append(similarity)
    return weight_rows, weight_cols, cos_sim_weights
def extract_doc_word_ids_pair_to_counts(docs_of_words: List[List[str]], word_to_id: Dict[str, int]) -> Dict[str, int]:
    """Count raw term frequencies, keyed by '<doc_id>,<word_id>' strings."""
    frequencies = Counter(
        '{},{}'.format(doc_id, word_to_id[word])
        for doc_id, doc_words in enumerate(docs_of_words)
        for word in doc_words
    )
    return dict(frequencies)
def extract_tf_idf_doc_word_weights(
        adj_rows: List[int], adj_cols: List[int], adj_weights: List[float], vocab: List[str], train_size: int,
        docs_of_words: List[List[str]], word_to_id: Dict[str, int]) -> Tuple[List[int], List[int], List[float]]:
    """Append doc-word TF-IDF edges to the given sparse-matrix triplet lists.

    NOTE: the three adj_* lists are mutated in place AND returned; callers
    pass in the word-word edges already collected. Document rows 0..train_size-1
    keep their ids; test-document rows are shifted past the vocabulary block,
    and word columns are offset by train_size.
    """
    # Term frequency per (doc, word) and document frequency per word.
    doc_word_ids_pair_to_counts = extract_doc_word_ids_pair_to_counts(docs_of_words, word_to_id)
    word_to_doc_ids = extract_word_to_doc_ids(docs_of_words=docs_of_words)
    word_to_doc_counts = extract_word_to_doc_counts(word_to_doc_ids=word_to_doc_ids)
    vocab_len = len(vocab)
    num_docs = len(docs_of_words)
    for doc_id, doc_words in enumerate(docs_of_words):
        doc_word_set = set()
        for word in doc_words:
            if word not in doc_word_set:
                # Emit one edge per distinct (doc, word) pair.
                word_id = word_to_id[word]
                word_ids_pair_count = doc_word_ids_pair_to_counts[str(doc_id) + ',' + str(word_id)]
                # Test docs sit after the vocabulary block in the node layout.
                adj_rows.append(doc_id if doc_id < train_size else doc_id + vocab_len)
                adj_cols.append(train_size + word_id)
                # IDF = log(N / df); weight = tf * idf.
                doc_word_idf = log(1.0 * num_docs / word_to_doc_counts[vocab[word_id]])
                adj_weights.append(word_ids_pair_count * doc_word_idf)
                doc_word_set.add(word)
    return adj_rows, adj_cols, adj_weights
def build_adjacency(ds_name: str, cfg: PreProcessingConfigs):
    """Build and dump the adjacency matrix of the doc-word heterogeneous graph.

    Reads the shuffled corpus, vocabulary, and train/test split-index files for
    ``ds_name``, combines word-word PMI edges with doc-word TF-IDF edges, and
    pickles the resulting csr_matrix to ``cfg.corpus_shuffled_adjacency_dir``.
    The node ordering is: train docs, then vocabulary words, then test docs.
    """
    # Input files produced by earlier pipeline stages.
    ds_corpus = cfg.corpus_shuffled_dir + ds_name + ".txt"
    ds_corpus_vocabulary = cfg.corpus_shuffled_vocab_dir + ds_name + '.vocab'
    ds_corpus_train_idx = cfg.corpus_shuffled_split_index_dir + ds_name + '.train'
    ds_corpus_test_idx = cfg.corpus_shuffled_split_index_dir + ds_name + '.test'
    # checkers
    check_data_set(data_set_name=ds_name, all_data_set_names=cfg.data_sets)
    check_paths(ds_corpus, ds_corpus_vocabulary, ds_corpus_train_idx, ds_corpus_test_idx)
    create_dir(dir_path=cfg.corpus_shuffled_adjacency_dir, overwrite=False)
    # Fix: read each input inside a `with` block so the handles are closed
    # (the original left four file objects open).
    with open(ds_corpus) as corpus_file:
        docs_of_words = [line.split() for line in corpus_file]
    with open(ds_corpus_vocabulary) as vocab_file:
        vocab = vocab_file.read().splitlines()  # Extract Vocabulary.
    word_to_id = {word: i for i, word in enumerate(vocab)}  # Word to its id.
    with open(ds_corpus_train_idx) as train_idx_file:
        train_size = len(train_idx_file.readlines())  # Real train-size, not adjusted.
    with open(ds_corpus_test_idx) as test_idx_file:
        test_size = len(test_idx_file.readlines())  # Real test-size.
    windows_of_words = extract_windows(docs_of_words=docs_of_words, window_size=20)
    # Extract word-word weights
    rows, cols, weights = extract_pmi_word_weights(windows_of_words, word_to_id, vocab, train_size)
    # As an alternative, use cosine similarity of word vectors as weights:
    # ds_corpus_word_vectors = cfg.CORPUS_WORD_VECTORS_DIR + ds_name + '.word_vectors'
    # rows, cols, weights = extract_cosine_similarity_word_weights(vocab, train_size, ds_corpus_word_vectors)
    # Extract word-doc weights (appended in place to rows/cols/weights).
    rows, cols, weights = extract_tf_idf_doc_word_weights(rows, cols, weights, vocab,
                                                         train_size, docs_of_words, word_to_id)
    adjacency_len = train_size + len(vocab) + test_size
    # csr_matrix sums duplicate (row, col) entries, which is the desired
    # behavior if a pair was emitted more than once.
    adjacency_matrix = csr_matrix((weights, (rows, cols)), shape=(adjacency_len, adjacency_len))
    # Dump Adjacency Matrix
    with open(cfg.corpus_shuffled_adjacency_dir + "/ind.{}.adj".format(ds_name), 'wb') as f:
        pickle.dump(adjacency_matrix, f)
    print("[INFO] Adjacency Dir='{}'".format(cfg.corpus_shuffled_adjacency_dir))
    print("[INFO] ========= EXTRACTED ADJACENCY MATRIX: Heterogenous doc-word adjacency matrix. =========")
| [
"scipy.spatial.distance.cosine",
"pickle.dump",
"common.check_data_set",
"math.log",
"collections.Counter",
"numpy.array",
"common.flatten_nested_iterables",
"scipy.sparse.csr_matrix",
"utils.file_ops.check_paths",
"utils.file_ops.create_dir"
] | [((2033, 2042), 'collections.Counter', 'Counter', ([], {}), '()\n', (2040, 2042), False, 'from collections import Counter\n'), ((5051, 5060), 'collections.Counter', 'Counter', ([], {}), '()\n', (5058, 5060), False, 'from collections import Counter\n'), ((7067, 7138), 'common.check_data_set', 'check_data_set', ([], {'data_set_name': 'ds_name', 'all_data_set_names': 'cfg.data_sets'}), '(data_set_name=ds_name, all_data_set_names=cfg.data_sets)\n', (7081, 7138), False, 'from common import check_data_set, flatten_nested_iterables\n'), ((7143, 7232), 'utils.file_ops.check_paths', 'check_paths', (['ds_corpus', 'ds_corpus_vocabulary', 'ds_corpus_train_idx', 'ds_corpus_test_idx'], {}), '(ds_corpus, ds_corpus_vocabulary, ds_corpus_train_idx,\n ds_corpus_test_idx)\n', (7154, 7232), False, 'from utils.file_ops import create_dir, check_paths\n'), ((7234, 7305), 'utils.file_ops.create_dir', 'create_dir', ([], {'dir_path': 'cfg.corpus_shuffled_adjacency_dir', 'overwrite': '(False)'}), '(dir_path=cfg.corpus_shuffled_adjacency_dir, overwrite=False)\n', (7244, 7305), False, 'from utils.file_ops import create_dir, check_paths\n'), ((8495, 8568), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(weights, (rows, cols))'], {'shape': '(adjacency_len, adjacency_len)'}), '((weights, (rows, cols)), shape=(adjacency_len, adjacency_len))\n', (8505, 8568), False, 'from scipy.sparse import csr_matrix\n'), ((1842, 1881), 'common.flatten_nested_iterables', 'flatten_nested_iterables', (['bags_of_words'], {}), '(bags_of_words)\n', (1866, 1881), False, 'from common import check_data_set, flatten_nested_iterables\n'), ((3472, 3573), 'math.log', 'log', (['(1.0 * count / num_windows / (1.0 * word_freq_i * word_freq_j / (\n num_windows * num_windows)))'], {}), '(1.0 * count / num_windows / (1.0 * word_freq_i * word_freq_j / (\n num_windows * num_windows)))\n', (3475, 3573), False, 'from math import log\n'), ((8699, 8731), 'pickle.dump', 'pickle.dump', (['adjacency_matrix', 'f'], {}), '(adjacency_matrix, 
f)\n', (8710, 8731), False, 'import pickle\n'), ((4450, 4480), 'numpy.array', 'np.array', (['word_vectors[word_i]'], {}), '(word_vectors[word_i])\n', (4458, 4480), True, 'import numpy as np\n'), ((4508, 4538), 'numpy.array', 'np.array', (['word_vectors[word_j]'], {}), '(word_vectors[word_j])\n', (4516, 4538), True, 'import numpy as np\n'), ((6388, 6444), 'math.log', 'log', (['(1.0 * num_docs / word_to_doc_counts[vocab[word_id]])'], {}), '(1.0 * num_docs / word_to_doc_counts[vocab[word_id]])\n', (6391, 6444), False, 'from math import log\n'), ((4574, 4600), 'scipy.spatial.distance.cosine', 'cosine', (['vector_i', 'vector_j'], {}), '(vector_i, vector_j)\n', (4580, 4600), False, 'from scipy.spatial.distance import cosine\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 21 11:05:24 2017
The oil and sugar separation (pretreatment) section for the baseline lipid cane biorefinery is defined here as System objects. The systems include all streams and units starting from enzyme treatment to purification of the sugar solution and the oil stream.
@author: Yoel
"""
import numpy as np
from biosteam import System, Stream
from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, \
HXutility, RVF, SplitFlash, VibratingScreen, \
MagneticSeparator, Clarifier, MixTank, \
Shredder, ConveyingBelt, Splitter, \
SplitCentrifuge_LLE, Pump, StorageTank
from biosteam.biorefineries.lipidcane.species import pretreatment_species
from biosteam.biorefineries.lipidcane.process_settings import price
__all__ = ('pretreatment_sys', 'lipid_cane', 'lipidcane', 'area_100', 'area_200')
# %% Species
# Register the lipid-cane species set so every Stream below shares it.
Stream.species = pretreatment_species
# Species-name orderings used when passing split fractions to units below.
psp = ('Ash', 'CaO', 'Cellulose', 'Ethanol', 'Flocculant',
       'Glucose', 'Hemicellulose', 'Lignin', 'Lipid',
       'Solids', 'H3PO4', 'Sucrose', 'Water')
psp1 = ('Ash', 'Cellulose', 'Glucose', 'Hemicellulose',
        'Lignin', 'Lipid', 'Solids', 'Sucrose', 'Water')
psp2 = ('Ash', 'CaO', 'Cellulose', 'Flocculant', 'Glucose',
        'Hemicellulose', 'Lignin', 'Lipid',
        'H3PO4', 'Sucrose', 'Water')
# %% Streams
# Feedstock composition flows (kg/hr), positionally matched to psp1.
f1 = (2000.042, 26986.69 , 2007.067, 15922.734, 14459.241,
      10035.334, 5017.667, 22746.761, 234157.798)
lipidcane = lipid_cane = Stream('lipid_cane', f1, psp1, units='kg/hr',
                                price=price['Lipid cane'])
enzyme = Stream('enzyme', Cellulose=100, Water=900, units='kg/hr',
                price=price['Protease'])
imbibition_water = Stream('imbibition_water',
                          Water=87023.35,
                          T = 338.15, units='kg/hr')
H3PO4 = Stream('H3PO4', H3PO4=74.23, Water=13.10, units='kg/hr',
               price=price['H3PO4'])  # to T203
lime = Stream('lime', CaO=333.00, Water=2200.00, units='kg/hr',
              price=price['Lime'])  # to T204
polymer = Stream('polymer', Flocculant=0.83, units='kg/hr',
                 price=price['Polymer'])  # to T206
rvf_wash_water = Stream('rvf_wash_water',
                        Water=16770, units='kg/hr',
                        T=363.15)  # to C202
oil_wash_water = Stream('oil_wash_water',
                        Water=1350, units='kg/hr',
                        T=358.15)  # to T208
# %% Units
# Default naming for automatically created streams.
Stream.default_ID = 'd'
Stream.default_ID_number = 0
# Stream.default_ID_number = 100
# Feed the shredder
U101 = ConveyingBelt('U101', ins=lipid_cane)
U101.cost_items['Conveying belt'].ub = 2500
# Separate metals
U102 = MagneticSeparator('U102', ins=U101.outs)
# Shredded cane
U103 = Shredder('U103', ins=U102.outs)
# Stream.default_ID_number = 200
# Hydrolyze starch
T201 = EnzymeTreatment('T201', T=323.15) # T=50 C
# Finely crush lipid cane; split fractions follow the `order` tuple.
U201 = CrushingMill('U201',
                    split=(0.92, 0.92, 0.04, 0.92, 0.92, 0.04, 0.1, 1),
                    order=('Ash', 'Cellulose', 'Glucose', 'Hemicellulose',
                           'Lignin', 'Sucrose', 'Lipid', 'Solids'),
                    moisture_content=0.5)
# Convey out bagasse
U202 = ConveyingBelt('U202', ins=U201.outs[0], outs='Bagasse')
# Mix in imbibition water
M201 = Mixer('M201')
# Screen out fibers; split fractions ordered per psp1.
S201 = VibratingScreen('S201',
                       split=(0.35, 0.35, 0.88, 0.35,
                              0.35, 0.88, 0, 0.88, 0.88),
                       order=psp1)
# Store juice before treatment
T202 = StorageTank('T202')
T202.tau = 12
# Heat up before adding acid
H201 = HXutility('H201', T=343.15)
# Mix in acid
T203 = MixTank('T203')
# Pump acid solution
P201 = Pump('P201')
# Mix lime solution
T204 = MixTank('T204')
T204.tau = 1
P202 = Pump('P202')
# Blend acid lipid solution with lime
T205 = MixTank('T205')
# Mix recycle
M202 = Mixer('M202')
# Heat before adding flocculant
H202 = HXutility('H202', T=372.15)
# Mix in flocculant
T206 = MixTank('T206')
T206.tau = 1/4
# Separate residual solids; split fractions ordered per psp2.
C201 = Clarifier('C201',
                 split=(0, 0, 0, 0.522, 0.522, 0, 0,
                        0.98, 0.522, 0.522, 0.522),
                 order=psp2)
# Remove solids as filter cake
C202 = RVF('C202',
           outs=('filte_cake', ''),
           moisture_content=0.80,
           split=(0.85, 0.85, 0.85, 0.01, 0.85, 0.85, 0.01),
           order=('Ash', 'CaO', 'Cellulose', 'Glucose',
                  'Hemicellulose', 'Lignin', 'Sucrose'))
P203 = Pump('P203')
# Separate oil and sugar: a MixTank monkey-patched to run as a Splitter,
# sending essentially all Lipid plus a trace of Water to the first outlet.
T207 = MixTank('T207', outs=('', ''))
split = np.zeros(len(pretreatment_species), float)
index = pretreatment_species.indices(('Lipid', 'Water'))
split[index] = (1, 0.0001)
T207._split = split
T207._run = lambda : Splitter._run(T207)
del split, index
# Cool the oil
H203 = HXutility('H203', T=343.15)
# Screen out small fibers from sugar stream (split ordered per psp2)
S202 = VibratingScreen('S202', outs=('', 'fiber_fines'),
                       split=1-np.array((0, 0, 0, 1, 0.002, 0, 0,0, 0, 0.002, 0.002)),
                       order=psp2)
sugar = S202-0
S202.mesh_opening = 2
# Add distilled water to wash lipid
T208 = MixTank('T208')
T208.tau = 2
# Centrifuge out water
C203 = SplitCentrifuge_LLE('C203',
                           split=(0.99, 0.01),
                           order=('Lipid', 'Water'))
# Vacuum out water
F201 = SplitFlash('F201', T=347.15, P=2026.5,
                  split=(0.0001, 0.999), order=('Lipid', 'Water'))
lipid = F201.outs[1]
# %% Process specifications
# Specifications dependent on lipid cane flow rate.
# The views below index the species arrays by position -- presumably
# 9/12 = Lipid/Water for enzyme, 7/12 = CaO/Water for lime, 1/12 =
# H3PO4/Water for acid; TODO confirm against pretreatment_species order.
_enzyme_mass = enzyme.mass[[9, 12]]
_CaO_Water_mass = lime.mass[[7, 12]]
_H3PO4_Water_mass = H3PO4.mass[[1, 12]]
last_lipidcane_massnet = int(lipid_cane.massnet)
def correct_flows():
    """Rescale enzyme, lime, acid and imbibition water when the feed rate changes.

    Runs inside the pretreatment system network; the cached integer mass
    avoids recomputing when the feed is unchanged.
    """
    global last_lipidcane_massnet
    massnet = lipid_cane.massnet
    if int(massnet) != last_lipidcane_massnet:
        # correct enzyme, lime, phosphoric acid, and imbibition water
        _enzyme_mass[:] = 0.003 * massnet * np.array([0.1, 0.9])
        _CaO_Water_mass[:] = 0.001 * massnet * np.array([0.046, 0.954])
        _H3PO4_Water_mass[:] = 0.00025 * massnet
        imbibition_water_mass.value = 0.25* massnet
        last_lipidcane_massnet = int(massnet)
# Specifications within a system
def correct_lipid_wash_water():
    """Set oil wash water proportional to the lipid flow leaving H202 (index 12 = Water)."""
    oil_wash_water.mol[12] = H202.outs[0].mol[-2]*100/11
solids_index = Stream.indices(['Ash', 'CaO', 'Cellulose', 'Hemicellulose', 'Lignin'])
def correct_wash_water():
    """Set RVF wash water proportional to the total solids entering the filter."""
    solids = solidsmol[solids_index].sum()
    rvf_wash_water.mol[12] = 0.0574*solids
# Mass view of the water element of the imbibition stream (index 12 = Water).
imbibition_water_mass = imbibition_water.mass.item(12)
# %% Pretreatment system set-up
# NOTE: biosteam overloads '-' as piping notation: `unit-i` selects an
# outlet/inlet slot and `source-stream-sink` connects units.
(U103-0, enzyme)-T201
(T201-0, M201-0)-U201-1-S201-0-T202
(S201-1, imbibition_water)-M201
# Inner recycle loop around the crushing mill and screen.
crushing_mill_recycle_sys = System('crushing_mill_recycle_sys',
                                   network=(U201, S201, M201),
                                   recycle=M201-0)
T202-0-H201
(H201-0, H3PO4)-T203-P201
(P201-0, lime-T204-0)-T205-P202
(P202-0, P203-0)-M202-H202
(H202-0, polymer)-T206-C201
(C201-1, rvf_wash_water)-C202-1-P203
# Inner recycle loop around clarification and the rotary vacuum filter.
clarification_recycle_sys = System('clarification_recycle_sys',
                                   network=(M202, H202, T206, C201, C202, P203),
                                   recycle=C202-1)
C201-0-T207-0-H203
(H203-0, oil_wash_water)-T208-C203-0-F201
T207-1-S202
# Full pretreatment flowsheet; the correct_* functions are interleaved in
# the network so specifications update during convergence.
pretreatment_sys = System('pretreatment_sys',
                          network=(U101, U102, U103,
                                   correct_flows, T201,
                                   crushing_mill_recycle_sys,
                                   U202, T202, H201, T203,
                                   P201, T204, T205, P202,
                                   correct_wash_water,
                                   clarification_recycle_sys,
                                   T207, H203, S202,
                                   correct_lipid_wash_water,
                                   T208, C203, F201,))
# Molar flows feeding the filter; read by correct_wash_water above.
solidsmol = P202.outs[0].mol
# Split the flowsheet into reporting areas: feed handling vs. processing.
area_100 = System('area_100', network=(U101, U102, U103))
units = pretreatment_sys.units.copy()
for i in area_100.network: units.discard(i)
area_200_network = sorted(units, key=lambda x: x.ID)
area_200 = System('area_200', network=area_200_network)
| [
"biosteam.units.MixTank",
"biosteam.units.CrushingMill",
"biosteam.units.ConveyingBelt",
"numpy.array",
"biosteam.biorefineries.lipidcane.species.pretreatment_species.indices",
"biosteam.units.Pump",
"biosteam.units.Mixer",
"biosteam.units.EnzymeTreatment",
"biosteam.Stream.indices",
"biosteam.Str... | [((1507, 1579), 'biosteam.Stream', 'Stream', (['"""lipid_cane"""', 'f1', 'psp1'], {'units': '"""kg/hr"""', 'price': "price['Lipid cane']"}), "('lipid_cane', f1, psp1, units='kg/hr', price=price['Lipid cane'])\n", (1513, 1579), False, 'from biosteam import System, Stream\n'), ((1622, 1709), 'biosteam.Stream', 'Stream', (['"""enzyme"""'], {'Cellulose': '(100)', 'Water': '(900)', 'units': '"""kg/hr"""', 'price': "price['Protease']"}), "('enzyme', Cellulose=100, Water=900, units='kg/hr', price=price[\n 'Protease'])\n", (1628, 1709), False, 'from biosteam import System, Stream\n'), ((1741, 1808), 'biosteam.Stream', 'Stream', (['"""imbibition_water"""'], {'Water': '(87023.35)', 'T': '(338.15)', 'units': '"""kg/hr"""'}), "('imbibition_water', Water=87023.35, T=338.15, units='kg/hr')\n", (1747, 1808), False, 'from biosteam import System, Stream\n'), ((1872, 1949), 'biosteam.Stream', 'Stream', (['"""H3PO4"""'], {'H3PO4': '(74.23)', 'Water': '(13.1)', 'units': '"""kg/hr"""', 'price': "price['H3PO4']"}), "('H3PO4', H3PO4=74.23, Water=13.1, units='kg/hr', price=price['H3PO4'])\n", (1878, 1949), False, 'from biosteam import System, Stream\n'), ((1985, 2060), 'biosteam.Stream', 'Stream', (['"""lime"""'], {'CaO': '(333.0)', 'Water': '(2200.0)', 'units': '"""kg/hr"""', 'price': "price['Lime']"}), "('lime', CaO=333.0, Water=2200.0, units='kg/hr', price=price['Lime'])\n", (1991, 2060), False, 'from biosteam import System, Stream\n'), ((2097, 2170), 'biosteam.Stream', 'Stream', (['"""polymer"""'], {'Flocculant': '(0.83)', 'units': '"""kg/hr"""', 'price': "price['Polymer']"}), "('polymer', Flocculant=0.83, units='kg/hr', price=price['Polymer'])\n", (2103, 2170), False, 'from biosteam import System, Stream\n'), ((2217, 2279), 'biosteam.Stream', 'Stream', (['"""rvf_wash_water"""'], {'Water': '(16770)', 'units': '"""kg/hr"""', 'T': '(363.15)'}), "('rvf_wash_water', Water=16770, units='kg/hr', T=363.15)\n", (2223, 2279), False, 'from biosteam import System, Stream\n'), 
((2357, 2418), 'biosteam.Stream', 'Stream', (['"""oil_wash_water"""'], {'Water': '(1350)', 'units': '"""kg/hr"""', 'T': '(358.15)'}), "('oil_wash_water', Water=1350, units='kg/hr', T=358.15)\n", (2363, 2418), False, 'from biosteam import System, Stream\n'), ((2605, 2642), 'biosteam.units.ConveyingBelt', 'ConveyingBelt', (['"""U101"""'], {'ins': 'lipid_cane'}), "('U101', ins=lipid_cane)\n", (2618, 2642), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((2713, 2753), 'biosteam.units.MagneticSeparator', 'MagneticSeparator', (['"""U102"""'], {'ins': 'U101.outs'}), "('U102', ins=U101.outs)\n", (2730, 2753), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((2778, 2809), 'biosteam.units.Shredder', 'Shredder', (['"""U103"""'], {'ins': 'U102.outs'}), "('U103', ins=U102.outs)\n", (2786, 2809), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((2871, 2904), 'biosteam.units.EnzymeTreatment', 'EnzymeTreatment', (['"""T201"""'], {'T': '(323.15)'}), "('T201', T=323.15)\n", (2886, 2904), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((2947, 3145), 'biosteam.units.CrushingMill', 'CrushingMill', (['"""U201"""'], {'split': '(0.92, 0.92, 0.04, 0.92, 0.92, 0.04, 0.1, 1)', 'order': "('Ash', 'Cellulose', 'Glucose', 'Hemicellulose', 'Lignin', 
'Sucrose',\n 'Lipid', 'Solids')", 'moisture_content': '(0.5)'}), "('U201', split=(0.92, 0.92, 0.04, 0.92, 0.92, 0.04, 0.1, 1),\n order=('Ash', 'Cellulose', 'Glucose', 'Hemicellulose', 'Lignin',\n 'Sucrose', 'Lipid', 'Solids'), moisture_content=0.5)\n", (2959, 3145), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((3254, 3309), 'biosteam.units.ConveyingBelt', 'ConveyingBelt', (['"""U202"""'], {'ins': 'U201.outs[0]', 'outs': '"""Bagasse"""'}), "('U202', ins=U201.outs[0], outs='Bagasse')\n", (3267, 3309), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((3333, 3346), 'biosteam.units.Mixer', 'Mixer', (['"""M201"""'], {}), "('M201')\n", (3338, 3346), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((3375, 3473), 'biosteam.units.VibratingScreen', 'VibratingScreen', (['"""S201"""'], {'split': '(0.35, 0.35, 0.88, 0.35, 0.35, 0.88, 0, 0.88, 0.88)', 'order': 'psp1'}), "('S201', split=(0.35, 0.35, 0.88, 0.35, 0.35, 0.88, 0, 0.88,\n 0.88), order=psp1)\n", (3390, 3473), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((3585, 3604), 'biosteam.units.StorageTank', 'StorageTank', (['"""T202"""'], {}), "('T202')\n", (3596, 3604), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, 
VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((3656, 3683), 'biosteam.units.HXutility', 'HXutility', (['"""H201"""'], {'T': '(343.15)'}), "('H201', T=343.15)\n", (3665, 3683), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((3706, 3721), 'biosteam.units.MixTank', 'MixTank', (['"""T203"""'], {}), "('T203')\n", (3713, 3721), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((3751, 3763), 'biosteam.units.Pump', 'Pump', (['"""P201"""'], {}), "('P201')\n", (3755, 3763), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((3792, 3807), 'biosteam.units.MixTank', 'MixTank', (['"""T204"""'], {}), "('T204')\n", (3799, 3807), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((3828, 3840), 'biosteam.units.Pump', 'Pump', (['"""P202"""'], {}), "('P202')\n", (3832, 3840), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((3887, 3902), 'biosteam.units.MixTank', 'MixTank', (['"""T205"""'], {}), "('T205')\n", (3894, 3902), False, 'from biosteam.units import Mixer, 
EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((3925, 3938), 'biosteam.units.Mixer', 'Mixer', (['"""M202"""'], {}), "('M202')\n", (3930, 3938), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((3979, 4006), 'biosteam.units.HXutility', 'HXutility', (['"""H202"""'], {'T': '(372.15)'}), "('H202', T=372.15)\n", (3988, 4006), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((4035, 4050), 'biosteam.units.MixTank', 'MixTank', (['"""T206"""'], {}), "('T206')\n", (4042, 4050), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((4101, 4199), 'biosteam.units.Clarifier', 'Clarifier', (['"""C201"""'], {'split': '(0, 0, 0, 0.522, 0.522, 0, 0, 0.98, 0.522, 0.522, 0.522)', 'order': 'psp2'}), "('C201', split=(0, 0, 0, 0.522, 0.522, 0, 0, 0.98, 0.522, 0.522, \n 0.522), order=psp2)\n", (4110, 4199), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((4289, 4490), 'biosteam.units.RVF', 'RVF', (['"""C202"""'], {'outs': "('filte_cake', '')", 'moisture_content': '(0.8)', 'split': '(0.85, 0.85, 0.85, 0.01, 0.85, 0.85, 0.01)', 'order': "('Ash', 'CaO', 'Cellulose', 'Glucose', 'Hemicellulose', 'Lignin', 
'Sucrose')"}), "('C202', outs=('filte_cake', ''), moisture_content=0.8, split=(0.85, \n 0.85, 0.85, 0.01, 0.85, 0.85, 0.01), order=('Ash', 'CaO', 'Cellulose',\n 'Glucose', 'Hemicellulose', 'Lignin', 'Sucrose'))\n", (4292, 4490), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((4553, 4565), 'biosteam.units.Pump', 'Pump', (['"""P203"""'], {}), "('P203')\n", (4557, 4565), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((4599, 4629), 'biosteam.units.MixTank', 'MixTank', (['"""T207"""'], {'outs': "('', '')"}), "('T207', outs=('', ''))\n", (4606, 4629), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((4689, 4737), 'biosteam.biorefineries.lipidcane.species.pretreatment_species.indices', 'pretreatment_species.indices', (["('Lipid', 'Water')"], {}), "(('Lipid', 'Water'))\n", (4717, 4737), False, 'from biosteam.biorefineries.lipidcane.species import pretreatment_species\n'), ((4866, 4893), 'biosteam.units.HXutility', 'HXutility', (['"""H203"""'], {'T': '(343.15)'}), "('H203', T=343.15)\n", (4875, 4893), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((5197, 5212), 'biosteam.units.MixTank', 'MixTank', (['"""T208"""'], {}), "('T208')\n", (5204, 5212), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, 
RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((5257, 5330), 'biosteam.units.SplitCentrifuge_LLE', 'SplitCentrifuge_LLE', (['"""C203"""'], {'split': '(0.99, 0.01)', 'order': "('Lipid', 'Water')"}), "('C203', split=(0.99, 0.01), order=('Lipid', 'Water'))\n", (5276, 5330), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((5412, 5504), 'biosteam.units.SplitFlash', 'SplitFlash', (['"""F201"""'], {'T': '(347.15)', 'P': '(2026.5)', 'split': '(0.0001, 0.999)', 'order': "('Lipid', 'Water')"}), "('F201', T=347.15, P=2026.5, split=(0.0001, 0.999), order=(\n 'Lipid', 'Water'))\n", (5422, 5504), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((6409, 6479), 'biosteam.Stream.indices', 'Stream.indices', (["['Ash', 'CaO', 'Cellulose', 'Hemicellulose', 'Lignin']"], {}), "(['Ash', 'CaO', 'Cellulose', 'Hemicellulose', 'Lignin'])\n", (6423, 6479), False, 'from biosteam import System, Stream\n'), ((6801, 6887), 'biosteam.System', 'System', (['"""crushing_mill_recycle_sys"""'], {'network': '(U201, S201, M201)', 'recycle': '(M201 - 0)'}), "('crushing_mill_recycle_sys', network=(U201, S201, M201), recycle=\n M201 - 0)\n", (6807, 6887), False, 'from biosteam import System, Stream\n'), ((7134, 7237), 'biosteam.System', 'System', (['"""clarification_recycle_sys"""'], {'network': '(M202, H202, T206, C201, C202, P203)', 'recycle': '(C202 - 1)'}), "('clarification_recycle_sys', network=(M202, H202, T206, C201, C202,\n P203), recycle=C202 - 1)\n", (7140, 7237), False, 'from biosteam import System, Stream\n'), ((7396, 
7667), 'biosteam.System', 'System', (['"""pretreatment_sys"""'], {'network': '(U101, U102, U103, correct_flows, T201, crushing_mill_recycle_sys, U202,\n T202, H201, T203, P201, T204, T205, P202, correct_wash_water,\n clarification_recycle_sys, T207, H203, S202, correct_lipid_wash_water,\n T208, C203, F201)'}), "('pretreatment_sys', network=(U101, U102, U103, correct_flows, T201,\n crushing_mill_recycle_sys, U202, T202, H201, T203, P201, T204, T205,\n P202, correct_wash_water, clarification_recycle_sys, T207, H203, S202,\n correct_lipid_wash_water, T208, C203, F201))\n", (7402, 7667), False, 'from biosteam import System, Stream\n'), ((8040, 8086), 'biosteam.System', 'System', (['"""area_100"""'], {'network': '(U101, U102, U103)'}), "('area_100', network=(U101, U102, U103))\n", (8046, 8086), False, 'from biosteam import System, Stream\n'), ((8233, 8277), 'biosteam.System', 'System', (['"""area_200"""'], {'network': 'area_200_network'}), "('area_200', network=area_200_network)\n", (8239, 8277), False, 'from biosteam import System, Stream\n'), ((4806, 4825), 'biosteam.units.Splitter._run', 'Splitter._run', (['T207'], {}), '(T207)\n', (4819, 4825), False, 'from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, HXutility, RVF, SplitFlash, VibratingScreen, MagneticSeparator, Clarifier, MixTank, Shredder, ConveyingBelt, Splitter, SplitCentrifuge_LLE, Pump, StorageTank\n'), ((5026, 5081), 'numpy.array', 'np.array', (['(0, 0, 0, 1, 0.002, 0, 0, 0, 0, 0.002, 0.002)'], {}), '((0, 0, 0, 1, 0.002, 0, 0, 0, 0, 0.002, 0.002))\n', (5034, 5081), True, 'import numpy as np\n'), ((6030, 6050), 'numpy.array', 'np.array', (['[0.1, 0.9]'], {}), '([0.1, 0.9])\n', (6038, 6050), True, 'import numpy as np\n'), ((6098, 6122), 'numpy.array', 'np.array', (['[0.046, 0.954]'], {}), '([0.046, 0.954])\n', (6106, 6122), True, 'import numpy as np\n')] |
"""
There are a few important sets of data structures:
dimensions
* N - Size of the dstore.
* K - Number of retrieved neighbors.
* D - Size of the key vectors.
dstore - This is the "ground truth" source of keys, values, and other important
items created by the KNN-LM.
* dstore_keys.npy - The vectors. NxD
* dstore_vals.npy - The source token. Note: These are NOT the values used in the KNN-LM paper. Nx1
* dstore_tgts.npy - The target token. Note: These ARE the values used in the KNN-LM paper. Nx1
* dstore_prob.npy - The predicted probability of the target token. This can be used to compute perplexity of the non-retrieval model. Nx1
lookup - This is a cache of retrieved neighbors on a subset of the dstore.
* lookup_knns.npy - The retrieved neighbors. NxKx1
* lookup_dist.npy - The approximate distance determined by product quantization and faiss. NxKx1
* lookup_done.npy - We only compute `knns` and `dist` for a subset of the data, and we can use `done` to keep track
of which rows we did this for. If `done` is 1, then the row has been computed, and 0 otherwise. Nx1
"""
import argparse
import collections
import os
import numpy as np
import torch
from tqdm import tqdm
def main(args):
    """
    Inspect retrieval quality for a KNN-LM datastore.

    Loads the datastore and its cached neighbor lookups, reports how often the
    retrieved neighbors contain the true target, prints example tokens that
    have no positive / no negative neighbors, and sweeps the interpolation
    coefficient between the LM and KNN probabilities (optionally with
    re-ordered neighbor lists), reporting perplexity for each setting.

    :param args: parsed command line arguments (see the argparse setup below).
    """
    dstore = Dstore(args.dstore, args.dstore_size, 1024)
    dstore.initialize()
    dstore.add_neighbors(args.lookup, args.lookup_k)
    tgts = dstore.tgts[:]
    knn_tgts = dstore.knn_tgts[:, :args.k]
    first_neighbor_is_tgt = tgts == knn_tgts[:, 0]
    print('{} / {} rows have first neighbor as target'.format(first_neighbor_is_tgt.sum(), tgts.shape[0]))
    print('Note: This could be less than 100% because approximate distance is used.')
    print('')
    # binary label indicating the knn target matches the original target
    # (np.int was removed in numpy 1.24; the builtin int is the same dtype)
    label = (knn_tgts == tgts.reshape(-1, 1, 1)).astype(int)
    dsize = tgts.shape[0]
    has_positive = label.reshape(dsize, args.k).sum(axis=1) > 0
    has_negative = label.reshape(dsize, args.k).sum(axis=1) < args.k
    has_both = np.logical_and(has_positive, has_negative)
    print('has_positive = {} / {}'.format(np.sum(has_positive), dsize))
    print('has_negative = {} / {}'.format(np.sum(has_negative), dsize))
    print('has_both = {} / {}'.format(np.sum(has_both), dsize))
    print('')
    print('read vocab')
    vocab = Dictionary()
    vocab.add_from_file(args.vocab)
    vocab.finalize()
    print('found {} tokens'.format(len(vocab)))
    print('')
    nsample = 10
    print('examples with no positives:')
    _print_token_samples(vocab, tgts[has_positive == 0][:nsample], nsample)
    print('examples with no negatives:')
    _print_token_samples(vocab, tgts[has_negative == 0][:nsample], nsample)
    p = dstore.prob[:]
    dist = dstore.dist[:, :args.k]
    knn_p = EvalUtil.get_knn_log_prob(tgts, dist, knn_tgts)
    for coeff, ppl in _sweep_interpolation(p, knn_p):
        print('coeff = {:.3f}, knn_ppl = {}'.format(coeff, ppl))
    if args.optimal_2:
        print('With Optimal-2 Order')
        knn_p_optimal = EvalUtil.get_optimal_knn_log_prob(tgts, dist, knn_tgts)
        for coeff, ppl in _sweep_interpolation(p, knn_p_optimal):
            print('coeff = {:.3f}, knn_ppl = {}'.format(coeff, ppl))
    if args.original:
        # Keep the neighbors in retrieval order and vary how many are used.
        print('With Original Order')
        nsteps = 16
        stepsize = args.k // nsteps
        lim_lst = np.arange(nsteps)
        with open('coeff_lim_tradeoff-original.txt', 'w') as f:
            f.write('k lim coeff ppl\n')
            for i, lim in enumerate(lim_lst[1:]):
                lim = lim * stepsize
                print(i, lim)
                knn_p_lim = EvalUtil.get_knn_log_prob(tgts, dist[:, :lim], knn_tgts[:, :lim])
                for coeff, ppl in _sweep_interpolation(p, knn_p_lim):
                    f.write('{} {} {} {}\n'.format(
                        args.k, lim, coeff, ppl))
    if args.optimal:
        # Re-order neighbors (positives first) before truncating to lim.
        print('With Optimal Order')
        optimal_order = Rerank.optimal_order(label, dist)
        nsteps = 16
        stepsize = args.k // nsteps
        lim_lst = np.arange(nsteps)
        with open('coeff_lim_tradeoff.txt', 'w') as f:
            f.write('k lim coeff ppl\n')
            for lim in lim_lst[1:]:
                lim = lim * stepsize
                print(lim)
                dist_ = np.take_along_axis(dist, optimal_order, axis=1)[:, :lim]
                knn_tgts_ = np.take_along_axis(knn_tgts, optimal_order, axis=1)[:, :lim]
                knn_p_lim = EvalUtil.get_knn_log_prob(tgts, dist_, knn_tgts_)
                for coeff, ppl in _sweep_interpolation(p, knn_p_lim):
                    f.write('{} {} {} {}\n'.format(
                        args.k, lim, coeff, ppl))
    print('')
def _print_token_samples(vocab, sample, nsample):
    """Print up to *nsample* sampled target tokens with their term frequency."""
    for i in range(nsample):
        idx = int(sample[i])
        tok = vocab.symbols[idx]
        tf = vocab.count[idx]
        print('* [{}] "{}" with term frequency {}'.format(idx, tok, tf))
    print('')
def _sweep_interpolation(p, knn_p, ncoeff=20):
    """
    Yield (coeff, perplexity) for interpolation coefficients
    0, 1/ncoeff, ..., (ncoeff-1)/ncoeff between LM and KNN probabilities.
    """
    p_ = torch.from_numpy(p).float()
    knn_p_ = torch.from_numpy(knn_p).float()
    for coeff in np.arange(ncoeff) / ncoeff:
        if coeff == 0:
            # coeff == 0 would take log(0); just use the LM probabilities.
            new_p = p_
        else:
            new_p = EvalUtil.combine_knn_and_vocab_probs(
                knn_p_,
                p_,
                coeff)
        yield coeff, EvalUtil.eval_ppl(new_p)
class Rerank(object):
    """Neighbor re-ordering strategies."""

    @staticmethod
    def optimal_order(label, dist, big=1e6):
        """
        Return, per row, a neighbor ordering with all positives first (sorted
        by descending distance) followed by all negatives (sorted ascending).

        :param label: (n, k, 1) binary array; 1 marks a correct neighbor.
        :param dist: (n, k, 1) array of neighbor distances.
        :param big: unused; kept for backward compatibility.
        :returns: (n, k, 1) int array of indices into the k axis.
        """
        n, k, _ = label.shape
        # positives - mask out negatives and sort from hi to lo
        positive_dist = dist.copy()
        positive_dist[label == 0] = -np.inf
        positive_dist_sorted = np.sort(positive_dist, axis=1)[:, ::-1]
        positive_order = positive_dist.argsort(axis=1)[:, ::-1]
        ## negatives - sort from lo to hi
        negative_dist = dist.copy()
        negative_dist[label == 1] = -np.inf
        negative_dist_sorted = np.sort(negative_dist, axis=1)
        negative_order = negative_dist.argsort(axis=1)
        # set positives (front slots) and negatives (back slots)
        # NOTE: np.int was removed in numpy 1.24; use the builtin int dtype.
        new_order = np.zeros((n, k, 1)).astype(int)
        new_order[positive_dist_sorted > -np.inf] = positive_order[positive_dist_sorted > -np.inf]
        new_order[negative_dist_sorted > -np.inf] = negative_order[negative_dist_sorted > -np.inf]
        # Sanity check: every index 0..k-1 appears exactly n times overall.
        assert np.all(np.unique(new_order, return_counts=True)[1] == n)
        return new_order
class Dstore:
    """
    Read-only view over the memory-mapped files produced by a KNN-LM run:
    targets, source tokens, LM probabilities, and (optionally) the cached
    neighbor lookups (neighbor targets and approximate distances).
    """

    def __init__(self, path, dstore_size=None, vec_size=None):
        self.path = path
        self.dstore_size = dstore_size
        self.vec_size = vec_size
        self._initialized = False

    def initialize(self):
        """Memory-map the target, value, and probability files (read-only)."""
        path = self.path
        # NOTE: np.int was removed in numpy 1.24; the builtin int maps to the
        # same platform default integer dtype, so existing files stay readable.
        self.tgts = np.memmap(os.path.join(path, 'dstore_tgts.npy'), dtype=int, mode='r', shape=(self.dstore_size, 1))
        self.vals = np.memmap(os.path.join(path, 'dstore_vals.npy'), dtype=int, mode='r', shape=(self.dstore_size, 1))
        self.prob = np.memmap(os.path.join(path, 'dstore_prob.npy'), dtype=np.float32, mode='r', shape=(self.dstore_size, 1))
        self._initialized = True

    def add_neighbors(self, path, k):
        """Memory-map the cached lookup of k neighbor targets and distances."""
        self.knn_tgts = np.memmap(os.path.join(path, 'lookup_knn_tgts.npy'), dtype=int, mode='r', shape=(self.dstore_size, k, 1))
        self.dist = np.memmap(os.path.join(path, 'lookup_dist.npy'), dtype=np.float32, mode='r', shape=(self.dstore_size, k, 1))
class Dictionary(object):
    """
    A mapping from symbols to consecutive integers.
    Taken from fairseq repo.

    Maintains three parallel structures: ``symbols`` (index -> token),
    ``count`` (index -> frequency) and ``indices`` (token -> index).
    Special symbols (bos/pad/eos/unk) always occupy the first indices.
    """
    def __init__(
        self,
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        bos="<s>",
        extra_special_symbols=None,
    ):
        self.unk_word, self.pad_word, self.eos_word = unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        # Special symbols are added first so they get the lowest indices.
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3; kept as-is to match the fairseq original.
        return self.indices == other.indices
    def __getitem__(self, idx):
        # Out-of-range indices map to the unknown token.
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word
    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)
    def __contains__(self, sym):
        return sym in self.indices
    def index(self, sym):
        """Returns the index of the specified symbol"""
        assert isinstance(sym, str)
        if sym in self.indices:
            return self.indices[sym]
        return self.unk_index
    def unk_string(self, escape=False):
        """Return unknown string, optionally escaped as: <<unk>>"""
        if escape:
            return "<{}>".format(self.unk_word)
        else:
            return self.unk_word
    def add_symbol(self, word, n=1):
        """Adds a word to the dictionary and returns its index.

        If the word is already present, its count is increased by *n*
        instead of adding a duplicate entry.
        """
        if word in self.indices:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def update(self, new_dict):
        """Updates counts from new dictionary."""
        for word in new_dict.symbols:
            idx2 = new_dict.indices[word]
            if word in self.indices:
                # Known word: merge the counts.
                idx = self.indices[word]
                self.count[idx] = self.count[idx] + new_dict.count[idx2]
            else:
                # New word: append with the other dictionary's count.
                idx = len(self.symbols)
                self.indices[word] = idx
                self.symbols.append(word)
                self.count.append(new_dict.count[idx2])
    def finalize(self, threshold=-1, nwords=-1, padding_factor=8):
        """Sort symbols by frequency in descending order, ignoring special ones.
        Args:
            - threshold defines the minimum word count
            - nwords defines the total number of words in the final dictionary,
                including special symbols
            - padding_factor can be used to pad the dictionary size to be a
                multiple of 8, which is important on some hardware (e.g., Nvidia
                Tensor Cores).
        """
        if nwords <= 0:
            nwords = len(self)
        # Special symbols keep their original (leading) indices.
        new_indices = dict(zip(self.symbols[: self.nspecial], range(self.nspecial)))
        new_symbols = self.symbols[: self.nspecial]
        new_count = self.count[: self.nspecial]
        # Sorting the (symbol, count) pairs before building the Counter makes
        # most_common break count ties alphabetically (sorted() is stable),
        # so finalize produces a deterministic ordering.
        c = collections.Counter(
            dict(
                sorted(zip(self.symbols[self.nspecial :], self.count[self.nspecial :]))
            )
        )
        for symbol, count in c.most_common(nwords - self.nspecial):
            if count >= threshold:
                new_indices[symbol] = len(new_symbols)
                new_symbols.append(symbol)
                new_count.append(count)
            else:
                # Counts only decrease from here, so we can stop early.
                break
        assert len(new_symbols) == len(new_indices)
        self.count = list(new_count)
        self.symbols = list(new_symbols)
        self.indices = new_indices
        self.pad_to_multiple_(padding_factor)
    def pad_to_multiple_(self, padding_factor):
        """Pad Dictionary size to be a multiple of *padding_factor*."""
        if padding_factor > 1:
            i = 0
            while len(self) % padding_factor != 0:
                # Filler tokens have count 0 and never appear in real data.
                symbol = "madeupword{:04d}".format(i)
                self.add_symbol(symbol, n=0)
                i += 1
    def bos(self):
        """Helper to get index of beginning-of-sentence symbol"""
        return self.bos_index
    def pad(self):
        """Helper to get index of pad symbol"""
        return self.pad_index
    def eos(self):
        """Helper to get index of end-of-sentence symbol"""
        return self.eos_index
    def unk(self):
        """Helper to get index of unk symbol"""
        return self.unk_index
    def add_from_file(self, f):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols
        to this instance.

        *f* may be a path or an open file object; each line must look like
        '<token> <count>' (the count is everything after the last space).
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(
                    "Incorrect encoding detected in {}, please "
                    "rebuild the dataset".format(f)
                )
            return
        for line in f.readlines():
            # Split on the LAST space so tokens may themselves contain spaces.
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError(
                    "Incorrect dictionary format, expected '<token> <cnt>'"
                )
            word = line[:idx]
            count = int(line[idx + 1 :])
            self.indices[word] = len(self.symbols)
            self.symbols.append(word)
            self.count.append(count)
class EvalUtil:
    """Static helpers for computing KNN-LM interpolated perplexity."""

    @staticmethod
    def get_knn_log_prob(tgts, dists, knn_tgts):
        """
        Log-probability the retrieved neighbors assign to each target token:
        a softmax over negative distances, marginalized over the neighbors
        whose target matches. Returns an (N, 1) numpy array.
        """
        targets = torch.from_numpy(tgts).long().view(-1)
        neg_dists = -torch.from_numpy(dists).float().squeeze(-1)
        log_probs = torch.log_softmax(neg_dists, dim=-1)
        neighbor_tgts = torch.from_numpy(knn_tgts).long().squeeze(-1)
        match = torch.eq(neighbor_tgts, targets.unsqueeze(-1)).float()
        # 0 where the neighbor target matches, -10000 elsewhere (for stability)
        mask = (match - 1.0) * 10000
        yhat_knn_prob = torch.logsumexp(log_probs + mask, dim=-1).numpy()
        # Bx1
        return yhat_knn_prob.reshape(-1, 1)

    @staticmethod
    def get_optimal_knn_log_prob(tgts, dists, knn_tgts):
        """
        For each row, return the index of the neighbor-count prefix (1..k-1)
        minimizing the knn log-probability, as an (N, 1) int array.
        """
        k = knn_tgts.shape[1]
        columns = [
            EvalUtil.get_knn_log_prob(tgts, dists[:, :lim], knn_tgts[:, :lim])
            for lim in range(1, k)
        ]
        stacked = np.concatenate(columns, axis=1)
        return stacked.argmin(axis=1).reshape(-1, 1)

    @staticmethod
    def combine_knn_and_vocab_probs(knn_p, vocab_p, coeff):
        """Interpolate log-probs: log((1 - coeff) * vocab_p + coeff * knn_p)."""
        combine_probs = torch.stack([vocab_p, knn_p], dim=0)
        coeffs = torch.ones_like(combine_probs)
        coeffs[0] = np.log(1 - coeff)
        coeffs[1] = np.log(coeff)
        return torch.logsumexp(combine_probs + coeffs, dim=0)

    @staticmethod
    def eval_ppl(p):
        """Perplexity (base 2) from natural-log probabilities in *p*."""
        avg_nll = -p.mean() / np.log(2)
        return 2 ** avg_nll
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # dstore: location, row count, and vocabulary of the datastore files
    parser.add_argument('--dstore', default='from_dstore_valid/tr', type=str)
    parser.add_argument('--dstore-size', default=100000, type=int)
    parser.add_argument('--vocab', default='data-bin/wikitext-103/dict.txt')
    # dstore neighbors: location and width of the cached neighbor lookup
    parser.add_argument('--lookup', default='from_dstore_valid/lookup_tr', type=str)
    parser.add_argument('--lookup-k', default=1024, type=int)
    # examine: how many neighbors to analyse and which orderings to report
    parser.add_argument('--k', default=1024, type=int)
    parser.add_argument('--original', action='store_true')
    parser.add_argument('--optimal', action='store_true')
    parser.add_argument('--optimal-2', action='store_true')
    args = parser.parse_args()
    print(args)
    main(args)
| [
"torch.ones_like",
"numpy.unique",
"torch.log_softmax",
"argparse.ArgumentParser",
"numpy.logical_and",
"numpy.sort",
"torch.stack",
"numpy.log",
"os.path.join",
"torch.from_numpy",
"numpy.sum",
"numpy.zeros",
"numpy.concatenate",
"numpy.take_along_axis",
"numpy.arange",
"torch.logsume... | [((2131, 2173), 'numpy.logical_and', 'np.logical_and', (['has_positive', 'has_negative'], {}), '(has_positive, has_negative)\n', (2145, 2173), True, 'import numpy as np\n'), ((17064, 17089), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17087, 17089), False, 'import argparse\n'), ((3442, 3455), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (3451, 3455), True, 'import numpy as np\n'), ((4603, 4620), 'numpy.arange', 'np.arange', (['nsteps'], {}), '(nsteps)\n', (4612, 4620), True, 'import numpy as np\n'), ((5834, 5851), 'numpy.arange', 'np.arange', (['nsteps'], {}), '(nsteps)\n', (5843, 5851), True, 'import numpy as np\n'), ((7899, 7929), 'numpy.sort', 'np.sort', (['negative_dist'], {'axis': '(1)'}), '(negative_dist, axis=1)\n', (7906, 7929), True, 'import numpy as np\n'), ((15791, 15823), 'torch.log_softmax', 'torch.log_softmax', (['dists'], {'dim': '(-1)'}), '(dists, dim=-1)\n', (15808, 15823), False, 'import torch\n'), ((16464, 16491), 'numpy.concatenate', 'np.concatenate', (['lst'], {'axis': '(1)'}), '(lst, axis=1)\n', (16478, 16491), True, 'import numpy as np\n'), ((16648, 16684), 'torch.stack', 'torch.stack', (['[vocab_p, knn_p]'], {'dim': '(0)'}), '([vocab_p, knn_p], dim=0)\n', (16659, 16684), False, 'import torch\n'), ((16702, 16732), 'torch.ones_like', 'torch.ones_like', (['combine_probs'], {}), '(combine_probs)\n', (16717, 16732), False, 'import torch\n'), ((16753, 16770), 'numpy.log', 'np.log', (['(1 - coeff)'], {}), '(1 - coeff)\n', (16759, 16770), True, 'import numpy as np\n'), ((16791, 16804), 'numpy.log', 'np.log', (['coeff'], {}), '(coeff)\n', (16797, 16804), True, 'import numpy as np\n'), ((16825, 16871), 'torch.logsumexp', 'torch.logsumexp', (['(combine_probs + coeffs)'], {'dim': '(0)'}), '(combine_probs + coeffs, dim=0)\n', (16840, 16871), False, 'import torch\n'), ((2216, 2236), 'numpy.sum', 'np.sum', (['has_positive'], {}), '(has_positive)\n', (2222, 2236), True, 'import numpy as np\n'), 
((2288, 2308), 'numpy.sum', 'np.sum', (['has_negative'], {}), '(has_negative)\n', (2294, 2308), True, 'import numpy as np\n'), ((2356, 2372), 'numpy.sum', 'np.sum', (['has_both'], {}), '(has_both)\n', (2362, 2372), True, 'import numpy as np\n'), ((3353, 3372), 'torch.from_numpy', 'torch.from_numpy', (['p'], {}), '(p)\n', (3369, 3372), False, 'import torch\n'), ((3394, 3417), 'torch.from_numpy', 'torch.from_numpy', (['knn_p'], {}), '(knn_p)\n', (3410, 3417), False, 'import torch\n'), ((4065, 4078), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (4074, 4078), True, 'import numpy as np\n'), ((7641, 7671), 'numpy.sort', 'np.sort', (['positive_dist'], {'axis': '(1)'}), '(positive_dist, axis=1)\n', (7648, 7671), True, 'import numpy as np\n'), ((8809, 8846), 'os.path.join', 'os.path.join', (['path', '"""dstore_tgts.npy"""'], {}), "(path, 'dstore_tgts.npy')\n", (8821, 8846), False, 'import os\n'), ((8931, 8968), 'os.path.join', 'os.path.join', (['path', '"""dstore_vals.npy"""'], {}), "(path, 'dstore_vals.npy')\n", (8943, 8968), False, 'import os\n'), ((9053, 9090), 'os.path.join', 'os.path.join', (['path', '"""dstore_prob.npy"""'], {}), "(path, 'dstore_prob.npy')\n", (9065, 9090), False, 'import os\n'), ((9382, 9423), 'os.path.join', 'os.path.join', (['path', '"""lookup_knn_tgts.npy"""'], {}), "(path, 'lookup_knn_tgts.npy')\n", (9394, 9423), False, 'import os\n'), ((9511, 9548), 'os.path.join', 'os.path.join', (['path', '"""lookup_dist.npy"""'], {}), "(path, 'lookup_dist.npy')\n", (9523, 9548), False, 'import os\n'), ((16968, 16977), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (16974, 16977), True, 'import numpy as np\n'), ((3960, 3979), 'torch.from_numpy', 'torch.from_numpy', (['p'], {}), '(p)\n', (3976, 3979), False, 'import torch\n'), ((4005, 4036), 'torch.from_numpy', 'torch.from_numpy', (['knn_p_optimal'], {}), '(knn_p_optimal)\n', (4021, 4036), False, 'import torch\n'), ((8044, 8063), 'numpy.zeros', 'np.zeros', (['(n, k, 1)'], {}), '((n, k, 1))\n', (8052, 
8063), True, 'import numpy as np\n'), ((5137, 5150), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (5146, 5150), True, 'import numpy as np\n'), ((6073, 6120), 'numpy.take_along_axis', 'np.take_along_axis', (['dist', 'optimal_order'], {'axis': '(1)'}), '(dist, optimal_order, axis=1)\n', (6091, 6120), True, 'import numpy as np\n'), ((6158, 6209), 'numpy.take_along_axis', 'np.take_along_axis', (['knn_tgts', 'optimal_order'], {'axis': '(1)'}), '(knn_tgts, optimal_order, axis=1)\n', (6176, 6209), True, 'import numpy as np\n'), ((6428, 6441), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (6437, 6441), True, 'import numpy as np\n'), ((8300, 8340), 'numpy.unique', 'np.unique', (['new_order'], {'return_counts': '(True)'}), '(new_order, return_counts=True)\n', (8309, 8340), True, 'import numpy as np\n'), ((5024, 5043), 'torch.from_numpy', 'torch.from_numpy', (['p'], {}), '(p)\n', (5040, 5043), False, 'import torch\n'), ((5077, 5100), 'torch.from_numpy', 'torch.from_numpy', (['knn_p'], {}), '(knn_p)\n', (5093, 5100), False, 'import torch\n'), ((6315, 6334), 'torch.from_numpy', 'torch.from_numpy', (['p'], {}), '(p)\n', (6331, 6334), False, 'import torch\n'), ((6368, 6391), 'torch.from_numpy', 'torch.from_numpy', (['knn_p'], {}), '(knn_p)\n', (6384, 6391), False, 'import torch\n'), ((15653, 15675), 'torch.from_numpy', 'torch.from_numpy', (['tgts'], {}), '(tgts)\n', (15669, 15675), False, 'import torch\n'), ((15708, 15731), 'torch.from_numpy', 'torch.from_numpy', (['dists'], {}), '(dists)\n', (15724, 15731), False, 'import torch\n'), ((16080, 16123), 'torch.logsumexp', 'torch.logsumexp', (['(probs + index_mask)'], {'dim': '(-1)'}), '(probs + index_mask, dim=-1)\n', (16095, 16123), False, 'import torch\n'), ((15855, 15881), 'torch.from_numpy', 'torch.from_numpy', (['knn_tgts'], {}), '(knn_tgts)\n', (15871, 15881), False, 'import torch\n')] |
#exec(open("C:\\dev\\blender\\blogo\\src\\blogo.py").read())
import bpy
import math
import mathutils
import numpy as np
import runpy
#exec(open("C:\\dev\\blender\\blogo\\src\\blogo_colours.py").read())
import blogo_colours
import blogo
# TODO
# Clean up functions
# Add config file reading (with defaults and options)
# Use contexts to stop updating the real scene every time; instead, only link at the end
# Add ability to set cross section from logo?
# Add fd_rt() and fd_lt() which go forward by dist - cs_width, then slowly arc before subtracting cs_width off the next fd to give a smooth turn
# Investigate being able to copy another turtle, but add in other commands, eg:
# turtle1.fd(10), turtle1.rt(90), turtle1.fd(10), turtle1.add_stop("first"), turtle1.fd(10), turtle1.circle(10), turtle1.add_stop("second"), turtle1.fd(10)
# turtle2.pu(), turtle2.replay_to_stop(turtle1, "first"), turtle2.pd(), turtle2.replay_to_stop(turtle1, "second")
# Cross section from image
def dump(obj):
    """
    Helper function to print what is inside a python object
    (every attribute name and its repr, one per line).
    """
    for attr in dir(obj):
        try:
            print("obj.%s = %r" % (attr, getattr(obj, attr)))
        except Exception:
            # Catch Exception, not a bare except, so KeyboardInterrupt
            # and SystemExit still propagate.
            print("fail on "+str(attr))
last_run_file = None  # remembered so run()/show() can reload the last script
def run(file=None, **kwargs):
    """
    Run a python script specified by file
    :param file: The name of the file to load, or `None` to reload the last file loaded
    :param kwargs: User data which can be used by the script.
    """
    global last_run_file
    # `is None` rather than `== None`: identity is the correct test for None.
    if file is None:
        file = last_run_file
    last_run_file = file
    # Execute the script and fold its globals into this module so that
    # interactive callers can reach everything the script defined.
    globs = runpy.run_path(file, globals())
    globals().update(globs)
    blogo.Blogo.clean_up()
def show(file=None, **kwargs):
    """
    Run a python script specified by file and then show the rendered results
    :param file: The name of the file to load, or `None` to reload the last file loaded
    :param kwargs: User data which can be used by the script.
    """
    run(file, **kwargs)
    # NOTE(review): unselect_objects/show_objects are BlogoUtils helpers
    # defined outside this excerpt — presumably deselect everything and then
    # reveal the freshly created objects; confirm against the full class.
    BlogoUtils.unselect_objects()
    BlogoUtils.show_objects()
class BlogoUtils:
    """
    A class of useful helper functions to be used when programming with Blender
    All helpers are invoked on the class itself (no instances are created).
    """
    # Monotonically increasing id handed out by object_counter().
    __objcounter = 0
    # Dummy array used only to obtain numpy's array type in is_sequence().
    __nullnparray = np.array((0,0,0))
def is_sequence(var):
"""
Tell whether a variable is a sequence, i.e. a list or tuple
"""
return (type(var) == tuple or type(var) == list or type(var) == type(BlogoUtils.__nullnparray))
def object_counter():
"""
Return a unique number
"""
BlogoUtils.__objcounter += 1
return BlogoUtils.__objcounter
def start_fresh(leave_types=[]):
"""
Delete everything, so that we can start again
"""
BlogoUtils.remove_data_block(bpy.data.meshes)
BlogoUtils.remove_data_block(bpy.data.materials)
BlogoUtils.remove_data_block(bpy.data.textures)
BlogoUtils.remove_data_block(bpy.data.images)
BlogoUtils.remove_data_block(bpy.data.lights)
BlogoUtils.remove_data_block(bpy.data.cameras)
BlogoUtils.remove_data_block(bpy.data.actions)
BlogoUtils.remove_data_block(bpy.data.node_groups)
BlogoUtils.remove_data_block(bpy.data.objects)
BlogoUtils.clear_info()
def remove_data_block(data, only_orphans=False):
for block in data:
if ((not only_orphans) or (block.users == 0)):
data.remove(block)
return
    def clear_info():
        """
        Clear all lines from the info screen
        """
        # Temporarily switch this area to the INFO editor so the report
        # operators have the context they need, then restore the old type.
        prev_ui_type = bpy.context.area.ui_type
        bpy.context.area.ui_type = 'INFO'
        bpy.ops.info.select_all()
        bpy.ops.info.report_delete()
        bpy.context.area.ui_type = prev_ui_type
    def copy_object(objects, location, relative_location=True, copy_mesh=True):
        """
        Copy an object and insert it into a new location
        :param objects: a single object or a sequence of objects to copy
        :param location: per-axis offset (when relative_location is True) or
            the absolute target position.
            NOTE(review): the absolute branch calls location.copy(), so it
            presumably expects a mathutils.Vector — confirm with callers.
        :param relative_location: when True, offset each copy's position by
            location; when False, place every copy exactly at location
        :param copy_mesh: when True, also duplicate the mesh data so the
            copies do not share the original mesh datablock
        :returns: the list of copies, linked into the current collection
        """
        if (not BlogoUtils.is_sequence(objects)):
            objects = (objects,)
        #sce = bpy.context.scene
        copies = []
        for object in objects:
            copy = object.copy()
            if (relative_location):
                copy.location[0] += location[0]
                copy.location[1] += location[1]
                copy.location[2] += location[2]
            else:
                copy.location = location.copy()
            if (copy_mesh):
                copy.data = copy.data.copy()
            bpy.context.collection.objects.link(copy)
            copies.append(copy)
        #sce.update()
        return copies
    def join_objects(objects):
        """
        Join multiple objects together
        :param objects: sequence of objects; the first becomes the active
            object that the others are merged into
        """
        #scene = bpy.context.scene
        ctx = bpy.context.copy()
        # one of the objects to join
        ctx['active_object'] = objects[0]
        ctx['selected_editable_objects'] = objects
        # NOTE(review): passing a context dict to an operator is deprecated in
        # newer Blender releases (use Context.temp_override) — confirm the
        # Blender version this targets.
        bpy.ops.object.join(ctx)
def mode_set(mode):
    """
    Set the given interaction mode, e.g. "OBJECT" or "EDIT".

    Failures are deliberately swallowed (the mode may already be active, or
    no object may be selected) — callers must never see an exception.
    """
    try:
        bpy.ops.object.mode_set(mode=mode)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; the best-effort behaviour is preserved.
        pass
def list_missing(full_list, partial_list):
    """
    Return the items that are in full_list, but not in partial_list.
    e.g. list_missing((1, 2, 3, 4), (1, 3, 5)) ==> [2, 4]
    :param full_list: The full list of items
    :param partial_list: full_list with some items missing
    :returns: list of the items present only in full_list (duplicates kept)
    """
    return [item for item in full_list if item not in partial_list]
def rotation_matrix(axis, theta):
    """
    Return the 3x3 matrix for a counterclockwise rotation about *axis*
    by *theta* radians (Euler-Rodrigues formulation).
    """
    unit = np.asarray(axis)
    unit = unit / math.sqrt(np.dot(unit, unit))
    w = math.cos(theta / 2.0)
    x, y, z = -unit * math.sin(theta / 2.0)
    ww, xx, yy, zz = w * w, x * x, y * y, z * z
    xy, wz, wy, wx, xz, yz = x * y, w * z, w * y, w * x, x * z, y * z
    return np.array([
        [ww + xx - yy - zz, 2 * (xy + wz), 2 * (xz - wy)],
        [2 * (xy - wz), ww + yy - xx - zz, 2 * (yz + wx)],
        [2 * (xz + wy), 2 * (yz - wx), ww + zz - xx - yy],
    ])
def rotate_axis(axis, vec, angle):
    """
    Rotate *vec* around *axis* by *angle* degrees and return the result.
    """
    if angle == 0:
        return vec  # no-op: hand back the input unchanged
    rotation = BlogoUtils.rotation_matrix(axis, math.radians(angle))
    return np.dot(rotation, vec)
def normalize_angle(angle):
    """
    Return an angle in [0, 360) giving the same heading as *angle*.

    Uses modulo arithmetic instead of the original repeated add/subtract
    loops, so it is O(1) even for inputs far outside the range.
    """
    return angle % 360
def dot_product(a, b):
    """
    Return the dot product of two vectors (b must be at least as long as a).
    """
    return sum(a[i] * b[i] for i in range(len(a)))
def cross_product(a, b):
    """
    Return the 3D cross product a x b as an (x, y, z) tuple.
    """
    return (a[1] * b[2] - a[2] * b[1],
            a[2] * b[0] - a[0] * b[2],
            a[0] * b[1] - a[1] * b[0])
def norm(vec):
    """
    Return the Euclidean norm (length) of *vec*.
    """
    squared_length = BlogoUtils.dot_product(vec, vec)
    return math.sqrt(squared_length)
def normalize(vec):
    """
    Return *vec* scaled to unit length, as a list.

    The norm is computed once and reused; the original recomputed it for
    every component (quadratic work for an n-component vector).
    """
    length = BlogoUtils.norm(vec)
    return [component / length for component in vec]
def project_onto_plane(vec, normal):
    """
    Project *vec* onto the plane defined by *normal*.

    Uses the identity n x (v x n) = v - n(v.n), which holds for a *unit*
    normal n, so the normal is normalised first.  (The original computed
    unit_normal but then accidentally used the raw normal, scaling the
    result by |normal|^2.)
    """
    unit_normal = BlogoUtils.normalize(normal)
    return BlogoUtils.cross_product(unit_normal, BlogoUtils.cross_product(vec, unit_normal))
def unit_vector(vector):
    """
    Return *vector* scaled to unit length.
    """
    magnitude = np.linalg.norm(vector)
    return vector / magnitude
def angle_between(a, b):
    """
    Return the angle in radians between the two vectors a and b.
    """
    sine_part = BlogoUtils.norm(BlogoUtils.cross_product(a, b))
    cosine_part = BlogoUtils.dot_product(a, b)
    return math.atan2(sine_part, cosine_part)
def line_length(line):
    """
    Return the Euclidean length of the vector *line*.
    """
    return math.sqrt(sum(component * component for component in line))
def get_projected_angle(vec, vec_on_plane, plane_normal):
    """
    Get the angle (degrees) between the in-plane component of *vec* and
    *vec_on_plane*.

    Returns 0 when *vec* is (nearly) parallel to *plane_normal*, i.e. its
    projection onto the plane is too short to define a direction.
    """
    EPS = 0.0001
    projected = BlogoUtils.project_onto_plane(vec, plane_normal)
    if BlogoUtils.line_length(projected) <= EPS:
        return 0
    return math.degrees(BlogoUtils.angle_between(projected, vec_on_plane))
def add_locations(location1, location2, mul2=1, mul1=1):
    """
    Return the component-wise combination mul1*location1 + mul2*location2
    for the first three axes, as a tuple.
    """
    return tuple(mul1 * location1[axis] + mul2 * location2[axis]
                 for axis in range(3))
def unselect_objects():
    """
    Unselect all objects in the scene.
    """
    bpy.ops.object.select_all(action='DESELECT')
def select_objects():
    """
    Select all objects in the scene.
    """
    bpy.ops.object.select_all(action='SELECT')
def show_objects():
    """
    Move to the screen showing rendered objects
    (switch the current area to the 3D viewport with rendered shading).
    """
    bpy.context.area.ui_type = 'VIEW_3D'
    bpy.context.space_data.shading.type = 'RENDERED'
def create_object(objname, vertices, faces):
    """
    Create an object from lists of vertices and faces, link it into the
    scene, and recentre its origin on the volume's median.

    :param objname: name for both the mesh data block and the object
    :param vertices: list of (x, y, z) vertex coordinates
    :param faces: list of vertex-index tuples describing each face
    :returns: tuple (object, mesh)
    """
    mesh = bpy.data.meshes.new(objname)
    obj = bpy.data.objects.new(objname, mesh)
    bpy.context.collection.objects.link(obj)
    # Generate mesh data
    mesh.from_pydata(vertices, [], faces)
    # Calculate the edges
    mesh.update(calc_edges=True)
    # Make this object the sole selection/active object so the edit-mode
    # operators below apply to it.
    bpy.ops.object.select_all(action='DESELECT')
    obj.select_set(True);
    bpy.context.view_layer.objects.active = obj
    # Connect vertices across concave faces (requires EDIT mode).
    BlogoUtils.mode_set('EDIT')
    bpy.ops.mesh.vert_connect_concave()
    BlogoUtils.mode_set('OBJECT')
    bpy.ops.object.origin_set(type='ORIGIN_CENTER_OF_VOLUME', center='MEDIAN')
    return (obj, mesh)
def select_object(obj, mode='EDIT'):
    """
    Make *obj* the sole selected/active object and switch to *mode*.

    :param obj: the Blender object to select
    :param mode: mode to enter afterwards; None keeps the current mode
    :returns: the mode that was active before the call (so callers can
        restore it via unselect_object)
    """
    current_mode = bpy.context.object.mode
    if mode is None:  # idiom fix: identity comparison for None
        mode = current_mode
    bpy.context.view_layer.objects.active = obj
    bpy.ops.object.mode_set(mode='OBJECT')
    bpy.ops.object.select_all(action='DESELECT')
    obj.select_set(True)
    bpy.context.view_layer.objects.active = obj
    bpy.ops.object.mode_set(mode=mode)
    return current_mode
def unselect_object(obj, prev_mode=None):
    """
    Deselect *obj* and switch back to *prev_mode*.

    :param obj: the Blender object to deselect
    :param prev_mode: mode to end in; None keeps the mode active at call time
    """
    if prev_mode is None:  # idiom fix: identity comparison for None
        prev_mode = bpy.context.object.mode
    bpy.ops.object.mode_set(mode='OBJECT')
    obj.select_set(False)
    bpy.ops.object.mode_set(mode=prev_mode)
def unwrap_object(obj):
    """
    Run UV unwrap for *obj*, restoring whatever mode was active beforehand.
    """
    saved_mode = BlogoUtils.select_object(obj)
    bpy.ops.uv.unwrap()
    BlogoUtils.unselect_object(obj, saved_mode)
def smart_project_object(obj):
    """
    Run smart UV projection for *obj*, restoring the previous mode afterwards.
    """
    saved_mode = BlogoUtils.select_object(obj)
    bpy.ops.uv.smart_project()
    BlogoUtils.unselect_object(obj, saved_mode)
def add_texture(obj, textureFile, alpha=1.0, unwrap=False, smart_project=False, texture_scale=None):
    """
    Add texture to an object.

    *textureFile* may be either a colour (an RGB(A) tuple or a name known to
    blogo_colours), in which case a plain Principled-BSDF material is used,
    or an image file path, in which case an image-texture node setup is
    built (optionally UV-unwrapped or smart-projected).

    :param obj: the Blender object to texture
    :param textureFile: colour tuple / colour name / image file path
    :param alpha: overall transparency factor (1.0 = opaque)
    :param unwrap: if True, UV-unwrap the object and use UV coordinates
    :param smart_project: if True, use smart UV projection (forces a unique material)
    :param texture_scale: uniform scale applied via a mapping node (None/1 = no scaling)
    :returns: obj, with the material assigned
    """
    uv_coord_type = "Generated"
    colours = blogo_colours.all_colours()
    if (type(textureFile) == tuple or textureFile in colours):
        # --- solid colour branch; materials are cached by name ---
        name = "Mat-"+str(textureFile);
        if (textureFile in colours):
            textureTuple = colours[textureFile]
        else:
            textureTuple = textureFile
        # Pad RGB up to RGBA with full alpha.
        while (len(textureTuple) < 4):
            textureTuple += (1.0,)
        material = bpy.data.materials.get(name)
        if material is None:
            material = bpy.data.materials.new(name)
        material.use_nodes = True
        principled_bsdf = material.node_tree.nodes['Principled BSDF']
        if principled_bsdf is not None:
            principled_bsdf.inputs[0].default_value = textureTuple  # Base Color
        alpha *= textureTuple[3]
        if (alpha < 1):
            obj.show_transparent = True
            material.blend_method = 'BLEND'
            # NOTE(review): input index 18 is assumed to be 'Alpha'; this
            # index is Blender-version dependent — verify for the target release.
            principled_bsdf.inputs[18].default_value = alpha
        obj.active_material = material
        return obj
    # --- image texture branch; materials cached by name unless smart_project ---
    name = "Mat-"+textureFile + "-" + str(texture_scale) + str(unwrap)
    if (smart_project):
        name += str(BlogoUtils.object_counter())
    mat = bpy.data.materials.get(name)
    if mat is None:
        mat = bpy.data.materials.new(name=name)
        mat.use_nodes = True
        bsdf = mat.node_tree.nodes["Principled BSDF"]
        texImage = mat.node_tree.nodes.new('ShaderNodeTexImage')
        texImage.image = bpy.data.images.load(textureFile)
        mat.node_tree.links.new(bsdf.inputs['Base Color'], texImage.outputs['Color'])
        if (texture_scale != None and texture_scale != 1):
            # Insert a mapping node to scale the texture coordinates uniformly.
            mapping = mat.node_tree.nodes.new('ShaderNodeMapping')
            mat.node_tree.links.new(texImage.inputs['Vector'], mapping.outputs['Vector'])
            mapping.inputs[3].default_value[0] = texture_scale
            mapping.inputs[3].default_value[1] = texture_scale
            mapping.inputs[3].default_value[2] = texture_scale
        else:
            mapping = texImage
        if not unwrap:
            tex_coordinate = mat.node_tree.nodes.new("ShaderNodeTexCoord")
            outputs = tex_coordinate.outputs['Generated']
            mat.node_tree.links.new(mapping.inputs['Vector'], outputs)
        else:
            uv_coord_type = "UV"
            tex_coordinate = mat.node_tree.nodes.new("ShaderNodeTexCoord")
            outputs = tex_coordinate.outputs['UV']
            mat.node_tree.links.new(mapping.inputs['Vector'], outputs)
        # NOTE(review): this repeats the link made just above with the same
        # source/target — looks redundant; confirm before removing.
        mat.node_tree.links.new(mapping.inputs['Vector'], tex_coordinate.outputs[uv_coord_type])
        if (alpha < 1):
            obj.show_transparent = True
            mat.blend_method = 'BLEND'
            # NOTE(review): index 18 assumed to be 'Alpha' (version dependent).
            bsdf.inputs[18].default_value = alpha
    # Assign it to object
    if len(obj.data.materials) > 0:
        obj.data.materials[0] = mat
    else:
        obj.data.materials.append(mat)
    for f in obj.data.polygons:
        f.material_index = 0
        f.select = True
    obj.data.update()
    if unwrap:
        BlogoUtils.unwrap_object(obj)
    if smart_project:
        BlogoUtils.smart_project_object(obj)
    return obj
def draw_mesh(objname, vertices, faces, textureFile, alpha=1.0, unwrap=False, smart_project=False, texture_scale=None):
    """
    Create a (optionally textured) mesh object from vertex and face lists.

    A unique name is derived from objname, alpha and a global counter.
    When texture_scale == 0 a scale is derived automatically from the mesh's
    x-extent (one texture repeat per tenth of the width, minimum 0.001).

    :returns: the newly created Blender object
    """
    unique_name = objname + "-" + str(alpha) + "-" + str(BlogoUtils.object_counter())
    if texture_scale == 0:
        # Auto-scale from the x-extent of the vertex cloud.
        low = vertices[0][0]
        high = vertices[0][0]
        for vertex in vertices:
            high = max(high, vertex[0])
            low = min(low, vertex[0])
        texture_scale = round((high - low) / 10.0, 3)
        if texture_scale <= 0:
            texture_scale = 0.001
    obj, mesh = BlogoUtils.create_object(unique_name, vertices, faces)
    if textureFile is not None:
        BlogoUtils.add_texture(obj, textureFile, alpha, unwrap, smart_project, texture_scale)
    return obj
def draw_plane_from_func(name, func, pts, closed_x=False, closed_y=False, texture=None, **kwargs):
    """
    Draw an object defined by a function giving the position of either point in the grid of the plane.
    The function given should be of the form: def my_func(x, y, rx, ry, \*\*kwargs):
    where x, y is the current position in the plane, and rx, ry is the position in the plane from 0 to 1.
    The function may return None for a point to leave a hole in the plane.
    :param name: The name the new object will have
    :param func: A function to pass each point in the plane to
    :param pts: A tuple giving either (x_max, y_max) or (x_min, y_min, x_max, y_max). x_min and y_min default to 0
    :param closed_x: If True then the in the x direction 0 will be connected to max_x (default False)
    :param closed_y: If True then the in the y direction 0 will be connected to max_y (default False)
    :param texture: The texture to draw on the plane
    :param kwargs: User data to pass on to func
    :returns: the created Blender object
    """
    # Interpret pts: 2 values = (x_max, y_max); 4 values = (x_min, y_min, x_max, y_max).
    x_min = pts[0]
    y_min = pts[1]
    if (len(pts) < 3):
        x_max = x_min
        x_min = 0
    else:
        x_max = pts[2]
    if (len(pts) < 4):
        y_max = y_min
        y_min = 0
    else:
        y_max = pts[3]
    points = []          # grid of raw func results (may contain None holes)
    vertex_mapping = []  # grid of indices into `vertices`, or None for holes
    faces = []
    vertices = []
    # Pass 1: evaluate func over the grid and collect the vertices.
    for x in range(x_min, x_max):
        column = []
        column_mapping = []
        x_rel = float(x - x_min) / (x_max - x_min)
        for y in range(y_min, y_max):
            y_rel = float(y - y_min) / (y_max - y_min)
            vertex = func(x, y, x_rel, y_rel, **kwargs)
            column.append(vertex)
            if (vertex != None):
                column_mapping.append(len(vertices))
                vertices.append(vertex)
            else:
                column_mapping.append(None)
        points.append(column)
        vertex_mapping.append(column_mapping)
    # Pass 2: build a quad for each grid cell whose four corners all exist,
    # wrapping around at the edges when closed_x / closed_y are set.
    for x in range(x_max - x_min):
        column = points[x]
        for y in range(y_max - y_min):
            xplus1 = x + 1
            yplus1 = y + 1
            if (x >= (len(points)-1)):
                if (closed_x):
                    xplus1 = 0
                else:
                    break
            if (y >= (len(column)-1)):
                if (closed_y):
                    yplus1 = 0
                else:
                    break
            if (points[x][y] != None and
                points[xplus1][y] != None and
                points[x][yplus1] != None and
                points[xplus1][yplus1] != None):
                face = [vertex_mapping[x][y],
                    vertex_mapping[x][yplus1],
                    vertex_mapping[xplus1][yplus1],
                    vertex_mapping[xplus1][y]
                    ]
                faces.append(face)
    return BlogoUtils.draw_mesh(name, vertices, faces, texture)
def add_blend_file(filepath, coll_name, link=True):
    """
    Load a .blend file and put in the current scene.
    :param filepath: The name of the file to load
    :param coll_name: The collection name prefix to add (all collections whose
        name starts with coll_name are brought in)
    :param link: If true then the file is only linked, otherwise it is appended
    """
    # link all collections starting with coll_name
    with bpy.data.libraries.load(filepath, link=link) as (data_from, data_to):
        data_to.collections = [c for c in data_from.collections if c.startswith(coll_name)]
    # link collection to scene collection
    for coll in data_to.collections:
        if coll is not None:
            bpy.context.scene.collection.children.link(coll)
def boolean_op(a_list, op, b_list):
    """
    Perform either union, intersection or difference between two objects or lists of objects.

    Every object in b_list is applied (as a boolean modifier) to every object
    in a_list, modifying a_list in place, and afterwards the b_list objects
    are removed from the file.

    :param a_list: Either a blender object, or a list of blender objects
    :param op: The operation to perform, one of 'DIFFERENCE', 'UNION' or
        'INTERSECTION' (aliases: 'MINUS', 'PLUS', 'EQUAL'; case-insensitive)
    :param b_list: Either a blender object, or a list of blender objects
    """
    op = op.upper()
    op_map = {"MINUS": "DIFFERENCE", "PLUS": "UNION", "EQUAL": "INTERSECTION"}
    op = op_map.get(op, op)
    if not BlogoUtils.is_sequence(a_list):
        a_list = [a_list]
    if not BlogoUtils.is_sequence(b_list):
        b_list = [b_list]
    BlogoUtils.unselect_objects()
    # (Removed leftover debug prints that claimed "minus" regardless of op,
    # and an unused `count` variable.)
    for b in b_list:
        for a in a_list:
            if a != b:
                # Unique modifier name so repeated operations never collide.
                op_name = "bool op " + str(BlogoUtils.object_counter())
                bool_op = a.modifiers.new(type="BOOLEAN", name=op_name)
                bool_op.object = b
                bool_op.operation = op
                a.select_set(True)
                bpy.context.view_layer.objects.active = a
                bpy.ops.object.modifier_apply(modifier=op_name)
                a.select_set(False)
    # The operand objects are consumed by the operation.
    for b in b_list:
        bpy.data.objects.remove(b)
def add_light(name, location, point_at, size, energy, type, shape, use_shadow):
    """
    Add (or reconfigure) a light in the scene.

    :param name: light name; None generates a unique one
    :param location: world-space position of the light
    :param point_at: currently unused (reserved for aiming the light)
    :param size: area size / spot size, applied where the light type supports it
    :param energy: light power
    :param type: Blender light type string, e.g. "AREA" or "SPOT"
    :param shape: area-light shape (ignored for types without one)
    :param use_shadow: whether this light casts shadows
    """
    # TOFIX: A crash here causes a problem with set mode next time?
    if name is None:
        name = "Light-" + str(BlogoUtils.object_counter())
    try:
        lightObj = bpy.data.objects[name]
    except KeyError:
        # No object of that name yet: create the data block and object.
        light_data = bpy.data.lights.new(name=name, type="AREA")
        lightObj = bpy.data.objects.new(name=name, object_data=light_data)
        bpy.context.collection.objects.link(lightObj)
    bpy.context.view_layer.objects.active = lightObj
    lightObj.location = location
    lightObj.data.type = type
    # Not every light type exposes every attribute; ignore what doesn't apply
    # (narrowed from bare excepts so KeyboardInterrupt still propagates).
    try:
        lightObj.data.shape = shape
    except Exception:
        pass
    try:
        lightObj.data.size = size
    except Exception:
        pass
    try:
        lightObj.data.spot_size = size
    except Exception:
        pass
    lightObj.data.specular_factor = 0.25
    lightObj.data.use_shadow = use_shadow
    lightObj.data.energy = energy
    lightObj.rotation_euler[0] = 0
    lightObj.rotation_euler[1] = 0
    lightObj.rotation_euler[2] = 0
def add_light_area(name=None, location=(0,0,2*1609.34), size=10000, energy=1.21e+09, shape='SQUARE'):
    """
    Add a light in the form of a plane (an AREA light with shadows disabled).
    :param name: light name; None generates a unique one
    :param location: world-space position (default is roughly 2 miles up)
    :param size: edge length of the light plane
    :param energy: light power
    :param shape: area-light shape, e.g. 'SQUARE'
    :return: None
    """
    BlogoUtils.add_light(name, location, None, size, energy, "AREA", shape, False)
def add_light_bulb(name=None, location=(0, 0, 2 * 1609.34), angle=120, energy=100):
    """
    Add a spot light to the scene.

    The spot size is taken as the fraction of the full sphere (4*pi) that the
    cone covers, and the energy is boosted inversely so perceived brightness
    stays roughly constant as the cone narrows.

    :param name: light name; None generates a unique one
    :param location: world-space position
    :param angle: cone angle in degrees
    :param energy: nominal light power before the coverage boost
    :return: None
    """
    coverage = angle / 720
    spot_size = (4 * pi) * coverage
    boosted_energy = energy / coverage
    BlogoUtils.add_light(name, location, None, spot_size, boosted_energy, "SPOT", None, True)
def hide_cameras_and_lights():
    """
    Stop the renderer from showing camera and light objects, but still show their light
    (turns off the viewport's "extras" overlay).
    """
    bpy.context.space_data.overlay.show_extras = False
def add_camera(name, from_location, central_locations, capture_locations=None, angle=None, roll=0):
    """
    Place a camera at a particular location and point it at another location.
    :param name: name for the camera data block and object
    :param from_location: The location of the camera
    :param central_locations: A list of locations to point the camera towards the middle of
    :param capture_locations: A list of locations that should be in frame (not yet implemented)
    :param angle: The angle in radians over which the camera should capture;
        None keeps the camera's default lens angles
    :param roll: The angle in radians to roll the camera round
    :returns: the new camera object (already linked into the scene collection)
    """
    # Aim at the centroid of the given locations.
    x_sum = 0
    y_sum = 0
    z_sum = 0
    for x, y, z in central_locations:
        x_sum += x
        y_sum += y
        z_sum += z
    count = len(central_locations)
    focus_location = mathutils.Vector((x_sum / count,
                                       y_sum / count,
                                       z_sum / count,
                                       ))
    looking_direction = mathutils.Vector(from_location) - focus_location
    rollMatrix = mathutils.Matrix.Rotation(roll, 4, 'Z')
    rot_quat = looking_direction.to_track_quat('Z', 'Y')
    rot_quat = rot_quat.to_matrix().to_4x4() @ rollMatrix
    rotation_euler = rot_quat.to_euler()
    cam = bpy.data.cameras.new(name)
    if angle is not None:
        max_x_angle = angle
        max_y_angle = angle
    else:
        # Bug fix: this branch previously left max_x_angle/max_y_angle unset
        # (a bare TODO), raising NameError below.  Fall back to the camera's
        # own default lens angles so the assignment is a no-op.
        max_x_angle = cam.angle_x
        max_y_angle = cam.angle_y
    cam.angle_x = max_x_angle
    if cam.angle_y < max_y_angle:
        cam.angle_y = max_y_angle
    cam_obj = bpy.data.objects.new(name, cam)
    cam_obj.location = from_location
    cam_obj.rotation_euler = rotation_euler
    bpy.context.collection.objects.link(cam_obj)
    return cam_obj
def take_picture(file, from_location, central_locations, capture_locations=None, angle=None, roll=0):
    """
    Place a camera at a particular location, point it at another location and save that picture to disk.
    :param file: The name of the file to save to (written as PNG).
    :param from_location: The location of the camera
    :param central_locations: A list of locations to point the camera towards the middle of
    :param capture_locations: A list of locations that should be in frame (not yet implemented)
    :param angle: The angle in radians over which the camera should capture
    :param roll: The angle in radians to roll the camera round
    """
    # Create a throwaway camera, render through it, then remove it again.
    cam_obj = BlogoUtils.add_camera("TempCamera", from_location, central_locations, capture_locations, angle, roll)
    bpy.context.scene.camera = cam_obj
    bpy.context.scene.render.image_settings.file_format='PNG'
    bpy.context.scene.render.filepath = file
    bpy.ops.render.render(use_viewport = True, write_still=True)
    bpy.data.objects.remove(cam_obj, do_unlink=True)
def register():
    # Blender add-on hook: nothing to register for this utility module.
    pass
def unregister():
    # Blender add-on hook: nothing to clean up.
    pass
if __name__ == "__main__":
register() | [
"bpy.data.lights.new",
"mathutils.Matrix.Rotation",
"math.sqrt",
"bpy.data.objects.new",
"bpy.data.libraries.load",
"math.cos",
"numpy.array",
"bpy.context.scene.collection.children.link",
"blogo.Blogo.clean_up",
"numpy.linalg.norm",
"bpy.context.copy",
"bpy.data.images.load",
"mathutils.Vec... | [((1804, 1826), 'blogo.Blogo.clean_up', 'blogo.Blogo.clean_up', ([], {}), '()\n', (1824, 1826), False, 'import blogo\n'), ((2365, 2384), 'numpy.array', 'np.array', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (2373, 2384), True, 'import numpy as np\n'), ((3859, 3884), 'bpy.ops.info.select_all', 'bpy.ops.info.select_all', ([], {}), '()\n', (3882, 3884), False, 'import bpy\n'), ((3894, 3922), 'bpy.ops.info.report_delete', 'bpy.ops.info.report_delete', ([], {}), '()\n', (3920, 3922), False, 'import bpy\n'), ((5005, 5023), 'bpy.context.copy', 'bpy.context.copy', ([], {}), '()\n', (5021, 5023), False, 'import bpy\n'), ((5172, 5196), 'bpy.ops.object.join', 'bpy.ops.object.join', (['ctx'], {}), '(ctx)\n', (5191, 5196), False, 'import bpy\n'), ((6264, 6280), 'numpy.asarray', 'np.asarray', (['axis'], {}), '(axis)\n', (6274, 6280), True, 'import numpy as np\n'), ((6347, 6368), 'math.cos', 'math.cos', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (6355, 6368), False, 'import math\n'), ((6562, 6733), 'numpy.array', 'np.array', (['[[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], [2 * (bc - ad), aa + cc -\n bb - dd, 2 * (cd + ab)], [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]]'], {}), '([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], [2 * (bc - ad),\n aa + cc - bb - dd, 2 * (cd + ab)], [2 * (bd + ac), 2 * (cd - ab), aa +\n dd - bb - cc]])\n', (6570, 6733), True, 'import numpy as np\n'), ((8888, 8904), 'math.sqrt', 'math.sqrt', (['total'], {}), '(total)\n', (8897, 8904), False, 'import math\n'), ((9761, 9805), 'bpy.ops.object.select_all', 'bpy.ops.object.select_all', ([], {'action': '"""DESELECT"""'}), "(action='DESELECT')\n", (9786, 9805), False, 'import bpy\n'), ((9906, 9948), 'bpy.ops.object.select_all', 'bpy.ops.object.select_all', ([], {'action': '"""SELECT"""'}), "(action='SELECT')\n", (9931, 9948), False, 'import bpy\n'), ((10329, 10357), 'bpy.data.meshes.new', 'bpy.data.meshes.new', (['objname'], {}), '(objname)\n', (10348, 10357), False, 'import 
bpy\n'), ((10375, 10410), 'bpy.data.objects.new', 'bpy.data.objects.new', (['objname', 'mesh'], {}), '(objname, mesh)\n', (10395, 10410), False, 'import bpy\n'), ((10422, 10462), 'bpy.context.collection.objects.link', 'bpy.context.collection.objects.link', (['obj'], {}), '(obj)\n', (10457, 10462), False, 'import bpy\n'), ((10632, 10676), 'bpy.ops.object.select_all', 'bpy.ops.object.select_all', ([], {'action': '"""DESELECT"""'}), "(action='DESELECT')\n", (10657, 10676), False, 'import bpy\n'), ((10807, 10842), 'bpy.ops.mesh.vert_connect_concave', 'bpy.ops.mesh.vert_connect_concave', ([], {}), '()\n', (10840, 10842), False, 'import bpy\n'), ((10891, 10965), 'bpy.ops.object.origin_set', 'bpy.ops.object.origin_set', ([], {'type': '"""ORIGIN_CENTER_OF_VOLUME"""', 'center': '"""MEDIAN"""'}), "(type='ORIGIN_CENTER_OF_VOLUME', center='MEDIAN')\n", (10916, 10965), False, 'import bpy\n'), ((11312, 11350), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""OBJECT"""'}), "(mode='OBJECT')\n", (11335, 11350), False, 'import bpy\n'), ((11360, 11404), 'bpy.ops.object.select_all', 'bpy.ops.object.select_all', ([], {'action': '"""DESELECT"""'}), "(action='DESELECT')\n", (11385, 11404), False, 'import bpy\n'), ((11498, 11532), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': 'mode'}), '(mode=mode)\n', (11521, 11532), False, 'import bpy\n'), ((11754, 11792), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""OBJECT"""'}), "(mode='OBJECT')\n", (11777, 11792), False, 'import bpy\n'), ((11834, 11873), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': 'prev_mode'}), '(mode=prev_mode)\n', (11857, 11873), False, 'import bpy\n'), ((12039, 12058), 'bpy.ops.uv.unwrap', 'bpy.ops.uv.unwrap', ([], {}), '()\n', (12056, 12058), False, 'import bpy\n'), ((12290, 12316), 'bpy.ops.uv.smart_project', 'bpy.ops.uv.smart_project', ([], {}), '()\n', (12314, 12316), False, 'import bpy\n'), ((12606, 12633), 
'blogo_colours.all_colours', 'blogo_colours.all_colours', ([], {}), '()\n', (12631, 12633), False, 'import blogo_colours\n'), ((13891, 13919), 'bpy.data.materials.get', 'bpy.data.materials.get', (['name'], {}), '(name)\n', (13913, 13919), False, 'import bpy\n'), ((26603, 26642), 'mathutils.Matrix.Rotation', 'mathutils.Matrix.Rotation', (['roll', '(4)', '"""Z"""'], {}), "(roll, 4, 'Z')\n", (26628, 26642), False, 'import mathutils\n'), ((26841, 26867), 'bpy.data.cameras.new', 'bpy.data.cameras.new', (['name'], {}), '(name)\n', (26861, 26867), False, 'import bpy\n'), ((27210, 27241), 'bpy.data.objects.new', 'bpy.data.objects.new', (['name', 'cam'], {}), '(name, cam)\n', (27230, 27241), False, 'import bpy\n'), ((27352, 27396), 'bpy.context.collection.objects.link', 'bpy.context.collection.objects.link', (['cam_obj'], {}), '(cam_obj)\n', (27387, 27396), False, 'import bpy\n'), ((28428, 28486), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {'use_viewport': '(True)', 'write_still': '(True)'}), '(use_viewport=True, write_still=True)\n', (28449, 28486), False, 'import bpy\n'), ((28498, 28546), 'bpy.data.objects.remove', 'bpy.data.objects.remove', (['cam_obj'], {'do_unlink': '(True)'}), '(cam_obj, do_unlink=True)\n', (28521, 28546), False, 'import bpy\n'), ((4719, 4760), 'bpy.context.collection.objects.link', 'bpy.context.collection.objects.link', (['copy'], {}), '(copy)\n', (4754, 4760), False, 'import bpy\n'), ((5328, 5362), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': 'mode'}), '(mode=mode)\n', (5351, 5362), False, 'import bpy\n'), ((6396, 6417), 'math.sin', 'math.sin', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (6404, 6417), False, 'import math\n'), ((8470, 8492), 'numpy.linalg.norm', 'np.linalg.norm', (['vector'], {}), '(vector)\n', (8484, 8492), True, 'import numpy as np\n'), ((13041, 13069), 'bpy.data.materials.get', 'bpy.data.materials.get', (['name'], {}), '(name)\n', (13063, 13069), False, 'import bpy\n'), ((13964, 13997), 
'bpy.data.materials.new', 'bpy.data.materials.new', ([], {'name': 'name'}), '(name=name)\n', (13986, 13997), False, 'import bpy\n'), ((14191, 14224), 'bpy.data.images.load', 'bpy.data.images.load', (['textureFile'], {}), '(textureFile)\n', (14211, 14224), False, 'import bpy\n'), ((20828, 20872), 'bpy.data.libraries.load', 'bpy.data.libraries.load', (['filepath'], {'link': 'link'}), '(filepath, link=link)\n', (20851, 20872), False, 'import bpy\n'), ((22742, 22768), 'bpy.data.objects.remove', 'bpy.data.objects.remove', (['b'], {}), '(b)\n', (22765, 22768), False, 'import bpy\n'), ((26532, 26563), 'mathutils.Vector', 'mathutils.Vector', (['from_location'], {}), '(from_location)\n', (26548, 26563), False, 'import mathutils\n'), ((6314, 6332), 'numpy.dot', 'np.dot', (['axis', 'axis'], {}), '(axis, axis)\n', (6320, 6332), True, 'import numpy as np\n'), ((7040, 7059), 'math.radians', 'math.radians', (['angle'], {}), '(angle)\n', (7052, 7059), False, 'import math\n'), ((13132, 13160), 'bpy.data.materials.new', 'bpy.data.materials.new', (['name'], {}), '(name)\n', (13154, 13160), False, 'import bpy\n'), ((21136, 21184), 'bpy.context.scene.collection.children.link', 'bpy.context.scene.collection.children.link', (['coll'], {}), '(coll)\n', (21178, 21184), False, 'import bpy\n'), ((23507, 23550), 'bpy.data.lights.new', 'bpy.data.lights.new', ([], {'name': 'name', 'type': '"""AREA"""'}), "(name=name, type='AREA')\n", (23526, 23550), False, 'import bpy\n'), ((23574, 23629), 'bpy.data.objects.new', 'bpy.data.objects.new', ([], {'name': 'name', 'object_data': 'light_data'}), '(name=name, object_data=light_data)\n', (23594, 23629), False, 'import bpy\n'), ((23643, 23688), 'bpy.context.collection.objects.link', 'bpy.context.collection.objects.link', (['lightObj'], {}), '(lightObj)\n', (23678, 23688), False, 'import bpy\n'), ((22584, 22631), 'bpy.ops.object.modifier_apply', 'bpy.ops.object.modifier_apply', ([], {'modifier': 'op_name'}), '(modifier=op_name)\n', (22613, 22631), False, 
'import bpy\n')] |
import numpy as np
from phi import struct
from phi.math.math_util import is_static_shape
def generateNoise(grid, var, mean=0, seed=0, dtype=np.float32):
    """Create normally distributed noise matching *grid*, varying over the batch.

    :param grid: a phiflow struct whose .data member supplies the target shape
    :param var: per-batch scale values passed to normal() (one per leading index)
    :param mean: mean of the normal distribution
    :param seed: seed for a private RandomState, so output is deterministic
    :param dtype: dtype of the generated noise array
    :returns: struct of the same layout as grid, filled with noise
    """
    size = grid.data.shape
    rand = np.random.RandomState(seed)
    def array(shape):
        # Bug fix: allocate with the requested dtype — the original
        # np.ones(size) silently produced float64 regardless of `dtype`.
        # (The `shape` supplied by struct.map is ignored; the grid's own
        # data shape is used, as before.)
        result = np.empty(size, dtype=dtype)
        for i in range(size[0]):
            result[i] = rand.normal(mean, var[i], size=size[1:]).astype(dtype)
        return result
    return struct.map(array, grid, leaf_condition=is_static_shape)
def generateParams(paramType, batch, dim, noiseStrength, vf1, vf2, vf3, vf4, vf5, vf7, vo1, vo2, vod, vnoise):
    """Create randomized grid-generation parameters, varying one over the batch.

    paramType names the parameter to vary per batch index ("f1".."f5", "f7",
    "o1", "o2", "od", each optionally suffixed with "neg" to decrease instead
    of increase, or "noise" to ramp the noise level).  Raises ValueError for
    an unknown paramType.
    """
    # NOTE: the order of np.random calls below matters for reproducibility
    # under a fixed seed and matches the original draw order exactly.
    f1 = (-0.5 + np.random.rand(dim, dim)) * 0.4    # initial main direction
    f2 = (-0.5 + np.random.rand(dim, dim)) * 0.4    # frequency 1 (reduced range)
    f3 = (-0.5 + np.random.rand(dim, dim)) * 0.3    # frequency 2
    f4 = (-0.5 + np.random.rand(dim, dim)) * 0.3    # frequency 3
    f5 = (-0.5 + np.random.rand(dim, dim)) * 0.2    # frequency 4
    f7 = (-0.5 + np.random.rand(dim, dim)) * 100.0  # forcing shift, dir & speed
    o1 = 0. + (np.random.rand(dim, dim) * 100.)     # offset, minor influence
    o2 = 0. + (np.random.rand(dim, dim) * 100.)     # offset
    nu = 0.0002 * (1 + np.random.rand() * 500.)     # diffusion
    # Blend smoothly between "static" (near 1) and "forced" (near 0) cases.
    sfCase = 0. + (np.random.rand(dim, dim) * 1.)
    staticF = (1. + 2. * sfCase)  # enlarge regular init for the "static" case
    f1 *= staticF
    f2 *= staticF
    f3 *= staticF
    f4 *= staticF
    f5 *= staticF
    f6 = (1. - sfCase) * 0.1  # forcing strength scaling
    fd = 1. / (1. + np.random.randint(6, size=(dim, dim)))
    od = 0. + np.random.rand(dim, dim) * 100.
    # Tile every per-dim parameter across the batch axis.
    tiled = {"f1": f1, "f2": f2, "f3": f3, "f4": f4, "f5": f5, "f6": f6,
             "f7": f7, "o1": o1, "o2": o2, "fd": fd, "od": od}
    for key, value in tiled.items():
        tiled[key] = np.repeat(value[np.newaxis, ...], batch, axis=0)
    noise = noiseStrength * np.ones(batch)  # normally a constant noise level
    # "amount" grows linearly with the batch index, broadcast to (batch, dim, dim).
    amount = np.repeat(np.arange(batch)[..., np.newaxis], dim, axis=1)
    amount = np.repeat(amount[..., np.newaxis], dim, axis=2)
    # Per-batch step size for each parameter that may be varied.
    step = {"f1": vf1, "f2": vf2, "f3": vf3, "f4": vf4, "f5": vf5,
            "f7": vf7, "o1": vo1, "o2": vo2, "od": vod}
    if paramType == "noise":
        noise = vnoise * np.arange(batch)  # increasing noise level
    else:
        negate = paramType.endswith("neg")
        base = paramType[:-3] if negate else paramType
        if base not in step:
            raise ValueError("Unknown parameter type!")
        delta = step[base] * amount
        tiled[base] = tiled[base] - delta if negate else tiled[base] + delta
    params = {"nu": np.array(nu), "noise": noise}
    params.update(tiled)
    return params
# utilizes parameters generated with generateParams for grid initialization
def createParameterizedGrid(grid, gridType, age, dim, params):
    """Fill *grid* with a field built from the randomized parameters in *params*.

    :param grid: a phiflow struct whose shape determines the output size
    :param gridType: one of "vectorForcing", "vectorComplex", "scalarComplex",
        "scalarSimple" (anything else raises ValueError)
    :param age: simulation time, used to shift the forcing pattern over time
    :param dim: spatial dimensionality (axis along which components are summed)
    :param params: dict produced by generateParams (keys "f1".."f7", "o1",
        "o2", "fd", "od", indexed by batch along their first axis)
    """
    p = params
    size = np.array(grid.shape)
    def array(shape):
        # Build a (x, y, z, dim, dim)-shaped coordinate mesh; each spatial
        # position carries its integer coordinates, repeated per component.
        mesh = np.meshgrid(np.arange(0, size[1]), np.arange(0, size[2]), np.arange(0, size[3]), indexing='ij')
        mesh = np.transpose(np.asarray(mesh), (1,2,3,0))[...,np.newaxis]
        mesh = np.repeat(mesh, dim, axis=dim+1)
        result = np.zeros(size, dtype=np.float32)
        # vary grid over batches:
        for i in range(size[0]):
            if gridType == "vectorForcing":
                # Time-dependent sinusoidal forcing field.
                temp = age / size[1:4]
                timeOff = 0.5 * p["f7"][i] + p["f7"][i] * np.sin(temp * 3.0)
                d = (mesh + timeOff) / size[1:4]
                acc = p["f2"][i] * np.sin(d * 2 * np.pi)
                acc += p["f3"][i] * np.sin(d * 4 * np.pi + p["o1"][i])
                acc += p["f4"][i] * np.sin(d * 8 * np.pi + p["o2"][i])
                acc += p["f5"][i] * np.sin(d * 16 * np.pi + 0.7*p["o1"][i])
                acc *= p["f6"][i]
                result[i] = np.sum(acc, axis=dim)
            elif gridType == "vectorComplex":
                # Static multi-frequency vector field.
                d = mesh / size[1:4]
                acc = p["f1"][i] + np.zeros_like(d)
                acc += p["f2"][i] * np.sin(d * 2 * np.pi + p["o1"][i])
                acc += p["f3"][i] * np.sin(d * 4 * np.pi + p["o2"][i])
                acc += p["f4"][i] * np.sin(d * 8 * np.pi + 0.4*p["o1"][i])
                acc += p["f5"][i] * np.sin(d * 16 * np.pi + 0.3*p["o2"][i])
                result[i] = np.sum(acc, axis=dim)
            elif gridType == "scalarComplex":
                # Same construction, collapsed to a single scalar channel.
                d = mesh / size[1:4]
                acc = p["f1"][i] + np.zeros_like(d)
                acc += p["f2"][i] * np.sin(d * 2 * np.pi + p["o1"][i])
                acc += p["f3"][i] * np.sin(d * 4 * np.pi + p["o2"][i])
                acc += p["f4"][i] * np.sin(d * 8 * np.pi + 0.4*p["o1"][i])
                acc += p["f5"][i] * np.sin(d * 16 * np.pi + 0.3*p["o2"][i])
                temp = np.sum(acc, axis=dim+1)
                result[i] = np.sum(temp, axis=dim, keepdims=True)
            elif gridType == "scalarSimple":
                # Single-frequency scalar field with randomized per-dim rates.
                d = mesh / size[1:4]
                acc = np.sin(p["fd"][i] * d * 24 * np.pi + p["od"][i])
                temp = np.sum(acc, axis=dim+1)
                result[i] = np.sum(temp, axis=dim, keepdims=True)
                result[i] *= result[i] * result[i] # cubed, sharper transitions
            else:
                raise ValueError("Unknown grid type")
        return result
    return struct.map(array, grid, leaf_condition=is_static_shape)
# helper function for mp4 export
def prepareRender(data, pad):
    """Convert a batch of 3D fields into one tiled uint8 image for mp4 export.

    Builds two rows of tiles: the top row shows the depth-axis mean of each
    batch entry, the bottom row shows its central depth slice.  Each tile is
    min/max normalised to [0, 255] and framed with a black border.

    :param data: array of shape (batch, depth, height, width, channels)
    :param pad: border width in pixels added around every tile
    :returns: uint8 array with both rows stacked vertically
    """
    rows = [[], []]
    for index in range(data.shape[0]):
        field = data[index]
        views = (np.mean(field, axis=0),             # depth-averaged view
                 field[int(data.shape[1] / 2)])      # central slice view
        for row, part in enumerate(views):
            part = part - np.min(part)
            peak = np.max(part)
            if peak > 0:
                # Guard against constant tiles: the original divided by zero
                # here, producing NaNs in the uint8 conversion.
                part = part / peak
            part = 255 * part
            part = np.pad(part, ((pad, pad), (pad, pad), (0, 0)))
            rows[row].append(part.astype(np.uint8))
    tiles = [np.concatenate(row_tiles, axis=1) for row_tiles in rows]
    return np.concatenate(tiles, axis=0)
"numpy.mean",
"numpy.repeat",
"numpy.ones",
"numpy.random.rand",
"numpy.arange",
"numpy.asarray",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"numpy.sum",
"numpy.concatenate",
"numpy.min",
"numpy.sin",
"numpy.pad",
"numpy.zeros_like",
"numpy.random.RandomState"... | [((256, 283), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (277, 283), True, 'import numpy as np\n'), ((482, 537), 'phi.struct.map', 'struct.map', (['array', 'grid'], {'leaf_condition': 'is_static_shape'}), '(array, grid, leaf_condition=is_static_shape)\n', (492, 537), False, 'from phi import struct\n'), ((1838, 1883), 'numpy.repeat', 'np.repeat', (['f1[np.newaxis, ...]', 'batch'], {'axis': '(0)'}), '(f1[np.newaxis, ...], batch, axis=0)\n', (1847, 1883), True, 'import numpy as np\n'), ((1892, 1937), 'numpy.repeat', 'np.repeat', (['f2[np.newaxis, ...]', 'batch'], {'axis': '(0)'}), '(f2[np.newaxis, ...], batch, axis=0)\n', (1901, 1937), True, 'import numpy as np\n'), ((1946, 1991), 'numpy.repeat', 'np.repeat', (['f3[np.newaxis, ...]', 'batch'], {'axis': '(0)'}), '(f3[np.newaxis, ...], batch, axis=0)\n', (1955, 1991), True, 'import numpy as np\n'), ((2000, 2045), 'numpy.repeat', 'np.repeat', (['f4[np.newaxis, ...]', 'batch'], {'axis': '(0)'}), '(f4[np.newaxis, ...], batch, axis=0)\n', (2009, 2045), True, 'import numpy as np\n'), ((2054, 2099), 'numpy.repeat', 'np.repeat', (['f5[np.newaxis, ...]', 'batch'], {'axis': '(0)'}), '(f5[np.newaxis, ...], batch, axis=0)\n', (2063, 2099), True, 'import numpy as np\n'), ((2108, 2153), 'numpy.repeat', 'np.repeat', (['f6[np.newaxis, ...]', 'batch'], {'axis': '(0)'}), '(f6[np.newaxis, ...], batch, axis=0)\n', (2117, 2153), True, 'import numpy as np\n'), ((2162, 2207), 'numpy.repeat', 'np.repeat', (['f7[np.newaxis, ...]', 'batch'], {'axis': '(0)'}), '(f7[np.newaxis, ...], batch, axis=0)\n', (2171, 2207), True, 'import numpy as np\n'), ((2216, 2261), 'numpy.repeat', 'np.repeat', (['o1[np.newaxis, ...]', 'batch'], {'axis': '(0)'}), '(o1[np.newaxis, ...], batch, axis=0)\n', (2225, 2261), True, 'import numpy as np\n'), ((2270, 2315), 'numpy.repeat', 'np.repeat', (['o2[np.newaxis, ...]', 'batch'], {'axis': '(0)'}), '(o2[np.newaxis, ...], batch, axis=0)\n', (2279, 2315), 
True, 'import numpy as np\n'), ((2324, 2369), 'numpy.repeat', 'np.repeat', (['fd[np.newaxis, ...]', 'batch'], {'axis': '(0)'}), '(fd[np.newaxis, ...], batch, axis=0)\n', (2333, 2369), True, 'import numpy as np\n'), ((2378, 2423), 'numpy.repeat', 'np.repeat', (['od[np.newaxis, ...]', 'batch'], {'axis': '(0)'}), '(od[np.newaxis, ...], batch, axis=0)\n', (2387, 2423), True, 'import numpy as np\n'), ((2583, 2630), 'numpy.repeat', 'np.repeat', (['amount[..., np.newaxis]', 'dim'], {'axis': '(2)'}), '(amount[..., np.newaxis], dim, axis=2)\n', (2592, 2630), True, 'import numpy as np\n'), ((4183, 4203), 'numpy.array', 'np.array', (['grid.shape'], {}), '(grid.shape)\n', (4191, 4203), True, 'import numpy as np\n'), ((6670, 6725), 'phi.struct.map', 'struct.map', (['array', 'grid'], {'leaf_condition': 'is_static_shape'}), '(array, grid, leaf_condition=is_static_shape)\n', (6680, 6725), False, 'from phi import struct\n'), ((7408, 7437), 'numpy.concatenate', 'np.concatenate', (['lines'], {'axis': '(0)'}), '(lines, axis=0)\n', (7422, 7437), True, 'import numpy as np\n'), ((323, 336), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (330, 336), True, 'import numpy as np\n'), ((2452, 2466), 'numpy.ones', 'np.ones', (['batch'], {}), '(batch)\n', (2459, 2466), True, 'import numpy as np\n'), ((3744, 3756), 'numpy.array', 'np.array', (['nu'], {}), '(nu)\n', (3752, 3756), True, 'import numpy as np\n'), ((4426, 4460), 'numpy.repeat', 'np.repeat', (['mesh', 'dim'], {'axis': '(dim + 1)'}), '(mesh, dim, axis=dim + 1)\n', (4435, 4460), True, 'import numpy as np\n'), ((4476, 4508), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (4484, 4508), True, 'import numpy as np\n'), ((7363, 7395), 'numpy.concatenate', 'np.concatenate', (['lines[j]'], {'axis': '(1)'}), '(lines[j], axis=1)\n', (7377, 7395), True, 'import numpy as np\n'), ((767, 791), 'numpy.random.rand', 'np.random.rand', (['dim', 'dim'], {}), '(dim, dim)\n', (781, 791), True, 'import 
numpy as np\n'), ((850, 874), 'numpy.random.rand', 'np.random.rand', (['dim', 'dim'], {}), '(dim, dim)\n', (864, 874), True, 'import numpy as np\n'), ((940, 964), 'numpy.random.rand', 'np.random.rand', (['dim', 'dim'], {}), '(dim, dim)\n', (954, 964), True, 'import numpy as np\n'), ((997, 1021), 'numpy.random.rand', 'np.random.rand', (['dim', 'dim'], {}), '(dim, dim)\n', (1011, 1021), True, 'import numpy as np\n'), ((1054, 1078), 'numpy.random.rand', 'np.random.rand', (['dim', 'dim'], {}), '(dim, dim)\n', (1068, 1078), True, 'import numpy as np\n'), ((1111, 1135), 'numpy.random.rand', 'np.random.rand', (['dim', 'dim'], {}), '(dim, dim)\n', (1125, 1135), True, 'import numpy as np\n'), ((1188, 1212), 'numpy.random.rand', 'np.random.rand', (['dim', 'dim'], {}), '(dim, dim)\n', (1202, 1212), True, 'import numpy as np\n'), ((1262, 1286), 'numpy.random.rand', 'np.random.rand', (['dim', 'dim'], {}), '(dim, dim)\n', (1276, 1286), True, 'import numpy as np\n'), ((1447, 1471), 'numpy.random.rand', 'np.random.rand', (['dim', 'dim'], {}), '(dim, dim)\n', (1461, 1471), True, 'import numpy as np\n'), ((1745, 1782), 'numpy.random.randint', 'np.random.randint', (['(6)'], {'size': '(dim, dim)'}), '(6, size=(dim, dim))\n', (1762, 1782), True, 'import numpy as np\n'), ((1798, 1822), 'numpy.random.rand', 'np.random.rand', (['dim', 'dim'], {}), '(dim, dim)\n', (1812, 1822), True, 'import numpy as np\n'), ((2523, 2539), 'numpy.arange', 'np.arange', (['batch'], {}), '(batch)\n', (2532, 2539), True, 'import numpy as np\n'), ((4254, 4275), 'numpy.arange', 'np.arange', (['(0)', 'size[1]'], {}), '(0, size[1])\n', (4263, 4275), True, 'import numpy as np\n'), ((4277, 4298), 'numpy.arange', 'np.arange', (['(0)', 'size[2]'], {}), '(0, size[2])\n', (4286, 4298), True, 'import numpy as np\n'), ((4300, 4321), 'numpy.arange', 'np.arange', (['(0)', 'size[3]'], {}), '(0, size[3])\n', (4309, 4321), True, 'import numpy as np\n'), ((7215, 7261), 'numpy.pad', 'np.pad', (['part', '((pad, pad), (pad, pad), 
(0, 0))'], {}), '(part, ((pad, pad), (pad, pad), (0, 0)))\n', (7221, 7261), True, 'import numpy as np\n'), ((1318, 1334), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1332, 1334), True, 'import numpy as np\n'), ((4366, 4382), 'numpy.asarray', 'np.asarray', (['mesh'], {}), '(mesh)\n', (4376, 4382), True, 'import numpy as np\n'), ((5135, 5156), 'numpy.sum', 'np.sum', (['acc'], {'axis': 'dim'}), '(acc, axis=dim)\n', (5141, 5156), True, 'import numpy as np\n'), ((7000, 7021), 'numpy.mean', 'np.mean', (['part'], {'axis': '(0)'}), '(part, axis=0)\n', (7007, 7021), True, 'import numpy as np\n'), ((7116, 7128), 'numpy.min', 'np.min', (['part'], {}), '(part)\n', (7122, 7128), True, 'import numpy as np\n'), ((7155, 7167), 'numpy.max', 'np.max', (['part'], {}), '(part)\n', (7161, 7167), True, 'import numpy as np\n'), ((4822, 4843), 'numpy.sin', 'np.sin', (['(d * 2 * np.pi)'], {}), '(d * 2 * np.pi)\n', (4828, 4843), True, 'import numpy as np\n'), ((4881, 4915), 'numpy.sin', 'np.sin', (["(d * 4 * np.pi + p['o1'][i])"], {}), "(d * 4 * np.pi + p['o1'][i])\n", (4887, 4915), True, 'import numpy as np\n'), ((4957, 4991), 'numpy.sin', 'np.sin', (["(d * 8 * np.pi + p['o2'][i])"], {}), "(d * 8 * np.pi + p['o2'][i])\n", (4963, 4991), True, 'import numpy as np\n'), ((5033, 5074), 'numpy.sin', 'np.sin', (["(d * 16 * np.pi + 0.7 * p['o1'][i])"], {}), "(d * 16 * np.pi + 0.7 * p['o1'][i])\n", (5039, 5074), True, 'import numpy as np\n'), ((5626, 5647), 'numpy.sum', 'np.sum', (['acc'], {'axis': 'dim'}), '(acc, axis=dim)\n', (5632, 5647), True, 'import numpy as np\n'), ((4718, 4736), 'numpy.sin', 'np.sin', (['(temp * 3.0)'], {}), '(temp * 3.0)\n', (4724, 4736), True, 'import numpy as np\n'), ((5277, 5293), 'numpy.zeros_like', 'np.zeros_like', (['d'], {}), '(d)\n', (5290, 5293), True, 'import numpy as np\n'), ((5330, 5364), 'numpy.sin', 'np.sin', (["(d * 2 * np.pi + p['o1'][i])"], {}), "(d * 2 * np.pi + p['o1'][i])\n", (5336, 5364), True, 'import numpy as np\n'), ((5406, 5440), 
'numpy.sin', 'np.sin', (["(d * 4 * np.pi + p['o2'][i])"], {}), "(d * 4 * np.pi + p['o2'][i])\n", (5412, 5440), True, 'import numpy as np\n'), ((5482, 5522), 'numpy.sin', 'np.sin', (["(d * 8 * np.pi + 0.4 * p['o1'][i])"], {}), "(d * 8 * np.pi + 0.4 * p['o1'][i])\n", (5488, 5522), True, 'import numpy as np\n'), ((5558, 5599), 'numpy.sin', 'np.sin', (["(d * 16 * np.pi + 0.3 * p['o2'][i])"], {}), "(d * 16 * np.pi + 0.3 * p['o2'][i])\n", (5564, 5599), True, 'import numpy as np\n'), ((6112, 6137), 'numpy.sum', 'np.sum', (['acc'], {'axis': '(dim + 1)'}), '(acc, axis=dim + 1)\n', (6118, 6137), True, 'import numpy as np\n'), ((6164, 6201), 'numpy.sum', 'np.sum', (['temp'], {'axis': 'dim', 'keepdims': '(True)'}), '(temp, axis=dim, keepdims=True)\n', (6170, 6201), True, 'import numpy as np\n'), ((5768, 5784), 'numpy.zeros_like', 'np.zeros_like', (['d'], {}), '(d)\n', (5781, 5784), True, 'import numpy as np\n'), ((5821, 5855), 'numpy.sin', 'np.sin', (["(d * 2 * np.pi + p['o1'][i])"], {}), "(d * 2 * np.pi + p['o1'][i])\n", (5827, 5855), True, 'import numpy as np\n'), ((5897, 5931), 'numpy.sin', 'np.sin', (["(d * 4 * np.pi + p['o2'][i])"], {}), "(d * 4 * np.pi + p['o2'][i])\n", (5903, 5931), True, 'import numpy as np\n'), ((5973, 6013), 'numpy.sin', 'np.sin', (["(d * 8 * np.pi + 0.4 * p['o1'][i])"], {}), "(d * 8 * np.pi + 0.4 * p['o1'][i])\n", (5979, 6013), True, 'import numpy as np\n'), ((6049, 6090), 'numpy.sin', 'np.sin', (["(d * 16 * np.pi + 0.3 * p['o2'][i])"], {}), "(d * 16 * np.pi + 0.3 * p['o2'][i])\n", (6055, 6090), True, 'import numpy as np\n'), ((6323, 6371), 'numpy.sin', 'np.sin', (["(p['fd'][i] * d * 24 * np.pi + p['od'][i])"], {}), "(p['fd'][i] * d * 24 * np.pi + p['od'][i])\n", (6329, 6371), True, 'import numpy as np\n'), ((6395, 6420), 'numpy.sum', 'np.sum', (['acc'], {'axis': '(dim + 1)'}), '(acc, axis=dim + 1)\n', (6401, 6420), True, 'import numpy as np\n'), ((6447, 6484), 'numpy.sum', 'np.sum', (['temp'], {'axis': 'dim', 'keepdims': '(True)'}), '(temp, 
axis=dim, keepdims=True)\n', (6453, 6484), True, 'import numpy as np\n'), ((3617, 3633), 'numpy.arange', 'np.arange', (['batch'], {}), '(batch)\n', (3626, 3633), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as f
from mlp import MultiLayerPerceptron
import torch
import torch.nn as nn
import torch.optim as optim
import logging
import numpy as np
# Pick GPU when available. NOTE(review): "device" is not referenced anywhere
# in this chunk -- presumably consumed by callers that move models/tensors.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Module-level logger used by the training loops below.
logger = logging.getLogger(__name__)
logging.basicConfig()
logger.setLevel(logging.INFO)
class InverseModel(nn.Module):
    """
    Inverse model predicts action given the current state and the desired future state
    """
    def __init__(self, start_state_dims, next_state_dims, action_dims,
                 latent_var_1=64, latent_var_2=32, criterion=nn.MSELoss(), lr=4e-4, seed=0):
        """Build the two-hidden-layer MLP and its Adam optimizer.

        Args:
            start_state_dims (int): size of the current-state vector.
            next_state_dims (int): size of the desired next-state vector.
            action_dims (int): size of the predicted action vector.
            latent_var_1 (int): width of the first hidden layer.
            latent_var_2 (int): width of the second hidden layer.
            criterion: training loss (default: MSE).
            lr (float): Adam learning rate.
            seed (int): torch RNG seed, fixed *before* layer construction
                so weight initialisation is reproducible.
        """
        torch.manual_seed(seed)
        super(InverseModel, self).__init__()
        # Network input is the concatenation of start and next state.
        self.state_dims = start_state_dims + next_state_dims
        self.model = nn.Sequential(
            nn.Linear(self.state_dims, latent_var_1),
            nn.ReLU(),
            nn.Linear(latent_var_1, latent_var_2),
            nn.ReLU(),
            nn.Linear(latent_var_2, action_dims)
        )
        self.criterion = criterion
        self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
    def forward(self, combined_states):
        """Map concatenated (start, next) states to predicted actions."""
        actions = self.model(combined_states)
        return actions
    def train_and_validate(self, train_dl, valid_dl, num_epochs):
        """Train on train_dl, evaluating on valid_dl after every epoch.

        Args:
            train_dl: iterable of (start_states, next_states, true_actions).
            valid_dl: iterable with the same batch structure, for validation.
            num_epochs (int): number of passes over train_dl.

        Returns:
            tuple: (per-batch train losses, per-epoch mean train losses,
            per-epoch validation losses) -- all entries plain floats.
        """
        loss_list = []
        avg_loss_list = []
        valid_loss_list = []
        logger.info("Starting with epoch 0")
        for epoch in range(num_epochs):
            losses_for_given_epoch = []
            self.model.train()
            for start_states, next_states, true_actions in train_dl:
                start_states = start_states.float()
                next_states = next_states.float()
                true_actions = true_actions.float()
                self.optimizer.zero_grad()
                combined_states = torch.cat((start_states, next_states), dim=1)
                pred_actions = self.model(combined_states)
                loss = self.criterion(pred_actions, true_actions)
                loss.backward()
                self.optimizer.step()
                losses_for_given_epoch.append(loss.item())
            self.model.eval()
            with torch.no_grad():
                valid_loss_sum = 0.0
                for start_states, next_states, true_actions in valid_dl:
                    start_states = start_states.float()
                    next_states = next_states.float()
                    true_actions = true_actions.float()
                    combined_states = torch.cat((start_states, next_states), dim=1)
                    pred_actions = self.model(combined_states)
                    # BUGFIX: accumulate a python float (.item()) so the
                    # returned validation losses are floats, consistent with
                    # loss_list/avg_loss_list, instead of 0-d tensors that
                    # keep device references alive.
                    valid_loss_sum += self.criterion(pred_actions, true_actions).item()
            valid_loss = valid_loss_sum / len(valid_dl)
            loss_list += losses_for_given_epoch
            avg_loss_list.append(np.mean(losses_for_given_epoch))
            valid_loss_list.append(valid_loss)
            logger.info(f'Completed epoch: {epoch}/{num_epochs}')
            logger.info(f'Avg loss this epoch: {np.mean(losses_for_given_epoch)}')
            logger.info(f'Validation loss this epoch: {valid_loss}')
        return loss_list, avg_loss_list, valid_loss_list
class ForwardModel(nn.Module):
    """
    Forward model predicts future state given current state and action
    """
    def __init__(self, start_state_dims, next_state_dims, action_dims,
                 latent_var_1=64, latent_var_2=32, criterion=nn.MSELoss(), lr=4e-4, seed=0):
        """Build the two-hidden-layer MLP and its Adam optimizer.

        start_state_dims + action_dims form the concatenated input size;
        next_state_dims is the output size. criterion is the training
        loss (default MSE), lr the Adam learning rate, and seed the torch
        RNG seed fixed before layer construction for reproducible init.
        """
        torch.manual_seed(seed)
        super(ForwardModel, self).__init__()
        # Network input is the concatenation of the start state and the action.
        self.state_dims = start_state_dims + action_dims
        self.model = nn.Sequential(
            nn.Linear(self.state_dims, latent_var_1),
            nn.ReLU(),
            nn.Linear(latent_var_1, latent_var_2),
            nn.ReLU(),
            nn.Linear(latent_var_2, next_state_dims)
        )
        self.criterion = criterion
        self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
    def forward(self, combined_input):
        """Map concatenated (start_state, action) to the predicted next state."""
        next_states = self.model(combined_input)
        return next_states
    def train_and_validate(self, train_dl, valid_dl, num_epochs):
        """Train on train_dl, evaluating on valid_dl after every epoch.

        Each dataloader yields (start_states, next_states, true_actions).
        Returns (per-batch train losses, per-epoch mean train losses,
        per-epoch validation losses).
        NOTE(review): entries of the validation list are 0-d tensors
        (no .item()), unlike the float entries of the other two lists.
        """
        loss_list = []
        avg_loss_list = []
        valid_loss_list = []
        logger.info("Starting with epoch 0")
        for epoch in range(num_epochs):
            losses_for_given_epoch = []
            self.model.train()
            for start_states, next_states, true_actions in train_dl:
                start_states = start_states.float()
                next_states = next_states.float()
                true_actions = true_actions.float()
                self.optimizer.zero_grad()
                # Forward-model input: state ++ action, concatenated on dim 1.
                combined_input = torch.cat((start_states, true_actions), dim=1)
                pred_states = self.model(combined_input)
                loss = self.criterion(pred_states, next_states)
                loss.backward()
                self.optimizer.step()
                losses_for_given_epoch.append(loss.item())
            self.model.eval()
            with torch.no_grad():
                valid_loss_sum = 0
                for start_states, next_states, true_actions in valid_dl:
                    start_states = start_states.float()
                    next_states = next_states.float()
                    true_actions = true_actions.float()
                    combined_input = torch.cat((start_states, true_actions), dim=1)
                    pred_states = self.model(combined_input)
                    valid_loss_sum += self.criterion(pred_states, next_states)
            valid_loss = valid_loss_sum / len(valid_dl)
            loss_list += losses_for_given_epoch
            avg_loss_list.append(np.mean(losses_for_given_epoch))
            valid_loss_list.append(valid_loss)
            logger.info(f'Completed epoch: {epoch}/{num_epochs}')
            logger.info(f'Avg loss this epoch: {np.mean(losses_for_given_epoch)}')
            logger.info(f'Validation loss this epoch: {valid_loss}')
        return loss_list, avg_loss_list, valid_loss_list | [
"logging.getLogger",
"torch.manual_seed",
"logging.basicConfig",
"torch.nn.ReLU",
"numpy.mean",
"torch.nn.MSELoss",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.no_grad",
"torch.cat"
] | [((273, 300), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (290, 300), False, 'import logging\n'), ((301, 322), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (320, 322), False, 'import logging\n'), ((227, 252), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (250, 252), False, 'import torch\n'), ((622, 634), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (632, 634), True, 'import torch.nn as nn\n'), ((662, 685), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (679, 685), False, 'import torch\n'), ((3476, 3488), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (3486, 3488), True, 'import torch.nn as nn\n'), ((3516, 3539), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (3533, 3539), False, 'import torch\n'), ((840, 880), 'torch.nn.Linear', 'nn.Linear', (['self.state_dims', 'latent_var_1'], {}), '(self.state_dims, latent_var_1)\n', (849, 880), True, 'import torch.nn as nn\n'), ((894, 903), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (901, 903), True, 'import torch.nn as nn\n'), ((917, 954), 'torch.nn.Linear', 'nn.Linear', (['latent_var_1', 'latent_var_2'], {}), '(latent_var_1, latent_var_2)\n', (926, 954), True, 'import torch.nn as nn\n'), ((968, 977), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (975, 977), True, 'import torch.nn as nn\n'), ((991, 1027), 'torch.nn.Linear', 'nn.Linear', (['latent_var_2', 'action_dims'], {}), '(latent_var_2, action_dims)\n', (1000, 1027), True, 'import torch.nn as nn\n'), ((3690, 3730), 'torch.nn.Linear', 'nn.Linear', (['self.state_dims', 'latent_var_1'], {}), '(self.state_dims, latent_var_1)\n', (3699, 3730), True, 'import torch.nn as nn\n'), ((3744, 3753), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3751, 3753), True, 'import torch.nn as nn\n'), ((3767, 3804), 'torch.nn.Linear', 'nn.Linear', (['latent_var_1', 'latent_var_2'], {}), '(latent_var_1, latent_var_2)\n', (3776, 3804), True, 'import 
torch.nn as nn\n'), ((3818, 3827), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3825, 3827), True, 'import torch.nn as nn\n'), ((3841, 3881), 'torch.nn.Linear', 'nn.Linear', (['latent_var_2', 'next_state_dims'], {}), '(latent_var_2, next_state_dims)\n', (3850, 3881), True, 'import torch.nn as nn\n'), ((1855, 1900), 'torch.cat', 'torch.cat', (['(start_states, next_states)'], {'dim': '(1)'}), '((start_states, next_states), dim=1)\n', (1864, 1900), False, 'import torch\n'), ((2204, 2219), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2217, 2219), False, 'import torch\n'), ((2868, 2899), 'numpy.mean', 'np.mean', (['losses_for_given_epoch'], {}), '(losses_for_given_epoch)\n', (2875, 2899), True, 'import numpy as np\n'), ((4714, 4760), 'torch.cat', 'torch.cat', (['(start_states, true_actions)'], {'dim': '(1)'}), '((start_states, true_actions), dim=1)\n', (4723, 4760), False, 'import torch\n'), ((5060, 5075), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5073, 5075), False, 'import torch\n'), ((5720, 5751), 'numpy.mean', 'np.mean', (['losses_for_given_epoch'], {}), '(losses_for_given_epoch)\n', (5727, 5751), True, 'import numpy as np\n'), ((2535, 2580), 'torch.cat', 'torch.cat', (['(start_states, next_states)'], {'dim': '(1)'}), '((start_states, next_states), dim=1)\n', (2544, 2580), False, 'import torch\n'), ((5390, 5436), 'torch.cat', 'torch.cat', (['(start_states, true_actions)'], {'dim': '(1)'}), '((start_states, true_actions), dim=1)\n', (5399, 5436), False, 'import torch\n'), ((3062, 3093), 'numpy.mean', 'np.mean', (['losses_for_given_epoch'], {}), '(losses_for_given_epoch)\n', (3069, 3093), True, 'import numpy as np\n'), ((5914, 5945), 'numpy.mean', 'np.mean', (['losses_for_given_epoch'], {}), '(losses_for_given_epoch)\n', (5921, 5945), True, 'import numpy as np\n')] |
# imports
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
import numpy as np
from matplotlib.animation import FuncAnimation
import matplotlib.gridspec as gridspec
import os
import time
from manipulate_readinuvot import uvot
import scipy
from scipy.interpolate import interp1d
import matplotlib.image as mpimg
from bokeh.plotting import figure, output_file, show, ColumnDataSource, output_file, save
from bokeh.io import curdoc
from bokeh.palettes import Turbo256
from bokeh.layouts import gridplot
import random
import bokeh.plotting
import bokeh.layouts
import bokeh.embed
from bokeh.embed import json_item, file_html, components
from bokeh.resources import CDN
import json
def random_color(num):
    """Return ``num`` colors spread across the Turbo palette, shuffled.

    The palette (minus its 20 darkest entries) is split into ``num``
    contiguous chunks and one color is drawn from each chunk, so picks
    span the whole palette; the result is shuffled so adjacent series
    do not get adjacent hues.

    Args:
        num (int): number of colors to return (must be > 0).

    Returns:
        list: ``num`` color strings in random order.
    """
    # random.choice is the idiomatic (and cheaper) way to draw one element,
    # replacing random.sample(..., 1)[0].
    chosen = [random.choice(list(chunk)) for chunk in np.array_split(Turbo256[20:], num)]
    random.shuffle(chosen)
    return chosen
def bokeh_plots(plot, output_file_name):
    """Build, save and show a two-panel bokeh summary for one object.

    Reads ``../output/TEMPLATE/<name>_template.csv`` (spectra) and
    ``../output/MAGS/<name>_mangledmagsarray.csv`` (light curves),
    builds a flux-vs-wavelength figure and a magnitude-vs-time figure,
    saves both as ``../output/PLOTS/HTML/<name>_summaryPlot.html`` and
    returns the two figures.

    Args:
        plot: not referenced in this body -- presumably kept for caller
            symmetry; TODO confirm it can be dropped.
        output_file_name (str): basename shared by the input CSVs and
            the saved HTML file.

    Returns:
        tuple: (spec, light) bokeh figure objects.
    """
    curdoc().theme = 'dark_minimal'
    # ------------------------ FIRST PLOT = Flux vs Wavelength  ------------------------
    # Get data and group by the different times
    df1= pd.read_csv(os.path.join('..','output', 'TEMPLATE', output_file_name+'_template.csv'), header=0)
    time_df = df1.groupby(['MJD'])
    groups=[time_df.get_group(x).sort_values(by=('MJD')).reset_index() for x in time_df.groups]
    num_groups= len(groups)
    # One legend entry per epoch, labelled by MJD rounded to 3 decimals.
    time_groups=[round(groups[idx]["MJD"][0], 3) for idx in range(num_groups)]
    spec = figure(
        title = 'Flux vs Wavelength',
        x_axis_label = 'Wavelength (angstroms)',
        y_axis_label = 'log(flux)+constant',
        plot_width=1100, plot_height=500
        )
    groups_list_wavelength = [list(i['Wavelength']) for i in groups]
    groups_list_Flux = [list(i['Flux']) for i in groups]
    # One scatter series per epoch; clicking the legend hides the series.
    for t, w, f, color, in zip(time_groups, groups_list_wavelength, groups_list_Flux, random_color(num_groups)):
        spec.circle(w, f, color=color, alpha=0.8, muted_color=color, muted_alpha=0.075, legend_label=str(t))
    spec.legend.click_policy="hide"
    spec.add_layout(spec.legend[0], 'right')
    # ------------------------ FIRST PLOT END ------------------------
    # ------------------------ SECOND PLOT = Magnitude vs Time (MJD) Plot ------------------------
    # Get data from uvot function that returns the manipulated combined data from the original .dat file
    # The combined data is simply each row is the appropriate time the data was measured and the associated band magnitude measurements
    df=pd.read_csv('../output/MAGS/'+output_file_name+'_mangledmagsarray.csv')
    # Interpolate linearly for the missing NaN values in the each band that has Nan values. We do not do it for band error measurements
    filter_bands = list(filter(lambda i: ('Time' not in i and 'err' not in i),list(df.columns)))
    for band in filter_bands:
        nan_idx =list(df[band].index[df[band].apply(np.isnan)])
        if len(nan_idx)!=0:
            val_idx = [df[band][i] for i in range(len(df[band])) if i not in nan_idx]
            replace_nan_idx_times = [df['Time (MJD)'][i] for i in range(len(df[band])) if i in nan_idx]
            # Fit only the non-NaN rows, then fill the gaps (extrapolating at the ends).
            df_temp = df[df[band].isin(val_idx)]
            nan_interp_func = interp1d(df_temp['Time (MJD)'], df_temp[band], kind='linear', fill_value='extrapolate')
            for idx,i in enumerate(nan_idx):
                # NOTE(review): chained-indexing assignment; pandas may emit
                # SettingWithCopyWarning -- .loc[i, band] would be safer.
                df[band][i] = nan_interp_func(replace_nan_idx_times[idx])
    # Create the time interpolation function for each band
    interp_func_templates = [interp1d(df['Time (MJD)'], df[band], kind='cubic') for band in filter_bands]
    # Get a 1000 time points between the start and end times
    time_extrap = np.linspace(df['Time (MJD)'][0], df['Time (MJD)'].iloc[-1], num=1000, endpoint=True)
    # Interpolate magnitude for each band for each of the 1000 time points
    # NOTE(review): interp_funcs is computed but never plotted below.
    interp_funcs = [i(time_extrap) for i in interp_func_templates]
    # # Plot the interpolated plots that are smooth because of high enumeration of values inbetween the times given
    # for idx,func in enumerate(interp_funcs):
    #     ax2.plot(time_extrap,func, label=filter_bands[idx])
    light = figure(
        title = 'Magnitude vs Time',
        x_axis_label = 'Time (MJD)',
        y_axis_label = 'Magnitude',
    )
    x= [list(df['Time (MJD)']) for fb in filter_bands]
    y = [ list(df[fb]) for fb in filter_bands]
    # for idx,func in enumerate(interp_funcs):
    # One line+scatter series per photometric band.
    for filter_legend, time, bands, color, in zip(filter_bands, x, y, random_color(len(filter_bands))):
        light.line(time, bands, color=color, legend_label=filter_legend)
        light.circle(time, bands, color=color, alpha=0.8, muted_color=color, muted_alpha=0.075, legend_label=filter_legend)
    light.legend.click_policy="hide"
    light.add_layout(light.legend[0], 'right')
    # Magnitudes: smaller is brighter, so flip the y axis.
    light.y_range.flipped = True
    spec.sizing_mode = 'stretch_both'
    light.sizing_mode = 'stretch_both'
    grid = gridplot([[spec], [light]], sizing_mode='stretch_both', toolbar_options=dict(logo=None), toolbar_location=None)
    output_file(r'../output/PLOTS/HTML/'+output_file_name+'_summaryPlot.html')
    save(grid)
    show(spec)
    # print(file_html(grid, CDN,r'../output/PLOTS/HTML/'+output_file_name+'_summaryPlot.html'))
    # plots={'spec':spec, 'light': light}
    # scripts,div = components(plots)
    # print(scripts)
    # print(div)
    # item_text = json.dumps(json_item(grid, output_file_name+'_summaryPlot'))
    # print(item_text)
    return spec, light
# show(grid) # ------------------------ SECOND PLOT END ------------------------
def summary_plot(plot, output_file_name):
    """Build and save the two-panel summary plot for one object.

    Thin wrapper around :func:`bokeh_plots`.

    Args:
        plot: forwarded to bokeh_plots (unused there -- TODO confirm).
        output_file_name (str): basename used to locate the input CSVs
            and to name the saved HTML file.

    Returns:
        tuple: the (spec, light) bokeh figures produced by bokeh_plots.
    """
    # BUGFIX: propagate bokeh_plots' return value instead of discarding it;
    # callers that ignored the old None return are unaffected.
    return bokeh_plots(plot, output_file_name)
if __name__ == "__main__":
    # Previously-used inputs, kept for quick re-runs:
    # summary_plot("SN2007af","SN2007af_SNIa_series")
    # summary_plot("SN2005cs","SN2005cs_uvot_SNII_series")
    # Generate the summary plot for SN2011by when run as a script.
    summary_plot("SN2011by","SN2011by_SNIa_series")
| [
"random.shuffle",
"bokeh.plotting.figure",
"pandas.read_csv",
"bokeh.plotting.show",
"bokeh.plotting.save",
"os.path.join",
"bokeh.io.curdoc",
"scipy.interpolate.interp1d",
"numpy.array_split",
"numpy.linspace",
"bokeh.plotting.output_file"
] | [((859, 891), 'random.shuffle', 'random.shuffle', (['random_color_arr'], {}), '(random_color_arr)\n', (873, 891), False, 'import random\n'), ((1491, 1637), 'bokeh.plotting.figure', 'figure', ([], {'title': '"""Flux vs Wavelength"""', 'x_axis_label': '"""Wavelength (angstroms)"""', 'y_axis_label': '"""log(flux)+constant"""', 'plot_width': '(1100)', 'plot_height': '(500)'}), "(title='Flux vs Wavelength', x_axis_label='Wavelength (angstroms)',\n y_axis_label='log(flux)+constant', plot_width=1100, plot_height=500)\n", (1497, 1637), False, 'from bokeh.plotting import figure, output_file, show, ColumnDataSource, output_file, save\n'), ((2541, 2616), 'pandas.read_csv', 'pd.read_csv', (["('../output/MAGS/' + output_file_name + '_mangledmagsarray.csv')"], {}), "('../output/MAGS/' + output_file_name + '_mangledmagsarray.csv')\n", (2552, 2616), True, 'import pandas as pd\n'), ((3691, 3779), 'numpy.linspace', 'np.linspace', (["df['Time (MJD)'][0]", "df['Time (MJD)'].iloc[-1]"], {'num': '(1000)', 'endpoint': '(True)'}), "(df['Time (MJD)'][0], df['Time (MJD)'].iloc[-1], num=1000,\n endpoint=True)\n", (3702, 3779), True, 'import numpy as np\n'), ((4164, 4255), 'bokeh.plotting.figure', 'figure', ([], {'title': '"""Magnitude vs Time"""', 'x_axis_label': '"""Time (MJD)"""', 'y_axis_label': '"""Magnitude"""'}), "(title='Magnitude vs Time', x_axis_label='Time (MJD)', y_axis_label=\n 'Magnitude')\n", (4170, 4255), False, 'from bokeh.plotting import figure, output_file, show, ColumnDataSource, output_file, save\n'), ((5083, 5160), 'bokeh.plotting.output_file', 'output_file', (["('../output/PLOTS/HTML/' + output_file_name + '_summaryPlot.html')"], {}), "('../output/PLOTS/HTML/' + output_file_name + '_summaryPlot.html')\n", (5094, 5160), False, 'from bokeh.plotting import figure, output_file, show, ColumnDataSource, output_file, save\n'), ((5162, 5172), 'bokeh.plotting.save', 'save', (['grid'], {}), '(grid)\n', (5166, 5172), False, 'from bokeh.plotting import figure, output_file, 
show, ColumnDataSource, output_file, save\n'), ((5177, 5187), 'bokeh.plotting.show', 'show', (['spec'], {}), '(spec)\n', (5181, 5187), False, 'from bokeh.plotting import figure, output_file, show, ColumnDataSource, output_file, save\n'), ((966, 974), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (972, 974), False, 'from bokeh.io import curdoc\n'), ((1156, 1232), 'os.path.join', 'os.path.join', (['""".."""', '"""output"""', '"""TEMPLATE"""', "(output_file_name + '_template.csv')"], {}), "('..', 'output', 'TEMPLATE', output_file_name + '_template.csv')\n", (1168, 1232), False, 'import os\n'), ((3535, 3585), 'scipy.interpolate.interp1d', 'interp1d', (["df['Time (MJD)']", 'df[band]'], {'kind': '"""cubic"""'}), "(df['Time (MJD)'], df[band], kind='cubic')\n", (3543, 3585), False, 'from scipy.interpolate import interp1d\n'), ((819, 853), 'numpy.array_split', 'np.array_split', (['Turbo256[20:]', 'num'], {}), '(Turbo256[20:], num)\n', (833, 853), True, 'import numpy as np\n'), ((3239, 3331), 'scipy.interpolate.interp1d', 'interp1d', (["df_temp['Time (MJD)']", 'df_temp[band]'], {'kind': '"""linear"""', 'fill_value': '"""extrapolate"""'}), "(df_temp['Time (MJD)'], df_temp[band], kind='linear', fill_value=\n 'extrapolate')\n", (3247, 3331), False, 'from scipy.interpolate import interp1d\n')] |
# coding:utf-8
"""
Copyright 2021 Huawei Technologies Co., Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import numpy as np
import pandas as pd
def str2digit(s):
    """Convert *s* to an int when it consists solely of digits; otherwise
    return the string unchanged (so negative numbers stay strings)."""
    return int(s) if s.isdigit() else s
def simplify_tagging_info(info_path="../data/config/", label_file="annotations_final.csv"):
    """Reduce the full tag annotations to the 50 most frequent tags.

    Reads the tab-separated annotation file, keeps only the 50 tags with
    the highest occurrence rate, and writes one 0/1 row per clip (plus
    the clip's trailing path column) to music_tagging_tmp.txt/.csv under
    ``info_path``.

    Returns:
        list: the simplified rows (50 flag strings + path per row).
    """
    print("-"*25, "now in function simplify_tagging_info", "-"*25)
    T = []
    with open(os.path.join(info_path, label_file), 'rb') as info:
        data = info.readline()
        while data:
            # i[1:-1] strips the first/last char of every tab-separated
            # field -- presumably surrounding quotes; confirm with the file.
            T.append([str2digit(i[1:-1]) for i in data.strip().decode('utf-8').split("\t")])
            data = info.readline()
    # T[0] is the header row; the rest are per-clip annotation rows.
    annotation = pd.DataFrame(T[1:], columns=T[0])
    # Occurrence rate per tag column (first and last two columns excluded --
    # non-tag columns, presumably id/path; TODO confirm against the header).
    count = []
    for i in annotation.columns[1:-2]:
        count.append([annotation[i].sum() / len(annotation), i])
    count = sorted(count)
    # The 50 most frequent tags define the reduced label set.
    full_label = []
    for i in count[-50:]:
        full_label.append(i[1])
    simplied_tag = []
    for i in T[1:]:
        # Indices of the tags set to 1 for this clip, mapped to tag names.
        index = [k for k, x in enumerate(i) if x == 1]
        label = [T[0][k] for k in index]
        # 50 zero flags followed by the clip's last column (its path).
        L = [str(0) for _ in range(50)]
        L.append(i[-1])
        for j in label:
            if j in full_label:
                ind = full_label.index(j)
                L[ind] = '1'
        simplied_tag.append(L)
    # The same matrix is written twice, once per extension.
    txt_save_path = os.path.join(info_path, "music_tagging_tmp.txt")
    np.savetxt(txt_save_path, np.array(simplied_tag), fmt='%s', delimiter=',')
    csv_save_path = os.path.join(info_path, "music_tagging_tmp.csv")
    np.savetxt(csv_save_path, np.array(simplied_tag), fmt='%s', delimiter=',')
    print("successfully save tagging info in:\n", info_path)
    return simplied_tag
def get_labels(info_list, infer_result_path):
    """Collect (true label, inference result) pairs for every record.

    The last field of each record is a file path; the matching inference
    output is ``<stem>.txt`` inside ``infer_result_path``. Records whose
    result file is missing are silently skipped.

    Returns:
        tuple: (list of label arrays, list of prediction arrays).
    """
    print("-"*25, "now in function get_labels", "-"*25)
    labels = []
    preds = []
    print("info list length:\n", len(info_list))
    for record in info_list:
        # Swap the original extension for .txt to locate the result file.
        stem = os.path.split(record[-1])[1][:-4]
        result_path = os.path.join(infer_result_path, stem + ".txt")
        if not os.path.exists(result_path):
            continue
        labels.append(np.array([str2digit(v) for v in record[:-1]]))
        preds.append(np.loadtxt(result_path, delimiter=','))
    return labels, preds
def compute_auc(labels_list, preds_list):
    """Histogram-approximated ROC-AUC, averaged over label columns.

    Input:
        labels_list: ndarray of binary ground-truth labels (1-D or 2-D)
        preds_list: ndarray of prediction scores in [0, 1], same shape
    Outputs
        Float, mean AUC over columns (or an error string when the input
        is empty -- legacy behavior preserved for callers that print it)
    """
    print("-"*25, "now in function compute_auc", "-"*25)
    auc = []
    if labels_list.shape[0] <= 0:
        return "label list is None!"
    print("shape of labels_list", labels_list.shape)
    print("shape of preds_list", preds_list.shape)
    # Half as many score bins as samples approximates the ROC curve.
    n_bins = labels_list.shape[0] // 2
    if labels_list.ndim == 1:
        # Promote vectors to single-column matrices so one loop serves both.
        labels_list = labels_list.reshape(-1, 1)
        preds_list = preds_list.reshape(-1, 1)
    for col in range(labels_list.shape[1]):
        truth = labels_list[:, col]
        scores = preds_list[:, col]
        n_pos = truth.sum()
        n_neg = truth.shape[0] - n_pos
        pair_count = n_pos * n_neg
        pos_hist = np.zeros(n_bins)
        neg_hist = np.zeros(n_bins)
        bin_width = 1.0 / n_bins
        # Bucket every score; the top edge (score == 1) folds into the last bin.
        for score, is_pos in zip(scores, truth):
            bucket = int(score // bin_width)
            if bucket == n_bins:
                bucket = bucket - 1
            if is_pos:
                pos_hist[bucket] = pos_hist[bucket] + 1
            else:
                neg_hist[bucket] = neg_hist[bucket] + 1
        # Count correctly-ranked (pos, neg) pairs; same-bin ties count half.
        seen_neg = 0
        ranked = 0
        for b in range(n_bins):
            ranked += pos_hist[b] * seen_neg + pos_hist[b] * neg_hist[b] * 0.5
            seen_neg += neg_hist[b]
        auc.append(ranked / pair_count)
    return np.mean(auc)
if __name__ == "__main__":
    # BUGFIX: sys.argv[3] is read below, so four entries (script name plus
    # three arguments) are required; the old check `< 3` let a two-argument
    # call crash with IndexError instead of printing the usage message.
    if len(sys.argv) < 4:
        print("Error-three arguments are required, your command should be like this:")
        print("    python mxbase_get_auc.py info_file_path info_filename infer_results_path")
        print("For example:")
        print("    python mxbase_get_auc.py ../data/config/ annotations_final.csv ../mxbase/results/infer_results")
    else:
        base_info_path = sys.argv[1]
        info_file_name = sys.argv[2]
        base_result_path = sys.argv[3]
        # Reduce annotations, pair them with inference outputs, score AUC.
        simp_info_tags = simplify_tagging_info(base_info_path, info_file_name)
        _label_list, _pred_list = get_labels(simp_info_tags, base_result_path)
        auc_val = compute_auc(np.array(_label_list), np.array(_pred_list))
        print("-" * 27 + " Validation Performance " + "-" * 27)
        print("AUC: {:.5f}\n".format(auc_val))
| [
"numpy.mean",
"os.path.exists",
"os.path.join",
"os.path.split",
"numpy.array",
"numpy.zeros",
"pandas.DataFrame",
"numpy.loadtxt"
] | [((1220, 1253), 'pandas.DataFrame', 'pd.DataFrame', (['T[1:]'], {'columns': 'T[0]'}), '(T[1:], columns=T[0])\n', (1232, 1253), True, 'import pandas as pd\n'), ((1857, 1905), 'os.path.join', 'os.path.join', (['info_path', '"""music_tagging_tmp.txt"""'], {}), "(info_path, 'music_tagging_tmp.txt')\n", (1869, 1905), False, 'import os\n'), ((2005, 2053), 'os.path.join', 'os.path.join', (['info_path', '"""music_tagging_tmp.csv"""'], {}), "(info_path, 'music_tagging_tmp.csv')\n", (2017, 2053), False, 'import os\n'), ((4673, 4685), 'numpy.mean', 'np.mean', (['auc'], {}), '(auc)\n', (4680, 4685), True, 'import numpy as np\n'), ((1936, 1958), 'numpy.array', 'np.array', (['simplied_tag'], {}), '(simplied_tag)\n', (1944, 1958), True, 'import numpy as np\n'), ((2084, 2106), 'numpy.array', 'np.array', (['simplied_tag'], {}), '(simplied_tag)\n', (2092, 2106), True, 'import numpy as np\n'), ((2490, 2519), 'os.path.split', 'os.path.split', (['label_info[-1]'], {}), '(label_info[-1])\n', (2503, 2519), False, 'import os\n'), ((2583, 2625), 'os.path.join', 'os.path.join', (['infer_result_path', 'file_name'], {}), '(infer_result_path, file_name)\n', (2595, 2625), False, 'import os\n'), ((2637, 2661), 'os.path.exists', 'os.path.exists', (['rst_file'], {}), '(rst_file)\n', (2651, 2661), False, 'import os\n'), ((3837, 3853), 'numpy.zeros', 'np.zeros', (['n_bins'], {}), '(n_bins)\n', (3845, 3853), True, 'import numpy as np\n'), ((3885, 3901), 'numpy.zeros', 'np.zeros', (['n_bins'], {}), '(n_bins)\n', (3893, 3901), True, 'import numpy as np\n'), ((971, 1006), 'os.path.join', 'os.path.join', (['info_path', 'label_file'], {}), '(info_path, label_file)\n', (983, 1006), False, 'import os\n'), ((2761, 2796), 'numpy.loadtxt', 'np.loadtxt', (['rst_file'], {'delimiter': '""","""'}), "(rst_file, delimiter=',')\n", (2771, 2796), True, 'import numpy as np\n'), ((5377, 5398), 'numpy.array', 'np.array', (['_label_list'], {}), '(_label_list)\n', (5385, 5398), True, 'import numpy as np\n'), ((5400, 
5420), 'numpy.array', 'np.array', (['_pred_list'], {}), '(_pred_list)\n', (5408, 5420), True, 'import numpy as np\n')] |
## @ingroup Methods-Flight_Dynamics-Dynamic_Stability-Full_Linearized_Equations-Supporting_Functions
# cy_psi.py
#
# Created: Jun 2014, <NAME>
# Modified: Jan 2016, <NAME>
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
## @ingroup Methods-Flight_Dynamics-Dynamic_Stability-Full_Linearized_Equations-Supporting_Functions
import numpy as np
# ----------------------------------------------------------------------
# Method
# ----------------------------------------------------------------------
def cy_psi(cL,theta):
    """ This calculates the force coefficient in the y direction
    with respect to the yaw angle of the aircraft: cy_psi = cL * tan(theta)
    Assumptions:
    None
    Source:
    <NAME>, "Automatic Control of Aircraft and Missiles"
    Wiley & Sons, Inc. New York, 1991, (pg 23)
    Inputs:
    theta [radians]  (equilibrium flight-path/attitude angle -- confirm against source)
    cL [dimensionless]  (lift coefficient)
    Outputs:
    cy_psi [dimensionless]  (side-force derivative w.r.t. yaw angle)
    Properties Used:
    N/A
    """
    # Generating Stability derivative: side force per unit yaw angle,
    # linearized as cL * tan(theta)
    cy_psi = cL * np.tan(theta)
    return cy_psi | [
"numpy.tan"
] | [((1153, 1166), 'numpy.tan', 'np.tan', (['theta'], {}), '(theta)\n', (1159, 1166), True, 'import numpy as np\n')] |
"""Testing send_data functionality."""
import time
import pandas as pd
import pytest
import numpy as np
from dbrequests.mysql.tests.conftest import set_up_cats as reset
from dbrequests.mysql.tests.conftest import (
set_up_membership as reset_membership,
set_up_diffs as reset_diffs)
from sqlalchemy.exc import OperationalError, InternalError
@pytest.mark.usefixtures('db')
class TestSendDataDiffs:
    """Tests for mode=[sync|update|replace|insert]_diffs."""

    def test_sync_diffs(self, db):
        """
        Synchronize has the same affect as a truncate. We only update what has
        changed locally, aka the diffs, and delete what we find in the remote
        table, but is missing locally.
        """
        reset_diffs(db)
        df = pd.DataFrame({
            'id': [1, 2, 3],
            'value': ['a', 'b', 'c']
        })
        db.send_data(df, 'diffs', mode='insert')
        new = pd.DataFrame({
            'id': [1, 3, 4],
            'value': ['c', 'c', 'b']
        })
        db.send_data(new, 'diffs', mode='sync_diffs')
        res = db.send_query('select id, value from diffs;')
        assert (new == res).all(axis=None)

    def test_sending_only_diffs(self, db):
        """Construct diffs."""
        reset_diffs(db)
        df = pd.DataFrame({
            'id': [1, 2, 3],
            'value': ['a', 'b', 'c']
        })
        # testing on empty table/result set:
        db.send_data(df, 'diffs', mode='insert_diffs')
        # - expect a new row in the table
        # - if the complete set was transmitted, we would have one
        #   timestamp; when only the diffs are transmitted, we expect 2
        # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
        # pd.concat with default index handling is the drop-in equivalent
        # (the original row labels are preserved, as append did).
        df = pd.concat([df, pd.DataFrame({'id': [4, 5], 'value': 'd'})])
        time.sleep(1)
        db.send_data(df, 'diffs', mode='replace_diffs')
        res = db.send_query('select * from diffs')
        assert res.shape == (5, 3)
        assert res.updated.nunique() == 2
        # - expect a modified value
        # - expect a new timestamp for existing row, because we replace
        # .loc avoids the chained assignment (df['value'][1] = ...) that is
        # unreliable under pandas copy-on-write; label 1 selects the same
        # rows the chained form wrote to (both rows labelled 1 after concat).
        df.loc[1, 'value'] = 'a'  # new value
        time.sleep(1)
        db.send_data(df, 'diffs', mode='replace_diffs')
        res = db.send_query('select * from diffs')
        assert res.updated.nunique() == 3
        assert res.value[1] == 'a'
        # - expect a modified value
        # - expect no new timestamp
        df.loc[1, 'value'] = 'c'  # new value
        time.sleep(1)
        db.send_data(df, 'diffs', mode='update_diffs')
        res = db.send_query('select * from diffs')
        assert res.updated.nunique() == 3
        assert res.value[1] == 'c'

    def test_sending_empty_diffs(self, db):
        """Empty diffs: same issue as in #36"""
        reset_diffs(db)
        df = pd.DataFrame({
            'id': [1, 2, 3],
            'value': ['a', 'b', 'c']
        })
        # We write initial data:
        db.send_data(df, 'diffs', mode='insert_diffs')
        # When the diffs are empty, the program freezes.
        # Check for all diff-modes:
        db.send_data(df, 'diffs', mode='insert_diffs')
        db.send_data(df, 'diffs', mode='update_diffs')
        db.send_data(df, 'diffs', mode='replace_diffs')
        # The test is, that the program does not freeze:
        assert True

    def test_with_system_versioned_table(self, db):
        """
        We check that we can send data using the temporary tables context
        manager, but without temporary tables. These are not implemented for
        system versioned tables in mariadb.
        """
        reset(db)
        res = db.send_query(
            'select id, name, owner from cats', to_pandas=False)
        with pytest.raises((OperationalError, InternalError)):
            db.send_data(res, 'hist_cats', 'update_diffs')
        db.send_data(res, 'hist_cats', 'update_diffs', with_temp=False)
        with pytest.raises((OperationalError, InternalError)):
            db.send_delete(res, 'hist_cats', 'in_join')
        db.send_delete(res, 'hist_cats', 'in_join', with_temp=False)
        # If it doesn't work, we get an error from the database.
        assert True

    def test_with_system_versioned_table_sync(self, db):
        """
        We check that we can send data using the temporary tables context
        manager, but without temporary tables. These are not implemented for
        system versioned tables in mariadb.
        """
        reset(db)
        res = db.send_query(
            'select id, name, owner from cats', to_pandas=False)
        db.send_data(res, 'hist_cats', 'sync_diffs', with_temp=False)
        # If it doesn't work, we get an error from the database.
        assert True
@pytest.mark.usefixtures('db')
class TestSendDataInsert:
    """Tests for mode=insert."""

    def test_insert_happy_path(self, db):
        """A plain insert lands the row in the table unchanged."""
        reset(db)
        payload = pd.DataFrame(
            {'name': ['Chill'], 'owner': ['Alex'], 'birth': ['2018-03-03']})
        db.send_data(payload, 'cats', mode='insert')
        fetched = db.query("select name, owner, birth from cats where id = 4;")
        fetched.birth = fetched.birth.astype(str)
        assert (payload == fetched).all(axis=None)

    def test_insert_no_override(self, db):
        """Inserting on an existing primary key leaves the stored row as is."""
        reset(db)
        payload = pd.DataFrame(
            {'id': [3], 'name': ['Charlie'], 'owner': ['River'],
             'birth': ['2016-05-22']})
        db.send_data(payload, 'cats', mode='insert')
        fetched = db.query("SELECT * FROM cats where id = 3;")
        assert fetched.birth.astype(str)[0] == '2016-05-21'
@pytest.mark.usefixtures('db')
class TestSendDataDeletes:
    """Tests for mode=delete|truncate."""

    def test_send_data_truncate(self, db):
        """The table is emptied before the new row goes in."""
        reset(db)
        row = pd.DataFrame(
            {'id': [1], 'name': ['Chill'], 'owner': ['Alex'],
             'birth': ['2018-03-03']})
        db.send_data(row, 'cats', mode='truncate')
        counts = db.query("SELECT count(*) as nrows FROM cats;")
        assert counts.nrows[0] == 1
        stored = db.query("SELECT * FROM cats;")
        stored.birth = stored.birth.astype(str)
        assert (row == stored).all(axis=None)

    def test_delete_happy_path(self, db):
        """All rows are deleted first, then the new data is inserted."""
        reset(db)
        row = pd.DataFrame(
            {'id': [1], 'name': ['Chill'], 'owner': ['Alex'],
             'birth': ['2018-03-03']})
        db.send_data(row, 'cats', mode='delete')
        counts = db.query("SELECT count(*) as nrows FROM cats;")
        assert counts.nrows[0] == 1

    def test_delete_rollback_on_failure(self, db):
        """A failing insert rolls the preceding delete back."""
        reset(db)
        bad_row = pd.DataFrame(
            {'id': [1], 'name': ['Chill'], 'owner': ['Alex'],
             'wrong_col': ['2018-03-03']})
        # pymysql raises InternalError here, mysqldb raises OperationalError.
        with pytest.raises((OperationalError, InternalError)):
            db.send_data(bad_row, 'cats', mode='delete')
        counts = db.query("SELECT count(*) as nrows FROM cats;")
        assert counts.nrows[0] == 3
@pytest.mark.usefixtures('db')
class TestSendDataReplace:
    """Tests for mode=replace."""

    def test_send_data_replace(self, db):
        """On a duplicate key the stored row is replaced, nothing is added."""
        reset(db)
        replacement = pd.DataFrame(
            {'id': [1], 'name': ['Chill'], 'owner': ['Alex'],
             'birth': ['2018-03-03']})
        db.send_data(replacement, 'cats', mode='replace')
        # Row count is unchanged ...
        counts = db.query("SELECT count(*) as nrows FROM cats;")
        assert counts.nrows[0] == 3
        # ... but the row with the duplicate key now carries the new values.
        stored = db.query("SELECT * FROM cats where id = 1;")
        stored.birth = stored.birth.astype(str)
        assert (replacement == stored).all(axis=None)
@pytest.mark.usefixtures('db')
class TestSendDataUpdate:
    """Tests for mode=update."""

    def test_send_data_update(self, db):
        """Check for mode update.
        Update means:
        - we can add rows / new data, similar to insert
        - we can update on duplicate key instead of replace
        - we can update selected columns, maybe just one field + primary key
        """
        reset(db)
        upsert = pd.DataFrame(
            {'id': [1, 4],
             'name': ['Chill', 'Pi'],
             'owner': ['Alex', 'Matt'],
             'birth': ['2018-03-03', '2019-08-05']})
        db.send_data(upsert, 'cats', mode='update')
        # One new row was appended ...
        counts = db.query("SELECT count(*) as nrows FROM cats;")
        assert counts.nrows[0] == 4
        # ... and the duplicate-key row was updated in place.
        stored = db.query("SELECT * FROM cats where id in (1, 4);")
        stored.birth = stored.birth.astype(str)
        assert (upsert == stored).all(axis=None)
        # A partial update may touch a single column plus the primary key.
        partial = pd.DataFrame(
            {'id': [2],
             'birth': ['2014-11-13']})  # we update this value for id = 2
        expected = pd.DataFrame(
            {'id': [2], 'name': ['Cookie'], 'owner': ['Casey'],
             'birth': ['2014-11-13']})
        reset(db)
        db.send_data(partial, 'cats', mode='update')
        stored = db.query("SELECT * FROM cats where id = 2;")
        stored.birth = stored.birth.astype(str)
        assert (expected == stored).all(axis=None)
@pytest.mark.usefixtures('db')
class TestSendDataBehaviours:
"""Behaviours which are due to CSV and work for all modes."""
def test_send_empty_data_frame(self, db):
"""
We need to check that we can handle empty data frames in send data. See
#36.
"""
res = db.send_query('select * from cats where id < 0')
db.send_data(res, 'cats')
# The program was freezing, so we are happy that the test 'runs'.
assert True
def test_send_data_idempotence(self, db):
"""We check that reading and writing back in is idempotent.
This is not obvious because we write to a CSV as intermediate step!
Special cases, we need to check:
- missing values
- dates / (datetimes)
- (decimals)
- (64bit integer)
TODO: Currently we hold hands and pray that these cases actually work!
"""
df_replace = pd.DataFrame({
'id': [1],
'name': ['Chill'],
'owner': [np.nan],
'birth': ['2018-03-03']
})
reset(db)
db.send_data(df_replace, 'cats', mode='replace')
df_in = db.query("SELECT * FROM cats;")
db.send_data(df_in, 'cats', mode='truncate')
df_inn = db.query("SELECT * FROM cats;")
assert (df_in == df_inn).all(axis=None)
def test_column_arrangemant_is_maintained(self, db):
"""Insert some data with fliped columns: #24"""
reset(db)
df_1 = db.send_query(
"select birth, name, owner from cats where id = 3;")
db.send_data(df_1, 'cats', mode='insert')
df_2 = db.send_query(
"select birth, name, owner from cats where id = 4;")
assert (df_1 == df_2).all(axis=None)
def test_escape_sequences(self, db):
"""Insert some data with escape sequences: #28"""
reset(db)
db.send_bulk_query('truncate table cats;')
db.send_data({'name': ['\\'], 'owner': ['0bnrtZN']}, 'cats')
res = db.send_query('select name, owner from cats;')
assert res.name[0] == '\\'
# all known escape sequences from:
# https://dev.mysql.com/doc/refman/8.0/en/load-data.html
assert res.owner[0] == '0bnrtZN'
def test_update_json_and_decimal(self, db):
"""Insert None/NULL values for json and decimal types: #30"""
reset_membership(db)
df_update = pd.DataFrame({
'id': range(4),
'membership': [
'{"BookClub": 1, "SportsClub": 1, "ClubClub": 1,}',
'{"BookClub": 0, "SportsClub": 0.5, "ClubClub": 0}',
'{"BookClub": null, "SportsClub": 1, "ClubClub": 2}',
None],
'average': [34.49, 34.51, 43.86, None]})
db.send_data(df_update, 'membership', mode='truncate')
df_in = db.send_query('SELECT * FROM membership')
assert self.is_na(df_in.membership[3])
assert np.isnan(df_in.average[3])
@staticmethod
def is_na(x):
if x:
return np.isnan(x)
else:
return True
| [
"dbrequests.mysql.tests.conftest.set_up_cats",
"time.sleep",
"dbrequests.mysql.tests.conftest.set_up_diffs",
"numpy.isnan",
"pytest.mark.usefixtures",
"dbrequests.mysql.tests.conftest.set_up_membership",
"pytest.raises",
"pandas.DataFrame"
] | [((354, 383), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""db"""'], {}), "('db')\n", (377, 383), False, 'import pytest\n'), ((4652, 4681), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""db"""'], {}), "('db')\n", (4675, 4681), False, 'import pytest\n'), ((5697, 5726), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""db"""'], {}), "('db')\n", (5720, 5726), False, 'import pytest\n'), ((7441, 7470), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""db"""'], {}), "('db')\n", (7464, 7470), False, 'import pytest\n'), ((8144, 8173), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""db"""'], {}), "('db')\n", (8167, 8173), False, 'import pytest\n'), ((9765, 9794), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""db"""'], {}), "('db')\n", (9788, 9794), False, 'import pytest\n'), ((734, 749), 'dbrequests.mysql.tests.conftest.set_up_diffs', 'reset_diffs', (['db'], {}), '(db)\n', (745, 749), True, 'from dbrequests.mysql.tests.conftest import set_up_membership as reset_membership, set_up_diffs as reset_diffs\n'), ((763, 820), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': [1, 2, 3], 'value': ['a', 'b', 'c']}"], {}), "({'id': [1, 2, 3], 'value': ['a', 'b', 'c']})\n", (775, 820), True, 'import pandas as pd\n'), ((918, 975), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': [1, 3, 4], 'value': ['c', 'c', 'b']}"], {}), "({'id': [1, 3, 4], 'value': ['c', 'c', 'b']})\n", (930, 975), True, 'import pandas as pd\n'), ((1250, 1265), 'dbrequests.mysql.tests.conftest.set_up_diffs', 'reset_diffs', (['db'], {}), '(db)\n', (1261, 1265), True, 'from dbrequests.mysql.tests.conftest import set_up_membership as reset_membership, set_up_diffs as reset_diffs\n'), ((1279, 1336), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': [1, 2, 3], 'value': ['a', 'b', 'c']}"], {}), "({'id': [1, 2, 3], 'value': ['a', 'b', 'c']})\n", (1291, 1336), True, 'import pandas as pd\n'), ((1729, 1742), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', 
(1739, 1742), False, 'import time\n'), ((2086, 2099), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2096, 2099), False, 'import time\n'), ((2407, 2420), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2417, 2420), False, 'import time\n'), ((2705, 2720), 'dbrequests.mysql.tests.conftest.set_up_diffs', 'reset_diffs', (['db'], {}), '(db)\n', (2716, 2720), True, 'from dbrequests.mysql.tests.conftest import set_up_membership as reset_membership, set_up_diffs as reset_diffs\n'), ((2734, 2791), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': [1, 2, 3], 'value': ['a', 'b', 'c']}"], {}), "({'id': [1, 2, 3], 'value': ['a', 'b', 'c']})\n", (2746, 2791), True, 'import pandas as pd\n'), ((3532, 3541), 'dbrequests.mysql.tests.conftest.set_up_cats', 'reset', (['db'], {}), '(db)\n', (3537, 3541), True, 'from dbrequests.mysql.tests.conftest import set_up_cats as reset\n'), ((4389, 4398), 'dbrequests.mysql.tests.conftest.set_up_cats', 'reset', (['db'], {}), '(db)\n', (4394, 4398), True, 'from dbrequests.mysql.tests.conftest import set_up_cats as reset\n'), ((4870, 4947), 'pandas.DataFrame', 'pd.DataFrame', (["{'name': ['Chill'], 'owner': ['Alex'], 'birth': ['2018-03-03']}"], {}), "({'name': ['Chill'], 'owner': ['Alex'], 'birth': ['2018-03-03']})\n", (4882, 4947), True, 'import pandas as pd\n'), ((5003, 5012), 'dbrequests.mysql.tests.conftest.set_up_cats', 'reset', (['db'], {}), '(db)\n', (5008, 5012), True, 'from dbrequests.mysql.tests.conftest import set_up_cats as reset\n'), ((5351, 5446), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': [3], 'name': ['Charlie'], 'owner': ['River'], 'birth': ['2016-05-22']}"], {}), "({'id': [3], 'name': ['Charlie'], 'owner': ['River'], 'birth':\n ['2016-05-22']})\n", (5363, 5446), True, 'import pandas as pd\n'), ((5510, 5519), 'dbrequests.mysql.tests.conftest.set_up_cats', 'reset', (['db'], {}), '(db)\n', (5515, 5519), True, 'from dbrequests.mysql.tests.conftest import set_up_cats as reset\n'), ((5905, 5998), 'pandas.DataFrame', 
'pd.DataFrame', (["{'id': [1], 'name': ['Chill'], 'owner': ['Alex'], 'birth': ['2018-03-03']}"], {}), "({'id': [1], 'name': ['Chill'], 'owner': ['Alex'], 'birth': [\n '2018-03-03']})\n", (5917, 5998), True, 'import pandas as pd\n'), ((6061, 6070), 'dbrequests.mysql.tests.conftest.set_up_cats', 'reset', (['db'], {}), '(db)\n', (6066, 6070), True, 'from dbrequests.mysql.tests.conftest import set_up_cats as reset\n'), ((6506, 6599), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': [1], 'name': ['Chill'], 'owner': ['Alex'], 'birth': ['2018-03-03']}"], {}), "({'id': [1], 'name': ['Chill'], 'owner': ['Alex'], 'birth': [\n '2018-03-03']})\n", (6518, 6599), True, 'import pandas as pd\n'), ((6662, 6671), 'dbrequests.mysql.tests.conftest.set_up_cats', 'reset', (['db'], {}), '(db)\n', (6667, 6671), True, 'from dbrequests.mysql.tests.conftest import set_up_cats as reset\n'), ((6964, 7060), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': [1], 'name': ['Chill'], 'owner': ['Alex'], 'wrong_col': ['2018-03-03']}"], {}), "({'id': [1], 'name': ['Chill'], 'owner': ['Alex'], 'wrong_col':\n ['2018-03-03']})\n", (6976, 7060), True, 'import pandas as pd\n'), ((7124, 7133), 'dbrequests.mysql.tests.conftest.set_up_cats', 'reset', (['db'], {}), '(db)\n', (7129, 7133), True, 'from dbrequests.mysql.tests.conftest import set_up_cats as reset\n'), ((7650, 7743), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': [1], 'name': ['Chill'], 'owner': ['Alex'], 'birth': ['2018-03-03']}"], {}), "({'id': [1], 'name': ['Chill'], 'owner': ['Alex'], 'birth': [\n '2018-03-03']})\n", (7662, 7743), True, 'import pandas as pd\n'), ((7806, 7815), 'dbrequests.mysql.tests.conftest.set_up_cats', 'reset', (['db'], {}), '(db)\n', (7811, 7815), True, 'from dbrequests.mysql.tests.conftest import set_up_cats as reset\n'), ((8558, 8681), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': [1, 4], 'name': ['Chill', 'Pi'], 'owner': ['Alex', 'Matt'], 'birth':\n ['2018-03-03', '2019-08-05']}"], {}), "({'id': [1, 4], 'name': ['Chill', 'Pi'], 
'owner': ['Alex',\n 'Matt'], 'birth': ['2018-03-03', '2019-08-05']})\n", (8570, 8681), True, 'import pandas as pd\n'), ((8745, 8754), 'dbrequests.mysql.tests.conftest.set_up_cats', 'reset', (['db'], {}), '(db)\n', (8750, 8754), True, 'from dbrequests.mysql.tests.conftest import set_up_cats as reset\n'), ((9227, 9277), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': [2], 'birth': ['2014-11-13']}"], {}), "({'id': [2], 'birth': ['2014-11-13']})\n", (9239, 9277), True, 'import pandas as pd\n'), ((9369, 9464), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': [2], 'name': ['Cookie'], 'owner': ['Casey'], 'birth': ['2014-11-13']}"], {}), "({'id': [2], 'name': ['Cookie'], 'owner': ['Casey'], 'birth': [\n '2014-11-13']})\n", (9381, 9464), True, 'import pandas as pd\n'), ((9526, 9535), 'dbrequests.mysql.tests.conftest.set_up_cats', 'reset', (['db'], {}), '(db)\n', (9531, 9535), True, 'from dbrequests.mysql.tests.conftest import set_up_cats as reset\n'), ((10694, 10787), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': [1], 'name': ['Chill'], 'owner': [np.nan], 'birth': ['2018-03-03']}"], {}), "({'id': [1], 'name': ['Chill'], 'owner': [np.nan], 'birth': [\n '2018-03-03']})\n", (10706, 10787), True, 'import pandas as pd\n'), ((10850, 10859), 'dbrequests.mysql.tests.conftest.set_up_cats', 'reset', (['db'], {}), '(db)\n', (10855, 10859), True, 'from dbrequests.mysql.tests.conftest import set_up_cats as reset\n'), ((11238, 11247), 'dbrequests.mysql.tests.conftest.set_up_cats', 'reset', (['db'], {}), '(db)\n', (11243, 11247), True, 'from dbrequests.mysql.tests.conftest import set_up_cats as reset\n'), ((11641, 11650), 'dbrequests.mysql.tests.conftest.set_up_cats', 'reset', (['db'], {}), '(db)\n', (11646, 11650), True, 'from dbrequests.mysql.tests.conftest import set_up_cats as reset\n'), ((12146, 12166), 'dbrequests.mysql.tests.conftest.set_up_membership', 'reset_membership', (['db'], {}), '(db)\n', (12162, 12166), True, 'from dbrequests.mysql.tests.conftest import set_up_membership as 
reset_membership, set_up_diffs as reset_diffs\n'), ((12725, 12751), 'numpy.isnan', 'np.isnan', (['df_in.average[3]'], {}), '(df_in.average[3])\n', (12733, 12751), True, 'import numpy as np\n'), ((1677, 1719), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': [4, 5], 'value': 'd'}"], {}), "({'id': [4, 5], 'value': 'd'})\n", (1689, 1719), True, 'import pandas as pd\n'), ((3649, 3697), 'pytest.raises', 'pytest.raises', (['(OperationalError, InternalError)'], {}), '((OperationalError, InternalError))\n', (3662, 3697), False, 'import pytest\n'), ((3843, 3891), 'pytest.raises', 'pytest.raises', (['(OperationalError, InternalError)'], {}), '((OperationalError, InternalError))\n', (3856, 3891), False, 'import pytest\n'), ((7224, 7272), 'pytest.raises', 'pytest.raises', (['(OperationalError, InternalError)'], {}), '((OperationalError, InternalError))\n', (7237, 7272), False, 'import pytest\n'), ((12822, 12833), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (12830, 12833), True, 'import numpy as np\n')] |
"""
Base Model
Base structure for creation of new models
Methods:
calc_error: Estimates error according to SciKit's regression metrics
filter_ts: Returns model's residuals
"""
import sys
sys.path.append('../')
from skfore.skfore import series_viewer
from skfore.datasets import *
import pandas
import numpy
import scipy
import sklearn
import matplotlib
import random
import math
from skfore.extras import add_next_date
from sklearn import preprocessing
class BaseModel():
    # Abstract base for the skfore forecasting models.  ``test`` enforces
    # that subclasses define ``fit`` and ``forecast``; the code below also
    # calls ``simulate`` and ``__get_X__``, which are presumably supplied by
    # subclasses as well -- TODO confirm.
    def __init__(self):
        # residuals: series_viewer wrapper, populated by set_residuals().
        # scaler: sklearn MinMaxScaler fitted lazily by normalize().
        self.residuals = None
        self.scaler = None
        self.test()
    def test(self):
        """ Raises error if there are not any of the necessary methods defined """
        if (not "fit" in dir(self)):
            raise ValueError('Method "fit" has not been defined')
        if (not "forecast" in dir(self)):
            raise ValueError('Method "forecast" has not been defined')
    def calc_error(self, ts, error_function = None, ignore_first = None):
        """ Estimates error according to SciKit's regression metrics
        Args:
            ts: Time series to estimate the model
            error_function (None or error function): Error function whose
                parameters are real time series and estimated time series. If
                None, error_function is Sci-Kit learn's mean squared error
            ignore_first (None or int): Number of leading observations to
                drop before scoring; defaults to the model order (self.q,
                else self.p, else 0)
        """
        # Drop the warm-up samples an AR/MA-style model cannot predict.
        if ignore_first != None:
            ignore = ignore_first
        else:
            try:
                ignore = self.q
            except:
                try:
                    ignore = self.p
                except:
                    ignore = 0
        y_estimated = self.simulate(ts)[ignore:]
        y_real = ts[ignore:]
        if (error_function == None):
            error = sklearn.metrics.mean_squared_error(y_real, y_estimated)
        else:
            error = error_function(y_real, y_estimated)
        return error
    def filter_ts(self, ts, ignore_first = None):
        """ Returns model's residuals
        Args:
            ts: Time series to estimate residuals
            ignore_first (None or int): Number of leading observations to
                drop; defaults to the model order (self.q, else self.p,
                else 0)
        """
        if ignore_first != None:
            ignore = ignore_first
        else:
            try:
                ignore = self.q
            except:
                try:
                    ignore = self.p
                except:
                    ignore = 0
        # residual = observed - fitted, aligned on the pandas index
        prediction = self.simulate(ts)[ignore:]
        residuals = ts[ignore:].subtract(prediction)
        return residuals
    def set_residuals(self, residuals):
        # Wrap the residual series so the diagnostic helpers below can
        # delegate to series_viewer.
        self.residuals = series_viewer(residuals)
    """ Residuals analysis """
    # Thin delegates to the series_viewer stored in self.residuals; calling
    # them before set_residuals() fails with an AttributeError on None.
    def time_plot(self):
        self.residuals.time_plot()
    def ACF_plot(self):
        self.residuals.ACF_plot()
    def PACF_plot(self):
        self.residuals.PACF_plot()
    def qq_plot(self):
        self.residuals.qq_plot()
    def density_plot(self):
        self.residuals.density_plot()
    def histogram(self):
        self.residuals.histogram()
    def normality(self):
        self.residuals.normality()
    def update(self, kwargs):
        # Bulk-assign attributes from a plain dict; returns self so calls
        # can be chained.
        for key in kwargs.keys():
            setattr(self, key, kwargs[key])
        return self
    def predict(self, ts, periods, tsp = None, blind = True, confidence_interval = None, iterations = 300, error_sample = 'bootstrap', ignore_first = None, random_state = 100):
        """ Predicts future values in a given period
        Args:
            ts (pandas.Series): Time series to predict
            periods (int): Number of periods ahead to predict
            tsp (pandas.Series): Predicted time series to compare future values
            blind (boolean): True to forecast without using predicted time
                series or False to use it in forecasting
            confidence_interval (double): Confidence interval level
            iterations (int): Number of iterations
            error_sample (str): Use 'bootstrap' to forecast using sample errors
                of filtered time series or 'normal' to forecast using errors
                from a gaussian distribution with known variance
            random_state (int): Determines random number generation for seed
        Returns:
            Dataframe of confidence intervals and time series of predicted
            values: (ci_inf, ci_sup, series)
        """
        random.seed(random_state)
        # Non-blind mode: the observed future values in ``tsp`` re-anchor
        # the simulation one step at a time, so the confidence band is
        # rebuilt at every step.
        if blind == False:
            if tsp is None:
                raise ValueError('Predicted time series not defined for no blind forecast')
            else:
                if error_sample == 'bootstrap':
                    if confidence_interval is None:
                        c_i = 0.95
                    else:
                        c_i = confidence_interval
                    for i in range(len(tsp)):
                        if i == 0:
                            tse = ts
                            simul_step = self.bootstrap(tse, 1, confidence_interval = c_i, iterations = iterations)
                            simul_result = simul_step.transpose()
                            y = ts
                        else:
                            # NOTE(review): Series.append is deprecated in
                            # modern pandas -- worth migrating to pd.concat.
                            tse = ts.append(tsp[0:i])
                            simul_step = self.bootstrap(tse, 1, confidence_interval = c_i, iterations = iterations)
                            simul_result = simul_result.append(simul_step.transpose())
                        value = self.forecast(y)
                        y = add_next_date(y, value)
                    prediction = y[-len(tsp):]
                    prediction.name = 'series'
                    ci = pandas.DataFrame([prediction], index = ['series'])
                    result = ci.append(simul_result.transpose())
                elif error_sample == 'normal':
                    # Gaussian errors; when a confidence level is given, the
                    # bootstrap band is computed as well and appended.
                    if confidence_interval is None:
                        for i in range(len(tsp)):
                            if i == 0:
                                tse = ts
                                simul_step = self.normal_error(1, tse, ignore_first)
                                simul_result = simul_step
                                y = ts
                            else:
                                tse = ts.append(tsp[0:i])
                                simul_step = self.normal_error(1, tse, ignore_first)
                                simul_result = simul_result.append(simul_step)
                            value = self.forecast(y)
                            y = add_next_date(y, value)
                        prediction = y[-len(tsp):]
                        prediction.name = 'series'
                        ci = pandas.DataFrame([prediction], index = ['series'])
                        result = ci.append(simul_result.transpose())
                    else:
                        for i in range(len(tsp)):
                            if i == 0:
                                tse = ts
                                simul_step = self.normal_error(1, tse, ignore_first)
                                simul_step_b = self.bootstrap(tse, 1, confidence_interval = confidence_interval, iterations = iterations)
                                simul_result = simul_step
                                simul_result_b = simul_step_b.transpose()
                                y = ts
                            else:
                                tse = ts.append(tsp[0:i])
                                simul_step = self.normal_error(1, tse, ignore_first)
                                simul_step_b = self.bootstrap(tse, 1, confidence_interval = confidence_interval, iterations = iterations)
                                simul_result = simul_result.append(simul_step)
                                simul_result_b = simul_result_b.append(simul_step_b.transpose())
                            value = self.forecast(y)
                            y = add_next_date(y, value)
                        prediction = y[-len(tsp):]
                        prediction.name = 'series'
                        ci = pandas.DataFrame([prediction], index = ['series'])
                        result = ci.append(simul_result.transpose())
                        result = result.append(simul_result_b.transpose())
                else:
                    raise ValueError('Error sample has not been defined correctly')
        else:
            # Blind mode: iterate the one-step forecast ``periods`` times,
            # feeding each prediction back as the next observation.
            for i in range(periods):
                if i == 0:
                    y = ts
                value = self.forecast(y)
                y = add_next_date(y, value)
            if error_sample == 'bootstrap':
                if confidence_interval is None:
                    c_i = 0.95
                else:
                    c_i = confidence_interval
                ci = self.bootstrap(ts, periods, c_i, iterations)
                prediction = y[-periods:]
                prediction.name = 'series'
                result = ci.append(prediction)
            elif error_sample == 'normal':
                prediction = y[-periods:]
                prediction.name = 'series'
                ci = pandas.DataFrame([prediction], index = ['series'])
                if confidence_interval is None:
                    simulation = self.normal_error(periods, ts, ignore_first)
                    result = ci.append(simulation.transpose())
                else:
                    simulation = self.normal_error(periods, ts, ignore_first)
                    simulation_b = self.bootstrap(ts, periods, confidence_interval, iterations)
                    result = ci.append(simulation.transpose())
                    result = result.append(simulation_b)
            else:
                raise ValueError('Error sample has not been defined correctly')
        # One column per named series ('series', 'normal', 'bootstrap', ...);
        # 'forecast' aliases the column matching the chosen error_sample.
        result = result.transpose()
        if error_sample == 'bootstrap':
            result['forecast'] = result.bootstrap
        elif error_sample == 'normal':
            result['forecast'] = result.normal
        result['real'] = tsp
        return result
    def bootstrap(self, ts, periods = 5,confidence_interval = 0.95, iterations = 500):
        """ Resamples historical residuals to build an empirical band for a
        ``periods``-step forecast
        Returns:
            DataFrame with rows 'ci_inf', 'ci_sup' and 'bootstrap' (the
            resampled mean), one column per forecast step
        """
        try:
            ignore = self.q
        except:
            try:
                ignore = self.p
            except:
                ignore = 0
        values = self.filter_ts(ts, ignore).values
        results = list()
        for i in range(iterations):
            for j in range(periods):
                # Draw one residual at random and add it to the point forecast.
                train = sklearn.utils.resample(values, n_samples = 1)
                if j == 0:
                    y = ts
                else:
                    y = add_next_date(y, next_value_bootstrap)
                next_value = self.forecast(y)
                next_value_bootstrap = next_value + train[0]
                result_complete = add_next_date(y, next_value_bootstrap)
                result = result_complete[-periods:]
            results.append(result)
        results = pandas.DataFrame(results)
        # NOTE(review): quantile(1 - ci) / quantile(ci) yields a (2*ci - 1)
        # central band for ci > 0.5 -- confirm this is the intended coverage.
        ci_inf = results.quantile(1-confidence_interval)
        ci_sup = results.quantile(confidence_interval)
        mean = results.mean()
        ci = pandas.DataFrame([ci_inf, ci_sup, mean], index = ['ci_inf', 'ci_sup', 'bootstrap'])
        return ci
    def normal_error(self, n, ts, ignore_first = None):
        # Simulates an n-step forecast perturbed by gaussian noise scaled to
        # the residuals of the fitted model.
        residuals = self.filter_ts(ts, ignore_first)
        var = numpy.std(residuals)  # NOTE(review): this is the std, not the variance, despite the name
        generated_values = numpy.random.normal(0, var, n)
        for i in range(n):
            if i == 0:
                y = ts
            value = self.forecast(y)
            value = value + generated_values[i]
            y = add_next_date(y, value)
        result = pandas.DataFrame(y[-n:].values, index = y[-n:].index, columns = ['normal'])
        return result
    def plot_forecast(self, ts, periods = 5, tsp = None, blind = True, confidence_interval = None, iterations = 300, ignore_first = None):
        # Plots the observed series, the fitted values and (unless
        # periods == False) the forecast with its confidence band.
        fitted_ts = self.simulate(ts)
        fitted_ts.index = ts.index
        last = ts[-1:]
        if ignore_first != None:
            ignore = ignore_first
        else:
            try:
                ignore = self.q
            except:
                try:
                    ignore = self.p
                except:
                    ignore = 0
        fitted_ts_plot = fitted_ts[ignore:]
        if periods == False:
            pass
        else:
            forecast_ts = self.predict(ts, periods, tsp, blind, confidence_interval, iterations)
            # Prepend the last observation so the plotted lines connect.
            ci_inf = last.append(forecast_ts['ci_inf'])
            ci_sup = last.append(forecast_ts['ci_sup'])
            tseries = last.append(forecast_ts['forecast'])
        if periods == False:
            matplotlib.pyplot.plot(ts, 'k-')
            matplotlib.pyplot.plot(fitted_ts_plot, 'b-')
            matplotlib.pyplot.legend(['Real', 'Fitted'])
        else:
            matplotlib.pyplot.plot(ts, 'k-')
            matplotlib.pyplot.plot(fitted_ts_plot, 'c-')
            matplotlib.pyplot.plot(tseries, 'b-')
            matplotlib.pyplot.plot(ci_inf, 'r--')
            matplotlib.pyplot.plot(ci_sup, 'r--')
            # Vertical marker at the forecast origin.
            matplotlib.pyplot.axvline(x = ts[-1:].index, color = 'k', linestyle = '--')
            if tsp is None:
                pass
            else:
                matplotlib.pyplot.plot(tsp, 'k-')
        if confidence_interval != None:
            matplotlib.pyplot.legend(['Real', 'Fitted', 'Forecast', 'CI', 'CI'])
        else:
            matplotlib.pyplot.legend(['Real', 'Fitted', 'Forecast'])
    def plot(self, ts, forecast = None, ignore_first = None):
        # Plots the fitted model and, when a ``forecast`` frame from
        # predict() is given, the forecast, its CI and the real values.
        fitted_ts = self.simulate(ts)
        fitted_ts.index = ts.index
        last = ts[-1:]
        if ignore_first != None:
            ignore = ignore_first
        else:
            try:
                ignore = self.p
            except:
                try:
                    ignore = self.q
                except:
                    ignore = 0
        fitted_ts_plot = fitted_ts[ignore:]
        if forecast is None:
            matplotlib.pyplot.plot(ts, 'k-')
            matplotlib.pyplot.plot(fitted_ts_plot, 'b-')
            matplotlib.pyplot.legend(['Real', 'Fitted'])
            matplotlib.pyplot.legend(['Real', 'Fitted', 'Forecast'])
        else:
            matplotlib.pyplot.plot(ts, 'k-')
            matplotlib.pyplot.plot(fitted_ts_plot, 'c-')
            tseries = last.append(forecast['forecast'])
            matplotlib.pyplot.plot(tseries, 'b-')
            if 'ci_inf' in forecast:
                ci_inf = last.append(forecast['ci_inf'])
                matplotlib.pyplot.plot(ci_inf, 'r--')
            if 'ci_sup' in forecast:
                ci_sup = last.append(forecast['ci_sup'])
                matplotlib.pyplot.plot(ci_sup, 'r--')
            matplotlib.pyplot.plot(forecast['real'], 'k-')
            matplotlib.pyplot.axvline(x = ts[-1:].index, color = 'k', linestyle = '--')
            if 'ci_inf' in forecast:
                matplotlib.pyplot.legend(['Real', 'Fitted', 'Forecast', 'CI', 'CI'])
    def cross_validation(self, ts, n_splits, error_function = None):
        # Rolling-origin cross validation via sklearn's TimeSeriesSplit.
        # NOTE(review): each fold only scores calc_error on y_test; the model
        # is never refit on y_train here -- confirm this is intentional.
        X = numpy.array(self.__get_X__(ts))
        y = numpy.array(ts.values.tolist())
        y_index = numpy.array(ts.index)
        tscv = sklearn.model_selection.TimeSeriesSplit(n_splits = n_splits)
        splits = tscv.split(X)
        error_list = list()
        for train_index, test_index in splits:
            y_train, y_test = y[train_index], y[test_index]
            y_train_index, y_test_index = y_index[train_index], y_index[test_index]
            y_train = pandas.Series((v for v in y_train), index = y_train_index)
            y_test = pandas.Series((v for v in y_test), index = y_test_index)
            error = self.calc_error(y_test, error_function)
            error_list.append(error)
        return error_list
    #def get_predict_ci(self, ts, confidence_interval = 0.95, iterations = 1000):
    #    values = self.filter_ts(ts).values
    #    serie = self.simulate(ts).values
    #    results = list()
    #    for i in range(iterations):
    #        result = list()
    #        for j in range(len(serie)):
    #            train = sklearn.utils.resample(values, n_samples = 1)
    #            new_value = train[0] + serie[j]
    #            result.append(new_value)
    #        results.append(result)
    #    results = pandas.DataFrame(results)
    #    minim = results.quantile(1-confidence_interval)
    #    maxim = results.quantile(confidence_interval)
    #    final_result = pandas.DataFrame([minim, maxim])
    #    return final_result
    def normalize(self, ts):
        # Min-max scales ts to [0, 1]; the scaler is fitted on first use and
        # cached so later calls (and des_normalize) reuse the same scaling.
        if self.scaler == None:
            scaler = preprocessing.MinMaxScaler()
            values = ts.values.reshape((len(ts.values), 1))
            scaler.fit(values)
            self.scaler = scaler
        else:
            values = ts.values.reshape((len(ts.values), 1))
            scaler = self.scaler
        normalized = scaler.transform(values)
        norm_series = pandas.Series((v[0] for v in normalized), index = ts.index)
        return norm_series
    def des_normalize(self, ts):
        # Inverse of normalize(); requires normalize() to have run first so
        # self.scaler is fitted.
        values = ts.values.reshape((len(ts.values), 1))
        des_norm = self.scaler.inverse_transform(values)
        des_norm_series = pandas.Series((v[0] for v in des_norm), index = ts.index)
        return des_norm_series
| [
"numpy.random.normal",
"pandas.Series",
"pandas.DataFrame",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.plot",
"random.seed",
"sklearn.model_selection.TimeSeriesSplit",
"sklearn.metrics.mean_squared_error",
"numpy.array",
"skfore.extras.add_next_date",
"sklearn.utils.resample",
"n... | [((198, 220), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (213, 220), False, 'import sys\n'), ((2531, 2555), 'skfore.skfore.series_viewer', 'series_viewer', (['residuals'], {}), '(residuals)\n', (2544, 2555), False, 'from skfore.skfore import series_viewer\n'), ((4299, 4324), 'random.seed', 'random.seed', (['random_state'], {}), '(random_state)\n', (4310, 4324), False, 'import random\n'), ((10835, 10860), 'pandas.DataFrame', 'pandas.DataFrame', (['results'], {}), '(results)\n', (10851, 10860), False, 'import pandas\n'), ((11016, 11101), 'pandas.DataFrame', 'pandas.DataFrame', (['[ci_inf, ci_sup, mean]'], {'index': "['ci_inf', 'ci_sup', 'bootstrap']"}), "([ci_inf, ci_sup, mean], index=['ci_inf', 'ci_sup',\n 'bootstrap'])\n", (11032, 11101), False, 'import pandas\n'), ((11243, 11263), 'numpy.std', 'numpy.std', (['residuals'], {}), '(residuals)\n', (11252, 11263), False, 'import numpy\n'), ((11291, 11321), 'numpy.random.normal', 'numpy.random.normal', (['(0)', 'var', 'n'], {}), '(0, var, n)\n', (11310, 11321), False, 'import numpy\n'), ((11539, 11610), 'pandas.DataFrame', 'pandas.DataFrame', (['y[-n:].values'], {'index': 'y[-n:].index', 'columns': "['normal']"}), "(y[-n:].values, index=y[-n:].index, columns=['normal'])\n", (11555, 11610), False, 'import pandas\n'), ((15087, 15108), 'numpy.array', 'numpy.array', (['ts.index'], {}), '(ts.index)\n', (15098, 15108), False, 'import numpy\n'), ((15124, 15182), 'sklearn.model_selection.TimeSeriesSplit', 'sklearn.model_selection.TimeSeriesSplit', ([], {'n_splits': 'n_splits'}), '(n_splits=n_splits)\n', (15163, 15182), False, 'import sklearn\n'), ((16879, 16936), 'pandas.Series', 'pandas.Series', (['(v[0] for v in normalized)'], {'index': 'ts.index'}), '((v[0] for v in normalized), index=ts.index)\n', (16892, 16936), False, 'import pandas\n'), ((17140, 17195), 'pandas.Series', 'pandas.Series', (['(v[0] for v in des_norm)'], {'index': 'ts.index'}), '((v[0] for v in des_norm), 
index=ts.index)\n', (17153, 17195), False, 'import pandas\n'), ((1758, 1813), 'sklearn.metrics.mean_squared_error', 'sklearn.metrics.mean_squared_error', (['y_real', 'y_estimated'], {}), '(y_real, y_estimated)\n', (1792, 1813), False, 'import sklearn\n'), ((11497, 11520), 'skfore.extras.add_next_date', 'add_next_date', (['y', 'value'], {}), '(y, value)\n', (11510, 11520), False, 'from skfore.extras import add_next_date\n'), ((12554, 12586), 'matplotlib.pyplot.plot', 'matplotlib.pyplot.plot', (['ts', '"""k-"""'], {}), "(ts, 'k-')\n", (12576, 12586), False, 'import matplotlib\n'), ((12599, 12643), 'matplotlib.pyplot.plot', 'matplotlib.pyplot.plot', (['fitted_ts_plot', '"""b-"""'], {}), "(fitted_ts_plot, 'b-')\n", (12621, 12643), False, 'import matplotlib\n'), ((12656, 12700), 'matplotlib.pyplot.legend', 'matplotlib.pyplot.legend', (["['Real', 'Fitted']"], {}), "(['Real', 'Fitted'])\n", (12680, 12700), False, 'import matplotlib\n'), ((12727, 12759), 'matplotlib.pyplot.plot', 'matplotlib.pyplot.plot', (['ts', '"""k-"""'], {}), "(ts, 'k-')\n", (12749, 12759), False, 'import matplotlib\n'), ((12772, 12816), 'matplotlib.pyplot.plot', 'matplotlib.pyplot.plot', (['fitted_ts_plot', '"""c-"""'], {}), "(fitted_ts_plot, 'c-')\n", (12794, 12816), False, 'import matplotlib\n'), ((12829, 12866), 'matplotlib.pyplot.plot', 'matplotlib.pyplot.plot', (['tseries', '"""b-"""'], {}), "(tseries, 'b-')\n", (12851, 12866), False, 'import matplotlib\n'), ((12879, 12916), 'matplotlib.pyplot.plot', 'matplotlib.pyplot.plot', (['ci_inf', '"""r--"""'], {}), "(ci_inf, 'r--')\n", (12901, 12916), False, 'import matplotlib\n'), ((12929, 12966), 'matplotlib.pyplot.plot', 'matplotlib.pyplot.plot', (['ci_sup', '"""r--"""'], {}), "(ci_sup, 'r--')\n", (12951, 12966), False, 'import matplotlib\n'), ((12979, 13048), 'matplotlib.pyplot.axvline', 'matplotlib.pyplot.axvline', ([], {'x': 'ts[-1:].index', 'color': '"""k"""', 'linestyle': '"""--"""'}), "(x=ts[-1:].index, color='k', linestyle='--')\n", (13004, 
13048), False, 'import matplotlib\n'), ((13904, 13936), 'matplotlib.pyplot.plot', 'matplotlib.pyplot.plot', (['ts', '"""k-"""'], {}), "(ts, 'k-')\n", (13926, 13936), False, 'import matplotlib\n'), ((13949, 13993), 'matplotlib.pyplot.plot', 'matplotlib.pyplot.plot', (['fitted_ts_plot', '"""b-"""'], {}), "(fitted_ts_plot, 'b-')\n", (13971, 13993), False, 'import matplotlib\n'), ((14006, 14050), 'matplotlib.pyplot.legend', 'matplotlib.pyplot.legend', (["['Real', 'Fitted']"], {}), "(['Real', 'Fitted'])\n", (14030, 14050), False, 'import matplotlib\n'), ((14063, 14119), 'matplotlib.pyplot.legend', 'matplotlib.pyplot.legend', (["['Real', 'Fitted', 'Forecast']"], {}), "(['Real', 'Fitted', 'Forecast'])\n", (14087, 14119), False, 'import matplotlib\n'), ((14146, 14178), 'matplotlib.pyplot.plot', 'matplotlib.pyplot.plot', (['ts', '"""k-"""'], {}), "(ts, 'k-')\n", (14168, 14178), False, 'import matplotlib\n'), ((14191, 14235), 'matplotlib.pyplot.plot', 'matplotlib.pyplot.plot', (['fitted_ts_plot', '"""c-"""'], {}), "(fitted_ts_plot, 'c-')\n", (14213, 14235), False, 'import matplotlib\n'), ((14304, 14341), 'matplotlib.pyplot.plot', 'matplotlib.pyplot.plot', (['tseries', '"""b-"""'], {}), "(tseries, 'b-')\n", (14326, 14341), False, 'import matplotlib\n'), ((14652, 14698), 'matplotlib.pyplot.plot', 'matplotlib.pyplot.plot', (["forecast['real']", '"""k-"""'], {}), "(forecast['real'], 'k-')\n", (14674, 14698), False, 'import matplotlib\n'), ((14711, 14780), 'matplotlib.pyplot.axvline', 'matplotlib.pyplot.axvline', ([], {'x': 'ts[-1:].index', 'color': '"""k"""', 'linestyle': '"""--"""'}), "(x=ts[-1:].index, color='k', linestyle='--')\n", (14736, 14780), False, 'import matplotlib\n'), ((15459, 15515), 'pandas.Series', 'pandas.Series', (['(v for v in y_train)'], {'index': 'y_train_index'}), '((v for v in y_train), index=y_train_index)\n', (15472, 15515), False, 'import pandas\n'), ((15539, 15593), 'pandas.Series', 'pandas.Series', (['(v for v in y_test)'], {'index': 'y_test_index'}), 
'((v for v in y_test), index=y_test_index)\n', (15552, 15593), False, 'import pandas\n'), ((16551, 16579), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (16577, 16579), False, 'from sklearn import preprocessing\n'), ((8454, 8477), 'skfore.extras.add_next_date', 'add_next_date', (['y', 'value'], {}), '(y, value)\n', (8467, 8477), False, 'from skfore.extras import add_next_date\n'), ((10361, 10404), 'sklearn.utils.resample', 'sklearn.utils.resample', (['values'], {'n_samples': '(1)'}), '(values, n_samples=1)\n', (10383, 10404), False, 'import sklearn\n'), ((10689, 10727), 'skfore.extras.add_next_date', 'add_next_date', (['y', 'next_value_bootstrap'], {}), '(y, next_value_bootstrap)\n', (10702, 10727), False, 'from skfore.extras import add_next_date\n'), ((13139, 13172), 'matplotlib.pyplot.plot', 'matplotlib.pyplot.plot', (['tsp', '"""k-"""'], {}), "(tsp, 'k-')\n", (13161, 13172), False, 'import matplotlib\n'), ((13234, 13302), 'matplotlib.pyplot.legend', 'matplotlib.pyplot.legend', (["['Real', 'Fitted', 'Forecast', 'CI', 'CI']"], {}), "(['Real', 'Fitted', 'Forecast', 'CI', 'CI'])\n", (13258, 13302), False, 'import matplotlib\n'), ((13337, 13393), 'matplotlib.pyplot.legend', 'matplotlib.pyplot.legend', (["['Real', 'Fitted', 'Forecast']"], {}), "(['Real', 'Fitted', 'Forecast'])\n", (13361, 13393), False, 'import matplotlib\n'), ((14453, 14490), 'matplotlib.pyplot.plot', 'matplotlib.pyplot.plot', (['ci_inf', '"""r--"""'], {}), "(ci_inf, 'r--')\n", (14475, 14490), False, 'import matplotlib\n'), ((14601, 14638), 'matplotlib.pyplot.plot', 'matplotlib.pyplot.plot', (['ci_sup', '"""r--"""'], {}), "(ci_sup, 'r--')\n", (14623, 14638), False, 'import matplotlib\n'), ((14841, 14909), 'matplotlib.pyplot.legend', 'matplotlib.pyplot.legend', (["['Real', 'Fitted', 'Forecast', 'CI', 'CI']"], {}), "(['Real', 'Fitted', 'Forecast', 'CI', 'CI'])\n", (14865, 14909), False, 'import matplotlib\n'), ((5550, 5598), 'pandas.DataFrame', 
'pandas.DataFrame', (['[prediction]'], {'index': "['series']"}), "([prediction], index=['series'])\n", (5566, 5598), False, 'import pandas\n'), ((9019, 9067), 'pandas.DataFrame', 'pandas.DataFrame', (['[prediction]'], {'index': "['series']"}), "([prediction], index=['series'])\n", (9035, 9067), False, 'import pandas\n'), ((10508, 10546), 'skfore.extras.add_next_date', 'add_next_date', (['y', 'next_value_bootstrap'], {}), '(y, next_value_bootstrap)\n', (10521, 10546), False, 'from skfore.extras import add_next_date\n'), ((5406, 5429), 'skfore.extras.add_next_date', 'add_next_date', (['y', 'value'], {}), '(y, value)\n', (5419, 5429), False, 'from skfore.extras import add_next_date\n'), ((6578, 6626), 'pandas.DataFrame', 'pandas.DataFrame', (['[prediction]'], {'index': "['series']"}), "([prediction], index=['series'])\n", (6594, 6626), False, 'import pandas\n'), ((7984, 8032), 'pandas.DataFrame', 'pandas.DataFrame', (['[prediction]'], {'index': "['series']"}), "([prediction], index=['series'])\n", (8000, 8032), False, 'import pandas\n'), ((6422, 6445), 'skfore.extras.add_next_date', 'add_next_date', (['y', 'value'], {}), '(y, value)\n', (6435, 6445), False, 'from skfore.extras import add_next_date\n'), ((7828, 7851), 'skfore.extras.add_next_date', 'add_next_date', (['y', 'value'], {}), '(y, value)\n', (7841, 7851), False, 'from skfore.extras import add_next_date\n')] |
"""Dummy fill to keep density constant."""
import itertools
from typing import Optional, Union
import gdspy
import numpy as np
from numpy import sqrt
from phidl.device_layout import _parse_layer
from phidl.geometry import (
_expand_raster,
_loop_over,
_raster_index_to_coords,
_rasterize_polygons,
)
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.rectangle import rectangle
from gdsfactory.types import Float2, Floats, LayerSpecs
@cell
def fill_cell_rectangle(
    size: Float2 = (20.0, 20.0),
    layers: LayerSpecs = (0, 1, 3),
    densities: Floats = (0.5, 0.25, 0.7),
    inverted=(False, False, False),
):
    """Return a single fill Component drawn on multiple layers.

    Based on phidl.geometry.

    Args:
        size: x, y dimensions of the fill area for all layers.
        layers: specific layer(s) to put fill cell rectangle geometry on.
        densities: fill densities for each layer specified in ``layers``.
            Must be the same size as ``layers``.
        inverted: if true, inverts the fill area for the corresponding
            layer. Must be the same size as ``layers``.
    """
    component = Component()
    for layer, density, invert_layer in zip(layers, densities, inverted):
        # A square scaled by sqrt(density) covers `density` of the cell area.
        side = np.array(size) * sqrt(density)
        fill_rect = rectangle(size=tuple(side), layer=layer, port_type=None)
        fill_rect.center = (0, 0)
        if invert_layer is True:
            # Inverted layer: draw the cell area minus the fill rectangle.
            frame = rectangle(size=size)
            frame.center = (0, 0)
            negative = gdspy.boolean(
                frame.get_polygons(), fill_rect.get_polygons(), operation="not"
            )
            component.add_polygon(negative, layer=layer)
        else:
            component.add_ref(fill_rect)
    return component
@cell
def fill_rectangle(
    component: Component,
    fill_layers: LayerSpecs,
    fill_size: Float2 = (5.0, 5.0),
    avoid_layers: LayerSpecs = None,
    include_layers: LayerSpecs = None,
    margin: float = 5.0,
    fill_densities: Union[float, Floats] = (0.5, 0.25, 0.7),
    fill_inverted: bool = False,
    bbox: Optional[Float2] = None,
) -> Component:
    """Creates a rectangular fill pattern and fills all empty areas
    in the input component and returns a component that contains just the fill.
    Dummy fill keeps density constant during fabrication.
    Args:
        component: Component to fill.
        fill_layers: list of layers. fill pattern layers.
        fill_size: Rectangular size of the fill element.
        avoid_layers: Layers to be avoided (not filled) in D.
        include_layers: Layers to be filled, supersedes avoid_layers.
        margin:
            Margin spacing around avoided areas -- fill will not come within
            `margin` of the geometry in D.
        fill_densities: float between 0 and 1.
            Defines the fill pattern density (1.0 == fully filled).
        fill_inverted: Inverts the fill pattern.
        bbox: x, y limit the fill pattern to the area defined by this bounding box.
    """
    D = component
    # Create the fill cell.
    # If fill_inverted is not specified, assume all False
    # Normalize scalar arguments into lists so layers / densities / inversion
    # flags can be validated and zipped together.
    fill_layers = _loop_over(fill_layers)
    fill_densities = _loop_over(fill_densities)
    if fill_inverted is None:
        fill_inverted = [False] * len(fill_layers)
    fill_inverted = _loop_over(fill_inverted)
    if len(fill_layers) != len(fill_densities):
        raise ValueError(
            "[PHIDL] phidl.geometry.fill_rectangle() "
            "`fill_layers` and `fill_densities` parameters "
            "must be lists of the same length"
        )
    if len(fill_layers) != len(fill_inverted):
        raise ValueError(
            "[PHIDL] phidl.geometry.fill_rectangle() "
            "`fill_layers` and `fill_inverted` parameters must "
            "be lists of the same length"
        )
    fill_cell = fill_cell_rectangle(
        size=fill_size,
        layers=fill_layers,
        densities=fill_densities,
        inverted=fill_inverted,
    )
    # F accumulates only the fill geometry; the input component is unchanged.
    F = Component()
    # NOTE(review): the "all" sentinel is compared against an argument
    # annotated as LayerSpecs — callers must pass the literal string "all"
    # to exclude every layer; confirm this is the intended API.
    if avoid_layers == "all":
        exclude_polys = D.get_polygons(by_spec=False, depth=None)
    else:
        avoid_layers = [_parse_layer(layer) for layer in _loop_over(avoid_layers)]
        exclude_polys = D.get_polygons(by_spec=True, depth=None)
        exclude_polys = {
            key: exclude_polys[key] for key in exclude_polys if key in avoid_layers
        }
        exclude_polys = itertools.chain.from_iterable(exclude_polys.values())
    if include_layers is None:
        include_polys = []
    else:
        include_layers = [_parse_layer(layer) for layer in _loop_over(include_layers)]
        include_polys = D.get_polygons(by_spec=True, depth=None)
        include_polys = {
            key: include_polys[key] for key in include_polys if key in include_layers
        }
        include_polys = itertools.chain.from_iterable(include_polys.values())
    if bbox is None:
        bbox = D.bbox
    # Rasterize the avoided geometry onto a grid of fill-sized pixels;
    # True cells are blocked from receiving fill.
    raster = _rasterize_polygons(
        polygons=exclude_polys, bounds=bbox, dx=fill_size[0], dy=fill_size[1]
    )
    # Cells covered by include_layers are removed from the blocked mask,
    # so they are filled even where they overlap avoided geometry.
    raster = raster & ~_rasterize_polygons(
        polygons=include_polys, bounds=bbox, dx=fill_size[0], dy=fill_size[1]
    )
    # Grow the blocked region by `margin` (expressed in raster cells).
    raster = _expand_raster(raster, distance=margin / np.array(fill_size))
    # Walk each raster row; every run of consecutive empty cells (value 0)
    # becomes a single 1-row array of fill-cell references.
    for i in range(np.size(raster, 0)):
        sub_rasters = [list(g) for k, g in itertools.groupby(raster[i])]
        j = 0
        for s in sub_rasters:
            if s[0] == 0:
                x, y = _raster_index_to_coords(i, j, bbox, fill_size[0], fill_size[1])
                # F.add(gdspy.CellArray(ref_cell = fill_cell,
                #                       columns = len(s), rows = 1,
                #                       spacing = fill_size, ))
                a = F.add_array(fill_cell, columns=len(s), rows=1, spacing=fill_size)
                a.move((x, y))
            j += len(s)
    return F
if __name__ == "__main__":
    import gdsfactory as gf
    # Demo: fill the padding region around a straight waveguide on layer
    # (2, 0) at 50% density, avoiding the waveguide layer (1, 0).
    c = gf.components.straight()
    c = gf.add_padding_container(c)
    c.unlock()
    c << fill_rectangle(
        c,
        fill_layers=((2, 0),),
        # fill_densities=(1.0,),
        fill_densities=0.5,
        avoid_layers=((1, 0),),
        # bbox=(100.0, 100.0),
    )
    c.show()
| [
"gdsfactory.components.rectangle.rectangle",
"numpy.sqrt",
"itertools.groupby",
"gdspy.boolean",
"phidl.geometry._loop_over",
"phidl.geometry._rasterize_polygons",
"numpy.size",
"phidl.geometry._raster_index_to_coords",
"numpy.array",
"gdsfactory.component.Component",
"gdsfactory.add_padding_con... | [((1345, 1356), 'gdsfactory.component.Component', 'Component', ([], {}), '()\n', (1354, 1356), False, 'from gdsfactory.component import Component\n'), ((3345, 3368), 'phidl.geometry._loop_over', '_loop_over', (['fill_layers'], {}), '(fill_layers)\n', (3355, 3368), False, 'from phidl.geometry import _expand_raster, _loop_over, _raster_index_to_coords, _rasterize_polygons\n'), ((3390, 3416), 'phidl.geometry._loop_over', '_loop_over', (['fill_densities'], {}), '(fill_densities)\n', (3400, 3416), False, 'from phidl.geometry import _expand_raster, _loop_over, _raster_index_to_coords, _rasterize_polygons\n'), ((3519, 3544), 'phidl.geometry._loop_over', '_loop_over', (['fill_inverted'], {}), '(fill_inverted)\n', (3529, 3544), False, 'from phidl.geometry import _expand_raster, _loop_over, _raster_index_to_coords, _rasterize_polygons\n'), ((4207, 4218), 'gdsfactory.component.Component', 'Component', ([], {}), '()\n', (4216, 4218), False, 'from gdsfactory.component import Component\n'), ((5151, 5245), 'phidl.geometry._rasterize_polygons', '_rasterize_polygons', ([], {'polygons': 'exclude_polys', 'bounds': 'bbox', 'dx': 'fill_size[0]', 'dy': 'fill_size[1]'}), '(polygons=exclude_polys, bounds=bbox, dx=fill_size[0],\n dy=fill_size[1])\n', (5170, 5245), False, 'from phidl.geometry import _expand_raster, _loop_over, _raster_index_to_coords, _rasterize_polygons\n'), ((6144, 6168), 'gdsfactory.components.straight', 'gf.components.straight', ([], {}), '()\n', (6166, 6168), True, 'import gdsfactory as gf\n'), ((6177, 6204), 'gdsfactory.add_padding_container', 'gf.add_padding_container', (['c'], {}), '(c)\n', (6201, 6204), True, 'import gdsfactory as gf\n'), ((5479, 5497), 'numpy.size', 'np.size', (['raster', '(0)'], {}), '(raster, 0)\n', (5486, 5497), True, 'import numpy as np\n'), ((1447, 1461), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (1455, 1461), True, 'import numpy as np\n'), ((1464, 1477), 'numpy.sqrt', 'sqrt', (['density'], {}), 
'(density)\n', (1468, 1477), False, 'from numpy import sqrt\n'), ((1697, 1717), 'gdsfactory.components.rectangle.rectangle', 'rectangle', ([], {'size': 'size'}), '(size=size)\n', (1706, 1717), False, 'from gdsfactory.components.rectangle import rectangle\n'), ((1830, 1866), 'gdspy.boolean', 'gdspy.boolean', (['A', 'B'], {'operation': '"""not"""'}), "(A, B, operation='not')\n", (1843, 1866), False, 'import gdspy\n'), ((4350, 4369), 'phidl.device_layout._parse_layer', '_parse_layer', (['layer'], {}), '(layer)\n', (4362, 4369), False, 'from phidl.device_layout import _parse_layer\n'), ((4767, 4786), 'phidl.device_layout._parse_layer', '_parse_layer', (['layer'], {}), '(layer)\n', (4779, 4786), False, 'from phidl.device_layout import _parse_layer\n'), ((5279, 5373), 'phidl.geometry._rasterize_polygons', '_rasterize_polygons', ([], {'polygons': 'include_polys', 'bounds': 'bbox', 'dx': 'fill_size[0]', 'dy': 'fill_size[1]'}), '(polygons=include_polys, bounds=bbox, dx=fill_size[0],\n dy=fill_size[1])\n', (5298, 5373), False, 'from phidl.geometry import _expand_raster, _loop_over, _raster_index_to_coords, _rasterize_polygons\n'), ((4383, 4407), 'phidl.geometry._loop_over', '_loop_over', (['avoid_layers'], {}), '(avoid_layers)\n', (4393, 4407), False, 'from phidl.geometry import _expand_raster, _loop_over, _raster_index_to_coords, _rasterize_polygons\n'), ((4800, 4826), 'phidl.geometry._loop_over', '_loop_over', (['include_layers'], {}), '(include_layers)\n', (4810, 4826), False, 'from phidl.geometry import _expand_raster, _loop_over, _raster_index_to_coords, _rasterize_polygons\n'), ((5438, 5457), 'numpy.array', 'np.array', (['fill_size'], {}), '(fill_size)\n', (5446, 5457), True, 'import numpy as np\n'), ((5543, 5571), 'itertools.groupby', 'itertools.groupby', (['raster[i]'], {}), '(raster[i])\n', (5560, 5571), False, 'import itertools\n'), ((5666, 5729), 'phidl.geometry._raster_index_to_coords', '_raster_index_to_coords', (['i', 'j', 'bbox', 'fill_size[0]', 
'fill_size[1]'], {}), '(i, j, bbox, fill_size[0], fill_size[1])\n', (5689, 5729), False, 'from phidl.geometry import _expand_raster, _loop_over, _raster_index_to_coords, _rasterize_polygons\n')] |
from __future__ import absolute_import
from __future__ import print_function
import logging
import numpy as np
from numpy.lib.recfunctions import append_fields
from sklearn.cluster import DBSCAN
from lmatools.coordinateSystems import GeographicSystem
from lmatools.flashsort.flash_stats import calculate_flash_stats, Flash
def gen_stream(vec, IDs):
    """Yield (vector, id) pairs by pairing the two sequences in lockstep."""
    yield from zip(vec, IDs)
def reset_buffer():
    """Return a fresh empty list together with its bound append method.

    Binding ``append`` once avoids repeated attribute lookups in the hot
    loops that consume this helper.
    """
    fresh = []
    return fresh, fresh.append
def gen_chunks(stream, start_time, max_duration, t_idx=-1):
    """Bundle a stream of (vector, id) pairs into time-limited chunks.

    Consumes ``stream`` one (v, vi) pair at a time and groups points into
    chunks of length ``max_duration`` along the time coordinate, where the
    time of each point vector v is ``v[t_idx]``. The point that crosses a
    chunk boundary is included in the chunk it closes, and the next
    boundary is measured from that point's time. A final (possibly empty)
    chunk is always yielded, as (points, ids) numpy arrays.
    """
    deadline = start_time + max_duration
    points, ids = [], []
    for vector, vector_id in stream:
        points.append(vector)
        ids.append(vector_id)
        if vector[t_idx] >= deadline:
            yield (np.asarray(points), np.asarray(ids))
            deadline = vector[t_idx] + max_duration
            points, ids = [], []
    yield (np.asarray(points), np.asarray(ids))
class ChunkedFlashSorter(object):
    """
    Sort LMA data from points to flashes using many small chunks
    of points. Allows for algorithms that do not scale efficiently with
    large numbers of points.
    The __init__ and geo_to_cartesisan
    methods are more generally useful, and could be factored out into a
    generic flash sorting class.
    The actual clustering algorithm must be implemented in identify_clusters.
    A prototype method is provided below which indicates the necessary call
    signature.
    """

    def __init__(self, params, min_points=1, **kwargs):
        """
        params: dictionary of parameters used to perform data QC and clustering
        min_points: the minimum number of points allowed in a cluster
        """
        self.logger = logging.getLogger('FlashAutorunLogger')
        self.logger.info('%s', params)
        self.params = params
        self.min_points = min_points
        # Coordinate-system center; the center altitude is fixed at 0 m.
        self.ctr_lat, self.ctr_lon, self.ctr_alt = (
            params['ctr_lat'], params['ctr_lon'], 0.0)

    def geo_to_cartesisan(self, lon, lat, alt):
        """ Convert lat, lon in degrees and altitude in meters to
        Earth-centered, Earth-fixed cartesian coordinates. Translate
        to coordinate center location. Returns X,Y,Z in meters.

        NOTE: the method name keeps its historical misspelling
        ("cartesisan") for backward compatibility with existing callers.
        """
        geoCS = GeographicSystem()
        X, Y, Z = geoCS.toECEF(lon, lat, alt)
        Xc, Yc, Zc = geoCS.toECEF(self.ctr_lon, self.ctr_lat, self.ctr_alt)
        # Translate so that the configured center is at the origin.
        X, Y, Z = X - Xc, Y - Yc, Z - Zc
        return (X, Y, Z)

    def identify_clusters(self, data):
        """ For data with shape (N, D) in D dimensions, return
        a vector of labels of length N.

        min_points is the minimum number of points required to form a
        a cluster. For the DBSCAN algorithm, this is min_samples for
        a core cluster.

        This function adopts the convention that clusters labeled
        with an ID of -1 are singleton points not belonging to a
        cluster, consistent with the convention of sklearn.cluster.DBSCAN
        """
        err = "Please create a new subclass and implement this method"
        raise NotImplementedError(err)

    def gen_cluster_chunk_pairs(self, stream):
        """ Generator function that consumes a stream of chunks of data,
        and processes overlapping pairs. The stream is to consist of
        tuples of (chunk, pt_id), where pt_id is a unique index for
        each vector in chunk.

        Chunk is of shape (N, D) for N point vectors in D dimensions
        pt_id has shape (N,)

        Calls self.identify_clusters, which returns a vector of N labels.
        The labels are presumed to adopt the convention that clusters labeled
        with an ID of -1 are singleton points not belonging to a
        cluster, consistent with the convention of sklearn.cluster.DBSCAN

        Yields (points, labels, ids) bundles; points clustered with the
        earlier chunk are pulled forward so a flash's labels stay together.
        """
        chunk1, id1 = next(stream)
        for chunk2, id2 in stream:
            len1 = chunk1.shape[0]
            len2 = chunk2.shape[0]
            # Concatenate the two chunks, handling the empty-chunk cases
            # so `conc` is always a valid (N, D) array.
            if len2 == 0:
                conc = chunk1
                concID = id1
                chunk2 = chunk1[0:0, :]
                id2 = id1[0:0]
            elif len1 == 0:
                conc = chunk2
                concID = id2
                chunk1 = chunk2[0:0, :]
                id1 = id2[0:0]
            else:
                print(id1.shape, id2.shape)
                conc = np.vstack((chunk1, chunk2))
                concID = np.concatenate((id1, id2))
            # Cluster the combined pair of chunks.
            labels = self.identify_clusters(conc)
            # Defer sending these in one bundle: all labels from this run of
            # clustering must stay together. Pull data out of chunk2 that was
            # clustered as part of chunk1.
            chunk1_labelset = set(labels[:len1])
            if -1 in chunk1_labelset:
                # Remove the singleton cluster ID - we want to retain
                # those points from chunk 2 for the next pairing.
                chunk1_labelset.remove(-1)
            clustered_in_chunk2 = np.fromiter(
                (True if label in chunk1_labelset else False
                 for i, label in enumerate(labels[len1:])), dtype=bool)
            clustered_in_chunk1 = np.ones(chunk1.shape[0], dtype=bool)
            clustered_mask = np.hstack((clustered_in_chunk1, clustered_in_chunk2))
            bundle_chunks = conc[clustered_mask, :]
            bundle_IDs = concID[clustered_mask]
            bundle_labels = np.concatenate(
                (labels[:len1], labels[len1:][clustered_in_chunk2]))
            assert bundle_chunks.shape[0] == bundle_labels.shape[0]
            yield (bundle_chunks, bundle_labels, bundle_IDs)
            del bundle_chunks, bundle_labels
            # Points in chunk2 not clustered with chunk1 carry over to the
            # next pairing. Because we pulled some points from chunk2 into
            # chunk1's bundle, the data are now out of their original order,
            # so the original data IDs travel with the residual points.
            residuals = conc[clustered_mask == False, :]
            residualIDs = concID[clustered_mask == False]
            # optimization TODO: pull clusters out of chunk 2 whose final
            # point is farther than the distance threshold from the end of
            # the second chunk interval; they are already fully clustered.
            if len(residuals) == 0:
                # Empty array that preserves the number of dimensions
                # in the data vector - no obs.
                residuals = chunk1[0:0, :]
                residualIDs = id1[0:0]
            del chunk1, id1
            chunk1 = np.asarray(residuals)
            id1 = np.asarray(residualIDs)
            del residuals, residualIDs
        # Final leftover chunk gets clustered on its own.
        if chunk1.shape[0] != 0:
            labels = self.identify_clusters(chunk1)
            yield (chunk1, labels, id1)

    def aggregate_ids(self, stream):
        """ Final step in streamed clustering: consume clustered output from
        one or more chunks of data, ensuring that the IDs increment
        across chunk boundaries.

        Returns (unique_labels, point_labels, all_IDs) where point_labels
        and all_IDs are flat integer arrays aligned with each other.
        """
        unique_labels = set([-1])
        total = 0
        point_labels = []
        all_IDs = []
        for (v, orig_labels, IDs) in stream:
            labels = np.atleast_1d(orig_labels).copy()
            if len(unique_labels) > 0:
                # Only add labels that represent valid clusters (nonnegative)
                # to the unique set, offsetting them so labels increment
                # continuously across all chunks received.
                nonsingleton = (labels >= 0)
                labels[nonsingleton] = labels[nonsingleton] + (max(unique_labels) + 1)
            for l in set(labels):
                unique_labels.add(l)
            all_IDs.append(np.asarray(IDs))
            point_labels.append(labels)
            total += v.shape[0]
            del v, orig_labels, labels, IDs
        print("done with {0} total points".format(total))
        if total == 0:
            # No points at all: return empty integer arrays.
            # Bug fix: this previously overwrote point_labels with the IDs
            # instead of converting all_IDs, leaving all_IDs a plain list.
            point_labels = np.asarray(point_labels, dtype=int)
            all_IDs = np.asarray(all_IDs, dtype=int)
        else:
            point_labels = np.concatenate(point_labels)
            all_IDs = np.concatenate(all_IDs)
        print("returning {0} total points".format(total))
        return (unique_labels, point_labels, all_IDs)

    def create_flash_objs(self, lma, good_data, unique_labels, point_labels, all_IDs):
        """ lma is an LMADataset object. Its data instance gets overwritten
        with the qc'd, flash_id'd data, and it gains a flashes attribute
        with a list of flash objects resulting from flash sorting.

        all_IDs gives the index in the original data array to
        which each point_label corresponds.

        unique_labels is the set of all labels produced by previous stages
        in the flash sorting algorithm, including a -1 ID for all singleton flashes.
        """
        logger = self.logger
        # Add a flash_id column if the data table does not already have one.
        empty_labels = np.empty_like(point_labels)
        # placing good_data in a list due to this bug when good_data has length 1
        # http://stackoverflow.com/questions/36440557/typeerror-when-appending-fields-to-a-structured-array-of-size-one
        if 'flash_id' not in good_data.dtype.names:
            data = append_fields([good_data], ('flash_id',), (empty_labels,))
        else:
            data = good_data.copy()
        # all_IDs gives the index in the original data array to
        # which each point_label corresponds
        data['flash_id'][all_IDs] = point_labels
        # In the case of no data, lma.data.shape will have
        # length zero, i.e., a 0-d array
        if (len(data.shape) == 0) | (data.shape[0] == 0):
            # No data
            flashes = []
        else:
            # Work first with non-singleton flashes
            # to have strictly positive flash ids.
            print(data.shape)
            singles = (data['flash_id'] == -1)
            non_singleton = data[ np.logical_not(singles) ]
            print(non_singleton['flash_id'].shape)
            order = np.argsort(non_singleton['flash_id'])
            ordered_data = non_singleton[order]
            flid = ordered_data['flash_id']
            if (flid.shape[0] > 0):
                max_flash_id = flid[-1]
            else:
                max_flash_id = 0
            try:
                assert max_flash_id == max(unique_labels)
            except AssertionError:
                print("Max flash ID {0} is not as expected from unique labels {1}".format(max_flash_id, max(unique_labels)))
            # Boundaries are where consecutive flash ids differ (nonzero diff);
            # each [lower, upper) slice of ordered_data is one flash.
            boundaries, = np.where(flid[1:] - flid[:-1])
            boundaries = np.hstack(([0], boundaries + 1))
            max_idx = len(flid)
            slice_lower_edges = tuple(boundaries)
            slice_upper_edges = slice_lower_edges[1:] + (max_idx,)
            slices = list(zip(slice_lower_edges, slice_upper_edges))
            flashes = [ Flash(ordered_data[slice(*sl)]) for sl in slices ]
            print("finished non-singletons")
            # Now deal with the singleton points. Each singleton point gets
            # a high flash_id, starting just past the previous maximum.
            singleton = data[singles]
            n_singles = singleton.shape[0]
            singleton['flash_id'] += max_flash_id + 1 + np.arange(n_singles, dtype=int)
            singleton_flashes = [ Flash(singleton[i:i + 1]) for i in range(n_singles)]
            # Write the renumbered singleton rows back into the data table.
            data[singles] = singleton
            print("finished singletons")
            flashes += singleton_flashes
        logtext = "Calculating flash initation, centroid, area, etc. for %d flashes" % (len(flashes), )
        logger.info(logtext)
        for fl in flashes:
            fl.metadata = lma.metadata
            calculate_flash_stats(fl)
        logger.info('finished setting flash metadata')
        lma.raw_data = lma.data
        lma.data = data
        # This should be true since the singletons were renumbered above.
        assert (lma.data['flash_id'].min() >= 0)
        lma.flashes = flashes

    def perform_chunked_clustering(self, XYZT, ptIDs, chunk_duration):
        """ Perform clustering of a 4D data vector in overlapping chunks of
        data.

        XYZT: (N,4) array of N 4D point vectors
        ptIDs: array of N unique identifiers of each point vector.
        chunk_duration: duration of chunk in the units along the T coordinate
        """
        if XYZT.shape[0] < 1:
            # No data, so minimum time is zero. Assume nothing is done with
            # the data, so that time doesn't matter. No flashes can result.
            t_min = 0
        else:
            t_min = XYZT[:, -1].min()
        point_stream = gen_stream(XYZT.astype('float64'), ptIDs)
        chunk_stream = gen_chunks(point_stream, t_min, chunk_duration)
        cluster_stream = self.gen_cluster_chunk_pairs(chunk_stream)
        unique_labels, point_labels, all_IDs = self.aggregate_ids(cluster_stream)
        return unique_labels, point_labels, all_IDs

    def cluster(self, dataset, **kwargs):
        """ Cluster an lmatools LMADataset provided in the dataset argument.

        Basic filtering of the data is performed by calling the filter_data
        method of the dataset, which returns a filtered data array. The
        params are provided by the argument used to initialize this class.

        This method modifies dataset as a side effect.
        """
        data = dataset.filter_data(self.params)
        print("sorting {0} total points".format(data.shape[0]))
        # Transform to cartesian coordinates.
        X, Y, Z = self.geo_to_cartesisan(data['lon'], data['lat'], data['alt'])
        # Assemble a normalized data vector using flash sorting parameters:
        # space is scaled by the distance threshold, time by the critical time,
        # so a unit distance in the 4D space equals one clustering threshold.
        D_max, t_max = (self.params['distance'],               # meters
                        self.params['thresh_critical_time'])   # seconds
        duration_max = self.params['thresh_duration']          # seconds
        IDs = np.arange(X.shape[0])  # vector of unique point IDs
        X_vector = np.hstack((X[:, None], Y[:, None], Z[:, None])) / D_max
        T_vector = data['time'][:, None] / t_max
        XYZT = np.hstack((X_vector, T_vector))
        # Perform chunked clustering of the data.
        normed_chunk_duration = duration_max / t_max
        unique_labels, point_labels, all_IDs = self.perform_chunked_clustering(XYZT, IDs, normed_chunk_duration)
        # Calculate flash metadata and store it in flash objects.
        # TODO: this should be factored into something that modifies the data
        # table and something that creates the flash objects themselves.
        self.create_flash_objs(dataset, data, unique_labels, point_labels, all_IDs)
class DBSCANFlashSorter(ChunkedFlashSorter):
    def identify_clusters(self, data):
        """ Label N points of shape (N, D) with integer cluster IDs.

        Runs sklearn's DBSCAN with eps=1.0 and min_samples set to
        self.min_points, the minimum number of points required to seed a
        core cluster. Points not assigned to any cluster carry the label
        -1, following the sklearn.cluster.DBSCAN convention.
        """
        model = DBSCAN(eps=1.0, min_samples=self.min_points, metric='euclidean')
        fitted = model.fit(data)
        return fitted.labels_.astype(int)
| [
"logging.getLogger",
"lmatools.flashsort.flash_stats.calculate_flash_stats",
"numpy.ones",
"numpy.hstack",
"numpy.arange",
"sklearn.cluster.DBSCAN",
"numpy.lib.recfunctions.append_fields",
"numpy.where",
"numpy.asarray",
"numpy.logical_not",
"numpy.argsort",
"numpy.empty_like",
"numpy.vstack... | [((2228, 2267), 'logging.getLogger', 'logging.getLogger', (['"""FlashAutorunLogger"""'], {}), "('FlashAutorunLogger')\n", (2245, 2267), False, 'import logging\n'), ((2801, 2819), 'lmatools.coordinateSystems.GeographicSystem', 'GeographicSystem', ([], {}), '()\n', (2817, 2819), False, 'from lmatools.coordinateSystems import GeographicSystem\n'), ((9928, 9955), 'numpy.empty_like', 'np.empty_like', (['point_labels'], {}), '(point_labels)\n', (9941, 9955), True, 'import numpy as np\n'), ((15496, 15517), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (15505, 15517), True, 'import numpy as np\n'), ((15680, 15711), 'numpy.hstack', 'np.hstack', (['(X_vector, T_vector)'], {}), '((X_vector, T_vector))\n', (15689, 15711), True, 'import numpy as np\n'), ((16856, 16920), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': '(1.0)', 'min_samples': 'self.min_points', 'metric': '"""euclidean"""'}), "(eps=1.0, min_samples=self.min_points, metric='euclidean')\n", (16862, 16920), False, 'from sklearn.cluster import DBSCAN\n'), ((1359, 1379), 'numpy.asarray', 'np.asarray', (['v_buffer'], {}), '(v_buffer)\n', (1369, 1379), True, 'import numpy as np\n'), ((1381, 1401), 'numpy.asarray', 'np.asarray', (['i_buffer'], {}), '(i_buffer)\n', (1391, 1401), True, 'import numpy as np\n'), ((5811, 5847), 'numpy.ones', 'np.ones', (['chunk1.shape[0]'], {'dtype': 'bool'}), '(chunk1.shape[0], dtype=bool)\n', (5818, 5847), True, 'import numpy as np\n'), ((5879, 5932), 'numpy.hstack', 'np.hstack', (['(clustered_in_chunk1, clustered_in_chunk2)'], {}), '((clustered_in_chunk1, clustered_in_chunk2))\n', (5888, 5932), True, 'import numpy as np\n'), ((6069, 6136), 'numpy.concatenate', 'np.concatenate', (['(labels[:len1], labels[len1:][clustered_in_chunk2])'], {}), '((labels[:len1], labels[len1:][clustered_in_chunk2]))\n', (6083, 6136), True, 'import numpy as np\n'), ((7403, 7424), 'numpy.asarray', 'np.asarray', (['residuals'], {}), '(residuals)\n', (7413, 7424), True, 
'import numpy as np\n'), ((7443, 7466), 'numpy.asarray', 'np.asarray', (['residualIDs'], {}), '(residualIDs)\n', (7453, 7466), True, 'import numpy as np\n'), ((8889, 8924), 'numpy.asarray', 'np.asarray', (['point_labels'], {'dtype': 'int'}), '(point_labels, dtype=int)\n', (8899, 8924), True, 'import numpy as np\n'), ((8952, 8982), 'numpy.asarray', 'np.asarray', (['all_IDs'], {'dtype': 'int'}), '(all_IDs, dtype=int)\n', (8962, 8982), True, 'import numpy as np\n'), ((9024, 9052), 'numpy.concatenate', 'np.concatenate', (['point_labels'], {}), '(point_labels)\n', (9038, 9052), True, 'import numpy as np\n'), ((9075, 9098), 'numpy.concatenate', 'np.concatenate', (['all_IDs'], {}), '(all_IDs)\n', (9089, 9098), True, 'import numpy as np\n'), ((10229, 10287), 'numpy.lib.recfunctions.append_fields', 'append_fields', (['[good_data]', "('flash_id',)", '(empty_labels,)'], {}), "([good_data], ('flash_id',), (empty_labels,))\n", (10242, 10287), False, 'from numpy.lib.recfunctions import append_fields\n'), ((11037, 11074), 'numpy.argsort', 'np.argsort', (["non_singleton['flash_id']"], {}), "(non_singleton['flash_id'])\n", (11047, 11074), True, 'import numpy as np\n'), ((11592, 11622), 'numpy.where', 'np.where', (['(flid[1:] - flid[:-1])'], {}), '(flid[1:] - flid[:-1])\n', (11600, 11622), True, 'import numpy as np\n'), ((11677, 11709), 'numpy.hstack', 'np.hstack', (['([0], boundaries + 1)'], {}), '(([0], boundaries + 1))\n', (11686, 11709), True, 'import numpy as np\n'), ((15566, 15613), 'numpy.hstack', 'np.hstack', (['(X[:, None], Y[:, None], Z[:, None])'], {}), '((X[:, None], Y[:, None], Z[:, None]))\n', (15575, 15613), True, 'import numpy as np\n'), ((8647, 8662), 'numpy.asarray', 'np.asarray', (['IDs'], {}), '(IDs)\n', (8657, 8662), True, 'import numpy as np\n'), ((10940, 10963), 'numpy.logical_not', 'np.logical_not', (['singles'], {}), '(singles)\n', (10954, 10963), True, 'import numpy as np\n'), ((12519, 12550), 'numpy.arange', 'np.arange', (['n_singles'], {'dtype': 'int'}), 
'(n_singles, dtype=int)\n', (12528, 12550), True, 'import numpy as np\n'), ((12594, 12619), 'lmatools.flashsort.flash_stats.Flash', 'Flash', (['singleton[i:i + 1]'], {}), '(singleton[i:i + 1])\n', (12599, 12619), False, 'from lmatools.flashsort.flash_stats import calculate_flash_stats, Flash\n'), ((13137, 13162), 'lmatools.flashsort.flash_stats.calculate_flash_stats', 'calculate_flash_stats', (['fl'], {}), '(fl)\n', (13158, 13162), False, 'from lmatools.flashsort.flash_stats import calculate_flash_stats, Flash\n'), ((1169, 1189), 'numpy.asarray', 'np.asarray', (['v_buffer'], {}), '(v_buffer)\n', (1179, 1189), True, 'import numpy as np\n'), ((1191, 1211), 'numpy.asarray', 'np.asarray', (['i_buffer'], {}), '(i_buffer)\n', (1201, 1211), True, 'import numpy as np\n'), ((4948, 4975), 'numpy.vstack', 'np.vstack', (['(chunk1, chunk2)'], {}), '((chunk1, chunk2))\n', (4957, 4975), True, 'import numpy as np\n'), ((5002, 5028), 'numpy.concatenate', 'np.concatenate', (['(id1, id2)'], {}), '((id1, id2))\n', (5016, 5028), True, 'import numpy as np\n'), ((8142, 8168), 'numpy.atleast_1d', 'np.atleast_1d', (['orig_labels'], {}), '(orig_labels)\n', (8155, 8168), True, 'import numpy as np\n')] |
import numpy as np
import quaternion
def from_tqs_to_matrix(translation, quater, scale):
    """
    Compose a 4x4 homogeneous transform from (T(3), Q(4), S(3)).

    :param translation: 3 dim translation vector (np.array or list)
    :param quater: 4 dim rotation quaternion (np.array or list)
    :param scale: 3 dim scale vector (np.array or list)
    :return: 4x4 transformation matrix M = T . R . S
    """
    rot_q = np.quaternion(quater[0], quater[1], quater[2], quater[3])
    # Build each factor as a 4x4 homogeneous matrix
    trans_m = np.eye(4)
    trans_m[0:3, 3] = translation
    rot_m = np.eye(4)
    rot_m[0:3, 0:3] = quaternion.as_rotation_matrix(rot_q)
    scale_m = np.eye(4)
    scale_m[0:3, 0:3] = np.diag(scale)
    return trans_m.dot(rot_m).dot(scale_m)
def apply_transform(points, *args):
    """
    Apply a chain of transforms: points = points x args[0] x ... x args[-1]

    :param points: np.array N x (3|4)
    :param args: transformations; each is either a 4x4 np.array or a dict {
        'translation': [t1, t2, t3],
        'rotation': [q1, q2, q3, q4],
        'scale': [s1, s2, s3],
    }
    :return: transformed points, same dimensionality as the input
    """
    # Remember the input dimensionality; work in homogeneous coordinates.
    had_three_dims = points.shape[-1] == 3
    if had_three_dims:
        points = add_forth_coord(points)
    for transform in args:
        # Normalize each transform to a 4x4 matrix, then apply it.
        if type(transform) is dict:
            matrix = from_tqs_to_matrix(
                translation=transform['translation'],
                quater=transform['rotation'],
                scale=transform['scale'],
            )
        else:
            matrix = transform
        points = points @ matrix.T
    # Drop the homogeneous coordinate again if the input was 3-dimensional.
    if had_three_dims:
        points = points[:, :3]
    return points
def apply_inverse_transform(points, *args):
    """
    Apply the inverses of a chain of transforms, in the given order:
    points = points x inv(args[0]) x inv(args[1]) x ... x inv(args[-1])

    :param points: np.array N x (3|4)
    :param args: transformations; each is either a 4x4 np.array or a dict {
        'translation': [t1, t2, t3],
        'rotation': [q1, q2, q3, q4],
        'scale': [s1, s2, s3],
    }
    :return: transformed points, same dimensionality as the input
    """
    # Remember the input dimensionality; work in homogeneous coordinates.
    had_three_dims = points.shape[-1] == 3
    if had_three_dims:
        points = add_forth_coord(points)
    for transform in args:
        # Normalize each transform to a 4x4 matrix, invert it, apply it.
        if type(transform) is dict:
            matrix = from_tqs_to_matrix(
                translation=transform['translation'],
                quater=transform['rotation'],
                scale=transform['scale'],
            )
        else:
            matrix = transform
        points = points @ np.linalg.inv(matrix).T
    # Drop the homogeneous coordinate again if the input was 3-dimensional.
    if had_three_dims:
        points = points[:, :3]
    return points
def add_forth_coord(points):
    """Append a homogeneous coordinate (constant 1) column to N x 3 points."""
    ones_col = np.ones((len(points), 1))
    return np.hstack((points, ones_col))
def make_M_from_tqs(t, q, s):
    """Build a 4x4 homogeneous matrix M = T . R . S from a translation t,
    rotation quaternion q (w, x, y, z order as given) and scale s."""
    quat = np.quaternion(q[0], q[1], q[2], q[3])
    trans = np.eye(4)
    trans[0:3, 3] = t
    rot = np.eye(4)
    rot[0:3, 0:3] = quaternion.as_rotation_matrix(quat)
    scl = np.eye(4)
    scl[0:3, 0:3] = np.diag(s)
    return trans.dot(rot).dot(scl)
"numpy.eye",
"quaternion.as_rotation_matrix",
"numpy.diag",
"numpy.linalg.inv",
"numpy.quaternion"
] | [((379, 436), 'numpy.quaternion', 'np.quaternion', (['quater[0]', 'quater[1]', 'quater[2]', 'quater[3]'], {}), '(quater[0], quater[1], quater[2], quater[3])\n', (392, 436), True, 'import numpy as np\n'), ((445, 454), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (451, 454), True, 'import numpy as np\n'), ((491, 500), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (497, 500), True, 'import numpy as np\n'), ((519, 551), 'quaternion.as_rotation_matrix', 'quaternion.as_rotation_matrix', (['q'], {}), '(q)\n', (548, 551), False, 'import quaternion\n'), ((560, 569), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (566, 569), True, 'import numpy as np\n'), ((588, 602), 'numpy.diag', 'np.diag', (['scale'], {}), '(scale)\n', (595, 602), True, 'import numpy as np\n'), ((3247, 3284), 'numpy.quaternion', 'np.quaternion', (['q[0]', 'q[1]', 'q[2]', 'q[3]'], {}), '(q[0], q[1], q[2], q[3])\n', (3260, 3284), True, 'import numpy as np\n'), ((3293, 3302), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (3299, 3302), True, 'import numpy as np\n'), ((3329, 3338), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (3335, 3338), True, 'import numpy as np\n'), ((3357, 3389), 'quaternion.as_rotation_matrix', 'quaternion.as_rotation_matrix', (['q'], {}), '(q)\n', (3386, 3389), False, 'import quaternion\n'), ((3398, 3407), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (3404, 3407), True, 'import numpy as np\n'), ((3426, 3436), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (3433, 3436), True, 'import numpy as np\n'), ((2715, 2731), 'numpy.linalg.inv', 'np.linalg.inv', (['t'], {}), '(t)\n', (2728, 2731), True, 'import numpy as np\n'), ((2800, 2824), 'numpy.linalg.inv', 'np.linalg.inv', (['transform'], {}), '(transform)\n', (2813, 2824), True, 'import numpy as np\n')] |
"""Build hypothesis file by applying post-processing rules
to LSTM output
Requires LSTM output
Produces a hypothesis txt file
"""
# +
import sys
# Root folder of main library
sys.path.insert(0, 'library')
# Root folder of EDF files
EDF_ROOT = '/esat/biomeddata/Neureka_challenge/edf/dev/'
# Root folder of predictions on edf files
PREDICTION_ROOT = 'evaluation'
# custom lib
import spir
# std lib
import os
import pickle
# 3rd party lib
import h5py
import numpy as np
# +
def load_filenames():
    """Read the list of processed file names from the prediction HDF5 file.

    Returns:
        list: contents of the 'filenames' dataset in
        PREDICTION_ROOT/prediction_test_iclabel.h5.
    """
    # BUG FIX: os.join does not exist (AttributeError at runtime);
    # the correct call is os.path.join.
    path = os.path.join(PREDICTION_ROOT, 'prediction_test_iclabel.h5')
    with h5py.File(path, 'r') as f:
        filenames = list(f['filenames'])
    return filenames
filenames = load_filenames()
with open('lstm-results.pkl', 'rb') as filehandler:
    results = pickle.load(filehandler)
# Detection threshold applied to the baseline-corrected LSTM output
threshold = 0.55
# NOTE(review): `fs` (sampling frequency) is used below but never defined in
# this script -- presumably provided elsewhere; confirm before running.
for i, filename in enumerate(filenames):
    # Apply threshold on baseline (median) corrected prediction
    scores = results[i].flatten()
    hyp = spir.mask2eventList((scores - np.median(scores)) > threshold, fs)
    # Merge events closer than 30 seconds
    hyp = spir.merge_events(hyp, 30)
    # Remove events whose mean prediction is < 82% of the strongest event
    if len(hyp):
        amp = np.array([np.mean(scores[int(event[0] * fs):int(event[1] * fs)])
                        for event in hyp])
        amp /= np.max(amp)
        hyp = list(np.array(hyp)[amp > 0.82])
    with open('hyp_lstm.txt', 'a') as handle:
        for event in hyp:
            # Remove short events (duration <= 15 s)
            if event[1] - event[0] > 15:
                amp = np.mean(results[i][int(event[0] * fs):int(event[1] * fs)])
                # BUG FIX: `amp` was passed as a second positional argument
                # to handle.write(), which takes exactly one argument, and
                # the fourth '{}' placeholder was left unfilled. The mean
                # amplitude belongs inside the format() call.
                # Events are shortened by 1 second on each side.
                handle.write('{} {} {} {} 16\n'.format(
                    filename, event[0] + 1, event[1] - 1, amp))
| [
"os.join",
"sys.path.insert",
"pickle.load",
"numpy.max",
"numpy.array",
"spir.merge_events"
] | [((179, 208), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""library"""'], {}), "(0, 'library')\n", (194, 208), False, 'import sys\n'), ((771, 795), 'pickle.load', 'pickle.load', (['filehandler'], {}), '(filehandler)\n', (782, 795), False, 'import pickle\n'), ((1067, 1093), 'spir.merge_events', 'spir.merge_events', (['hyp', '(30)'], {}), '(hyp, 30)\n', (1084, 1093), False, 'import spir\n'), ((1342, 1355), 'numpy.array', 'np.array', (['amp'], {}), '(amp)\n', (1350, 1355), True, 'import numpy as np\n'), ((1371, 1382), 'numpy.max', 'np.max', (['amp'], {}), '(amp)\n', (1377, 1382), True, 'import numpy as np\n'), ((545, 599), 'os.join', 'os.join', (['PREDICTION_ROOT', '"""prediction_test_iclabel.h5"""'], {}), "(PREDICTION_ROOT, 'prediction_test_iclabel.h5')\n", (552, 599), False, 'import os\n'), ((1403, 1416), 'numpy.array', 'np.array', (['hyp'], {}), '(hyp)\n', (1411, 1416), True, 'import numpy as np\n')] |
import gym
import numpy as np
from gym import error, spaces, utils
from gym.utils import seeding
from gym import Env
from gym.spaces import Discrete, MultiDiscrete, MultiBinary, Box
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
class SYSTEM():
    """Toy system with a quadratic action cost blended with uniform noise.

    The cost of an action is a scaled quadratic distance to a fixed target
    action, mixed with uniform random noise via the weight self.beta.
    """

    def __init__(self, action_dim, fixed_matrix=True):
        self.beta = 0.9  # weight of the deterministic part of the cost
        TARGET = -400
        self.target_action = np.ones(action_dim) * TARGET
        if fixed_matrix:
            # Fixed diagonal quadratic form with 0.1 on the diagonal
            self.matrix = np.diag(np.ones(action_dim) * 0.1)
        else:
            # Random symmetric quadratic form with the same 0.1 spectrum:
            # symmetrize a random matrix, take its singular vectors V and
            # rebuild V.T @ diag(0.1) @ V.
            # target_action = np.array([-100,-400,-400,200])
            A = np.random.rand(action_dim, action_dim)
            B = (A + A.T) / 2
            _, s, V = np.linalg.svd(B)
            c_matrix = np.zeros((action_dim, action_dim))
            np.fill_diagonal(c_matrix, np.ones(action_dim) * 0.1)
            self.matrix = V.T.dot(c_matrix).dot(V)

    def step(self, action):
        """Return the noisy cost (negative reward) of taking `action`."""
        noise = np.random.rand()
        delta = (action - self.target_action).reshape([-1, 1])
        quad_cost = 0.001 * np.matmul(delta.T, self.matrix).dot(delta)
        total = quad_cost * self.beta + noise * (1 - self.beta)
        # reward = -cost; the caller receives the cost itself (-reward)
        return total
class ContBanditEnv(gym.Env):
    """Continuous-action bandit-style gym environment.

    The reward is the negative of a noisy quadratic distance between
    (state + action) and a fixed target action. The scalar observation is
    quantized to -1, 0 or +1 depending on the accumulated deviation of
    the action from the target.
    """
    metadata = {'render.modes': ['human']}
    def __init__(self, action_dim=1, ep_length=10000):
        super(ContBanditEnv, self).__init__()
        self.action_dim = action_dim
        self.ep_length = ep_length
        # Weight of the deterministic part of the cost (rest is uniform noise)
        self.beta = 0.9
        TARGET = 10
        self.target = TARGET
        self.target_action = np.ones(self.action_dim) * TARGET
        # Diagonal quadratic form used in the cost
        self.matrix = np.diag(np.ones(action_dim) * 0.1)
        # Dimensionality of our observation space
        self.action_space = Box(low=-1000, high=1000, shape=(self.action_dim,), dtype=np.float32)
        self.observation_space = Box(low=-1000, high=1000, shape=(1,), dtype=np.float32)
        # Episode Done?
        self.done = False
        self.current_step = 0
        # Our current state, with length(1,len_state)
        self.x = None
        # Reset environment
        self.reset()
    def step(self, action):
        self.current_step += 1
        self.done = self.current_step >= self.ep_length
        # Make vectorized form
        # NOTE(review): self.x is a plain list here; list += numpy-scalar
        # relies on numpy's __radd__ coercing self.x into an ndarray --
        # confirm this is intended rather than an accident.
        self.x += (action[0] - self.target)
        # Quantize the accumulated deviation to {-1, 0, +1}
        # (a +/-0.2 dead band maps to 0).
        if abs(self.x[0])<0.2:
            self.x = [0.]
        elif self.x[0]>0.2:
            self.x = [1.]
        else:
            self.x = [-1.]
        #self.x = [-1.]
        arrayed_version = np.array(self.x)
        # NOTE(review): Reward() draws fresh random noise on every call, and
        # it is called twice here -- the value bound to `reward` is unused
        # and the returned reward comes from a second, different draw.
        reward = self.Reward(arrayed_version, action)
        return arrayed_version, self.Reward(arrayed_version, action), self.done, {}
    def reset(self):
        """
        Reset the step counter and the state to [0.].
        Returns: arrayed_version: np.array of shape (1,)
        """
        self.current_step = 0
        self.x = [0.]
        arrayed_version = np.array(self.x)
        return arrayed_version
    def render(self, mode='human', close=False):
        # Rendering is intentionally a no-op for this environment.
        pass
    def show(self, title):
        pass
    def Reward(self, obs, action_value):
        """Noisy negative quadratic cost of (obs + action) vs the target."""
        u = np.random.rand()
        dis_vector = (obs + action_value - self.target_action).reshape([-1, 1])
        cost = np.matmul(dis_vector.T, self.matrix).dot(dis_vector)
        # Blend the quadratic cost with uniform noise via beta
        cost = cost * self.beta + u * (1 - self.beta)
        reward = -cost
        return reward
| [
"numpy.ones",
"numpy.random.rand",
"matplotlib.use",
"gym.spaces.Box",
"numpy.array",
"numpy.zeros",
"numpy.matmul",
"numpy.linalg.svd"
] | [((201, 222), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (215, 222), False, 'import matplotlib\n'), ((981, 997), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (995, 997), True, 'import numpy as np\n'), ((1764, 1833), 'gym.spaces.Box', 'Box', ([], {'low': '(-1000)', 'high': '(1000)', 'shape': '(self.action_dim,)', 'dtype': 'np.float32'}), '(low=-1000, high=1000, shape=(self.action_dim,), dtype=np.float32)\n', (1767, 1833), False, 'from gym.spaces import Discrete, MultiDiscrete, MultiBinary, Box\n'), ((1867, 1922), 'gym.spaces.Box', 'Box', ([], {'low': '(-1000)', 'high': '(1000)', 'shape': '(1,)', 'dtype': 'np.float32'}), '(low=-1000, high=1000, shape=(1,), dtype=np.float32)\n', (1870, 1922), False, 'from gym.spaces import Discrete, MultiDiscrete, MultiBinary, Box\n'), ((2526, 2542), 'numpy.array', 'np.array', (['self.x'], {}), '(self.x)\n', (2534, 2542), True, 'import numpy as np\n'), ((2934, 2950), 'numpy.array', 'np.array', (['self.x'], {}), '(self.x)\n', (2942, 2950), True, 'import numpy as np\n'), ((3141, 3157), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3155, 3157), True, 'import numpy as np\n'), ((423, 442), 'numpy.ones', 'np.ones', (['action_dim'], {}), '(action_dim)\n', (430, 442), True, 'import numpy as np\n'), ((631, 669), 'numpy.random.rand', 'np.random.rand', (['action_dim', 'action_dim'], {}), '(action_dim, action_dim)\n', (645, 669), True, 'import numpy as np\n'), ((722, 738), 'numpy.linalg.svd', 'np.linalg.svd', (['B'], {}), '(B)\n', (735, 738), True, 'import numpy as np\n'), ((762, 796), 'numpy.zeros', 'np.zeros', (['(action_dim, action_dim)'], {}), '((action_dim, action_dim))\n', (770, 796), True, 'import numpy as np\n'), ((1594, 1618), 'numpy.ones', 'np.ones', (['self.action_dim'], {}), '(self.action_dim)\n', (1601, 1618), True, 'import numpy as np\n'), ((1658, 1677), 'numpy.ones', 'np.ones', (['action_dim'], {}), '(action_dim)\n', (1665, 1677), True, 'import numpy as np\n'), ((3253, 3289), 
'numpy.matmul', 'np.matmul', (['dis_vector.T', 'self.matrix'], {}), '(dis_vector.T, self.matrix)\n', (3262, 3289), True, 'import numpy as np\n'), ((513, 532), 'numpy.ones', 'np.ones', (['action_dim'], {}), '(action_dim)\n', (520, 532), True, 'import numpy as np\n'), ((836, 855), 'numpy.ones', 'np.ones', (['action_dim'], {}), '(action_dim)\n', (843, 855), True, 'import numpy as np\n'), ((1089, 1125), 'numpy.matmul', 'np.matmul', (['dis_vector.T', 'self.matrix'], {}), '(dis_vector.T, self.matrix)\n', (1098, 1125), True, 'import numpy as np\n')] |
"""
.. module:: HarmonicKMeans
HarmonicKMeans
*************
:Description: HarmonicKMeans
"""
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
import numpy as np
from sklearn.base import TransformerMixin, ClusterMixin, BaseEstimator
from sklearn.utils import check_random_state
class HarmonicKMeans(TransformerMixin, ClusterMixin, BaseEstimator):
    """
    Harmonic K-means clustering.

    Every point contributes to every center through a harmonic-mean style
    soft membership, which makes the algorithm less sensitive to the
    initial placement of the centers than plain k-means.

    Reference
    ---------
    <NAME>. & <NAME>.
    Alternatives to the k-means algorithm that find better clusterings.
    Proceedings of the Eleventh International Conference on
    Information and Knowledge Management (CIKM-02),
    ACM Press, 2002, 600-607
    """

    def __init__(self, n_clusters=3, p=2., epsilon=1e-6, tol=1e-3,
                 init_method='forgy', max_iter=300, random_state=None):
        ''' init_method by <NAME> and Larranaga. An empirical comparison of
        four initialization methods for the k-means algorithm.
        Pattern recognition letters,20:1027-1040, 1999.
        The Forgy method chooses n_clusters data points from the dataset at random
        and uses them as initial centers.
        The Random Partition method assigns each datapoint to a random center, then
        computes the initial location of each center as the centroid of its assigned
        points.
        p is an input degree and typically p >= 2.
        epsilon is a small positive value that prevents numerical overflow when a
        center and a data point coincide.
        tol scales the convergence threshold on center movement (relative to the
        data variance); max_iter bounds the number of update iterations.
        '''
        self.n_clusters = n_clusters
        self.p = p
        self.epsilon = epsilon
        self.tol = tol
        self.init_method = init_method
        self.max_iter = max_iter
        self.random_state = random_state

    def _distances_p(self, X, degree):
        """Per-point/per-center sums of coordinate differences raised to
        `degree`, floored at self.epsilon to avoid division by zero (Zhang)."""
        d = ((X[:, np.newaxis] - self.cluster_centers_) ** degree).sum(axis=2)
        d[d <= self.epsilon] = self.epsilon
        return d

    def _membership(self, X):
        """Soft membership matrix of shape (n_samples, n_clusters); rows sum
        to 1. Requires self.cluster_centers_ (raises AttributeError if unfit).
        """
        M1 = self._distances_p(X, self.p + 2) ** (-1)
        return M1 / np.sum(M1, axis=1).reshape(-1, 1)

    def _calculate_centers(self, X):
        """One harmonic k-means update of memberships, weights and centers."""
        M1 = self._distances_p(X, self.p + 2) ** (-1)
        M2 = self._distances_p(X, self.p) ** (-1)
        # membership m: harmonic weighting by inverse distance^(p+2)
        self.m_ = M1 / np.sum(M1, axis=1).reshape(-1, 1)
        # weight w: emphasizes points that are far from all centers
        self.w_ = M1.sum(axis=1).reshape(-1, 1) / (M2.sum(axis=1).reshape(-1, 1)) ** 2
        # new centers: (membership * weight)-weighted means of the data
        self.cluster_centers_ = np.dot((self.m_ * self.w_).T, X) \
            / (self.m_ * self.w_).sum(axis=0).reshape(-1, 1)

    def fit_predict(self, X):
        '''Fit on X and return the hard partition (labels) for X.'''
        n_samples, n_features = X.shape
        vdata = np.mean(np.var(X, 0))
        random_state = check_random_state(self.random_state)
        # centers initialization
        if self.init_method == 'forgy':
            self.cluster_centers_ = X[random_state.choice(np.arange(n_samples), self.n_clusters)]
        elif self.init_method == 'random_partition':
            ra = random_state.randint(self.n_clusters, size=n_samples)
            self.cluster_centers_ = np.zeros((self.n_clusters, n_features))
            for i in range(self.n_clusters):
                self.cluster_centers_[i] = X[ra == i].mean(axis=0)
        for i in range(self.max_iter):
            centers_old = self.cluster_centers_.copy()
            self._calculate_centers(X)
            # stop when the centers move less than tol (scaled by variance)
            if np.sum((centers_old - self.cluster_centers_) ** 2) < self.tol * vdata:
                break
        return self.m_.argmax(axis=1)

    def predict(self, X):
        '''Predict the hard partition for new data X using the converged
        cluster centers. Raises sklearn.exceptions.NotFittedError if the
        estimator has not been fitted via fit_predict.
        '''
        try:
            m_ = self._membership(X)
        except AttributeError:
            # BUG FIX: NotFittedError was referenced without being imported,
            # so the intended error surfaced as a NameError instead.
            from sklearn.exceptions import NotFittedError
            raise NotFittedError("This instance is not fitted yet. Call 'fit_predict' method before using this method")
        return m_.argmax(axis=1)

    def predict_proba(self, X):
        '''Predict the soft partition (membership matrix) for new data X
        using the converged cluster centers. Raises
        sklearn.exceptions.NotFittedError if the estimator has not been
        fitted via fit_predict.
        '''
        try:
            m_ = self._membership(X)
        except AttributeError:
            # BUG FIX: see predict() -- NotFittedError must be imported.
            from sklearn.exceptions import NotFittedError
            raise NotFittedError("This instance is not fitted yet. Call 'fit_predict' method before using this method")
        return m_
if __name__ == '__main__':
    # Demo: cluster three Gaussian blobs and print the fitted results.
    from sklearn.datasets import make_blobs
    import matplotlib.pyplot as plt
    blob_centers = [[1, 1], [-1, -1], [1, -1]]
    n_clusters = len(blob_centers)
    X, labels_true = make_blobs(n_samples=1200, centers=blob_centers,
                               cluster_std=0.3, random_state=42)
    km = HarmonicKMeans(n_clusters=3, init_method='random_partition',
                        max_iter=100, random_state=42)
    labels = km.fit_predict(X)
    print(km.cluster_centers_)
    print(labels[:10])
    print(km.m_[:10])
print(km.m_[:10]) | [
"sklearn.utils.check_random_state",
"numpy.arange",
"sklearn.datasets.make_blobs",
"numpy.sum",
"numpy.dot",
"numpy.zeros",
"numpy.var"
] | [((5663, 5740), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': '(1200)', 'centers': 'centers', 'cluster_std': '(0.3)', 'random_state': '(42)'}), '(n_samples=1200, centers=centers, cluster_std=0.3, random_state=42)\n', (5673, 5740), False, 'from sklearn.datasets import make_blobs\n'), ((2956, 2993), 'sklearn.utils.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (2974, 2993), False, 'from sklearn.utils import check_random_state\n'), ((2636, 2668), 'numpy.dot', 'np.dot', (['(self.m_ * self.w_).T', 'X'], {}), '((self.m_ * self.w_).T, X)\n', (2642, 2668), True, 'import numpy as np\n'), ((2918, 2930), 'numpy.var', 'np.var', (['X', '(0)'], {}), '(X, 0)\n', (2924, 2930), True, 'import numpy as np\n'), ((3331, 3370), 'numpy.zeros', 'np.zeros', (['(self.n_clusters, n_features)'], {}), '((self.n_clusters, n_features))\n', (3339, 3370), True, 'import numpy as np\n'), ((3631, 3681), 'numpy.sum', 'np.sum', (['((centers_old - self.cluster_centers_) ** 2)'], {}), '((centers_old - self.cluster_centers_) ** 2)\n', (3637, 3681), True, 'import numpy as np\n'), ((2405, 2423), 'numpy.sum', 'np.sum', (['M1'], {'axis': '(1)'}), '(M1, axis=1)\n', (2411, 2423), True, 'import numpy as np\n'), ((3133, 3153), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (3142, 3153), True, 'import numpy as np\n'), ((4390, 4408), 'numpy.sum', 'np.sum', (['M1'], {'axis': '(1)'}), '(M1, axis=1)\n', (4396, 4408), True, 'import numpy as np\n'), ((5245, 5263), 'numpy.sum', 'np.sum', (['M1'], {'axis': '(1)'}), '(M1, axis=1)\n', (5251, 5263), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.decomposition import TruncatedSVD
def rpca(M, lam):
    # <NAME> - Oct-2017
    # Robust PCA: split M into a low-rank part L and a sparse part S with
    # M ~ L + S, using the alternating-directions augmented Lagrangian
    # (method of multipliers) scheme, as described in my blog.
    n_rows, n_cols, n_t = M.shape[0], M.shape[1], M.shape[2]
    M = M.reshape(n_rows * n_cols, n_t)
    # Initial dual variable and penalty parameter
    Y = M / np.maximum(np.linalg.norm(M, 2), np.linalg.norm(M, np.inf) / lam)
    mu = 1 / np.linalg.norm(M, 2)
    rho = 1.6
    S = np.zeros((n_rows * n_cols, n_t))
    error = 10
    count = 0
    while error > 1e-7:
        # Low-rank update: singular value thresholding at level 1/mu
        U, sig, V = np.linalg.svd(M - S + Y / mu, full_matrices=False)
        L = np.dot(U, np.dot(np.diag(soft_thres(sig, 1 / mu)), V))
        # Sparse update: entrywise soft threshold at level lam/mu
        S = soft_thres(M - L + Y / mu, lam / mu)
        # Dual ascent step and penalty growth
        Y = Y + mu * (M - L - S)
        mu = mu * rho
        # Relative reconstruction residual
        error = np.linalg.norm(M - L - S, 'fro') / np.linalg.norm(M, 'fro')
        count += 1
    # used to previously return count also, but don't really use this
    return L.reshape(n_rows, n_cols, n_t), S.reshape(n_rows, n_cols, n_t)
def soft_thres(x, eps):
    # <NAME> - Oct-2017
    # Elementwise soft threshold: ST(x, eps)_ij = sgn(x_ij) * max(|x_ij| - eps, 0)
    signs = np.sign(x)
    magnitudes = np.maximum(np.fabs(x) - eps, 0)
    return np.multiply(signs, magnitudes)
| [
"numpy.fabs",
"numpy.multiply",
"numpy.zeros",
"numpy.sign",
"numpy.linalg.norm",
"numpy.linalg.svd"
] | [((544, 567), 'numpy.zeros', 'np.zeros', (['(Nr * Nc, Nt)'], {}), '((Nr * Nc, Nt))\n', (552, 567), True, 'import numpy as np\n'), ((1275, 1285), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (1282, 1285), True, 'import numpy as np\n'), ((1341, 1358), 'numpy.multiply', 'np.multiply', (['a', 'b'], {}), '(a, b)\n', (1352, 1358), True, 'import numpy as np\n'), ((495, 515), 'numpy.linalg.norm', 'np.linalg.norm', (['M', '(2)'], {}), '(M, 2)\n', (509, 515), True, 'import numpy as np\n'), ((646, 696), 'numpy.linalg.svd', 'np.linalg.svd', (['(M - S + Y / mu)'], {'full_matrices': '(False)'}), '(M - S + Y / mu, full_matrices=False)\n', (659, 696), True, 'import numpy as np\n'), ((424, 444), 'numpy.linalg.norm', 'np.linalg.norm', (['M', '(2)'], {}), '(M, 2)\n', (438, 444), True, 'import numpy as np\n'), ((865, 897), 'numpy.linalg.norm', 'np.linalg.norm', (['(M - L - S)', '"""fro"""'], {}), "(M - L - S, 'fro')\n", (879, 897), True, 'import numpy as np\n'), ((895, 919), 'numpy.linalg.norm', 'np.linalg.norm', (['M', '"""fro"""'], {}), "(M, 'fro')\n", (909, 919), True, 'import numpy as np\n'), ((1307, 1317), 'numpy.fabs', 'np.fabs', (['x'], {}), '(x)\n', (1314, 1317), True, 'import numpy as np\n'), ((445, 470), 'numpy.linalg.norm', 'np.linalg.norm', (['M', 'np.inf'], {}), '(M, np.inf)\n', (459, 470), True, 'import numpy as np\n')] |
# coding: utf-8
""" Classes for accessing simulation data for Sgr-like streams with
different mass progenitors.
"""
from __future__ import division, print_function
__author__ = "adrn <<EMAIL>>"
# Standard library
import os, sys
from random import sample
# Third-party
import numpy as np
import numexpr
import astropy.io.ascii as ascii
from astropy.table import Column
import astropy.units as u
from astropy.constants import G
from gary.io import SCFReader
from gary.units import usys as _usys
# Project
from .. import usys
from ..dynamics import Particle, Orbit
from ..util import streamspath
from ..coordinates.frame import galactocentric
from ..potential.lm10 import LM10Potential
from ..inference.util import guess_tail_bit, particles_x1x2x3
__all__ = ["SgrSimulation"]
class SgrSimulation(object):
    """ Accessor for one snapshot of an SCF simulation of a Sgr-like stream.

    Loads a SNAP file from a simulation directory and exposes the particle
    table, the total particle mass, and methods to build Particle objects
    for the stream particles and the satellite itself.
    """
    def __init__(self, path, snapfile):
        """ Load snapshot `snapfile` from the simulation directory `path`.

        `path` may be absolute, or relative to
        <streamspath>/data/simulation/. Raises IOError if neither exists.
        """
        # potential used for the simulation
        # NOTE(review): LawMajewski2010 is not imported in this module (the
        # import above brings in LM10Potential) -- this line likely raises
        # NameError unless the name is injected elsewhere; confirm.
        self.potential = LawMajewski2010()
        # some smart pathness
        if not os.path.exists(path):
            _path = os.path.join(streamspath, "data", "simulation", path)
            if os.path.exists(_path):
                path = _path
            else:
                raise IOError("Path '{}' doesn't exist".format(path))
        self.path = path
        self.reader = SCFReader(self.path)
        self.particle_table = self.reader.read_snap(snapfile, units=usys)
        self.units = _usys
        # get mass column from table; total particle mass as a Quantity
        m = np.array(self.particle_table['m'])*self.particle_table['m'].unit
        self.mass = np.sum(m)
        # t1 = snapshot time from the file metadata; t2 = present day (0)
        self.t1 = self.particle_table.meta["time"]
        self.t2 = 0.
    def particles(self, n=None, expr=None, tail_bit=False, clean=False):
        """ Return a Particle object with N particles selected from the
        simulation with expression expr.
        Parameters
        ----------
        n : int or None (optional)
            Number of particles to return. None or 0 means 'all'
        expr : str (optional)
            Use numexpr to select out only rows that match criteria.
        tail_bit : bool (optional)
            Compute tail bit or not.
        clean : bool (optional)
            Reject particles with undefined tail bit or far outside the
            tidal radius (see body).
        """
        # Boolean row mask: numexpr expression over the table columns, or all rows
        if expr is not None:
            expr_idx = numexpr.evaluate(str(expr), self.particle_table)
        else:
            expr_idx = np.ones(len(self.particle_table)).astype(bool)
        table = self.particle_table[expr_idx]
        # Random permutation of the selected rows (Python 2: xrange)
        n_idx = np.array(sample(xrange(len(table)), len(table)))
        if n is not None and n > 0:
            idx = n_idx[:n]
        else:
            idx = n_idx
        table = table[idx]
        # get a list of quantities for each column
        coords = []
        for colname in galactocentric.coord_names:
            coords.append(np.array(table[colname])*table[colname].unit)
        meta = dict(expr=expr)
        # unbinding times, converted to the internal time unit
        meta["tub"] = (np.array(table["tub"])*table["tub"].unit).to(_usys["time"]).value
        # create the particle object
        p = Particle(coords, frame=galactocentric, meta=meta)
        p = p.decompose(usys)
        # guess whether in leading or trailing tail
        if tail_bit:
            coord, r_tide, v_disp = particles_x1x2x3(p, self.satellite(),
                                                     self.potential,
                                                     self.t1, self.t2, -1,
                                                     at_tub=True)
            (x1,x2,x3,vx1,vx2,vx3) = coord
            p.meta["tail_bit"] = p.tail_bit = guess_tail_bit(x1,x2)
        else:
            # NOTE(review): this rebinds the *argument* tail_bit to a NaN
            # array; the `if not tail_bit:` test below would then be an
            # ambiguous-truth-value check on an array for nparticles > 1.
            # Confirm the intended clean/tail_bit interaction.
            tail_bit = np.ones(p.nparticles)*np.nan
        if clean:
            if not tail_bit:
                coord, r_tide, v_disp = particles_x1x2x3(p, self.satellite(),
                                                         self.potential,
                                                         self.t1, self.t2, -1,
                                                         at_tub=True)
                (x1,x2,x3,vx1,vx2,vx3) = coord
                tail_bit = guess_tail_bit(x1,x2)
            else:
                tail_bit = p.tail_bit
            # reject any with nan tail_bit
            idx = ~np.isnan(tail_bit)
            # reject any with |x1| > 2.5 or |x2| > 1.2
            idx &= np.fabs(x1/r_tide) < 2.5
            idx &= np.fabs(x2/r_tide) < 1.2
            _X = p._X[idx]
            meta["tub"] = p.tub[idx]
            meta["tail_bit"] = tail_bit[idx]
            p = Particle(_X.T.copy(), frame=p.frame, units=p._internal_units, meta=meta)
        return p
    def satellite(self):
        """ Return a Particle object with the present-day position of the
        satellite, computed from the still-bound particles.
        """
        # Bound particles are those that never unbound (tub == 0)
        expr_idx = numexpr.evaluate("tub==0", self.particle_table)
        bound = self.particle_table[expr_idx]
        # Median of each coordinate over the bound particles
        q = []
        for colname in galactocentric.coord_names:
            q.append(np.median(np.array(bound[colname]))*bound[colname].unit)
        meta = dict()
        meta["m0"] = self.mass.to(_usys['mass']).value
        # Mass-loss rate: 3.3e-4 scaled by the order of magnitude of m0
        mdot = 3.3*10**(np.floor(np.log10(meta["m0"]))-4)
        meta['mdot'] = mdot
        p = Particle(q, frame=galactocentric, meta=meta)
        return p.decompose(usys)
| [
"os.path.exists",
"numpy.fabs",
"numpy.log10",
"numpy.ones",
"gary.io.SCFReader",
"os.path.join",
"numpy.sum",
"numpy.array",
"numpy.isnan",
"numexpr.evaluate"
] | [((1304, 1324), 'gary.io.SCFReader', 'SCFReader', (['self.path'], {}), '(self.path)\n', (1313, 1324), False, 'from gary.io import SCFReader\n'), ((1561, 1570), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (1567, 1570), True, 'import numpy as np\n'), ((4736, 4783), 'numexpr.evaluate', 'numexpr.evaluate', (['"""tub==0"""', 'self.particle_table'], {}), "('tub==0', self.particle_table)\n", (4752, 4783), False, 'import numexpr\n'), ((1005, 1025), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1019, 1025), False, 'import os, sys\n'), ((1047, 1100), 'os.path.join', 'os.path.join', (['streamspath', '"""data"""', '"""simulation"""', 'path'], {}), "(streamspath, 'data', 'simulation', path)\n", (1059, 1100), False, 'import os, sys\n'), ((1116, 1137), 'os.path.exists', 'os.path.exists', (['_path'], {}), '(_path)\n', (1130, 1137), False, 'import os, sys\n'), ((1476, 1510), 'numpy.array', 'np.array', (["self.particle_table['m']"], {}), "(self.particle_table['m'])\n", (1484, 1510), True, 'import numpy as np\n'), ((3567, 3588), 'numpy.ones', 'np.ones', (['p.nparticles'], {}), '(p.nparticles)\n', (3574, 3588), True, 'import numpy as np\n'), ((4160, 4178), 'numpy.isnan', 'np.isnan', (['tail_bit'], {}), '(tail_bit)\n', (4168, 4178), True, 'import numpy as np\n'), ((4255, 4275), 'numpy.fabs', 'np.fabs', (['(x1 / r_tide)'], {}), '(x1 / r_tide)\n', (4262, 4275), True, 'import numpy as np\n'), ((4299, 4319), 'numpy.fabs', 'np.fabs', (['(x2 / r_tide)'], {}), '(x2 / r_tide)\n', (4306, 4319), True, 'import numpy as np\n'), ((2764, 2788), 'numpy.array', 'np.array', (['table[colname]'], {}), '(table[colname])\n', (2772, 2788), True, 'import numpy as np\n'), ((2865, 2887), 'numpy.array', 'np.array', (["table['tub']"], {}), "(table['tub'])\n", (2873, 2887), True, 'import numpy as np\n'), ((4928, 4952), 'numpy.array', 'np.array', (['bound[colname]'], {}), '(bound[colname])\n', (4936, 4952), True, 'import numpy as np\n'), ((5086, 5106), 'numpy.log10', 'np.log10', 
(["meta['m0']"], {}), "(meta['m0'])\n", (5094, 5106), True, 'import numpy as np\n')] |
import torch
from numpy.testing import assert_almost_equal
import numpy
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.matrix_attention import CosineMatrixAttention
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
class TestCosineMatrixAttention(AllenNlpTestCase):
    def test_can_init_cosine(self):
        """Building a 'cosine' MatrixAttention from params yields a CosineMatrixAttention."""
        legacy_attention = MatrixAttention.from_params(Params({"type": "cosine"}))
        # BUGFIX: the original called isinstance() without asserting, so the
        # result was silently discarded and this test could never fail.
        assert isinstance(legacy_attention, CosineMatrixAttention)

    def test_cosine_similarity(self):
        # example use case: a batch of size 2.
        # With a time element component (e.g. sentences of length 2) each word is a vector of length 3.
        # It is comparing this with another input of the same type
        output = CosineMatrixAttention()(
            torch.FloatTensor([[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]]),
            torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
        )
        # For the first batch there is
        # no correlation between the first words of the input matrix
        # but perfect correlation for the second word.
        # For the second batch there is
        # negative correlation for the first words
        # and positive correlation for the second word.
        assert_almost_equal(
            output.numpy(), numpy.array([[[0, 0], [0.97, 1]], [[-1, -0.99], [0.99, 1]]]), decimal=2
        )
| [
"numpy.array",
"allennlp.common.Params",
"torch.FloatTensor",
"allennlp.modules.matrix_attention.CosineMatrixAttention"
] | [((462, 488), 'allennlp.common.Params', 'Params', (["{'type': 'cosine'}"], {}), "({'type': 'cosine'})\n", (468, 488), False, 'from allennlp.common import Params\n'), ((824, 847), 'allennlp.modules.matrix_attention.CosineMatrixAttention', 'CosineMatrixAttention', ([], {}), '()\n', (845, 847), False, 'from allennlp.modules.matrix_attention import CosineMatrixAttention\n'), ((861, 934), 'torch.FloatTensor', 'torch.FloatTensor', (['[[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]]'], {}), '([[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]])\n', (878, 934), False, 'import torch\n'), ((948, 1018), 'torch.FloatTensor', 'torch.FloatTensor', (['[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]'], {}), '([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])\n', (965, 1018), False, 'import torch\n'), ((1403, 1463), 'numpy.array', 'numpy.array', (['[[[0, 0], [0.97, 1]], [[-1, -0.99], [0.99, 1]]]'], {}), '([[[0, 0], [0.97, 1]], [[-1, -0.99], [0.99, 1]]])\n', (1414, 1463), False, 'import numpy\n')] |
import threading
import cv2
import numpy as np
from utils.misc import color_filter, kill_thread
from screen import Screen
from item import ItemFinder
from config import Config
from template_finder import TemplateFinder
class GraphicDebuggerController:
    """
    Starts and stops the graphic debugger.

    The attribute is_running is static and should be accessed only by the main
    thread or it is subject to race conditions; if you plan to touch it from
    within a thread you might have to add a locking mechanism in order to
    access it safely.
    """
    is_running = False

    def __init__(self, config: Config):
        self.config = config
        self.screen = None
        self.item_finder = None
        self.template_finder = None
        self.debugger_thread = None

    def start(self):
        """Create the helpers and launch the debugger loop in its own thread."""
        self.item_finder = ItemFinder(self.config)
        self.screen = Screen(self.config.general["monitor"])
        self.template_finder = TemplateFinder(self.screen)
        self.debugger_thread = threading.Thread(target=self.run_debugger)
        self.debugger_thread.daemon = False
        self.debugger_thread.start()
        GraphicDebuggerController.is_running = True

    def stop(self):
        """Kill the debugger thread (if any) and clear the running flag."""
        if self.debugger_thread:
            kill_thread(self.debugger_thread)
        GraphicDebuggerController.is_running = False

    def run_debugger(self):
        """Grab frames forever, overlaying item detections and A5-town template matches."""
        search_templates = ["A5_TOWN_0", "A5_TOWN_1", "A5_TOWN_2", "A5_TOWN_3"]
        while True:
            frame = self.screen.grab()
            # Show item detections
            overlay = np.zeros(frame.shape, dtype="uint8")
            for color_key in self.config.colors:
                _, filtered_img = color_filter(frame, self.config.colors[color_key])
                overlay = cv2.bitwise_or(filtered_img, overlay)
            item_list = self.item_finder.search(frame)
            for item in item_list:
                cv2.circle(overlay, item.center, 7, (0, 0, 255), 4)
                cv2.putText(overlay, item.name, item.center, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
            if item_list:
                print(item_list)
            # Show Town A5 template matches
            scores = {}
            for template_name in search_templates:
                match = self.template_finder.search(template_name, frame, threshold=0.65)
                if match.valid:
                    scores[match.name] = match.score
                    cv2.putText(overlay, str(template_name), match.position, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
                    cv2.circle(overlay, match.position, 7, (255, 0, 0), thickness=5)
            if scores:
                print(scores)
            # Show img
            overlay = cv2.resize(overlay, None, fx=0.5, fy=0.5)
            cv2.imshow("debug img", overlay)
            cv2.setWindowProperty("debug img", cv2.WND_PROP_TOPMOST, 1)
            cv2.waitKey(1)
if __name__ == "__main__":
    # Manual entry point: run the debugger stand-alone with the default config.
    controller = GraphicDebuggerController(Config())
    controller.start()
| [
"screen.Screen",
"item.ItemFinder",
"cv2.setWindowProperty",
"template_finder.TemplateFinder",
"config.Config",
"utils.misc.color_filter",
"cv2.imshow",
"cv2.putText",
"numpy.zeros",
"cv2.circle",
"cv2.bitwise_or",
"threading.Thread",
"cv2.resize",
"cv2.waitKey",
"utils.misc.kill_thread"... | [((872, 895), 'item.ItemFinder', 'ItemFinder', (['self.config'], {}), '(self.config)\n', (882, 895), False, 'from item import ItemFinder\n'), ((918, 956), 'screen.Screen', 'Screen', (["self.config.general['monitor']"], {}), "(self.config.general['monitor'])\n", (924, 956), False, 'from screen import Screen\n'), ((988, 1015), 'template_finder.TemplateFinder', 'TemplateFinder', (['self.screen'], {}), '(self.screen)\n', (1002, 1015), False, 'from template_finder import TemplateFinder\n'), ((1047, 1089), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.run_debugger'}), '(target=self.run_debugger)\n', (1063, 1089), False, 'import threading\n'), ((3133, 3141), 'config.Config', 'Config', ([], {}), '()\n', (3139, 3141), False, 'from config import Config\n'), ((1277, 1310), 'utils.misc.kill_thread', 'kill_thread', (['self.debugger_thread'], {}), '(self.debugger_thread)\n', (1288, 1310), False, 'from utils.misc import color_filter, kill_thread\n'), ((1589, 1623), 'numpy.zeros', 'np.zeros', (['img.shape'], {'dtype': '"""uint8"""'}), "(img.shape, dtype='uint8')\n", (1597, 1623), True, 'import numpy as np\n'), ((2867, 2913), 'cv2.resize', 'cv2.resize', (['combined_img', 'None'], {'fx': '(0.5)', 'fy': '(0.5)'}), '(combined_img, None, fx=0.5, fy=0.5)\n', (2877, 2913), False, 'import cv2\n'), ((2926, 2963), 'cv2.imshow', 'cv2.imshow', (['"""debug img"""', 'combined_img'], {}), "('debug img', combined_img)\n", (2936, 2963), False, 'import cv2\n'), ((2976, 3035), 'cv2.setWindowProperty', 'cv2.setWindowProperty', (['"""debug img"""', 'cv2.WND_PROP_TOPMOST', '(1)'], {}), "('debug img', cv2.WND_PROP_TOPMOST, 1)\n", (2997, 3035), False, 'import cv2\n'), ((3048, 3062), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3059, 3062), False, 'import cv2\n'), ((1700, 1742), 'utils.misc.color_filter', 'color_filter', (['img', 'self.config.colors[key]'], {}), '(img, self.config.colors[key])\n', (1712, 1742), False, 'from utils.misc import 
color_filter, kill_thread\n'), ((1774, 1815), 'cv2.bitwise_or', 'cv2.bitwise_or', (['filterd_img', 'combined_img'], {}), '(filterd_img, combined_img)\n', (1788, 1815), False, 'import cv2\n'), ((1920, 1976), 'cv2.circle', 'cv2.circle', (['combined_img', 'item.center', '(7)', '(0, 0, 255)', '(4)'], {}), '(combined_img, item.center, 7, (0, 0, 255), 4)\n', (1930, 1976), False, 'import cv2\n'), ((1993, 2110), 'cv2.putText', 'cv2.putText', (['combined_img', 'item.name', 'item.center', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(255, 255, 255)', '(1)', 'cv2.LINE_AA'], {}), '(combined_img, item.name, item.center, cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (255, 255, 255), 1, cv2.LINE_AA)\n', (2004, 2110), False, 'import cv2\n'), ((2676, 2754), 'cv2.circle', 'cv2.circle', (['combined_img', 'template_match.position', '(7)', '(255, 0, 0)'], {'thickness': '(5)'}), '(combined_img, template_match.position, 7, (255, 0, 0), thickness=5)\n', (2686, 2754), False, 'import cv2\n')] |
#!/usr/bin/env python
###########################################################################
##
## The class to handle P3D simulation data in python. An object is created
## that has the run parameters attached with it. The basic usage of the
## class can be done in the following ways.
##
## Example 1:
## > from p3do import p3do
## > r1=p3do('OTV')
## > r1.vars2load(['bx','by','bz'])
## > for i in range(20):
## > r1.loadslice(i)
## > DO WHATEVER!!!
## > r1.fin()
##
##
## Example 2:
## > from p3do import p3do
## > r1=p3do('OTV')
## > bxf=open('bx','rb')
## > bx=r1.readslice(bxf,10)
## > DO WHATEVER!!!
## > r1.fin()
##
## Methods in the class:
## 1) self.__init__(): Initialize the run object and load run parameters
## 2) self.print_params(): Print the run parameters
## 3) self.vars2load(): Define what variables to load, create variables
## and open files for reading data.
## 4) self.readslice(): Read a time slice from an open file.
## 5) self.loadslice(): Load a snapshot of time for all the defined variables.
## 6)
##
## <NAME>
## 2014/08/23
## Hat tip to ColbyCH for teaching me the basics of classes in python.
##
###########################################################################
import numpy as np
from subprocess import getstatusoutput as syscomout
from os.path import basename, realpath, exists
import TurbAn.Analysis.Simulations as af
from scipy.ndimage import gaussian_filter as gf
class p3do(object):
   """p3do object:
      Tulasi Parashar's version of python library
      to read P3D data from older versions of the code.
      Created on 08/22/2014
      Last modified on 02/26/2016
   """
   def __init__(self,shelldirname="",code_type="",data_type="",filenum=""):
      """Locate the run directory and paramfile, load run parameters and,
         for byte/double-byte data, open the movie log file.
         Any argument left empty is prompted for interactively.
      """
      # If no rundir specified
      if len(shelldirname) == 0: # Start init prompt
         shelldirname = input('Please enter the rundir: ')
      self.rundir = realpath(shelldirname)
      self.dirname= basename(realpath(shelldirname))
      # If code type not specified
      if len(code_type) == 0:
         self.code_type=input("GIT or PIC or Hybrid? (type g or p or h): ")
      else:
         self.code_type = code_type
      # If Git version of the code AND filenum not given
      if self.code_type == 'g' and len(filenum) == 0:
         self.filenum=input("Please enter the file number to load (e.g. 000): ")
      else:
         self.filenum=filenum
      # If data type not specified
      if len(data_type) == 0:
         self.data_type=input("Data Type? [(b)yte/ double (bb)yte/ (f)our byte/ (d)ouble precision] ")
      else:
         self.data_type = data_type
      # Check where the paramfile is
      if exists(self.rundir+'/param_'+self.dirname):
         self.paramfile=self.rundir+'/param_'+self.dirname
      elif exists(self.rundir+'/staging/param_'+self.dirname):
         self.paramfile=self.rundir+'/staging/param_'+self.dirname
      elif exists(self.rundir+'/paramfile'):
         self.paramfile=self.rundir+'/paramfile'
      else:
         raise ValueError('Paramfile not found in '+self.dirname)
      # load parameters
      self.__loadparams__()
      # If byte or double byte data, open the log file.
      if self.data_type in ("b", "bb"):
         print(self.rundir+'/staging/movie.log.'+self.filenum)
         self.logfile=open(self.rundir+"/staging/movie.log."+self.filenum,"r")
         self.logvars=['rho', 'jx', 'jy', 'jz', 'bx', 'by', 'bz', 'ex', 'ey'\
         , 'ez', 'ne', 'jex', 'jey', 'jez', 'pexx', 'peyy', 'pezz', 'pexy', \
         'peyz', 'pexz', 'ni', 'jix', 'jiy', 'jiz', 'pixx', 'piyy', 'pizz', \
         'pixy', 'piyz', 'pixz']
         # BUGFIX: bare size() is undefined in this module (numpy is
         # imported as np, and pylab is not imported) -- use np.size.
         self.szl=np.size(self.logvars)
      # BUGFIX: compare against self.code_type; the code_type argument is
      # empty when the value was prompted for interactively above, so the
      # hybrid file-name mapping was silently skipped in that case.
      if self.code_type == "h":
         self.primitives=['bx','by','bz','jix','jiy','jiz','jx','jy','jz','ni'\
         ,'pixx','piyy','pizz','pixy','pixz','piyz','pe']
         # mapping from canonical variable names to hybrid-code file names
         self.fls2l={'bx':'bx','by':'by','bz':'bz','jix':'jix','jiy':'jiy',\
         'jiz':'jiz','jx':'jtotx','jy':'jtoty','jz':'jtotz','ni':'n','pixx':'pxx',\
         'piyy':'pyy','pizz':'pzz','pixy':'pxy','pixz':'pxz','piyz':'pyz','pe':'pe'}
   ###
   ### Method to load parameters
   ###
   def __loadparams__(self):
      """Parse the #define'd run parameters out of the paramfile (via grep/awk)
         and derive grid spacings, axes and plasma parameters from them.
      """
      ## Parameters to load
      self.params=['pex','pey','pez','nx','ny','nz','lx','ly','lz',\
      'movieout_full','dt','T_i','T_e','n_0','b0x','b0y','b0z','m_e'\
      ,'d_e2','c_2','ppg']
      # Read parameters from file; parameters absent from the file are 0.
      for i in self.params:
         comm="awk '/^#define/ && / "+i+" / {print $3}' "+self.paramfile
         if syscomout("grep "+i+" "+self.paramfile)[0] == 0:
            exec('self.'+i+'=float(syscomout(comm)[1])')
         else:
            exec('self.'+i+'=float(0.)')
      ## For hybrid code, set electron mass to extremely small
      ## and speed of light to extremely large.
      HYBRID=syscomout("grep 'define hybrid' "+self.paramfile+" |awk '{print $2}'")[1]
      if HYBRID != '':
         self.m_e = self.d_e2
         self.c_2 = 1e9
      self.dtmovie=self.movieout_full
      # Derive some others: total grid sizes, spacings, axes (real and 2*pi
      # normalized), mean field magnitude, betas and electron inertial length.
      self.nx=int(self.pex*self.nx)
      self.ny=int(self.pey*self.ny)
      self.nz=int(self.pez*self.nz)
      self.dx=self.lx/self.nx
      self.dy=self.ly/self.ny
      self.dz=self.lz/self.nz
      self.xx=np.linspace(0.,self.lx,self.nx)
      self.yy=np.linspace(0.,self.ly,self.ny)
      self.zz=np.linspace(0.,self.lz,self.nz)
      self.xxt=np.linspace(0.,2*np.pi,self.nx)
      self.yyt=np.linspace(0.,2*np.pi,self.ny)
      self.zzt=np.linspace(0.,2*np.pi,self.nz)
      self.B0 =np.sqrt(self.b0x**2+self.b0y**2+self.b0z**2)
      self.betai = 2*self.n_0*self.T_i/self.B0**2
      self.betae = 2*self.n_0*self.T_e/self.B0**2
      self.nprocs = int(self.pex*self.pey*self.pez)
      self.lambdae=np.sqrt(self.T_e/(self.n_0*self.c_2))
   ###
   #### Method to print the parameters associated with the run
   ####
   def print_params(self):
      """
      A quick method to print the parameters and variables attached with
      the p3d run object.
      """
      # BUGFIX: the original exec'd Python-2 print syntax
      # ('print i," = ",...'), which raises SyntaxError at runtime on
      # Python 3.  Use getattr with a normal print call instead.
      for i in self.params:
         print(i, " = ", getattr(self, i))
   ####
   #### Method to read a particular time slice from an open file.
   ####
   def readslice(self,f,timeslice,v=""):
      """
      This method reads a particular slice of time from a given file. The
      explicit inputs are file name, time slice and data type. It is used
      as:
            output=self.readslice(filename,time)
      v is the variable name, needed only for 'b'/'bb' data to look up the
      min/max scaling read from the movie log.
      """
      ### Byte data #########################################################
      if self.data_type == 'b':
         f.seek(timeslice*self.nx*self.ny*self.nz)
         field = np.fromfile(f,dtype='int8',count=self.nx*self.ny*self.nz)
         field = np.reshape(field,(self.nx,self.ny,self.nz),order='F')
         # BUGFIX: exec cannot bind a function-local variable on Python 3;
         # the original exec('minmax=self.'+v+'minmax[timeslice]') left
         # minmax undefined.  Use getattr instead.
         minmax = getattr(self, v+'minmax')[timeslice]
         field = minmax[0]+(minmax[1]-minmax[0])*field/255.
      ### Double byte data #################################################
      elif self.data_type == 'bb':
         f.seek(2*timeslice*self.nx*self.ny*self.nz)
         field = np.fromfile(f,dtype='int16',count=self.nx*self.ny*self.nz)
         field = np.reshape(field,(self.nx,self.ny,self.nz),order='F')
         # BUGFIX: same exec-local issue as above.
         minmax = getattr(self, v+'minmax')[timeslice]
         # NOTE(review): 32678 looks like a typo for 32768 (int16 offset);
         # kept as-is to preserve numerical output -- confirm with author.
         field = minmax[0]+(minmax[1]-minmax[0])*(field+32678.)/65535.
      ### Four byte (single precision) data ################################
      elif self.data_type == 'f':
         f.seek(4*timeslice*self.nx*self.ny*self.nz)
         field = np.fromfile(f,dtype='float32',count=self.nx*self.ny*self.nz)
         field = np.reshape(field,(self.nx,self.ny,self.nz),order='F')
      ### Double precision data ############################################
      elif self.data_type == 'd':
         f.seek(8*timeslice*self.nx*self.ny*self.nz)
         field = np.fromfile(f,dtype='float64',count=self.nx*self.ny*self.nz)
         field = np.reshape(field,(self.nx,self.ny,self.nz),order='F')
      return field
   ####
   #### Method to define the variables to load, create variables and
   #### open corresponding files.
   ####
   def vars2load(self,v2l):
      """
      Define the variables to load, define corresponding numpy arrays &
      open the files.  v2l may be an explicit list of variable names, or
      the single-element shortcuts ['min'] / ['all'].
      """
      if len(v2l) == 1:
         if v2l[0] == 'min':
            if self.code_type in ('p', 'g'):
               self.vars2l=['bx','by','bz','jix','jiy','jiz','ni']
            else:
               self.vars2l=['bx','by','bz','jix','jiy','jiz','n']
         elif v2l[0] == 'all':
            if self.code_type in ('p', 'g'):
               self.vars2l=['bx','by','bz','ex','ey','ez','jix','jiy','jiz','jex','jey'\
               ,'jez','jx','jy','jz','ni','pixx','piyy','pizz','pixy','pixz','piyz'\
               ,'pexx','pexy','pexz','peyy','peyz','pezz','ne','rho']
            else:
               self.vars2l=['bx','by','bz','jix','jiy','jiz','jx','jy','jz','ni'\
               ,'pixx','piyy','pizz','pixy','pixz','piyz','pe']
         else:
            self.vars2l=v2l
      else:
         self.vars2l = v2l
      # Create arrays and open files (attribute assignment via exec works
      # on Python 3; only local-variable binding does not).
      for i in self.vars2l:
         exec('self.'+i+'=np.array(('+str(self.nx)+','+str(self.ny)+','+str(self.nz)+'))')
         if self.code_type == 'p':
            exec('self.'+i+'f=open("'+self.rundir+'/FPData/'+i+'","rb")')
         elif self.code_type == 'h':
            exec('self.'+i+'f=open("'+self.rundir+'/FPData/'+self.fls2l[i]+'","rb")')
         elif self.code_type == 'g':
            exec('self.'+i+'f=open("'+self.rundir+'/staging/movie.'+i+'.'+self.filenum+'","rb")')
      # If 'b' or 'bb' data type, load minmax for each loaded variable
      if self.data_type in ('b', 'bb'):
         # BUGFIX: bare loadtxt() is undefined here -- use np.loadtxt.
         d=np.loadtxt(self.logfile)
         for i in self.vars2l:
            exec('self.'+i+'minmax=d[self.logvars.index("'+i+'")::len(self.logvars),:]')
      # Find out the number of slices for the open file
      self.__dict__[self.vars2l[0]+'f'].seek(0,2)
      filesize=self.__dict__[self.vars2l[0]+'f'].tell()
      numbersize=2**['b','bb','f','d'].index(self.data_type)
      # BUGFIX: integer division -- numslices is a count (on Python 3 the
      # original '/' produced a float).
      self.numslices=filesize//(self.nx*self.ny*self.nz*numbersize)
   ####
   #### Method to load time slices for the loaded variables.
   ####
   def loadslice(self,it,smth=None):
      """
      Load the variables initialized by self.vars2load() for time slice
      it; optionally Gaussian-smooth each field with sigma=smth.  Also
      fills self.mmd with each field's [min, max] and sets self.time.
      """
      if self.data_type in ('b', 'bb'):
         for i in self.vars2l:
            exec('self.'+i+'=self.readslice(self.'+i+'f,'+str(it)+',"'+i+'")')
      else:
         for i in self.vars2l:
            exec('self.'+i+'=self.readslice(self.'+i+'f,'+str(it)+')')
      self.mmd={}
      for i in self.vars2l:
         if smth is not None:
            exec('self.'+i+'=gf(self.'+i+',sigma='+str(smth)+')')
         exec('self.mmd["'+i+'"]=[self.'+i+'.min(),self.'+i+'.max()]')
      self.time=it*self.movieout_full
   ####
   #### Method to add attributes to the object
   ####
   def addattr(self,key,val):
      """Attach each val[j] to the object under the name key[j]; ndarray
         values also get a [min, max] entry in self.mmd.
      """
      for i in key:
         print('Adding '+i)
         comm='self.'+i+'=val[key.index("'+i+'")]'; exec(comm)
         if isinstance(val[key.index(i)],np.ndarray):
            exec('self.mmd["'+i+'"]=[self.'+i+'.min(),self.'+i+'.max()]')
   ####
   #### Method to compute derived quantities
   ####
   def computevars(self,v2c,smth=None):
      """Compute derived quantities named in v2c (e.g. 'tempi', 'tempe',
         'vi', 've', 'omi', 'ome', 'dui', 'due', 'zpzm', 'udgpi', 'udgpe')
         from the already-loaded primitive fields.
      """
      if 'tempi' in v2c:
         self.tix = self.pixx/self.ni
         self.tiy = self.piyy/self.ni
         self.tiz = self.pizz/self.ni
         self.ti  = (self.tix+self.tiy+self.tiz)/3.
      if 'tempe' in v2c:
         if self.code_type == 'h':
            self.te = self.pe/self.ni
         else:
            self.tex = self.pexx/self.ne
            self.tey = self.peyy/self.ne
            self.tez = self.pezz/self.ne
            self.te  = (self.tex+self.tey+self.tez)/3.
      if any([1 for i in ['vi','ve','omi','dui','zpzm','udgpi','udgpe','ome','due'] if i in v2c]):
         # ion bulk velocity, needed by all of the quantities above
         self.vix = self.jix/self.ni
         self.viy = self.jiy/self.ni
         self.viz = self.jiz/self.ni
         if 'omi' in v2c:
            self.omxi,self.omyi,self.omzi = af.pcurl(self.vix,self.viy,\
            self.viz,dx=self.dx,dy=self.dy,dz=self.dz,smth=smth)
         if 'dui' in v2c:
            self.dui = af.pdiv(self.vix,self.viy,self.viz,dx=self.dx,dy=self.dy,dz=self.dz,smth=smth)
         if 'udgpi' in v2c:
            self.udgpi = self.vix*af.pdiv(self.pixx, self.pixy, self.pixz, dx=self.dx,dy=self.dy,dz=self.dz,smth=smth) +\
                         self.viy*af.pdiv(self.pixy, self.piyy, self.piyz, dx=self.dx,dy=self.dy,dz=self.dz,smth=smth) +\
                         self.viz*af.pdiv(self.pixz, self.piyz, self.pizz, dx=self.dx,dy=self.dy,dz=self.dz,smth=smth)
         if any([1 for i in ['ve','ome', 'due','zpzm','udgpe'] if i in v2c]):
            # electron bulk velocity: from Ampere's law for hybrid,
            # directly from the electron current otherwise
            if self.code_type == 'h':
               self.vex = self.vix - self.jx/self.ni
               self.vey = self.viy - self.jy/self.ni
               self.vez = self.viz - self.jz/self.ni
               if 'udgpe' in v2c:
                  gp = af.pgrad(self.pe, dx=self.dx,dy=self.dy,dz=self.dz,smth=smth)
                  self.udgpe = self.vex*gp[0] + self.vey*gp[1] + self.vez*gp[2]
            else:
               self.vex = -self.jex/self.ne
               self.vey = -self.jey/self.ne
               self.vez = -self.jez/self.ne
               if 'udgpe' in v2c:
                  self.udgpe = self.vex*af.pdiv(self.pexx, self.pexy, self.pexz, dx=self.dx,dy=self.dy,dz=self.dz,smth=smth) +\
                               self.vey*af.pdiv(self.pexy, self.peyy, self.peyz, dx=self.dx,dy=self.dy,dz=self.dz,smth=smth) +\
                               self.vez*af.pdiv(self.pexz, self.peyz, self.pezz, dx=self.dx,dy=self.dy,dz=self.dz,smth=smth)
            if 'ome' in v2c:
               self.omxe,self.omye,self.omze = af.pcurl(self.vex,self.vey,\
               self.vez,dx=self.dx,dy=self.dy,dz=self.dz,smth=smth)
            if 'due' in v2c:
               self.due = af.pdiv(self.vex,self.vey,self.vez,dx=self.dx,dy=self.dy,dz=self.dz,smth=smth)
            if 'zpzm' in v2c:
               # Elsasser variables z+/z- from the centre-of-mass velocity
               cmx = (self.vix+self.m_e*self.vex)/(1+self.m_e)
               cmy = (self.viy+self.m_e*self.vey)/(1+self.m_e)
               cmz = (self.viz+self.m_e*self.vez)/(1+self.m_e)
               den = self.ni+self.m_e*self.ne
               self.zpx = self.bx/np.sqrt(den) + cmx
               self.zpy = self.by/np.sqrt(den) + cmy
               self.zpz = self.bz/np.sqrt(den) + cmz
               self.zmx = self.bx/np.sqrt(den) - cmx
               self.zmy = self.by/np.sqrt(den) - cmy
               self.zmz = self.bz/np.sqrt(den) - cmz
   ####
   #### Method to close opened files.
   ####
   def fin(self):
      """
      close the run files.
      """
      for i in self.vars2l:
         exec('self.'+i+'f.close()')
   ####
   #### Load energies for the run
   ####
   def loadenergies(self):
      """Read Energies.dat and attach each column as an attribute, then
         derive combined energies and nonlinear-time axes.
      """
      if self.code_type in ('p','g'):
         self.evars=['t', 'eges', 'ebx' , 'eby' , 'ebz' , 'eex' , 'eey' , 'eez' ,\
         'eem' , 'ekix', 'ekiy', 'ekiz', 'ekex', 'ekey', 'ekez', 'ekin', 'eifx', \
         'eify', 'eifz', 'eefx', 'eefy', 'eefz', 'eipx', 'eipy', 'eipz', 'eepx', \
         'eepy', 'eepz']
      if self.code_type == 'h':
         self.evars=['t','eges','eb','ebx','eby','ebz','eef','ION','ekix','ekiy',\
         'ekiz','eif','eifx','eify','eifz','eep','eip','eipx','eipy','eipz','VIP','VIX',\
         'VIY','VIZ','BIP','BIX','BIY','BIZ','BNU','ETA']
      data=np.loadtxt(self.rundir+'/Energies.dat')
      for i in range(np.size(self.evars)):
         exec('self.'+self.evars[i]+'=data[:,'+str(i)+']')
      self.eb0=0.5*(self.b0x**2+self.b0y**2+self.b0z**2)
      if self.code_type in ('p','g'):
         self.eb =self.ebx +self.eby +self.ebz
         self.eip=self.eipx+self.eipy+self.eipz
         self.eep=self.eepx+self.eepy+self.eepz
         self.eif=self.eifx+self.eify+self.eifz
         self.eef=self.eefx+self.eefy+self.eefz
         self.ee =self.eex +self.eey +self.eez
         self.edz=self.eb-self.eb0+self.eif+self.eef
         self.tnl=self.t*np.sqrt(2*self.edz[0])*2*np.pi/self.lx
         self.ltnl=self.lx/(np.sqrt(self.edz)*4*np.pi)
         self.ta=np.zeros(len(self.eb))
         for i in range(1,len(self.eb)):
            self.ta[i]=self.ta[i-1]+self.dt*2./(self.ltnl[i-1]+self.ltnl[i])
| [
"TurbAn.Analysis.Simulations.pgrad",
"os.path.exists",
"numpy.fromfile",
"numpy.sqrt",
"numpy.reshape",
"TurbAn.Analysis.Simulations.pcurl",
"numpy.size",
"os.path.realpath",
"numpy.linspace",
"TurbAn.Analysis.Simulations.pdiv",
"numpy.loadtxt",
"subprocess.getstatusoutput"
] | [((1982, 2004), 'os.path.realpath', 'realpath', (['shelldirname'], {}), '(shelldirname)\n', (1990, 2004), False, 'from os.path import basename, realpath, exists\n'), ((2744, 2790), 'os.path.exists', 'exists', (["(self.rundir + '/param_' + self.dirname)"], {}), "(self.rundir + '/param_' + self.dirname)\n", (2750, 2790), False, 'from os.path import basename, realpath, exists\n'), ((5290, 5324), 'numpy.linspace', 'np.linspace', (['(0.0)', 'self.lx', 'self.nx'], {}), '(0.0, self.lx, self.nx)\n', (5301, 5324), True, 'import numpy as np\n'), ((5336, 5370), 'numpy.linspace', 'np.linspace', (['(0.0)', 'self.ly', 'self.ny'], {}), '(0.0, self.ly, self.ny)\n', (5347, 5370), True, 'import numpy as np\n'), ((5382, 5416), 'numpy.linspace', 'np.linspace', (['(0.0)', 'self.lz', 'self.nz'], {}), '(0.0, self.lz, self.nz)\n', (5393, 5416), True, 'import numpy as np\n'), ((5429, 5465), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2 * np.pi)', 'self.nx'], {}), '(0.0, 2 * np.pi, self.nx)\n', (5440, 5465), True, 'import numpy as np\n'), ((5476, 5512), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2 * np.pi)', 'self.ny'], {}), '(0.0, 2 * np.pi, self.ny)\n', (5487, 5512), True, 'import numpy as np\n'), ((5523, 5559), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2 * np.pi)', 'self.nz'], {}), '(0.0, 2 * np.pi, self.nz)\n', (5534, 5559), True, 'import numpy as np\n'), ((5570, 5624), 'numpy.sqrt', 'np.sqrt', (['(self.b0x ** 2 + self.b0y ** 2 + self.b0z ** 2)'], {}), '(self.b0x ** 2 + self.b0y ** 2 + self.b0z ** 2)\n', (5577, 5624), True, 'import numpy as np\n'), ((5786, 5827), 'numpy.sqrt', 'np.sqrt', (['(self.T_e / (self.n_0 * self.c_2))'], {}), '(self.T_e / (self.n_0 * self.c_2))\n', (5793, 5827), True, 'import numpy as np\n'), ((15604, 15645), 'numpy.loadtxt', 'np.loadtxt', (["(self.rundir + '/Energies.dat')"], {}), "(self.rundir + '/Energies.dat')\n", (15614, 15645), True, 'import numpy as np\n'), ((2034, 2056), 'os.path.realpath', 'realpath', (['shelldirname'], {}), 
'(shelldirname)\n', (2042, 2056), False, 'from os.path import basename, realpath, exists\n'), ((2858, 2912), 'os.path.exists', 'exists', (["(self.rundir + '/staging/param_' + self.dirname)"], {}), "(self.rundir + '/staging/param_' + self.dirname)\n", (2864, 2912), False, 'from os.path import basename, realpath, exists\n'), ((4861, 4935), 'subprocess.getstatusoutput', 'syscomout', (['("grep \'define hybrid\' " + self.paramfile + " |awk \'{print $2}\'")'], {}), '("grep \'define hybrid\' " + self.paramfile + " |awk \'{print $2}\'")\n', (4870, 4935), True, 'from subprocess import getstatusoutput as syscomout\n'), ((6649, 6712), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""int8"""', 'count': '(self.nx * self.ny * self.nz)'}), "(f, dtype='int8', count=self.nx * self.ny * self.nz)\n", (6660, 6712), True, 'import numpy as np\n'), ((6724, 6781), 'numpy.reshape', 'np.reshape', (['field', '(self.nx, self.ny, self.nz)'], {'order': '"""F"""'}), "(field, (self.nx, self.ny, self.nz), order='F')\n", (6734, 6781), True, 'import numpy as np\n'), ((15665, 15684), 'numpy.size', 'np.size', (['self.evars'], {}), '(self.evars)\n', (15672, 15684), True, 'import numpy as np\n'), ((2988, 3022), 'os.path.exists', 'exists', (["(self.rundir + '/paramfile')"], {}), "(self.rundir + '/paramfile')\n", (2994, 3022), False, 'from os.path import basename, realpath, exists\n'), ((7072, 7136), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""int16"""', 'count': '(self.nx * self.ny * self.nz)'}), "(f, dtype='int16', count=self.nx * self.ny * self.nz)\n", (7083, 7136), True, 'import numpy as np\n'), ((7148, 7205), 'numpy.reshape', 'np.reshape', (['field', '(self.nx, self.ny, self.nz)'], {'order': '"""F"""'}), "(field, (self.nx, self.ny, self.nz), order='F')\n", (7158, 7205), True, 'import numpy as np\n'), ((12164, 12253), 'TurbAn.Analysis.Simulations.pcurl', 'af.pcurl', (['self.vix', 'self.viy', 'self.viz'], {'dx': 'self.dx', 'dy': 'self.dy', 'dz': 'self.dz', 'smth': 'smth'}), '(self.vix, 
self.viy, self.viz, dx=self.dx, dy=self.dy, dz=self.dz,\n smth=smth)\n', (12172, 12253), True, 'import TurbAn.Analysis.Simulations as af\n'), ((12307, 12395), 'TurbAn.Analysis.Simulations.pdiv', 'af.pdiv', (['self.vix', 'self.viy', 'self.viz'], {'dx': 'self.dx', 'dy': 'self.dy', 'dz': 'self.dz', 'smth': 'smth'}), '(self.vix, self.viy, self.viz, dx=self.dx, dy=self.dy, dz=self.dz,\n smth=smth)\n', (12314, 12395), True, 'import TurbAn.Analysis.Simulations as af\n'), ((4573, 4618), 'subprocess.getstatusoutput', 'syscomout', (["('grep ' + i + ' ' + self.paramfile)"], {}), "('grep ' + i + ' ' + self.paramfile)\n", (4582, 4618), True, 'from subprocess import getstatusoutput as syscomout\n'), ((7506, 7572), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""float32"""', 'count': '(self.nx * self.ny * self.nz)'}), "(f, dtype='float32', count=self.nx * self.ny * self.nz)\n", (7517, 7572), True, 'import numpy as np\n'), ((7584, 7641), 'numpy.reshape', 'np.reshape', (['field', '(self.nx, self.ny, self.nz)'], {'order': '"""F"""'}), "(field, (self.nx, self.ny, self.nz), order='F')\n", (7594, 7641), True, 'import numpy as np\n'), ((13988, 14077), 'TurbAn.Analysis.Simulations.pcurl', 'af.pcurl', (['self.vex', 'self.vey', 'self.vez'], {'dx': 'self.dx', 'dy': 'self.dy', 'dz': 'self.dz', 'smth': 'smth'}), '(self.vex, self.vey, self.vez, dx=self.dx, dy=self.dy, dz=self.dz,\n smth=smth)\n', (13996, 14077), True, 'import TurbAn.Analysis.Simulations as af\n'), ((14140, 14228), 'TurbAn.Analysis.Simulations.pdiv', 'af.pdiv', (['self.vex', 'self.vey', 'self.vez'], {'dx': 'self.dx', 'dy': 'self.dy', 'dz': 'self.dz', 'smth': 'smth'}), '(self.vex, self.vey, self.vez, dx=self.dx, dy=self.dy, dz=self.dz,\n smth=smth)\n', (14147, 14228), True, 'import TurbAn.Analysis.Simulations as af\n'), ((16257, 16274), 'numpy.sqrt', 'np.sqrt', (['self.edz'], {}), '(self.edz)\n', (16264, 16274), True, 'import numpy as np\n'), ((7819, 7885), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 
'"""float64"""', 'count': '(self.nx * self.ny * self.nz)'}), "(f, dtype='float64', count=self.nx * self.ny * self.nz)\n", (7830, 7885), True, 'import numpy as np\n'), ((7897, 7954), 'numpy.reshape', 'np.reshape', (['field', '(self.nx, self.ny, self.nz)'], {'order': '"""F"""'}), "(field, (self.nx, self.ny, self.nz), order='F')\n", (7907, 7954), True, 'import numpy as np\n'), ((12692, 12783), 'TurbAn.Analysis.Simulations.pdiv', 'af.pdiv', (['self.pixz', 'self.piyz', 'self.pizz'], {'dx': 'self.dx', 'dy': 'self.dy', 'dz': 'self.dz', 'smth': 'smth'}), '(self.pixz, self.piyz, self.pizz, dx=self.dx, dy=self.dy, dz=self.dz,\n smth=smth)\n', (12699, 12783), True, 'import TurbAn.Analysis.Simulations as af\n'), ((13203, 13267), 'TurbAn.Analysis.Simulations.pgrad', 'af.pgrad', (['self.pe'], {'dx': 'self.dx', 'dy': 'self.dy', 'dz': 'self.dz', 'smth': 'smth'}), '(self.pe, dx=self.dx, dy=self.dy, dz=self.dz, smth=smth)\n', (13211, 13267), True, 'import TurbAn.Analysis.Simulations as af\n'), ((14501, 14513), 'numpy.sqrt', 'np.sqrt', (['den'], {}), '(den)\n', (14508, 14513), True, 'import numpy as np\n'), ((14551, 14563), 'numpy.sqrt', 'np.sqrt', (['den'], {}), '(den)\n', (14558, 14563), True, 'import numpy as np\n'), ((14601, 14613), 'numpy.sqrt', 'np.sqrt', (['den'], {}), '(den)\n', (14608, 14613), True, 'import numpy as np\n'), ((14651, 14663), 'numpy.sqrt', 'np.sqrt', (['den'], {}), '(den)\n', (14658, 14663), True, 'import numpy as np\n'), ((14701, 14713), 'numpy.sqrt', 'np.sqrt', (['den'], {}), '(den)\n', (14708, 14713), True, 'import numpy as np\n'), ((14751, 14763), 'numpy.sqrt', 'np.sqrt', (['den'], {}), '(den)\n', (14758, 14763), True, 'import numpy as np\n'), ((16193, 16217), 'numpy.sqrt', 'np.sqrt', (['(2 * self.edz[0])'], {}), '(2 * self.edz[0])\n', (16200, 16217), True, 'import numpy as np\n'), ((12448, 12539), 'TurbAn.Analysis.Simulations.pdiv', 'af.pdiv', (['self.pixx', 'self.pixy', 'self.pixz'], {'dx': 'self.dx', 'dy': 'self.dy', 'dz': 'self.dz', 'smth': 'smth'}), 
'(self.pixx, self.pixy, self.pixz, dx=self.dx, dy=self.dy, dz=self.dz,\n smth=smth)\n', (12455, 12539), True, 'import TurbAn.Analysis.Simulations as af\n'), ((12570, 12661), 'TurbAn.Analysis.Simulations.pdiv', 'af.pdiv', (['self.pixy', 'self.piyy', 'self.piyz'], {'dx': 'self.dx', 'dy': 'self.dy', 'dz': 'self.dz', 'smth': 'smth'}), '(self.pixy, self.piyy, self.piyz, dx=self.dx, dy=self.dy, dz=self.dz,\n smth=smth)\n', (12577, 12661), True, 'import TurbAn.Analysis.Simulations as af\n'), ((13826, 13917), 'TurbAn.Analysis.Simulations.pdiv', 'af.pdiv', (['self.pexz', 'self.peyz', 'self.pezz'], {'dx': 'self.dx', 'dy': 'self.dy', 'dz': 'self.dz', 'smth': 'smth'}), '(self.pexz, self.peyz, self.pezz, dx=self.dx, dy=self.dy, dz=self.dz,\n smth=smth)\n', (13833, 13917), True, 'import TurbAn.Analysis.Simulations as af\n'), ((13570, 13661), 'TurbAn.Analysis.Simulations.pdiv', 'af.pdiv', (['self.pexx', 'self.pexy', 'self.pexz'], {'dx': 'self.dx', 'dy': 'self.dy', 'dz': 'self.dz', 'smth': 'smth'}), '(self.pexx, self.pexy, self.pexz, dx=self.dx, dy=self.dy, dz=self.dz,\n smth=smth)\n', (13577, 13661), True, 'import TurbAn.Analysis.Simulations as af\n'), ((13698, 13789), 'TurbAn.Analysis.Simulations.pdiv', 'af.pdiv', (['self.pexy', 'self.peyy', 'self.peyz'], {'dx': 'self.dx', 'dy': 'self.dy', 'dz': 'self.dz', 'smth': 'smth'}), '(self.pexy, self.peyy, self.peyz, dx=self.dx, dy=self.dy, dz=self.dz,\n smth=smth)\n', (13705, 13789), True, 'import TurbAn.Analysis.Simulations as af\n')] |
## imports
import os
import sys
import numpy as np
import cv2
import logging
import imutils
from matplotlib import pyplot as plt
from pathlib import Path
from pylab import array, plot, show, axis, arange, figure, uint8
# setup logger
logger = logging.getLogger(__name__)
# https://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/
def order_points(pts):
    """Order 4 corner points as [top-left, top-right, bottom-right, bottom-left].

    Expects ``pts`` to contain exactly four 2-D points (any shape that
    reshapes to (4, 2)) and returns a float32 array in canonical order.
    """
    pts = pts.reshape((4, 2))
    ordered = np.zeros((4, 2), dtype="float32")
    # The top-left corner has the smallest x+y sum,
    # the bottom-right corner the largest.
    coord_sums = pts.sum(axis=1)
    ordered[0] = pts[np.argmin(coord_sums)]
    ordered[2] = pts[np.argmax(coord_sums)]
    # The top-right corner has the smallest y-x difference,
    # the bottom-left corner the largest.
    coord_diffs = np.diff(pts, axis=1)
    ordered[1] = pts[np.argmin(coord_diffs)]
    ordered[3] = pts[np.argmax(coord_diffs)]
    return ordered
def four_point_transform(image, pts):
    """Warp the quadrilateral described by ``pts`` into a top-down view.

    The four points are first put into canonical order, then the output
    rectangle size is derived from the longest opposing edges, and the
    image is warped with a perspective transform.
    """
    # Canonical corner order: top-left, top-right, bottom-right, bottom-left.
    ordered = order_points(pts)
    (tl, tr, br, bl) = ordered
    # Output width: the longer of the top and bottom edges.
    bottom_width = np.sqrt((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)
    top_width = np.sqrt((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)
    maxWidth = max(int(bottom_width), int(top_width))
    # Output height: the longer of the right and left edges.
    right_height = np.sqrt((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)
    left_height = np.sqrt((tl[0] - bl[0]) ** 2 + (tl[1] - bl[1]) ** 2)
    maxHeight = max(int(right_height), int(left_height))
    # Destination corners in the same tl, tr, br, bl order, giving a
    # "birds eye" (top-down) view of the source quadrilateral.
    dst = np.array(
        [[0, 0],
         [maxWidth - 1, 0],
         [maxWidth - 1, maxHeight - 1],
         [0, maxHeight - 1]],
        dtype="float32")
    # Compute the perspective matrix and apply it.
    transform = cv2.getPerspectiveTransform(ordered, dst)
    return cv2.warpPerspective(image, transform, (maxWidth, maxHeight))
def increase_contrast(img, f=1.8):
    """Boost image contrast with a power-law (gamma-style) mapping.

    Each pixel value v is mapped to ``255 * (v / 255) ** f`` and cast back
    to uint8; for f > 1 mid-tones are pushed down relative to highlights,
    increasing contrast in the bright regions.

    arguments:
        img: uint8 image array.
        f: power-law exponent (default 1.8).

    returns:
        uint8 array of the same shape as ``img``.
    """
    assert img.dtype == 'uint8', "image should be of type 'uint8'"
    # Full-scale value for 8-bit images; we expect integers.
    maxIntensity = 255.0
    newImage = maxIntensity * (img / maxIntensity) ** f
    # Use numpy directly rather than the pylab star-import aliases
    # (`array`, `uint8`), which needlessly pull in matplotlib.
    return np.array(newImage, dtype=np.uint8)
def filter_valid_contours(img, contours, min_perc_area=0.01, max_perc_area=0.04):
    """Keep contours whose area is a sensible fraction of the whole image.

    Among the 30 largest contours, returns those whose area relative to the
    full image lies within [min_perc_area, max_perc_area], together with
    their absolute areas.
    """
    image_area = img.shape[0] * img.shape[1]
    # Absolute area of every candidate contour.
    contour_areas = [cv2.contourArea(c) for c in contours]
    # Pair areas with their contours, largest area first.
    ranked = np.array(sorted(zip(contour_areas, contours), key=lambda pair: pair[0], reverse=True))
    # Restrict attention to the 30 largest candidates.
    top_areas = ranked[:, 0][:30]
    top_contours = ranked[:, 1][:30]
    # Fraction of the image that each candidate covers.
    relative_areas = top_areas / image_area
    # A valid contour covers neither too little nor too much of the image.
    cond_valid_area = (relative_areas >= min_perc_area) & (relative_areas <= max_perc_area)
    return top_contours[cond_valid_area], top_areas[cond_valid_area]
def preprocess(img, erode_kernel=(3, 3), erode_iterations=4):
    """Binarise an input image ready for contour detection.

    Pipeline: contrast boost -> grayscale -> Gaussian blur -> Otsu
    threshold -> elliptical erosion.

    arguments:
        img: BGR uint8 image.
        erode_kernel: size of the elliptical structuring element.
        erode_iterations: number of erosion passes.

    returns:
        (preprocessed, intermediate_steps) where intermediate_steps is
        [contrast, gray, blur, thresh, eroded] for plotting/debugging.
    """
    # increase contrast (power-law mapping)
    contrast = increase_contrast(img)
    # convert to grayscale for thresholding
    gray = cv2.cvtColor(contrast, cv2.COLOR_BGR2GRAY)
    # blur image
    # NOTE(review): a (1, 1) kernel makes this Gaussian blur effectively
    # a no-op regardless of the large sigma -- confirm intended.
    blur = cv2.GaussianBlur(gray, (1, 1), 1000)
    # binarise; threshold value 0 is ignored because Otsu picks its own
    flag, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    logger.info(f"found OTSU threshold: {flag}")
    # erode to separate touching blobs
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, erode_kernel)
    eroded = cv2.erode(thresh, kernel, iterations=erode_iterations)
    # result of preprocessing
    preprocessed = eroded
    # return intermediate steps for plotting
    intermediate_steps = [contrast, gray, blur, thresh, eroded]
    return preprocessed, intermediate_steps
def valid_ratio(short_side, long_side, thresh_low=0.5, thresh_high=0.8):
    """Return True when short_side / long_side lies within [thresh_low, thresh_high]."""
    ratio = short_side / long_side
    if ratio < thresh_low:
        # too elongated
        logger.debug("invalid ratio: image to long")
        return False
    if ratio > thresh_high:
        # too close to square
        logger.debug("invalid ratio: image too square")
        return False
    return True
def identify_images(img, target_size):
    """Locate rectangular sub-images in ``img`` and return them warped upright.

    arguments:
        img: BGR uint8 source image.
        target_size: (width, height) each extracted image is resized to.

    returns:
        (identified_images, bboxes, rejected_bboxes, intermediate_steps):
        float array of resized extracted images, accepted box corners,
        rejected box corners, and the preprocessing steps for debugging.
    """
    # save a copy; contours are found on the preprocessed image but the
    # warp is taken from the original pixels
    img_orig = img.copy()
    # preprocess image (contrast, grayscale, threshold, erode)
    img, intermediate_steps = preprocess(img)
    # find contours; [-2] keeps the contour list across OpenCV 3/4,
    # whose findContours return signatures differ
    contours = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
    # get largest contours within the valid relative-area range
    valid_contours, valid_areas = filter_valid_contours(img, contours)
    if len(valid_contours) == 0:
        # fall back to all contours rather than returning nothing
        logger.warning("no valid contours found, using all contours")
        valid_contours = contours
    identified_images = []
    bboxes = []
    rejected_bboxes = []
    for i, c in enumerate(valid_contours):
        # minimal rotated bounding rectangle of the contour
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect).astype(int)
        warped = four_point_transform(img_orig.copy(), box)
        # get width and height of warped image
        w = warped.shape[1]
        h = warped.shape[0]
        short_side = w if w < h else h
        long_side = w if w > h else h
        # reject boxes whose aspect ratio does not look like a photo
        if not valid_ratio(short_side, long_side, thresh_low=0.47, thresh_high=0.9):
            logger.info(f"reject contour, invalid ratio short/long side: {short_side / long_side}")
            rejected_bboxes.append(box)
            continue
        # make sure image is landscape
        if h >= w:
            warped = imutils.rotate_bound(warped, 90)
        # resize image to target size
        resized = cv2.resize(warped, target_size, interpolation=cv2.INTER_AREA)
        # add image to output array
        identified_images.append(resized)
        bboxes.append(box)
    logger.debug(f"nr warped: {len(identified_images)}, nr valid contours: {len(valid_contours)}")
    return np.array(identified_images).astype(float), np.array(bboxes), np.array(rejected_bboxes), intermediate_steps
| [
"logging.getLogger",
"numpy.sqrt",
"pylab.array",
"numpy.array",
"cv2.warpPerspective",
"cv2.threshold",
"cv2.erode",
"numpy.diff",
"cv2.contourArea",
"cv2.minAreaRect",
"numpy.argmin",
"cv2.boxPoints",
"cv2.getPerspectiveTransform",
"numpy.argmax",
"cv2.cvtColor",
"cv2.resize",
"cv2... | [((247, 274), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (264, 274), False, 'import logging\n'), ((666, 699), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {'dtype': '"""float32"""'}), "((4, 2), dtype='float32')\n", (674, 699), True, 'import numpy as np\n'), ((1097, 1117), 'numpy.diff', 'np.diff', (['pts'], {'axis': '(1)'}), '(pts, axis=1)\n', (1104, 1117), True, 'import numpy as np\n'), ((1618, 1670), 'numpy.sqrt', 'np.sqrt', (['((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)'], {}), '((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)\n', (1625, 1670), True, 'import numpy as np\n'), ((1688, 1740), 'numpy.sqrt', 'np.sqrt', (['((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)'], {}), '((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)\n', (1695, 1740), True, 'import numpy as np\n'), ((1994, 2046), 'numpy.sqrt', 'np.sqrt', (['((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)'], {}), '((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)\n', (2001, 2046), True, 'import numpy as np\n'), ((2065, 2117), 'numpy.sqrt', 'np.sqrt', (['((tl[0] - bl[0]) ** 2 + (tl[1] - bl[1]) ** 2)'], {}), '((tl[0] - bl[0]) ** 2 + (tl[1] - bl[1]) ** 2)\n', (2072, 2117), True, 'import numpy as np\n'), ((2454, 2564), 'numpy.array', 'np.array', (['[[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]]'], {'dtype': '"""float32"""'}), "([[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, \n maxHeight - 1]], dtype='float32')\n", (2462, 2564), True, 'import numpy as np\n'), ((2669, 2707), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['rect', 'dst'], {}), '(rect, dst)\n', (2696, 2707), False, 'import cv2\n'), ((2721, 2773), 'cv2.warpPerspective', 'cv2.warpPerspective', (['image', 'M', '(maxWidth, maxHeight)'], {}), '(image, M, (maxWidth, maxHeight))\n', (2740, 2773), False, 'import cv2\n'), ((3164, 3192), 'pylab.array', 'array', (['newImage'], {'dtype': 'uint8'}), '(newImage, dtype=uint8)\n', (3169, 3192), False, 'from pylab import array, 
plot, show, axis, arange, figure, uint8\n'), ((4300, 4342), 'cv2.cvtColor', 'cv2.cvtColor', (['contrast', 'cv2.COLOR_BGR2GRAY'], {}), '(contrast, cv2.COLOR_BGR2GRAY)\n', (4312, 4342), False, 'import cv2\n'), ((4376, 4412), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(1, 1)', '(1000)'], {}), '(gray, (1, 1), 1000)\n', (4392, 4412), False, 'import cv2\n'), ((4449, 4513), 'cv2.threshold', 'cv2.threshold', (['blur', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (4462, 4513), False, 'import cv2\n'), ((4590, 4648), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', 'erode_kernel'], {}), '(cv2.MORPH_ELLIPSE, erode_kernel)\n', (4615, 4648), False, 'import cv2\n'), ((4662, 4716), 'cv2.erode', 'cv2.erode', (['thresh', 'kernel'], {'iterations': 'erode_iterations'}), '(thresh, kernel, iterations=erode_iterations)\n', (4671, 4716), False, 'import cv2\n'), ((860, 872), 'numpy.argmin', 'np.argmin', (['s'], {}), '(s)\n', (869, 872), True, 'import numpy as np\n'), ((892, 904), 'numpy.argmax', 'np.argmax', (['s'], {}), '(s)\n', (901, 904), True, 'import numpy as np\n'), ((1136, 1151), 'numpy.argmin', 'np.argmin', (['diff'], {}), '(diff)\n', (1145, 1151), True, 'import numpy as np\n'), ((1171, 1186), 'numpy.argmax', 'np.argmax', (['diff'], {}), '(diff)\n', (1180, 1186), True, 'import numpy as np\n'), ((5456, 5517), 'cv2.findContours', 'cv2.findContours', (['img', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (5472, 5517), False, 'import cv2\n'), ((5890, 5908), 'cv2.minAreaRect', 'cv2.minAreaRect', (['c'], {}), '(c)\n', (5905, 5908), False, 'import cv2\n'), ((6629, 6690), 'cv2.resize', 'cv2.resize', (['warped', 'target_size'], {'interpolation': 'cv2.INTER_AREA'}), '(warped, target_size, interpolation=cv2.INTER_AREA)\n', (6639, 6690), False, 'import cv2\n'), ((6963, 6979), 'numpy.array', 'np.array', (['bboxes'], {}), 
'(bboxes)\n', (6971, 6979), True, 'import numpy as np\n'), ((6981, 7006), 'numpy.array', 'np.array', (['rejected_bboxes'], {}), '(rejected_bboxes)\n', (6989, 7006), True, 'import numpy as np\n'), ((3468, 3486), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (3483, 3486), False, 'import cv2\n'), ((6539, 6571), 'imutils.rotate_bound', 'imutils.rotate_bound', (['warped', '(90)'], {}), '(warped, 90)\n', (6559, 6571), False, 'import imutils\n'), ((5923, 5942), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (5936, 5942), False, 'import cv2\n'), ((6920, 6947), 'numpy.array', 'np.array', (['identified_images'], {}), '(identified_images)\n', (6928, 6947), True, 'import numpy as np\n')] |
import tkinter as tk
from PIL import ImageTk, Image
from os import listdir
import cv2
import numpy as np
# Main application window.
root=tk.Tk()
root.title("DataX: Team OST, Plastic Part Matching")
#Init database
# NOTE(review): hard-coded, machine-specific Windows paths below -- these
# must be made configurable before running on another machine.
path=r"C:\Users\tobias.grab\IWK_data\test"
files=listdir(path)
nrOfFiles=len(files)
# Brute-force descriptor matcher (a fresh one is also created inside calc()).
bf = cv2.BFMatcher()
# fast==1: load a pre-saved numpy stack of the database images;
# otherwise read each image file individually with OpenCV.
fast=1
if fast==1:
    img_database=np.load(r"C:\Users\tobias.grab\IWK_data\savedArrays\img_database.npy")
    # split the 3-D stack into a list of 2-D grayscale images, one per file
    img_database=[img_database[i,:,:] for i in range(len(files))]
else:
    img_database=[cv2.imread(path+'\\'+file,0) for file in files]
# Pre-render 320x240 Tk-compatible thumbnails of every database image.
img_database_pillow=[ImageTk.PhotoImage(Image.fromarray(img).resize((320, 240),Image.ANTIALIAS)) for img in img_database]
def open_file():
    """Ask the user for a query image, match it against the database with the
    selected feature algorithm, animate the per-image matches, then show the
    three best matches."""
    from tkinter.filedialog import askopenfilename
    file_path = askopenfilename(title=u'select file')
    name=file_path.split("/")[-1]
    # load the query image as grayscale
    img_to_match=cv2.imread(file_path,0)
    img_to_match_pillow=ImageTk.PhotoImage(Image.fromarray(img_to_match).resize((320, 240),Image.ANTIALIAS))
    # pick the feature detector selected via the radio buttons
    # NOTE(review): option 1 is labelled 'SIFT' in the UI but creates a SURF
    # detector -- likely should be a SIFT_create() call; confirm.
    if v.get()==1:
        ALG=cv2.xfeatures2d.SURF_create()
    elif v.get()==2:
        ALG=cv2.xfeatures2d.SURF_create()
    elif v.get()==3:
        ALG=cv2.BRISK_create()
    elif v.get()==4:
        ALG=cv2.AKAZE_create()
    elif v.get()==5:
        ALG=cv2.KAZE_create()
    # (keypoints, descriptors) for every database image
    img_database_fts=[ALG.detectAndCompute(img, None) for img in img_database]
    # database thumbnails with their keypoints drawn on top
    draw_database=[ImageTk.PhotoImage(Image.fromarray(
        cv2.drawKeypoints(img_from_database, img_database_fts[nr][0],None)).resize((320, 240),Image.ANTIALIAS)
        ) for nr, img_from_database in enumerate(img_database)]
    # features of the query image
    (kps1, descs1) = ALG.detectAndCompute(img_to_match, None)
    # second screen: query image, current database image, current matches
    layout2=tk.Label(root)
    layout2.place(relx=0.5,rely=0, relwidth=1, relheight=1,anchor='n')
    label_img_to_match=tk.Label(layout2,image=img_to_match_pillow)
    label_img_to_match.image=img_to_match_pillow
    label_img_to_match.place(relx=0.3,rely=0.1, width=320, height=240,anchor='n')
    label_img_database=tk.Label(layout2,image=draw_database[0])
    label_img_database.image=draw_database[0]
    label_img_database.place(relx=0.7,rely=0.1, width=320, height=240,anchor='n')
    label_img_matched=tk.Label(layout2,image=draw_database[0])
    label_img_matched.image=draw_database[0]
    label_img_matched.place(relx=0.5,rely=0.5, width=640, height=240,anchor='n')
    # per-database-image count of "good" matches (Lowe ratio test)
    nrOfGoodPerImage=np.zeros([nrOfFiles,1])
    image_list_matched=[]
    def calc(j):
        # Compute kNN matches of the query against database image j and
        # render the match visualisation.
        # NOTE(review): the j<nrOfFiles-1 bound skips the last database
        # image entirely -- suspected off-by-one; confirm.
        if j<nrOfFiles-1:
            bf = cv2.BFMatcher()
            kps2=img_database_fts[j][0]
            descs2=img_database_fts[j][1]
            matches = bf.knnMatch(descs1,descs2,k=2)
            matchesMask = [[0,0] for i in range(len(matches))]
            # Lowe's ratio test: keep matches clearly better than the runner-up
            for i,(m,n) in enumerate(matches):
                if m.distance < 0.75*n.distance:
                    matchesMask[i]=[1,0]
            good = []
            for m,n in matches:
                if m.distance < 0.75*n.distance:
                    good.append([m])
            nrOfGoodPerImage[j]=np.sum(matchesMask[:])
            img3 = cv2.drawMatchesKnn(img_to_match,kps1,img_database[j],kps2,good,None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
            img3_pillow=ImageTk.PhotoImage(Image.fromarray(img3).resize((640, 240),Image.ANTIALIAS))
            image_list_matched.append(img3_pillow)
            # NOTE(review): this calls calc(j+1) immediately (synchronous
            # recursion) and hands its None result to after(); the idx
            # computation below relies on that synchronous behaviour, but
            # root.after(0, lambda: calc(j+1)) was probably intended.
            root.after(0, calc(j+1))
    calc(0)
    # indices of the three database images with the most good matches
    idx = (-np.squeeze(nrOfGoodPerImage)).argsort()[:3]
    def matching(i):
        # Animate through the per-image match visualisations, then show
        # the final results screen once every image has been displayed.
        if i<nrOfFiles-1:
            label_img_database.config(image=draw_database[i])
            label_img_matched.config(image=image_list_matched[i])
            root.after(DELAY, lambda: matching(i+1))
        elif i==nrOfFiles-1:
            # my_label4=tk.Label(root,bg='#80c1ff')
            my_label4=tk.Label(root,bg="LightSteelBlue1")
            my_label4.place(relx=0.5,rely=0, relwidth=1, relheight=1,anchor='n')
            org_label=tk.Label(my_label4,text="Image to match:\n"+name)
            org_label.place(relx=0.15,rely=0.5,anchor='e')
            org=tk.Label(my_label4,image=img_to_match_pillow)
            org.place(relx=0.15,rely=0.5, width=320, height=240,anchor='w')
            best_match_label=tk.Label(my_label4,text="Best Match:\n"+files[idx[0]])
            best_match_label.place(relx=0.8,rely=0.2,anchor='w')
            best_match=tk.Label(my_label4,image=img_database_pillow[idx[0]])
            best_match.place(relx=0.8,rely=0.2, width=320, height=240,anchor='e')
            best_match2_label=tk.Label(my_label4,text="Second Best Match:\n"+files[idx[1]])
            best_match2_label.place(relx=0.8,rely=0.5,anchor='w')
            best_match2=tk.Label(my_label4,image=img_database_pillow[idx[1]])
            best_match2.place(relx=0.8,rely=0.5, width=320, height=240,anchor='e')
            best_match3_label=tk.Label(my_label4,text="Third Best Match:\n"+files[idx[2]])
            best_match3_label.place(relx=0.8,rely=0.8,anchor='w')
            best_match3=tk.Label(my_label4,image=img_database_pillow[idx[2]])
            best_match3.place(relx=0.8,rely=0.8, width=320, height=240,anchor='e')
            # my_title2=tk.Label(my_label4,text="Matching finished! Displaying results...",font=("Helvetica",20), bg='#80c1ff')
            my_title2=tk.Label(my_label4,text="Matching finished! Displaying results...",font=("Helvetica",20), bg="LightSteelBlue1")
            my_title2.place(relx=0.5,rely=0.0, relwidth=0.4, relheight=0.05,anchor='n')
    matching(0)
# Milliseconds between animation frames in matching().
DELAY=10
HEIGHT=900
WIDTH=1400
canvas=tk.Canvas(root,height=HEIGHT, width=WIDTH)
canvas.pack()
# button_quit= tk.Button(root, text="Exit Program", command=root.quit)
# button_quit.pack()
my_title=tk.Label(root,text="Choose the algorithm you want to use",font=("Helvetica",16))
my_title.place(relx=0.5,rely=0.0, relwidth=0.4, relheight=0.1,anchor='n')
# Radio-button variable holding the selected algorithm (1..5).
v = tk.IntVar()
v.set(1) # default selection: option 1
# (value label, radio value) pairs for the algorithm choices.
# NOTE(review): the name 'languages' is a leftover from a radiobutton
# tutorial; these are feature-detection algorithms, not languages.
languages = [
    ("SIFT",1),
    ("SURF",2),
    ("BRISK",3),
    ("AKAZE",4),
    ("KAZE",5)
]
for txt, val in languages:
    tk.Radiobutton(root,
                  text=txt,
                  padx = 10, pady=10,
                  variable=v,
                  value=val).place(relx=0.5,rely=0.1+val/40, relwidth=0.1, relheight=0.025,anchor='n')
my_title1=tk.Label(root,text="Choose the testimage:",font=("Helvetica",16))
my_title1.place(relx=0.5,rely=0.525, relwidth=0.4, relheight=0.1,anchor='n')
btn = tk.Button(root, text ='Open', command = lambda: open_file(),bg="LightSteelBlue1")
btn.place(relx=0.5,rely=0.6, relwidth=0.6, relheight=0.2,anchor='n')
root.mainloop()
root.mainloop() | [
"cv2.BFMatcher",
"cv2.AKAZE_create",
"tkinter.Canvas",
"tkinter.Label",
"numpy.load",
"os.listdir",
"cv2.drawMatchesKnn",
"cv2.xfeatures2d.SURF_create",
"tkinter.filedialog.askopenfilename",
"numpy.squeeze",
"cv2.imread",
"tkinter.IntVar",
"PIL.Image.fromarray",
"cv2.KAZE_create",
"cv2.d... | [((119, 126), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (124, 126), True, 'import tkinter as tk\n'), ((250, 263), 'os.listdir', 'listdir', (['path'], {}), '(path)\n', (257, 263), False, 'from os import listdir\n'), ((294, 309), 'cv2.BFMatcher', 'cv2.BFMatcher', ([], {}), '()\n', (307, 309), False, 'import cv2\n'), ((5789, 5832), 'tkinter.Canvas', 'tk.Canvas', (['root'], {'height': 'HEIGHT', 'width': 'WIDTH'}), '(root, height=HEIGHT, width=WIDTH)\n', (5798, 5832), True, 'import tkinter as tk\n'), ((5955, 6043), 'tkinter.Label', 'tk.Label', (['root'], {'text': '"""Choose the algorithm you want to use"""', 'font': "('Helvetica', 16)"}), "(root, text='Choose the algorithm you want to use', font=(\n 'Helvetica', 16))\n", (5963, 6043), True, 'import tkinter as tk\n'), ((6118, 6129), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (6127, 6129), True, 'import tkinter as tk\n'), ((6556, 6624), 'tkinter.Label', 'tk.Label', (['root'], {'text': '"""Choose the testimage:"""', 'font': "('Helvetica', 16)"}), "(root, text='Choose the testimage:', font=('Helvetica', 16))\n", (6564, 6624), True, 'import tkinter as tk\n'), ((349, 423), 'numpy.load', 'np.load', (['"""C:\\\\Users\\\\tobias.grab\\\\IWK_data\\\\savedArrays\\\\img_database.npy"""'], {}), "('C:\\\\Users\\\\tobias.grab\\\\IWK_data\\\\savedArrays\\\\img_database.npy')\n", (356, 423), True, 'import numpy as np\n'), ((775, 812), 'tkinter.filedialog.askopenfilename', 'askopenfilename', ([], {'title': 'u"""select file"""'}), "(title=u'select file')\n", (790, 812), False, 'from tkinter.filedialog import askopenfilename\n'), ((866, 890), 'cv2.imread', 'cv2.imread', (['file_path', '(0)'], {}), '(file_path, 0)\n', (876, 890), False, 'import cv2\n'), ((1704, 1718), 'tkinter.Label', 'tk.Label', (['root'], {}), '(root)\n', (1712, 1718), True, 'import tkinter as tk\n'), ((1821, 1865), 'tkinter.Label', 'tk.Label', (['layout2'], {'image': 'img_to_match_pillow'}), '(layout2, image=img_to_match_pillow)\n', (1829, 1865), True, 
'import tkinter as tk\n'), ((2024, 2065), 'tkinter.Label', 'tk.Label', (['layout2'], {'image': 'draw_database[0]'}), '(layout2, image=draw_database[0])\n', (2032, 2065), True, 'import tkinter as tk\n'), ((2220, 2261), 'tkinter.Label', 'tk.Label', (['layout2'], {'image': 'draw_database[0]'}), '(layout2, image=draw_database[0])\n', (2228, 2261), True, 'import tkinter as tk\n'), ((2417, 2441), 'numpy.zeros', 'np.zeros', (['[nrOfFiles, 1]'], {}), '([nrOfFiles, 1])\n', (2425, 2441), True, 'import numpy as np\n'), ((513, 546), 'cv2.imread', 'cv2.imread', (["(path + '\\\\' + file)", '(0)'], {}), "(path + '\\\\' + file, 0)\n", (523, 546), False, 'import cv2\n'), ((1039, 1068), 'cv2.xfeatures2d.SURF_create', 'cv2.xfeatures2d.SURF_create', ([], {}), '()\n', (1066, 1068), False, 'import cv2\n'), ((1104, 1133), 'cv2.xfeatures2d.SURF_create', 'cv2.xfeatures2d.SURF_create', ([], {}), '()\n', (1131, 1133), False, 'import cv2\n'), ((2547, 2562), 'cv2.BFMatcher', 'cv2.BFMatcher', ([], {}), '()\n', (2560, 2562), False, 'import cv2\n'), ((3136, 3158), 'numpy.sum', 'np.sum', (['matchesMask[:]'], {}), '(matchesMask[:])\n', (3142, 3158), True, 'import numpy as np\n'), ((3193, 3321), 'cv2.drawMatchesKnn', 'cv2.drawMatchesKnn', (['img_to_match', 'kps1', 'img_database[j]', 'kps2', 'good', 'None'], {'flags': 'cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS'}), '(img_to_match, kps1, img_database[j], kps2, good, None,\n flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\n', (3211, 3321), False, 'import cv2\n'), ((6325, 6396), 'tkinter.Radiobutton', 'tk.Radiobutton', (['root'], {'text': 'txt', 'padx': '(10)', 'pady': '(10)', 'variable': 'v', 'value': 'val'}), '(root, text=txt, padx=10, pady=10, variable=v, value=val)\n', (6339, 6396), True, 'import tkinter as tk\n'), ((602, 622), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (617, 622), False, 'from PIL import ImageTk, Image\n'), ((934, 963), 'PIL.Image.fromarray', 'Image.fromarray', (['img_to_match'], {}), 
'(img_to_match)\n', (949, 963), False, 'from PIL import ImageTk, Image\n'), ((1169, 1187), 'cv2.BRISK_create', 'cv2.BRISK_create', ([], {}), '()\n', (1185, 1187), False, 'import cv2\n'), ((3941, 3977), 'tkinter.Label', 'tk.Label', (['root'], {'bg': '"""LightSteelBlue1"""'}), "(root, bg='LightSteelBlue1')\n", (3949, 3977), True, 'import tkinter as tk\n'), ((4096, 4148), 'tkinter.Label', 'tk.Label', (['my_label4'], {'text': "('Image to match:\\n' + name)"}), "(my_label4, text='Image to match:\\n' + name)\n", (4104, 4148), True, 'import tkinter as tk\n'), ((4223, 4269), 'tkinter.Label', 'tk.Label', (['my_label4'], {'image': 'img_to_match_pillow'}), '(my_label4, image=img_to_match_pillow)\n', (4231, 4269), True, 'import tkinter as tk\n'), ((4390, 4447), 'tkinter.Label', 'tk.Label', (['my_label4'], {'text': "('Best Match:\\n' + files[idx[0]])"}), "(my_label4, text='Best Match:\\n' + files[idx[0]])\n", (4398, 4447), True, 'import tkinter as tk\n'), ((4535, 4589), 'tkinter.Label', 'tk.Label', (['my_label4'], {'image': 'img_database_pillow[idx[0]]'}), '(my_label4, image=img_database_pillow[idx[0]])\n', (4543, 4589), True, 'import tkinter as tk\n'), ((4717, 4781), 'tkinter.Label', 'tk.Label', (['my_label4'], {'text': "('Second Best Match:\\n' + files[idx[1]])"}), "(my_label4, text='Second Best Match:\\n' + files[idx[1]])\n", (4725, 4781), True, 'import tkinter as tk\n'), ((4871, 4925), 'tkinter.Label', 'tk.Label', (['my_label4'], {'image': 'img_database_pillow[idx[1]]'}), '(my_label4, image=img_database_pillow[idx[1]])\n', (4879, 4925), True, 'import tkinter as tk\n'), ((5054, 5117), 'tkinter.Label', 'tk.Label', (['my_label4'], {'text': "('Third Best Match:\\n' + files[idx[2]])"}), "(my_label4, text='Third Best Match:\\n' + files[idx[2]])\n", (5062, 5117), True, 'import tkinter as tk\n'), ((5207, 5261), 'tkinter.Label', 'tk.Label', (['my_label4'], {'image': 'img_database_pillow[idx[2]]'}), '(my_label4, image=img_database_pillow[idx[2]])\n', (5215, 5261), True, 'import 
tkinter as tk\n'), ((5511, 5630), 'tkinter.Label', 'tk.Label', (['my_label4'], {'text': '"""Matching finished! Displaying results..."""', 'font': "('Helvetica', 20)", 'bg': '"""LightSteelBlue1"""'}), "(my_label4, text='Matching finished! Displaying results...', font=(\n 'Helvetica', 20), bg='LightSteelBlue1')\n", (5519, 5630), True, 'import tkinter as tk\n'), ((1223, 1241), 'cv2.AKAZE_create', 'cv2.AKAZE_create', ([], {}), '()\n', (1239, 1241), False, 'import cv2\n'), ((3536, 3564), 'numpy.squeeze', 'np.squeeze', (['nrOfGoodPerImage'], {}), '(nrOfGoodPerImage)\n', (3546, 3564), True, 'import numpy as np\n'), ((1277, 1294), 'cv2.KAZE_create', 'cv2.KAZE_create', ([], {}), '()\n', (1292, 1294), False, 'import cv2\n'), ((1440, 1507), 'cv2.drawKeypoints', 'cv2.drawKeypoints', (['img_from_database', 'img_database_fts[nr][0]', 'None'], {}), '(img_from_database, img_database_fts[nr][0], None)\n', (1457, 1507), False, 'import cv2\n'), ((3356, 3377), 'PIL.Image.fromarray', 'Image.fromarray', (['img3'], {}), '(img3)\n', (3371, 3377), False, 'from PIL import ImageTk, Image\n')] |
# The experiment logic and analysis
import copy
import gym
import json
import matplotlib
import multiprocessing as mp
import warnings
import numpy as np
import platform
import pandas as pd
import traceback
from keras import backend as K
from os import path, environ
from rl.util import *
from rl.agent import *
from rl.memory import *
from rl.policy import *
from rl.preprocessor import *
# TODO fix mp breaking on Mac shit,
# except when running -b with agg backend
# (no GUI rendered,but saves graphs)
# set only if it's not MacOS
# Select a matplotlib backend before pyplot is imported: headless 'agg'
# on CI and on MacOS (where TkAgg breaks under multiprocessing, see the
# TODO above), interactive 'TkAgg' elsewhere.
if environ.get('CI') or platform.system() == 'Darwin':
    matplotlib.rcParams['backend'] = 'agg'
else:
    matplotlib.rcParams['backend'] = 'TkAgg'

# Fail fast on numpy floating point errors instead of silently warning.
np.seterr(all='raise')
warnings.filterwarnings("ignore", module="matplotlib")

GREF = globals()  # live module-globals dict, used by get_module() lookups
PARALLEL_PROCESS_NUM = mp.cpu_count()
ASSET_PATH = path.join(path.dirname(__file__), 'asset')


def _load_asset_json(filename):
    '''Read and parse a JSON file from the asset directory,
    closing the file handle properly (the previous inline
    open(...).read() never closed it).'''
    with open(path.join(ASSET_PATH, filename)) as f:
        return json.load(f)


SESS_SPECS = _load_asset_json('sess_specs.json')
PROBLEMS = _load_asset_json('problems.json')
# Required sys_vars keys and their defaults.
# UPPER_CASE keys are per-problem constants loaded from asset/problems.json;
# lower_case keys are mutable run state, reset before each session
# (see Session.reset_sys_vars).
REQUIRED_SYS_KEYS = {
    # problem constants (capitalized), set in asset/problems.json
    'RENDER': None,
    'GYM_ENV_NAME': None,
    'SOLVED_MEAN_REWARD': None,
    'MAX_EPISODES': None,
    'REWARD_MEAN_LEN': None,
    # mutable run state (lower case), reset per session
    'epi': 0,
    't': 0,
    'done': False,
    'loss': [],
    'total_rewards_history': [],
    'explore_history': [],
    'mean_rewards_history': [],
    'mean_rewards': 0,
    'total_rewards': 0,
    'solved': False,
}
class Grapher(object):
    '''
    Grapher object that belongs to a Session
    to draw live graphs from its sys_vars data
    (total rewards + exploration, mean rewards, loss)
    and save them to the session's graph file.
    '''

    def __init__(self, session):
        # import here so the backend chosen at module load takes effect
        import matplotlib.pyplot as plt
        plt.rcParams['toolbar'] = 'None'  # mute matplotlib toolbar
        self.plt = plt
        self.session = session
        self.graph_filename = self.session.graph_filename
        self.subgraphs = {}
        self.figure = self.plt.figure(facecolor='white', figsize=(8, 9))
        self.figure.suptitle(wrap_text(self.session.session_id))
        self.init_figure()

    def init_figure(self):
        '''Create the three subplots and store their (axes, line) handles.'''
        if environ.get('CI'):
            return
        # graph 1: total rewards, with exploration rate on a twin y-axis
        ax1 = self.figure.add_subplot(
            311,
            frame_on=False,
            title="\n\ntotal rewards per episode",
            ylabel='total rewards')
        p1, = ax1.plot([], [])
        self.subgraphs['total rewards'] = (ax1, p1)

        ax1e = ax1.twinx()
        ax1e.set_ylabel('exploration rate').set_color('r')
        ax1e.set_frame_on(False)
        p1e, = ax1e.plot([], [], 'r')
        self.subgraphs['e'] = (ax1e, p1e)

        # graph 2: running mean of rewards
        ax2 = self.figure.add_subplot(
            312,
            frame_on=False,
            title='mean rewards over last 100 episodes',
            ylabel='mean rewards')
        p2, = ax2.plot([], [], 'g')
        self.subgraphs['mean rewards'] = (ax2, p2)

        # graph 3: training loss
        ax3 = self.figure.add_subplot(
            313,
            frame_on=False,
            title='loss over time, episode',
            ylabel='loss')
        p3, = ax3.plot([], [])
        self.subgraphs['loss'] = (ax3, p3)

        self.plt.tight_layout()  # auto-fix spacing
        self.plt.ion()  # for live plot

    def _refresh_subgraph(self, key, ydata):
        '''Helper: push new y-data into one subgraph and rescale its axes.
        Extracted because the same four-line sequence was repeated for
        every subgraph in plot().'''
        ax, line = self.subgraphs[key]
        line.set_ydata(ydata)
        line.set_xdata(np.arange(len(line.get_ydata())))
        ax.relim()
        ax.autoscale_view(tight=True, scalex=True, scaley=True)

    def plot(self):
        '''do live plotting of all subgraphs, then save the figure'''
        sys_vars = self.session.sys_vars
        if environ.get('CI'):
            return
        self._refresh_subgraph('total rewards', sys_vars['total_rewards_history'])
        self._refresh_subgraph('e', sys_vars['explore_history'])
        self._refresh_subgraph('mean rewards', sys_vars['mean_rewards_history'])
        self._refresh_subgraph('loss', sys_vars['loss'])
        self.plt.draw()
        self.plt.pause(0.01)
        self.save()

    def save(self):
        '''save graph to filename'''
        self.figure.savefig(self.graph_filename)
class Session(object):
'''
The base unit of an Experiment
An Experiment for a config on repeat for k time
will run k Sessions, each with identical sess_spec
for a problem, Agent, Memory, Policy, param.
Handles its own data, plots and saves its own graphs
Serialized by the parent experiment_id with its session_id
'''
    def __init__(self, experiment, session_num=0, num_of_sessions=1):
        '''
        Set up one run of an experiment: resolve the Agent/Memory/Policy/
        PreProcessor classes named in the sess_spec, build the gym env and
        agent stack, and prepare the session's data/graph file paths.
        '''
        self.experiment = experiment
        self.session_num = session_num
        self.num_of_sessions = num_of_sessions
        self.session_id = self.experiment.experiment_id + \
            '_s' + str(self.session_num)
        log_delimiter('Init Session #{} of {}:\n{}'.format(
            self.session_num, self.num_of_sessions, self.session_id))
        self.sess_spec = experiment.sess_spec
        self.problem = self.sess_spec['problem']
        # resolve component classes by name from the module globals (GREF)
        self.Agent = get_module(GREF, self.sess_spec['Agent'])
        self.Memory = get_module(GREF, self.sess_spec['Memory'])
        self.Policy = get_module(GREF, self.sess_spec['Policy'])
        self.PreProcessor = get_module(GREF, self.sess_spec['PreProcessor'])
        self.param = self.sess_spec['param']
        # init all things, so a session can only be ran once
        self.sys_vars = self.init_sys_vars()
        self.env = gym.make(self.sys_vars['GYM_ENV_NAME'])
        self.preprocessor = self.PreProcessor(**self.param)
        self.env_spec = self.set_env_spec()
        self.agent = self.Agent(self.env_spec, **self.param)
        self.memory = self.Memory(**self.param)
        self.policy = self.Policy(**self.param)
        # wire memory, policy and preprocessor into the agent
        self.agent.compile(self.memory, self.policy, self.preprocessor)
        # data file and graph
        self.base_filename = './data/{}/{}'.format(
            self.experiment.prefix_id, self.session_id)
        self.graph_filename = self.base_filename + '.png'
        # for plotting
        self.grapher = Grapher(self)
    def init_sys_vars(self):
        '''
        init the sys vars for a problem by reading its constants from
        asset/problems.json (PROBLEMS), then reset the mutable vars
        (lower-case keys, see REQUIRED_SYS_KEYS) via reset_sys_vars
        '''
        sys_vars = PROBLEMS[self.problem]
        # `args` is a module-level CLI namespace -- presumably provided by
        # the star-imported rl.util; not defined in this file, verify.
        if not args.render:
            sys_vars['RENDER'] = False
        # on CI: never render, and cut the run short
        if environ.get('CI'):
            sys_vars['RENDER'] = False
            sys_vars['MAX_EPISODES'] = 4
        self.sys_vars = sys_vars
        self.reset_sys_vars()
        return self.sys_vars
def reset_sys_vars(self):
'''reset and check RL system vars (lower case)
before each new session'''
for k in REQUIRED_SYS_KEYS:
if k.islower():
self.sys_vars[k] = copy.copy(REQUIRED_SYS_KEYS.get(k))
self.check_sys_vars()
return self.sys_vars
def check_sys_vars(self):
'''ensure the requried RL system vars are specified'''
sys_keys = self.sys_vars.keys()
assert all(k in sys_keys for k in REQUIRED_SYS_KEYS)
    def set_env_spec(self):
        '''Helper: build and return the env specs: dims, actions, reward range,
        timestep limit; the spec is passed through the preprocessor first.'''
        env = self.env
        # scalar dim for 1-D observation spaces, full shape tuple otherwise
        state_dim = env.observation_space.shape[0]
        if (len(env.observation_space.shape) > 1):
            state_dim = env.observation_space.shape
        env_spec = {
            'state_dim': state_dim,
            # per-dimension (low, high) bounds of the observation space
            'state_bounds': np.transpose(
                [env.observation_space.low, env.observation_space.high]),
            'action_dim': env.action_space.n,
            'actions': list(range(env.action_space.n)),
            'reward_range': env.reward_range,
            'timestep_limit': env.spec.tags.get(
                'wrapper_config.TimeLimit.max_episode_steps')
        }
        self.env_spec = self.preprocessor.preprocess_env_spec(
            env_spec)  # preprocess
        return self.env_spec
    def debug_agent_info(self):
        '''Log a debug summary of the agent, its memory size, policy
        exploration params and preprocessor.'''
        logger.debug(
            "Agent info: {}".format(
                format_obj_dict(
                    self.agent,
                    ['learning_rate', 'n_epoch'])))
        logger.debug(
            "Memory info: size: {}".format(self.agent.memory.size()))
        logger.debug(
            "Policy info: {}".format(
                format_obj_dict(self.agent.policy, ['e', 'tau'])))
        logger.debug(
            "PreProcessor info: {}".format(
                format_obj_dict(self.agent.preprocessor, [])))
    def check_end(self):
        '''check if session ends (solved, or last episode reached);
        if so, log the outcome and close the gym env'''
        sys_vars = self.sys_vars
        logger.debug(
            "RL Sys info: {}".format(
                format_obj_dict(
                    sys_vars, ['epi', 't', 'total_rewards', 'mean_rewards'])))
        # visual separator in the debug log
        logger.debug('{:->30}'.format(''))
        if (sys_vars['solved'] or
                (sys_vars['epi'] == sys_vars['MAX_EPISODES'] - 1)):
            logger.info(
                'Problem solved? {}\nAt episode: {}\nParams: {}'.format(
                    sys_vars['solved'], sys_vars['epi'],
                    to_json(self.param)))
            self.env.close()
def update_history(self):
    '''Record per-episode data at episode end and refresh the running
    statistics (exploration value, mean reward, solved flag).'''
    sys_vars = self.sys_vars
    sys_vars['total_rewards_history'].append(sys_vars['total_rewards'])
    exploration = (getattr(self.policy, 'e', 0) or
                   getattr(self.policy, 'tau', 0))
    sys_vars['explore_history'].append(exploration)
    window = sys_vars['REWARD_MEAN_LEN']
    # Mean reward over the last `window` episodes; cast away from
    # numpy to plain float so the value stays JSON-serializable
    recent_rewards = sys_vars['total_rewards_history'][-window:]
    mean_rewards = float(np.mean(recent_rewards))
    sys_vars['mean_rewards'] = mean_rewards
    sys_vars['mean_rewards_history'].append(mean_rewards)
    sys_vars['solved'] = mean_rewards >= sys_vars['SOLVED_MEAN_REWARD']
    self.grapher.plot()
    self.check_end()
    return sys_vars
def run_episode(self):
    '''Run one episode of the environment loop and return sys_vars.

    Per timestep: (optionally) render, preprocess the state, select
    and apply an action, store the processed experience, then let the
    agent update and (conditionally) train.
    '''
    sys_vars, env, agent = self.sys_vars, self.env, self.agent
    sys_vars['total_rewards'] = 0
    state = env.reset()
    processed_state = agent.preprocessor.reset_state(state)
    agent.memory.reset_state(processed_state)
    self.debug_agent_info()
    for t in range(agent.env_spec['timestep_limit']):
        sys_vars['t'] = t  # update sys_vars t
        if sys_vars.get('RENDER'):
            env.render()
        processed_state = agent.preprocessor.preprocess_state()
        action = agent.select_action(processed_state)
        next_state, reward, done, _info = env.step(action)
        processed_exp = agent.preprocessor.preprocess_memory(
            action, reward, next_state, done)
        # the preprocessor may return None (no experience to store);
        # only complete experiences are added to memory
        if processed_exp is not None:
            agent.memory.add_exp(*processed_exp)
        sys_vars['done'] = done
        agent.update(sys_vars)
        if agent.to_train(sys_vars):
            agent.train(sys_vars)
        sys_vars['total_rewards'] += reward
        if done:
            break
    # record episode stats (also handles session-end checks)
    self.update_history()
    return sys_vars
def clear_session(self):
    '''Release backend graph state between sessions (tensorflow only).'''
    if K._BACKEND == 'tensorflow':
        K.clear_session()  # manual gc to fix TF issue 3388
def run(self):
    '''Run a session of the agent: loop over episodes until solved,
    the episode budget is exhausted, or an episode errors out.
    Returns the session's sys_vars with timing info attached.'''
    log_delimiter('Run Session #{} of {}\n{}'.format(
        self.session_num, self.num_of_sessions, self.session_id))
    sys_vars = self.sys_vars
    sys_vars['time_start'] = timestamp()
    for epi in range(sys_vars['MAX_EPISODES']):
        sys_vars['epi'] = epi  # update sys_vars epi
        # a broken episode aborts this session but lets the caller
        # (Experiment) continue with its remaining sessions
        try:
            self.run_episode()
        except Exception:
            logger.error('Error in experiment, terminating '
                         'further session from {}'.format(self.session_id))
            traceback.print_exc(file=sys.stdout)
            break
        if sys_vars['solved']:
            break
    self.clear_session()
    sys_vars['time_end'] = timestamp()
    sys_vars['time_taken'] = timestamp_elapse(
        sys_vars['time_start'], sys_vars['time_end'])
    progress = 'Progress: Experiment #{} Session #{} of {} done'.format(
        self.experiment.experiment_num,
        self.session_num, self.num_of_sessions)
    log_delimiter('End Session:\n{}\n{}'.format(
        self.session_id, progress))
    return sys_vars
class Experiment(object):
    '''
    An Experiment for a config on repeat for k time
    will run k Sessions, each with identical sess_spec
    for a problem, Agent, Memory, Policy, param.
    Will spawn as many Sessions for repetition
    Handles all the data from sessions
    to provide an experiment-level summary for a sess_spec
    Its experiment_id is serialized by
    problem, Agent, Memory, Policy and timestamp
    Data Requirements:
    JSON, single file, quick and useful summary,
    replottable data, rerunnable specs
    Keys:
    all below X array of hyper param selection:
    - sess_spec (so we can plug in directly again to rerun)
    - summary
        - time_start
        - time_end
        - time_taken
        - metrics
    - sys_vars_array
    '''

    def __init__(self, sess_spec, times=1,
                 experiment_num=0, num_of_experiments=1,
                 run_timestamp=None,
                 prefix_id_override=None):
        '''Set up identifiers, output directory and bookkeeping.

        Note: mutates `sess_spec` in place by dropping 'param_range'.
        '''
        self.sess_spec = sess_spec
        self.data = None
        self.times = times
        self.sess_spec.pop('param_range', None)  # single exp, del range
        self.experiment_num = experiment_num
        self.num_of_experiments = num_of_experiments
        # Bug fix: the previous default `run_timestamp=timestamp()` was
        # evaluated once at class-definition time, so every Experiment
        # constructed without an explicit timestamp shared the stale
        # import-time value. Evaluate at call time instead.
        if run_timestamp is None:
            run_timestamp = timestamp()
        self.run_timestamp = run_timestamp
        self.prefix_id = prefix_id_override or '{}_{}_{}_{}_{}_{}'.format(
            sess_spec['problem'],
            sess_spec['Agent'].split('.').pop(),
            sess_spec['Memory'].split('.').pop(),
            sess_spec['Policy'].split('.').pop(),
            sess_spec['PreProcessor'].split('.').pop(),
            self.run_timestamp
        )
        self.experiment_id = self.prefix_id + '_e' + str(self.experiment_num)
        self.base_dir = './data/{}'.format(self.prefix_id)
        os.makedirs(self.base_dir, exist_ok=True)
        self.base_filename = './data/{}/{}'.format(
            self.prefix_id, self.experiment_id)
        self.data_filename = self.base_filename + '.json'
        log_delimiter('Init Experiment #{} of {}:\n{}'.format(
            self.experiment_num, self.num_of_experiments,
            self.experiment_id), '=')

    def analyze(self):
        '''Compute summary metrics over all session sys_vars and store
        them under data['summary']['metrics']; return the data dict.
        '''
        sys_vars_array = self.data['sys_vars_array']
        solved_sys_vars_array = list(filter(
            lambda sv: sv['solved'], sys_vars_array))
        mean_rewards_array = np.array(list(map(
            lambda sv: sv['mean_rewards'], sys_vars_array)))
        max_total_rewards_array = np.array(list(map(
            lambda sv: np.max(sv['total_rewards_history']), sys_vars_array)))
        epi_array = np.array(list(map(lambda sv: sv['epi'], sys_vars_array)))
        mean_rewards_per_epi_array = np.divide(mean_rewards_array, epi_array)
        t_array = np.array(list(map(lambda sv: sv['t'], sys_vars_array)))
        time_taken_array = np.array(list(map(
            lambda sv: timestamp_elapse_to_seconds(sv['time_taken']),
            sys_vars_array)))
        solved_epi_array = np.array(list(map(
            lambda sv: sv['epi'], solved_sys_vars_array)))
        solved_t_array = np.array(list(map(
            lambda sv: sv['t'], solved_sys_vars_array)))
        solved_time_taken_array = np.array(list(map(
            lambda sv: timestamp_elapse_to_seconds(sv['time_taken']),
            solved_sys_vars_array)))
        metrics = {
            # percentage solved
            'num_of_sessions': len(sys_vars_array),
            'solved_num_of_sessions': len(solved_sys_vars_array),
            'solved_ratio_of_sessions': float(len(
                solved_sys_vars_array)) / len(sys_vars_array),
            'mean_rewards_stats': basic_stats(mean_rewards_array),
            'mean_rewards_per_epi_stats': basic_stats(
                mean_rewards_per_epi_array),
            'max_total_rewards_stats': basic_stats(max_total_rewards_array),
            'epi_stats': basic_stats(epi_array),
            't_stats': basic_stats(t_array),
            'time_taken_stats': basic_stats(time_taken_array),
            'solved_epi_stats': basic_stats(solved_epi_array),
            'solved_t_stats': basic_stats(solved_t_array),
            'solved_time_taken_stats': basic_stats(solved_time_taken_array),
        }
        self.data['summary'].update({'metrics': metrics})
        return self.data

    def save(self):
        '''save the entire experiment data grid from inside run()'''
        with open(self.data_filename, 'w') as f:
            f.write(to_json(self.data))
        logger.info(
            'Session complete, data saved to {}'.format(self.data_filename))

    def to_stop(self):
        '''Check whether the experiment should stop early (any
        unsolved session fails the whole experiment).'''
        metrics = self.data['summary']['metrics']
        failed = metrics['solved_ratio_of_sessions'] < 1.
        if failed:
            logger.info(
                'Failed experiment, terminating sessions for {}'.format(
                    self.experiment_id))
        return failed

    def run(self):
        '''
        helper: run a experiment for Session
        a number of times times given a sess_spec from gym_specs
        '''
        configure_gpu()
        time_start = timestamp()
        sys_vars_array = []
        for s in range(self.times):
            sess = Session(experiment=self,
                           session_num=s, num_of_sessions=self.times)
            sys_vars = sess.run()
            # copy so later sessions cannot mutate the stored record
            sys_vars_array.append(copy.copy(sys_vars))
            time_end = timestamp()
            time_taken = timestamp_elapse(time_start, time_end)
            self.data = {  # experiment data
                'experiment_id': self.experiment_id,
                'sess_spec': self.sess_spec,
                'summary': {
                    'time_start': time_start,
                    'time_end': time_end,
                    'time_taken': time_taken,
                    'metrics': None,
                },
                'sys_vars_array': sys_vars_array,
            }
            self.analyze()
            # progressive update, write when every session is done
            self.save()
            if self.to_stop():
                break
        progress = 'Progress: Experiment #{} of {} done'.format(
            self.experiment_num, self.num_of_experiments)
        log_delimiter(
            'End Experiment:\n{}\n{}'.format(
                self.experiment_id, progress), '=')
        return self.data
def configure_gpu():
    '''detect GPU options and configure

    On the tensorflow backend, create a session whose GPU memory
    budget is split across parallel worker processes: the main
    process keeps the full share, mp.Pool children each get
    1/PARALLEL_PROCESS_NUM. No-op on other backends.
    '''
    if K._BACKEND != 'tensorflow':
        # skip directly if is not tensorflow
        return
    # worker processes spawned by mp.Pool must share the GPU
    real_parallel_process_num = 1 if mp.current_process(
    ).name == 'MainProcess' else PARALLEL_PROCESS_NUM
    tf = K.tf
    gpu_options = tf.GPUOptions(
        allow_growth=True,
        per_process_gpu_memory_fraction=1./float(real_parallel_process_num))
    config = tf.ConfigProto(
        gpu_options=gpu_options,
        allow_soft_placement=True)
    sess = tf.Session(config=config)
    K.set_session(sess)
    return sess
def plot(experiment_or_prefix_id):
    '''Replot graphs from saved data by re-initialising a Session
    for each recorded sys_vars.'''
    prefix_id = prefix_id_from_experiment_id(experiment_or_prefix_id)
    for data in load_data_array_from_prefix_id(prefix_id):
        experiment = Experiment(data['sess_spec'], times=1,
                                prefix_id_override=prefix_id)
        # save with the right serialized filename
        experiment.experiment_id = data['experiment_id']
        sys_vars_array = data['sys_vars_array']
        num_of_sessions = len(sys_vars_array)
        for s, sys_vars in enumerate(sys_vars_array):
            sess = Session(experiment=experiment,
                           session_num=s, num_of_sessions=num_of_sessions)
            sess.sys_vars = sys_vars
            sess.grapher.plot()
            sess.clear_session()
def analyze_param_space(experiment_data_array_or_prefix_id):
    '''
    get all the data from all experiments.run()
    or read from all data files matching the prefix of experiment_id
    e.g. usage without running:
    prefix_id = 'DevCartPole-v0_DQN_LinearMemoryWithForgetting_BoltzmannPolicy_2017-01-15_142810'
    analyze_param_space(prefix_id)
    '''
    if isinstance(experiment_data_array_or_prefix_id, str):
        experiment_data_array = load_data_array_from_prefix_id(
            experiment_data_array_or_prefix_id)
    else:
        experiment_data_array = experiment_data_array_or_prefix_id

    # flatten each experiment's nested metrics into one row
    flat_metrics_array = []
    for data in experiment_data_array:
        flat_metrics = flatten_dict(data['summary']['metrics'])
        flat_metrics.update({'experiment_id': data['experiment_id']})
        flat_metrics_array.append(flat_metrics)

    metrics_df = pd.DataFrame.from_dict(flat_metrics_array)
    # Bug fix: DataFrame.sort_values is not in-place by default and
    # returns a new frame; the previous code discarded the result,
    # so the CSV was written unsorted. Reassign the sorted frame.
    metrics_df = metrics_df.sort_values(
        ['mean_rewards_per_epi_stats_mean',
         'mean_rewards_stats_mean', 'solved_ratio_of_sessions'],
        ascending=False
    )

    experiment_id = experiment_data_array[0]['experiment_id']
    prefix_id = prefix_id_from_experiment_id(experiment_id)
    param_space_data_filename = './data/{0}/param_space_data_{0}.csv'.format(
        prefix_id)
    metrics_df.to_csv(param_space_data_filename, index=False)
    logger.info(
        'Param space data saved to {}'.format(param_space_data_filename))
    return metrics_df
def run(sess_name_id_spec, times=1,
        param_selection=False, line_search=False,
        plot_only=False):
    '''
    primary method:
    specify:
    - sess_name(str) or sess_spec(Dict): run new experiment,
    - experiment_id(str): rerun experiment from data
    - experiment_id(str) with plot_only=True: plot graphs from data
    This runs all experiments, specified by the obtained sess_spec
    for a specified number of sessions per experiment
    Multiple experiments are ran if param_selection=True
    '''
    # run plots on data only
    if plot_only:
        plot(sess_name_id_spec)
        return
    # set sess_spec based on input
    if isinstance(sess_name_id_spec, str):
        # strings with >= 4 '_'-separated parts are treated as a saved
        # experiment_id; shorter strings are looked up in SESS_SPECS
        if len(sess_name_id_spec.split('_')) >= 4:
            data = load_data_from_experiment_id(sess_name_id_spec)
            sess_spec = data['sess_spec']
        else:
            sess_spec = SESS_SPECS.get(sess_name_id_spec)
    else:
        sess_spec = sess_name_id_spec
    # compose grid and run param selection
    if param_selection:
        if line_search:
            param_grid = param_line_search(sess_spec)
        else:
            param_grid = param_product(sess_spec)
        sess_spec_grid = generate_sess_spec_grid(sess_spec, param_grid)
        num_of_experiments = len(sess_spec_grid)
        # one shared timestamp so all grid experiments land in one dir
        run_timestamp = timestamp()
        experiment_array = []
        for e in range(num_of_experiments):
            sess_spec = sess_spec_grid[e]
            experiment = Experiment(
                sess_spec, times=times, experiment_num=e,
                num_of_experiments=num_of_experiments,
                run_timestamp=run_timestamp)
            experiment_array.append(experiment)
        # run experiments in parallel worker processes
        p = mp.Pool(PARALLEL_PROCESS_NUM)
        experiment_data_array = list(p.map(mp_run_helper, experiment_array))
        p.close()
        p.join()
    else:
        experiment = Experiment(sess_spec, times=times)
        experiment_data = experiment.run()
        experiment_data_array = [experiment_data]
    return analyze_param_space(experiment_data_array)
| [
"multiprocessing.cpu_count",
"copy.copy",
"gym.make",
"numpy.divide",
"numpy.mean",
"pandas.DataFrame.from_dict",
"numpy.max",
"platform.system",
"keras.backend.clear_session",
"traceback.print_exc",
"keras.backend.set_session",
"os.path.dirname",
"numpy.transpose",
"warnings.filterwarning... | [((684, 706), 'numpy.seterr', 'np.seterr', ([], {'all': '"""raise"""'}), "(all='raise')\n", (693, 706), True, 'import numpy as np\n'), ((707, 761), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'module': '"""matplotlib"""'}), "('ignore', module='matplotlib')\n", (730, 761), False, 'import warnings\n'), ((803, 817), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (815, 817), True, 'import multiprocessing as mp\n'), ((537, 554), 'os.environ.get', 'environ.get', (['"""CI"""'], {}), "('CI')\n", (548, 554), False, 'from os import path, environ\n'), ((841, 863), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (853, 863), False, 'from os import path, environ\n'), ((20177, 20196), 'keras.backend.set_session', 'K.set_session', (['sess'], {}), '(sess)\n', (20190, 20196), True, 'from keras import backend as K\n'), ((22006, 22048), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['flat_metrics_array'], {}), '(flat_metrics_array)\n', (22028, 22048), True, 'import pandas as pd\n'), ((558, 575), 'platform.system', 'platform.system', ([], {}), '()\n', (573, 575), False, 'import platform\n'), ((2169, 2186), 'os.environ.get', 'environ.get', (['"""CI"""'], {}), "('CI')\n", (2180, 2186), False, 'from os import path, environ\n'), ((3407, 3424), 'os.environ.get', 'environ.get', (['"""CI"""'], {}), "('CI')\n", (3418, 3424), False, 'from os import path, environ\n'), ((5920, 5959), 'gym.make', 'gym.make', (["self.sys_vars['GYM_ENV_NAME']"], {}), "(self.sys_vars['GYM_ENV_NAME'])\n", (5928, 5959), False, 'import gym\n'), ((6908, 6925), 'os.environ.get', 'environ.get', (['"""CI"""'], {}), "('CI')\n", (6919, 6925), False, 'from os import path, environ\n'), ((15922, 15962), 'numpy.divide', 'np.divide', (['mean_rewards_array', 'epi_array'], {}), '(mean_rewards_array, epi_array)\n', (15931, 15962), True, 'import numpy as np\n'), ((24327, 24356), 'multiprocessing.Pool', 'mp.Pool', 
(['PARALLEL_PROCESS_NUM'], {}), '(PARALLEL_PROCESS_NUM)\n', (24334, 24356), True, 'import multiprocessing as mp\n'), ((7972, 8041), 'numpy.transpose', 'np.transpose', (['[env.observation_space.low, env.observation_space.high]'], {}), '([env.observation_space.low, env.observation_space.high])\n', (7984, 8041), True, 'import numpy as np\n'), ((10233, 10286), 'numpy.mean', 'np.mean', (["sys_vars['total_rewards_history'][-avg_len:]"], {}), "(sys_vars['total_rewards_history'][-avg_len:])\n", (10240, 10286), True, 'import numpy as np\n'), ((11888, 11905), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (11903, 11905), True, 'from keras import backend as K\n'), ((908, 948), 'os.path.join', 'path.join', (['ASSET_PATH', '"""sess_specs.json"""'], {}), "(ASSET_PATH, 'sess_specs.json')\n", (917, 948), False, 'from os import path, environ\n'), ((990, 1028), 'os.path.join', 'path.join', (['ASSET_PATH', '"""problems.json"""'], {}), "(ASSET_PATH, 'problems.json')\n", (999, 1028), False, 'from os import path, environ\n'), ((18627, 18646), 'copy.copy', 'copy.copy', (['sys_vars'], {}), '(sys_vars)\n', (18636, 18646), False, 'import copy\n'), ((19814, 19834), 'multiprocessing.current_process', 'mp.current_process', ([], {}), '()\n', (19832, 19834), True, 'import multiprocessing as mp\n'), ((12555, 12591), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (12574, 12591), False, 'import traceback\n'), ((15752, 15787), 'numpy.max', 'np.max', (["sv['total_rewards_history']"], {}), "(sv['total_rewards_history'])\n", (15758, 15787), True, 'import numpy as np\n')] |
import pandas as pd
from datetime import time, timedelta, datetime
import numpy as np
import geopandas as gp
import random
from halo import Halo
from lps.core import samplers, generators
from lps.core.population import Population, Agent, Plan, Activity, Leg
# Mapping from (start_hour, end_hour) intervals to a demand-period
# index: 0 = AM peak, 1 = inter-peak, 2 = PM peak. Hours outside
# these ranges fall through to the overnight default elsewhere.
times = {
    (7, 10): 0,
    (10, 16): 1,
    (16, 19): 2
}
class Demand:
    '''Build a synthetic daily freight demand (a Population of agent
    plans) from LoHAM peak-period O-D matrices and a zone shapefile.'''

    def __init__(self, config):
        # config supplies WEIGHTS, EPSG, AMPATH, INTERPATH, PMPATH,
        # ZONESPATH, OUTPATH, MODE, PREFIX, SOURCE, NORM, VERBOSE
        print('\n--------- Initiating LoHAM (freight) Population Input ---------')
        self.config = config
        self.zones = self.load_zones()
        self.london = self.load_filter()
        self.demand = self.load_demand()
        self.num_plans = None
        self.sampler = None
        print('Input Demand Loaded:')
        print("\t> daily demand distribution: {}".format(config.WEIGHTS))
        print("\t> outputs using epsg:{}".format(config.EPSG))
        print("\t> AM demand inputs from: {}".format(config.AMPATH))
        print("\t> Inter-peak demand inputs from: {}".format(config.INTERPATH))
        print("\t> PM demand inputs from: {}".format(config.PMPATH))
        print("\t> zone inputs from: {}".format(config.ZONESPATH))
        print("\t> saving to: {}".format(config.OUTPATH))

    # load zones
    def load_zones(self):
        """
        Load zones, reprojecting to the configured EPSG if needed.
        :return: GeoPandas GeoDataFrame indexed by zone id ('renumber_I')
        """
        with Halo(text='Loading zone data...', spinner='dots') as spinner:
            gdf = gp.read_file(self.config.ZONESPATH)
            # NOTE(review): assumes dict-style crs with an 'init' key
            # (older geopandas API) - confirm against installed version
            if not gdf.crs.get('init') == self.config.EPSG:
                spinner.text = 'converting zones to espg:{}'.format(self.config.EPSG)
                gdf = gdf.to_crs(epsg=self.config.EPSG)
            gdf = gdf.loc[:, ['renumber_I', 'london', 'geometry']]
            gdf = gdf.set_index('renumber_I')
            spinner.succeed('{} zones loaded'.format(len(gdf)))
        return gdf

    def load_filter(self):
        # zone ids flagged as London (london == 1) in the zones layer
        return pd.Series(self.zones.loc[self.zones.london == 1, :].index)

    def filter(self, df):
        # keep O-D rows with either end inside London
        return df.loc[df.o.isin(self.london) | df.d.isin(self.london), :]

    # load OD pairs
    def load_demand(self):
        """
        Load the three peak-period O-D matrices, filter them to
        London, and build sampling distributions.
        :return: dict with 'daily', 'am', 'inter', 'pm' sampler tuples
        """
        with Halo(text='loading demand inputs...', spinner='dots') as spinner:
            am = pd.read_csv(self.config.AMPATH, header=None, names=['o', 'd', 'freq'])
            inter = pd.read_csv(self.config.INTERPATH, header=None, names=['o', 'd', 'freq'])
            pm = pd.read_csv(self.config.PMPATH, header=None, names=['o', 'd', 'freq'])
            spinner.succeed('input demand loaded')
        # TODO SENSE CHECK - halve demand so that return trip does not cause double counting
        am.freq = am.freq / 2
        inter.freq = inter.freq / 2
        pm.freq = pm.freq / 2
        with Halo(text='filtering am demand (1/3) for london...', spinner='dots') as spinner:
            am = self.filter(am)
            spinner.text = 'filtering inter demand (2/3) for london...'
            inter = self.filter(inter)
            spinner.text = 'filtering pm demand (3/3) for london...'
            pm = self.filter(pm)
            am_hour = sum(am.freq)
            inter_hour = sum(inter.freq)
            pm_hour = sum(pm.freq)
            spinner.succeed('inputs filtered for London')
        print("\t> AM-peak hourly demand is {} trips".format(int(am_hour)))
        print("\t> Inter-peak hourly demand is {} trips".format(int(inter_hour)))
        print("\t> PM-peak hourly demand is {} trips".format(int(pm_hour)))
        print("\t> Overnight hourly demand approximated as {} trips".format(int(self.config.WEIGHTS[3] * inter_hour)))
        with Halo(text='Modelled daily demand profile...', spinner='dots') as spinner:
            trips = 0
            daily_hours = np.arange(24)
            daily_profile = []
            for hour in daily_hours:  # Build profile
                # default: overnight demand approximated from inter-peak
                demand = self.config.WEIGHTS[3] * inter_hour
                for (start, end), period in times.items():
                    if end > hour >= start:
                        weight = self.config.WEIGHTS[period]
                        if period == 0:  # AM
                            demand = am_hour * weight
                        elif period in [1, 3]:  # Inter peak or night
                            demand = inter_hour * weight
                        elif period == 2:  # PM
                            demand = pm_hour * weight
                        # break
                daily_profile.append(demand)
                trips += demand
            spinner.succeed('Daily demand model complete')
        if self.config.VERBOSE:
            # crude console histogram of the hourly demand profile
            for hour in daily_hours:
                hour_str = time(hour).strftime("%H:%M")
                norm = int(20 * daily_profile[hour] / max(am_hour, inter_hour, pm_hour))
                print(hour_str + ': ' + ('/' * norm))
        daily_profile = np.array(daily_profile)
        daily_sampler = (daily_hours, daily_profile)
        if self.config.NORM:  # Normalise
            # NOTE(review): daily_sampler captures the pre-normalisation
            # array; this rescale rebinds daily_profile and does not
            # affect the sampler tuple - confirm this is intended
            print("Normalising")
            daily_profile = self.config.NORM * daily_profile / trips
            print("{} total trips loaded".format(int(sum(daily_profile))))
        with Halo(text='Modelled O-D demand profiles...', spinner='dots') as spinner:
            am_profile = np.array(am.freq)
            am_od = tuple(zip(am.o, am.d))
            am_sampler = (am_od, am_profile)
            inter_profile = np.array(inter.freq)
            inter_od = tuple(zip(inter.o, inter.d))
            inter_sampler = (inter_od, inter_profile)
            pm_profile = np.array(pm.freq)
            pm_od = tuple(zip(pm.o, pm.d))
            pm_sampler = (pm_od, pm_profile)
            spinner.succeed('O-D demand profiles completed')
        return {'daily': daily_sampler, 'am': am_sampler, 'inter': inter_sampler, 'pm': pm_sampler}

    def sample(self, sampler, population=None):
        """
        Sample trips from the demand distributions and append one
        agent plan per trip to the population.
        :return: the (possibly newly created) Population
        """
        if not population:
            print('Creating new population object')
            population = Population()
        with Halo(text='Sampling from distributions', spinner='dots') as spinner:
            n = sum(self.demand['daily'][1])  # total daily demand
            n = sampler.get_sample_size(n)
            # Sample n times (over samples but no significant slow down)
            hours = random.choices(self.demand['daily'][0], weights=self.demand['daily'][1], k=n + 1)
            start_times = generators.gen_minutes(hours)
            am_od_ids = random.choices(self.demand['am'][0], weights=self.demand['am'][1], k=n + 1)
            inter_od_ids = random.choices(self.demand['inter'][0], weights=self.demand['inter'][1], k=n + 1)
            pm_od_ids = random.choices(self.demand['pm'][0], weights=self.demand['pm'][1], k=n + 1)
            spinner.succeed('Sampling completed for {} plans'.format(n))
        with Halo(text="Building trips...", spinner="dots") as spinner:
            for trip in range(n):
                spinner.text = "Built {} of {} trips...".format(trip, n)
                # Make unique ID
                uid = self.config.PREFIX + str(trip)
                # Random sample hour from daily profile distribution
                hour = hours[trip]
                # Random sample minute to make time stamp
                start_time = start_times[trip]
                # Select Peak or Inter-Peak O-D pairs and weights
                period = 3
                for (start, end), p in times.items():
                    if end > hour >= start:
                        period = p
                        break
                if period == 0:  # Use am matrix
                    o_id, d_id = am_od_ids[trip]
                elif period == 2:  # Use pm matrix
                    o_id, d_id = pm_od_ids[trip]
                else:  # Use inter-peak matrix
                    o_id, d_id = inter_od_ids[trip]
                # Sample O-D points
                o = samplers.sample_point(o_id, self.zones)
                d = samplers.sample_point(d_id, self.zones)
                # Get distance between pair (for approx. journey time)
                dist = samplers.get_approx_distance(o, d)
                journey_time = samplers.build_journey_time(dist, mode=self.config.MODE, limit=72000)  # limited at 20 hours
                # Build up day times
                dt = datetime(2000, 1, 1, start_time.hour, start_time.minute)
                dt0, dt1 = samplers.build_trip_times(dt, journey_time, push='forward')  # function prevents leg straddling day
                minutes = random.randint(1, 6) * 5
                dt2 = dt1 + timedelta(seconds=(minutes * 60))  # Assume 5 to 30 minutes at destination
                dt3 = dt2 + journey_time
                population.agents.append(self.build_plan(uid, o, d, dt0, dt1, dt2, dt3, dist))
            spinner.succeed("Plan simulation completed for {} plans".format(n))
        return population

    def build_plan(self, uid, o, d, dt0, dt1, dt2, dt3, dist=None):
        """
        Build a single agent's depot->delivery(->depot) plan from the
        four trip datetimes and return it as an Agent.
        """
        t0 = dt0.time().strftime("%H:%M:%S")  # Home departure
        t1 = dt1.time().strftime("%H:%M:%S")  # Delivery arrival
        t2 = dt2.time().strftime("%H:%M:%S")  # Delivery departure
        t3 = dt3.time().strftime("%H:%M:%S")  # Home arrival
        activities = []
        legs = []
        if (dt1 - dt0).days or (dt1 - dt0).seconds > (12 * 60 * 60):  # long journey - don't try to return
            activities.append(Activity(uid, 0, 'depot', o, t1, t0))
            legs.append(Leg(uid, 0, self.config.MODE, o, d, t0, t1, dist))
            activities.append(Activity(uid, 1, 'delivery', d, t1, t0))
        else:
            if dt0.time() < dt2.time():  # Regular sequence with delivery end time after depo departure
                activities.append(Activity(uid, 0, 'depot', o, t3, t0))
                legs.append(Leg(uid, 0, self.config.MODE, o, d, t0, t1, dist))
                activities.append(Activity(uid, 1, 'delivery', d, t1, t2))
                legs.append(Leg(uid, 1, self.config.MODE, d, o, t2, t3, dist))
                activities.append(Activity(uid, 2, 'depot', o, t3, t0))
            else:  # sequence starting at delivery
                activities.append(Activity(uid, 0, 'delivery', d, t1, t2))
                legs.append(Leg(uid, 0, self.config.MODE, d, o, t2, t3, dist))
                activities.append(Activity(uid, 1, 'depot', o, t3, t0))
                legs.append(Leg(uid, 1, self.config.MODE, o, d, t0, t1, dist))
                activities.append(Activity(uid, 2, 'delivery', o, t1, t2))
        tag = self.config.SOURCE
        plan = [Plan(activities, legs, tag)]
        # all demographic attributes are tagged with the data source
        keys = (
            'source', 'subpopulation', 'hsize', 'car', 'inc', 'hstr', 'gender', 'age', 'race', 'license', 'job', 'occ')
        values = [tag] * len(keys)
        attribute_dic = dict(zip(keys, values))
        return Agent(uid, plan, attribute_dic)
| [
"lps.core.samplers.sample_point",
"pandas.read_csv",
"lps.core.samplers.build_journey_time",
"lps.core.samplers.get_approx_distance",
"lps.core.population.Activity",
"numpy.array",
"random.choices",
"datetime.timedelta",
"numpy.arange",
"datetime.datetime",
"datetime.time",
"geopandas.read_fil... | [((1876, 1934), 'pandas.Series', 'pd.Series', (['self.zones.loc[self.zones.london == 1, :].index'], {}), '(self.zones.loc[self.zones.london == 1, :].index)\n', (1885, 1934), True, 'import pandas as pd\n'), ((4806, 4829), 'numpy.array', 'np.array', (['daily_profile'], {}), '(daily_profile)\n', (4814, 4829), True, 'import numpy as np\n'), ((10757, 10788), 'lps.core.population.Agent', 'Agent', (['uid', 'plan', 'attribute_dic'], {}), '(uid, plan, attribute_dic)\n', (10762, 10788), False, 'from lps.core.population import Population, Agent, Plan, Activity, Leg\n'), ((1319, 1368), 'halo.Halo', 'Halo', ([], {'text': '"""Loading zone data..."""', 'spinner': '"""dots"""'}), "(text='Loading zone data...', spinner='dots')\n", (1323, 1368), False, 'from halo import Halo\n'), ((1399, 1434), 'geopandas.read_file', 'gp.read_file', (['self.config.ZONESPATH'], {}), '(self.config.ZONESPATH)\n', (1411, 1434), True, 'import geopandas as gp\n'), ((2097, 2150), 'halo.Halo', 'Halo', ([], {'text': '"""loading demand inputs..."""', 'spinner': '"""dots"""'}), "(text='loading demand inputs...', spinner='dots')\n", (2101, 2150), False, 'from halo import Halo\n'), ((2180, 2250), 'pandas.read_csv', 'pd.read_csv', (['self.config.AMPATH'], {'header': 'None', 'names': "['o', 'd', 'freq']"}), "(self.config.AMPATH, header=None, names=['o', 'd', 'freq'])\n", (2191, 2250), True, 'import pandas as pd\n'), ((2271, 2344), 'pandas.read_csv', 'pd.read_csv', (['self.config.INTERPATH'], {'header': 'None', 'names': "['o', 'd', 'freq']"}), "(self.config.INTERPATH, header=None, names=['o', 'd', 'freq'])\n", (2282, 2344), True, 'import pandas as pd\n'), ((2362, 2432), 'pandas.read_csv', 'pd.read_csv', (['self.config.PMPATH'], {'header': 'None', 'names': "['o', 'd', 'freq']"}), "(self.config.PMPATH, header=None, names=['o', 'd', 'freq'])\n", (2373, 2432), True, 'import pandas as pd\n'), ((2704, 2772), 'halo.Halo', 'Halo', ([], {'text': '"""filtering am demand (1/3) for london..."""', 
'spinner': '"""dots"""'}), "(text='filtering am demand (1/3) for london...', spinner='dots')\n", (2708, 2772), False, 'from halo import Halo\n'), ((3569, 3630), 'halo.Halo', 'Halo', ([], {'text': '"""Modelled daily demand profile..."""', 'spinner': '"""dots"""'}), "(text='Modelled daily demand profile...', spinner='dots')\n", (3573, 3630), False, 'from halo import Halo\n'), ((3691, 3704), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (3700, 3704), True, 'import numpy as np\n'), ((5114, 5174), 'halo.Halo', 'Halo', ([], {'text': '"""Modelled O-D demand profiles..."""', 'spinner': '"""dots"""'}), "(text='Modelled O-D demand profiles...', spinner='dots')\n", (5118, 5174), False, 'from halo import Halo\n'), ((5212, 5229), 'numpy.array', 'np.array', (['am.freq'], {}), '(am.freq)\n', (5220, 5229), True, 'import numpy as np\n'), ((5346, 5366), 'numpy.array', 'np.array', (['inter.freq'], {}), '(inter.freq)\n', (5354, 5366), True, 'import numpy as np\n'), ((5498, 5515), 'numpy.array', 'np.array', (['pm.freq'], {}), '(pm.freq)\n', (5506, 5515), True, 'import numpy as np\n'), ((5920, 5932), 'lps.core.population.Population', 'Population', ([], {}), '()\n', (5930, 5932), False, 'from lps.core.population import Population, Agent, Plan, Activity, Leg\n'), ((5947, 6003), 'halo.Halo', 'Halo', ([], {'text': '"""Sampling from distributions"""', 'spinner': '"""dots"""'}), "(text='Sampling from distributions', spinner='dots')\n", (5951, 6003), False, 'from halo import Halo\n'), ((6221, 6307), 'random.choices', 'random.choices', (["self.demand['daily'][0]"], {'weights': "self.demand['daily'][1]", 'k': '(n + 1)'}), "(self.demand['daily'][0], weights=self.demand['daily'][1], k=\n n + 1)\n", (6235, 6307), False, 'import random\n'), ((6329, 6358), 'lps.core.generators.gen_minutes', 'generators.gen_minutes', (['hours'], {}), '(hours)\n', (6351, 6358), False, 'from lps.core import samplers, generators\n'), ((6383, 6458), 'random.choices', 'random.choices', (["self.demand['am'][0]"], 
{'weights': "self.demand['am'][1]", 'k': '(n + 1)'}), "(self.demand['am'][0], weights=self.demand['am'][1], k=n + 1)\n", (6397, 6458), False, 'import random\n'), ((6486, 6572), 'random.choices', 'random.choices', (["self.demand['inter'][0]"], {'weights': "self.demand['inter'][1]", 'k': '(n + 1)'}), "(self.demand['inter'][0], weights=self.demand['inter'][1], k=\n n + 1)\n", (6500, 6572), False, 'import random\n'), ((6592, 6667), 'random.choices', 'random.choices', (["self.demand['pm'][0]"], {'weights': "self.demand['pm'][1]", 'k': '(n + 1)'}), "(self.demand['pm'][0], weights=self.demand['pm'][1], k=n + 1)\n", (6606, 6667), False, 'import random\n'), ((6755, 6801), 'halo.Halo', 'Halo', ([], {'text': '"""Building trips..."""', 'spinner': '"""dots"""'}), "(text='Building trips...', spinner='dots')\n", (6759, 6801), False, 'from halo import Halo\n'), ((10492, 10519), 'lps.core.population.Plan', 'Plan', (['activities', 'legs', 'tag'], {}), '(activities, legs, tag)\n', (10496, 10519), False, 'from lps.core.population import Population, Agent, Plan, Activity, Leg\n'), ((7830, 7869), 'lps.core.samplers.sample_point', 'samplers.sample_point', (['o_id', 'self.zones'], {}), '(o_id, self.zones)\n', (7851, 7869), False, 'from lps.core import samplers, generators\n'), ((7890, 7929), 'lps.core.samplers.sample_point', 'samplers.sample_point', (['d_id', 'self.zones'], {}), '(d_id, self.zones)\n', (7911, 7929), False, 'from lps.core import samplers, generators\n'), ((8025, 8059), 'lps.core.samplers.get_approx_distance', 'samplers.get_approx_distance', (['o', 'd'], {}), '(o, d)\n', (8053, 8059), False, 'from lps.core import samplers, generators\n'), ((8091, 8160), 'lps.core.samplers.build_journey_time', 'samplers.build_journey_time', (['dist'], {'mode': 'self.config.MODE', 'limit': '(72000)'}), '(dist, mode=self.config.MODE, limit=72000)\n', (8118, 8160), False, 'from lps.core import samplers, generators\n'), ((8243, 8299), 'datetime.datetime', 'datetime', (['(2000)', '(1)', '(1)', 
'start_time.hour', 'start_time.minute'], {}), '(2000, 1, 1, start_time.hour, start_time.minute)\n', (8251, 8299), False, 'from datetime import time, timedelta, datetime\n'), ((8327, 8386), 'lps.core.samplers.build_trip_times', 'samplers.build_trip_times', (['dt', 'journey_time'], {'push': '"""forward"""'}), "(dt, journey_time, push='forward')\n", (8352, 8386), False, 'from lps.core import samplers, generators\n'), ((9332, 9368), 'lps.core.population.Activity', 'Activity', (['uid', '(0)', '"""depot"""', 'o', 't1', 't0'], {}), "(uid, 0, 'depot', o, t1, t0)\n", (9340, 9368), False, 'from lps.core.population import Population, Agent, Plan, Activity, Leg\n'), ((9394, 9443), 'lps.core.population.Leg', 'Leg', (['uid', '(0)', 'self.config.MODE', 'o', 'd', 't0', 't1', 'dist'], {}), '(uid, 0, self.config.MODE, o, d, t0, t1, dist)\n', (9397, 9443), False, 'from lps.core.population import Population, Agent, Plan, Activity, Leg\n'), ((9475, 9514), 'lps.core.population.Activity', 'Activity', (['uid', '(1)', '"""delivery"""', 'd', 't1', 't0'], {}), "(uid, 1, 'delivery', d, t1, t0)\n", (9483, 9514), False, 'from lps.core.population import Population, Agent, Plan, Activity, Leg\n'), ((8453, 8473), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (8467, 8473), False, 'import random\n'), ((8506, 8537), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(minutes * 60)'}), '(seconds=minutes * 60)\n', (8515, 8537), False, 'from datetime import time, timedelta, datetime\n'), ((9668, 9704), 'lps.core.population.Activity', 'Activity', (['uid', '(0)', '"""depot"""', 'o', 't3', 't0'], {}), "(uid, 0, 'depot', o, t3, t0)\n", (9676, 9704), False, 'from lps.core.population import Population, Agent, Plan, Activity, Leg\n'), ((9734, 9783), 'lps.core.population.Leg', 'Leg', (['uid', '(0)', 'self.config.MODE', 'o', 'd', 't0', 't1', 'dist'], {}), '(uid, 0, self.config.MODE, o, d, t0, t1, dist)\n', (9737, 9783), False, 'from lps.core.population import Population, Agent, Plan, 
Activity, Leg\n'), ((9819, 9858), 'lps.core.population.Activity', 'Activity', (['uid', '(1)', '"""delivery"""', 'd', 't1', 't2'], {}), "(uid, 1, 'delivery', d, t1, t2)\n", (9827, 9858), False, 'from lps.core.population import Population, Agent, Plan, Activity, Leg\n'), ((9888, 9937), 'lps.core.population.Leg', 'Leg', (['uid', '(1)', 'self.config.MODE', 'd', 'o', 't2', 't3', 'dist'], {}), '(uid, 1, self.config.MODE, d, o, t2, t3, dist)\n', (9891, 9937), False, 'from lps.core.population import Population, Agent, Plan, Activity, Leg\n'), ((9973, 10009), 'lps.core.population.Activity', 'Activity', (['uid', '(2)', '"""depot"""', 'o', 't3', 't0'], {}), "(uid, 2, 'depot', o, t3, t0)\n", (9981, 10009), False, 'from lps.core.population import Population, Agent, Plan, Activity, Leg\n'), ((10096, 10135), 'lps.core.population.Activity', 'Activity', (['uid', '(0)', '"""delivery"""', 'd', 't1', 't2'], {}), "(uid, 0, 'delivery', d, t1, t2)\n", (10104, 10135), False, 'from lps.core.population import Population, Agent, Plan, Activity, Leg\n'), ((10165, 10214), 'lps.core.population.Leg', 'Leg', (['uid', '(0)', 'self.config.MODE', 'd', 'o', 't2', 't3', 'dist'], {}), '(uid, 0, self.config.MODE, d, o, t2, t3, dist)\n', (10168, 10214), False, 'from lps.core.population import Population, Agent, Plan, Activity, Leg\n'), ((10250, 10286), 'lps.core.population.Activity', 'Activity', (['uid', '(1)', '"""depot"""', 'o', 't3', 't0'], {}), "(uid, 1, 'depot', o, t3, t0)\n", (10258, 10286), False, 'from lps.core.population import Population, Agent, Plan, Activity, Leg\n'), ((10316, 10365), 'lps.core.population.Leg', 'Leg', (['uid', '(1)', 'self.config.MODE', 'o', 'd', 't0', 't1', 'dist'], {}), '(uid, 1, self.config.MODE, o, d, t0, t1, dist)\n', (10319, 10365), False, 'from lps.core.population import Population, Agent, Plan, Activity, Leg\n'), ((10401, 10440), 'lps.core.population.Activity', 'Activity', (['uid', '(2)', '"""delivery"""', 'o', 't1', 't2'], {}), "(uid, 2, 'delivery', o, t1, t2)\n", 
(10409, 10440), False, 'from lps.core.population import Population, Agent, Plan, Activity, Leg\n'), ((4609, 4619), 'datetime.time', 'time', (['hour'], {}), '(hour)\n', (4613, 4619), False, 'from datetime import time, timedelta, datetime\n')] |
#! /usr/bin/env python
"""
Aegean Residual (AeRes) has the following capability:
- convert a catalogue into an image model
- subtract image model from image
- write model and residual files
"""
__author__ = "<NAME>"
import logging
import numpy as np
from astropy.io import fits
from AegeanTools import catalogs, fitting, wcs_helpers
FWHM2CC = 1 / (2 * np.sqrt(2 * np.log(2)))
def load_sources(filename,
                 ra_col='ra', dec_col='dec',
                 peak_col='peak_flux',
                 a_col='a', b_col='b', pa_col='pa'):
    """
    Read a catalogue file and return its sources as a list of components.

    Parameters
    ----------
    filename : str
        Filename to be read
    ra_col, dec_col, peak_col, a_col, b_col, pa_col : str
        The column names for each of the parameters.
        Default = ['ra', 'dec', 'peak_flux', 'a', 'b', 'pa']

    Return
    ------
    catalog : [`class:AegeanTools.models.ComponentSource`, ...]
        A list of source components, or None if any required column is absent.
    """
    table = catalogs.load_table(filename)
    wanted = [ra_col, dec_col, peak_col, a_col, b_col, pa_col]
    # Collect every missing column so the user sees all problems at once.
    missing = [c for c in wanted if c not in table.colnames]
    for c in missing:
        logging.error("Column {0} not found".format(c))
    if missing:
        logging.error("Some required columns missing or mis-labeled")
        return None
    # Normalise the column names to the canonical set used downstream.
    standard = ['ra', 'dec', 'peak_flux', 'a', 'b', 'pa']
    for old, new in zip(wanted, standard):
        table.rename_column(old, new)
    catalog = catalogs.table_to_source_list(table)
    logging.info("read {0} sources from {1}".format(len(catalog), filename))
    return catalog
def make_model(sources, shape, wcshelper, mask=False, frac=None, sigma=4):
    """
    Create a model image based on a catalogue of sources.

    Parameters
    ----------
    sources : [`class:AegeanTools.models.ComponentSource`, ...]
        a list of sources
    shape : [float, float]
        the shape of the input (and output) image
    wcshelper : 'class:AegeanTools.wcs_helpers.WCSHelper'
        A WCSHelper object corresponding to the input image
    mask : bool
        If true then mask pixels (set to NaN) instead of subtracting or adding sources
    frac : float
        pixels that are brighter than frac*peak_flux for each source will be masked if mask=True
    sigma: float
        pixels that are brighter than rms*sigma will be masked if mask=True
    Returns
    -------
    model : np.ndarray
        The desired model.
    """
    # Model array
    m = np.zeros(shape, dtype=np.float32)
    # Each source is evaluated out to `factor` times its projected extent.
    factor = 5
    i_count = 0  # count of sources actually rendered into the model
    for src in sources:
        # Convert the sky ellipse (a/b are in arcsec, hence /3600) to pixel coords.
        xo, yo, sx, sy, theta = wcshelper.sky2pix_ellipse([src.ra, src.dec], src.a/3600, src.b/3600, src.pa)
        phi = np.radians(theta)
        # skip sources that have a center that is outside of the image
        if not 0 < xo < shape[0]:
            logging.debug("source {0} is not within image".format(src.island))
            continue
        if not 0 < yo < shape[1]:
            logging.debug("source {0} is not within image".format(src.island))
            continue
        # pixels over which this model is calculated:
        # bounding box of the rotated ellipse, scaled by `factor`
        xoff = factor*(abs(sx*np.cos(phi)) + abs(sy*np.sin(phi)))
        xmin = xo - xoff
        xmax = xo + xoff
        yoff = factor*(abs(sx*np.sin(phi)) + abs(sy*np.cos(phi)))
        ymin = yo - yoff
        ymax = yo + yoff
        # clip to the image size
        ymin = max(np.floor(ymin), 0)
        ymax = min(np.ceil(ymax), shape[1])
        xmin = max(np.floor(xmin), 0)
        xmax = min(np.ceil(xmax), shape[0])
        # non-finite bounds mean the projection failed; skip this source
        if not np.all(np.isfinite([ymin, ymax, xmin, xmax])):
            continue
        if logging.getLogger().isEnabledFor(logging.DEBUG):  # pragma: no cover
            logging.debug("Source ({0},{1})".format(src.island, src.source))
            logging.debug(" xo, yo: {0} {1}".format(xo, yo))
            logging.debug(" sx, sy: {0} {1}".format(sx, sy))
            logging.debug(" theta, phi: {0} {1}".format(theta, phi))
            logging.debug(" xoff, yoff: {0} {1}".format(xoff, yoff))
            logging.debug(" xmin, xmax, ymin, ymax: {0}:{1} {2}:{3}".format(xmin, xmax, ymin, ymax))
            logging.debug(" flux, sx, sy: {0} {1} {2}".format(src.peak_flux, sx, sy))
        # positions for which we want to make the model
        x, y = np.mgrid[int(xmin):int(xmax), int(ymin):int(ymax)]
        x = x.ravel()
        y = y.ravel()
        # TODO: understand why xo/yo -1 is needed
        model = fitting.elliptical_gaussian(x, y, src.peak_flux, xo-1, yo-1, sx*FWHM2CC, sy*FWHM2CC, theta)
        # Mask the output image if requested
        if mask:
            if frac is not None:
                indices = np.where(model >= (frac*src.peak_flux))
            else:
                indices = np.where(model >= (sigma*src.local_rms))
            # somehow m[x,y][indices] = np.nan doesn't assign any values
            # so we have to do the more complicated
            # m[x[indices],y[indices]] = np.nan
            m[x[indices], y[indices]]= np.nan
        else:
            m[x, y] += model
        i_count += 1
    logging.info("modeled {0} sources".format(i_count))
    return m
def make_residual(fitsfile, catalog, rfile, mfile=None, add=False, mask=False, frac=None, sigma=4,
                  colmap=None):
    """
    Model the catalogue and combine it with the input image, writing the
    residual (and optionally the model) to disk.

    Parameters
    ----------
    fitsfile : str
        Input fits image filename
    catalog : str
        Input catalog filename of a type supported by Aegean
    rfile : str
        Filename to write residual image
    mfile : str
        Filename to write model image. Default=None means don't write the model file.
    add : bool
        If True add the model instead of subtracting it
    mask : bool
        If true then mask pixels instead of adding or subtracting the sources
    frac : float
        pixels that are brighter than frac*peak_flux for each source will be masked if mask=True
    sigma : float
        pixels that are brighter than sigma*local_rms for each source will be masked if mask=True
    colmap : dict
        A mapping of column names. Default is:
        {'ra_col':'ra', 'dec_col':'dec', 'peak_col':'peak_flux', 'a_col':'a', 'b_col':'b', 'pa_col':'pa}

    Return
    ------
    None
    """
    colmap = {} if colmap is None else colmap
    source_list = load_sources(catalog, **colmap)
    if source_list is None:
        return None
    # force two axes so that we dump redundant stokes/freq axes if they are present.
    hdulist = fits.open(fitsfile, naxis=2)
    # ignore dimensions of length 1
    data = np.squeeze(hdulist[0].data)
    wcshelper = wcs_helpers.WCSHelper.from_header(hdulist[0].header)
    model = make_model(source_list, data.shape, wcshelper, mask, frac, sigma)
    # Masking uses NaNs in the model, so "adding" propagates them into the data.
    residual = data + model if (add or mask) else data - model
    hdulist[0].data = residual
    hdulist.writeto(rfile, overwrite=True)
    logging.info("wrote residual to {0}".format(rfile))
    if mfile is not None:
        hdulist[0].data = model
        hdulist.writeto(mfile, overwrite=True)
        logging.info("wrote model to {0}".format(mfile))
    return
| [
"numpy.radians",
"logging.getLogger",
"AegeanTools.fitting.elliptical_gaussian",
"numpy.ceil",
"numpy.where",
"AegeanTools.wcs_helpers.WCSHelper.from_header",
"numpy.log",
"numpy.floor",
"AegeanTools.catalogs.load_table",
"numpy.squeeze",
"numpy.zeros",
"numpy.isfinite",
"numpy.cos",
"astr... | [((1026, 1055), 'AegeanTools.catalogs.load_table', 'catalogs.load_table', (['filename'], {}), '(filename)\n', (1045, 1055), False, 'from AegeanTools import catalogs, fitting, wcs_helpers\n'), ((1685, 1721), 'AegeanTools.catalogs.table_to_source_list', 'catalogs.table_to_source_list', (['table'], {}), '(table)\n', (1714, 1721), False, 'from AegeanTools import catalogs, fitting, wcs_helpers\n'), ((2684, 2717), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (2692, 2717), True, 'import numpy as np\n'), ((6845, 6873), 'astropy.io.fits.open', 'fits.open', (['fitsfile'], {'naxis': '(2)'}), '(fitsfile, naxis=2)\n', (6854, 6873), False, 'from astropy.io import fits\n'), ((6921, 6948), 'numpy.squeeze', 'np.squeeze', (['hdulist[0].data'], {}), '(hdulist[0].data)\n', (6931, 6948), True, 'import numpy as np\n'), ((6997, 7038), 'AegeanTools.wcs_helpers.WCSHelper.from_header', 'wcs_helpers.WCSHelper.from_header', (['header'], {}), '(header)\n', (7030, 7038), False, 'from AegeanTools import catalogs, fitting, wcs_helpers\n'), ((1375, 1436), 'logging.error', 'logging.error', (['"""Some required columns missing or mis-labeled"""'], {}), "('Some required columns missing or mis-labeled')\n", (1388, 1436), False, 'import logging\n'), ((2897, 2914), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (2907, 2914), True, 'import numpy as np\n'), ((4664, 4767), 'AegeanTools.fitting.elliptical_gaussian', 'fitting.elliptical_gaussian', (['x', 'y', 'src.peak_flux', '(xo - 1)', '(yo - 1)', '(sx * FWHM2CC)', '(sy * FWHM2CC)', 'theta'], {}), '(x, y, src.peak_flux, xo - 1, yo - 1, sx *\n FWHM2CC, sy * FWHM2CC, theta)\n', (4691, 4767), False, 'from AegeanTools import catalogs, fitting, wcs_helpers\n'), ((3595, 3609), 'numpy.floor', 'np.floor', (['ymin'], {}), '(ymin)\n', (3603, 3609), True, 'import numpy as np\n'), ((3633, 3646), 'numpy.ceil', 'np.ceil', (['ymax'], {}), '(ymax)\n', (3640, 3646), True, 'import numpy as np\n'), 
((3678, 3692), 'numpy.floor', 'np.floor', (['xmin'], {}), '(xmin)\n', (3686, 3692), True, 'import numpy as np\n'), ((3716, 3729), 'numpy.ceil', 'np.ceil', (['xmax'], {}), '(xmax)\n', (3723, 3729), True, 'import numpy as np\n'), ((367, 376), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (373, 376), True, 'import numpy as np\n'), ((3764, 3801), 'numpy.isfinite', 'np.isfinite', (['[ymin, ymax, xmin, xmax]'], {}), '([ymin, ymax, xmin, xmax])\n', (3775, 3801), True, 'import numpy as np\n'), ((3837, 3856), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3854, 3856), False, 'import logging\n'), ((4878, 4917), 'numpy.where', 'np.where', (['(model >= frac * src.peak_flux)'], {}), '(model >= frac * src.peak_flux)\n', (4886, 4917), True, 'import numpy as np\n'), ((4962, 5002), 'numpy.where', 'np.where', (['(model >= sigma * src.local_rms)'], {}), '(model >= sigma * src.local_rms)\n', (4970, 5002), True, 'import numpy as np\n'), ((3339, 3350), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (3345, 3350), True, 'import numpy as np\n'), ((3361, 3372), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (3367, 3372), True, 'import numpy as np\n'), ((3456, 3467), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (3462, 3467), True, 'import numpy as np\n'), ((3478, 3489), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (3484, 3489), True, 'import numpy as np\n')] |
import os
import sys
try:
base_directory = os.path.split(sys.executable)[0]
os.environ['PATH'] += ';' + base_directory
import cntk
os.environ['KERAS_BACKEND'] = 'cntk'
except ImportError:
print('CNTK not installed')
import keras
import keras.utils
import keras.datasets
import keras.models
import keras.layers
import keras.applications
import keras.preprocessing.image
import numpy as np
import matplotlib.pyplot as plt
import os
import numpy as np
base_dir = 'C:/Users/anastasios/Desktop/cats_and_dogs'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
def extract_features(directory, sample_count):
    """Run images from *directory* through a frozen VGG16 convolutional base.

    Parameters
    ----------
    directory : str
        Directory laid out for ``flow_from_directory`` (one sub-folder per class).
    sample_count : int
        Total number of images to process. Should be a multiple of the batch
        size (20) so the final batch does not overrun the output arrays.

    Returns
    -------
    features : np.ndarray
        Conv-base activations, shape (sample_count, 4, 4, 512).
    labels : np.ndarray
        Binary class labels, shape (sample_count,).
    """
    conv_base = keras.applications.VGG16(weights='imagenet', include_top=False, input_shape=(150, 150, 3))
    conv_base.summary()
    datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
    batch_size = 20
    features = np.zeros(shape=(sample_count, 4, 4, 512))
    # Use a real 1-tuple; the original `(sample_count)` was just a bare int.
    labels = np.zeros(shape=(sample_count,))
    generator = datagen.flow_from_directory(
        directory,
        target_size=(150, 150),
        batch_size=batch_size,
        class_mode='binary')
    i = 0
    for inputs_batch, labels_batch in generator:
        features_batch = conv_base.predict(inputs_batch)
        features[i * batch_size : (i + 1) * batch_size] = features_batch
        labels[i * batch_size : (i + 1) * batch_size] = labels_batch
        i += 1
        if i % 4 == 0:
            print('{0}, processed {1} images'.format(directory, i*batch_size))
        if i * batch_size >= sample_count:
            # Note that since generators yield data indefinitely in a loop,
            # we must `break` after every image has been seen once.
            break
    return features, labels
def save_npy_files(features, labels, prefix):
    """Persist feature and label arrays as ``<prefix>_features.npy`` and
    ``<prefix>_labels.npy``.

    Parameters
    ----------
    features : np.ndarray
        Extracted feature array to save.
    labels : np.ndarray
        Matching label array to save.
    prefix : str
        Path prefix (e.g. ``'train'``) used to build both file names.
    """
    np.save(prefix + '_features.npy', features)
    # Name the labels file explicitly so it visibly mirrors what
    # load_npy_files reads back; previously np.save was left to append
    # the '.npy' suffix implicitly.
    np.save(prefix + '_labels.npy', labels)
def load_npy_files(prefix):
    """Load the feature/label arrays previously written by ``save_npy_files``.

    Reads ``<prefix>_features.npy`` and ``<prefix>_labels.npy`` and returns
    them as a ``(features, labels)`` tuple.
    """
    features = np.load(prefix + '_features.npy')
    labels = np.load(prefix + '_labels.npy')
    print('Loaded {0}_features.npy, {0}_labels.npy'.format(prefix))
    return features, labels
def plot_history(history):
    """Render two figures from a Keras ``History``: accuracy, then loss.

    The first figure compares training and validation accuracy per epoch,
    the second compares training and validation loss; ``plt.show`` blocks
    until the windows are closed.
    """
    hist = history.history
    epochs = range(len(hist['acc']))
    plt.plot(epochs, hist['acc'], 'bo', label='Training acc')
    plt.plot(epochs, hist['val_acc'], 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()
    plt.figure()
    plt.plot(epochs, hist['loss'], 'bo', label='Training loss')
    plt.plot(epochs, hist['val_loss'], 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()
    plt.show()
def train_with_extracted_features():
    """Train a small dense classifier on pre-extracted VGG16 features.

    Features are extracted once, flattened, and cached to ``*_features.npy``
    / ``*_labels.npy`` files; subsequent runs reload the cache instead of
    re-running the (slow) extraction.
    """
    # The presence of the test cache is used as a proxy for "all caches exist".
    if os.path.isfile('test_features.npy'):
        train_features, train_labels = load_npy_files('train')
        validation_features, validation_labels = load_npy_files('validation')
        test_features, test_labels = load_npy_files('test')
    else:
        train_features, train_labels = extract_features(train_dir, 2000)
        validation_features, validation_labels = extract_features(validation_dir, 1000)
        test_features, test_labels = extract_features(test_dir, 1000)
        # Flatten the 4x4x512 conv maps so they can feed a Dense layer.
        train_features = np.reshape(train_features, (2000, 4 * 4 * 512))
        validation_features = np.reshape(validation_features, (1000, 4 * 4 * 512))
        test_features = np.reshape(test_features, (1000, 4 * 4 * 512))
        save_npy_files(train_features, train_labels, 'train')
        save_npy_files(validation_features, validation_labels, 'validation')
        save_npy_files(test_features, test_labels, 'test')
    # Small dense head: one hidden layer with dropout, sigmoid for binary output.
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))
    model.add(keras.layers.Dropout(0.5))
    model.add(keras.layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer=keras.optimizers.RMSprop(lr=2e-5), loss='binary_crossentropy', metrics=['acc'])
    history = model.fit(train_features, train_labels, epochs=5, batch_size=20, validation_data=(validation_features, validation_labels))
    plot_history(history)
def train_with_augmentation(use_finetuning):
    """Train end-to-end on augmented images with a VGG16 conv base.

    Parameters
    ----------
    use_finetuning : bool
        If True, unfreeze the conv base from ``block5_conv1`` onward so the
        top conv block is fine-tuned; otherwise the whole conv base is frozen
        and only the new dense head is trained.
    """
    conv_base = keras.applications.VGG16(weights='imagenet', include_top=False, input_shape=(150, 150, 3))
    model = keras.models.Sequential()
    model.add(conv_base)
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(256, activation='relu'))
    model.add(keras.layers.Dense(1, activation='sigmoid'))
    model.summary()
    print('This is the number of trainable weights before freezing the conv base:', len(model.trainable_weights))
    if use_finetuning:
        # Freeze layers up to (but not including) block5_conv1; everything
        # from block5_conv1 onward stays trainable.
        set_trainable = False
        for layer in conv_base.layers:
            if layer.name == 'block5_conv1':
                set_trainable = True
            if set_trainable:
                layer.trainable = True
            else:
                layer.trainable = False
    else:
        conv_base.trainable = False
    print('This is the number of trainable weights after freezing the conv base:', len(model.trainable_weights))
    model.summary()
    # Augment only the training data: random rotations, shifts, shears, zooms
    # and horizontal flips.
    train_datagen = keras.preprocessing.image.ImageDataGenerator(
        rescale=1. / 255,
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest')
    # Note that the validation data should not be augmented!
    test_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
    train_generator = train_datagen.flow_from_directory(
        # This is the target directory
        train_dir,
        # All images will be resized to 150x150
        target_size=(150, 150),
        batch_size=20,
        # Since we use binary_crossentropy loss, we need binary labels
        class_mode='binary')
    validation_generator = test_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=20,
        class_mode='binary')
    # Low learning rate so fine-tuning does not destroy the pretrained weights.
    model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.RMSprop(lr=2e-5), metrics=['acc'])
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=100,
        epochs=30,
        validation_data=validation_generator,
        validation_steps=50,
        verbose=2)
    plot_history(history)
if __name__ == '__main__':
    # Run both transfer-learning variants back to back: fast feature
    # extraction first, then end-to-end training with augmentation and
    # fine-tuning of the top conv block.
    train_with_extracted_features()
    train_with_augmentation(use_finetuning=True)
| [
"keras.preprocessing.image.ImageDataGenerator",
"keras.layers.Dense",
"numpy.save",
"numpy.load",
"numpy.reshape",
"matplotlib.pyplot.plot",
"os.path.split",
"keras.applications.VGG16",
"keras.layers.Flatten",
"keras.models.Sequential",
"os.path.isfile",
"matplotlib.pyplot.title",
"keras.lay... | [((542, 573), 'os.path.join', 'os.path.join', (['base_dir', '"""train"""'], {}), "(base_dir, 'train')\n", (554, 573), False, 'import os\n'), ((591, 627), 'os.path.join', 'os.path.join', (['base_dir', '"""validation"""'], {}), "(base_dir, 'validation')\n", (603, 627), False, 'import os\n'), ((639, 669), 'os.path.join', 'os.path.join', (['base_dir', '"""test"""'], {}), "(base_dir, 'test')\n", (651, 669), False, 'import os\n'), ((735, 830), 'keras.applications.VGG16', 'keras.applications.VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': '(150, 150, 3)'}), "(weights='imagenet', include_top=False, input_shape\n =(150, 150, 3))\n", (759, 830), False, 'import keras\n'), ((865, 928), 'keras.preprocessing.image.ImageDataGenerator', 'keras.preprocessing.image.ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (909, 928), False, 'import keras\n'), ((964, 1005), 'numpy.zeros', 'np.zeros', ([], {'shape': '(sample_count, 4, 4, 512)'}), '(shape=(sample_count, 4, 4, 512))\n', (972, 1005), True, 'import numpy as np\n'), ((1019, 1047), 'numpy.zeros', 'np.zeros', ([], {'shape': 'sample_count'}), '(shape=sample_count)\n', (1027, 1047), True, 'import numpy as np\n'), ((1866, 1909), 'numpy.save', 'np.save', (["(prefix + '_features.npy')", 'features'], {}), "(prefix + '_features.npy', features)\n", (1873, 1909), True, 'import numpy as np\n'), ((1912, 1947), 'numpy.save', 'np.save', (["(prefix + '_labels')", 'labels'], {}), "(prefix + '_labels', labels)\n", (1919, 1947), True, 'import numpy as np\n'), ((2356, 2405), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'acc', '"""bo"""'], {'label': '"""Training acc"""'}), "(epochs, acc, 'bo', label='Training acc')\n", (2364, 2405), True, 'import matplotlib.pyplot as plt\n'), ((2410, 2464), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_acc', '"""b"""'], {'label': '"""Validation acc"""'}), "(epochs, val_acc, 'b', label='Validation acc')\n", (2418, 2464), 
True, 'import matplotlib.pyplot as plt\n'), ((2469, 2514), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation accuracy"""'], {}), "('Training and validation accuracy')\n", (2478, 2514), True, 'import matplotlib.pyplot as plt\n'), ((2519, 2531), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2529, 2531), True, 'import matplotlib.pyplot as plt\n'), ((2536, 2548), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2546, 2548), True, 'import matplotlib.pyplot as plt\n'), ((2553, 2604), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'loss', '"""bo"""'], {'label': '"""Training loss"""'}), "(epochs, loss, 'bo', label='Training loss')\n", (2561, 2604), True, 'import matplotlib.pyplot as plt\n'), ((2609, 2665), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_loss', '"""b"""'], {'label': '"""Validation loss"""'}), "(epochs, val_loss, 'b', label='Validation loss')\n", (2617, 2665), True, 'import matplotlib.pyplot as plt\n'), ((2670, 2711), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation loss"""'], {}), "('Training and validation loss')\n", (2679, 2711), True, 'import matplotlib.pyplot as plt\n'), ((2716, 2728), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2726, 2728), True, 'import matplotlib.pyplot as plt\n'), ((2733, 2743), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2741, 2743), True, 'import matplotlib.pyplot as plt\n'), ((2790, 2825), 'os.path.isfile', 'os.path.isfile', (['"""test_features.npy"""'], {}), "('test_features.npy')\n", (2804, 2825), False, 'import os\n'), ((3709, 3734), 'keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (3732, 3734), False, 'import keras\n'), ((4251, 4346), 'keras.applications.VGG16', 'keras.applications.VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': '(150, 150, 3)'}), "(weights='imagenet', include_top=False, input_shape\n =(150, 150, 3))\n", (4275, 4346), False, 'import keras\n'), 
((4355, 4380), 'keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (4378, 4380), False, 'import keras\n'), ((5200, 5413), 'keras.preprocessing.image.ImageDataGenerator', 'keras.preprocessing.image.ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'rotation_range': '(40)', 'width_shift_range': '(0.2)', 'height_shift_range': '(0.2)', 'shear_range': '(0.2)', 'zoom_range': '(0.2)', 'horizontal_flip': '(True)', 'fill_mode': '"""nearest"""'}), "(rescale=1.0 / 255,\n rotation_range=40, width_shift_range=0.2, height_shift_range=0.2,\n shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')\n", (5244, 5413), False, 'import keras\n'), ((5551, 5614), 'keras.preprocessing.image.ImageDataGenerator', 'keras.preprocessing.image.ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (5595, 5614), False, 'import keras\n'), ((49, 78), 'os.path.split', 'os.path.split', (['sys.executable'], {}), '(sys.executable)\n', (62, 78), False, 'import os\n'), ((1990, 2023), 'numpy.load', 'np.load', (["(prefix + '_features.npy')"], {}), "(prefix + '_features.npy')\n", (1997, 2023), True, 'import numpy as np\n'), ((2023, 2054), 'numpy.load', 'np.load', (["(prefix + '_labels.npy')"], {}), "(prefix + '_labels.npy')\n", (2030, 2054), True, 'import numpy as np\n'), ((3295, 3342), 'numpy.reshape', 'np.reshape', (['train_features', '(2000, 4 * 4 * 512)'], {}), '(train_features, (2000, 4 * 4 * 512))\n', (3305, 3342), True, 'import numpy as np\n'), ((3373, 3425), 'numpy.reshape', 'np.reshape', (['validation_features', '(1000, 4 * 4 * 512)'], {}), '(validation_features, (1000, 4 * 4 * 512))\n', (3383, 3425), True, 'import numpy as np\n'), ((3450, 3496), 'numpy.reshape', 'np.reshape', (['test_features', '(1000, 4 * 4 * 512)'], {}), '(test_features, (1000, 4 * 4 * 512))\n', (3460, 3496), True, 'import numpy as np\n'), ((3749, 3814), 'keras.layers.Dense', 'keras.layers.Dense', (['(256)'], {'activation': '"""relu"""', 'input_dim': '(4 * 4 * 
512)'}), "(256, activation='relu', input_dim=4 * 4 * 512)\n", (3767, 3814), False, 'import keras\n'), ((3830, 3855), 'keras.layers.Dropout', 'keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (3850, 3855), False, 'import keras\n'), ((3871, 3914), 'keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (3889, 3914), False, 'import keras\n'), ((4420, 4442), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (4440, 4442), False, 'import keras\n'), ((4458, 4500), 'keras.layers.Dense', 'keras.layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (4476, 4500), False, 'import keras\n'), ((4516, 4559), 'keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (4534, 4559), False, 'import keras\n'), ((3944, 3978), 'keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {'lr': '(2e-05)'}), '(lr=2e-05)\n', (3968, 3978), False, 'import keras\n'), ((6160, 6194), 'keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {'lr': '(2e-05)'}), '(lr=2e-05)\n', (6184, 6194), False, 'import keras\n')] |
"""loading training, validation and test data
This script is to load training, validation and test data
"""
import numpy as np
import os
import dicom
from geometry_parameters import GEOMETRY, RECONSTRUCT_PARA
from geometry_parameters import TRAIN_INDEX, VALID_INDEX, TEST_INDEX
from geometry_parameters import NUM_TRAINING_SAMPLES, NUM_VALIDATION_SAMPLES, NUM_TEST_SAMPLES
def load_training_data():
    """
    load training data

    Returns
    -------
    ndarray
        training data
    ndarray
        training ground truth
    """
    sinograms = np.empty((NUM_TRAINING_SAMPLES,) + tuple(GEOMETRY.sinogram_shape))
    ground_truth = np.empty((NUM_TRAINING_SAMPLES,) + tuple(GEOMETRY.volume_shape))
    for i, index in enumerate(TRAIN_INDEX):
        # Only the first `number_of_projections` projections are kept.
        train_data_file = '../data_preprocessing/sinograms/sinogram_' + str(index) + '.npy'
        sinograms[i, :, :, :] = np.load(train_data_file)[:GEOMETRY.number_of_projections, :, :]
        train_label_file = '../data_preprocessing/recon_360/recon_' + str(index) + '.npy'
        ground_truth[i, :, :, :] = np.load(train_label_file)
    return sinograms, ground_truth
def load_validation_data():
    """
    load validation data

    Returns
    -------
    ndarray
        validation data
    ndarray
        validation ground truth
    """
    sinograms = np.empty((NUM_VALIDATION_SAMPLES,) + tuple(GEOMETRY.sinogram_shape))
    ground_truth = np.empty((NUM_VALIDATION_SAMPLES,) + tuple(GEOMETRY.volume_shape))
    for i, index in enumerate(VALID_INDEX):
        # Only the first `number_of_projections` projections are kept.
        valid_data_file = '../data_preprocessing/sinograms/sinogram_' + str(index) + '.npy'
        sinograms[i, :, :, :] = np.load(valid_data_file)[:GEOMETRY.number_of_projections, :, :]
        valid_label_file = '../data_preprocessing/recon_360/recon_' + str(index) + '.npy'
        ground_truth[i, :, :, :] = np.load(valid_label_file)
    return sinograms, ground_truth
def load_test_data():
    """
    load test data

    Returns
    -------
    ndarray
        test data
    ndarray
        test ground truth
    """
    sinograms = np.empty((NUM_TEST_SAMPLES,) + tuple(GEOMETRY.sinogram_shape))
    ground_truth = np.empty((NUM_TEST_SAMPLES,) + tuple(GEOMETRY.volume_shape))
    for i, index in enumerate(TEST_INDEX):
        # Only the first `number_of_projections` projections are kept.
        test_data_file = '../data_preprocessing/sinograms/sinogram_' + str(index) + '.npy'
        sinograms[i, :, :, :] = np.load(test_data_file)[:GEOMETRY.number_of_projections, :, :]
        test_label_file = '../data_preprocessing/recon_360/recon_' + str(index) + '.npy'
        ground_truth[i, :, :, :] = np.load(test_label_file)
    return sinograms, ground_truth
def load_voxel_size_list():
    """
    load voxel sizes for each CT

    Returns
    -------
    list
        a list containing the voxel sizes needed for reconstruction for models training
    """
    phantoms_dir = '../3Dircadb1/'
    num_phantoms = len(os.listdir(phantoms_dir))
    voxel_size_list = []
    for n in range(num_phantoms):
        phantom_dir = phantoms_dir + '3Dircadb1.' + str(n + 1) + '/PATIENT_DICOM/'
        # NOTE(review): assumes every entry in PATIENT_DICOM is one image slice
        # (no extra files) -- verify against the dataset layout.
        num_slices = len(os.listdir(phantom_dir))
        # Spacing metadata is read from the first slice only; assumes it is
        # uniform across the series.
        dcm = dicom.read_file(phantom_dir + "image_0")
        num_row = dcm.Rows
        num_col = dcm.Columns
        row_pixel_spacing = np.round(dcm.PixelSpacing[0], 2)
        col_pixel_spacing = np.round(dcm.PixelSpacing[1], 2)
        slice_thickness = np.round(dcm.SliceThickness, 2)
        # Volume Parameters:
        # physical extent of the scan (mm) divided by the reconstruction
        # volume shape gives the per-axis voxel size.
        voxel_size = [num_slices * slice_thickness / RECONSTRUCT_PARA['volume_shape'][0],
                      num_row * row_pixel_spacing / RECONSTRUCT_PARA['volume_shape'][1],
                      num_col * col_pixel_spacing / RECONSTRUCT_PARA['volume_shape'][2]]
        voxel_size_list.append(voxel_size)
return voxel_size_list | [
"numpy.round",
"os.listdir",
"numpy.load",
"dicom.read_file"
] | [((1091, 1116), 'numpy.load', 'np.load', (['train_label_file'], {}), '(train_label_file)\n', (1098, 1116), True, 'import numpy as np\n'), ((1931, 1956), 'numpy.load', 'np.load', (['valid_label_file'], {}), '(valid_label_file)\n', (1938, 1956), True, 'import numpy as np\n'), ((2717, 2741), 'numpy.load', 'np.load', (['test_label_file'], {}), '(test_label_file)\n', (2724, 2741), True, 'import numpy as np\n'), ((3067, 3091), 'os.listdir', 'os.listdir', (['phantoms_dir'], {}), '(phantoms_dir)\n', (3077, 3091), False, 'import os\n'), ((3301, 3341), 'dicom.read_file', 'dicom.read_file', (["(phantom_dir + 'image_0')"], {}), "(phantom_dir + 'image_0')\n", (3316, 3341), False, 'import dicom\n'), ((3427, 3459), 'numpy.round', 'np.round', (['dcm.PixelSpacing[0]', '(2)'], {}), '(dcm.PixelSpacing[0], 2)\n', (3435, 3459), True, 'import numpy as np\n'), ((3488, 3520), 'numpy.round', 'np.round', (['dcm.PixelSpacing[1]', '(2)'], {}), '(dcm.PixelSpacing[1], 2)\n', (3496, 3520), True, 'import numpy as np\n'), ((3547, 3578), 'numpy.round', 'np.round', (['dcm.SliceThickness', '(2)'], {}), '(dcm.SliceThickness, 2)\n', (3555, 3578), True, 'import numpy as np\n'), ((896, 920), 'numpy.load', 'np.load', (['train_data_file'], {}), '(train_data_file)\n', (903, 920), True, 'import numpy as np\n'), ((1731, 1755), 'numpy.load', 'np.load', (['valid_data_file'], {}), '(valid_data_file)\n', (1738, 1755), True, 'import numpy as np\n'), ((2525, 2548), 'numpy.load', 'np.load', (['test_data_file'], {}), '(test_data_file)\n', (2532, 2548), True, 'import numpy as np\n'), ((3261, 3284), 'os.listdir', 'os.listdir', (['phantom_dir'], {}), '(phantom_dir)\n', (3271, 3284), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 21 15:06:43 2018
@author: Michelle
"""
# -*- coding: utf-8 -*-
"""
Escape-time rendering of the Mandelbrot set.

Updated on Wed May 23 10:58:10 2018
- adjusted the orientation of the image to column major
- set origin to bottom left
- switched stepx and stepy in grid so the real and imaginary axis are plotted right
- changed while loops to for loops, deleted if statements
@author Ramsey
Updated on Tue May 22 11:12:14 2018
- switching to 2d numpy arrays and using an implot instead of scatter (huge speed gains)
- pros: gains in speed and fills whole screen with colors
- also added early break when hitting "high" values heading towards infinity
@author: Ramsey
Created on Mon May 21 15:06:43 2018
@author: Michelle
"""
#The goal is to graph only the Julia Sets that are connected
import matplotlib.pyplot as plt
import numpy as np
#for a mandelbrot set, you want to change c (the complex number) but keep z the same (the origin 0)
high_num = 200 #above this we consider infinity
neg_num = -200 #below this we consider infinity
lowerbound = -2 #the lower bound of the values we consider on real and i
upperbound = 2 #upper bound of values we consider on real and i
stepsize = 0.01 # increment size between upper and lower bounds
steps = int((upperbound-lowerbound)/stepsize) # how many steps it'll take
iters = 100 #how many iterations we'll take to try to find infinity
grid = np.arange(1,steps*steps+1,1) #gets a grid with right number of pts
grid = grid.reshape((steps,steps))
rp = lowerbound
ip = lowerbound
#filling up our c array with every possible value of complex numbers
for stepx in range(steps): #we want to get all the numbers from the lowerbound to the upperbound
    print("Step:",stepx,"/",steps)
    ip = lowerbound  # restart the imaginary sweep for each real value
    for stepy in range(steps):
        c = complex(rp, ip) #this is our initial c
        z = 0  # Mandelbrot: z always starts at the origin, only c varies
        for i in range(iters):
            z = np.multiply(z,z) + c #get the next z from this z
            if z.real > high_num or z.imag > high_num or z.real < neg_num or z.imag < neg_num:
                break
        grid[stepy][stepx] = i #the graph is colored according to how long it takes to get to infinity
        #by switching the stepy and stepx, you get a set thats oriented like online images -- real axis as x, imaginary as y
        ip += stepsize
    rp += stepsize
plt.suptitle("Mandelbrot Set")
plt.xlabel("real")
plt.ylabel("imaginary")
plt.title("c="+str(c))
plt.imshow(grid,cmap="terrain", origin = 'lower')
plt.show() | [
"matplotlib.pyplot.imshow",
"numpy.multiply",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.suptitle",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((1418, 1452), 'numpy.arange', 'np.arange', (['(1)', '(steps * steps + 1)', '(1)'], {}), '(1, steps * steps + 1, 1)\n', (1427, 1452), True, 'import numpy as np\n'), ((2379, 2409), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Mandelbrot Set"""'], {}), "('Mandelbrot Set')\n", (2391, 2409), True, 'import matplotlib.pyplot as plt\n'), ((2410, 2428), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""real"""'], {}), "('real')\n", (2420, 2428), True, 'import matplotlib.pyplot as plt\n'), ((2429, 2452), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""imaginary"""'], {}), "('imaginary')\n", (2439, 2452), True, 'import matplotlib.pyplot as plt\n'), ((2476, 2524), 'matplotlib.pyplot.imshow', 'plt.imshow', (['grid'], {'cmap': '"""terrain"""', 'origin': '"""lower"""'}), "(grid, cmap='terrain', origin='lower')\n", (2486, 2524), True, 'import matplotlib.pyplot as plt\n'), ((2526, 2536), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2534, 2536), True, 'import matplotlib.pyplot as plt\n'), ((1921, 1938), 'numpy.multiply', 'np.multiply', (['z', 'z'], {}), '(z, z)\n', (1932, 1938), True, 'import numpy as np\n')] |
from __future__ import annotations
import pickle
import numpy as np
from lightgbm import LGBMRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import (
ElasticNet,
HuberRegressor,
Lasso,
LinearRegression,
MultiTaskElasticNet,
MultiTaskLasso,
)
from sklearn.multioutput import MultiOutputRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import RobustScaler, StandardScaler
from sklearn.tree import DecisionTreeRegressor
from behavior.modeling import METHODS
def get_model(method, config):
"""Initialize and return the underlying Behavior Model variant with the provided configuration parameters.
Parameters
----------
method : str
Regression model variant.
config : dict[str, Any]
Configuration parameters for the model.
Returns
-------
model : Any
A regression model.
Raises
------
ValueError
If the requested method is not supported.
"""
if method not in METHODS:
raise ValueError(f"Method: {method} is not supported.")
regressor = None
# Tree-based Models.
if method == "dt":
regressor = DecisionTreeRegressor(max_depth=config["dt"]["max_depth"])
regressor = MultiOutputRegressor(regressor)
elif method == "rf":
regressor = RandomForestRegressor(
n_estimators=config["rf"]["n_estimators"],
criterion=config["rf"]["criterion"],
max_depth=config["rf"]["max_depth"],
random_state=config["random_state"],
n_jobs=config["num_jobs"],
)
elif method == "gbm":
regressor = LGBMRegressor(
max_depth=config["gbm"]["max_depth"],
num_leaves=config["gbm"]["num_leaves"],
n_estimators=config["gbm"]["n_estimators"],
min_child_samples=config["gbm"]["min_child_samples"],
objective=config["gbm"]["objective"],
random_state=config["random_state"],
)
regressor = MultiOutputRegressor(regressor)
elif method == "mlp":
# Multi-layer Perceptron.
hls = tuple(dim for dim in config["mlp"]["hidden_layers"])
regressor = MLPRegressor(
hidden_layer_sizes=hls,
early_stopping=config["mlp"]["early_stopping"],
max_iter=config["mlp"]["max_iter"],
alpha=config["mlp"]["alpha"],
random_state=config["random_state"],
)
# Generalized Linear Models.
elif method == "lr":
regressor = LinearRegression(n_jobs=config["num_jobs"])
elif method == "huber":
regressor = HuberRegressor(max_iter=config["huber"]["max_iter"])
regressor = MultiOutputRegressor(regressor)
elif method == "mt_lasso":
regressor = MultiTaskLasso(alpha=config["mt_lasso"]["alpha"], random_state=config["random_state"])
elif method == "lasso":
regressor = Lasso(alpha=config["lasso"]["alpha"], random_state=config["random_state"])
elif method == "elastic":
regressor = ElasticNet(
alpha=config["elastic"]["alpha"],
l1_ratio=config["elastic"]["l1_ratio"],
random_state=config["random_state"],
)
regressor = MultiOutputRegressor(regressor)
elif method == "mt_elastic":
regressor = MultiTaskElasticNet(l1_ratio=config["mt_elastic"]["l1_ratio"], random_state=config["random_state"])
assert regressor is not None
return regressor
class BehaviorModel:
def __init__(self, method, ou_name, base_model_name, config, features):
"""Create a Behavior Model for predicting the resource consumption cost of a single PostgreSQL operating-unit.
Parameters
----------
method : str
The method to use. Valid methods are defined in modeling/__init__.py.
ou_name : str
The name of this operating unit.
base_model_name : str
The base name for this model, currently just the experiment name.
config : dict[str, Any]
The dictionary of configuration parameters for this model.
features : list[str]
The list of input features for this model.
"""
self.method = method
self.base_model_name = base_model_name
self.ou_name = ou_name
self.model = get_model(method, config)
self.features = features
self.normalize = config["normalize"]
self.log_transform = config["log_transform"]
self.eps = 1e-4
self.xscaler = RobustScaler() if config["robust"] else StandardScaler()
self.yscaler = RobustScaler() if config["robust"] else StandardScaler()
def train(self, x, y):
"""Train a model using the input features and targets.
Parameters
----------
x : NDArray[np.float32]
Input features.
y : NDArray[np.float32]
Input targets.
"""
if self.log_transform:
x = np.log(x + self.eps)
y = np.log(y + self.eps)
if self.normalize:
x = self.xscaler.fit_transform(x)
y = self.yscaler.fit_transform(y)
self.model.fit(x, y)
def predict(self, x):
"""Run inference using the provided input features.
Parameters
----------
x : NDArray[np.float32]
Input features.
Returns
-------
NDArray[np.float32]
Predicted targets.
"""
# Transform the features.
if self.log_transform:
x = np.log(x + self.eps)
if self.normalize:
x = self.xscaler.transform(x)
# Perform inference (in the transformed feature space).
y = self.model.predict(x)
# Map the result back to the original space.
if self.normalize:
y = self.yscaler.inverse_transform(y)
if self.log_transform:
y = np.exp(y) - self.eps
y = np.clip(y, 0, None)
return y
def save(self, output_dir):
"""Save the model to disk.
Parameters
----------
output_dir : Path | str
The directory to save the model to.
"""
model_dir = output_dir / self.base_model_name / self.method / self.ou_name
with open(model_dir / f"{self.method}_{self.ou_name}.pkl", "wb") as f:
pickle.dump(self, f)
| [
"numpy.clip",
"sklearn.neural_network.MLPRegressor",
"sklearn.ensemble.RandomForestRegressor",
"pickle.dump",
"sklearn.tree.DecisionTreeRegressor",
"sklearn.linear_model.Lasso",
"sklearn.linear_model.ElasticNet",
"sklearn.linear_model.MultiTaskLasso",
"numpy.log",
"sklearn.multioutput.MultiOutputR... | [((1208, 1266), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'max_depth': "config['dt']['max_depth']"}), "(max_depth=config['dt']['max_depth'])\n", (1229, 1266), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((1287, 1318), 'sklearn.multioutput.MultiOutputRegressor', 'MultiOutputRegressor', (['regressor'], {}), '(regressor)\n', (1307, 1318), False, 'from sklearn.multioutput import MultiOutputRegressor\n'), ((1364, 1575), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': "config['rf']['n_estimators']", 'criterion': "config['rf']['criterion']", 'max_depth': "config['rf']['max_depth']", 'random_state': "config['random_state']", 'n_jobs': "config['num_jobs']"}), "(n_estimators=config['rf']['n_estimators'], criterion=\n config['rf']['criterion'], max_depth=config['rf']['max_depth'],\n random_state=config['random_state'], n_jobs=config['num_jobs'])\n", (1385, 1575), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((4571, 4585), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (4583, 4585), False, 'from sklearn.preprocessing import RobustScaler, StandardScaler\n'), ((4611, 4627), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4625, 4627), False, 'from sklearn.preprocessing import RobustScaler, StandardScaler\n'), ((4651, 4665), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (4663, 4665), False, 'from sklearn.preprocessing import RobustScaler, StandardScaler\n'), ((4691, 4707), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4705, 4707), False, 'from sklearn.preprocessing import RobustScaler, StandardScaler\n'), ((5016, 5036), 'numpy.log', 'np.log', (['(x + self.eps)'], {}), '(x + self.eps)\n', (5022, 5036), True, 'import numpy as np\n'), ((5053, 5073), 'numpy.log', 'np.log', (['(y + self.eps)'], {}), '(y + self.eps)\n', (5059, 5073), 
True, 'import numpy as np\n'), ((5595, 5615), 'numpy.log', 'np.log', (['(x + self.eps)'], {}), '(x + self.eps)\n', (5601, 5615), True, 'import numpy as np\n'), ((5999, 6018), 'numpy.clip', 'np.clip', (['y', '(0)', 'None'], {}), '(y, 0, None)\n', (6006, 6018), True, 'import numpy as np\n'), ((6410, 6430), 'pickle.dump', 'pickle.dump', (['self', 'f'], {}), '(self, f)\n', (6421, 6430), False, 'import pickle\n'), ((1684, 1962), 'lightgbm.LGBMRegressor', 'LGBMRegressor', ([], {'max_depth': "config['gbm']['max_depth']", 'num_leaves': "config['gbm']['num_leaves']", 'n_estimators': "config['gbm']['n_estimators']", 'min_child_samples': "config['gbm']['min_child_samples']", 'objective': "config['gbm']['objective']", 'random_state': "config['random_state']"}), "(max_depth=config['gbm']['max_depth'], num_leaves=config['gbm'\n ]['num_leaves'], n_estimators=config['gbm']['n_estimators'],\n min_child_samples=config['gbm']['min_child_samples'], objective=config[\n 'gbm']['objective'], random_state=config['random_state'])\n", (1697, 1962), False, 'from lightgbm import LGBMRegressor\n'), ((2052, 2083), 'sklearn.multioutput.MultiOutputRegressor', 'MultiOutputRegressor', (['regressor'], {}), '(regressor)\n', (2072, 2083), False, 'from sklearn.multioutput import MultiOutputRegressor\n'), ((5962, 5971), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (5968, 5971), True, 'import numpy as np\n'), ((2231, 2428), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': 'hls', 'early_stopping': "config['mlp']['early_stopping']", 'max_iter': "config['mlp']['max_iter']", 'alpha': "config['mlp']['alpha']", 'random_state': "config['random_state']"}), "(hidden_layer_sizes=hls, early_stopping=config['mlp'][\n 'early_stopping'], max_iter=config['mlp']['max_iter'], alpha=config[\n 'mlp']['alpha'], random_state=config['random_state'])\n", (2243, 2428), False, 'from sklearn.neural_network import MLPRegressor\n'), ((2568, 2611), 'sklearn.linear_model.LinearRegression', 
'LinearRegression', ([], {'n_jobs': "config['num_jobs']"}), "(n_jobs=config['num_jobs'])\n", (2584, 2611), False, 'from sklearn.linear_model import ElasticNet, HuberRegressor, Lasso, LinearRegression, MultiTaskElasticNet, MultiTaskLasso\n'), ((2660, 2712), 'sklearn.linear_model.HuberRegressor', 'HuberRegressor', ([], {'max_iter': "config['huber']['max_iter']"}), "(max_iter=config['huber']['max_iter'])\n", (2674, 2712), False, 'from sklearn.linear_model import ElasticNet, HuberRegressor, Lasso, LinearRegression, MultiTaskElasticNet, MultiTaskLasso\n'), ((2733, 2764), 'sklearn.multioutput.MultiOutputRegressor', 'MultiOutputRegressor', (['regressor'], {}), '(regressor)\n', (2753, 2764), False, 'from sklearn.multioutput import MultiOutputRegressor\n'), ((2816, 2907), 'sklearn.linear_model.MultiTaskLasso', 'MultiTaskLasso', ([], {'alpha': "config['mt_lasso']['alpha']", 'random_state': "config['random_state']"}), "(alpha=config['mt_lasso']['alpha'], random_state=config[\n 'random_state'])\n", (2830, 2907), False, 'from sklearn.linear_model import ElasticNet, HuberRegressor, Lasso, LinearRegression, MultiTaskElasticNet, MultiTaskLasso\n'), ((2951, 3025), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': "config['lasso']['alpha']", 'random_state': "config['random_state']"}), "(alpha=config['lasso']['alpha'], random_state=config['random_state'])\n", (2956, 3025), False, 'from sklearn.linear_model import ElasticNet, HuberRegressor, Lasso, LinearRegression, MultiTaskElasticNet, MultiTaskLasso\n'), ((3076, 3202), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {'alpha': "config['elastic']['alpha']", 'l1_ratio': "config['elastic']['l1_ratio']", 'random_state': "config['random_state']"}), "(alpha=config['elastic']['alpha'], l1_ratio=config['elastic'][\n 'l1_ratio'], random_state=config['random_state'])\n", (3086, 3202), False, 'from sklearn.linear_model import ElasticNet, HuberRegressor, Lasso, LinearRegression, MultiTaskElasticNet, MultiTaskLasso\n'), ((3265, 3296), 
'sklearn.multioutput.MultiOutputRegressor', 'MultiOutputRegressor', (['regressor'], {}), '(regressor)\n', (3285, 3296), False, 'from sklearn.multioutput import MultiOutputRegressor\n'), ((3350, 3454), 'sklearn.linear_model.MultiTaskElasticNet', 'MultiTaskElasticNet', ([], {'l1_ratio': "config['mt_elastic']['l1_ratio']", 'random_state': "config['random_state']"}), "(l1_ratio=config['mt_elastic']['l1_ratio'], random_state\n =config['random_state'])\n", (3369, 3454), False, 'from sklearn.linear_model import ElasticNet, HuberRegressor, Lasso, LinearRegression, MultiTaskElasticNet, MultiTaskLasso\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Test a trained classification model."""
import argparse
import numpy as np
import sys
import torch
from sscls.core.config import assert_cfg
from sscls.core.config import cfg
from sscls.utils.meters import TestMeter
import sscls.modeling.builder as model_builder
import sscls.datasets.loader as loader
import sscls.utils.checkpoint as cu
import sscls.utils.distributed as du
import sscls.utils.logging as lu
import sscls.utils.metrics as mu
import sscls.utils.multiprocessing as mpu
logger = lu.get_logger(__name__)
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser(
description='Test a trained classification model'
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file',
required=True,
type=str
)
parser.add_argument(
'opts',
help='See sscls/core/config/defaults.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.logger.info_help()
sys.exit(1)
return parser.parse_args()
def log_model_info(model):
"""Logs model info"""
logger.info('Model:\n{}'.format(model))
logger.info('Params: {:,}'.format(mu.params_count(model)))
logger.info('Flops: {:,}'.format(mu.flops_count(model)))
@torch.no_grad()
def test_epoch(test_loader, model, test_meter, cur_epoch):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
test_meter.log_epoch_stats(cur_epoch)
test_meter.reset()
def test_model():
"""Evaluates the model."""
# Build the model (before the loaders to speed up debugging)
model = model_builder.build_model()
log_model_info(model)
# Load model weights
cu.load_checkpoint(cfg.TEST.WEIGHTS, model)
logger.info('Loaded model weights from: {}'.format(cfg.TEST.WEIGHTS))
# Create data loaders
test_loader = loader.construct_test_loader()
# Create meters
test_meter = TestMeter(len(test_loader))
# Evaluate the model
test_epoch(test_loader, model, test_meter, 0)
def single_proc_test():
"""Performs single process evaluation."""
# Setup logging
lu.setup_logging()
# Show the config
logger.info('Config:\n{}'.format(cfg))
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Evaluate the model
test_model()
def main():
# Parse cmd line args
args = parse_args()
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
assert_cfg()
cfg.freeze()
# Perform evaluation
if cfg.NUM_GPUS > 1:
mpu.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=single_proc_test)
else:
single_proc_test()
if __name__ == '__main__':
main()
| [
"sscls.utils.checkpoint.load_checkpoint",
"sscls.utils.metrics.flops_count",
"sys.exit",
"sscls.utils.distributed.scaled_all_reduce",
"argparse.ArgumentParser",
"sscls.datasets.loader.construct_test_loader",
"sscls.core.config.cfg.merge_from_file",
"numpy.random.seed",
"sscls.core.config.cfg.merge_f... | [((700, 723), 'sscls.utils.logging.get_logger', 'lu.get_logger', (['__name__'], {}), '(__name__)\n', (713, 723), True, 'import sscls.utils.logging as lu\n'), ((1525, 1540), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1538, 1540), False, 'import torch\n'), ((789, 863), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test a trained classification model"""'}), "(description='Test a trained classification model')\n", (812, 863), False, 'import argparse\n'), ((2797, 2824), 'sscls.modeling.builder.build_model', 'model_builder.build_model', ([], {}), '()\n', (2822, 2824), True, 'import sscls.modeling.builder as model_builder\n'), ((2881, 2924), 'sscls.utils.checkpoint.load_checkpoint', 'cu.load_checkpoint', (['cfg.TEST.WEIGHTS', 'model'], {}), '(cfg.TEST.WEIGHTS, model)\n', (2899, 2924), True, 'import sscls.utils.checkpoint as cu\n'), ((3044, 3074), 'sscls.datasets.loader.construct_test_loader', 'loader.construct_test_loader', ([], {}), '()\n', (3072, 3074), True, 'import sscls.datasets.loader as loader\n'), ((3314, 3332), 'sscls.utils.logging.setup_logging', 'lu.setup_logging', ([], {}), '()\n', (3330, 3332), True, 'import sscls.utils.logging as lu\n'), ((3478, 3506), 'numpy.random.seed', 'np.random.seed', (['cfg.RNG_SEED'], {}), '(cfg.RNG_SEED)\n', (3492, 3506), True, 'import numpy as np\n'), ((3511, 3542), 'torch.manual_seed', 'torch.manual_seed', (['cfg.RNG_SEED'], {}), '(cfg.RNG_SEED)\n', (3528, 3542), False, 'import torch\n'), ((3772, 3806), 'sscls.core.config.cfg.merge_from_file', 'cfg.merge_from_file', (['args.cfg_file'], {}), '(args.cfg_file)\n', (3791, 3806), False, 'from sscls.core.config import cfg\n'), ((3811, 3841), 'sscls.core.config.cfg.merge_from_list', 'cfg.merge_from_list', (['args.opts'], {}), '(args.opts)\n', (3830, 3841), False, 'from sscls.core.config import cfg\n'), ((3846, 3858), 'sscls.core.config.assert_cfg', 'assert_cfg', ([], {}), '()\n', (3856, 3858), False, 'from 
sscls.core.config import assert_cfg\n'), ((3863, 3875), 'sscls.core.config.cfg.freeze', 'cfg.freeze', ([], {}), '()\n', (3873, 3875), False, 'from sscls.core.config import cfg\n'), ((1256, 1267), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1264, 1267), False, 'import sys\n'), ((2024, 2061), 'sscls.utils.metrics.topk_errors', 'mu.topk_errors', (['preds', 'labels', '[1, 5]'], {}), '(preds, labels, [1, 5])\n', (2038, 2061), True, 'import sscls.utils.metrics as mu\n'), ((3935, 3998), 'sscls.utils.multiprocessing.multi_proc_run', 'mpu.multi_proc_run', ([], {'num_proc': 'cfg.NUM_GPUS', 'fun': 'single_proc_test'}), '(num_proc=cfg.NUM_GPUS, fun=single_proc_test)\n', (3953, 3998), True, 'import sscls.utils.multiprocessing as mpu\n'), ((1436, 1458), 'sscls.utils.metrics.params_count', 'mu.params_count', (['model'], {}), '(model)\n', (1451, 1458), True, 'import sscls.utils.metrics as mu\n'), ((1498, 1519), 'sscls.utils.metrics.flops_count', 'mu.flops_count', (['model'], {}), '(model)\n', (1512, 1519), True, 'import sscls.utils.metrics as mu\n'), ((2169, 2211), 'sscls.utils.distributed.scaled_all_reduce', 'du.scaled_all_reduce', (['[top1_err, top5_err]'], {}), '([top1_err, top5_err])\n', (2189, 2211), True, 'import sscls.utils.distributed as du\n')] |
from tensorflow.examples.tutorials.mnist import input_data
from modeler.gaussianAE import GaussianAutoencoderModel
from trainer.tftrainer import TFTrainer
import sklearn.preprocessing as prep
import numpy as np
class GaussianAETrainer(TFTrainer):
def __init__(self):
self.training_epochs = 20
self.batch_size = 128
self.display_step = 1
self.training_scale = 0.1
pass
def get_data(self):
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
self.X_train, self.X_test = self.standard_scale(mnist.train.images, mnist.test.images)
self.n_samples = int(mnist.train.num_examples)
pass
def get_model(self):
gaussianAutoencoderModel = GaussianAutoencoderModel()
self.cost = gaussianAutoencoderModel.cost
self.optimizer = gaussianAutoencoderModel.optimizer
self.scale = gaussianAutoencoderModel.scale
self.x = gaussianAutoencoderModel.x
self.hidden = gaussianAutoencoderModel.hidden
self.weights = gaussianAutoencoderModel.weights
self.reconstruction = gaussianAutoencoderModel.reconstruction
pass
def train(self):
for epoch in range(self.training_epochs):
avg_cost = 0.
total_batch = int(self.n_samples / self.batch_size)
for i in range(total_batch):
batch_xs = self.get_random_block_from_data(self.X_train, self.batch_size)
cost = self.partial_fit(batch_xs)
# Compute average loss
avg_cost += cost / self.n_samples * self.batch_size
# Display logs per epoch step
if epoch % self.display_step == 0:
print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
print("Total cost: " + str(self.calc_total_cost(self.X_test)))
pass
def standard_scale(self, X_train, X_test):
preprocessor = prep.StandardScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(self, data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
def partial_fit(self, X):
cost, opt = self.session.run((self.cost, self.optimizer),
feed_dict={self.x: X, self.scale: self.training_scale})
return cost
def calc_total_cost(self, X):
return self.session.run(self.cost, feed_dict={self.x: X,
self.scale: self.training_scale
})
def transform(self, X):
return self.session.run(self.hidden, feed_dict={self.x: X,
self.scale: self.training_scale
})
def generate(self, hidden=None):
if hidden is None:
hidden = np.random.normal(size=self.weights["b1"])
return self.session.run(self.reconstruction, feed_dict={self.hidden: hidden})
def reconstruct(self, X):
return self.session.run(self.reconstruction, feed_dict={self.x: X,
self.scale: self.training_scale
})
def getWeights(self):
return self.session.run(self.weights['w1'])
def getBiases(self):
return self.session.run(self.weights['b1'])
| [
"numpy.random.normal",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"sklearn.preprocessing.StandardScaler",
"modeler.gaussianAE.GaussianAutoencoderModel"
] | [((456, 509), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""MNIST_data"""'], {'one_hot': '(True)'}), "('MNIST_data', one_hot=True)\n", (481, 509), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((734, 760), 'modeler.gaussianAE.GaussianAutoencoderModel', 'GaussianAutoencoderModel', ([], {}), '()\n', (758, 760), False, 'from modeler.gaussianAE import GaussianAutoencoderModel\n'), ((3082, 3123), 'numpy.random.normal', 'np.random.normal', ([], {'size': "self.weights['b1']"}), "(size=self.weights['b1'])\n", (3098, 3123), True, 'import numpy as np\n'), ((1948, 1969), 'sklearn.preprocessing.StandardScaler', 'prep.StandardScaler', ([], {}), '()\n', (1967, 1969), True, 'import sklearn.preprocessing as prep\n')] |
import numpy as np
import scipy.misc
import time
import h5py
def make_generator(hdf5_file, n_images, batch_size, res, res_slack=2, label_name=None):
epoch_count = [1]
def get_epoch():
images = np.zeros((batch_size, 3, res, res), dtype='int32')
labels = np.zeros(batch_size, dtype='int32')
indices = range(n_images)
random_state = np.random.RandomState(epoch_count[0])
random_state.shuffle(indices)
epoch_count[0] += 1
for n, i in enumerate(indices):
shape = hdf5_file['shapes'][i]
# assuming (B)CWH format
if np.amax(shape[1:2]) >= res - res_slack:
if shape[1] == shape[2] == res:
images[n % batch_size] = hdf5_file['data'][i][:, :shape[1], :shape[2]]
else:
image = hdf5_file['data'][i, :, :shape[1], :shape[2]].transpose((1, 2, 0))
images[n % batch_size] = scipy.misc.imresize(image, (res, res, shape[0]),
interp='lanczos').transpose((2, 0, 1))
if label_name is not None:
labels[n % batch_size] = hdf5_file[label_name][i]
if n > 0 and n % batch_size == 0:
yield (images, labels)
return get_epoch
def load(batch_size, data_file='/home/sagea/scratch/data/twitter/clean_v2/twitter_dataset_clean.hdf5', resolution=128, label_name=None):
hdf5_file = h5py.File(data_file, 'r')
n_images = len(hdf5_file['data'])
print('Images: %i' % n_images)
if label_name is not None:
n_labels = len(hdf5_file[label_name])
n_images = min(n_images, n_labels)
print('Labels: %i' % n_labels)
return make_generator(hdf5_file, n_images, batch_size, res=resolution, label_name=label_name)
def load_new(cfg):
label_name = cfg.LABELS if cfg.LABELS != 'None' else None
return load(cfg.BATCH_SIZE, cfg.DATA, cfg.OUTPUT_RES, label_name=label_name)
if __name__ == '__main__':
train_gen, valid_gen = load(64)
t0 = time.time()
for i, batch in enumerate(train_gen(), start=1):
print("{}\t{}".format(str(time.time() - t0), batch[0][0,0,0,0]))
if i == 1000:
break
t0 = time.time() | [
"h5py.File",
"numpy.zeros",
"time.time",
"numpy.amax",
"numpy.random.RandomState"
] | [((1486, 1511), 'h5py.File', 'h5py.File', (['data_file', '"""r"""'], {}), "(data_file, 'r')\n", (1495, 1511), False, 'import h5py\n'), ((2080, 2091), 'time.time', 'time.time', ([], {}), '()\n', (2089, 2091), False, 'import time\n'), ((210, 260), 'numpy.zeros', 'np.zeros', (['(batch_size, 3, res, res)'], {'dtype': '"""int32"""'}), "((batch_size, 3, res, res), dtype='int32')\n", (218, 260), True, 'import numpy as np\n'), ((278, 313), 'numpy.zeros', 'np.zeros', (['batch_size'], {'dtype': '"""int32"""'}), "(batch_size, dtype='int32')\n", (286, 313), True, 'import numpy as np\n'), ((371, 408), 'numpy.random.RandomState', 'np.random.RandomState', (['epoch_count[0]'], {}), '(epoch_count[0])\n', (392, 408), True, 'import numpy as np\n'), ((2271, 2282), 'time.time', 'time.time', ([], {}), '()\n', (2280, 2282), False, 'import time\n'), ((610, 629), 'numpy.amax', 'np.amax', (['shape[1:2]'], {}), '(shape[1:2])\n', (617, 629), True, 'import numpy as np\n'), ((2179, 2190), 'time.time', 'time.time', ([], {}), '()\n', (2188, 2190), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""FFT functions.
This module contains FFT functions that support centered operation.
"""
import numpy as np
from sigpy import backend, config, interp, util
if config.cupy_enabled:
import cupy as cp
__all__ = ['fft', 'ifft', 'nufft', 'nufft_adjoint', 'estimate_shape']
def fft(input, oshape=None, axes=None, center=True, norm='ortho'):
"""FFT function that supports centering.
Args:
input (array): input array.
oshape (None or array of ints): output shape.
axes (None or array of ints): Axes over which to compute the FFT.
norm (Nonr or ``"ortho"``): Keyword to specify the normalization mode.
Returns:
array: FFT result of dimension oshape.
See Also:
:func:`numpy.fft.fftn`
"""
device = backend.get_device(input)
xp = device.xp
with device:
if not np.issubdtype(input.dtype, np.complexfloating):
input = input.astype(np.complex)
if center:
output = _fftc(input, oshape=oshape, axes=axes, norm=norm)
else:
output = xp.fft.fftn(input, s=oshape, axes=axes, norm=norm)
if np.issubdtype(input.dtype, np.complexfloating) and input.dtype != output.dtype:
output = output.astype(input.dtype)
return output
def ifft(input, oshape=None, axes=None, center=True, norm='ortho'):
"""IFFT function that supports centering.
Args:
input (array): input array.
oshape (None or array of ints): output shape.
axes (None or array of ints): Axes over which to compute the inverse FFT.
norm (None or ``"ortho"``): Keyword to specify the normalization mode.
Returns:
array of dimension oshape.
See Also:
:func:`numpy.fft.ifftn`
"""
device = backend.get_device(input)
xp = device.xp
with device:
if not np.issubdtype(input.dtype, np.complexfloating):
input = input.astype(np.complex)
if center:
output = _ifftc(input, oshape=oshape, axes=axes, norm=norm)
else:
output = xp.fft.ifftn(input, s=oshape, axes=axes, norm=norm)
if np.issubdtype(input.dtype, np.complexfloating) and input.dtype != output.dtype:
output = output.astype(input.dtype)
return output
def nufft(input, coord, oversamp=1.25, width=4.0, n=128):
"""Non-uniform Fast Fourier Transform.
Args:
input (array): input array.
coord (array): coordinate array of shape (..., ndim).
ndim determines the number of dimension to apply nufft.
oversamp (float): oversampling factor.
width (float): interpolation kernel full-width in terms of oversampled grid.
n (int): number of sampling points of interpolation kernel.
Returns:
array: Fourier domain points of shape input.shape[:-ndim] + coord.shape[:-1]
References:
<NAME>., & <NAME>. (2003).
Nonuniform fast Fourier transforms using min-max interpolation.
IEEE Transactions on Signal Processing, 51(2), 560-574.
<NAME>., <NAME>., & <NAME>. (2005).
Rapid gridding reconstruction with a minimal oversampling ratio.
IEEE transactions on medical imaging, 24(6), 799-808.
"""
device = backend.get_device(input)
xp = device.xp
ndim = coord.shape[-1]
beta = np.pi * (((width / oversamp) * (oversamp - 0.5))**2 - 0.8)**0.5
with device:
output = input.copy()
os_shape = list(input.shape)
for a in range(-ndim, 0):
i = input.shape[a]
os_i = _get_ugly_number(oversamp * i)
os_shape[a] = os_i
idx = xp.arange(i, dtype=input.dtype)
# Calculate apodization
apod = (beta**2 - (np.pi * width * (idx - i // 2) / os_i)**2)**0.5
apod /= xp.sinh(apod)
# Swap axes
output = output.swapaxes(a, -1)
os_shape[a], os_shape[-1] = os_shape[-1], os_shape[a]
# Apodize
output *= apod
# Oversampled FFT
output = util.resize(output, os_shape)
output = fft(output, axes=[-1], norm=None)
output /= i**0.5
# Swap back
output = output.swapaxes(a, -1)
os_shape[a], os_shape[-1] = os_shape[-1], os_shape[a]
coord = _scale_coord(backend.to_device(coord, device), input.shape, oversamp)
kernel = backend.to_device(
_kb(np.arange(n, dtype=coord.dtype) / n, width, beta, coord.dtype), device)
output = interp.interpolate(output, width, kernel, coord)
return output
def estimate_shape(coord):
"""Estimate array shape from coordinates.
Shape is estimated by the different between maximum and minimum of
coordinates in each axis.
Args:
coord (array): Coordinates.
"""
ndim = coord.shape[-1]
with backend.get_device(coord):
shape = [int(coord[..., i].max() - coord[..., i].min()) for i in range(ndim)]
return shape
def nufft_adjoint(input, coord, oshape=None, oversamp=1.25, width=4.0, n=128):
"""Adjoint non-uniform Fast Fourier Transform.
Args:
input (array): Input Fourier domain array.
coord (array): coordinate array of shape (..., ndim).
ndim determines the number of dimension to apply nufft adjoint.
oshape (tuple of ints): output shape.
oversamp (float): oversampling factor.
width (float): interpolation kernel full-width in terms of oversampled grid.
n (int): number of sampling points of interpolation kernel.
Returns:
array: Transformed array.
See Also:
:func:`sigpy.nufft.nufft`
"""
device = backend.get_device(input)
xp = device.xp
ndim = coord.shape[-1]
beta = np.pi * (((width / oversamp) * (oversamp - 0.5))**2 - 0.8)**0.5
if oshape is None:
oshape = list(input.shape[:-coord.ndim + 1]) + estimate_shape(coord)
else:
oshape = list(oshape)
with device:
coord = _scale_coord(backend.to_device(coord, device), oshape, oversamp)
kernel = backend.to_device(
_kb(np.arange(n, dtype=coord.dtype) / n, width, beta, coord.dtype), device)
os_shape = oshape[:-ndim] + [_get_ugly_number(oversamp * i) for i in oshape[-ndim:]]
output = interp.gridding(input, os_shape, width, kernel, coord)
for a in range(-ndim, 0):
i = oshape[a]
os_i = os_shape[a]
idx = xp.arange(i, dtype=input.dtype)
os_shape[a] = i
# Swap axes
output = output.swapaxes(a, -1)
os_shape[a], os_shape[-1] = os_shape[-1], os_shape[a]
# Oversampled IFFT
output = ifft(output, axes=[-1], norm=None)
output *= os_i / i**0.5
output = util.resize(output, os_shape)
# Calculate apodization
apod = (beta**2 - (np.pi * width * (idx - i // 2) / os_i)**2)**0.5
apod /= xp.sinh(apod)
# Apodize
output *= apod
# Swap back
output = output.swapaxes(a, -1)
os_shape[a], os_shape[-1] = os_shape[-1], os_shape[a]
return output
def _fftc(input, oshape=None, axes=None, norm='ortho'):
ndim = input.ndim
axes = util._normalize_axes(axes, ndim)
device = backend.get_device(input)
xp = device.xp
if oshape is None:
oshape = input.shape
with device:
tmp = input
tshape = list(input.shape)
for a in axes:
i = oshape[a]
tshape[a] = i
tmp = tmp.swapaxes(a, -1)
tshape[a], tshape[-1] = tshape[-1], tshape[a]
tmp = util.resize(tmp, tshape)
tmp = xp.fft.ifftshift(tmp, axes=-1)
tmp = xp.fft.fft(tmp, axis=-1, norm=norm)
tmp = xp.fft.fftshift(tmp, axes=-1)
tmp = tmp.swapaxes(a, -1)
tshape[a], tshape[-1] = tshape[-1], tshape[a]
output = tmp
return output
def _ifftc(input, oshape=None, axes=None, norm='ortho'):
ndim = input.ndim
axes = util._normalize_axes(axes, ndim)
device = backend.get_device(input)
xp = device.xp
if oshape is None:
oshape = input.shape
with device:
tmp = input
tshape = list(input.shape)
for a in axes:
i = oshape[a]
tshape[a] = i
tmp = tmp.swapaxes(a, -1)
tshape[a], tshape[-1] = tshape[-1], tshape[a]
tmp = util.resize(tmp, tshape)
tmp = xp.fft.ifftshift(tmp, axes=-1)
tmp = xp.fft.ifft(tmp, axis=-1, norm=norm)
tmp = xp.fft.fftshift(tmp, axes=-1)
tmp = tmp.swapaxes(a, -1)
tshape[a], tshape[-1] = tshape[-1], tshape[a]
output = tmp
return output
def _kb(x, width, beta, dtype):
return 1 / width * np.i0(beta * (1 - x**2)**0.5).astype(dtype)
def _scale_coord(coord, shape, oversamp):
ndim = coord.shape[-1]
device = backend.get_device(coord)
scale = backend.to_device([_get_ugly_number(oversamp * i) / i for i in shape[-ndim:]], device)
shift = backend.to_device([_get_ugly_number(oversamp * i) // 2 for i in shape[-ndim:]], device)
with device:
coord = scale * coord + shift
return coord
def _get_ugly_number(n):
"""Get closest ugly number greater than n.
An ugly number is defined as a positive integer that is a multiple of 2, 3, and 5.
Args:
n (int): Base number.
"""
if n <= 1:
return n
ugly_nums = [1]
i2, i3, i5 = 0, 0, 0
while(True):
ugly_num = min(ugly_nums[i2] * 2,
ugly_nums[i3] * 3,
ugly_nums[i5] * 5)
if ugly_num >= n:
return ugly_num
ugly_nums.append(ugly_num)
if ugly_num == ugly_nums[i2] * 2:
i2 += 1
elif ugly_num == ugly_nums[i3] * 3:
i3 += 1
elif ugly_num == ugly_nums[i5] * 5:
i5 += 1
| [
"numpy.i0",
"sigpy.util._normalize_axes",
"sigpy.interp.gridding",
"numpy.issubdtype",
"sigpy.backend.get_device",
"sigpy.util.resize",
"sigpy.interp.interpolate",
"sigpy.backend.to_device",
"numpy.arange"
] | [((799, 824), 'sigpy.backend.get_device', 'backend.get_device', (['input'], {}), '(input)\n', (817, 824), False, 'from sigpy import backend, config, interp, util\n'), ((1806, 1831), 'sigpy.backend.get_device', 'backend.get_device', (['input'], {}), '(input)\n', (1824, 1831), False, 'from sigpy import backend, config, interp, util\n'), ((3293, 3318), 'sigpy.backend.get_device', 'backend.get_device', (['input'], {}), '(input)\n', (3311, 3318), False, 'from sigpy import backend, config, interp, util\n'), ((5756, 5781), 'sigpy.backend.get_device', 'backend.get_device', (['input'], {}), '(input)\n', (5774, 5781), False, 'from sigpy import backend, config, interp, util\n'), ((7361, 7393), 'sigpy.util._normalize_axes', 'util._normalize_axes', (['axes', 'ndim'], {}), '(axes, ndim)\n', (7381, 7393), False, 'from sigpy import backend, config, interp, util\n'), ((7407, 7432), 'sigpy.backend.get_device', 'backend.get_device', (['input'], {}), '(input)\n', (7425, 7432), False, 'from sigpy import backend, config, interp, util\n'), ((8175, 8207), 'sigpy.util._normalize_axes', 'util._normalize_axes', (['axes', 'ndim'], {}), '(axes, ndim)\n', (8195, 8207), False, 'from sigpy import backend, config, interp, util\n'), ((8221, 8246), 'sigpy.backend.get_device', 'backend.get_device', (['input'], {}), '(input)\n', (8239, 8246), False, 'from sigpy import backend, config, interp, util\n'), ((9084, 9109), 'sigpy.backend.get_device', 'backend.get_device', (['coord'], {}), '(coord)\n', (9102, 9109), False, 'from sigpy import backend, config, interp, util\n'), ((4587, 4635), 'sigpy.interp.interpolate', 'interp.interpolate', (['output', 'width', 'kernel', 'coord'], {}), '(output, width, kernel, coord)\n', (4605, 4635), False, 'from sigpy import backend, config, interp, util\n'), ((4927, 4952), 'sigpy.backend.get_device', 'backend.get_device', (['coord'], {}), '(coord)\n', (4945, 4952), False, 'from sigpy import backend, config, interp, util\n'), ((6376, 6430), 'sigpy.interp.gridding', 
'interp.gridding', (['input', 'os_shape', 'width', 'kernel', 'coord'], {}), '(input, os_shape, width, kernel, coord)\n', (6391, 6430), False, 'from sigpy import backend, config, interp, util\n'), ((877, 923), 'numpy.issubdtype', 'np.issubdtype', (['input.dtype', 'np.complexfloating'], {}), '(input.dtype, np.complexfloating)\n', (890, 923), True, 'import numpy as np\n'), ((1159, 1205), 'numpy.issubdtype', 'np.issubdtype', (['input.dtype', 'np.complexfloating'], {}), '(input.dtype, np.complexfloating)\n', (1172, 1205), True, 'import numpy as np\n'), ((1884, 1930), 'numpy.issubdtype', 'np.issubdtype', (['input.dtype', 'np.complexfloating'], {}), '(input.dtype, np.complexfloating)\n', (1897, 1930), True, 'import numpy as np\n'), ((2168, 2214), 'numpy.issubdtype', 'np.issubdtype', (['input.dtype', 'np.complexfloating'], {}), '(input.dtype, np.complexfloating)\n', (2181, 2214), True, 'import numpy as np\n'), ((4109, 4138), 'sigpy.util.resize', 'util.resize', (['output', 'os_shape'], {}), '(output, os_shape)\n', (4120, 4138), False, 'from sigpy import backend, config, interp, util\n'), ((4388, 4420), 'sigpy.backend.to_device', 'backend.to_device', (['coord', 'device'], {}), '(coord, device)\n', (4405, 4420), False, 'from sigpy import backend, config, interp, util\n'), ((6090, 6122), 'sigpy.backend.to_device', 'backend.to_device', (['coord', 'device'], {}), '(coord, device)\n', (6107, 6122), False, 'from sigpy import backend, config, interp, util\n'), ((6881, 6910), 'sigpy.util.resize', 'util.resize', (['output', 'os_shape'], {}), '(output, os_shape)\n', (6892, 6910), False, 'from sigpy import backend, config, interp, util\n'), ((7769, 7793), 'sigpy.util.resize', 'util.resize', (['tmp', 'tshape'], {}), '(tmp, tshape)\n', (7780, 7793), False, 'from sigpy import backend, config, interp, util\n'), ((8584, 8608), 'sigpy.util.resize', 'util.resize', (['tmp', 'tshape'], {}), '(tmp, tshape)\n', (8595, 8608), False, 'from sigpy import backend, config, interp, util\n'), ((8956, 
8989), 'numpy.i0', 'np.i0', (['(beta * (1 - x ** 2) ** 0.5)'], {}), '(beta * (1 - x ** 2) ** 0.5)\n', (8961, 8989), True, 'import numpy as np\n'), ((4497, 4528), 'numpy.arange', 'np.arange', (['n'], {'dtype': 'coord.dtype'}), '(n, dtype=coord.dtype)\n', (4506, 4528), True, 'import numpy as np\n'), ((6194, 6225), 'numpy.arange', 'np.arange', (['n'], {'dtype': 'coord.dtype'}), '(n, dtype=coord.dtype)\n', (6203, 6225), True, 'import numpy as np\n')] |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import site
import unittest
import numpy as np
class TestCustomKernelLoad(unittest.TestCase):
def setUp(self):
# compile so and set to current path
cur_dir = os.path.dirname(os.path.abspath(__file__))
# --inplace to place output so file to current dir
cmd = 'cd {} && {} custom_kernel_dot_setup.py build_ext --inplace'.format(
cur_dir, sys.executable)
os.system(cmd)
# get paddle lib path and place so
paddle_lib_path = ''
site_dirs = site.getsitepackages() if hasattr(
site, 'getsitepackages') else [
x for x in sys.path if 'site-packages' in x
]
for site_dir in site_dirs:
lib_dir = os.path.sep.join([site_dir, 'paddle', 'libs'])
if os.path.exists(lib_dir):
paddle_lib_path = lib_dir
break
if paddle_lib_path == '':
if hasattr(site, 'USER_SITE'):
lib_dir = os.path.sep.join([site.USER_SITE, 'paddle', 'libs'])
if os.path.exists(lib_dir):
paddle_lib_path = lib_dir
self.default_path = os.path.sep.join(
[paddle_lib_path, '..', '..', 'paddle-plugins'])
# copy so to defalut path
cmd = 'mkdir -p {} && cp ./*.so {}'.format(self.default_path,
self.default_path)
os.system(cmd) # wait
def test_custom_kernel_dot_load(self):
# test dot load
x_data = np.random.uniform(1, 5, [2, 10]).astype(np.int8)
y_data = np.random.uniform(1, 5, [2, 10]).astype(np.int8)
result = np.sum(x_data * y_data, axis=1).reshape([2, 1])
import paddle
paddle.set_device('cpu')
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
out = paddle.dot(x, y)
self.assertTrue(
np.array_equal(out.numpy(), result),
"custom kernel dot out: {},\n numpy dot out: {}".format(
out.numpy(), result))
def tearDown(self):
cmd = 'rm -rf {}'.format(self.default_path)
os.system(cmd)
if __name__ == '__main__':
if os.name == 'nt' or sys.platform.startswith('darwin'):
# only support Linux now
exit()
unittest.main()
| [
"os.path.exists",
"sys.platform.startswith",
"os.path.abspath",
"numpy.sum",
"os.path.sep.join",
"paddle.to_tensor",
"paddle.dot",
"site.getsitepackages",
"numpy.random.uniform",
"unittest.main",
"os.system",
"paddle.set_device"
] | [((2921, 2936), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2934, 2936), False, 'import unittest\n'), ((1044, 1058), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (1053, 1058), False, 'import os\n'), ((1787, 1852), 'os.path.sep.join', 'os.path.sep.join', (["[paddle_lib_path, '..', '..', 'paddle-plugins']"], {}), "([paddle_lib_path, '..', '..', 'paddle-plugins'])\n", (1803, 1852), False, 'import os\n'), ((2048, 2062), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (2057, 2062), False, 'import os\n'), ((2367, 2391), 'paddle.set_device', 'paddle.set_device', (['"""cpu"""'], {}), "('cpu')\n", (2384, 2391), False, 'import paddle\n'), ((2404, 2428), 'paddle.to_tensor', 'paddle.to_tensor', (['x_data'], {}), '(x_data)\n', (2420, 2428), False, 'import paddle\n'), ((2441, 2465), 'paddle.to_tensor', 'paddle.to_tensor', (['y_data'], {}), '(y_data)\n', (2457, 2465), False, 'import paddle\n'), ((2480, 2496), 'paddle.dot', 'paddle.dot', (['x', 'y'], {}), '(x, y)\n', (2490, 2496), False, 'import paddle\n'), ((2764, 2778), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (2773, 2778), False, 'import os\n'), ((2834, 2867), 'sys.platform.startswith', 'sys.platform.startswith', (['"""darwin"""'], {}), "('darwin')\n", (2857, 2867), False, 'import sys\n'), ((829, 854), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (844, 854), False, 'import os\n'), ((1152, 1174), 'site.getsitepackages', 'site.getsitepackages', ([], {}), '()\n', (1172, 1174), False, 'import site\n'), ((1362, 1408), 'os.path.sep.join', 'os.path.sep.join', (["[site_dir, 'paddle', 'libs']"], {}), "([site_dir, 'paddle', 'libs'])\n", (1378, 1408), False, 'import os\n'), ((1424, 1447), 'os.path.exists', 'os.path.exists', (['lib_dir'], {}), '(lib_dir)\n', (1438, 1447), False, 'import os\n'), ((1616, 1668), 'os.path.sep.join', 'os.path.sep.join', (["[site.USER_SITE, 'paddle', 'libs']"], {}), "([site.USER_SITE, 'paddle', 'libs'])\n", (1632, 1668), False, 'import os\n'), 
((1688, 1711), 'os.path.exists', 'os.path.exists', (['lib_dir'], {}), '(lib_dir)\n', (1702, 1711), False, 'import os\n'), ((2156, 2188), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(5)', '[2, 10]'], {}), '(1, 5, [2, 10])\n', (2173, 2188), True, 'import numpy as np\n'), ((2222, 2254), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(5)', '[2, 10]'], {}), '(1, 5, [2, 10])\n', (2239, 2254), True, 'import numpy as np\n'), ((2288, 2319), 'numpy.sum', 'np.sum', (['(x_data * y_data)'], {'axis': '(1)'}), '(x_data * y_data, axis=1)\n', (2294, 2319), True, 'import numpy as np\n')] |
# coding:utf-8
# Test for upsample_2d
# Created : 7, 5, 2018
# Revised : 7, 5, 2018
# All rights reserved
#------------------------------------------------------------------------------------------------
__author__ = 'dawei.leng'
import os, sys
os.environ['THEANO_FLAGS'] = "floatX=float32, mode=FAST_RUN, warn_float64='raise'"
import theano
from theano import tensor
from dandelion.module import *
from dandelion.functional import upsample_2d, upsample_2d_bilinear
from lasagne.layers import InputLayer, get_output, Upscale2DLayer
import dandelion
dandelion_path = os.path.split(dandelion.__file__)[0]
print('dandelion path = %s\n' % dandelion_path)
class build_model_D(Module):
def __init__(self, ratio=[2, 3], mode='repeat'):
super().__init__()
self.ratio = ratio
self.mode = mode
self.predict = self.forward
def forward(self, x):
"""
:param x: (B, C, H, W)
:return:
"""
x = upsample_2d(x, ratio=self.ratio, mode=self.mode)
# x = relu(x)
return x
def build_model_L(ratio=[2,3], mode='repeat'):
input_var = tensor.ftensor4('x') # (B, C, H, W)
input0 = InputLayer(shape=(None, None, None, None), input_var=input_var, name='input0')
x = Upscale2DLayer(input0, scale_factor=ratio, mode=mode)
return x
def test_case_0():
import numpy as np
from lasagne_ext.utils import get_layer_by_name
ratio = [1, 2]
mode = 'dilate'
model_D = build_model_D(ratio=ratio, mode=mode)
model_L = build_model_L(ratio=ratio, mode=mode)
X = get_layer_by_name(model_L, 'input0').input_var
y_D = model_D.forward(X)
y_L = get_output(model_L)
fn_D = theano.function([X], y_D, no_default_updates=True, on_unused_input='ignore')
fn_L = theano.function([X], y_L, no_default_updates=True, on_unused_input='ignore')
for i in range(20):
B = np.random.randint(low=1, high=16)
C = np.random.randint(low=1, high=32)
H = np.random.randint(low=5, high=256)
W = np.random.randint(low=5, high=255)
x = np.random.rand(B, C, H, W).astype(np.float32) - 0.5
y_D = fn_D(x)
y_L = fn_L(x)
# print(y_D)
diff = np.max(np.abs(y_D - y_L))
print('i=%d, diff=%0.6f' % (i, diff))
if diff>1e-4:
print('y_D=\n', y_D)
print('y_L=\n', y_L)
raise ValueError('diff is too big')
if __name__ == '__main__':
test_case_0()
print('Test passed')
| [
"numpy.abs",
"lasagne_ext.utils.get_layer_by_name",
"theano.function",
"numpy.random.rand",
"lasagne.layers.InputLayer",
"os.path.split",
"lasagne.layers.get_output",
"numpy.random.randint",
"theano.tensor.ftensor4",
"lasagne.layers.Upscale2DLayer",
"dandelion.functional.upsample_2d"
] | [((577, 610), 'os.path.split', 'os.path.split', (['dandelion.__file__'], {}), '(dandelion.__file__)\n', (590, 610), False, 'import os, sys\n'), ((1124, 1144), 'theano.tensor.ftensor4', 'tensor.ftensor4', (['"""x"""'], {}), "('x')\n", (1139, 1144), False, 'from theano import tensor\n'), ((1174, 1252), 'lasagne.layers.InputLayer', 'InputLayer', ([], {'shape': '(None, None, None, None)', 'input_var': 'input_var', 'name': '"""input0"""'}), "(shape=(None, None, None, None), input_var=input_var, name='input0')\n", (1184, 1252), False, 'from lasagne.layers import InputLayer, get_output, Upscale2DLayer\n'), ((1262, 1315), 'lasagne.layers.Upscale2DLayer', 'Upscale2DLayer', (['input0'], {'scale_factor': 'ratio', 'mode': 'mode'}), '(input0, scale_factor=ratio, mode=mode)\n', (1276, 1315), False, 'from lasagne.layers import InputLayer, get_output, Upscale2DLayer\n'), ((1665, 1684), 'lasagne.layers.get_output', 'get_output', (['model_L'], {}), '(model_L)\n', (1675, 1684), False, 'from lasagne.layers import InputLayer, get_output, Upscale2DLayer\n'), ((1697, 1773), 'theano.function', 'theano.function', (['[X]', 'y_D'], {'no_default_updates': '(True)', 'on_unused_input': '"""ignore"""'}), "([X], y_D, no_default_updates=True, on_unused_input='ignore')\n", (1712, 1773), False, 'import theano\n'), ((1785, 1861), 'theano.function', 'theano.function', (['[X]', 'y_L'], {'no_default_updates': '(True)', 'on_unused_input': '"""ignore"""'}), "([X], y_L, no_default_updates=True, on_unused_input='ignore')\n", (1800, 1861), False, 'import theano\n'), ((972, 1020), 'dandelion.functional.upsample_2d', 'upsample_2d', (['x'], {'ratio': 'self.ratio', 'mode': 'self.mode'}), '(x, ratio=self.ratio, mode=self.mode)\n', (983, 1020), False, 'from dandelion.functional import upsample_2d, upsample_2d_bilinear\n'), ((1579, 1615), 'lasagne_ext.utils.get_layer_by_name', 'get_layer_by_name', (['model_L', '"""input0"""'], {}), "(model_L, 'input0')\n", (1596, 1615), False, 'from lasagne_ext.utils import 
get_layer_by_name\n'), ((1899, 1932), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(16)'}), '(low=1, high=16)\n', (1916, 1932), True, 'import numpy as np\n'), ((1945, 1978), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(32)'}), '(low=1, high=32)\n', (1962, 1978), True, 'import numpy as np\n'), ((1991, 2025), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(5)', 'high': '(256)'}), '(low=5, high=256)\n', (2008, 2025), True, 'import numpy as np\n'), ((2038, 2072), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(5)', 'high': '(255)'}), '(low=5, high=255)\n', (2055, 2072), True, 'import numpy as np\n'), ((2224, 2241), 'numpy.abs', 'np.abs', (['(y_D - y_L)'], {}), '(y_D - y_L)\n', (2230, 2241), True, 'import numpy as np\n'), ((2085, 2111), 'numpy.random.rand', 'np.random.rand', (['B', 'C', 'H', 'W'], {}), '(B, C, H, W)\n', (2099, 2111), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from .derivest import derivest
import numpy as np
def directional_diff(fun, x, d, par = None, normalize = True, **kwargs):
"""
Estimate the directional derivative of a function of n variables.
Uses the derivest method to provide both a directional derivative
and an error estimate.
Arguments:
fun : Callable object with signature fun(x, *args) -> float, with x the
(vector) argument, and args an optional list of parameters.
x : Vector location at which to differentiate fun. If x has more than
one axis, then fun is assumed to be a function of np.prod(x.shape)
variables, but it is not flattened.
d : Vector (d.shape == x.shape) defining the line along which to take
the derivative. The vector will automatically be normalized.
par : Optional list of parameters to be passed to fun as fun(x, *par).
If par is not provided, then fun(x, *[]) is equivalent to fun(x),
in which case fun may have signature fun(x) -> float.
normalize : Boolean specifying whether to normalize d. (Default: True)
Additional keyword arguments are passed to derivest.
Returns a 3-tuple, containing:
dd : A scalar estimate of the first derivative of fun at location x,
in the specified direction d.
err : Error estimate of the directional derivative.
final_delta : Vector of final step sizes for each partial derivative.
Example:
>>> import derivest, numpy as np
>>> def f(v, *args):
... '''f(x, y; z) = x**2.0 + y**z'''
... return v[0]**2.0 + v[1]**args[0]
>>> v = np.array([-1, 0]) # Evaluate at (x, y) = (-1, 0)
>>> d = np.array([ 1, 1]) # in the direction [1, 1].
>>> p = [1.0] # Set parameter value z = 1.
>>> (der, err, delta) = derivest.directional_diff(f, v, d, p)
>>> print(der, "|", err)
Out: -0.7071 | 5.4102e-15
"""
##### PROCESS ARGUMENTS AND CHECK FOR VALIDITY #####
if kwargs.pop("deriv_order", 1) != 1:
raise ValueError("directional_diff() can only perform "
"first-order differentiation.")
if kwargs.pop("vectorized", False):
raise ValueError("directional_diff() is incompatible with "
"vectorized evaluation.")
kwargs["deriv_order"] = 1 # Force first-order differentiation
kwargs["vectorized"] = False # and non-vectorized evaluation.
if isinstance(x, list):
x = np.array(x) # Avoid AttributeError from x.shape if given a list.
d = np.array(d, dtype = np.float64)
if par is None:
par = [] # Avoid TypeError from attempting *None when par not provided.
if x.shape != d.shape:
raise ValueError("Shapes must match. Got x -> %s and d -> %s"
% (x.shape, d.shape))
if np.allclose(d, np.zeros_like(d)):
raise ValueError("Direction vector is numerically zero.")
if normalize:
d /= np.sqrt(np.sum(d**2.0)) # Normalize direction.
##### COMPUTE DIRECTIONAL DERIVATIVE #####
func = lambda t: fun(x + t*d, *par)
return derivest(func, 0.0, **kwargs)
| [
"numpy.array",
"numpy.zeros_like",
"numpy.sum"
] | [((2724, 2753), 'numpy.array', 'np.array', (['d'], {'dtype': 'np.float64'}), '(d, dtype=np.float64)\n', (2732, 2753), True, 'import numpy as np\n'), ((2651, 2662), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2659, 2662), True, 'import numpy as np\n'), ((3022, 3038), 'numpy.zeros_like', 'np.zeros_like', (['d'], {}), '(d)\n', (3035, 3038), True, 'import numpy as np\n'), ((3146, 3162), 'numpy.sum', 'np.sum', (['(d ** 2.0)'], {}), '(d ** 2.0)\n', (3152, 3162), True, 'import numpy as np\n')] |
"""
Created on February 28, 2020
@author: <NAME>
Implementation of vignette_filter function in the pymagine package.
"""
import numpy as np
import cv2
def vignette_filter(
image_path,
strength=1.0,
x=0.5,
y=0.5,
file_name="vignette.jpg"):
"""
Applies vignette filter to a given image at the specified strength
and focal point then saves the result to the current working directory.
Parameters
----------
image: string
The local file path to the image for which the
filter will be applied.
sigma: float
Parameter for the strength of the dimming effect.
Default: 1.0
x: float
Parameter for the centre point of the effect
along the x axis.
Default: 0.5
y: float
Parameter for the centre point of the effect
along the y axis.
Default: 0.5
file_name: string
The filename for the output image
Default: "vignette.jpg"
Returns
-------
numpy array
altered image array with effect applied
Example
-------
>>> vignette_filter("img/picture.jpeg", strength=2.5, x=0.25, y=0.75)
"""
if not isinstance(image_path, str):
raise TypeError("Image file path must be a string.")
if not image_path.endswith((".png", ".jpeg", ".jpg")):
raise TypeError("Image format must be png, jpg, or jpeg.")
if image_path.startswith(("https:", "http:", "www.")):
raise TypeError(
"Image file path can't be a URL, provide a local file path.")
if strength <= 0.:
raise ValueError("Vignette strength must be a positive value.")
if x < 0 or x > 1 or y < 0 or y > 1:
raise ValueError("Centre points must be between 0 and 1.")
if not file_name.endswith((".png", ".jpeg", ".jpg")):
raise TypeError("File name format must be png, jpg, or jpeg.")
# read in image file
image = cv2.imread(image_path, 1)
# extract image dimensions and define center/focus indices
rows, cols = image.shape[:2]
focus_x = int(cols * x)
focus_y = int(rows * y)
# calculate stdev of the gaussian based on strength parameter
sigma = (rows + cols) / (((1 + strength) / 1))
# generate gaussian filters for each axis
filt_cols = cv2.getGaussianKernel(
2 * cols, sigma)[cols - focus_x:2 * cols - focus_x]
filt_rows = cv2.getGaussianKernel(
2 * rows, sigma)[rows - focus_y:2 * rows - focus_y]
# create and scale 2d vignette filter
filt_2d = filt_rows * filt_cols.T
filt_2d_scaled = filt_2d / filt_2d.max()
# create output image of correct dimensions
image_modified = np.copy(image)
image_modified[:, :, :] = 0
# apply filter to each layer of the input image
image_modified[:, :, 0] = image[:, :, 0] * filt_2d_scaled
image_modified[:, :, 1] = image[:, :, 1] * filt_2d_scaled
image_modified[:, :, 2] = image[:, :, 2] * filt_2d_scaled
# write image to disk
cv2.imwrite(file_name, image_modified)
return image_modified
| [
"cv2.getGaussianKernel",
"numpy.copy",
"cv2.imwrite",
"cv2.imread"
] | [((1923, 1948), 'cv2.imread', 'cv2.imread', (['image_path', '(1)'], {}), '(image_path, 1)\n', (1933, 1948), False, 'import cv2\n'), ((2661, 2675), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (2668, 2675), True, 'import numpy as np\n'), ((2978, 3016), 'cv2.imwrite', 'cv2.imwrite', (['file_name', 'image_modified'], {}), '(file_name, image_modified)\n', (2989, 3016), False, 'import cv2\n'), ((2283, 2321), 'cv2.getGaussianKernel', 'cv2.getGaussianKernel', (['(2 * cols)', 'sigma'], {}), '(2 * cols, sigma)\n', (2304, 2321), False, 'import cv2\n'), ((2382, 2420), 'cv2.getGaussianKernel', 'cv2.getGaussianKernel', (['(2 * rows)', 'sigma'], {}), '(2 * rows, sigma)\n', (2403, 2420), False, 'import cv2\n')] |
import csv
import os
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
from path import Path
from vector_math import *
from find_matches import *
from file_paths import *
#********************
#**** this function reads a CSV file and returns a header, and a list that has been converted to float
#********************
def read_csv_float_with_header(file1,list1):
fileopen = open(file1,'rb')
fileobject = csv.reader(fileopen)
# get the header
header = fileobject.next()
# read each line in the CSV, and convert the values to float before appending to list1
for row in fileobject:
float_line = []
for subrow in row:
float_line.append( float(subrow))
list1.append( float_line)
fileopen.close() # close the file that we read from
return header, list1
#********************
#**** end function reads a CSV file and returns a header, and a list that has been converted to float
#********************
#********************
#**** main code
#********************
#@profile
def mainloop(driver_id, rdp_tolerance,Input_Path):
Input_Path = DATA
start_time = time.time()
list_of_paths = []
list_of_lengths = []
# read all the routes for this driver
for cnt in range(1,201):
# initialize this path
path = Path(1,cnt) # start with driver 1, route 1
input_coords=[]
# file_name = "Test_Set\\driver_1\\" + str(cnt) + ".csv"
file_name = os.path.join(Input_Path,str(driver_id),str(cnt) + ".csv")
header, input_coords = read_csv_float_with_header(file_name,input_coords)
path_array = np.array(input_coords)
path.route = path_array
path.time = len(path.route) # 1 second per data file
# only analyze this path if it is not within a 50 meter bound of the starting point
max_value = np.amax(path.route)
min_value = np.amin(path.route)
if ( max_value < 50 and min_value > -50):
path.is_zero = 1 # this is a zero length route
#if ( path.is_zero == 0) :
# x_coord = path.route[ path.time-1, 0]
# y_coord = path.route[ path.time-1, 1]
# angle_off_horizontal = np.arctan( y_coord / x_coord )
# path.rotate_path(angle_off_horizontal)
# x_coord = path.route[ path.time-1, 0]
# y_coord = path.route[ path.time-1, 1]
# if (x_coord < 0) : # for quadrant 2 & 3 rotate back to quadrant 1
# angle_off_horizontal = np.pi
# path.rotate_path(angle_off_horizontal)
# # make our new 0, 0 point be the highest point in the path, bisecting the angle that makes the tallest
# path.center_on_highest_point()
# find the total distance along the route
path.distance = path.get_route_distance(0, path.time)
list_of_lengths.append(path.distance)
if ( path.is_zero == 0) :
# get features on this path
path.generate_features(rdp_tolerance)
#plt.figure()
#plt.plot(path.route[:,0],path.route[:,1],markersize=2.0)
#feature_list = []
#for cnt, feature in enumerate(path.feature_loc):
# x1 = path.route[ path.feature_loc[cnt,2] ,0]
# y1 = path.route[ path.feature_loc[cnt,2] ,1]
# feature_list.append( [x1, y1] )
#feature_list = np.array(feature_list)
#
#plt.scatter(feature_list[:,0],feature_list[:,1])
#plt.figure()
#plt.plot(path.route[:,0],path.route[:,1],markersize=2.0)
#feature_list = []
#for cnt, feature in enumerate(path.feature_loc):
# x1 = path.route[ path.feature_loc[cnt,2] ,0]
# y1 = path.route[ path.feature_loc[cnt,2] ,1]
# feature_list.append( [x1, y1] )
#feature_list = np.array(feature_list)
#
#plt.scatter(feature_list[:,0],feature_list[:,1])
#plt.show()
# get angles between each of the consective features
path.generate_angles()
list_of_paths.append(path)
list_to_run = []
#list_to_run.append( [11,19] )
#list_to_run.append( [17,9,15] )
#list_to_run.append( [63, 83, 120, 148] )
#list_to_run.append( [102, 167, 183, 197, 200] )
#list_to_run.append( [5,96] )
for cnt, path in enumerate(list_of_paths):
if ( path.is_zero == 0) :
for cnt2, run_list in enumerate(list_to_run):
if ( path.routeid in run_list):
plt.figure(cnt2+1)
plt.plot(path.route[:,0],path.route[:,1],markersize=2.0)
feature_list = []
for cnt, feature in enumerate(path.feature_loc):
x1 = path.route[ path.feature_loc[cnt,2] ,0]
y1 = path.route[ path.feature_loc[cnt,2] ,1]
feature_list.append( [x1, y1] )
feature_list = np.array(feature_list)
plt.scatter(feature_list[:,0],feature_list[:,1])
# make CSV files of our angles
#file_name = open("Angle_Info_" + str(path.routeid) + ".csv",'wb')
#file_object = csv.writer(file_name)
#
#for angle in path.angles:
# file_object.writerow(angle)
#
#file_name.close()
for cnt1, path1 in enumerate(list_of_paths):
for cnt2, path2 in enumerate(list_of_paths[cnt1+1:]):
if (path1.matched < 3 or path2.matched < 3): # if one of the two paths aren't matched, check it
if ( path1.is_zero == 0 and path2.is_zero == 0) : # make sure we don't run a zero length path
already_matched = 0
path2.print_flag = 0 # default to not making a picture
path1.print_flag = 0
if (path1.routeid != path2.routeid): # don't compare a path against itself
compare_two_sets_of_angles(path1, path2) # Compare these two paths and record the score in path 1
#if (path1.matched ==1 and path2.matched ==1): # if we matched this time, see if it is a new match
#
#
#
#
# if (previous_value1 ==0 and rdp_tolerance==15):
# path1.print_flag = 1
# else:
# path1.print_flag = 0
# if (previous_value2 ==0 and rdp_tolerance==15):
# path2.print_flag = 1
# else:
# path2.print_flag = 0
# #
# if (path1.print_flag==1 or path2.print_flag==1): # if the new values are a match that wasn't a previous match, print it
# print(path1.routeid, path2.routeid)
# align_two_paths(path1, path2,driver_id,rdp_tolerance)
#
# path2.print_flag = 0
# path1.print_flag = 0
#
#
list_of_lengths.sort()
for cnt1, path1 in enumerate(list_of_paths):
if (path1.distance > list_of_lengths[190] and path1.matched ==0): # if it is a long path and not matched, move it down the ranking
path1.matched = -2
elif (path1.distance > list_of_lengths[180] and path1.matched ==0): # if it is a long path and not matched, move it down the ranking
path1.matched = -1
num_matched = 0
final_out = open("Results_"+str(rdp_tolerance)+"m//Driver_" + str(driver_id)+".csv",'wb')
final_out_csv = csv.writer(final_out)
for cnt1, path1 in enumerate(list_of_paths):
final_out_csv.writerow([driver_id, path1.routeid, path1.matched])
if (path1.matched >=1):
num_matched+=1
fout = open("intial_match_list.txt",'a')
fout.write("Driver " + str(driver_id) +" num matched " + str(num_matched) + "\n")
fout.close()
end_time = time.time()
print("minutes elapsed ",(end_time-start_time) / 60. )
#plt.show()
#sys.exit(0)
#time_vs_distance = []
#color_list=[]
#for cnt, path in enumerate(list_of_paths):
# if (path.matched==1):
# time_vs_distance.append( [ path.time, path.distance] )
# color_list.append("red")
# elif(path.is_zero ==1):
# time_vs_distance.append( [ path.time, path.distance] )
# color_list.append("blue")
# else:
# time_vs_distance.append( [ path.time, path.distance] )
# color_list.append("green")
#
#time_vs_distance = np.array(time_vs_distance)
#plt.figure(2)
#plt.scatter(time_vs_distance[:,0],time_vs_distance[:,1],c=color_list,s=100)
#plt.show()
#plt.close()
#
#
#********************
#**** end main code
#********************
# drivers start at 1 and the last one is 3612 however there are gaps in between
# there are a total of 2736 drivers
rdp_tolerance_list = [ 13, 15, 17 ]
#rdp_tolerance_list = [ 13 ]
for rdp_tolerance in rdp_tolerance_list:
if( not os.path.isdir("Results_"+str(rdp_tolerance)+"m") ):
os.mkdir("Results_"+str(rdp_tolerance)+"m")
Input_Path = DATA
for driver_id in range(1,3613):
#rdp_tolerance = 15
#if (1==1):
try:
file_name = os.path.join(Input_Path,str(driver_id),str(1) + ".csv")
fileopen = open(file_name,'rb')
fileopen.close() # close the file that we read from
for rdp_tolerance in rdp_tolerance_list:
print ("doing driver ",driver_id, " rdp ",rdp_tolerance)
mainloop(driver_id, rdp_tolerance,Input_Path)
except:
x=1 | [
"numpy.amax",
"numpy.amin",
"csv.writer",
"matplotlib.pyplot.plot",
"path.Path",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"time.time",
"csv.reader"
] | [((439, 459), 'csv.reader', 'csv.reader', (['fileopen'], {}), '(fileopen)\n', (449, 459), False, 'import csv\n'), ((1189, 1200), 'time.time', 'time.time', ([], {}), '()\n', (1198, 1200), False, 'import time\n'), ((7903, 7924), 'csv.writer', 'csv.writer', (['final_out'], {}), '(final_out)\n', (7913, 7924), False, 'import csv\n'), ((8302, 8313), 'time.time', 'time.time', ([], {}), '()\n', (8311, 8313), False, 'import time\n'), ((1387, 1399), 'path.Path', 'Path', (['(1)', 'cnt'], {}), '(1, cnt)\n', (1391, 1399), False, 'from path import Path\n'), ((1702, 1724), 'numpy.array', 'np.array', (['input_coords'], {}), '(input_coords)\n', (1710, 1724), True, 'import numpy as np\n'), ((1948, 1967), 'numpy.amax', 'np.amax', (['path.route'], {}), '(path.route)\n', (1955, 1967), True, 'import numpy as np\n'), ((1987, 2006), 'numpy.amin', 'np.amin', (['path.route'], {}), '(path.route)\n', (1994, 2006), True, 'import numpy as np\n'), ((4783, 4803), 'matplotlib.pyplot.figure', 'plt.figure', (['(cnt2 + 1)'], {}), '(cnt2 + 1)\n', (4793, 4803), True, 'import matplotlib.pyplot as plt\n'), ((4819, 4879), 'matplotlib.pyplot.plot', 'plt.plot', (['path.route[:, 0]', 'path.route[:, 1]'], {'markersize': '(2.0)'}), '(path.route[:, 0], path.route[:, 1], markersize=2.0)\n', (4827, 4879), True, 'import matplotlib.pyplot as plt\n'), ((5213, 5235), 'numpy.array', 'np.array', (['feature_list'], {}), '(feature_list)\n', (5221, 5235), True, 'import numpy as np\n'), ((5271, 5322), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_list[:, 0]', 'feature_list[:, 1]'], {}), '(feature_list[:, 0], feature_list[:, 1])\n', (5282, 5322), True, 'import matplotlib.pyplot as plt\n')] |
import pytest
# import unittest
import numpy as np
import femnurbs.SplineUsefulFunctions as SUF
def test_isValidU():
    """isValidU must reject malformed knot vectors and accept well-formed ones.

    Calling it with no argument is a TypeError; non-sequence inputs and
    knot vectors that are unclamped, unsorted, out of [0, 1], or over-repeated
    interior knots are invalid.
    """
    with pytest.raises(TypeError):
        SUF.isValidU()

    rejected = [
        0,
        1.2,
        {},
        -1,
        {1: 1},
        [0, 0, 0, 1, 1],
        [0, 0, 1, 1, 1],
        [0, 0, 0, 0, 1, 1, 1],
        [0, 0, 0, 1, 1, 1, 1],
        [-1, -1, 1, 1],
        [0, 0, 2, 2],
        [0, 0, 0.8, 0.2, 1, 1],
        [0, 0, 0, 1, 0.5, 1, 1],
        [0, 0, 0.5, 0.5, 0.5, 1, 1],
    ]
    for candidate in rejected:
        assert SUF.isValidU(candidate) is False

    accepted = [
        [0, 0, 1, 1],
        [0, 0, 0, 1, 1, 1],
        [0, 0, 0, 0, 1, 1, 1, 1],
        [0, 0, 0.2, 0.8, 1, 1],
        [0, 0, 0, 0.5, 1, 1, 1],
        [0, 0, 0.1, 0.5, 0.9, 1, 1],
        [0, 0, 0.5, 0.5, 1, 1],
    ]
    for candidate in accepted:
        assert SUF.isValidU(candidate) is True
def test_UBezier():
    """UBezier(p) returns the clamped Bezier knot vector [0]*(p+1) + [1]*(p+1)."""
    for degree in range(1, 10):
        assert SUF.isValidU(SUF.UBezier(p=degree)) is True

    # Spot-check the exact vectors for the low degrees used elsewhere.
    for degree in (1, 2, 3):
        expected = np.array([0] * (degree + 1) + [1] * (degree + 1))
        np.testing.assert_almost_equal(expected, SUF.UBezier(p=degree))
def test_UUniform():
    """UUniform(p, n) yields a valid clamped knot vector with evenly spaced interior knots."""
    for degree in range(1, 10):
        for count in range(degree + 1, 11):
            assert SUF.isValidU(SUF.UUniform(p=degree, n=count)) is True

    # (p, n, expected knot vector) spot checks.
    cases = [
        (1, 2, [0, 0, 1, 1]),
        (1, 3, [0, 0, 0.5, 1, 1]),
        (1, 5, [0, 0, 0.25, 0.5, 0.75, 1, 1]),
        (1, 6, [0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1]),
        (2, 3, [0, 0, 0, 1, 1, 1]),
        (2, 4, [0, 0, 0, 0.5, 1, 1, 1]),
        (2, 6, [0, 0, 0, 0.25, 0.5, 0.75, 1, 1, 1]),
        (2, 7, [0, 0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1, 1]),
        (3, 4, [0, 0, 0, 0, 1, 1, 1, 1]),
        (3, 5, [0, 0, 0, 0, 0.5, 1, 1, 1, 1]),
        (3, 7, [0, 0, 0, 0, 0.25, 0.5, 0.75, 1, 1, 1, 1]),
        (3, 8, [0, 0, 0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1, 1, 1]),
    ]
    for degree, count, expected in cases:
        np.testing.assert_almost_equal(
            np.array(expected), SUF.UUniform(p=degree, n=count))
def test_URandom():
    """Every URandom(p, n) draw is a valid knot vector whose degree and size round-trip."""
    trials = 100
    for degree in (1, 2, 3):
        for count in range(degree + 1, 30):
            for _ in range(trials):
                knots = SUF.URandom(p=degree, n=count)
                assert SUF.isValidU(knots) is True
                assert SUF.getPfromU(knots) == degree
                assert SUF.getNfromU(knots) == count
def test_transpose():
    """transpose leaves identity matrices unchanged, with and without diagonal=2."""
    for size in (3, 4):
        identity = np.eye(size)
        np.testing.assert_almost_equal(SUF.transpose(identity), identity)
        np.testing.assert_almost_equal(
            SUF.transpose(identity, diagonal=2), identity)
def test_isSymetric():
    """isSymetric detects (anti-diagonal) symmetry for square and 2-row band matrices."""
    for size in (3, 4):
        assert SUF.isSymetric(np.eye(size)) is True
        assert SUF.isSymetric(np.eye(size), diagonal=2) is True

    symmetric_bands = [
        [[1, 2, 3, 4], [4, 3, 2, 1]],
        [[1, 2, 4, 4], [4, 4, 2, 1]],
    ]
    for band in symmetric_bands:
        assert SUF.isSymetric(np.array(band), diagonal=2) is True

    asymmetric_bands = [
        [[7, 2, 4, 3], [4, 4, 2, 7]],
        [[7, 2, 4, 7], [7, 4, 2, 3]],
    ]
    for band in asymmetric_bands:
        assert SUF.isSymetric(np.array(band), diagonal=2) is False
def test_getPfromU():
    """getPfromU recovers the spline degree from Bezier, uniform, and custom knot vectors."""
    for degree in (1, 2, 3, 4):
        assert SUF.getPfromU(SUF.UBezier(p=degree)) == degree
        assert SUF.getPfromU(SUF.UUniform(p=degree, n=6)) == degree

    # Non-uniform clamped vector: degree 2 (triple boundary knots).
    custom = np.array([0, 0, 0, 0.2, 0.8, 1, 1, 1])
    assert SUF.getPfromU(custom) == 2
def test_getNfromU():
    """getNfromU recovers the basis-function count from a knot vector."""
    # For a Bezier vector of degree p, the count is p + 1.
    for degree in (1, 2, 3, 4):
        assert SUF.getNfromU(SUF.UBezier(p=degree)) == degree + 1
        assert SUF.getNfromU(SUF.UUniform(p=degree, n=6)) == 6

    # Non-uniform clamped vector: degree 2 with 5 basis functions.
    custom = np.array([0, 0, 0, 0.2, 0.8, 1, 1, 1])
    assert SUF.getNfromU(custom) == 5
def test_transformUtoH():
    """transformUtoH turns a knot vector into its knot-spacing vector H.

    Checked both in the default (whole-vector) form and with an explicit
    window size ``j``.
    """
    # (knot vector, expected H) for the default call.
    full_cases = [
        (SUF.UBezier(p=1), [1]),
        (SUF.UBezier(p=2), [0, 1, 0]),
        (SUF.UBezier(p=3), [0, 0, 1, 0, 0]),
        (SUF.UBezier(p=4), [0, 0, 0, 1, 0, 0, 0]),
        (SUF.UUniform(p=1, n=6), [0.2] * 5),
        (SUF.UUniform(p=2, n=6), [0, 0.25, 0.25, 0.25, 0.25, 0]),
        (SUF.UUniform(p=3, n=6), np.array([0, 0, 1, 1, 1, 0, 0]) / 3),
        (SUF.UUniform(p=4, n=6), np.array([0, 0, 0, 1, 1, 0, 0, 0]) / 2),
        # p = 2, n = 5
        (np.array([0, 0, 0, 0.2, 0.8, 1, 1, 1]), [0, 0.2, 0.6, 0.2, 0]),
        # p = 3, n = 6
        (np.array([0, 0, 0, 0, 0.2, 0.8, 1, 1, 1, 1]),
         [0, 0, 0.2, 0.6, 0.2, 0, 0]),
    ]
    for knots, expected in full_cases:
        np.testing.assert_almost_equal(
            np.asarray(expected, dtype=float), SUF.transformUtoH(knots))

    # (knot vector, j, expected H) for the windowed call.
    windowed_cases = [
        (SUF.UBezier(p=2), 0, [1]),
        (SUF.UBezier(p=2), 1, [1]),
        (SUF.UBezier(p=3), 0, [1]),
        (SUF.UBezier(p=3), 1, [1]),
        (SUF.UBezier(p=3), 2, [0, 1, 0]),
        (SUF.UBezier(p=4), 0, [1]),
        (SUF.UBezier(p=4), 1, [1]),
        (SUF.UBezier(p=4), 2, [0, 1, 0]),
        (SUF.UBezier(p=4), 3, [0, 0, 1, 0, 0]),
        (SUF.UUniform(p=1, n=6), 0, [0.2] * 5),
        (SUF.UUniform(p=2, n=6), 0, [0.25] * 4),
        (SUF.UUniform(p=2, n=6), 1, [0.25] * 4),
        (SUF.UUniform(p=3, n=6), 0, np.array([1, 1, 1]) / 3),
        (SUF.UUniform(p=3, n=6), 1, np.array([1, 1, 1]) / 3),
        (SUF.UUniform(p=3, n=6), 2, np.array([0, 1, 1, 1, 0]) / 3),
        (SUF.UUniform(p=4, n=6), 0, np.array([1, 1]) / 2),
        (SUF.UUniform(p=4, n=6), 1, np.array([1, 1]) / 2),
        (SUF.UUniform(p=4, n=6), 2, np.array([0, 1, 1, 0]) / 2),
        (SUF.UUniform(p=4, n=6), 3, np.array([0, 0, 1, 1, 0, 0]) / 2),
        # p = 2, n = 5
        (np.array([0, 0, 0, 0.2, 0.8, 1, 1, 1]), 1, [0.2, 0.6, 0.2]),
        # p = 3, n = 6
        (np.array([0, 0, 0, 0, 0.2, 0.8, 1, 1, 1, 1]), 1, [0.2, 0.6, 0.2]),
        (np.array([0, 0, 0, 0, 0.2, 0.8, 1, 1, 1, 1]), 2,
         [0, 0.2, 0.6, 0.2, 0]),
    ]
    for knots, window, expected in windowed_cases:
        np.testing.assert_almost_equal(
            np.asarray(expected, dtype=float),
            SUF.transformUtoH(knots, j=window))
def test_transformHtoSides():
    """transformHtoSides splits H around its central entry into left/right sides,
    normalised by the central value."""
    # (H vector, expected sides) pairs.
    cases = [
        ([1, 1, 1], [[1], [1]]),
        ([0, 1, 1], [[0], [1]]),
        ([1, 1, 0], [[1], [0]]),
        ([0, 1, 0], [[0], [0]]),
        ([0.6, 1, 0.3], [[0.6], [0.3]]),
        ([6, 10, 3], [[0.6], [0.3]]),  # scaled by the middle entry
        ([1, 1, 1, 1, 1], [[1, 1], [1, 1]]),
        ([0, 1, 1, 1, 1], [[1, 0], [1, 1]]),
        ([1, 1, 1, 1, 0], [[1, 1], [1, 0]]),
        ([0, 0, 1, 0, 0], [[0, 0], [0, 0]]),
        ([0.2, 0.6, 1, 0.3, 0.4], [[0.6, 0.2], [0.3, 0.4]]),
        ([2, 6, 10, 3, 4], [[0.6, 0.2], [0.3, 0.4]]),  # scaled case
    ]
    for spacing, expected in cases:
        np.testing.assert_almost_equal(
            np.array(expected), SUF.transformHtoSides(np.array(spacing)))
def test_cutHtoElementZ():
    """cutHtoElementZ extracts the Z window of H belonging to element ``index``."""
    # (H vector, element index, expected Z) triples.
    cases = [
        ([0.5, 0.5], 0, [0.5]),
        ([0.5, 0.5], 1, [0.5]),
        ([0, 0.5, 0.5, 0], 0, [0, 0.5, 0.5]),
        ([0, 0.5, 0.5, 0], 1, [0.5, 0.5, 0]),
        ([0, 0, 0.5, 0.5, 0, 0], 0, [0, 0, 0.5, 0.5, 0]),
        ([0, 0, 0.5, 0.5, 0, 0], 1, [0, 0.5, 0.5, 0, 0]),
    ]
    for spacing, index, expected in cases:
        np.testing.assert_almost_equal(
            np.array(expected),
            SUF.cutHtoElementZ(np.array(spacing), index))
def test_isDiagonalDominant():
    """isDiagonalDominant: identity is dominant; flat/zero matrices are not.

    The strict-inequality boundary is probed with eye(3) - 1/3 (sum of
    off-diagonal magnitudes equals the diagonal, so not dominant) versus a
    slightly inflated diagonal, which is.
    """
    assert SUF.isDiagonalDominant(np.eye(3)) is True
    assert SUF.isDiagonalDominant(np.ones((3, 3))) is False
    assert SUF.isDiagonalDominant(np.zeros((3, 3))) is False
    assert SUF.isDiagonalDominant(np.eye(3) - 1 / 3) is False
    assert SUF.isDiagonalDominant(1.0001 * np.eye(3) - 1 / 3) is True
# def main():
# unittest.main()
# if __name__ == "__main__":
# main()
| [
"numpy.eye",
"femnurbs.SplineUsefulFunctions.getNfromU",
"femnurbs.SplineUsefulFunctions.transformHtoSides",
"numpy.ones",
"femnurbs.SplineUsefulFunctions.transformUtoH",
"femnurbs.SplineUsefulFunctions.isDiagonalDominant",
"femnurbs.SplineUsefulFunctions.isValidU",
"femnurbs.SplineUsefulFunctions.URa... | [((1399, 1421), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (1407, 1421), True, 'import numpy as np\n'), ((1435, 1451), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(1)'}), '(p=1)\n', (1446, 1451), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((1457, 1501), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Ugood', 'Utest'], {}), '(Ugood, Utest)\n', (1487, 1501), True, 'import numpy as np\n'), ((1517, 1545), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1]'], {}), '([0, 0, 0, 1, 1, 1])\n', (1525, 1545), True, 'import numpy as np\n'), ((1559, 1575), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(2)'}), '(p=2)\n', (1570, 1575), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((1581, 1625), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Ugood', 'Utest'], {}), '(Ugood, Utest)\n', (1611, 1625), True, 'import numpy as np\n'), ((1641, 1675), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 1, 1, 1, 1]'], {}), '([0, 0, 0, 0, 1, 1, 1, 1])\n', (1649, 1675), True, 'import numpy as np\n'), ((1689, 1705), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(3)'}), '(p=3)\n', (1700, 1705), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((1711, 1755), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Ugood', 'Utest'], {}), '(Ugood, Utest)\n', (1741, 1755), True, 'import numpy as np\n'), ((1926, 1948), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (1934, 1948), True, 'import numpy as np\n'), ((1962, 1984), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(1)', 'n': '(2)'}), '(p=1, n=2)\n', (1974, 1984), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((1990, 2034), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Ugood', 'Utest'], {}), '(Ugood, Utest)\n', (2020, 2034), True, 
'import numpy as np\n'), ((2050, 2077), 'numpy.array', 'np.array', (['[0, 0, 0.5, 1, 1]'], {}), '([0, 0, 0.5, 1, 1])\n', (2058, 2077), True, 'import numpy as np\n'), ((2091, 2113), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(1)', 'n': '(3)'}), '(p=1, n=3)\n', (2103, 2113), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((2119, 2163), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Ugood', 'Utest'], {}), '(Ugood, Utest)\n', (2149, 2163), True, 'import numpy as np\n'), ((2179, 2218), 'numpy.array', 'np.array', (['[0, 0, 0.25, 0.5, 0.75, 1, 1]'], {}), '([0, 0, 0.25, 0.5, 0.75, 1, 1])\n', (2187, 2218), True, 'import numpy as np\n'), ((2232, 2254), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(1)', 'n': '(5)'}), '(p=1, n=5)\n', (2244, 2254), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((2260, 2304), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Ugood', 'Utest'], {}), '(Ugood, Utest)\n', (2290, 2304), True, 'import numpy as np\n'), ((2320, 2362), 'numpy.array', 'np.array', (['[0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1]'], {}), '([0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1])\n', (2328, 2362), True, 'import numpy as np\n'), ((2376, 2398), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(1)', 'n': '(6)'}), '(p=1, n=6)\n', (2388, 2398), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((2404, 2448), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Ugood', 'Utest'], {}), '(Ugood, Utest)\n', (2434, 2448), True, 'import numpy as np\n'), ((2464, 2492), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1]'], {}), '([0, 0, 0, 1, 1, 1])\n', (2472, 2492), True, 'import numpy as np\n'), ((2506, 2528), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(2)', 'n': '(3)'}), '(p=2, n=3)\n', (2518, 2528), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((2534, 2578), 'numpy.testing.assert_almost_equal', 
'np.testing.assert_almost_equal', (['Ugood', 'Utest'], {}), '(Ugood, Utest)\n', (2564, 2578), True, 'import numpy as np\n'), ((2594, 2627), 'numpy.array', 'np.array', (['[0, 0, 0, 0.5, 1, 1, 1]'], {}), '([0, 0, 0, 0.5, 1, 1, 1])\n', (2602, 2627), True, 'import numpy as np\n'), ((2641, 2663), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(2)', 'n': '(4)'}), '(p=2, n=4)\n', (2653, 2663), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((2669, 2713), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Ugood', 'Utest'], {}), '(Ugood, Utest)\n', (2699, 2713), True, 'import numpy as np\n'), ((2729, 2774), 'numpy.array', 'np.array', (['[0, 0, 0, 0.25, 0.5, 0.75, 1, 1, 1]'], {}), '([0, 0, 0, 0.25, 0.5, 0.75, 1, 1, 1])\n', (2737, 2774), True, 'import numpy as np\n'), ((2788, 2810), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(2)', 'n': '(6)'}), '(p=2, n=6)\n', (2800, 2810), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((2816, 2860), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Ugood', 'Utest'], {}), '(Ugood, Utest)\n', (2846, 2860), True, 'import numpy as np\n'), ((2876, 2924), 'numpy.array', 'np.array', (['[0, 0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1, 1]'], {}), '([0, 0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1, 1])\n', (2884, 2924), True, 'import numpy as np\n'), ((2938, 2960), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(2)', 'n': '(7)'}), '(p=2, n=7)\n', (2950, 2960), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((2966, 3010), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Ugood', 'Utest'], {}), '(Ugood, Utest)\n', (2996, 3010), True, 'import numpy as np\n'), ((3026, 3060), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 1, 1, 1, 1]'], {}), '([0, 0, 0, 0, 1, 1, 1, 1])\n', (3034, 3060), True, 'import numpy as np\n'), ((3074, 3096), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(3)', 'n': 
'(4)'}), '(p=3, n=4)\n', (3086, 3096), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((3102, 3146), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Ugood', 'Utest'], {}), '(Ugood, Utest)\n', (3132, 3146), True, 'import numpy as np\n'), ((3162, 3201), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0.5, 1, 1, 1, 1]'], {}), '([0, 0, 0, 0, 0.5, 1, 1, 1, 1])\n', (3170, 3201), True, 'import numpy as np\n'), ((3215, 3237), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(3)', 'n': '(5)'}), '(p=3, n=5)\n', (3227, 3237), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((3243, 3287), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Ugood', 'Utest'], {}), '(Ugood, Utest)\n', (3273, 3287), True, 'import numpy as np\n'), ((3303, 3354), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0.25, 0.5, 0.75, 1, 1, 1, 1]'], {}), '([0, 0, 0, 0, 0.25, 0.5, 0.75, 1, 1, 1, 1])\n', (3311, 3354), True, 'import numpy as np\n'), ((3368, 3390), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(3)', 'n': '(7)'}), '(p=3, n=7)\n', (3380, 3390), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((3396, 3440), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Ugood', 'Utest'], {}), '(Ugood, Utest)\n', (3426, 3440), True, 'import numpy as np\n'), ((3456, 3510), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1, 1, 1]'], {}), '([0, 0, 0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1, 1, 1])\n', (3464, 3510), True, 'import numpy as np\n'), ((3524, 3546), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(3)', 'n': '(8)'}), '(p=3, n=8)\n', (3536, 3546), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((3552, 3596), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Ugood', 'Utest'], {}), '(Ugood, Utest)\n', (3582, 3596), True, 'import numpy as np\n'), ((3957, 3966), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', 
(3963, 3966), True, 'import numpy as np\n'), ((3981, 3998), 'femnurbs.SplineUsefulFunctions.transpose', 'SUF.transpose', (['II'], {}), '(II)\n', (3994, 3998), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((4004, 4046), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['IItest', 'II'], {}), '(IItest, II)\n', (4034, 4046), True, 'import numpy as np\n'), ((4059, 4068), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4065, 4068), True, 'import numpy as np\n'), ((4083, 4100), 'femnurbs.SplineUsefulFunctions.transpose', 'SUF.transpose', (['II'], {}), '(II)\n', (4096, 4100), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((4106, 4148), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['IItest', 'II'], {}), '(IItest, II)\n', (4136, 4148), True, 'import numpy as np\n'), ((4161, 4170), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4167, 4170), True, 'import numpy as np\n'), ((4185, 4214), 'femnurbs.SplineUsefulFunctions.transpose', 'SUF.transpose', (['II'], {'diagonal': '(2)'}), '(II, diagonal=2)\n', (4198, 4214), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((4220, 4262), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['IItest', 'II'], {}), '(IItest, II)\n', (4250, 4262), True, 'import numpy as np\n'), ((4275, 4284), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4281, 4284), True, 'import numpy as np\n'), ((4299, 4328), 'femnurbs.SplineUsefulFunctions.transpose', 'SUF.transpose', (['II'], {'diagonal': '(2)'}), '(II, diagonal=2)\n', (4312, 4328), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((4334, 4376), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['IItest', 'II'], {}), '(IItest, II)\n', (4364, 4376), True, 'import numpy as np\n'), ((4415, 4424), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4421, 4424), True, 'import numpy as np\n'), ((4476, 4485), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4482, 4485), True, 'import 
numpy as np\n'), ((4537, 4546), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4543, 4546), True, 'import numpy as np\n'), ((4610, 4619), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4616, 4619), True, 'import numpy as np\n'), ((4683, 4721), 'numpy.array', 'np.array', (['[[1, 2, 3, 4], [4, 3, 2, 1]]'], {}), '([[1, 2, 3, 4], [4, 3, 2, 1]])\n', (4691, 4721), True, 'import numpy as np\n'), ((4805, 4843), 'numpy.array', 'np.array', (['[[1, 2, 4, 4], [4, 4, 2, 1]]'], {}), '([[1, 2, 4, 4], [4, 4, 2, 1]])\n', (4813, 4843), True, 'import numpy as np\n'), ((4927, 4965), 'numpy.array', 'np.array', (['[[7, 2, 4, 3], [4, 4, 2, 7]]'], {}), '([[7, 2, 4, 3], [4, 4, 2, 7]])\n', (4935, 4965), True, 'import numpy as np\n'), ((5050, 5088), 'numpy.array', 'np.array', (['[[7, 2, 4, 7], [7, 4, 2, 3]]'], {}), '([[7, 2, 4, 7], [7, 4, 2, 3]])\n', (5058, 5088), True, 'import numpy as np\n'), ((5197, 5213), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(1)'}), '(p=1)\n', (5208, 5213), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((5227, 5243), 'femnurbs.SplineUsefulFunctions.getPfromU', 'SUF.getPfromU', (['U'], {}), '(U)\n', (5240, 5243), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((5278, 5294), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(2)'}), '(p=2)\n', (5289, 5294), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((5308, 5324), 'femnurbs.SplineUsefulFunctions.getPfromU', 'SUF.getPfromU', (['U'], {}), '(U)\n', (5321, 5324), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((5359, 5375), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(3)'}), '(p=3)\n', (5370, 5375), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((5389, 5405), 'femnurbs.SplineUsefulFunctions.getPfromU', 'SUF.getPfromU', (['U'], {}), '(U)\n', (5402, 5405), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((5440, 5456), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': 
'(4)'}), '(p=4)\n', (5451, 5456), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((5470, 5486), 'femnurbs.SplineUsefulFunctions.getPfromU', 'SUF.getPfromU', (['U'], {}), '(U)\n', (5483, 5486), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((5521, 5543), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(1)', 'n': '(6)'}), '(p=1, n=6)\n', (5533, 5543), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((5557, 5573), 'femnurbs.SplineUsefulFunctions.getPfromU', 'SUF.getPfromU', (['U'], {}), '(U)\n', (5570, 5573), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((5608, 5630), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(2)', 'n': '(6)'}), '(p=2, n=6)\n', (5620, 5630), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((5644, 5660), 'femnurbs.SplineUsefulFunctions.getPfromU', 'SUF.getPfromU', (['U'], {}), '(U)\n', (5657, 5660), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((5695, 5717), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(3)', 'n': '(6)'}), '(p=3, n=6)\n', (5707, 5717), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((5731, 5747), 'femnurbs.SplineUsefulFunctions.getPfromU', 'SUF.getPfromU', (['U'], {}), '(U)\n', (5744, 5747), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((5782, 5804), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(4)', 'n': '(6)'}), '(p=4, n=6)\n', (5794, 5804), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((5818, 5834), 'femnurbs.SplineUsefulFunctions.getPfromU', 'SUF.getPfromU', (['U'], {}), '(U)\n', (5831, 5834), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((5869, 5907), 'numpy.array', 'np.array', (['[0, 0, 0, 0.2, 0.8, 1, 1, 1]'], {}), '([0, 0, 0, 0.2, 0.8, 1, 1, 1])\n', (5877, 5907), True, 'import numpy as np\n'), ((5921, 5937), 'femnurbs.SplineUsefulFunctions.getPfromU', 'SUF.getPfromU', (['U'], {}), '(U)\n', (5934, 5937), True, 'import 
femnurbs.SplineUsefulFunctions as SUF\n'), ((5997, 6013), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(1)'}), '(p=1)\n', (6008, 6013), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6027, 6043), 'femnurbs.SplineUsefulFunctions.getNfromU', 'SUF.getNfromU', (['U'], {}), '(U)\n', (6040, 6043), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6078, 6094), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(2)'}), '(p=2)\n', (6089, 6094), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6108, 6124), 'femnurbs.SplineUsefulFunctions.getNfromU', 'SUF.getNfromU', (['U'], {}), '(U)\n', (6121, 6124), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6159, 6175), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(3)'}), '(p=3)\n', (6170, 6175), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6189, 6205), 'femnurbs.SplineUsefulFunctions.getNfromU', 'SUF.getNfromU', (['U'], {}), '(U)\n', (6202, 6205), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6240, 6256), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(4)'}), '(p=4)\n', (6251, 6256), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6270, 6286), 'femnurbs.SplineUsefulFunctions.getNfromU', 'SUF.getNfromU', (['U'], {}), '(U)\n', (6283, 6286), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6321, 6343), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(1)', 'n': '(6)'}), '(p=1, n=6)\n', (6333, 6343), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6357, 6373), 'femnurbs.SplineUsefulFunctions.getNfromU', 'SUF.getNfromU', (['U'], {}), '(U)\n', (6370, 6373), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6408, 6430), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(2)', 'n': '(6)'}), '(p=2, n=6)\n', (6420, 6430), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6444, 6460), 
'femnurbs.SplineUsefulFunctions.getNfromU', 'SUF.getNfromU', (['U'], {}), '(U)\n', (6457, 6460), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6495, 6517), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(3)', 'n': '(6)'}), '(p=3, n=6)\n', (6507, 6517), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6531, 6547), 'femnurbs.SplineUsefulFunctions.getNfromU', 'SUF.getNfromU', (['U'], {}), '(U)\n', (6544, 6547), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6582, 6604), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(4)', 'n': '(6)'}), '(p=4, n=6)\n', (6594, 6604), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6618, 6634), 'femnurbs.SplineUsefulFunctions.getNfromU', 'SUF.getNfromU', (['U'], {}), '(U)\n', (6631, 6634), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6669, 6707), 'numpy.array', 'np.array', (['[0, 0, 0, 0.2, 0.8, 1, 1, 1]'], {}), '([0, 0, 0, 0.2, 0.8, 1, 1, 1])\n', (6677, 6707), True, 'import numpy as np\n'), ((6721, 6737), 'femnurbs.SplineUsefulFunctions.getNfromU', 'SUF.getNfromU', (['U'], {}), '(U)\n', (6734, 6737), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6801, 6817), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(1)'}), '(p=1)\n', (6812, 6817), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6831, 6844), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (6839, 6844), True, 'import numpy as np\n'), ((6858, 6878), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {}), '(U)\n', (6875, 6878), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((6884, 6928), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (6914, 6928), True, 'import numpy as np\n'), ((6940, 6956), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(2)'}), '(p=2)\n', (6951, 6956), True, 'import 
femnurbs.SplineUsefulFunctions as SUF\n'), ((6970, 6989), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (6978, 6989), True, 'import numpy as np\n'), ((7003, 7023), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {}), '(U)\n', (7020, 7023), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((7029, 7073), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (7059, 7073), True, 'import numpy as np\n'), ((7085, 7101), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(3)'}), '(p=3)\n', (7096, 7101), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((7115, 7140), 'numpy.array', 'np.array', (['[0, 0, 1, 0, 0]'], {}), '([0, 0, 1, 0, 0])\n', (7123, 7140), True, 'import numpy as np\n'), ((7154, 7174), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {}), '(U)\n', (7171, 7174), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((7180, 7224), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (7210, 7224), True, 'import numpy as np\n'), ((7236, 7252), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(4)'}), '(p=4)\n', (7247, 7252), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((7266, 7297), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 0, 0, 0]'], {}), '([0, 0, 0, 1, 0, 0, 0])\n', (7274, 7297), True, 'import numpy as np\n'), ((7311, 7331), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {}), '(U)\n', (7328, 7331), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((7337, 7381), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (7367, 7381), True, 'import numpy as np\n'), ((7393, 7415), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(1)', 'n': '(6)'}), '(p=1, n=6)\n', (7405, 7415), 
True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((7429, 7464), 'numpy.array', 'np.array', (['[0.2, 0.2, 0.2, 0.2, 0.2]'], {}), '([0.2, 0.2, 0.2, 0.2, 0.2])\n', (7437, 7464), True, 'import numpy as np\n'), ((7478, 7498), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {}), '(U)\n', (7495, 7498), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((7504, 7548), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (7534, 7548), True, 'import numpy as np\n'), ((7560, 7582), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(2)', 'n': '(6)'}), '(p=2, n=6)\n', (7572, 7582), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((7596, 7636), 'numpy.array', 'np.array', (['[0, 0.25, 0.25, 0.25, 0.25, 0]'], {}), '([0, 0.25, 0.25, 0.25, 0.25, 0])\n', (7604, 7636), True, 'import numpy as np\n'), ((7650, 7670), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {}), '(U)\n', (7667, 7670), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((7676, 7720), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (7706, 7720), True, 'import numpy as np\n'), ((7732, 7754), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(3)', 'n': '(6)'}), '(p=3, n=6)\n', (7744, 7754), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((7817, 7837), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {}), '(U)\n', (7834, 7837), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((7843, 7887), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (7873, 7887), True, 'import numpy as np\n'), ((7899, 7921), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(4)', 'n': '(6)'}), '(p=4, n=6)\n', (7911, 7921), True, 'import 
femnurbs.SplineUsefulFunctions as SUF\n'), ((7987, 8007), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {}), '(U)\n', (8004, 8007), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((8013, 8057), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (8043, 8057), True, 'import numpy as np\n'), ((8069, 8107), 'numpy.array', 'np.array', (['[0, 0, 0, 0.2, 0.8, 1, 1, 1]'], {}), '([0, 0, 0, 0.2, 0.8, 1, 1, 1])\n', (8077, 8107), True, 'import numpy as np\n'), ((8140, 8171), 'numpy.array', 'np.array', (['[0, 0.2, 0.6, 0.2, 0]'], {}), '([0, 0.2, 0.6, 0.2, 0])\n', (8148, 8171), True, 'import numpy as np\n'), ((8185, 8205), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {}), '(U)\n', (8202, 8205), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((8211, 8255), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (8241, 8255), True, 'import numpy as np\n'), ((8267, 8311), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0.2, 0.8, 1, 1, 1, 1]'], {}), '([0, 0, 0, 0, 0.2, 0.8, 1, 1, 1, 1])\n', (8275, 8311), True, 'import numpy as np\n'), ((8344, 8381), 'numpy.array', 'np.array', (['[0, 0, 0.2, 0.6, 0.2, 0, 0]'], {}), '([0, 0, 0.2, 0.6, 0.2, 0, 0])\n', (8352, 8381), True, 'import numpy as np\n'), ((8395, 8415), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {}), '(U)\n', (8412, 8415), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((8421, 8465), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (8451, 8465), True, 'import numpy as np\n'), ((8477, 8493), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(2)'}), '(p=2)\n', (8488, 8493), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((8507, 8520), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', 
(8515, 8520), True, 'import numpy as np\n'), ((8534, 8559), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(0)'}), '(U, j=0)\n', (8551, 8559), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((8565, 8609), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (8595, 8609), True, 'import numpy as np\n'), ((8621, 8637), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(2)'}), '(p=2)\n', (8632, 8637), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((8651, 8664), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (8659, 8664), True, 'import numpy as np\n'), ((8678, 8703), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(1)'}), '(U, j=1)\n', (8695, 8703), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((8709, 8753), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (8739, 8753), True, 'import numpy as np\n'), ((8765, 8781), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(3)'}), '(p=3)\n', (8776, 8781), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((8795, 8808), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (8803, 8808), True, 'import numpy as np\n'), ((8822, 8847), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(0)'}), '(U, j=0)\n', (8839, 8847), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((8853, 8897), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (8883, 8897), True, 'import numpy as np\n'), ((8909, 8925), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(3)'}), '(p=3)\n', (8920, 8925), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((8939, 8952), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (8947, 8952), True, 'import 
numpy as np\n'), ((8966, 8991), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(1)'}), '(U, j=1)\n', (8983, 8991), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((8997, 9041), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (9027, 9041), True, 'import numpy as np\n'), ((9053, 9069), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(3)'}), '(p=3)\n', (9064, 9069), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((9083, 9102), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (9091, 9102), True, 'import numpy as np\n'), ((9116, 9141), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(2)'}), '(U, j=2)\n', (9133, 9141), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((9147, 9191), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (9177, 9191), True, 'import numpy as np\n'), ((9203, 9219), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(4)'}), '(p=4)\n', (9214, 9219), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((9233, 9246), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (9241, 9246), True, 'import numpy as np\n'), ((9260, 9285), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(0)'}), '(U, j=0)\n', (9277, 9285), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((9291, 9335), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (9321, 9335), True, 'import numpy as np\n'), ((9347, 9363), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(4)'}), '(p=4)\n', (9358, 9363), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((9377, 9390), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (9385, 9390), True, 'import numpy as np\n'), 
((9404, 9429), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(1)'}), '(U, j=1)\n', (9421, 9429), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((9435, 9479), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (9465, 9479), True, 'import numpy as np\n'), ((9491, 9507), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(4)'}), '(p=4)\n', (9502, 9507), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((9521, 9540), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (9529, 9540), True, 'import numpy as np\n'), ((9554, 9579), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(2)'}), '(U, j=2)\n', (9571, 9579), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((9585, 9629), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (9615, 9629), True, 'import numpy as np\n'), ((9641, 9657), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': '(4)'}), '(p=4)\n', (9652, 9657), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((9671, 9696), 'numpy.array', 'np.array', (['[0, 0, 1, 0, 0]'], {}), '([0, 0, 1, 0, 0])\n', (9679, 9696), True, 'import numpy as np\n'), ((9710, 9735), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(3)'}), '(U, j=3)\n', (9727, 9735), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((9741, 9785), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (9771, 9785), True, 'import numpy as np\n'), ((9797, 9819), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(1)', 'n': '(6)'}), '(p=1, n=6)\n', (9809, 9819), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((9833, 9868), 'numpy.array', 'np.array', (['[0.2, 0.2, 0.2, 0.2, 0.2]'], {}), '([0.2, 
0.2, 0.2, 0.2, 0.2])\n', (9841, 9868), True, 'import numpy as np\n'), ((9882, 9907), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(0)'}), '(U, j=0)\n', (9899, 9907), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((9913, 9957), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (9943, 9957), True, 'import numpy as np\n'), ((9969, 9991), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(2)', 'n': '(6)'}), '(p=2, n=6)\n', (9981, 9991), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((10005, 10039), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25, 0.25])\n', (10013, 10039), True, 'import numpy as np\n'), ((10053, 10078), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(0)'}), '(U, j=0)\n', (10070, 10078), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((10084, 10128), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (10114, 10128), True, 'import numpy as np\n'), ((10140, 10162), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(2)', 'n': '(6)'}), '(p=2, n=6)\n', (10152, 10162), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((10176, 10210), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25, 0.25])\n', (10184, 10210), True, 'import numpy as np\n'), ((10224, 10249), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(1)'}), '(U, j=1)\n', (10241, 10249), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((10255, 10299), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (10285, 10299), True, 'import numpy as np\n'), ((10311, 10333), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(3)', 'n': 
'(6)'}), '(p=3, n=6)\n', (10323, 10333), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((10384, 10409), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(0)'}), '(U, j=0)\n', (10401, 10409), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((10415, 10459), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (10445, 10459), True, 'import numpy as np\n'), ((10471, 10493), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(3)', 'n': '(6)'}), '(p=3, n=6)\n', (10483, 10493), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((10544, 10569), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(1)'}), '(U, j=1)\n', (10561, 10569), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((10575, 10619), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (10605, 10619), True, 'import numpy as np\n'), ((10631, 10653), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(3)', 'n': '(6)'}), '(p=3, n=6)\n', (10643, 10653), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((10710, 10735), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(2)'}), '(U, j=2)\n', (10727, 10735), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((10741, 10785), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (10771, 10785), True, 'import numpy as np\n'), ((10797, 10819), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(4)', 'n': '(6)'}), '(p=4, n=6)\n', (10809, 10819), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((10867, 10892), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(0)'}), '(U, j=0)\n', (10884, 10892), True, 'import 
femnurbs.SplineUsefulFunctions as SUF\n'), ((10898, 10942), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (10928, 10942), True, 'import numpy as np\n'), ((10954, 10976), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(4)', 'n': '(6)'}), '(p=4, n=6)\n', (10966, 10976), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((11024, 11049), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(1)'}), '(U, j=1)\n', (11041, 11049), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((11055, 11099), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (11085, 11099), True, 'import numpy as np\n'), ((11111, 11133), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(4)', 'n': '(6)'}), '(p=4, n=6)\n', (11123, 11133), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((11187, 11212), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(2)'}), '(U, j=2)\n', (11204, 11212), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((11218, 11262), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (11248, 11262), True, 'import numpy as np\n'), ((11274, 11296), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': '(4)', 'n': '(6)'}), '(p=4, n=6)\n', (11286, 11296), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((11356, 11381), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(3)'}), '(U, j=3)\n', (11373, 11381), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((11387, 11431), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (11417, 11431), True, 'import numpy as np\n'), ((11443, 11481), 'numpy.array', 'np.array', (['[0, 0, 
0, 0.2, 0.8, 1, 1, 1]'], {}), '([0, 0, 0, 0.2, 0.8, 1, 1, 1])\n', (11451, 11481), True, 'import numpy as np\n'), ((11514, 11539), 'numpy.array', 'np.array', (['[0.2, 0.6, 0.2]'], {}), '([0.2, 0.6, 0.2])\n', (11522, 11539), True, 'import numpy as np\n'), ((11553, 11578), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(1)'}), '(U, j=1)\n', (11570, 11578), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((11584, 11628), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (11614, 11628), True, 'import numpy as np\n'), ((11640, 11684), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0.2, 0.8, 1, 1, 1, 1]'], {}), '([0, 0, 0, 0, 0.2, 0.8, 1, 1, 1, 1])\n', (11648, 11684), True, 'import numpy as np\n'), ((11717, 11742), 'numpy.array', 'np.array', (['[0.2, 0.6, 0.2]'], {}), '([0.2, 0.6, 0.2])\n', (11725, 11742), True, 'import numpy as np\n'), ((11756, 11781), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(1)'}), '(U, j=1)\n', (11773, 11781), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((11787, 11831), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (11817, 11831), True, 'import numpy as np\n'), ((11843, 11887), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0.2, 0.8, 1, 1, 1, 1]'], {}), '([0, 0, 0, 0, 0.2, 0.8, 1, 1, 1, 1])\n', (11851, 11887), True, 'import numpy as np\n'), ((11920, 11951), 'numpy.array', 'np.array', (['[0, 0.2, 0.6, 0.2, 0]'], {}), '([0, 0.2, 0.6, 0.2, 0])\n', (11928, 11951), True, 'import numpy as np\n'), ((11965, 11990), 'femnurbs.SplineUsefulFunctions.transformUtoH', 'SUF.transformUtoH', (['U'], {'j': '(2)'}), '(U, j=2)\n', (11982, 11990), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((11996, 12040), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Hgood', 'Htest'], {}), '(Hgood, Htest)\n', (12026, 
12040), True, 'import numpy as np\n'), ((12085, 12104), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (12093, 12104), True, 'import numpy as np\n'), ((12118, 12138), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (12126, 12138), True, 'import numpy as np\n'), ((12152, 12176), 'femnurbs.SplineUsefulFunctions.transformHtoSides', 'SUF.transformHtoSides', (['H'], {}), '(H)\n', (12173, 12176), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((12182, 12226), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Sgood', 'Stest'], {}), '(Sgood, Stest)\n', (12212, 12226), True, 'import numpy as np\n'), ((12238, 12257), 'numpy.array', 'np.array', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (12246, 12257), True, 'import numpy as np\n'), ((12271, 12291), 'numpy.array', 'np.array', (['[[0], [1]]'], {}), '([[0], [1]])\n', (12279, 12291), True, 'import numpy as np\n'), ((12305, 12329), 'femnurbs.SplineUsefulFunctions.transformHtoSides', 'SUF.transformHtoSides', (['H'], {}), '(H)\n', (12326, 12329), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((12335, 12379), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Sgood', 'Stest'], {}), '(Sgood, Stest)\n', (12365, 12379), True, 'import numpy as np\n'), ((12391, 12410), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (12399, 12410), True, 'import numpy as np\n'), ((12424, 12444), 'numpy.array', 'np.array', (['[[1], [0]]'], {}), '([[1], [0]])\n', (12432, 12444), True, 'import numpy as np\n'), ((12458, 12482), 'femnurbs.SplineUsefulFunctions.transformHtoSides', 'SUF.transformHtoSides', (['H'], {}), '(H)\n', (12479, 12482), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((12488, 12532), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Sgood', 'Stest'], {}), '(Sgood, Stest)\n', (12518, 12532), True, 'import numpy as np\n'), ((12544, 12563), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 
1, 0])\n', (12552, 12563), True, 'import numpy as np\n'), ((12577, 12597), 'numpy.array', 'np.array', (['[[0], [0]]'], {}), '([[0], [0]])\n', (12585, 12597), True, 'import numpy as np\n'), ((12611, 12635), 'femnurbs.SplineUsefulFunctions.transformHtoSides', 'SUF.transformHtoSides', (['H'], {}), '(H)\n', (12632, 12635), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((12641, 12685), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Sgood', 'Stest'], {}), '(Sgood, Stest)\n', (12671, 12685), True, 'import numpy as np\n'), ((12697, 12720), 'numpy.array', 'np.array', (['[0.6, 1, 0.3]'], {}), '([0.6, 1, 0.3])\n', (12705, 12720), True, 'import numpy as np\n'), ((12734, 12758), 'numpy.array', 'np.array', (['[[0.6], [0.3]]'], {}), '([[0.6], [0.3]])\n', (12742, 12758), True, 'import numpy as np\n'), ((12772, 12796), 'femnurbs.SplineUsefulFunctions.transformHtoSides', 'SUF.transformHtoSides', (['H'], {}), '(H)\n', (12793, 12796), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((12802, 12846), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Sgood', 'Stest'], {}), '(Sgood, Stest)\n', (12832, 12846), True, 'import numpy as np\n'), ((12858, 12878), 'numpy.array', 'np.array', (['[6, 10, 3]'], {}), '([6, 10, 3])\n', (12866, 12878), True, 'import numpy as np\n'), ((12892, 12916), 'numpy.array', 'np.array', (['[[0.6], [0.3]]'], {}), '([[0.6], [0.3]])\n', (12900, 12916), True, 'import numpy as np\n'), ((12930, 12954), 'femnurbs.SplineUsefulFunctions.transformHtoSides', 'SUF.transformHtoSides', (['H'], {}), '(H)\n', (12951, 12954), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((12960, 13004), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Sgood', 'Stest'], {}), '(Sgood, Stest)\n', (12990, 13004), True, 'import numpy as np\n'), ((13016, 13041), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1])\n', (13024, 13041), True, 'import numpy as np\n'), ((13055, 
13081), 'numpy.array', 'np.array', (['[[1, 1], [1, 1]]'], {}), '([[1, 1], [1, 1]])\n', (13063, 13081), True, 'import numpy as np\n'), ((13095, 13119), 'femnurbs.SplineUsefulFunctions.transformHtoSides', 'SUF.transformHtoSides', (['H'], {}), '(H)\n', (13116, 13119), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((13125, 13169), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Sgood', 'Stest'], {}), '(Sgood, Stest)\n', (13155, 13169), True, 'import numpy as np\n'), ((13181, 13206), 'numpy.array', 'np.array', (['[0, 1, 1, 1, 1]'], {}), '([0, 1, 1, 1, 1])\n', (13189, 13206), True, 'import numpy as np\n'), ((13220, 13246), 'numpy.array', 'np.array', (['[[1, 0], [1, 1]]'], {}), '([[1, 0], [1, 1]])\n', (13228, 13246), True, 'import numpy as np\n'), ((13260, 13284), 'femnurbs.SplineUsefulFunctions.transformHtoSides', 'SUF.transformHtoSides', (['H'], {}), '(H)\n', (13281, 13284), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((13290, 13334), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Sgood', 'Stest'], {}), '(Sgood, Stest)\n', (13320, 13334), True, 'import numpy as np\n'), ((13346, 13371), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 0]'], {}), '([1, 1, 1, 1, 0])\n', (13354, 13371), True, 'import numpy as np\n'), ((13385, 13411), 'numpy.array', 'np.array', (['[[1, 1], [1, 0]]'], {}), '([[1, 1], [1, 0]])\n', (13393, 13411), True, 'import numpy as np\n'), ((13425, 13449), 'femnurbs.SplineUsefulFunctions.transformHtoSides', 'SUF.transformHtoSides', (['H'], {}), '(H)\n', (13446, 13449), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((13455, 13499), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Sgood', 'Stest'], {}), '(Sgood, Stest)\n', (13485, 13499), True, 'import numpy as np\n'), ((13511, 13536), 'numpy.array', 'np.array', (['[0, 0, 1, 0, 0]'], {}), '([0, 0, 1, 0, 0])\n', (13519, 13536), True, 'import numpy as np\n'), ((13550, 13576), 'numpy.array', 'np.array', 
(['[[0, 0], [0, 0]]'], {}), '([[0, 0], [0, 0]])\n', (13558, 13576), True, 'import numpy as np\n'), ((13590, 13614), 'femnurbs.SplineUsefulFunctions.transformHtoSides', 'SUF.transformHtoSides', (['H'], {}), '(H)\n', (13611, 13614), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((13620, 13664), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Sgood', 'Stest'], {}), '(Sgood, Stest)\n', (13650, 13664), True, 'import numpy as np\n'), ((13676, 13709), 'numpy.array', 'np.array', (['[0.2, 0.6, 1, 0.3, 0.4]'], {}), '([0.2, 0.6, 1, 0.3, 0.4])\n', (13684, 13709), True, 'import numpy as np\n'), ((13723, 13757), 'numpy.array', 'np.array', (['[[0.6, 0.2], [0.3, 0.4]]'], {}), '([[0.6, 0.2], [0.3, 0.4]])\n', (13731, 13757), True, 'import numpy as np\n'), ((13771, 13795), 'femnurbs.SplineUsefulFunctions.transformHtoSides', 'SUF.transformHtoSides', (['H'], {}), '(H)\n', (13792, 13795), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((13801, 13845), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Sgood', 'Stest'], {}), '(Sgood, Stest)\n', (13831, 13845), True, 'import numpy as np\n'), ((13857, 13883), 'numpy.array', 'np.array', (['[2, 6, 10, 3, 4]'], {}), '([2, 6, 10, 3, 4])\n', (13865, 13883), True, 'import numpy as np\n'), ((13897, 13931), 'numpy.array', 'np.array', (['[[0.6, 0.2], [0.3, 0.4]]'], {}), '([[0.6, 0.2], [0.3, 0.4]])\n', (13905, 13931), True, 'import numpy as np\n'), ((13945, 13969), 'femnurbs.SplineUsefulFunctions.transformHtoSides', 'SUF.transformHtoSides', (['H'], {}), '(H)\n', (13966, 13969), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((13975, 14019), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Sgood', 'Stest'], {}), '(Sgood, Stest)\n', (14005, 14019), True, 'import numpy as np\n'), ((14061, 14081), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (14069, 14081), True, 'import numpy as np\n'), ((14095, 14110), 'numpy.array', 
'np.array', (['[0.5]'], {}), '([0.5])\n', (14103, 14110), True, 'import numpy as np\n'), ((14124, 14148), 'femnurbs.SplineUsefulFunctions.cutHtoElementZ', 'SUF.cutHtoElementZ', (['H', '(0)'], {}), '(H, 0)\n', (14142, 14148), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((14154, 14198), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Zgood', 'Ztest'], {}), '(Zgood, Ztest)\n', (14184, 14198), True, 'import numpy as np\n'), ((14210, 14230), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (14218, 14230), True, 'import numpy as np\n'), ((14244, 14259), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (14252, 14259), True, 'import numpy as np\n'), ((14273, 14297), 'femnurbs.SplineUsefulFunctions.cutHtoElementZ', 'SUF.cutHtoElementZ', (['H', '(1)'], {}), '(H, 1)\n', (14291, 14297), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((14303, 14347), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Zgood', 'Ztest'], {}), '(Zgood, Ztest)\n', (14333, 14347), True, 'import numpy as np\n'), ((14359, 14385), 'numpy.array', 'np.array', (['[0, 0.5, 0.5, 0]'], {}), '([0, 0.5, 0.5, 0])\n', (14367, 14385), True, 'import numpy as np\n'), ((14399, 14422), 'numpy.array', 'np.array', (['[0, 0.5, 0.5]'], {}), '([0, 0.5, 0.5])\n', (14407, 14422), True, 'import numpy as np\n'), ((14436, 14460), 'femnurbs.SplineUsefulFunctions.cutHtoElementZ', 'SUF.cutHtoElementZ', (['H', '(0)'], {}), '(H, 0)\n', (14454, 14460), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((14466, 14510), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Zgood', 'Ztest'], {}), '(Zgood, Ztest)\n', (14496, 14510), True, 'import numpy as np\n'), ((14522, 14548), 'numpy.array', 'np.array', (['[0, 0.5, 0.5, 0]'], {}), '([0, 0.5, 0.5, 0])\n', (14530, 14548), True, 'import numpy as np\n'), ((14562, 14585), 'numpy.array', 'np.array', (['[0.5, 0.5, 0]'], {}), '([0.5, 0.5, 0])\n', (14570, 14585), True, 
'import numpy as np\n'), ((14599, 14623), 'femnurbs.SplineUsefulFunctions.cutHtoElementZ', 'SUF.cutHtoElementZ', (['H', '(1)'], {}), '(H, 1)\n', (14617, 14623), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((14629, 14673), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Zgood', 'Ztest'], {}), '(Zgood, Ztest)\n', (14659, 14673), True, 'import numpy as np\n'), ((14685, 14717), 'numpy.array', 'np.array', (['[0, 0, 0.5, 0.5, 0, 0]'], {}), '([0, 0, 0.5, 0.5, 0, 0])\n', (14693, 14717), True, 'import numpy as np\n'), ((14731, 14760), 'numpy.array', 'np.array', (['[0, 0, 0.5, 0.5, 0]'], {}), '([0, 0, 0.5, 0.5, 0])\n', (14739, 14760), True, 'import numpy as np\n'), ((14774, 14798), 'femnurbs.SplineUsefulFunctions.cutHtoElementZ', 'SUF.cutHtoElementZ', (['H', '(0)'], {}), '(H, 0)\n', (14792, 14798), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((14804, 14848), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Zgood', 'Ztest'], {}), '(Zgood, Ztest)\n', (14834, 14848), True, 'import numpy as np\n'), ((14860, 14892), 'numpy.array', 'np.array', (['[0, 0, 0.5, 0.5, 0, 0]'], {}), '([0, 0, 0.5, 0.5, 0, 0])\n', (14868, 14892), True, 'import numpy as np\n'), ((14906, 14935), 'numpy.array', 'np.array', (['[0, 0.5, 0.5, 0, 0]'], {}), '([0, 0.5, 0.5, 0, 0])\n', (14914, 14935), True, 'import numpy as np\n'), ((14949, 14973), 'femnurbs.SplineUsefulFunctions.cutHtoElementZ', 'SUF.cutHtoElementZ', (['H', '(1)'], {}), '(H, 1)\n', (14967, 14973), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((14979, 15023), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Zgood', 'Ztest'], {}), '(Zgood, Ztest)\n', (15009, 15023), True, 'import numpy as np\n'), ((15069, 15078), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (15075, 15078), True, 'import numpy as np\n'), ((15136, 15151), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (15143, 15151), True, 'import numpy as np\n'), ((15210, 
15226), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (15218, 15226), True, 'import numpy as np\n'), ((135, 159), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (148, 159), False, 'import pytest\n'), ((170, 184), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', ([], {}), '()\n', (182, 184), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((199, 214), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['(0)'], {}), '(0)\n', (211, 214), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((236, 253), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['(1.2)'], {}), '(1.2)\n', (248, 253), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((275, 291), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['{}'], {}), '({})\n', (287, 291), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((313, 329), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['(-1)'], {}), '(-1)\n', (325, 329), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((351, 373), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['{(1): 1}'], {}), '({(1): 1})\n', (363, 373), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((393, 422), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['[0, 0, 0, 1, 1]'], {}), '([0, 0, 0, 1, 1])\n', (405, 422), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((444, 473), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['[0, 0, 1, 1, 1]'], {}), '([0, 0, 1, 1, 1])\n', (456, 473), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((497, 532), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['[0, 0, 0, 0, 1, 1, 1]'], {}), '([0, 0, 0, 0, 1, 1, 1])\n', (509, 532), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((554, 589), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['[0, 0, 0, 1, 1, 1, 1]'], {}), '([0, 0, 0, 1, 1, 1, 1])\n', (566, 589), True, 'import 
femnurbs.SplineUsefulFunctions as SUF\n'), ((611, 639), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['[-1, -1, 1, 1]'], {}), '([-1, -1, 1, 1])\n', (623, 639), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((661, 687), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['[0, 0, 2, 2]'], {}), '([0, 0, 2, 2])\n', (673, 687), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((711, 747), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['[0, 0, 0.8, 0.2, 1, 1]'], {}), '([0, 0, 0.8, 0.2, 1, 1])\n', (723, 747), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((769, 806), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['[0, 0, 0, 1, 0.5, 1, 1]'], {}), '([0, 0, 0, 1, 0.5, 1, 1])\n', (781, 806), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((830, 856), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (842, 856), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((877, 909), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['[0, 0, 0, 1, 1, 1]'], {}), '([0, 0, 0, 1, 1, 1])\n', (889, 909), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((930, 968), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['[0, 0, 0, 0, 1, 1, 1, 1]'], {}), '([0, 0, 0, 0, 1, 1, 1, 1])\n', (942, 968), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((989, 1025), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['[0, 0, 0.2, 0.8, 1, 1]'], {}), '([0, 0, 0.2, 0.8, 1, 1])\n', (1001, 1025), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((1046, 1083), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['[0, 0, 0, 0.5, 1, 1, 1]'], {}), '([0, 0, 0, 0.5, 1, 1, 1])\n', (1058, 1083), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((1104, 1145), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['[0, 0, 0.1, 0.5, 0.9, 1, 1]'], {}), '([0, 0, 0.1, 0.5, 0.9, 1, 1])\n', (1116, 
1145), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((1166, 1202), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['[0, 0, 0.5, 0.5, 1, 1]'], {}), '([0, 0, 0.5, 0.5, 1, 1])\n', (1178, 1202), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((1225, 1266), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['[0, 0, 0.5, 0.5, 0.5, 1, 1]'], {}), '([0, 0, 0.5, 0.5, 0.5, 1, 1])\n', (1237, 1266), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((4437, 4455), 'femnurbs.SplineUsefulFunctions.isSymetric', 'SUF.isSymetric', (['II'], {}), '(II)\n', (4451, 4455), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((4498, 4516), 'femnurbs.SplineUsefulFunctions.isSymetric', 'SUF.isSymetric', (['II'], {}), '(II)\n', (4512, 4516), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((4559, 4589), 'femnurbs.SplineUsefulFunctions.isSymetric', 'SUF.isSymetric', (['II'], {'diagonal': '(2)'}), '(II, diagonal=2)\n', (4573, 4589), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((4632, 4662), 'femnurbs.SplineUsefulFunctions.isSymetric', 'SUF.isSymetric', (['II'], {'diagonal': '(2)'}), '(II, diagonal=2)\n', (4646, 4662), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((4754, 4784), 'femnurbs.SplineUsefulFunctions.isSymetric', 'SUF.isSymetric', (['II'], {'diagonal': '(2)'}), '(II, diagonal=2)\n', (4768, 4784), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((4876, 4906), 'femnurbs.SplineUsefulFunctions.isSymetric', 'SUF.isSymetric', (['II'], {'diagonal': '(2)'}), '(II, diagonal=2)\n', (4890, 4906), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((4998, 5028), 'femnurbs.SplineUsefulFunctions.isSymetric', 'SUF.isSymetric', (['II'], {'diagonal': '(2)'}), '(II, diagonal=2)\n', (5012, 5028), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((5121, 5151), 'femnurbs.SplineUsefulFunctions.isSymetric', 'SUF.isSymetric', (['II'], {'diagonal': '(2)'}), '(II, diagonal=2)\n', (5135, 5151), True, 
'import femnurbs.SplineUsefulFunctions as SUF\n'), ((7768, 7799), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 1, 0, 0]'], {}), '([0, 0, 1, 1, 1, 0, 0])\n', (7776, 7799), True, 'import numpy as np\n'), ((7935, 7969), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 0, 0, 0]'], {}), '([0, 0, 0, 1, 1, 0, 0, 0])\n', (7943, 7969), True, 'import numpy as np\n'), ((10347, 10366), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (10355, 10366), True, 'import numpy as np\n'), ((10507, 10526), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (10515, 10526), True, 'import numpy as np\n'), ((10667, 10692), 'numpy.array', 'np.array', (['[0, 1, 1, 1, 0]'], {}), '([0, 1, 1, 1, 0])\n', (10675, 10692), True, 'import numpy as np\n'), ((10833, 10849), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (10841, 10849), True, 'import numpy as np\n'), ((10990, 11006), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (10998, 11006), True, 'import numpy as np\n'), ((11147, 11169), 'numpy.array', 'np.array', (['[0, 1, 1, 0]'], {}), '([0, 1, 1, 0])\n', (11155, 11169), True, 'import numpy as np\n'), ((11310, 11338), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 0, 0]'], {}), '([0, 0, 1, 1, 0, 0])\n', (11318, 11338), True, 'import numpy as np\n'), ((15091, 15116), 'femnurbs.SplineUsefulFunctions.isDiagonalDominant', 'SUF.isDiagonalDominant', (['M'], {}), '(M)\n', (15113, 15116), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((15164, 15189), 'femnurbs.SplineUsefulFunctions.isDiagonalDominant', 'SUF.isDiagonalDominant', (['M'], {}), '(M)\n', (15186, 15189), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((15239, 15264), 'femnurbs.SplineUsefulFunctions.isDiagonalDominant', 'SUF.isDiagonalDominant', (['M'], {}), '(M)\n', (15261, 15264), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((15285, 15294), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (15291, 15294), True, 'import numpy as np\n'), ((15317, 15342), 
'femnurbs.SplineUsefulFunctions.isDiagonalDominant', 'SUF.isDiagonalDominant', (['M'], {}), '(M)\n', (15339, 15342), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((15404, 15429), 'femnurbs.SplineUsefulFunctions.isDiagonalDominant', 'SUF.isDiagonalDominant', (['M'], {}), '(M)\n', (15426, 15429), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((15372, 15381), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (15378, 15381), True, 'import numpy as np\n'), ((1358, 1374), 'femnurbs.SplineUsefulFunctions.UBezier', 'SUF.UBezier', ([], {'p': 'p'}), '(p=p)\n', (1369, 1374), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((3758, 3779), 'femnurbs.SplineUsefulFunctions.URandom', 'SUF.URandom', ([], {'p': 'p', 'n': 'n'}), '(p=p, n=n)\n', (3769, 3779), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((1879, 1901), 'femnurbs.SplineUsefulFunctions.UUniform', 'SUF.UUniform', ([], {'p': 'p', 'n': 'n'}), '(p=p, n=n)\n', (1891, 1901), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((3804, 3819), 'femnurbs.SplineUsefulFunctions.isValidU', 'SUF.isValidU', (['U'], {}), '(U)\n', (3816, 3819), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((3852, 3868), 'femnurbs.SplineUsefulFunctions.getPfromU', 'SUF.getPfromU', (['U'], {}), '(U)\n', (3865, 3868), True, 'import femnurbs.SplineUsefulFunctions as SUF\n'), ((3898, 3914), 'femnurbs.SplineUsefulFunctions.getNfromU', 'SUF.getNfromU', (['U'], {}), '(U)\n', (3911, 3914), True, 'import femnurbs.SplineUsefulFunctions as SUF\n')] |
import numpy as np
import matplotlib.pyplot as plt
from read_file import select_original_breakpoints
def plot_ave_curve(slopes, intervals, filename, color):
    """Plot the average curve of the most frequent slope-sign pattern.

    Each row of `slopes` holds N segment angles (degrees); a pattern is the
    sequence of "decreasing" flags between consecutive segments.  The most
    common pattern is selected, its member curves are averaged, and the
    resulting piecewise-linear curve is plotted and saved to `filename`.

    NOTE: relies on the module-level global `N` (number of segments) being
    set by the caller before invocation.

    Args:
        slopes: 2-D array-like of per-curve segment angles in degrees.
        intervals: 2-D array-like of matching per-segment x-extents.
        filename: path the figure is saved to.
        color: matplotlib color for the plotted curve.
    """
    # Describe each curve by which consecutive segment pairs decrease.
    all_curve_descr = []
    for curve in slopes:
        curve_descr = np.zeros(N - 1)
        for i in range(N - 1):
            if curve[i] > curve[i + 1]:
                curve_descr[i] = 1
        all_curve_descr.append(tuple(curve_descr))
    # Count the distinct patterns; np.unique sorts so argsort(B) puts the
    # most common pattern last.
    A, B = np.unique(all_curve_descr, return_counts=True, axis=0)
    idxs = np.argsort(B)
    A = A[idxs]
    B = B[idxs]
    total = sum(B)
    for a, b in zip(A, B):
        # Fix: the spec was "{:2f}" (field width 2, default precision);
        # "{:.2f}" gives the intended two decimals, matching the "%.2f%%"
        # used in the plot label below.
        print("{0}".format(a) + " : {:.2f}".format(100 * b / total))
    plt.figure(figsize=(3, 3))
    # j = -1 selects the most common pattern (counts sorted ascending).
    j = -1
    selected_slopes = []
    selected_intervals = []
    for i, sample in enumerate(all_curve_descr):
        if sample == tuple(A[j]):
            selected_slopes.append(slopes[i])
            selected_intervals.append(intervals[i])
    ave_slope = np.mean(selected_slopes, axis=0)
    print(ave_slope)
    ave_intervals = np.mean(selected_intervals, axis=0)
    ave_tan = np.tan(np.radians(ave_slope))
    # Integrate the averaged slopes/intervals into a piecewise-linear curve.
    x = [0]
    y = [0]
    for s, i in zip(ave_tan, ave_intervals):
        x.append(x[-1] + i)
        y.append(y[-1] + i * s)
    curve_descr = ['+' if a == 0 else '-' for a in A[j]]
    curve_descr = ' '.join(curve_descr)
    print(curve_descr)
    plt.plot(x, y, 'o', ls='-', label='%s (%.2f%%)' % (curve_descr, 100 * B[j]/total), alpha=0.7, c=color)
    plt.legend()
    plt.xlabel('Normalized time')
    plt.ylabel('Views')
    plt.tight_layout()
    plt.savefig(filename)
if __name__ == '__main__':
    sources = ['clusters\\clusters\\clusters_ind_single_0.35_3.txt',
               'clusters\\clusters\\clusters_ind_single_0.56_5.txt']
    colors = ['tab:red', 'tab:blue', 'tab:orange', 'tab:green', 'tab:grey']
    letters = ['a', 'b', 'c', 'd', 'e', 'f']
    idx = 0
    # N is also read as a global by plot_ave_curve.
    for N, source in zip([3, 5], sources):
        # Fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `int` is the documented drop-in replacement.
        labels = np.loadtxt(source, dtype=int)
        slopes, intervals = select_original_breakpoints(N, 'segm/segmented_curves_filtered.txt')
        # Keep only clusters with at least 10 members; plot the 3 largest.
        unique, counts = np.unique(labels, return_counts=True)
        unique = unique[counts >= 10]
        counts = counts[counts >= 10]
        unique_idxs = np.argsort(counts)[-3:]
        unique = unique[unique_idxs].tolist()
        # labels = [unique.index(l) if l in unique else -1 for l in labels]
        for i, label in enumerate(unique):
            idxs = labels == label
            slopes_i = slopes[idxs]
            intervals_i = intervals[idxs]
            print(label, '-> tamanho', len(slopes_i))
            filename = 'ave_curve_%d_intervals_%s.pdf' % (N, letters[idx])
            plot_ave_curve(slopes_i, intervals_i, filename, colors[i])
            idx += 1
    # for N in [3, 5]:
    #     slopes, intervals = select_original_breakpoints(N, 'segm/segmented_curves_filtered.txt')
    #
    #     filename = 'ave_curve_%d_intervals.pdf' % N
    #     plot_ave_curve(slopes, intervals, filename, 'gray')
    # fig, ax = plt.subplots(figsize=(len(A), 10))
    #
    # im = ax.imshow(np.concatenate((A, B.reshape(-1, 1)), axis=1), cmap=plt.get_cmap("PiYG", 7))
    #
    # labels = ["$\\alpha_i <= \\alpha_{i+1}$", "$\\alpha_i > \\alpha_{i+1}$"]
    # for i in range(len(A)):
    #     for j in range(N):
    #         if N-1 == j:
    #             text = ax.text(j, i, "{:2f}".format(100*B[i]/total),
    #                            ha="center", va="center", color="w")
    #         else:
    #             text = ax.text(j, i, labels[int(A[i, j])],
    #                            ha="center", va="center", color="w")
    # ax.set_title("Curves")
    # ax.xaxis.set_ticklabels([])
    # ax.yaxis.set_ticklabels([])
    # fig.tight_layout()
    # plt.show()
| [
"numpy.radians",
"numpy.mean",
"matplotlib.pyplot.savefig",
"numpy.unique",
"matplotlib.pyplot.ylabel",
"read_file.select_original_breakpoints",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.tight_layout",
... | [((416, 470), 'numpy.unique', 'np.unique', (['all_curve_descr'], {'return_counts': '(True)', 'axis': '(0)'}), '(all_curve_descr, return_counts=True, axis=0)\n', (425, 470), True, 'import numpy as np\n'), ((482, 495), 'numpy.argsort', 'np.argsort', (['B'], {}), '(B)\n', (492, 495), True, 'import numpy as np\n'), ((647, 673), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (657, 673), True, 'import matplotlib.pyplot as plt\n'), ((937, 969), 'numpy.mean', 'np.mean', (['selected_slopes'], {'axis': '(0)'}), '(selected_slopes, axis=0)\n', (944, 969), True, 'import numpy as np\n'), ((1011, 1046), 'numpy.mean', 'np.mean', (['selected_intervals'], {'axis': '(0)'}), '(selected_intervals, axis=0)\n', (1018, 1046), True, 'import numpy as np\n'), ((1345, 1453), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""o"""'], {'ls': '"""-"""', 'label': "('%s (%.2f%%)' % (curve_descr, 100 * B[j] / total))", 'alpha': '(0.7)', 'c': 'color'}), "(x, y, 'o', ls='-', label='%s (%.2f%%)' % (curve_descr, 100 * B[j] /\n total), alpha=0.7, c=color)\n", (1353, 1453), True, 'import matplotlib.pyplot as plt\n'), ((1453, 1465), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1463, 1465), True, 'import matplotlib.pyplot as plt\n'), ((1470, 1499), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Normalized time"""'], {}), "('Normalized time')\n", (1480, 1499), True, 'import matplotlib.pyplot as plt\n'), ((1504, 1523), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Views"""'], {}), "('Views')\n", (1514, 1523), True, 'import matplotlib.pyplot as plt\n'), ((1529, 1547), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1545, 1547), True, 'import matplotlib.pyplot as plt\n'), ((1552, 1573), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (1563, 1573), True, 'import matplotlib.pyplot as plt\n'), ((231, 246), 'numpy.zeros', 'np.zeros', (['(N - 1)'], {}), '(N - 1)\n', (239, 246), True, 
'import numpy as np\n'), ((1068, 1089), 'numpy.radians', 'np.radians', (['ave_slope'], {}), '(ave_slope)\n', (1078, 1089), True, 'import numpy as np\n'), ((1936, 1968), 'numpy.loadtxt', 'np.loadtxt', (['source'], {'dtype': 'np.int'}), '(source, dtype=np.int)\n', (1946, 1968), True, 'import numpy as np\n'), ((1997, 2065), 'read_file.select_original_breakpoints', 'select_original_breakpoints', (['N', '"""segm/segmented_curves_filtered.txt"""'], {}), "(N, 'segm/segmented_curves_filtered.txt')\n", (2024, 2065), False, 'from read_file import select_original_breakpoints\n'), ((2091, 2128), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (2100, 2128), True, 'import numpy as np\n'), ((2227, 2245), 'numpy.argsort', 'np.argsort', (['counts'], {}), '(counts)\n', (2237, 2245), True, 'import numpy as np\n')] |
# Importing the required packages
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from catboost import CatBoostClassifier
import statistics
# Function importing Dataset
def importdata():
    """Fetch the spectroscopy dataset from GitHub, shuffle its rows and
    zero every spectral reading below the noise threshold (0.0875).

    Column 0 holds the ppm label and is never modified.

    Returns:
        np.ndarray: shuffled data array.
    """
    frame = pd.read_csv(
        'https://raw.githubusercontent.com/ssfaruque/HD_Computing/master/chemometrics/datasets/DTreeSets/'+
        #
        #select only one from below
        #'noisySets/DT_noisy_005_'+
        #'noisySets/DT_noisy_01_'+
        #'noisySets/DT_noisy_015_'+
        #'noisySets/DT_noisy_02_'+
        #'noisySets/DT_noisy_03_'+
        #'noisySets/DT_multiplicative_075_'
        #'noisySets/DT_multiplicative_090_'
        #'noisySets/DT_multiplicative_110_'
        #'noisySets/DT_multiplicative_125_'
        #'noisySets/DT_additive_025_'
        #'noisySets/DT_additive_050_'
        #'noisySets/DT_additive_100_'
        #
        #select only one from below
        #'DNA_Anodisc.csv',
        #'DNA_ECOLI.csv',
        'DNA_inLiquidDNA.csv',
        #'Full_Set.csv',
        #'Yeast_inLiquidHK.csv',
        #'Yeast_inLiquidLive.csv',
        sep= ',', header = None)
    data = np.array(frame)
    np.random.shuffle(data)
    # Floor sub-threshold readings to zero, skipping the label column.
    threshold = 0.0875
    n_rows, n_cols = data.shape
    for r in range(n_rows):
        for c in range(1, n_cols):
            if float(data[r][c]) < threshold:
                data[r][c] = 0
    return data
def retrieve_indices_of_label(balance_data, label):
    """Return the row indices of `balance_data` whose first column equals `label`."""
    first_column = balance_data[:, 0]
    return [i for i, value in enumerate(first_column) if int(value) == label]
def update_train_test_sets(balance_data):
    """Split rows by ppm label: the first five files of every category
    (0, 2, 5, 10, 15 ppm) form the training set, the rest the test set.

    Returns:
        tuple: (trainset, testset) as numpy arrays.
    """
    num_files_per_category = 5
    # Collect, per label, the indices of the rows that go into training
    # (index lookup inlined from retrieve_indices_of_label).
    train_index_groups = []
    for ppm_label in (0, 2, 5, 10, 15):
        matching = [i for i, lab in enumerate(balance_data[:, 0])
                    if int(lab) == ppm_label]
        train_index_groups.append(matching[:num_files_per_category])
    # Training set keeps the category order 0, 2, 5, 10, 15.
    train_parts = [np.copy(balance_data[group]) for group in train_index_groups]
    trainset = np.concatenate(train_parts)
    # Everything not used for training becomes the test set.
    used = [i for group in train_index_groups for i in group]
    testset = np.delete(balance_data, used, axis=0)
    return trainset, testset
# Function to split the dataset
def splitdataset(balance_data):
    """Separate features/target and make a random 10%/90% train/test split.

    NOTE: uses `.values`, so `balance_data` is expected to be a pandas
    DataFrame (importdata() returns an ndarray, so this helper appears to
    belong to an earlier version of the pipeline).

    Returns:
        tuple: (X, Y, X_train, X_test, y_train, y_test)
    """
    features = balance_data.values[:, 1:1868]  # spectral columns (min 1, max 1868)
    target = balance_data.values[:, 0]         # ppm label
    X_train, X_test, y_train, y_test = train_test_split(
        features, target, test_size = 0.90, random_state = 51, shuffle = True, stratify = None)
    return features, target, X_train, X_test, y_train, y_test
def GradientBoost(X_train, X_test, y_train):
    """Train a small CatBoost classifier and predict labels for X_test."""
    classifier = CatBoostClassifier(iterations=10, depth=5)
    classifier.fit(X_train, y_train)
    return classifier.predict(X_test)
# Function to calculate accuracy
def cal_accuracy(y_test, y_pred):
    """Print the accuracy (percent) and a per-class classification report."""
    # print("Confusion Matrix: ", confusion_matrix(y_test, y_pred))
    accuracy_percent = accuracy_score(y_test,y_pred)*100
    print ("Accuracy : ",
    accuracy_percent)
    print("Report : \n",
    classification_report(y_test, y_pred))
# Driver code
def main():
    """Run 10 train/test rounds and print per-round and average metrics.

    Each round reloads and reshuffles the data, trains on the first five
    files of every ppm category, evaluates on the remainder, and
    accumulates accuracy plus an F1 score computed from a hand-rolled
    contaminated-vs-clean confusion count (0/2 ppm treated as negative,
    5/10/15 ppm as positive).
    """
    predicted_accuracy = []
    predicted_f1 = []
    # NOTE(review): this round index `i` is shadowed by the inner sample
    # loop below; harmless today because the outer `i` is never used after
    # the inner loop, but fragile.
    for i in range(10):
        #Training Phase
        data = importdata()
        #X, Y, X_train, X_test, y_train, y_test = splitdataset(data)
        trainset, testset = update_train_test_sets(data)
        #Testing Phase
        y_pred = GradientBoost(trainset[:, 1:1608], testset[:, 1:1608], trainset[:, 0])
        cal_accuracy(testset[:, 0], y_pred)
        # Confusion counts for the binarised task: labels 0/2 ppm count as
        # "clean" (negative), 5/10/15 ppm as "contaminated" (positive).
        TN = 0
        TP = 0
        FN = 0
        FP = 0
        for i in range(0,len(testset[:,0])):
            predicted = y_pred[i]
            label = testset[i][0]
            if predicted == label:
                if predicted == 0 or predicted == 2:
                    TN += 1
                else:
                    TP += 1
            else:
                # Mismatches: confusing the two clean classes (0 vs 2) is
                # still a true negative; clean-vs-contaminated confusions
                # become FN/FP.
                if predicted == 0:
                    if label == 2:
                        TN += 1
                    else:
                        FN += 1
                elif predicted == 2:
                    if label == 0:
                        TN += 1
                    else:
                        FN += 1
                elif predicted == 5:
                    if label == 0 or label == 2:
                        FP += 1
                    else:
                        TP += 1
                elif predicted == 10:
                    if label == 0 or label == 2:
                        FP += 1
                    else:
                        # NOTE(review): predicting 10 for a 5/15 ppm sample
                        # is counted as TN here, unlike the 5/15 branches
                        # which count TP — confirm this asymmetry is
                        # intentional.
                        TN += 1
                elif predicted == 15:
                    if label == 0 or label == 2:
                        FP += 1
                    else:
                        TP += 1
        f1_score = 2 * TP / (2 * TP + FP + FN)
        print("F1-score: {}\n".format(round(f1_score, 2)))
        predicted_accuracy.append(accuracy_score(testset[:, 0], y_pred)*100)
        predicted_f1.append(f1_score)
    # Averages over the 10 rounds.
    Average_Acc = statistics.mean(predicted_accuracy)
    Average_F1 = statistics.mean(predicted_f1)
    print("Average Accuracy: {}".format(round(Average_Acc, 2)))
    print("Average F1-score: {}".format(round(Average_F1, 2)))
# Script entry point.
if __name__ == "__main__":
    main()
| [
"statistics.mean",
"numpy.copy",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.delete",
"sklearn.metrics.classification_report",
"numpy.array",
"numpy.concatenate",
"catboost.CatBoostClassifier",
"sklearn.metrics.accuracy_score",
"numpy.random.shuffle"
] | [((389, 556), 'pandas.read_csv', 'pd.read_csv', (["('https://raw.githubusercontent.com/ssfaruque/HD_Computing/master/chemometrics/datasets/DTreeSets/'\n + 'DNA_inLiquidDNA.csv')"], {'sep': '""","""', 'header': 'None'}), "(\n 'https://raw.githubusercontent.com/ssfaruque/HD_Computing/master/chemometrics/datasets/DTreeSets/'\n + 'DNA_inLiquidDNA.csv', sep=',', header=None)\n", (400, 556), True, 'import pandas as pd\n'), ((1112, 1134), 'numpy.array', 'np.array', (['balance_data'], {}), '(balance_data)\n', (1120, 1134), True, 'import numpy as np\n'), ((1139, 1170), 'numpy.random.shuffle', 'np.random.shuffle', (['balance_data'], {}), '(balance_data)\n', (1156, 1170), True, 'import numpy as np\n'), ((2248, 2283), 'numpy.copy', 'np.copy', (['balance_data[ppm0_indices]'], {}), '(balance_data[ppm0_indices])\n', (2255, 2283), True, 'import numpy as np\n'), ((2304, 2339), 'numpy.copy', 'np.copy', (['balance_data[ppm2_indices]'], {}), '(balance_data[ppm2_indices])\n', (2311, 2339), True, 'import numpy as np\n'), ((2360, 2395), 'numpy.copy', 'np.copy', (['balance_data[ppm5_indices]'], {}), '(balance_data[ppm5_indices])\n', (2367, 2395), True, 'import numpy as np\n'), ((2416, 2452), 'numpy.copy', 'np.copy', (['balance_data[ppm10_indices]'], {}), '(balance_data[ppm10_indices])\n', (2423, 2452), True, 'import numpy as np\n'), ((2473, 2509), 'numpy.copy', 'np.copy', (['balance_data[ppm15_indices]'], {}), '(balance_data[ppm15_indices])\n', (2480, 2509), True, 'import numpy as np\n'), ((2526, 2618), 'numpy.concatenate', 'np.concatenate', (['(ppm0_samples, ppm2_samples, ppm5_samples, ppm10_samples, ppm15_samples)'], {}), '((ppm0_samples, ppm2_samples, ppm5_samples, ppm10_samples,\n ppm15_samples))\n', (2540, 2618), True, 'import numpy as np\n'), ((2629, 2740), 'numpy.delete', 'np.delete', (['balance_data', '(ppm0_indices + ppm2_indices + ppm5_indices + ppm10_indices + ppm15_indices)'], {'axis': '(0)'}), '(balance_data, ppm0_indices + ppm2_indices + ppm5_indices +\n ppm10_indices + 
ppm15_indices, axis=0)\n', (2638, 2740), True, 'import numpy as np\n'), ((3053, 3140), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.9)', 'random_state': '(51)', 'shuffle': '(True)', 'stratify': 'None'}), '(X, Y, test_size=0.9, random_state=51, shuffle=True,\n stratify=None)\n', (3069, 3140), False, 'from sklearn.model_selection import train_test_split\n'), ((3261, 3303), 'catboost.CatBoostClassifier', 'CatBoostClassifier', ([], {'iterations': '(10)', 'depth': '(5)'}), '(iterations=10, depth=5)\n', (3279, 3303), False, 'from catboost import CatBoostClassifier\n'), ((5555, 5590), 'statistics.mean', 'statistics.mean', (['predicted_accuracy'], {}), '(predicted_accuracy)\n', (5570, 5590), False, 'import statistics\n'), ((5608, 5637), 'statistics.mean', 'statistics.mean', (['predicted_f1'], {}), '(predicted_f1)\n', (5623, 5637), False, 'import statistics\n'), ((3630, 3667), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (3651, 3667), False, 'from sklearn.metrics import classification_report\n'), ((3565, 3595), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (3579, 3595), False, 'from sklearn.metrics import accuracy_score\n'), ((5455, 5492), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['testset[:, 0]', 'y_pred'], {}), '(testset[:, 0], y_pred)\n', (5469, 5492), False, 'from sklearn.metrics import accuracy_score\n')] |
"""__init__
License: BSD 3-Clause License
Copyright (C) 2021, New York University
Copyright note valid unless otherwise stated in individual files.
All rights reserved.
"""
import numpy as np
class SimHead:
    """Head interface for a simulated robot.

    Mirrors the sensor/control API of a real robot head on top of a
    simulated robot wrapper: sensor readings and commanded torques are
    buffered through optional fixed delays (measured in simulation steps),
    and per-sensor noise standard deviations are stored for sampling.
    """

    def __init__(self, robot, vicon_name='', with_sliders=True, joint_index=None,
                 measurement_delay_dt=0, control_delay_dt=0, noise_data_std=None):
        """
        Args:
            robot: Simulated robot wrapper; must expose `pin_robot.model.nv`,
                `useFixedBase`, `get_state()` and `send_joint_command()`.
            vicon_name (str): Name used by the vicon utility classes.
            with_sliders (bool): If True, `read()` also polls the four GUI sliders.
            joint_index (sequence of int, optional): Subset of joints to expose
                (fixed-base robots only); None exposes all joints.
            measurement_delay_dt (int): Sensor readings are delayed by this
                many simulation steps.
            control_delay_dt (int): Commanded torques are delayed by this
                many simulation steps.
            noise_data_std (dict, optional): Per-entry noise standard
                deviations; missing entries default to zero.
        """
        self._robot = robot
        self._vicon_name = vicon_name
        self._joint_index = joint_index

        # Size of the velocity/tangent space.
        nv = robot.pin_robot.model.nv

        # Number of joints nj exposed by this head.
        if robot.useFixedBase:
            nj = nv if joint_index is None else len(joint_index)
        else:
            nj = nv - 6  # discount the 6 floating-base dofs
        self.nj = nj

        self._sensor_joint_positions = np.zeros(nj)
        self._sensor_joint_velocities = np.zeros(nj)

        self.with_sliders = with_sliders
        if self.with_sliders:
            self._sensor_slider_positions = np.zeros(4)

        # If not fixed base, then assume we have an IMU and a vicon.
        if not robot.useFixedBase:
            self._sensor_imu_gyroscope = np.zeros(3)
            # Utility buffers for the vicon class.
            self._sensor__vicon_base_position = np.zeros(7)
            self._sensor__vicon_base_velocity = np.zeros(6)

        # Controls.
        self._control_ctrl_joint_torques = np.zeros(nj)

        # Bug fix: the old `noise_data_std={}` default was a shared mutable
        # default that update_noise_data() mutated across instances.
        self.update_noise_data({} if noise_data_std is None else noise_data_std)
        self.update_control_delay(control_delay_dt)
        self.update_measurement_delay(measurement_delay_dt)

    def update_noise_data(self, noise_data_std):
        """Install noise standard deviations, filling missing entries with zeros."""
        self._noise_data_std = noise_data_std
        if 'joint_positions' not in noise_data_std:
            self._noise_data_std['joint_positions'] = np.zeros(self.nj)
        if 'joint_velocities' not in noise_data_std:
            # Bug fix: the default used to be stored under 'base_velocity'
            # only, leaving sample_noise('joint_velocities') to raise
            # KeyError.  'base_velocity' is still populated for backward
            # compatibility with code that reads it.
            self._noise_data_std['joint_velocities'] = np.zeros(self.nj)
            self._noise_data_std['base_velocity'] = np.zeros(self.nj)
        if 'imu_gyroscope' not in noise_data_std:
            self._noise_data_std['imu_gyroscope'] = np.zeros(3)

    def update_control_delay(self, delay_dt):
        """Resize the control ring buffer for a delay of `delay_dt` steps."""
        self._fill_history_control = True
        self._ti = 0
        self._control_delay_dt = delay_dt
        length = delay_dt + 1
        self._history_control = {
            'ctrl_joint_torques': np.zeros((length, self.nj))
        }

    def update_measurement_delay(self, delay_dt):
        """Resize the measurement ring buffers for a delay of `delay_dt` steps."""
        self._fill_history_measurement = True
        self._ti = 0
        self._measurement_delay_dt = delay_dt
        length = delay_dt + 1
        self._history_measurements = {
            'joint_positions': np.zeros((length, self.nj)),
            'joint_velocities': np.zeros((length, self.nj)),
            'imu_gyroscope': np.zeros((length, 3))
        }

    def sample_noise(self, entry):
        """Draw one zero-mean Gaussian noise sample for the sensor `entry`."""
        noise_var = self._noise_data_std[entry]**2
        return np.random.multivariate_normal(np.zeros_like(noise_var), np.diag(noise_var))

    def read(self):
        """Read the robot state into the (possibly delayed) sensor buffers."""
        q, dq = self._robot.get_state()

        # Oldest slot is overwritten; on the first call the whole ring
        # buffer is filled (a None index broadcasts over all rows).
        write_idx = self._ti % (self._measurement_delay_dt + 1)
        if self._fill_history_measurement:
            self._fill_history_measurement = False
            write_idx = None
        read_idx = (self._ti + 1) % (self._measurement_delay_dt + 1)

        history = self._history_measurements

        if not self._robot.useFixedBase:
            # Floating base: strip the base coordinates from q/dq.
            history['joint_positions'][write_idx] = q[7:]
            history['joint_velocities'][write_idx] = dq[6:]
            history['imu_gyroscope'][write_idx] = dq[3:6]

            self._sensor_imu_gyroscope[:] = history['imu_gyroscope'][read_idx]
            # Vicon data is exposed without delay.
            self._sensor__vicon_base_position[:] = q[:7]
            self._sensor__vicon_base_velocity[:] = dq[:6]
        else:
            if self._joint_index:
                history['joint_positions'][write_idx] = q[self._joint_index]
                history['joint_velocities'][write_idx] = dq[self._joint_index]
            else:
                history['joint_positions'][write_idx] = q
                history['joint_velocities'][write_idx] = dq

        self._sensor_joint_positions[:] = history['joint_positions'][read_idx]
        self._sensor_joint_velocities[:] = history['joint_velocities'][read_idx]

        if self.with_sliders:
            for i, l in enumerate(['a', 'b', 'c', 'd']):
                self._sensor_slider_positions[i] = self._robot.get_slider_position(l)

    def write(self):
        """Push the commanded torques through the control-delay ring buffer."""
        # Bug fix: these indices used to be computed with
        # `_measurement_delay_dt` although they address `_history_control`,
        # which is sized by `_control_delay_dt` — so the control delay was
        # never applied, and mismatched delays could address the wrong slot.
        write_idx = self._ti % (self._control_delay_dt + 1)
        if self._fill_history_control:
            self._fill_history_control = False
            write_idx = None  # fill the whole buffer on the first write
        read_idx = (self._ti + 1) % (self._control_delay_dt + 1)

        history = self._history_control
        history['ctrl_joint_torques'][write_idx] = self._control_ctrl_joint_torques
        self._last_ctrl_joint_torques = history['ctrl_joint_torques'][read_idx]

        self._ti += 1

    def sim_step(self):
        """Apply the (delayed) joint torques to the simulated robot."""
        self._robot.send_joint_command(self._last_ctrl_joint_torques)

    def get_sensor(self, sensor_name):
        """Return the buffer of the sensor named `sensor_name`."""
        return self.__dict__['_sensor_' + sensor_name]

    def set_control(self, control_name, value):
        """Copy `value` into the control buffer named `control_name`."""
        self.__dict__['_control_' + control_name][:] = value

    def reset_state(self, q, dq):
        """Reset the simulated robot to configuration q with velocity dq."""
        self._robot.reset_state(q, dq)
| [
"numpy.zeros",
"numpy.zeros_like",
"numpy.diag"
] | [((843, 855), 'numpy.zeros', 'np.zeros', (['nj'], {}), '(nj)\n', (851, 855), True, 'import numpy as np\n'), ((896, 908), 'numpy.zeros', 'np.zeros', (['nj'], {}), '(nj)\n', (904, 908), True, 'import numpy as np\n'), ((1448, 1460), 'numpy.zeros', 'np.zeros', (['nj'], {}), '(nj)\n', (1456, 1460), True, 'import numpy as np\n'), ((1025, 1036), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1033, 1036), True, 'import numpy as np\n'), ((1212, 1223), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1220, 1223), True, 'import numpy as np\n'), ((1312, 1323), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (1320, 1323), True, 'import numpy as np\n'), ((1372, 1383), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (1380, 1383), True, 'import numpy as np\n'), ((1823, 1840), 'numpy.zeros', 'np.zeros', (['self.nj'], {}), '(self.nj)\n', (1831, 1840), True, 'import numpy as np\n'), ((1946, 1963), 'numpy.zeros', 'np.zeros', (['self.nj'], {}), '(self.nj)\n', (1954, 1963), True, 'import numpy as np\n'), ((2066, 2077), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2074, 2077), True, 'import numpy as np\n'), ((2330, 2357), 'numpy.zeros', 'np.zeros', (['(length, self.nj)'], {}), '((length, self.nj))\n', (2338, 2357), True, 'import numpy as np\n'), ((2634, 2661), 'numpy.zeros', 'np.zeros', (['(length, self.nj)'], {}), '((length, self.nj))\n', (2642, 2661), True, 'import numpy as np\n'), ((2695, 2722), 'numpy.zeros', 'np.zeros', (['(length, self.nj)'], {}), '((length, self.nj))\n', (2703, 2722), True, 'import numpy as np\n'), ((2754, 2775), 'numpy.zeros', 'np.zeros', (['(length, 3)'], {}), '((length, 3))\n', (2762, 2775), True, 'import numpy as np\n'), ((2918, 2942), 'numpy.zeros_like', 'np.zeros_like', (['noise_var'], {}), '(noise_var)\n', (2931, 2942), True, 'import numpy as np\n'), ((2944, 2962), 'numpy.diag', 'np.diag', (['noise_var'], {}), '(noise_var)\n', (2951, 2962), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 16 12:56:17 2021
@author: Luigi
"""
import numpy as np
import scipy as sci
import sympy as sym
import matplotlib.pyplot as plt
def Lagrange(xnodi, i):
    """Coefficients of the i-th Lagrange basis polynomial for nodes `xnodi`.

    The returned polynomial is 1 at xnodi[i] and 0 at every other node.
    """
    other_nodes = np.delete(xnodi, i)
    numerator = np.poly(other_nodes)
    return numerator / np.polyval(numerator, xnodi[i])
def Interpol(x, y, xx):
    """Evaluate the Lagrange interpolant of the data (x, y) at the points xx.

    The basis-polynomial construction is inlined here so the routine is
    self-contained.
    """
    m = x.size
    basis = np.zeros((m, xx.size))
    for k in range(m):
        # k-th Lagrange basis polynomial: 1 at x[k], 0 at the other nodes.
        other_nodes = np.delete(x, k)
        numerator = np.poly(other_nodes)
        basis[k, :] = np.polyval(numerator / np.polyval(numerator, x[k]), xx)
    return np.dot(y, basis)
def simpsonComp(f, a, b, n):
    """Composite Simpson quadrature of f over [a, b] with n sub-intervals
    (2*n + 1 equally spaced nodes).

    Fix: the nodes were built with `np.arange(a, b + h, h)`, whose
    floating-point endpoint handling is unreliable and can produce a
    spurious extra node; `np.linspace` guarantees exactly 2*n + 1 nodes
    ending at b.
    """
    h = (b - a) / (2 * n)
    nodes = np.linspace(a, b, 2 * n + 1)
    fnodi = f(nodes)
    # Simpson weights: 1 at the ends, 4 at odd interior nodes, 2 at even.
    return h * (fnodi[0]
                + 2 * np.sum(fnodi[2 : 2*n : 2])
                + 4 * np.sum(fnodi[1 : 2*n : 2])
                + fnodi[2*n]) / 3
def simpsonTol(f, a, b, tol):
    """Composite Simpson with automatic refinement.

    Doubles the number of sub-intervals until the Richardson error
    estimate |I_2n - I_n| / 15 falls below `tol` or n reaches 2048.

    Returns:
        tuple: (integral estimate, number of sub-intervals used)
    """
    n_subintervals = 1
    max_subintervals = 2048
    estimate = simpsonComp(f, a, b, n_subintervals)
    error = 1
    while error >= tol and n_subintervals < max_subintervals:
        n_subintervals *= 2
        refined = simpsonComp(f, a, b, n_subintervals)
        error = np.abs(refined - estimate) / 15
        estimate = refined
    return estimate, n_subintervals
# Test function: f(x) = x - sqrt(x - 1) on [a, b] = [1, 3].
f = lambda x : x - np.sqrt(x - 1)
a = 1
b = 3
# Interpolate f on 4 equally spaced nodes and compare against f on a fine grid.
x = np.linspace(a, b, 4)
xx = np.linspace(a, b, 100)
y = f(x)
yy = Interpol(x, y, xx)
plt.plot(xx, yy, xx, f(xx), x, y, "o")
plt.legend(["Polinomio di grado 3", "Funzione", "Nodi di interpolazione"])
plt.show()
# NOTE(review): `n` appears unused below — confirm intent.
n = 4
# Integrate both f and its degree-3 interpolant with adaptive Simpson.
p = lambda nodi : Interpol(x, y, nodi)
I1, N1 = simpsonTol(f, a, b, 10**-5)
print(f"Sono necessare {N1} iterazioni per I1")
I2, N2 = simpsonTol(p, a, b, 10**-5)
print(f"Sono necessare {N2} iterazioni per I2")
# Reference values used to estimate the absolute errors.
I1es = 2.114381916835873
I2es = 2.168048769926493
err1 = abs(I1es - I1)
err2 = abs(I2es - I2)
print("ErrRel I1 = ", err1)
print("ErrRel I2 = ", err2)
| [
"numpy.abs",
"numpy.poly",
"numpy.sqrt",
"numpy.arange",
"numpy.append",
"numpy.sum",
"numpy.linspace",
"numpy.zeros",
"numpy.polyval",
"numpy.dot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((1163, 1183), 'numpy.linspace', 'np.linspace', (['a', 'b', '(4)'], {}), '(a, b, 4)\n', (1174, 1183), True, 'import numpy as np\n'), ((1190, 1212), 'numpy.linspace', 'np.linspace', (['a', 'b', '(100)'], {}), '(a, b, 100)\n', (1201, 1212), True, 'import numpy as np\n'), ((1291, 1365), 'matplotlib.pyplot.legend', 'plt.legend', (["['Polinomio di grado 3', 'Funzione', 'Nodi di interpolazione']"], {}), "(['Polinomio di grado 3', 'Funzione', 'Nodi di interpolazione'])\n", (1301, 1365), True, 'import matplotlib.pyplot as plt\n'), ((1367, 1377), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1375, 1377), True, 'import matplotlib.pyplot as plt\n'), ((330, 344), 'numpy.poly', 'np.poly', (['xzeri'], {}), '(xzeri)\n', (337, 344), True, 'import numpy as np\n'), ((356, 381), 'numpy.polyval', 'np.polyval', (['num', 'xnodi[i]'], {}), '(num, xnodi[i])\n', (366, 381), True, 'import numpy as np\n'), ((481, 497), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (489, 497), True, 'import numpy as np\n'), ((583, 595), 'numpy.dot', 'np.dot', (['y', 'L'], {}), '(y, L)\n', (589, 595), True, 'import numpy as np\n'), ((669, 691), 'numpy.arange', 'np.arange', (['a', '(b + h)', 'h'], {}), '(a, b + h, h)\n', (678, 691), True, 'import numpy as np\n'), ((282, 317), 'numpy.append', 'np.append', (['xnodi[:i]', 'xnodi[i + 1:]'], {}), '(xnodi[:i], xnodi[i + 1:])\n', (291, 317), True, 'import numpy as np\n'), ((1129, 1143), 'numpy.sqrt', 'np.sqrt', (['(x - 1)'], {}), '(x - 1)\n', (1136, 1143), True, 'import numpy as np\n'), ((1049, 1065), 'numpy.abs', 'np.abs', (['(I2n - In)'], {}), '(I2n - In)\n', (1055, 1065), True, 'import numpy as np\n'), ((777, 801), 'numpy.sum', 'np.sum', (['fnodi[1:2 * n:2]'], {}), '(fnodi[1:2 * n:2])\n', (783, 801), True, 'import numpy as np\n'), ((744, 768), 'numpy.sum', 'np.sum', (['fnodi[2:2 * n:2]'], {}), '(fnodi[2:2 * n:2])\n', (750, 768), True, 'import numpy as np\n')] |
from django.http import JsonResponse, HttpResponse
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import api_view
import json
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.generics import ListAPIView
from .models import Calculator, Chance
from .serializer import CalculatorSerializer, ChanceSerializer
import pandas as pd
from .ensemble import predict_covid
import numpy as np
# # Create your views here.
# class AllObjects(ListAPIView):
# queryset = Calculator.objects.all()
# serializer_class = CalculatorSerializer
#
# def post(self, request):
# serializer = CalculatorSerializer(data=request.data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data, status=status.HTTP_201_CREATED)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
#
# class View(APIView):
#
# def get(self, request, pk):
# try:
# calculator = Calculator.objects.get(pk=pk)
# serializer = CalculatorSerializer(calculator)
# return Response(serializer.data)
# except:
# return Response(status=status.HTTP_404_NOT_FOUND)
#
# def delete(self, request, pk):
# calculator = Calculator.objects.get(pk=pk)
# calculator.delete()
# return Response(status=status.HTTP_200_OK)
@csrf_exempt
def createPost(request):
    """POST endpoint: decode the questionnaire answers, run the covid
    prediction model and persist the resulting probability.

    The body is assumed to be a JSON list; "Yes"/"No" answers become 1/0,
    entry 0 is coerced to int and entry 10 to float before prediction.
    """
    if request.method != "POST":
        return HttpResponse('Wrong http request')
    answers = json.loads(request.body)
    # Map yes/no answers onto 1/0; other entries are left untouched.
    for pos, answer in enumerate(answers):
        if answer == "Yes":
            answers[pos] = 1
        elif answer == "No":
            answers[pos] = 0
    answers[0] = int(answers[0])
    answers[10] = float(answers[10])
    # The model expects a single-sample 2-D feature array.
    features = np.expand_dims(np.asarray(answers), axis=0)
    probability = predict_covid(features)
    record = Chance()
    record.probability_of_covid = float(probability)
    record.save()
    return HttpResponse(status=201)
@csrf_exempt
def getChanceOfCovid(request):
    """Return (and consume) the most recently stored covid probability.

    The stored ``Chance`` row is deleted after being read, so each
    prediction can be fetched exactly once.
    """
    if request.method == "GET":
        chance_of_covid = Chance.objects.first()
        # `is None` is the idiomatic identity test (was `== None`).
        if chance_of_covid is None:
            return HttpResponse("Please enter values first!")
        else:
            serializer = ChanceSerializer(chance_of_covid)
            data = serializer.data.get('probability_of_covid')
            # round() already returns a float; the previous
            # float(str(round(...))) round-trip was redundant.
            data = round(float(data), 2)
            chance_of_covid.delete()
            return JsonResponse({"data": data}, safe=False)
    else:
        return HttpResponse('Wrong http request')
@csrf_exempt
def getData(request):
    """Debug endpoint: parse and type-convert a survey payload without
    persisting anything or running the predictor.

    Mirrors the conversion performed by ``createPost``; returns 201 if
    the payload parses cleanly.
    """
    if request.method == "POST":
        byte_string = request.body
        print(byte_string)  # debug echo of the raw request body
        body = json.loads(byte_string)
        print(body)  # debug echo of the parsed payload
        # Same yes/no -> 1/0 mapping as createPost (enumerate instead of
        # range(len(...)) indexing).
        for i, answer in enumerate(body):
            if answer == "Yes":
                body[i] = 1
            elif answer == "No":
                body[i] = 0
        body[0] = int(body[0])
        body[10] = float(body[10])
        return HttpResponse(status=201)
    else:
        return HttpResponse('Wrong http request')
| [
"json.loads",
"django.http.JsonResponse",
"django.http.HttpResponse",
"numpy.asarray",
"numpy.expand_dims"
] | [((1634, 1657), 'json.loads', 'json.loads', (['byte_string'], {}), '(byte_string)\n', (1644, 1657), False, 'import json\n'), ((1900, 1916), 'numpy.asarray', 'np.asarray', (['body'], {}), '(body)\n', (1910, 1916), True, 'import numpy as np\n'), ((1931, 1958), 'numpy.expand_dims', 'np.expand_dims', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (1945, 1958), True, 'import numpy as np\n'), ((2128, 2152), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': '(201)'}), '(status=201)\n', (2140, 2152), False, 'from django.http import JsonResponse, HttpResponse\n'), ((2178, 2212), 'django.http.HttpResponse', 'HttpResponse', (['"""Wrong http request"""'], {}), "('Wrong http request')\n", (2190, 2212), False, 'from django.http import JsonResponse, HttpResponse\n'), ((2745, 2779), 'django.http.HttpResponse', 'HttpResponse', (['"""Wrong http request"""'], {}), "('Wrong http request')\n", (2757, 2779), False, 'from django.http import JsonResponse, HttpResponse\n'), ((2928, 2951), 'json.loads', 'json.loads', (['byte_string'], {}), '(byte_string)\n', (2938, 2951), False, 'import json\n'), ((3216, 3240), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': '(201)'}), '(status=201)\n', (3228, 3240), False, 'from django.http import JsonResponse, HttpResponse\n'), ((3266, 3300), 'django.http.HttpResponse', 'HttpResponse', (['"""Wrong http request"""'], {}), "('Wrong http request')\n", (3278, 3300), False, 'from django.http import JsonResponse, HttpResponse\n'), ((2398, 2440), 'django.http.HttpResponse', 'HttpResponse', (['"""Please enter values first!"""'], {}), "('Please enter values first!')\n", (2410, 2440), False, 'from django.http import JsonResponse, HttpResponse\n'), ((2679, 2719), 'django.http.JsonResponse', 'JsonResponse', (["{'data': data}"], {'safe': '(False)'}), "({'data': data}, safe=False)\n", (2691, 2719), False, 'from django.http import JsonResponse, HttpResponse\n')] |
# -*- coding: utf-8 -*-
"""
@author: <NAME>, 2021
"""
def load_binary(path, dtype="<f"):
    """Read a flat binary file into a 1-D numpy array.

    :param path: file to read
    :param dtype: numpy dtype string; the default "<f" is a
        little-endian 32-bit float
    :return: 1-D numpy array with the raw file contents
    """
    import numpy as np
    with open(path, "rb") as handle:
        return np.fromfile(handle, dtype=dtype)
def write_binary(dataArray, path):
    """Dump a numpy array to *path* as raw binary via ndarray.tofile."""
    with open(path, "wb") as handle:
        dataArray.tofile(handle)
def interpol_texture(im, coords, hexa=False):
    """Sample image values at (sub-pixel) coordinates.

    :param im: image array indexed as [y, x] (may carry trailing colour
        channels, e.g. [y, x, 3])
    :param coords: array of [y, x] sample positions
    :param hexa: if True, return hex colour strings ('#rrggbb');
        assumes channel values in 0-255 (optional, default=False)
    :return: array of hex strings when hexa is True, otherwise the
        interpolated values cast to uint32
    """
    import numpy as np
    from scipy.interpolate import RegularGridInterpolator
    from matplotlib.colors import to_hex

    y = np.arange(im.shape[0])
    x = np.arange(im.shape[1])
    # interpolate data points from the given image im
    interpol = RegularGridInterpolator((y, x), im)
    col = interpol(coords)
    if hexa is True:
        # to_hex expects channel values in [0, 1]
        col /= 255
        # BUG FIX: the hex conversion used to be assigned to an unused
        # temporary `t`, so the raw float values were returned instead.
        col = np.array([to_hex(color) for color in col])
    else:
        col = col.astype('uint32')
    return col
def assign_embedding_colors(points, texPath, flipTexUD=False, flipTexLR=False, rot90=0):
    """Map 2-D embedding coordinates onto a texture image and return one
    hex colour per point.

    :param points: 2-D point coordinates to colour
    :param texPath: path of the texture image to sample
    :param flipTexUD: flip the texture upside-down first (optional)
    :param flipTexLR: flip the texture left-right first (optional)
    :param rot90: number of 90-degree rotations applied to the texture
        (optional)
    :return: array of hex colour strings, one per point
    """
    import imageio
    import numpy as np

    # Load the texture and apply the requested orientation changes.
    tex = np.array(imageio.imread(texPath))
    if flipTexUD:
        tex = np.flipud(tex)
    if flipTexLR:
        tex = np.fliplr(tex)
    if rot90:
        tex = np.rot90(tex, rot90)

    # Shift the points into the positive quadrant, then rescale each
    # axis onto the texture's pixel grid.
    coords = points + np.abs(np.min(points, axis=0))
    coords[:, 0] *= (tex.shape[0] - 1) / np.max(coords[:, 0])
    coords[:, 1] *= (tex.shape[1] - 1) / np.max(coords[:, 1])
    # Guard against rounding drift past the image border.
    coords[:, 0] = np.clip(coords[:, 0], 0, tex.shape[0] - 1)
    coords[:, 1] = np.clip(coords[:, 1], 0, tex.shape[1] - 1)

    # Sample the texture colours as hex strings.
    return interpol_texture(tex, coords, hexa=True)
| [
"numpy.clip",
"numpy.fromfile",
"scipy.interpolate.RegularGridInterpolator",
"numpy.flipud",
"numpy.fliplr",
"matplotlib.colors.to_hex",
"numpy.max",
"numpy.rot90",
"numpy.min",
"imageio.imread",
"numpy.arange"
] | [((659, 681), 'numpy.arange', 'np.arange', (['im.shape[0]'], {}), '(im.shape[0])\n', (668, 681), True, 'import numpy as np\n'), ((690, 712), 'numpy.arange', 'np.arange', (['im.shape[1]'], {}), '(im.shape[1])\n', (699, 712), True, 'import numpy as np\n'), ((783, 818), 'scipy.interpolate.RegularGridInterpolator', 'RegularGridInterpolator', (['(y, x)', 'im'], {}), '((y, x), im)\n', (806, 818), False, 'from scipy.interpolate import RegularGridInterpolator\n'), ((1689, 1731), 'numpy.clip', 'np.clip', (['coords[:, 0]', '(0)', '(tex.shape[0] - 1)'], {}), '(coords[:, 0], 0, tex.shape[0] - 1)\n', (1696, 1731), True, 'import numpy as np\n'), ((1751, 1793), 'numpy.clip', 'np.clip', (['coords[:, 1]', '(0)', '(tex.shape[1] - 1)'], {}), '(coords[:, 1], 0, tex.shape[1] - 1)\n', (1758, 1793), True, 'import numpy as np\n'), ((213, 243), 'numpy.fromfile', 'np.fromfile', (['file'], {'dtype': 'dtype'}), '(file, dtype=dtype)\n', (224, 243), True, 'import numpy as np\n'), ((1243, 1266), 'imageio.imread', 'imageio.imread', (['texPath'], {}), '(texPath)\n', (1257, 1266), False, 'import imageio\n'), ((1301, 1315), 'numpy.flipud', 'np.flipud', (['tex'], {}), '(tex)\n', (1310, 1315), True, 'import numpy as np\n'), ((1349, 1363), 'numpy.fliplr', 'np.fliplr', (['tex'], {}), '(tex)\n', (1358, 1363), True, 'import numpy as np\n'), ((1393, 1413), 'numpy.rot90', 'np.rot90', (['tex', 'rot90'], {}), '(tex, rot90)\n', (1401, 1413), True, 'import numpy as np\n'), ((1565, 1585), 'numpy.max', 'np.max', (['coords[:, 0]'], {}), '(coords[:, 0])\n', (1571, 1585), True, 'import numpy as np\n'), ((1627, 1647), 'numpy.max', 'np.max', (['coords[:, 1]'], {}), '(coords[:, 1])\n', (1633, 1647), True, 'import numpy as np\n'), ((1500, 1522), 'numpy.min', 'np.min', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (1506, 1522), True, 'import numpy as np\n'), ((909, 922), 'matplotlib.colors.to_hex', 'to_hex', (['color'], {}), '(color)\n', (915, 922), False, 'from matplotlib.colors import to_hex\n')] |
#!/usr/bin/env python -W ignore::DeprecationWarning
# -*- coding: utf-8 -*-
"""Adenine analyzer module."""
######################################################################
# Copyright (C) 2016 <NAME>, <NAME>, <NAME>
#
# FreeBSD License
######################################################################
import os
import shutil
import logging
import matplotlib; matplotlib.use('AGG')
import multiprocessing as mp
import numpy as np
import pandas as pd
import seaborn as sns
import subprocess
try:
import cPickle as pkl
except:
import pickle as pkl
from sklearn import metrics
from adenine.core import plotting
from adenine.utils import scores
from adenine.utils.extra import title_from_filename
from adenine.utils.extra import timed, items_iterator
# to save info before logging is loaded
GLOBAL_INFO = 'matplotlib backend set to AGG'
def est_clst_perf(root, data_in, labels=None, t_labels=None, model=None,
                  metric='euclidean'):
    """Estimate the clustering performance.

    This estimates the clustering performance by means of several indexes.
    Results are saved in a tree-like structure in the root folder, both as a
    human-readable ``*_scores.txt`` report and as a ``*_scores.pkl`` dump.

    Parameters
    -----------
    root : string
        The root path for the output creation.
    data_in : array of float, shape : (n_samples, n_dimensions)
        The low space embedding estimated by the dimensionality reduction and
        manifold learning algorithm.
    labels : array of float, shape : n_samples
        The label assignment performed by the clustering algorithm.
    t_labels : array of float, shape : n_samples
        The true label vector; None if missing.
    model : sklearn or sklearn-like object
        An instance of the class that evaluates a step. In particular this must
        be a clustering model provided with the clusters_centers_ attribute
        (e.g. KMeans).
    metric : string
        The metric used during the clustering algorithms.
    """
    perf_out = dict()
    try:
        if hasattr(model, 'inertia_'):
            # Sum of distances of samples to their closest cluster center.
            perf_out['inertia'] = model.inertia_
        perf_out['silhouette'] = metrics.silhouette_score(data_in, labels, metric=metric)
        if t_labels is not None:
            # the next indexes need a gold standard
            perf_out['ari'] = metrics.adjusted_rand_score(t_labels, labels)
            perf_out['ami'] = metrics.adjusted_mutual_info_score(t_labels, labels)
            perf_out['homogeneity'] = metrics.homogeneity_score(t_labels, labels)
            perf_out['completeness'] = metrics.completeness_score(t_labels, labels)
            perf_out['v_measure'] = metrics.v_measure_score(t_labels, labels)
            # [2] picks the f-score out of the (precision, recall, fscore)
            # triple returned by the scores helper.
            perf_out['fscore'] = scores.precision_recall_fscore(
                scores.confusion_matrix(t_labels, labels)[0])[2]
    except ValueError as e:
        logging.warning("Clustering performance evaluation failed for %s. "
                        "Error: %s", model, str(e))
        # perf_out = {'empty': 0.0}
        # Placeholder key so the report below still renders a row.
        perf_out['###'] = 0.
    # Define the filename
    filename = os.path.join(root, os.path.basename(root))
    # Human-readable score report.
    with open(filename + '_scores.txt', 'w') as f:
        f.write("------------------------------------\n"
                "Adenine: Clustering Performance for \n"
                "\n" + title_from_filename(root, " --> ") + "\n"
                "------------------------------------\n")
        f.write("Index Name{}|{}Index Score\n".format(' ' * 10, ' ' * 4))
        f.write("------------------------------------\n")
        for elem in sorted(perf_out.keys()):
            f.write("{}{}|{}{:.4}\n"
                    .format(elem, ' ' * (20 - len(elem)), ' ' * 4,
                            perf_out[elem]))
        f.write("------------------------------------\n")
    # pkl Dump
    # Machine-readable copy, later aggregated by make_df_clst_perf().
    filename += '_scores.pkl'
    with open(filename, 'wb') as f:
        pkl.dump(perf_out, f)
    logging.info("Dumped : %s", filename)
def make_df_clst_perf(root):
    """Summarize all the clustering performance estimations.

    Given the output file produced by est_clst_perf(), this function groups all
    of them together in friendly text and latex files, and saves the two files
    produced in a tree-like structure in the root folder.

    Parameters
    -----------
    root : string
        The root path for the output creation.
    """
    # Fixed column order shared by the .txt and .tex reports.
    measures = ('ami', 'ari', 'completeness', 'homogeneity', 'v_measure',
                'inertia', 'silhouette', 'fscore')
    df = pd.DataFrame(columns=['pipeline'] + list(measures))
    # Collect every *_scores.pkl dumped by est_clst_perf() under root.
    for root_, _, filenames in os.walk(root):
        for fn in filenames:
            if fn.endswith('_scores.pkl'):
                with open(os.path.join(root_, fn), 'rb') as f:
                    perf_out = pkl.load(f)
                perf_out['pipeline'] = title_from_filename(root_,
                                                           step_sep=" --> ")
                # NOTE(review): DataFrame.append was removed in pandas 2.0;
                # this code assumes an older pandas -- confirm pinned version.
                df = df.append(perf_out, ignore_index=True)
    df = df.fillna('')
    nan_val = '---'
    pipe_header = 'preprocess --> dim red --> clustering'
    # Column widths: widest pipeline string / widest formatted score.
    size_pipe = max([len(p) for p in df['pipeline']] + [len(pipe_header)])
    sizes = [3 + max([len('{: .3}'.format(p)) if p != '' else len(nan_val)
                      for p in df[mm]] + [len(mm)]) for mm in measures]
    # find the best value for each score
    best_scores = {
        mm: max([p for p in df[mm] if p != ''] or [np.nan]) for mm in measures}
    with open(os.path.join(root, 'summary_scores.txt'), 'w') as f, \
            open(os.path.join(root, 'summary_scores.tex'), 'w') as g:
        # Right-aligned column headers for the plain-text table.
        measures_header = [' ' * max(size - len(x) - 2, 1) + x + '  '
                           for size, x in zip(sizes, measures)]
        header = "{}{}|{}\n" \
            .format(pipe_header,
                    ' ' * (size_pipe - len(pipe_header)),
                    '|'.join(measures_header))
        f.write("-" * len(header) + "\n")
        f.write("Adenine: Clustering Performance for each pipeline\n")
        f.write("-" * len(header) + "\n")
        f.write(header)
        f.write("-" * len(header) + "\n")
        # LaTeX preamble + table header (raw strings keep the backslashes).
        g.write(r"\documentclass{article}" "\n"
                r"\usepackage{adjustbox}" "\n"
                r"\usepackage{caption}" "\n"
                r"\captionsetup[table]{skip=10pt}" "\n"
                r"\begin{document}" "\n"
                r"\begin{table}[h!]" "\n"
                r"\centering" "\n"
                r"\caption{Adenine: Clustering Performance for each pipeline}" "\n"
                r"\label{clust-perf}" "\n"
                r"\begin{adjustbox}{max width=\textwidth}" "\n"
                r"\begin{tabular}{l|rc|rc|rc|rc|rc|rc|rc|rc}" "\n"
                r"\textbf{preprocess $\to$ dim red $\to$ clustering} & \textbf{ami} "
                r"&& \textbf{ari} && \textbf{completeness} && \textbf{homogeneity} "
                r"&& \textbf{v\_measure} && \textbf{inertia} && \textbf{silhouette} "
                r"&& \textbf{fscore}"
                r" & \\ \hline " "\n")
        # One row per pipeline; the best value of each column gets a '*'.
        for _ in df.iterrows():
            row = _[1]
            all_measures = ['{: .3}'.format(row[mm]) if row[mm] != ''
                            else nan_val for mm in measures]
            stars = [' *' if row[mm] == best_scores[mm] else '  ' for mm in measures]
            row_measure = [' ' * max(size - len(x) - 2, 1) + x + ss
                           for size, x, ss in zip(sizes, all_measures, stars)]
            f.write("{}{}|{}\n"
                    .format(
                        row['pipeline'],
                        ' ' * (size_pipe - len(row['pipeline'])),
                        '|'.join(row_measure)
                    ))
            row_tex = [x + r'&' + ss for x, ss in zip(all_measures, stars)]
            g.write(r"{} & {} \\" "\n"
                    .format(
                        row['pipeline'].replace('-->', r'$\to$'),
                        r'&'.join(row_tex)
                    ))
        f.write("-" * len(header) + "\n")
        g.write(r"\hline" "\n"
                r"\end{tabular}" "\n"
                r"\end{adjustbox}" "\n"
                r"\end{table}" "\n"
                r"\end{document}")
def get_step_attributes(step, pos):
    """Get the attributes of the input step.

    This function returns the attributes (i.e. level, name, outcome) of the
    input step. This comes handy when dealing with steps with more than one
    parameter (e.g. KernelPCA 'poly' or 'rbf').

    Parameters
    -----------
    step : list
        A step coded by ade_run.py as
        [name, level, param, data_out, data_in, mdl obj, voronoi_mdl_obj]
    pos : int
        The position of the step inside the pipeline.

    Returns
    -------
    (name, level, param, data_out, data_in, mdl_obj, voronoi_mdl_obj, metric)
        where ``name`` is a unique step name (e.g. KernelPCA_rbf),
        ``level`` is one of {imputing, preproc, dimred, clustering},
        ``param`` is the estimator's parameter dictionary and ``metric``
        is the affinity/distance used (defaults to 'euclidean').
    """
    name, level, param, data_out, \
        data_in, mdl_obj, voronoi_mdl_obj = step[:7]
    if level.lower() == 'none':
        # Steps without an explicit level: infer it from the position
        # inside the pipeline.
        if pos == 0:
            level = 'preproc'
        elif pos == 1:
            level = 'dimred'

    # Imputing level
    if param.get('missing_values', ''):
        name += '-' + param['missing_values']
    if param.get('strategy', ''):
        name += '_' + param['strategy']

    # Preprocessing level
    if param.get('norm', ''):  # normalize
        name += '_' + param['norm']
    elif param.get('feature_range', ''):  # minmax
        name += "_({} - {})".format(*param['feature_range'])

    # Append additional parameters in the step name
    if name == 'KernelPCA':
        name += '_' + param['kernel']
    elif name == 'LLE':
        name += '_' + param['method']
    elif name == 'MDS':
        if param['metric']:
            name += '_metric'
        else:
            name += '_nonmetric'
    elif name == 'Hierarchical':
        name += '_' + param['affinity'] + '_' + param['linkage']
    elif name == 'SE':
        name += '_' + param['affinity']

    try:
        # The number of clusters can live in several places depending on
        # the estimator type; take the first non-zero candidate.
        n_clusters = param.get('n_clusters', 0) or \
            param.get('best_estimator_', dict()).get('cluster_centers_',
                                                     np.empty(0)).shape[0] or \
            param.get('cluster_centers_', np.empty(0)).shape[0] or \
            mdl_obj.__dict__.get('n_clusters', 0) or \
            mdl_obj.__dict__.get('cluster_centers_', np.empty(0)).shape[0]
    except Exception:
        # BUG FIX: was `except StandardError`, which does not exist in
        # Python 3 and raised a NameError instead of falling back to 0.
        n_clusters = 0
    if n_clusters > 0:
        name += '_' + str(n_clusters) + '-clusts'

    metric = param.get('affinity', None) or 'euclidean'
    return (name, level, param, data_out, data_in, mdl_obj,
            voronoi_mdl_obj, metric)
def analysis_worker(elem, root, y, feat_names, index, lock):
    """Parallel pipelines analysis.

    Parameters
    ----------
    elem : list
        The first two element of this list are the pipe_id and all the data of
        that pipeline.
    root : string
        The root path for the output creation.
    y : array of float, shape : n_samples
        The label vector; None if missing.
    feat_names : array of integers (or strings), shape : n_features
        The feature names; a range of numbers if missing.
    index : list of integers (or strings)
        This is the samples identifier, if provided as first column (or row)
        of the input file. Otherwise it is just an incremental range of size
        n_samples.
    lock : multiprocessing.synchronize.Lock
        Obtained by multiprocessing.Lock().
        Needed for optional creation of directories.
    """
    # Getting pipeID and content
    pipe, content = elem[:2]
    out_folder = ''  # where the results will be placed
    logging.info("Start %s --", pipe)
    for i, step in enumerate(sorted(content.keys())):
        # Tree-like folder structure definition
        step_name, step_level, step_param, step_out, step_in, mdl_obj, \
            voronoi_mdl_obj, metric = get_step_attributes(content[step], pos=i)
        logging.info("LEVEL %s : %s", step_level, step_name)

        # Output folder definition & creation (locked: worker processes
        # may race on shared parent directories).
        out_folder = os.path.join(out_folder, step_name)
        rootname = os.path.join(root, out_folder)
        with lock:
            if not os.path.exists(rootname):
                os.makedirs(rootname)

        # Launch analysis
        if step_level == 'dimred':
            plotting.scatter(root=rootname, data_in=step_out, labels=y, true_labels=True)
            plotting.silhouette(root=rootname, labels=y, data_in=step_out, model=mdl_obj)
            if hasattr(mdl_obj, 'explained_variance_ratio_'):
                plotting.pcmagnitude(root=rootname,
                                     points=mdl_obj.explained_variance_ratio_,
                                     title='Explained variance ratio')
            if hasattr(mdl_obj, 'lambdas_'):
                plotting.pcmagnitude(root=rootname,
                                     points=mdl_obj.lambdas_/np.sum(mdl_obj.lambdas_),
                                     title='Normalized eigenvalues of the centered'
                                           ' kernel matrix')
        if step_level == 'clustering':
            if hasattr(mdl_obj, 'affinity_matrix_'):
                # BUG FIX: this was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; narrowed to Exception.
                try:
                    n_clusters = mdl_obj.__dict__.get('cluster_centers_',
                                                      np.empty(0)).shape[0]
                except Exception:
                    n_clusters = 0
                if hasattr(mdl_obj, 'n_clusters'):
                    n_clusters = mdl_obj.n_clusters
                plotting.eigs(root=rootname, affinity=mdl_obj.affinity_matrix_,
                              n_clusters=n_clusters,
                              title='Eigenvalues of the graph associated to '
                                    'the affinity matrix')
            if hasattr(mdl_obj, 'cluster_centers_'):
                _est_name = mdl_obj.__dict__.get('estimator_name', '') or \
                    type(mdl_obj).__name__
                if _est_name != 'AffinityPropagation':
                    # disable the voronoi plot for affinity prop
                    plotting.voronoi(root=rootname, labels=y, data_in=step_in,
                                     model=voronoi_mdl_obj)
            elif hasattr(mdl_obj, 'n_leaves_'):
                plotting.tree(root=rootname, data_in=step_in,
                              labels=y, index=index, model=mdl_obj)
                plotting.dendrogram(root=rootname, data_in=step_in,
                                    labels=y, index=index, model=mdl_obj)
            plotting.scatter(root=rootname, labels=step_out,
                             data_in=step_in, model=mdl_obj)
            plotting.silhouette(root=rootname, labels=step_out,
                                data_in=step_in, model=mdl_obj)
            est_clst_perf(root=rootname, data_in=step_in, labels=step_out,
                          t_labels=y, model=mdl_obj, metric=metric)
@timed
def analyze(input_dict, root, y=None, feat_names=None, index=None, **kwargs):
    """Analyze the results of ade_run.

    This function analyze the dictionary generated by ade_run, generates the
    plots, and saves them in a tree-like folder structure in rootFolder.

    Parameters
    -----------
    input_dict : dictionary
        The dictionary created by ade_run.py on some data.
    root : string
        The root path for output creation.
    y : array of float, shape : n_samples
        The label vector; None if missing.
    feat_names : array of integers (or strings), shape : n_features
        The feature names; a range of numbers if missing.
    index : list of integers (or strings)
        This is the samples identifier, if provided as first column (or row)
        of the input file. Otherwise it is just an incremental range of size
        n_samples.
    kwargs : dictionary
        Additional optional parameters. In particular it can contain
        'plotting_context' and 'file_format' variables, if specified in
        the config file.
    """
    if GLOBAL_INFO:
        logging.info(GLOBAL_INFO)
    if kwargs.get('plotting_context', None):
        sns.set_context(kwargs.get('plotting_context'))

    # Resolve the output image format (fall back to png on bad input).
    file_formats = ('png', 'pdf')
    ff = kwargs.get('file_format', file_formats[0]).lower()
    if ff not in file_formats:
        logging.warning("File format unknown. "
                        "Please select one of %s", file_formats)
        plotting.DEFAULT_EXT = file_formats[0]
    else:
        plotting.DEFAULT_EXT = ff
    logging.info("File format set to %s", plotting.DEFAULT_EXT)

    # One worker process per pipeline; the lock serialises directory
    # creation across workers.
    lock = mp.Lock()
    ps = []
    for elem in items_iterator(input_dict):
        p = mp.Process(target=analysis_worker,
                       args=(elem, root, y, feat_names, index, lock))
        p.start()
        ps.append(p)
    for p in ps:
        p.join()

    # Create summary_scores.{txt, tex}
    make_df_clst_perf(root)

    # Compile tex
    try:
        with open(os.devnull, 'w') as devnull:
            # Someone may not have pdflatex installed
            subprocess.call(["pdflatex",
                             os.path.join(root, "summary_scores.tex")],
                            stdout=devnull, stderr=devnull)
        logging.info("PDF compilation done.")
        shutil.move("summary_scores.pdf",
                    os.path.join(root, "summary_scores.pdf"))
        os.remove("summary_scores.aux")
        os.remove("summary_scores.log")
        logging.info(".aux and .log cleaned")
    except Exception:
        # BUG FIX: was `except StandardError` -- that name does not exist
        # in Python 3, so this fallback raised NameError instead.
        from sys import platform
        logging.warning("Suitable pdflatex installation not found.")
        if platform not in ["linux", "linux2", "darwin"]:
            # BUG FIX: the two concatenated fragments were missing a
            # separating space ("...supportsummary_scores.tex...").
            logging.warning("Your operating system may not support "
                            "summary_scores.tex automatic pdf compilation.")
| [
"sklearn.metrics.homogeneity_score",
"adenine.utils.scores.confusion_matrix",
"adenine.core.plotting.eigs",
"multiprocessing.Process",
"sklearn.metrics.adjusted_rand_score",
"adenine.utils.extra.title_from_filename",
"sklearn.metrics.completeness_score",
"adenine.core.plotting.silhouette",
"logging.... | [((372, 393), 'matplotlib.use', 'matplotlib.use', (['"""AGG"""'], {}), "('AGG')\n", (386, 393), False, 'import matplotlib\n'), ((3961, 3998), 'logging.info', 'logging.info', (['"""Dumped : %s"""', 'filename'], {}), "('Dumped : %s', filename)\n", (3973, 3998), False, 'import logging\n'), ((4631, 4644), 'os.walk', 'os.walk', (['root'], {}), '(root)\n', (4638, 4644), False, 'import os\n'), ((17233, 17292), 'logging.info', 'logging.info', (['"""File format set to %s"""', 'plotting.DEFAULT_EXT'], {}), "('File format set to %s', plotting.DEFAULT_EXT)\n", (17245, 17292), False, 'import logging\n'), ((17304, 17313), 'multiprocessing.Lock', 'mp.Lock', ([], {}), '()\n', (17311, 17313), True, 'import multiprocessing as mp\n'), ((17342, 17368), 'adenine.utils.extra.items_iterator', 'items_iterator', (['input_dict'], {}), '(input_dict)\n', (17356, 17368), False, 'from adenine.utils.extra import timed, items_iterator\n'), ((2186, 2242), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['data_in', 'labels'], {'metric': 'metric'}), '(data_in, labels, metric=metric)\n', (2210, 2242), False, 'from sklearn import metrics\n'), ((3145, 3167), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (3161, 3167), False, 'import os\n'), ((3935, 3956), 'pickle.dump', 'pkl.dump', (['perf_out', 'f'], {}), '(perf_out, f)\n', (3943, 3956), True, 'import pickle as pkl\n'), ((12789, 12824), 'os.path.join', 'os.path.join', (['out_folder', 'step_name'], {}), '(out_folder, step_name)\n', (12801, 12824), False, 'import os\n'), ((12844, 12874), 'os.path.join', 'os.path.join', (['root', 'out_folder'], {}), '(root, out_folder)\n', (12856, 12874), False, 'import os\n'), ((16771, 16796), 'logging.info', 'logging.info', (['GLOBAL_INFO'], {}), '(GLOBAL_INFO)\n', (16783, 16796), False, 'import logging\n'), ((17033, 17110), 'logging.warning', 'logging.warning', (['"""File format unknown. Please select one of %s"""', 'file_formats'], {}), "('File format unknown. 
Please select one of %s', file_formats)\n", (17048, 17110), False, 'import logging\n'), ((17382, 17467), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'analysis_worker', 'args': '(elem, root, y, feat_names, index, lock)'}), '(target=analysis_worker, args=(elem, root, y, feat_names, index,\n lock))\n', (17392, 17467), True, 'import multiprocessing as mp\n'), ((18093, 18124), 'os.remove', 'os.remove', (['"""summary_scores.aux"""'], {}), "('summary_scores.aux')\n", (18102, 18124), False, 'import os\n'), ((18133, 18164), 'os.remove', 'os.remove', (['"""summary_scores.log"""'], {}), "('summary_scores.log')\n", (18142, 18164), False, 'import os\n'), ((18173, 18210), 'logging.info', 'logging.info', (['""".aux and .log cleaned"""'], {}), "('.aux and .log cleaned')\n", (18185, 18210), False, 'import logging\n'), ((2358, 2403), 'sklearn.metrics.adjusted_rand_score', 'metrics.adjusted_rand_score', (['t_labels', 'labels'], {}), '(t_labels, labels)\n', (2385, 2403), False, 'from sklearn import metrics\n'), ((2434, 2486), 'sklearn.metrics.adjusted_mutual_info_score', 'metrics.adjusted_mutual_info_score', (['t_labels', 'labels'], {}), '(t_labels, labels)\n', (2468, 2486), False, 'from sklearn import metrics\n'), ((2525, 2568), 'sklearn.metrics.homogeneity_score', 'metrics.homogeneity_score', (['t_labels', 'labels'], {}), '(t_labels, labels)\n', (2550, 2568), False, 'from sklearn import metrics\n'), ((2608, 2652), 'sklearn.metrics.completeness_score', 'metrics.completeness_score', (['t_labels', 'labels'], {}), '(t_labels, labels)\n', (2634, 2652), False, 'from sklearn import metrics\n'), ((2689, 2730), 'sklearn.metrics.v_measure_score', 'metrics.v_measure_score', (['t_labels', 'labels'], {}), '(t_labels, labels)\n', (2712, 2730), False, 'from sklearn import metrics\n'), ((5507, 5547), 'os.path.join', 'os.path.join', (['root', '"""summary_scores.txt"""'], {}), "(root, 'summary_scores.txt')\n", (5519, 5547), False, 'import os\n'), ((5579, 5619), 'os.path.join', 
'os.path.join', (['root', '"""summary_scores.tex"""'], {}), "(root, 'summary_scores.tex')\n", (5591, 5619), False, 'import os\n'), ((13051, 13128), 'adenine.core.plotting.scatter', 'plotting.scatter', ([], {'root': 'rootname', 'data_in': 'step_out', 'labels': 'y', 'true_labels': '(True)'}), '(root=rootname, data_in=step_out, labels=y, true_labels=True)\n', (13067, 13128), False, 'from adenine.core import plotting\n'), ((13141, 13218), 'adenine.core.plotting.silhouette', 'plotting.silhouette', ([], {'root': 'rootname', 'labels': 'y', 'data_in': 'step_out', 'model': 'mdl_obj'}), '(root=rootname, labels=y, data_in=step_out, model=mdl_obj)\n', (13160, 13218), False, 'from adenine.core import plotting\n'), ((15273, 15358), 'adenine.core.plotting.scatter', 'plotting.scatter', ([], {'root': 'rootname', 'labels': 'step_out', 'data_in': 'step_in', 'model': 'mdl_obj'}), '(root=rootname, labels=step_out, data_in=step_in, model=mdl_obj\n )\n', (15289, 15358), False, 'from adenine.core import plotting\n'), ((15395, 15483), 'adenine.core.plotting.silhouette', 'plotting.silhouette', ([], {'root': 'rootname', 'labels': 'step_out', 'data_in': 'step_in', 'model': 'mdl_obj'}), '(root=rootname, labels=step_out, data_in=step_in, model=\n mdl_obj)\n', (15414, 15483), False, 'from adenine.core import plotting\n'), ((17943, 17980), 'logging.info', 'logging.info', (['"""PDF compilation done."""'], {}), "('PDF compilation done.')\n", (17955, 17980), False, 'import logging\n'), ((18043, 18083), 'os.path.join', 'os.path.join', (['root', '"""summary_scores.pdf"""'], {}), "(root, 'summary_scores.pdf')\n", (18055, 18083), False, 'import os\n'), ((18278, 18338), 'logging.warning', 'logging.warning', (['"""Suitable pdflatex installation not found."""'], {}), "('Suitable pdflatex installation not found.')\n", (18293, 18338), False, 'import logging\n'), ((4863, 4907), 'adenine.utils.extra.title_from_filename', 'title_from_filename', (['root_'], {'step_sep': '""" --> """'}), "(root_, step_sep=' --> 
')\n", (4882, 4907), False, 'from adenine.utils.extra import title_from_filename\n'), ((12913, 12937), 'os.path.exists', 'os.path.exists', (['rootname'], {}), '(rootname)\n', (12927, 12937), False, 'import os\n'), ((12955, 12976), 'os.makedirs', 'os.makedirs', (['rootname'], {}), '(rootname)\n', (12966, 12976), False, 'import os\n'), ((13298, 13414), 'adenine.core.plotting.pcmagnitude', 'plotting.pcmagnitude', ([], {'root': 'rootname', 'points': 'mdl_obj.explained_variance_ratio_', 'title': '"""Explained variance ratio"""'}), "(root=rootname, points=mdl_obj.\n explained_variance_ratio_, title='Explained variance ratio')\n", (13318, 13414), False, 'from adenine.core import plotting\n'), ((14255, 14419), 'adenine.core.plotting.eigs', 'plotting.eigs', ([], {'root': 'rootname', 'affinity': 'mdl_obj.affinity_matrix_', 'n_clusters': 'n_clusters', 'title': '"""Eigenvalues of the graph associated to the affinity matrix"""'}), "(root=rootname, affinity=mdl_obj.affinity_matrix_, n_clusters=\n n_clusters, title=\n 'Eigenvalues of the graph associated to the affinity matrix')\n", (14268, 14419), False, 'from adenine.core import plotting\n'), ((18409, 18520), 'logging.warning', 'logging.warning', (['"""Your operating system may not supportsummary_scores.tex automatic pdf compilation."""'], {}), "(\n 'Your operating system may not supportsummary_scores.tex automatic pdf compilation.'\n )\n", (18424, 18520), False, 'import logging\n'), ((3357, 3391), 'adenine.utils.extra.title_from_filename', 'title_from_filename', (['root', '""" --> """'], {}), "(root, ' --> ')\n", (3376, 3391), False, 'from adenine.utils.extra import title_from_filename\n'), ((4812, 4823), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (4820, 4823), True, 'import pickle as pkl\n'), ((14821, 14907), 'adenine.core.plotting.voronoi', 'plotting.voronoi', ([], {'root': 'rootname', 'labels': 'y', 'data_in': 'step_in', 'model': 'voronoi_mdl_obj'}), '(root=rootname, labels=y, data_in=step_in, model=\n 
voronoi_mdl_obj)\n', (14837, 14907), False, 'from adenine.core import plotting\n'), ((15004, 15092), 'adenine.core.plotting.tree', 'plotting.tree', ([], {'root': 'rootname', 'data_in': 'step_in', 'labels': 'y', 'index': 'index', 'model': 'mdl_obj'}), '(root=rootname, data_in=step_in, labels=y, index=index, model=\n mdl_obj)\n', (15017, 15092), False, 'from adenine.core import plotting\n'), ((15134, 15227), 'adenine.core.plotting.dendrogram', 'plotting.dendrogram', ([], {'root': 'rootname', 'data_in': 'step_in', 'labels': 'y', 'index': 'index', 'model': 'mdl_obj'}), '(root=rootname, data_in=step_in, labels=y, index=index,\n model=mdl_obj)\n', (15153, 15227), False, 'from adenine.core import plotting\n'), ((17828, 17868), 'os.path.join', 'os.path.join', (['root', '"""summary_scores.tex"""'], {}), "(root, 'summary_scores.tex')\n", (17840, 17868), False, 'import os\n'), ((2813, 2854), 'adenine.utils.scores.confusion_matrix', 'scores.confusion_matrix', (['t_labels', 'labels'], {}), '(t_labels, labels)\n', (2836, 2854), False, 'from adenine.utils import scores\n'), ((4744, 4767), 'os.path.join', 'os.path.join', (['root_', 'fn'], {}), '(root_, fn)\n', (4756, 4767), False, 'import os\n'), ((10836, 10847), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (10844, 10847), True, 'import numpy as np\n'), ((10905, 10916), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (10913, 10916), True, 'import numpy as np\n'), ((11040, 11051), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (11048, 11051), True, 'import numpy as np\n'), ((13642, 13666), 'numpy.sum', 'np.sum', (['mdl_obj.lambdas_'], {}), '(mdl_obj.lambdas_)\n', (13648, 13666), True, 'import numpy as np\n'), ((14054, 14065), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (14062, 14065), True, 'import numpy as np\n')] |
from numba import jit
import numpy as np
import cProfile
import timeit
def L1distances(a, b):
    """Pairwise L1 (absolute) distance matrix between two sequences.

    Entry [i][j] of the returned n x m list of lists is abs(a[i] - b[j]);
    entries involving a missing value (NaN) are set to 0 and a warning
    is printed.

    BUG FIX: the original NaN scan compared float elements against the
    *string* 'nan' (always False for floats) and read ``b[j]`` with a
    stale loop index, so the NaN branch could never trigger correctly.
    """
    n = len(a)
    m = len(b)

    def _is_missing(v):
        # True for the literal string 'nan' or an actual float NaN
        # (NaN is the only value that compares unequal to itself).
        return v == 'nan' or v != v

    distance = [[0 for _ in range(m)] for _ in range(n)]
    containsNaN = any(_is_missing(v) for v in a) or any(_is_missing(v) for v in b)
    if containsNaN:
        print("Warning: at least one of the time series contains NaN values. Time Warping performance will be impacted.")
        for i in range(n):
            for j in range(m):
                if _is_missing(a[i]) or _is_missing(b[j]):
                    distance[i][j] = 0
                else:
                    distance[i][j] = abs(a[i] - b[j])
    else:
        for i in range(n):
            for j in range(m):
                distance[i][j] = abs(a[i] - b[j])
    return distance
def L1distancesNumpy(a, b):
    """Pairwise L1 distance matrix |a[i] - b[j]| filled element-by-element
    into a pre-allocated NumPy array.

    The explicit double loop is kept on purpose: this variant exists to
    benchmark NumPy storage with Python-level iteration.
    """
    rows = len(a)
    cols = len(b)
    distance = np.zeros((rows, cols))
    for i, left in enumerate(a):
        for j, right in enumerate(b):
            distance[i, j] = abs(left - right)
    return distance
@jit
def L1distancesNumpyFast(a, b):
    """Numba-compiled variant of L1distancesNumpy: fills a NumPy matrix
    with the pairwise absolute differences of the two series.

    Plain range-based indexing is used so the loop stays trivially
    compilable by numba.
    """
    rows = len(a)
    cols = len(b)
    distance = np.zeros((rows, cols))
    for i in range(rows):
        left = a[i]
        for j in range(cols):
            distance[i, j] = abs(left - b[j])
    return distance
@jit
def L1distancesFast(a, b):
    """Numba-compiled pairwise L1 distances using nested Python lists.

    Bug fix: the original seeded ``distance`` with a bare float ``0.0``
    as its first element instead of a row list, so ``distance[0][j]``
    raised TypeError on every call (which is presumably why its
    profiling run is commented out in this script).  The matrix is now
    built as a proper list of n rows of m zeros before being filled.

    Returns:
        List of n lists, each of m floats, holding |a[i] - b[j]|.
    """
    n = len(a)
    m = len(b)
    # Build the n x m zero matrix with explicit loops (numba-friendly).
    distance = []
    for i in range(n):
        row = []
        for j in range(m):
            row.append(0.0)
        distance.append(row)
    for i in range(n):
        for j in range(m):
            distance[i][j] = abs(a[i] - b[j])
    return distance
# --- Benchmark driver -------------------------------------------------
# Two sample signals of length l, kept both as plain Python lists and as
# NumPy arrays so each implementation gets its natural input type.
n = 2
l = 1000
ts = list(np.linspace(0, n * np.pi, l))
x = list(np.sin(ts))
y = list(np.cos(ts))
npX = np.array(x)
npY = np.array(y)

# One profiled call per implementation to see where the time goes.
cProfile.run("L1distances(x,y)", sort="tottime")
# cProfile.run("L1distancesFast(x,y)",sort="tottime")
cProfile.run("L1distancesNumpy(npX,npY)", sort="tottime")
cProfile.run("L1distancesNumpyFast(npX,npY)", sort="tottime")


def test1():
    """Time the pure-Python implementation."""
    L1distances(x, y)
    return 0


def test2():
    """Time the NumPy-array implementation."""
    L1distancesNumpy(npX, npY)
    return 0


def test3():
    """Time the Numba-compiled implementation."""
    L1distancesNumpyFast(npX, npY)
    return 0


# Bug fix: timeit.timeit's default number=1000000 would repeat a
# 1000x1000 distance computation a million times (effectively never
# finishing), and the returned timings were silently discarded.  Run a
# small explicit number of repetitions and report the totals.
print(timeit.timeit(test1, number=10))
print(timeit.timeit(test2, number=10))
print(timeit.timeit(test3, number=10))
"cProfile.run",
"numpy.sin",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.cos",
"timeit.timeit"
] | [((1470, 1481), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1478, 1481), True, 'import numpy as np\n'), ((1488, 1499), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1496, 1499), True, 'import numpy as np\n'), ((1501, 1549), 'cProfile.run', 'cProfile.run', (['"""L1distances(x,y)"""'], {'sort': '"""tottime"""'}), "('L1distances(x,y)', sort='tottime')\n", (1513, 1549), False, 'import cProfile\n'), ((1602, 1659), 'cProfile.run', 'cProfile.run', (['"""L1distancesNumpy(npX,npY)"""'], {'sort': '"""tottime"""'}), "('L1distancesNumpy(npX,npY)', sort='tottime')\n", (1614, 1659), False, 'import cProfile\n'), ((1659, 1720), 'cProfile.run', 'cProfile.run', (['"""L1distancesNumpyFast(npX,npY)"""'], {'sort': '"""tottime"""'}), "('L1distancesNumpyFast(npX,npY)', sort='tottime')\n", (1671, 1720), False, 'import cProfile\n'), ((1882, 1902), 'timeit.timeit', 'timeit.timeit', (['test1'], {}), '(test1)\n', (1895, 1902), False, 'import timeit\n'), ((1903, 1923), 'timeit.timeit', 'timeit.timeit', (['test2'], {}), '(test2)\n', (1916, 1923), False, 'import timeit\n'), ((1924, 1944), 'timeit.timeit', 'timeit.timeit', (['test3'], {}), '(test3)\n', (1937, 1944), False, 'import timeit\n'), ((791, 807), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (799, 807), True, 'import numpy as np\n'), ((978, 994), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (986, 994), True, 'import numpy as np\n'), ((1393, 1421), 'numpy.linspace', 'np.linspace', (['(0)', '(n * np.pi)', 'l'], {}), '(0, n * np.pi, l)\n', (1404, 1421), True, 'import numpy as np\n'), ((1430, 1440), 'numpy.sin', 'np.sin', (['ts'], {}), '(ts)\n', (1436, 1440), True, 'import numpy as np\n'), ((1451, 1461), 'numpy.cos', 'np.cos', (['ts'], {}), '(ts)\n', (1457, 1461), True, 'import numpy as np\n')] |
import os
import glob
import errno
import random
import urllib.request as urllib
import numpy as np
from scipy.io import loadmat
class CWRU:
    """Loader for the CWRU bearing-fault dataset.

    Downloads any missing .mat files listed in metadata.txt into
    <cwd>/Datasets/CWRU, slices the drive-end (DE_time) signal of each
    recording into clips of `length` samples, and splits them 3:1 into
    deterministically shuffled train/test arrays (X_train/y_train,
    X_test/y_test).
    """

    def __init__(self, exp, rpm, length):
        # Validate the experiment name and motor speed against the
        # values present in metadata.txt.
        if exp not in ('12DriveEndFault', '12FanEndFault', '48DriveEndFault'):
            print("wrong experiment name: {}".format(exp))
            exit(1)
        if rpm not in ('1797', '1772', '1750', '1730'):
            print("wrong rpm value: {}".format(rpm))
            exit(1)
        # work directory of all data
        work_dir = os.getcwd()
        rdir = os.path.join(os.path.expanduser(work_dir), 'Datasets/CWRU')
        fmeta = os.path.join(os.path.dirname(__file__), 'metadata.txt')
        # Use a context manager so the metadata file handle is closed.
        with open(fmeta) as fh:
            all_lines = fh.readlines()
        lines = []
        for line in all_lines:
            l = line.split()
            # Normal-baseline recordings are always included alongside
            # the requested fault experiment.
            if (l[0] == exp or l[0] == 'NormalBaseline') and l[1] == rpm:
                lines.append(l)

        self.length = length  # sequence length
        self._load_and_slice_data(rdir, lines)
        # shuffle training and test arrays
        self._shuffle()
        self.labels = tuple(line[2] for line in lines)
        self.nclasses = len(self.labels)  # number of classes

    def _mkdir(self, path):
        """Create `path` (and parents); tolerate it already existing,
        abort the program on any other failure."""
        try:
            os.makedirs(path)
        except OSError as exc:
            if exc.errno == errno.EEXIST and os.path.isdir(path):
                pass
            else:
                print("can't create directory '{}''".format(path))
                exit(1)

    def _download(self, fpath, link):
        """Fetch one remote .mat file from `link` into local `fpath`."""
        print("Downloading from '{}' to '{}'".format(link, fpath))
        # Fix: urllib.request.URLopener is deprecated (and removed in
        # recent Python versions); urlretrieve is the supported
        # one-shot download helper with the same effect here.
        urllib.urlretrieve(link, fpath)

    def _load_and_slice_data(self, rdir, infos):
        """Download (if needed) every file in `infos`, slice its DE_time
        signal into rows of self.length samples and accumulate a 3:1
        train/test split labelled by the file's index in `infos`."""
        self.X_train = np.zeros((0, self.length))
        self.X_test = np.zeros((0, self.length))
        self.y_train = []
        self.y_test = []
        for idx, info in enumerate(infos):
            # directory of this file
            fdir = os.path.join(rdir, info[0], info[1])
            self._mkdir(fdir)
            fpath = os.path.join(fdir, info[2] + '.mat')
            if not os.path.exists(fpath):
                self._download(fpath, info[3].rstrip('\n'))

            mat_dict = loadmat(fpath)
            # The drive-end accelerometer variable's key ends in 'DE_time'.
            key = list(filter(lambda x: 'DE_time' in x, mat_dict.keys()))[0]
            time_series = mat_dict[key][:, 0]
            # Drop the remainder so the signal reshapes evenly into clips.
            # Fix: the original used time_series[:-(len % length)], which
            # becomes [:0] (an empty slice, discarding ALL data) whenever
            # the length divides the signal exactly.
            rem = time_series.shape[0] % self.length
            if rem:
                time_series = time_series[:-rem]
            clips = time_series.reshape(-1, self.length)

            n = clips.shape[0]
            n_split = int(3 * n / 4)
            self.X_train = np.vstack((self.X_train, clips[:n_split]))
            self.X_test = np.vstack((self.X_test, clips[n_split:]))
            self.y_train += [idx] * n_split
            self.y_test += [idx] * (clips.shape[0] - n_split)

    def _shuffle(self):
        """Shuffle train and test sets with a fixed seed (0), keeping
        each X row aligned with its y label."""
        # shuffle training samples
        index = list(range(self.X_train.shape[0]))
        random.Random(0).shuffle(index)
        self.X_train = self.X_train[index]
        self.y_train = tuple(self.y_train[i] for i in index)

        # shuffle test samples
        index = list(range(self.X_test.shape[0]))
        random.Random(0).shuffle(index)
        self.X_test = self.X_test[index]
        self.y_test = tuple(self.y_test[i] for i in index)
| [
"os.path.exists",
"urllib.request.URLopener",
"os.makedirs",
"random.Random",
"scipy.io.loadmat",
"os.path.join",
"os.getcwd",
"os.path.dirname",
"numpy.zeros",
"os.path.isdir",
"numpy.vstack",
"os.path.expanduser"
] | [((529, 540), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (538, 540), False, 'import os\n'), ((1724, 1750), 'numpy.zeros', 'np.zeros', (['(0, self.length)'], {}), '((0, self.length))\n', (1732, 1750), True, 'import numpy as np\n'), ((1773, 1799), 'numpy.zeros', 'np.zeros', (['(0, self.length)'], {}), '((0, self.length))\n', (1781, 1799), True, 'import numpy as np\n'), ((569, 597), 'os.path.expanduser', 'os.path.expanduser', (['work_dir'], {}), '(work_dir)\n', (587, 597), False, 'import os\n'), ((646, 671), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (661, 671), False, 'import os\n'), ((1252, 1269), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1263, 1269), False, 'import os\n'), ((1950, 1986), 'os.path.join', 'os.path.join', (['rdir', 'info[0]', 'info[1]'], {}), '(rdir, info[0], info[1])\n', (1962, 1986), False, 'import os\n'), ((2037, 2073), 'os.path.join', 'os.path.join', (['fdir', "(info[2] + '.mat')"], {}), "(fdir, info[2] + '.mat')\n", (2049, 2073), False, 'import os\n'), ((2200, 2214), 'scipy.io.loadmat', 'loadmat', (['fpath'], {}), '(fpath)\n', (2207, 2214), False, 'from scipy.io import loadmat\n'), ((2564, 2606), 'numpy.vstack', 'np.vstack', (['(self.X_train, clips[:n_split])'], {}), '((self.X_train, clips[:n_split]))\n', (2573, 2606), True, 'import numpy as np\n'), ((2633, 2674), 'numpy.vstack', 'np.vstack', (['(self.X_test, clips[n_split:])'], {}), '((self.X_test, clips[n_split:]))\n', (2642, 2674), True, 'import numpy as np\n'), ((1610, 1628), 'urllib.request.URLopener', 'urllib.URLopener', ([], {}), '()\n', (1626, 1628), True, 'import urllib.request as urllib\n'), ((2093, 2114), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (2107, 2114), False, 'import os\n'), ((2900, 2916), 'random.Random', 'random.Random', (['(0)'], {}), '(0)\n', (2913, 2916), False, 'import random\n'), ((3126, 3142), 'random.Random', 'random.Random', (['(0)'], {}), '(0)\n', (3139, 3142), False, 'import random\n'), 
((1346, 1365), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1359, 1365), False, 'import os\n')] |
import cv2
import numpy as np

# 30 fps MJPG-encoded AVI of 220x220 frames.
video_out = cv2.VideoWriter("output.avi",
                cv2.VideoWriter_fourcc(*"MJPG"), 30, (220, 220))

# One fixed random RGB frame, written repeatedly for the whole clip.
noise_frame = np.random.randint(0, 255, (220, 220, 3)).astype('uint8')

for _ in range(1000):
    video_out.write(noise_frame)

video_out.release()
"numpy.random.randint",
"cv2.VideoWriter_fourcc"
] | [((69, 100), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (91, 100), False, 'import cv2\n'), ((124, 164), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(220, 220, 3)'], {}), '(0, 255, (220, 220, 3))\n', (141, 164), True, 'import numpy as np\n')] |
import numpy as np
import os
import pandas as pd
import pickle
def get_user_avg_ratings(user_item_mat):
    """
    Given one user-item matrix, calculate the average rating a user has given.

    Input:
    - user_item_mat: path (relative to this module, or absolute) to a TSV
        file containing a dataframe, with rows indicating users and columns
        indicating items; each value is the user's rating for that
        restaurant, or 0 if the user has not visited that restaurant.

    Output:
    - df: dataframe indexed by user id with one column "average" holding
        the mean of the non-zero ratings the user has given (0.0 for a
        user with no ratings at all).
    """
    # Keep line count to suggest progress when running the dataset.
    line_count = 0
    # Prepare two lists, each one would be a column in the final dataframe.
    avg_list = []
    index_list = []
    # Read the tsv file one user (row) at a time to keep memory flat.
    for line in pd.read_csv(
            os.path.join(os.path.dirname(__file__), user_item_mat),
            sep='\t', na_values='None', index_col=0, chunksize=1):
        user_rating_sum = 0
        # Count only the number of restaurants the user has visited.
        user_rating_count = 0
        user_id = line.index.item()
        for bus_id in line:
            rating = line[bus_id].item()
            # 0 encodes "not visited" and must not drag the average down.
            if rating != 0.0:
                user_rating_sum += rating
                user_rating_count += 1
        # Bug fix: the original divided unconditionally, raising
        # ZeroDivisionError for a user whose row is all zeros.
        if user_rating_count:
            user_avg_rating = user_rating_sum / user_rating_count
        else:
            user_avg_rating = 0.0
        print(line_count)
        line_count += 1
        avg_list.append(user_avg_rating)
        index_list.append(user_id)
    # Write the two lists into dataframe.
    return pd.DataFrame(data={"average": avg_list}, index=np.array(index_list))
if __name__ == "__main__":
    # Build the per-user average-rating table from the user-item matrix
    # shipped next to this module, then persist it as TSV.
    user_avg_ratings = get_user_avg_ratings(
        "user_user_similarity/user_item_matrix.tsv")
    out_path = os.path.join(os.path.dirname(__file__), "user_avg_ratings.tsv")
    user_avg_ratings.to_csv(out_path, sep="\t")
"os.path.dirname",
"numpy.array"
] | [((878, 903), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (893, 903), False, 'import os\n'), ((1810, 1830), 'numpy.array', 'np.array', (['index_list'], {}), '(index_list)\n', (1818, 1830), True, 'import numpy as np\n'), ((2082, 2107), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2097, 2107), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
NoveltyHainsworth
computes the novelty measure used by Hainsworth
Args:
X: spectrogram (dimension FFTLength X Observations)
f_s: sample rate
Returns:
d_hai novelty measure
"""
import numpy as np
def NoveltyHainsworth(X, f_s):
    """Hainsworth novelty measure: per-frame sum of log2 magnitude ratios.

    Args:
        X: spectrogram (FFTLength x Observations).
        f_s: sample rate (unused; kept for a uniform novelty signature).

    Returns:
        d_hai: novelty value per observation.
    """
    epsilon = 1e-5
    # Prepend the raw first column (as in the reference implementation)
    # so the output keeps one value per frame, then work on magnitudes.
    mag = np.c_[X[:, 0], np.sqrt(X)]
    # Floor non-positive bins so the log ratio stays finite.
    mag[mag <= 0] = epsilon
    # log2 ratio of consecutive frames, summed over frequency bins and
    # normalised by the number of bins.
    d_hai = np.sum(np.log2(mag[:, 1:] / mag[:, :-1]), axis=0) / X.shape[0]
    return d_hai
| [
"numpy.sum",
"numpy.sqrt",
"numpy.arange"
] | [((533, 555), 'numpy.sum', 'np.sum', (['afDiff'], {'axis': '(0)'}), '(afDiff, axis=0)\n', (539, 555), True, 'import numpy as np\n'), ((386, 396), 'numpy.sqrt', 'np.sqrt', (['X'], {}), '(X)\n', (393, 396), True, 'import numpy as np\n'), ((448, 472), 'numpy.arange', 'np.arange', (['(1)', 'X.shape[1]'], {}), '(1, X.shape[1])\n', (457, 472), True, 'import numpy as np\n'), ((477, 505), 'numpy.arange', 'np.arange', (['(0)', '(X.shape[1] - 1)'], {}), '(0, X.shape[1] - 1)\n', (486, 505), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.