code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Python imports
from __future__ import absolute_import
from __future__ import print_function
import copy
import math
from matplotlib import pyplot as plt
import numpy as np
import os
# mx_modeling imports
from lib import fun
from lib import utils
from power_calcs import kite_pose
from . import rotor_model_util
from .fbl_load_csim_database import FittedDatabase
# cD offsets for various components
# Effective tether drag referenced to kite wing area.
_cD_eff_tether_stranded_base_m600 = 0.11306 # only valid for S=32.9
_cD_eff_tether_fluted_m600 = 0.06756 # only valid for S=32.9
_cD_eff_tether_half_fair_m600 = 0.0131
_cD_eff_tether_faired_m600 = 0.0064
# Drag estimates for other components.
# Baseline from bottoms up drag buildup minus gear
_cD_parasitic_no_fair = 0.0653 - 0.0093
_delta_cD_fair_bridle = -0.0151 # only valid for S=32.9
_delta_cD_fair_landing_gear = -0.0061 # only valid for S=32.9
_cD_gear_m600 = 0.0093 # revised 2017-11 from Ben's estimate
_cD_gear_faired_m600 = 0.0047
# Glide estimates from RPX07 indicated that config had a total drag of ~0.075
# This means that:
# _cD_parasitic_no_fair + _delta_cD_fair_bridle
# + _cD_gear_faired_m600 + _cD_addnl_offset = 0.075
# This fudge factor fills in the difference between these estimate.
_cD_addnl_offset = 0.0294 # fit to make match overall offset from CSim
_cL_offset = -0.125
# Approximate mass deltas for various components
_m_gear_fairing_delta = 8.
_m_gear_removed_delta = -40.
# Based on mass difference between SN04 and SN05 in crosswind.
# Note that this mass difference only includes mass increase from installing
# the slats. The mass increase from reinforced structure and slat bracket
# mounts is already included in the base mass.
# https://codesearch.corp.google.com/makani/config/m600/wing.py?l=62
_m_slats_installed = 40.
# tether and ground station args
_l_tether = 432. + 7.2 # tether + bridle
# Parker Ranch tether attachment point.
# https://codesearch.corp.google.com/makani/config/m600/ground_frame.py?l=29
_gs_position = [0., 0., 6.122]
# Effective center of rotor thrust (assuming all rotors working equally) is the
# average of all rotor locations.
# Source: https://codesearch.corp.google.com/makani/config/m600/rotors.py?l=130
_rotor_locations = {}
_rotor_locations['Sbo'] = [1.613, 3.639, 1.597]
_rotor_locations['Sbi'] = [1.613, 1.213, 1.597]
_rotor_locations['Pbi'] = [1.613, -1.213, 1.597]
_rotor_locations['Pbo'] = [1.613, -3.639, 1.597]
_rotor_locations['Pto'] = [1.960, -3.639, -1.216]
_rotor_locations['Pti'] = [1.960, -1.213, -1.216]
_rotor_locations['Sti'] = [1.960, 1.213, -1.216]
_rotor_locations['Sto'] = [1.960, 3.639, -1.216]
_rotor_thrust_center = np.mean(list(_rotor_locations.values()), axis=0)
# Unit vector of rotor thrust in body frame.
# Direction indicates positive thrust.
# TODO: Update with actual direction. Actual is very nearly along X.
_rotor_thrust_axis = np.array([1., 0., 0.])
# Center of gravity (CG) and rotational inertia of the kite.
# Source: https://codesearch.corp.google.com/makani/config/m600/wing.py?l=54
_CG_sn4 = np.array([-0.085, 0.037, 0.108])
# Source: SN05 crosswind mass:
# https://codesearch.corp.google.com/makani/config/m600/wing.py?l=94
_m_kite = 1629.6 + 51.5 + 10.6 + 0.6
# Inertia is scaled according to actual mass over reference mass.
_inertia_sn4 = np.array(
[[31401.2, 47.3, 22.3],
[47.3, 8228.9, 22.3],
[22.3, 22.3, 36864.5]]) * _m_kite / 1606.98
# Wing reference areas and lengths.
# Source: https://codesearch.corp.google.com/makani/config/m600/wing.py?l=129
_S = 32.9 # Refers to wing area, called A in source.
_b = 25.66 # Wing span
_c = 1.28 # Wing chord
# Bridle parameters.
# Source: https://codesearch.corp.google.com/makani/config/m600/wing.py?l=175
# Bridle hardpoint locations (center of spherical bearing).
_bridle_pos = np.array(
[[-0.1494, -5.8843, 0.13035],
[-0.1494, 5.8661, 0.13035]])
# Bridle radius (distance from axis between bridle anchors to bridle knot).
_bridle_rad= 4.7860
# Bridle offset. Y offset of bridle knot along bridle axis from center.
_bridle_y_offset= -0.5
# Make the bridle model.
_m600_bridle = utils.BridleMoments(_bridle_pos, _bridle_rad, _bridle_y_offset)
# Roll limits.
# Source: https://codesearch.corp.google.com/makani/config/m600/control/crosswind.py?l=421
_max_roll_excursion = 0.48
_nom_tether_roll = np.arctan2(-_bridle_y_offset, _bridle_rad + _bridle_pos[1][2])
_tether_roll_min = _nom_tether_roll - _max_roll_excursion
_tether_roll_max = _nom_tether_roll + _max_roll_excursion
# Aero thrust limits.
# The M600 has aero thrust power limits, max_airspeed_control_power_gen/motor,
# to ensure the controller is not expecting unreasonable thrusts.
# Source: https://codesearch.corp.google.com/makani/config/m600/control/crosswind.py?type=cs&q=max_airspeed_control_power_gen&g=0&l=467
# Notation is - for gen and + for thrust.
# This limit is not inherent to the physics of the FBL or physical kite
# limits, but may be useful when trying to compare results to csim.
_aero_thrust_p_max = 650e3
_aero_thrust_p_min = -1000e3
# Model aero device.
# For the M600, the aero device is the inner ailerons, deflected to spoil the
# wing.
# Source: https://docs.google.com/spreadsheets/d/1lQTqghsm7R2yY-OLu4VsKKtOrJo5OHYYcWZgqPmQS98/edit
# CFD of d4 deflections is used to find average relative change in cL and cD
# with full flap deployment. Largest deflection (100deg) is used,
# so small drag flap deflections are poorly modeled.
# A linear fit of d_cm = f(alpha) is used for cm.
# All values are multiplied by 2 as d4 flap will be used in pair with d3.
# D3 effectiveness assumed equal.
# Changes to cl, cn, and cy are ignored, as these moments should be largely
# balanced by similar balancing moments from d3.
def _drag_flaps(flap_norm, state):
cD_offset = (1.3 * state['cD'] - state['cD']) * 2. * flap_norm
cL_offset = (0.853 * state['cL'] - state['cL']) * 2. * flap_norm
cm_offset = (-0.00769 * state['alpha'] - 0.10391) * 2. * flap_norm
state['aero_device_cD_offset'] = cD_offset
state['aero_device_cL_offset'] = cL_offset
state['aero_device_cm_offset'] = cm_offset
return {
'cD': cD_offset,
'cL': cL_offset,
'cm': cm_offset}
# Modify the usual 'eta_shaft_to_padmount' so the model
# reports at the desired power point.
# Each entry maps a power-point name to an efficiency function of power p:
#   'shaft': no losses; reports shaft power directly.
#   'motor_elec': constant 94% motor efficiency.
#   'padmount': motor (0.94) * motor controller (0.96) * a power-dependent
#     term (presumably resistive tether loss normalized by a 3400 V bus --
#     TODO confirm the constants) * pad transformer (0.975).
_power_points = {
    'shaft': lambda p: 1.0,
    'motor_elec': lambda p: 0.94,  # Assumes motor is 94% efficient
    'padmount': (
        lambda p: 0.94 * 0.96 * (1. - (abs(p) * 1.0)/(3400.**2)) * 0.975)}
def _csim_db_fitter(csimdb, alphad, betad, omega_hat=None):
body_coeff = {}
if omega_hat is None:
omega_hat = csimdb._bomega_hat_0
body_coeff.update(csimdb.CalcForceCoeffs(alphad, betad, omega_hat))
body_coeff.update(csimdb.CalcMomentCoeffs(alphad, betad, omega_hat))
return body_coeff
# Create aero databases from files.
# Includes rudder extension.
_aswing_baseline_csim_db = (
FittedDatabase(os.path.dirname(__file__)
+ '/aero_databases/m600_aswing_baseline.json'))
_aswing_baseline_zero_angular_csim_db = (
FittedDatabase(
os.path.dirname(__file__)
+ '/aero_databases/m600_aswing_baseline_zero_angular_rate.json'))
# Does NOT include rudder extension.
_aswing_stage3slats_csim_db = (
FittedDatabase(os.path.dirname(__file__)
+ '/aero_databases/m600_aero_database_stage_3_slats.json'))
def _aswing_body_coeff_baseline(alpha, beta, omega_hat=None):
  """Body coefficients from the baseline ASWING database (incl. rudder ext)."""
  db = _aswing_baseline_csim_db
  # Fall back to the database's nominal reduced angular rates when not given.
  resolved_omega_hat = db._bomega_hat_0 if omega_hat is None else omega_hat
  return _csim_db_fitter(db, alpha, beta, resolved_omega_hat)
def _aswing_body_coeff_stage_3_slat(alpha, beta, omega_hat=None):
  """Body coefficients from the stage-3 slats ASWING database (no rudder ext)."""
  db = _aswing_stage3slats_csim_db
  # Fall back to the database's nominal reduced angular rates when not given.
  resolved_omega_hat = db._bomega_hat_0 if omega_hat is None else omega_hat
  return _csim_db_fitter(db, alpha, beta, resolved_omega_hat)
m600_base = {
'c': _c,
'b': _b,
's': _S,
'CG': _CG_sn4,
'inertia': _inertia_sn4,
'rotor_thrust_center': _rotor_thrust_center,
'rotor_thrust_axis': _rotor_thrust_axis,
'aero_device': _drag_flaps,
'v_a_max': 70.0,
'aero_thrust_p_max': _aero_thrust_p_max,
'aero_thrust_p_min': _aero_thrust_p_min,
'tether_roll_min': _tether_roll_min,
'tether_roll_max': _tether_roll_max,
'bridle_moment_from_tether_pitch_roll': (
_m600_bridle.CalculateMomentFromPitchRoll),
# TODO: Add source for control moment residual limits.
'cl_residual_max': 0.11,
'cl_residual_min': -0.1,
'cm_residual_max': 0.4,
'cm_residual_min': -0.6,
'cn_residual_max': 0.02,
'cn_residual_min': -0.02,
'gs_position': _gs_position,
'h_min': 90.,
'incl_max': 1.0,
'l_tether': _l_tether,
'm_kite': _m_kite,
'm_tether': 390.45,
'tension_max': 260000.,
'v_a_min': 35.,
'shaft_power_from_drag_power': (
rotor_model_util.Gen4RotorConfigBySizeFixedPitch(
0.0, n_rotors=8, r_rotor=1.15)),
'power_shaft_max': 900000.,
'torque_shaft_max': 1000., # Nm, from 08/12 pencil spec, https://docs.google.com/spreadsheets/d/1R3nDoGct9COB6donjDL9S-K9XdtTe3shFhP_GG3ElpI/edit#gid=0
'rotor_mach_limit': 0.8,
'eta_shaft_to_pad': _power_points['padmount']}
m600_configs = {
'stage3_slats_faired_fluted': {
'body_coeff_from_alpha_beta': _aswing_body_coeff_stage_3_slat,
'cD_offset': (_cD_parasitic_no_fair
+ _delta_cD_fair_bridle
+ _cD_gear_faired_m600
+ _cD_addnl_offset),
'alpha_min': 0.,
'alpha_max': 8.,
'beta_min': -5.,
'beta_max': 5.,
'm_kite': m600_base['m_kite'] + _m_slats_installed,
'cD_eff_tether': _cD_eff_tether_fluted_m600,
'description': ('SN03, stage3 aswing aero, faired gear, ' +
'partial fair bridle, ' +
'fluted stranded tether, ' +
'alpha 8deg')},
'stage3_slats_fluted_conservative': {
'body_coeff_from_alpha_beta': _aswing_body_coeff_stage_3_slat,
'cD_offset': (_cD_parasitic_no_fair
+ _delta_cD_fair_bridle
+ _cD_gear_faired_m600
+ _cD_addnl_offset),
'tether_roll_max': m600_base['tether_roll_max'] - math.radians(10.),
'tether_roll_min': m600_base['tether_roll_min'] + math.radians(10.),
'alpha_min': 0.,
'alpha_max': 5.,
'beta_min': -4.,
'beta_max': 4.,
'm_kite': m600_base['m_kite'] + _m_slats_installed,
'cD_eff_tether': _cD_eff_tether_fluted_m600,
'description': ('SN03, stage3 aswing aero, faired gear, ' +
'partial fair bridle, ' +
'fluted stranded tether, ' +
'alpha 5deg')},
'baseline_faired_fluted': { # Note: Unfaired bridles
'body_coeff_from_alpha_beta': _aswing_body_coeff_baseline,
'cD_offset': (_cD_parasitic_no_fair
+ _cD_gear_faired_m600
+ _cD_addnl_offset),
'cL_offset': _cL_offset,
'alpha_min': -8.,
'alpha_max': 5.,
'beta_min': -5.,
'beta_max': 5.,
'cD_eff_tether': _cD_eff_tether_fluted_m600,
'description': ('SN05, baseline aswing aero, faired gear, ' +
'unfaired bridle, ' +
'fluted stranded tether, ' +
'alpha max 5deg')},
}
for key, config in m600_configs.items():
base = copy.deepcopy(m600_base)
base.update(config)
m600_configs[key] = base
# Test configs are approximate.
test_to_config = {
'RPX07': 'stage3_slats_faired_fluted', # GS Position not updated for CL
'RPX08': 'stage3_slats_faired_fluted',
'RPX09': 'stage3_slats_faired_fluted',
'CW01': 'baseline_faired_fluted',
'CW02': 'baseline_faired_fluted'}
_base_config = m600_configs['baseline_faired_fluted']
_base_slats_config = m600_configs['stage3_slats_faired_fluted']
def UpdateKitesAndModelsFromOverride(
    override, kites, aero_models, bridle_models):
  """Updates kites, aero_models, and bridle_models with new kite.

  Args:
    override: Dict of overrides to define a kite config.
    kites: Dict of kites that is updated with new kites from override. Any kites
      with the same name as override will NOT be overwritten.
    aero_models: Dict of aero database model that is updated with aero model
      that is created from aero database specified in override. Key is 'name'
      specified in override.
    bridle_models: Dict of bridle models that is updated with model that is
      created from override. Key is 'name' specified in override.

  Raises:
    AssertionError: If override['name'] already exists in any of the three
      output dicts (entries are never silently overwritten).
  """
  # Setup kites
  assert override['name'] not in kites, (
      'Name already exists in kites and was not updated. Check that new '
      + 'name is correct. \n'
      + 'If name is correct, delete existing entry and rerun to revise.')
  kite = override['name']
  kites[kite] = MakeKiteFromOverride(override)
  # Setup Bridles
  assert override['name'] not in bridle_models, (
      'Name already exists in bridle_models and was not updated. Check that new'
      + ' name is correct. \n'
      + 'If name is correct, delete existing entry and rerun to revise.')
  # Make the bridle model.
  # The tether hardpoint is given as a single point; stretch it +/-0.5 along
  # kite y to form the two-point bridle axis utils.BridleMoments expects.
  bridle_left = np.array(override['tether_hardpoint'])
  bridle_right = np.array(override['tether_hardpoint'])
  # Define axis along y
  bridle_left[1] -= 0.5
  bridle_right[1] += 0.5
  bridle_pos = np.array([bridle_left, bridle_right])
  # Bridle radius (distance from axis between bridle anchors to bridle knot).
  bridle_rad = override['bridle_radial_length']
  # Bridle offset. Y offset of bridle knot along bridle axis from center.
  bridle_y_offset = override['bridle_y_offset']
  bridle_models[kite] = utils.BridleMoments(
      bridle_pos, bridle_rad, bridle_y_offset)
  # Setup aero model
  assert override['name'] not in aero_models, (
      'Name already exists in aero_models and was not updated. Check that new '
      + 'name is correct. \n'
      + 'If name is correct, delete existing entry and rerun to revise.')
  aero_models[kite] = (
      FittedDatabase(
          fun.GetFullPath('tools/aero_databases/')
          + override['aero_db_file']))
def MakeKiteFromOverride(override, aero_devices=None):
  """Makes a kite config from override dict.

  Args:
    override: Dict of overrides to define a kite config.
    aero_devices: Optional dict mapping aero device names to device functions.
      Defaults to the devices defined in this module ('m600_drag_flaps').

  Returns:
    Dict of kite config.
  """
  kite = {}
  # Setup aero device
  # Pick from the dict of provided aero devices - if not provided, make a dict
  # of ones in the manager.
  if aero_devices is None:
    aero_devices = {'m600_drag_flaps': _drag_flaps}
  if 'aero_device_name' in override:
    kite['aero_device'] = aero_devices[override['aero_device_name']]
  # Setup Bridles
  # Get the tether attach location. Must be defined as an axis, which we assume
  # is along kite y. This allows us to only specify a single point, and stretch
  # it along y to define the axis.
  bridle_left = np.array(override['tether_hardpoint'])
  bridle_right = np.array(override['tether_hardpoint'])
  # Define axis along y
  bridle_left[1] -= 0.5
  bridle_right[1] += 0.5
  bridle_pos = np.array([bridle_left, bridle_right])
  # Bridle radius (distance from axis between bridle anchors to bridle knot).
  bridle_rad = override['bridle_radial_length']
  # Bridle offset. Y offset of bridle knot along bridle axis from center.
  bridle_y_offset = override['bridle_y_offset']
  bridle_model = utils.BridleMoments(
      bridle_pos, bridle_rad, bridle_y_offset)
  kite['bridle_moment_from_tether_pitch_roll'] = (
      bridle_model.CalculateMomentFromPitchRoll)
  # Make aero model
  aero_model = (
      FittedDatabase(
          fun.GetFullPath('tools/aero_databases/')
          + override['aero_db_file']))
  # The lambda closes over aero_model so the config stays self-contained.
  kite['body_coeff_from_alpha_beta'] = (
      lambda a, b, o=None: _csim_db_fitter(aero_model, a, b, o))
  # Setup shaft power function for rotors
  kite['shaft_power_from_drag_power'] = (
      rotor_model_util.Gen4RotorConfigBySizeFixedPitch(
          override['rotor_pitch'],
          a_rotors=override['a_rotors'],
          n_rotors=override['n_rotors'],
          rotor_mach_limit=override['rotor_mach_limit']))
  # Setup eta_shaft_to_padmount function
  # Motor * motor-controller efficiency, times a power-dependent resistive
  # tether loss term, times pad transformer efficiency.
  kite['eta_shaft_to_pad'] = (
      lambda p: (
          override['eta_motors'] * override['eta_motor_ctrls']
          * (1. - (abs(p) * override['ohms_per_m_tether'] * override['l_tether']
                   /(override['v_tether']**2)))
          * override['eta_pad_trans']))
  # Setup tether effective drag
  # Assumes tether drag and thickness is constant over length of tether
  # The 1/4 factor presumably comes from integrating drag along the tether
  # referenced to wing area 's' -- TODO confirm against tether drag derivation.
  kite['cD_eff_tether'] = (
      override['cD_tether'] * override['l_tether'] * override['t_tether']
      / (4. * override['s']))
  # Note: Some values (name, bridle settings, rotor settings, etc.)
  # from the override are only used here, and not by the config dict needed for
  # FBL. We put all of them in one place so the full definition is saved.
  kite.update(override)
  return kite
def ChangeKiteMass(config, mass_delta):
  """Adjusts the config's kite mass 'm_kite' in place by mass_delta."""
  config['m_kite'] = config['m_kite'] + mass_delta
def ChangeTetherMass(config, mass_delta):
  """Adjusts the config's tether mass 'm_tether' in place by mass_delta."""
  config['m_tether'] = config['m_tether'] + mass_delta
def ChangePowerPoint(config, power_point):
  """Sets config['eta_shaft_to_pad'] to report at the named power point.

  Prints the valid options (instead of raising) when power_point is unknown,
  leaving the config unchanged.
  """
  if power_point in _power_points:
    config['eta_shaft_to_pad'] = _power_points[power_point]
  else:
    print(('Power point must be: \''
           + '\', or \''.join(list(_power_points.keys()))
           + '\''))
def ModifyC_Doffset(config, cD_delta):
  """Shifts the config's parasitic drag offset 'cD_offset' by cD_delta."""
  config['cD_offset'] = config['cD_offset'] + cD_delta
def GetConfigByName(name='baseline_faired_fluted', power_point='padmount'):
  """Returns a deep copy of the named m600 config at the given power point.

  Args:
    name: Key into m600_configs.
    power_point: Key into _power_points selecting where power is reported.

  Returns:
    Deep-copied config dict with 'eta_shaft_to_pad' set for power_point.

  Raises:
    KeyError: If name is not a known config (after printing the valid names).
  """
  if name not in m600_configs:
    print('Name must be: \'' + '\', or \''.join(list(m600_configs.keys())) + '\'')
    # Previously this raised implicitly via a bare `m600_configs[name]`
    # subscript that looked like dead code; raise the same KeyError explicitly.
    raise KeyError(name)
  config = copy.deepcopy(m600_configs[name])
  ChangePowerPoint(config, power_point)
  return config
def GetConfigByTest(test='CW02', power_point='motor_elec'):
  """Returns a deep copy of the config approximating the named flight test.

  Args:
    test: Key into test_to_config (e.g. 'RPX07', 'CW02').
    power_point: Key into _power_points selecting where power is reported.

  Returns:
    Deep-copied config dict with 'eta_shaft_to_pad' set for power_point.

  Raises:
    KeyError: If test is not a known test name (after printing valid names).
  """
  if test not in test_to_config:
    print('Test must be: \'' + '\', or \''.join(list(test_to_config.keys())) + '\'')
    # Previously this raised implicitly via a bare `test_to_config[test]`
    # subscript that looked like dead code; raise the same KeyError explicitly.
    raise KeyError(test)
  name = test_to_config[test]
  config = copy.deepcopy(m600_configs[name])
  ChangePowerPoint(config, power_point)
  return config
def GetPathArgsByR_LoopAndMinHeight(config, h_min, r_loop, azim=0.):
  """Builds KitePath init args for a circular path meeting a minimum height.

  The inclination is the loop half-cone angle plus the elevation angle needed
  for the path to clear h_min above the ground-station attach height.

  Args:
    config: Kite config dict; 'l_tether' is required, 'gs_position' optional.
    h_min: Minimum allowed height of the path.
    r_loop: Loop radius.
    azim: Path azimuth.

  Returns:
    Dict of shape/location parameters for a KitePath object.
  """
  gs_position = config.get('gs_position', [0., 0., 0.])
  angle_half_cone = math.asin(r_loop/config['l_tether'])
  angle_h_min = math.asin((h_min - gs_position[2])/config['l_tether'])
  return {
      'shape_params': {'r_loop': r_loop, 'type': 'circle'},
      'location_params': {'incl': angle_half_cone + angle_h_min,
                          'azim': azim}}
m600_configs['stage3_slats_no_gear'] = copy.deepcopy(_base_slats_config)
m600_configs['stage3_slats_no_gear']['m_kite'] += _m_gear_removed_delta
ModifyC_Doffset(m600_configs['stage3_slats_no_gear'], -_cD_gear_faired_m600)
m600_configs['stage3_slats_no_gear_faired_tether'] = copy.deepcopy(
m600_configs['stage3_slats_no_gear'])
ModifyC_Doffset(m600_configs['stage3_slats_no_gear_faired_tether'],
-_cD_eff_tether_fluted_m600 + _cD_eff_tether_faired_m600)
m600_configs['stage3_slats_faired_tether'] = copy.deepcopy(_base_slats_config)
ModifyC_Doffset(m600_configs['stage3_slats_faired_tether'],
-_cD_eff_tether_fluted_m600 + _cD_eff_tether_faired_m600)
def PlotKiteAero(config, **kwargs):
  """Plots the aero database for the given config. Returns a dict of plot
  objects.

  Kwargs:
    alpha_linspace: Tuple of linspace values for alphas.
    beta_linspace: Tuple of linspace values for betas.
    omega_hat: Reduced angular rates for aero lookup.
    plots: A dict where the keys are the variable that's plotted and values are
      plot objects. Enables user to append to plots.
    levels: For contour plots, sets number of levels.
    colormap: For contour plots, sets colormap.
    color: For line plots, sets line color.
    label: For line plots, sets line label for legend.
    figsize: Matplotlib figsize kwarg.
    keys: Variables to plot.
  If one of alphas or betas linspace is a single value, plot changes to a line
  plot instead of the default contour plot."""
  alphas = np.linspace(*kwargs.get('alpha_linspace', (-5., 15., 40)))
  betas = np.linspace(*kwargs.get('beta_linspace', (-10., 10., 40)))
  omega_hat = kwargs.get('omega_hat', None)
  plots = kwargs.get('plots', {})
  levels = kwargs.get('levels', 30)
  colormap = kwargs.get('colormap', 'viridis')
  color = kwargs.get('color', 'C0')
  label = kwargs.get('label', None)
  figsize = kwargs.get('figsize', (9,7))
  linestyle = kwargs.get('linestyle', '-')
  keys = kwargs.get('keys', ['cL', 'cD', 'cY', 'zeta', 'L1.5/D'])
  # Build a 2D grid of values (rows over beta, columns over alpha) per key.
  # NOTE(review): the aero lookup runs once per key per grid point; hoisting
  # the state computation out of the key loop would avoid redundant lookups.
  o = {}
  for k in keys:
    o[k] = []
    for beta in betas:
      row = []
      for alpha in alphas:
        state = {}
        state['alpha'] = alpha
        state['beta'] = beta
        state.update(
            config['body_coeff_from_alpha_beta'](alpha, beta, omega_hat))
        # Convert body-frame coefficients to aero coefficients, then apply
        # the config's offsets (cD_offset, cL_offset, aero device, etc.).
        kite_pose.KitePose._aero_coeff_from_body_coeff(state)
        kite_pose.KitePose._apply_aero_offsets(state, config)
        if k == 'zeta':
          # Power figure of merit: (4/27) * cL^3 / (cD + cD_tether)^2.
          row.append(
              (4./27.) * state['cL']**3
              / (state['cD'] + config['cD_eff_tether'])**2)
        elif k == 'L1.5/D':
          row.append(
              state['cL']**1.5
              / (state['cD'] + config['cD_eff_tether']))
        elif k == 'L/D':
          row.append(
              state['cL']
              / (state['cD'] + config['cD_eff_tether']))
        else:
          row.append(state[k])
      o[k].append(row)
  # Render one figure per key, reusing caller-provided figures when present.
  for k in keys:
    if k in plots:
      fig = plots[k]  # NOTE(review): assigned but unused; plots[k] is used below.
    else:
      plots[k] = plt.figure(figsize=figsize)
    ax = plots[k].gca()
    if len(alphas) == 1:
      # Degenerate alpha axis: line plot against beta.
      ax.plot(betas, [v[0] for v in o[k]], color=color, label=label,
              linestyle=linestyle)
      ax.set_title(k + ' as function of beta @ alpha %0.1f deg' % alphas[0])
      ax.set_xlabel('beta [deg]')
      ax.set_ylabel(k)
    elif len(betas) == 1:
      # Degenerate beta axis: line plot against alpha.
      ax.plot(alphas, o[k][0], color=color, label=label, linestyle=linestyle)
      ax.set_title(k + ' as function of alpha @ beta %0.1f deg' % betas[0])
      ax.set_xlabel('alpha [deg]')
      ax.set_ylabel(k)
    else:
      # Full grid: labeled contour plot over alpha/beta.
      CS = ax.contour(
          alphas, betas, o[k], levels, cmap=colormap)
      ax.clabel(CS, inline=1, fontsize=11)
      if omega_hat is None:
        title = k + ' as function of alpha and beta @ default omega_hat'
      else:
        title = (k + ' as function of alpha and beta @ omega_hat '
                 + '[%0.2f, %0.2f, %0.2f]' % tuple(o for o in omega_hat))
      ax.set_title(title)
      ax.set_xlabel('alpha [deg]')
      ax.set_ylabel('beta [deg]')
    if label is not None:
      ax.legend()
    ax.grid(linestyle='--', linewidth=0.5)
    plt.tight_layout()
  return plots
| [
"math.asin",
"math.radians",
"power_calcs.kite_pose.KitePose._aero_coeff_from_body_coeff",
"numpy.array",
"os.path.dirname",
"matplotlib.pyplot.figure",
"numpy.arctan2",
"matplotlib.pyplot.tight_layout",
"copy.deepcopy",
"lib.utils.BridleMoments",
"lib.fun.GetFullPath",
"power_calcs.kite_pose.... | [((3496, 3521), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (3504, 3521), True, 'import numpy as np\n'), ((3668, 3700), 'numpy.array', 'np.array', (['[-0.085, 0.037, 0.108]'], {}), '([-0.085, 0.037, 0.108])\n', (3676, 3700), True, 'import numpy as np\n'), ((4434, 4501), 'numpy.array', 'np.array', (['[[-0.1494, -5.8843, 0.13035], [-0.1494, 5.8661, 0.13035]]'], {}), '([[-0.1494, -5.8843, 0.13035], [-0.1494, 5.8661, 0.13035]])\n', (4442, 4501), True, 'import numpy as np\n'), ((4745, 4808), 'lib.utils.BridleMoments', 'utils.BridleMoments', (['_bridle_pos', '_bridle_rad', '_bridle_y_offset'], {}), '(_bridle_pos, _bridle_rad, _bridle_y_offset)\n', (4764, 4808), False, 'from lib import utils\n'), ((4962, 5024), 'numpy.arctan2', 'np.arctan2', (['(-_bridle_y_offset)', '(_bridle_rad + _bridle_pos[1][2])'], {}), '(-_bridle_y_offset, _bridle_rad + _bridle_pos[1][2])\n', (4972, 5024), True, 'import numpy as np\n'), ((19746, 19779), 'copy.deepcopy', 'copy.deepcopy', (['_base_slats_config'], {}), '(_base_slats_config)\n', (19759, 19779), False, 'import copy\n'), ((19984, 20035), 'copy.deepcopy', 'copy.deepcopy', (["m600_configs['stage3_slats_no_gear']"], {}), "(m600_configs['stage3_slats_no_gear'])\n", (19997, 20035), False, 'import copy\n'), ((20229, 20262), 'copy.deepcopy', 'copy.deepcopy', (['_base_slats_config'], {}), '(_base_slats_config)\n', (20242, 20262), False, 'import copy\n'), ((12270, 12294), 'copy.deepcopy', 'copy.deepcopy', (['m600_base'], {}), '(m600_base)\n', (12283, 12294), False, 'import copy\n'), ((14058, 14096), 'numpy.array', 'np.array', (["override['tether_hardpoint']"], {}), "(override['tether_hardpoint'])\n", (14066, 14096), True, 'import numpy as np\n'), ((14114, 14152), 'numpy.array', 'np.array', (["override['tether_hardpoint']"], {}), "(override['tether_hardpoint'])\n", (14122, 14152), True, 'import numpy as np\n'), ((14241, 14278), 'numpy.array', 'np.array', (['[bridle_left, bridle_right]'], 
{}), '([bridle_left, bridle_right])\n', (14249, 14278), True, 'import numpy as np\n'), ((14551, 14611), 'lib.utils.BridleMoments', 'utils.BridleMoments', (['bridle_pos', 'bridle_rad', 'bridle_y_offset'], {}), '(bridle_pos, bridle_rad, bridle_y_offset)\n', (14570, 14611), False, 'from lib import utils\n'), ((15778, 15816), 'numpy.array', 'np.array', (["override['tether_hardpoint']"], {}), "(override['tether_hardpoint'])\n", (15786, 15816), True, 'import numpy as np\n'), ((15834, 15872), 'numpy.array', 'np.array', (["override['tether_hardpoint']"], {}), "(override['tether_hardpoint'])\n", (15842, 15872), True, 'import numpy as np\n'), ((15961, 15998), 'numpy.array', 'np.array', (['[bridle_left, bridle_right]'], {}), '([bridle_left, bridle_right])\n', (15969, 15998), True, 'import numpy as np\n'), ((16264, 16324), 'lib.utils.BridleMoments', 'utils.BridleMoments', (['bridle_pos', 'bridle_rad', 'bridle_y_offset'], {}), '(bridle_pos, bridle_rad, bridle_y_offset)\n', (16283, 16324), False, 'from lib import utils\n'), ((19364, 19402), 'math.asin', 'math.asin', (["(r_loop / config['l_tether'])"], {}), "(r_loop / config['l_tether'])\n", (19373, 19402), False, 'import math\n'), ((19417, 19473), 'math.asin', 'math.asin', (["((h_min - gs_position[2]) / config['l_tether'])"], {}), "((h_min - gs_position[2]) / config['l_tether'])\n", (19426, 19473), False, 'import math\n'), ((3919, 3997), 'numpy.array', 'np.array', (['[[31401.2, 47.3, 22.3], [47.3, 8228.9, 22.3], [22.3, 22.3, 36864.5]]'], {}), '([[31401.2, 47.3, 22.3], [47.3, 8228.9, 22.3], [22.3, 22.3, 36864.5]])\n', (3927, 3997), True, 'import numpy as np\n'), ((7541, 7566), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7556, 7566), False, 'import os\n'), ((7704, 7729), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7719, 7729), False, 'import os\n'), ((7892, 7917), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7907, 7917), False, 'import 
os\n'), ((18557, 18590), 'copy.deepcopy', 'copy.deepcopy', (['m600_configs[name]'], {}), '(m600_configs[name])\n', (18570, 18590), False, 'import copy\n'), ((18909, 18942), 'copy.deepcopy', 'copy.deepcopy', (['m600_configs[name]'], {}), '(m600_configs[name])\n', (18922, 18942), False, 'import copy\n'), ((23841, 23859), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (23857, 23859), True, 'from matplotlib import pyplot as plt\n'), ((11035, 11053), 'math.radians', 'math.radians', (['(10.0)'], {}), '(10.0)\n', (11047, 11053), False, 'import math\n'), ((11112, 11130), 'math.radians', 'math.radians', (['(10.0)'], {}), '(10.0)\n', (11124, 11130), False, 'import math\n'), ((14929, 14969), 'lib.fun.GetFullPath', 'fun.GetFullPath', (['"""tools/aero_databases/"""'], {}), "('tools/aero_databases/')\n", (14944, 14969), False, 'from lib import fun\n'), ((16502, 16542), 'lib.fun.GetFullPath', 'fun.GetFullPath', (['"""tools/aero_databases/"""'], {}), "('tools/aero_databases/')\n", (16517, 16542), False, 'from lib import fun\n'), ((22725, 22752), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (22735, 22752), True, 'from matplotlib import pyplot as plt\n'), ((22039, 22092), 'power_calcs.kite_pose.KitePose._aero_coeff_from_body_coeff', 'kite_pose.KitePose._aero_coeff_from_body_coeff', (['state'], {}), '(state)\n', (22085, 22092), False, 'from power_calcs import kite_pose\n'), ((22101, 22154), 'power_calcs.kite_pose.KitePose._apply_aero_offsets', 'kite_pose.KitePose._apply_aero_offsets', (['state', 'config'], {}), '(state, config)\n', (22139, 22154), False, 'from power_calcs import kite_pose\n')] |
import numpy as np
import math
# simplex algorithm
'''
Need a super class that takes constants into account in objFun
'''
class Simplex(object):
    """Dense-tableau simplex solver for small linear programs.

    Maximization example:
        max 3x + 2y
        s.t. x + 2y <= 4
             x -  y <= 1
             x, y >= 0
    is encoded as
        c = [3, 2]
        A = [[1, 2],
             [1, -1]]
        b = [4, 1]

    With slack variables s1, s2 and objective variable P, the tableau for the
    example above looks like:

         x   y  s1  s2   P   b
         ----------------------
         1   2   1   0   0   4    constraint rows
         1  -1   0   1   0   1
         ----------------------
        -3  -2   0   0   1   0    objective-function row

    Minimization problems are solved through duality (the tableau is built
    from the transposed problem and the solution is read off the dual).

    Limitations: assumes proper c, A, b inputs and a feasible, bounded
    problem with a single-point solution; the objective function is assumed
    to have no constant term. Infeasible and unbounded cases are not handled.

    Method based on multiple sources:
      https://www.youtube.com/watch?v=XK26I9eoSl8
      https://hubpages.com/technology/Simplex-Algorithm-in-Python
      https://jeremykun.com/2014/12/01/linear-programming-and-the-simplex-algorithm/
      https://www.cs.cmu.edu/afs/cs/academic/class/15780-s16/www/slides/linear_prog.pdf
      MAIN SOURCE: https://www.youtube.com/watch?v=BdtdYlUIXak
      MAIN SOURCE: http://web.mit.edu/15.053/www/AMP-Chapter-04.pdf
    """

    def __init__(self, c, A, b, maxi):
        """Builds the initial tableau.

        Args:
            c: Objective-function coefficients, one per variable.
            A: Constraint coefficient rows.
            b: Constraint right-hand sides.
            maxi: True to maximize; False to minimize (solved via the dual).
        """
        self.varLen = len(c)  # number of variables
        self.conLen = len(b)  # number of slack variables (= # of constraints)
        self.maxi = maxi
        bCol = np.array([b + [0]], dtype=float)  # assumes no constant val in objfun
        varTable = np.array(A + [c], dtype=float)
        mainTable = np.concatenate((varTable, bCol.T), axis=1)
        if not self.maxi:  # minimization prob as dual prob
            mainTable = mainTable.T
        # Split mainTable into the factor columns and the constants column.
        bCol = np.array([mainTable[:, -1]])
        if self.maxi:
            factorTable = mainTable[:, :self.varLen]
            slckAndP = np.identity(self.conLen + 1)  # num of slack = num of constraints
        else:
            factorTable = mainTable[:, :self.conLen]
            slckAndP = np.identity(self.varLen + 1)
        # Negate the objective-function row for the standard tableau form.
        factorTable[-1, :] *= -1
        self.tableu = np.concatenate((factorTable, slckAndP, bCol.T), axis=1)

    def canImprove(self):
        """Returns True while any objective-row factor is negative."""
        for varFactor in self.tableu[-1]:  # objFun
            if varFactor < 0:
                return True
        return False

    def findPivot(self):
        """Finds self.pivRow and self.pivCol; same method for maxi and mini."""
        # pivotCol is the column with the most negative factor in the objfun.
        minFact = 0  # minimum factor of objfun
        objFun = self.tableu[-1]
        for col in range(len(objFun) - 2):  # exclude P and b columns
            if objFun[col] < minFact:
                minFact = objFun[col]
                self.pivCol = col
        # Minimum-ratio test over constraint rows; rows with a nonpositive
        # ratio (or zero pivot entry) are skipped via an inf placeholder.
        ratios = []
        for row in range(len(self.tableu) - 1):  # exclude objfun row
            if (abs(self.tableu[row][self.pivCol]) > 0
                    and self.tableu[row][-1] / self.tableu[row][self.pivCol] > 0):
                constant = self.tableu[row][-1]
                ratios.append(constant / self.tableu[row][self.pivCol])
            else:
                ratios.append(float("inf"))  # just a placeholder
        # Pick the row index minimizing the quotient.
        self.pivRow = ratios.index(min(ratios))

    def pivot(self):
        """Destructively pivots the tableau about (self.pivRow, self.pivCol)."""
        pivR, pivC = self.pivRow, self.pivCol
        # Scale the pivot row so the pivot element becomes 1.
        self.tableu[pivR] = np.divide(
            self.tableu[pivR], float(self.tableu[pivR][pivC]))
        # Eliminate the pivot column from every other row.
        notPivRows = set(range(len(self.tableu))).difference({pivR})
        for row in notPivRows:
            factor = self.tableu[row][pivC]
            self.tableu[row] -= np.dot(self.tableu[pivR], factor)

    def primalSolution(self):
        """Reads the primal variable values from the final tableau.

        A variable is treated as basic when its column contains exactly one
        entry equal to 1 (NOTE(review): the remaining entries are not checked
        to be 0 -- confirm this is sufficient for the problems solved here).
        """
        primal = [0] * self.varLen
        for col in range(self.varLen):
            oneFound = False
            varI = 0
            varRow = 0
            for row in range(len(self.tableu)):
                if self.tableu[row][col] == 1:
                    if not oneFound:
                        oneFound, varI, varRow = True, col, row
                    else:
                        oneFound = False
                        break
            if oneFound:
                primal[varI] = self.tableu[varRow][-1]
        return primal

    def dualSolution(self):
        """Reads the minimization solution from the dual's objective row."""
        dual = [0] * self.varLen
        # The dual problem's "slack variable" columns carry the primal values.
        for col in range(self.conLen, self.conLen + self.varLen):
            objFunRow = self.tableu[-1]
            dual[col - self.conLen] = objFunRow[col]
        return dual

    def objectiveValue(self):
        """Returns the objective value (bottom-right tableau entry)."""
        return self.tableu[-1][-1]

    def simplex(self):
        """Runs simplex iterations to optimality.

        Returns:
            (solution, value): list of variable values and the optimal
            objective value.
        """
        while self.canImprove():
            self.findPivot()
            self.pivot()
        if self.maxi:
            solution = self.primalSolution()
        else:
            solution = self.dualSolution()
        # Fixed: the return line had extraneous dataset residue fused onto it.
        return solution, self.objectiveValue()
"numpy.identity",
"numpy.array",
"numpy.dot",
"numpy.concatenate"
] | [((1428, 1460), 'numpy.array', 'np.array', (['[b + [0]]'], {'dtype': 'float'}), '([b + [0]], dtype=float)\n', (1436, 1460), True, 'import numpy as np\n'), ((1510, 1540), 'numpy.array', 'np.array', (['(A + [c])'], {'dtype': 'float'}), '(A + [c], dtype=float)\n', (1518, 1540), True, 'import numpy as np\n'), ((1553, 1595), 'numpy.concatenate', 'np.concatenate', (['(varTable, bCol.T)'], {'axis': '(1)'}), '((varTable, bCol.T), axis=1)\n', (1567, 1595), True, 'import numpy as np\n'), ((1754, 1782), 'numpy.array', 'np.array', (['[mainTable[:, -1]]'], {}), '([mainTable[:, -1]])\n', (1762, 1782), True, 'import numpy as np\n'), ((2083, 2138), 'numpy.concatenate', 'np.concatenate', (['(factorTable, slckAndP, bCol.T)'], {'axis': '(1)'}), '((factorTable, slckAndP, bCol.T), axis=1)\n', (2097, 2138), True, 'import numpy as np\n'), ((1855, 1883), 'numpy.identity', 'np.identity', (['(self.conLen + 1)'], {}), '(self.conLen + 1)\n', (1866, 1883), True, 'import numpy as np\n'), ((1983, 2011), 'numpy.identity', 'np.identity', (['(self.varLen + 1)'], {}), '(self.varLen + 1)\n', (1994, 2011), True, 'import numpy as np\n'), ((3554, 3587), 'numpy.dot', 'np.dot', (['self.tableu[pivR]', 'factor'], {}), '(self.tableu[pivR], factor)\n', (3560, 3587), True, 'import numpy as np\n')] |
from __future__ import division
from builtins import object
from past.utils import old_div
from proteus import *
from proteus.default_p import *
from proteus.mprans import SW2D
from proteus.mprans import SW2DCV
from proteus.Domain import RectangularDomain
import numpy as np
from proteus import (Domain, Context,
MeshTools as mt)
from proteus.Profiling import logEvent
import proteus.SWFlow.SWFlowProblem as SWFlowProblem
# *************************** #
# ***** GENERAL OPTIONS ***** #
# *************************** #
# Command-line tunables for this run (proteus Context options):
#   sw_model   -- 0 = SWEs, 1 = dispersive SWEs (per the help string)
#   final_time -- simulation end time
#   dt_output  -- interval between solution outputs
#   cfl        -- target CFL number
opts= Context.Options([
    ('sw_model',0,"sw_model = {0,1} for {SWEs,DSWEs}"),
    ("final_time",100.0,"Final time for simulation"),
    ("dt_output",10.0,"Time interval to output solution"),
    ("cfl",0.33,"Desired CFL restriction")
])
###################
# DOMAIN AND MESH #
###################
# Rectangular basin, 8000 x 800 (units as consumed by the bathymetry below).
L=(8000.0,800.0)
refinement = 4
domain = RectangularDomain(L=L,x=[0,0,0])
# CREATE REFINEMENT #
# Structured grid: nnx nodes along x (roughly doubling per refinement level),
# nny along y (1/10th of the x resolution); he is the resulting element size.
nnx0=6
nnx = (nnx0-1)*(2**refinement)+1
nny = old_div((nnx-1),10)+1
he = old_div(L[0],float(nnx-1))
triangleOptions="pAq30Dena%f" % (0.5*he**2,)
######################
##### BATHYMETRY #####
######################
# NOTE(review): these parameters look like the classic damped-oscillation
# in-a-parabolic-bowl benchmark (parabolic bed, linear friction k, analytic
# surface eta_function below) -- confirm against the intended test case.
h0=10      # depth scale of the parabolic bed / analytic solution
a=3000     # horizontal scale of the parabolic bed
B=2        # amplitude parameter of the analytic solution
k=0.001    # friction parameter (assigned to mannings below)
g = SWFlowProblem.default_physical_parameters['gravity']
# p and s are the frequency parameters of the damped analytic solution.
p = old_div(np.sqrt(8*g*h0),a)
s = old_div(np.sqrt(p**2 - k**2),2.)
mannings=k
def bathymetry(X):
    """Parabolic bed elevation at point ``X``.

    Only the first coordinate is used: h0 * (x - L[0]/2)**2 / a**2,
    a bowl centered at the domain midpoint.
    """
    x_coord = X[0]
    offset = x_coord - old_div(L[0], 2)
    return h0 * offset ** 2 / a / a
def eta_function(x, t):
    """Analytic free-surface elevation for the damped parabolic-bowl test.

    Evaluates the closed-form damped-oscillation surface at position ``x``
    and time ``t`` using the module-level parameters (h0, a, B, g, k, s, L).
    """
    amp1 = a**2*B**2/8./g/g/h0
    amp2 = -B**2/4./g
    amp3 = old_div(-1., g)
    decay = np.exp(-k*t)  # common damping factor, computed once
    part1 = amp1*decay*(-s*k*np.sin(2*s*t)+(old_div(k**2,4.)-s**2)*np.cos(2*s*t))
    part2 = amp2*decay
    part3 = amp3*np.exp(-k*t/2.)*(B*s*np.cos(s*t)+k*B/2.*np.sin(s*t))*(x-old_div(L[0],2))
    return h0 + part1 + part2 + part3
##############################
##### INITIAL CONDITIONS #####
##############################
class water_height_at_t0(object):
    """Initial water depth: analytic free surface at t=0, clipped at the bed."""
    def uOfXT(self,X,t):
        surface = eta_function(X[0],0)
        depth = surface - bathymetry(X)
        # Depth cannot be negative (dry region).
        return max(depth, 0.)
class Zero(object):
    """Identically-zero condition (used below for the momentum components)."""
    def uOfXT(self,X,t):
        # Value is independent of position and time.
        return 0.0
# ********************************** #
# ***** Create mySWFlowProblem ***** #
# ********************************** #
outputStepping = SWFlowProblem.OutputStepping(opts.final_time,dt_output=opts.dt_output)
initialConditions = {'water_height': water_height_at_t0(),
                     'x_mom': Zero(),
                     'y_mom': Zero()}
# None-valued boundaries for water height and x-momentum; zero Dirichlet
# boundary for the y-momentum component.
boundaryConditions = {'water_height': lambda x,flag: None,
                      'x_mom': lambda x,flag: None,
                      'y_mom': lambda x,flag: lambda x,t: 0.0}
# BUGFIX(review): sw_model and cfl were hard-coded to 0 and 0.33, silently
# ignoring the command-line options declared above.  Pass the opts values
# instead; the option defaults are 0 and 0.33, so default behavior is
# unchanged while the CLI now actually works.
mySWFlowProblem = SWFlowProblem.SWFlowProblem(sw_model=opts.sw_model,
                                            cfl=opts.cfl,
                                            outputStepping=outputStepping,
                                            structured=True,
                                            he=he,
                                            nnx=nnx,
                                            nny=nny,
                                            domain=domain,
                                            initialConditions=initialConditions,
                                            boundaryConditions=boundaryConditions,
                                            bathymetry=bathymetry)
# Enable linear friction with coefficient `mannings` (= k above).
mySWFlowProblem.physical_parameters['LINEAR_FRICTION']=1
mySWFlowProblem.physical_parameters['mannings']=mannings
| [
"numpy.sqrt",
"proteus.SWFlow.SWFlowProblem.SWFlowProblem",
"proteus.Domain.RectangularDomain",
"past.utils.old_div",
"numpy.exp",
"numpy.cos",
"proteus.SWFlow.SWFlowProblem.OutputStepping",
"numpy.sin",
"proteus.Context.Options"
] | [((547, 782), 'proteus.Context.Options', 'Context.Options', (["[('sw_model', 0, 'sw_model = {0,1} for {SWEs,DSWEs}'), ('final_time', 100.0,\n 'Final time for simulation'), ('dt_output', 10.0,\n 'Time interval to output solution'), ('cfl', 0.33,\n 'Desired CFL restriction')]"], {}), "([('sw_model', 0, 'sw_model = {0,1} for {SWEs,DSWEs}'), (\n 'final_time', 100.0, 'Final time for simulation'), ('dt_output', 10.0,\n 'Time interval to output solution'), ('cfl', 0.33,\n 'Desired CFL restriction')])\n", (562, 782), False, 'from proteus import Domain, Context, MeshTools as mt\n'), ((886, 921), 'proteus.Domain.RectangularDomain', 'RectangularDomain', ([], {'L': 'L', 'x': '[0, 0, 0]'}), '(L=L, x=[0, 0, 0])\n', (903, 921), False, 'from proteus.Domain import RectangularDomain\n'), ((2247, 2318), 'proteus.SWFlow.SWFlowProblem.OutputStepping', 'SWFlowProblem.OutputStepping', (['opts.final_time'], {'dt_output': 'opts.dt_output'}), '(opts.final_time, dt_output=opts.dt_output)\n', (2275, 2318), True, 'import proteus.SWFlow.SWFlowProblem as SWFlowProblem\n'), ((2645, 2895), 'proteus.SWFlow.SWFlowProblem.SWFlowProblem', 'SWFlowProblem.SWFlowProblem', ([], {'sw_model': '(0)', 'cfl': '(0.33)', 'outputStepping': 'outputStepping', 'structured': '(True)', 'he': 'he', 'nnx': 'nnx', 'nny': 'nny', 'domain': 'domain', 'initialConditions': 'initialConditions', 'boundaryConditions': 'boundaryConditions', 'bathymetry': 'bathymetry'}), '(sw_model=0, cfl=0.33, outputStepping=\n outputStepping, structured=True, he=he, nnx=nnx, nny=nny, domain=domain,\n initialConditions=initialConditions, boundaryConditions=\n boundaryConditions, bathymetry=bathymetry)\n', (2672, 2895), True, 'import proteus.SWFlow.SWFlowProblem as SWFlowProblem\n'), ((988, 1008), 'past.utils.old_div', 'old_div', (['(nnx - 1)', '(10)'], {}), '(nnx - 1, 10)\n', (995, 1008), False, 'from past.utils import old_div\n'), ((1253, 1272), 'numpy.sqrt', 'np.sqrt', (['(8 * g * h0)'], {}), '(8 * g * h0)\n', (1260, 1272), True, 'import 
numpy as np\n'), ((1284, 1308), 'numpy.sqrt', 'np.sqrt', (['(p ** 2 - k ** 2)'], {}), '(p ** 2 - k ** 2)\n', (1291, 1308), True, 'import numpy as np\n'), ((1486, 1502), 'past.utils.old_div', 'old_div', (['(-1.0)', 'g'], {}), '(-1.0, g)\n', (1493, 1502), False, 'from past.utils import old_div\n'), ((1624, 1638), 'numpy.exp', 'np.exp', (['(-k * t)'], {}), '(-k * t)\n', (1630, 1638), True, 'import numpy as np\n'), ((1529, 1543), 'numpy.exp', 'np.exp', (['(-k * t)'], {}), '(-k * t)\n', (1535, 1543), True, 'import numpy as np\n'), ((1716, 1732), 'past.utils.old_div', 'old_div', (['L[0]', '(2)'], {}), '(L[0], 2)\n', (1723, 1732), False, 'from past.utils import old_div\n'), ((1548, 1565), 'numpy.sin', 'np.sin', (['(2 * s * t)'], {}), '(2 * s * t)\n', (1554, 1565), True, 'import numpy as np\n'), ((1586, 1603), 'numpy.cos', 'np.cos', (['(2 * s * t)'], {}), '(2 * s * t)\n', (1592, 1603), True, 'import numpy as np\n'), ((1660, 1680), 'numpy.exp', 'np.exp', (['(-k * t / 2.0)'], {}), '(-k * t / 2.0)\n', (1666, 1680), True, 'import numpy as np\n'), ((1563, 1583), 'past.utils.old_div', 'old_div', (['(k ** 2)', '(4.0)'], {}), '(k ** 2, 4.0)\n', (1570, 1583), False, 'from past.utils import old_div\n'), ((1681, 1694), 'numpy.cos', 'np.cos', (['(s * t)'], {}), '(s * t)\n', (1687, 1694), True, 'import numpy as np\n'), ((1700, 1713), 'numpy.sin', 'np.sin', (['(s * t)'], {}), '(s * t)\n', (1706, 1713), True, 'import numpy as np\n'), ((1368, 1384), 'past.utils.old_div', 'old_div', (['L[0]', '(2)'], {}), '(L[0], 2)\n', (1375, 1384), False, 'from past.utils import old_div\n')] |
# -*- coding: utf-8 -*-
# Author: XuMing <<EMAIL>>
# Brief:
import keras
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import LSTM
# Generate dummy data
# BUGFIX(review): an Embedding layer consumes integer token indices in
# [0, input_dim); uniform floats in [0, 1) get truncated to index 0, so the
# original demo trained on constant input.  Draw random integer indices
# below 128 (the Embedding input_dim) instead.
x_train = np.random.randint(128, size=(1000, 20))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.randint(128, size=(100, 20))
y_test = np.random.randint(2, size=(100, 1))

# Simple binary classifier: embedding -> LSTM -> dropout -> sigmoid.
model = Sequential()
model.add(Embedding(128, output_dim=256))
model.add(LSTM(128))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=128, epochs=2)

score = model.evaluate(x_test, y_test, batch_size=128)
print('total loss on test set:', score[0])
print('accuracy of test set:', score[1])

# from keras.utils import plot_model
# plot_model(model, to_file='lstm_model.png')

# Round-trip the model through disk to demonstrate save/load.
model.save('lstm_model.h5')
del model

# load model by file
model = keras.models.load_model('lstm_model.h5')
score = model.evaluate(x_test, y_test, batch_size=128)
print('total loss on test set:', score[0])
print('accuracy of test set:', score[1])

# Fresh, larger dummy test set for a final evaluation.
x_test = np.random.randint(128, size=(200, 20))
y_test = np.random.randint(2, size=(200, 1))
score = model.evaluate(x_test, y_test, batch_size=128)
print('total loss on test set:', score[0])
print('accuracy of test set:', score[1]) | [
"keras.models.load_model",
"numpy.random.random",
"keras.models.Sequential",
"keras.layers.LSTM",
"numpy.random.randint",
"keras.layers.Dense",
"keras.layers.Embedding",
"keras.layers.Dropout"
] | [((266, 294), 'numpy.random.random', 'np.random.random', (['(1000, 20)'], {}), '((1000, 20))\n', (282, 294), True, 'import numpy as np\n'), ((305, 341), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(1000, 1)'}), '(2, size=(1000, 1))\n', (322, 341), True, 'import numpy as np\n'), ((351, 378), 'numpy.random.random', 'np.random.random', (['(100, 20)'], {}), '((100, 20))\n', (367, 378), True, 'import numpy as np\n'), ((388, 423), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(100, 1)'}), '(2, size=(100, 1))\n', (405, 423), True, 'import numpy as np\n'), ((433, 445), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (443, 445), False, 'from keras.models import Sequential\n'), ((1035, 1075), 'keras.models.load_model', 'keras.models.load_model', (['"""lstm_model.h5"""'], {}), "('lstm_model.h5')\n", (1058, 1075), False, 'import keras\n'), ((1225, 1252), 'numpy.random.random', 'np.random.random', (['(200, 20)'], {}), '((200, 20))\n', (1241, 1252), True, 'import numpy as np\n'), ((1262, 1297), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(200, 1)'}), '(2, size=(200, 1))\n', (1279, 1297), True, 'import numpy as np\n'), ((456, 486), 'keras.layers.Embedding', 'Embedding', (['(128)'], {'output_dim': '(256)'}), '(128, output_dim=256)\n', (465, 486), False, 'from keras.layers import Embedding\n'), ((498, 507), 'keras.layers.LSTM', 'LSTM', (['(128)'], {}), '(128)\n', (502, 507), False, 'from keras.layers import LSTM\n'), ((519, 531), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (526, 531), False, 'from keras.layers import Dense, Dropout\n'), ((543, 573), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (548, 573), False, 'from keras.layers import Dense, Dropout\n')] |
import numpy as np
import tools
import warnings
class Alpha():
    """
    Influence coefficient matrix (ALPHA).

    Alpha is the influence coefficient matrix: a representation of the
    change of the vibration vector at a measuring point when putting a unit
    weight on a balancing plane.  Rows correspond to measuring points and
    columns to balancing planes.
    """
    def __init__(self, name: str = ''):
        """
        Instantiate an instance of Alpha.

        Args:
            name: optional name of this Alpha instance.
        """
        self.name = name
    def add(self, direct_matrix: 'np.ndarray' = None, A: 'np.ndarray' = None,
            B: 'np.ndarray' = None, U: 'np.ndarray' = None,
            keep_trial: bool = False,
            name: str = ''):
        '''
        Add new values for this Alpha instance.

        Either ``direct_matrix`` is needed, or ALL of (A, B, U).

        Args:
            direct_matrix: numpy array, M rows -> measuring points,
                N columns -> balancing planes
            A: initial vibration column vector (M x 1 numpy array)
            B: trial-run matrix (M x N numpy array)
            U: trial weights row vector (length-N numpy array)
            keep_trial: if True, each trial column is differenced against
                the previous trial column (A shifted into B column-wise)
                instead of against A
            name: optional label (currently unused by this method)

        Note:
            The matrix is computed as alpha = (B - A) / U.
        '''
        try: # test if direct input
            # Touching .shape raises AttributeError when direct_matrix is
            # None, which routes us to the (A, B, U) branch below.
            _ = direct_matrix.shape # TODO raise error when matrix is 1 dim
            if direct_matrix.shape[0] >= direct_matrix.shape[1]:
                self.value = direct_matrix
            else:
                raise tools.CustomError('Number of rows(measuring points) should be '
                                  'equal or more than the number of columns '
                                  '(balancing planes)!')
        except AttributeError:
            # if direct matrix is not input calculate it from A, B, U
            # Touch A/B/U shapes to verify existence; the result of all()
            # is intentionally discarded -- a missing (None) argument
            # raises AttributeError, caught below.
            try:
                all([A.shape, B.shape, U.shape])
                # Test dimensions
                if A.shape[1] > 1:
                    raise tools.CustomError('`A` should be column vector')
                elif U.ndim > 1:
                    raise tools.CustomError('`U` should be row vector')
                elif B.shape[0] != A.shape[0] or B.shape[1] != U.shape[0]:
                    raise tools.CustomError('`B` dimensions should match `A`and `U`')
                else:
                    if not keep_trial:
                        self.value = (B - A) / U
                    else:
                        # Reference each trial against the previous trial:
                        # prepend A to B, drop B's last column.
                        _A_keep_trial = np.delete((np.insert(B, [0], A, axis=1)),
                                                -1, axis=1)
                        self.value = (B - _A_keep_trial) / U
            except AttributeError:
                raise tools.CustomError('Either direct_matrix or (A,B,U) '
                                  'should be passed "numpy arrays"')
    def check(self, ill_condition_remove: bool = False):
        '''
        Check the alpha value.

        * Symmetry check (square matrices only): for a square influence
          matrix the reciprocity law implies approximate symmetry, so an
          asymmetric matrix triggers a warning.
        * Ill-conditioned planes check: if two or more planes produce
          linearly dependent readings (e.g. [[1, 2, 3], [2, 4, 6]]) they
          carry no new information about the system and keeping them
          degrades the solution.

        Args:
            ill_condition_remove: if True, remove the ill-conditioned
                planes (columns) flagged by tools.ill_condition.

        Returns:
            None (the summary is printed).
        '''
        self.M = self.value.shape[0]  # number of measuring points
        self.N = self.value.shape[1]  # number of balancing planes
        if self.M == self.N:
            # Loose tolerances: rtol=0.1, atol=1e-06.
            _check_sym = np.allclose(self.value, self.value.T, 0.1, 1e-06)
            if not _check_sym:
                warnings.warn('Warning: Influence Matrix is asymmetrical!')
                _check_status_sym = 'Influence Matrix is asymmetrical, check your data'
            else:
                _check_status_sym = 'Influence Matrix is symmetric --> OK'
        else:
            _check_status_sym = 'Not a square matrix --> no exact solution'
        # Checking ILL-CONDITIONED planes
        # NOTE(review): a falsy return (e.g. 0) is treated as "no ill
        # condition" -- verify tools.ill_condition never flags plane 0.
        ill_plane = tools.ill_condition(self.value)
        if ill_plane:
            _check_ill_condition = 'Ill condition found in plane{}'.format(ill_plane)
            if ill_condition_remove:
                self.value = np.delete(self.value,[ill_plane], axis=1)
        else:
            _check_ill_condition ='No ill conditioned planes --> ok'
        return print('{}\n\n{}'.format(_check_status_sym, _check_ill_condition))
| [
"numpy.insert",
"numpy.allclose",
"tools.CustomError",
"numpy.delete",
"tools.ill_condition",
"warnings.warn"
] | [((4260, 4291), 'tools.ill_condition', 'tools.ill_condition', (['self.value'], {}), '(self.value)\n', (4279, 4291), False, 'import tools\n'), ((3769, 3818), 'numpy.allclose', 'np.allclose', (['self.value', 'self.value.T', '(0.1)', '(1e-06)'], {}), '(self.value, self.value.T, 0.1, 1e-06)\n', (3780, 3818), True, 'import numpy as np\n'), ((1551, 1686), 'tools.CustomError', 'tools.CustomError', (['"""Number of rows(measuring points) should be equal or more than the number of columns (balancing planes)!"""'], {}), "(\n 'Number of rows(measuring points) should be equal or more than the number of columns (balancing planes)!'\n )\n", (1568, 1686), False, 'import tools\n'), ((3866, 3925), 'warnings.warn', 'warnings.warn', (['"""Warning: Influence Matrix is asymmetrical!"""'], {}), "('Warning: Influence Matrix is asymmetrical!')\n", (3879, 3925), False, 'import warnings\n'), ((4466, 4508), 'numpy.delete', 'np.delete', (['self.value', '[ill_plane]'], {'axis': '(1)'}), '(self.value, [ill_plane], axis=1)\n', (4475, 4508), True, 'import numpy as np\n'), ((2080, 2128), 'tools.CustomError', 'tools.CustomError', (['"""`A` should be column vector"""'], {}), "('`A` should be column vector')\n", (2097, 2128), False, 'import tools\n'), ((2792, 2881), 'tools.CustomError', 'tools.CustomError', (['"""Either direct_matrix or (A,B,U) should be passed "numpy arrays\\""""'], {}), '(\n \'Either direct_matrix or (A,B,U) should be passed "numpy arrays"\')\n', (2809, 2881), False, 'import tools\n'), ((2188, 2233), 'tools.CustomError', 'tools.CustomError', (['"""`U` should be row vector"""'], {}), "('`U` should be row vector')\n", (2205, 2233), False, 'import tools\n'), ((2335, 2394), 'tools.CustomError', 'tools.CustomError', (['"""`B` dimensions should match `A`and `U`"""'], {}), "('`B` dimensions should match `A`and `U`')\n", (2352, 2394), False, 'import tools\n'), ((2582, 2610), 'numpy.insert', 'np.insert', (['B', '[0]', 'A'], {'axis': '(1)'}), '(B, [0], A, axis=1)\n', (2591, 2610), True, 
'import numpy as np\n')] |
from Kernel import Kernel
from agent.ExchangeAgent import ExchangeAgent
from agent.etf.EtfPrimaryAgent import EtfPrimaryAgent
from agent.HeuristicBeliefLearningAgent import HeuristicBeliefLearningAgent
from agent.examples.ImpactAgent import ImpactAgent
from agent.ZeroIntelligenceAgent import ZeroIntelligenceAgent
from agent.examples.MomentumAgent import MomentumAgent
from agent.etf.EtfArbAgent import EtfArbAgent
from agent.etf.EtfMarketMakerAgent import EtfMarketMakerAgent
from util.order import LimitOrder
from util.oracle.MeanRevertingOracle import MeanRevertingOracle
from util.oracle.SparseMeanRevertingOracle import SparseMeanRevertingOracle
from util import util
import numpy as np
import pandas as pd
import sys
DATA_DIR = "~/data"
# Some config files require additional command line parameters to easily
# control agent or simulation hyperparameters during coarse parallelization.
import argparse
# Command-line interface.  Defaults reproduce the hard-coded configuration;
# unknown arguments are tolerated via parse_known_args so the outer launcher
# can pass its own flags through.
parser = argparse.ArgumentParser(description='Detailed options for momentum config.')
parser.add_argument('-b', '--book_freq', default=0,
                    help='Frequency at which to archive order book for visualization')
parser.add_argument('-c', '--config', required=True,
                    help='Name of config file to execute')
parser.add_argument('-g', '--greed', type=float, default=0.25,
                    help='Impact agent greed')
# store_false: passing -i DISABLES the impact trade (default True = fire it).
parser.add_argument('-i', '--impact', action='store_false',
                    help='Do not actually fire an impact trade.', default=True)
parser.add_argument('-l', '--log_dir', default="twosym",
                    help='Log directory name (default: unix timestamp at program start)')
parser.add_argument('-n', '--obs_noise', type=float, default=1000000,
                    help='Observation noise variance for zero intelligence agents (sigma^2_n)')
parser.add_argument('-r', '--shock_variance', type=float, default=500000,
                    help='Shock variance for mean reversion process (sigma^2_s)')
# NOTE(review): store_true combined with default=True means -o can never
# change the value; order logging is effectively always on -- confirm intent.
parser.add_argument('-o', '--log_orders', action='store_true', default=True,
                    help='Log every order-related action by every agent.')
parser.add_argument('-s', '--seed', type=int, default=1,
                    help='numpy.random.seed() for simulation')
parser.add_argument('-v', '--verbose', action='store_true',
                    help='Maximum verbosity!')
parser.add_argument('--config_help', action='store_true',
                    help='Print argument options for this config file')
args, remaining_args = parser.parse_known_args()
if args.config_help:
    parser.print_help()
    sys.exit()
# Historical date to simulate. Required even if not relevant.
historical_date = pd.to_datetime('2014-01-28')
seed = args.seed
# Requested log directory.
log_dir = args.log_dir + str(seed)
# Requested order book snapshot archive frequency.
book_freq = args.book_freq
# Observation noise variance for zero intelligence agents.
sigma_n = args.obs_noise
# Shock variance of mean reversion process.
sigma_s = args.shock_variance
# Impact agent greed.
greed = args.greed
# Should the impact agent actually trade?
impact = args.impact
# Random seed specification on the command line. Default: None (by clock).
# If none, we select one via a specific random method and pass it to seed()
# so we can record it for future use. (You cannot reasonably obtain the
# automatically generated seed when seed() is called without a parameter.)
# Note that this seed is used to (1) make any random decisions within this
# config file itself and (2) to generate random number seeds for the
# (separate) Random objects given to each agent. This ensure that when
# the agent population is appended, prior agents will continue to behave
# in the same manner save for influences by the new agents. (i.e. all prior
# agents still have their own separate PRNG sequence, and it is the same as
# before)
if seed is not None:
seed = int(pd.Timestamp.now().timestamp() * 1000000) % (2 ** 32 - 1)
np.random.seed(seed)
# Config parameter that causes util.util.print to suppress most output.
# Also suppresses formatting of limit orders (which is time consuming).
util.silent_mode = not args.verbose
LimitOrder.silent_mode = not args.verbose
# Config parameter that causes every order-related action to be logged by
# every agent. Activate only when really needed as there is a significant
# time penalty to all that object serialization!
log_orders = args.log_orders
print("Silent mode: {}".format(util.silent_mode))
print("Logging orders: {}".format(log_orders))
print("Book freq: {}".format(book_freq))
print("ZeroIntelligenceAgent noise: {:0.4f}".format(sigma_n))
print("ImpactAgent greed: {:0.2f}".format(greed))
print("ImpactAgent firing: {}".format(impact))
print("Shock variance: {:0.4f}".format(sigma_s))
print("Configuration seed: {}\n".format(seed))
# Since the simulator often pulls historical data, we use a real-world
# nanosecond timestamp (pandas.Timestamp) for our discrete time "steps",
# which are considered to be nanoseconds. For other (or abstract) time
# units, one can either configure the Timestamp interval, or simply
# interpret the nanoseconds as something else.
# What is the earliest available time for an agent to act during the
# simulation?
midnight = historical_date
kernelStartTime = midnight
# When should the Kernel shut down? (This should be after market close.)
# Here we go for 8:00 PM the same day to reflect the ETF primary market
kernelStopTime = midnight + pd.to_timedelta('20:00:00')
# This will configure the kernel with a default computation delay
# (time penalty) for each agent's wakeup and recvMsg. An agent
# can change this at any time for itself. (nanoseconds)
defaultComputationDelay = 0 # no delay for this config
# IMPORTANT NOTE CONCERNING AGENT IDS: the id passed to each agent must:
# 1. be unique
# 2. equal its index in the agents list
# This is to avoid having to call an extra getAgentListIndexByID()
# in the kernel every single time an agent must be referenced.
# This is a list of symbols the exchange should trade. It can handle any number.
# It keeps a separate order book for each symbol. The example data includes
# only IBM. This config uses generated data, so the symbol doesn't really matter.
# If shock variance must differ for each traded symbol, it can be overridden here.
symbols = {'SYM1': {'r_bar': 100000, 'kappa': 1.67e-13, 'sigma_s': 0, 'type': util.SymbolType.Stock,
'fund_vol': 1e-4,
'megashock_lambda_a': 2.77778e-18,
'megashock_mean': 1e3,
'megashock_var': 5e4,
'random_state': np.random.RandomState(seed=np.random.randint(low=0, high=2 ** 32, dtype='uint64'))},
'SYM2': {'r_bar': 100000, 'kappa': 1.67e-13, 'sigma_s': 0, 'type': util.SymbolType.Stock,
'fund_vol': 1e-4,
'megashock_lambda_a': 2.77778e-18,
'megashock_mean': 1e3,
'megashock_var': 5e4,
'random_state': np.random.RandomState(seed=np.random.randint(low=0, high=2 ** 32, dtype='uint64'))},
'ETF': {'r_bar': 100000, 'kappa': 2*1.67e-13, 'sigma_s': 0, 'portfolio': ['SYM1', 'SYM2'],
'fund_vol': 1e-4,
'megashock_lambda_a': 2.77778e-13,
'megashock_mean': 0,
'megashock_var': 5e4,
'random_state': np.random.RandomState(seed=np.random.randint(low=0, high=2 ** 32, dtype='uint64')),
'type': util.SymbolType.ETF}
}
# symbols = { 'IBM' : { 'r_bar' : 100000, 'kappa' : 0.05, 'sigma_s' : sigma_s }, 'GOOG' : { 'r_bar' : 150000, 'kappa' : 0.05, 'sigma_s' : sigma_s } }
symbols_full = symbols.copy()
# seed=np.random.randint(low=0,high=2**32)
# seed = 2000
### Configure the Kernel.
kernel = Kernel("Base Kernel", random_state=np.random.RandomState(seed=np.random.randint(low=0, high=2 ** 32)))
### Configure the agents. When conducting "agent of change" experiments, the
### new agents should be added at the END only.
agent_count = 0
agents = []
agent_types = []
### Configure an exchange agent.
# Let's open the exchange at 9:30 AM.
mkt_open = midnight + pd.to_timedelta('09:30:00')
# And close it at 9:30:00.000001 (i.e. 1,000 nanoseconds or "time steps")
# mkt_close = midnight + pd.to_timedelta('09:30:00.001')
mkt_close = midnight + pd.to_timedelta('15:30:00')
# Configure an appropriate oracle for all traded stocks.
# All agents requiring the same type of Oracle will use the same oracle instance.
oracle = SparseMeanRevertingOracle(mkt_open, mkt_close, symbols)
# Create the exchange.
num_exchanges = 1
agents.extend([ExchangeAgent(j, "Exchange Agent {}".format(j), "ExchangeAgent", mkt_open, mkt_close,
[s for s in symbols_full], log_orders=log_orders, pipeline_delay=0,
computation_delay=0, stream_history=10, book_freq=book_freq,
random_state=np.random.RandomState(seed=np.random.randint(low=0, high=2 ** 32)))
for j in range(agent_count, agent_count + num_exchanges)])
agent_types.extend(["ExchangeAgent" for j in range(num_exchanges)])
agent_count += num_exchanges
# Let's open the exchange at 5:00 PM.
prime_open = midnight + pd.to_timedelta('17:00:00')
# And close it at 5:00:01 PM
prime_close = midnight + pd.to_timedelta('17:00:01')
# Create the primary.
num_primes = 1
agents.extend([EtfPrimaryAgent(j, "ETF Primary Agent {}".format(j), "EtfPrimaryAgent", prime_open, prime_close, 'ETF',
pipeline_delay=0, computation_delay=0,
random_state=np.random.RandomState(seed=np.random.randint(low=0, high=2 ** 32)))
for j in range(agent_count, agent_count + num_primes)])
agent_types.extend(["EtfPrimeAgent" for j in range(num_primes)])
agent_count += num_primes
### Configure some zero intelligence agents.
# Cash in this simulator is always in CENTS.
starting_cash = 10000000
# Here are the zero intelligence agents.
symbol1 = 'SYM1'
symbol2 = 'SYM2'
symbol3 = 'ETF'
print(symbols_full)
s1 = symbols_full[symbol1]
s2 = symbols_full[symbol2]
s3 = symbols_full[symbol3]
# Tuples are: (# agents, R_min, R_max, eta, L). L for HBL only.
# Some configs for ZI agents only (among seven parameter settings).
# 4 agents
# zi = [ (1, 0, 250, 1), (1, 0, 500, 1), (1, 0, 1000, 0.8), (1, 0, 1000, 1), (0, 0, 2000, 0.8), (0, 250, 500, 0.8), (0, 250, 500, 1) ]
# hbl = []
# 28 agents
# zi = [ (4, 0, 250, 1), (4, 0, 500, 1), (4, 0, 1000, 0.8), (4, 0, 1000, 1), (4, 0, 2000, 0.8), (4, 250, 500, 0.8), (4, 250, 500, 1) ]
# hbl = []
# 65 agents
# zi = [ (10, 0, 250, 1), (10, 0, 500, 1), (9, 0, 1000, 0.8), (9, 0, 1000, 1), (9, 0, 2000, 0.8), (9, 250, 500, 0.8), (9, 250, 500, 1) ]
# hbl = []
# 100 agents
zi = [ (15, 0, 250, 1), (15, 0, 500, 1), (14, 0, 1000, 0.8), (14, 0, 1000, 1), (14, 0, 2000, 0.8), (14, 250, 500, 0.8), (14, 250, 500, 1) ]
hbl = []
# 1000 agents
# zi = [ (143, 0, 250, 1), (143, 0, 500, 1), (143, 0, 1000, 0.8), (143, 0, 1000, 1), (143, 0, 2000, 0.8), (143, 250, 500, 0.8), (142, 250, 500, 1) ]
# hbl = []
# 10000 agents
# zi = [ (1429, 0, 250, 1), (1429, 0, 500, 1), (1429, 0, 1000, 0.8), (1429, 0, 1000, 1), (1428, 0, 2000, 0.8), (1428, 250, 500, 0.8), (1428, 250, 500, 1) ]
# hbl = []
# Some configs for HBL agents only (among four parameter settings).
# 4 agents
# zi = []
# hbl = [ (1, 250, 500, 1, 2), (1, 250, 500, 1, 3), (1, 250, 500, 1, 5), (1, 250, 500, 1, 8) ]
# 28 agents
# zi = []
# hbl = [ (7, 250, 500, 1, 2), (7, 250, 500, 1, 3), (7, 250, 500, 1, 5), (7, 250, 500, 1, 8) ]
# 1000 agents
# zi = []
# hbl = [ (250, 250, 500, 1, 2), (250, 250, 500, 1, 3), (250, 250, 500, 1, 5), (250, 250, 500, 1, 8) ]
# Some configs that mix both types of agents.
# 28 agents
# zi = [ (3, 0, 250, 1), (3, 0, 500, 1), (3, 0, 1000, 0.8), (3, 0, 1000, 1), (3, 0, 2000, 0.8), (3, 250, 500, 0.8), (2, 250, 500, 1) ]
# hbl = [ (2, 250, 500, 1, 2), (2, 250, 500, 1, 3), (2, 250, 500, 1, 5), (2, 250, 500, 1, 8) ]
# 65 agents
# zi = [ (7, 0, 250, 1), (7, 0, 500, 1), (7, 0, 1000, 0.8), (7, 0, 1000, 1), (7, 0, 2000, 0.8), (7, 250, 500, 0.8), (7, 250, 500, 1) ]
# hbl = [ (4, 250, 500, 1, 2), (4, 250, 500, 1, 3), (4, 250, 500, 1, 5), (4, 250, 500, 1, 8) ]
# 1000 agents
#zi = [(100, 0, 250, 1), (100, 0, 500, 1), (100, 0, 10000, 0.8), (100, 0, 10000, 1), (100, 0, 2000, 0.8),
# (100, 250, 500, 0.8), (100, 250, 500, 1)]
#hbl = [(75, 250, 500, 1, 2), (75, 250, 500, 1, 3), (75, 250, 500, 1, 5), (75, 250, 500, 1, 8)]
# ZI strategy split.
for i, x in enumerate(zi):
strat_name = "Type {} [{} <= R <= {}, eta={}]".format(i + 1, x[1], x[2], x[3])
agents.extend([ZeroIntelligenceAgent(j, "ZI Agent {} {}".format(j, strat_name),
"ZeroIntelligenceAgent {}".format(strat_name),
random_state=np.random.RandomState(
seed=np.random.randint(low=0, high=2 ** 32)), log_orders=log_orders,
symbol=symbol1, starting_cash=starting_cash, sigma_n=sigma_n,
r_bar=s1['r_bar'], q_max=10, sigma_pv=5000000, R_min=x[1], R_max=x[2],
eta=x[3], lambda_a=1e-12) for j in range(agent_count, agent_count + x[0])])
agent_types.extend(["ZeroIntelligenceAgent {}".format(strat_name) for j in range(x[0])])
agent_count += x[0]
for i, x in enumerate(zi):
strat_name = "Type {} [{} <= R <= {}, eta={}]".format(i + 1, x[1], x[2], x[3])
agents.extend([ZeroIntelligenceAgent(j, "ZI Agent {} {}".format(j, strat_name),
"ZeroIntelligenceAgent {}".format(strat_name),
random_state=np.random.RandomState(
seed=np.random.randint(low=0, high=2 ** 32)), log_orders=log_orders,
symbol=symbol2, starting_cash=starting_cash, sigma_n=sigma_n,
r_bar=s2['r_bar'], q_max=10, sigma_pv=5000000, R_min=x[1], R_max=x[2],
eta=x[3], lambda_a=1e-12) for j in range(agent_count, agent_count + x[0])])
agent_types.extend(["ZeroIntelligenceAgent {}".format(strat_name) for j in range(x[0])])
agent_count += x[0]
# --------------------------------- simbolo ETF -------------------------------------- #
for i, x in enumerate(zi):
strat_name = "Type {} [{} <= R <= {}, eta={}]".format(i + 1, x[1], x[2], x[3])
r_bar_etf = s2['r_bar'] + s1['r_bar']
agents.extend([ZeroIntelligenceAgent(j, "ZI Agent {} {}".format(j, strat_name),
"ZeroIntelligenceAgent {}".format(strat_name),
random_state=np.random.RandomState(
seed=np.random.randint(low=0, high=2 ** 32)), log_orders=log_orders,
symbol=symbol3, starting_cash=starting_cash, sigma_n=sigma_n,
r_bar=r_bar_etf, q_max=10,
sigma_pv=5000000, R_min=x[1], R_max=x[2], eta=x[3], lambda_a=1e-12) for j in
range(agent_count, agent_count + x[0])])
agent_types.extend(["ZeroIntelligenceAgent {}".format(strat_name) for j in range(x[0])])
agent_count += x[0]
# HBL strategy split.
for i, x in enumerate(hbl):
strat_name = "Type {} [{} <= R <= {}, eta={}, L={}]".format(i + 1, x[1], x[2], x[3], x[4])
agents.extend([HeuristicBeliefLearningAgent(j, "HBL Agent {} {}".format(j, strat_name),
"HeuristicBeliefLearningAgent {}".format(strat_name),
random_state=np.random.RandomState(
seed=np.random.randint(low=0, high=2 ** 32)), log_orders=log_orders,
symbol=symbol1, starting_cash=starting_cash, sigma_n=sigma_n,
r_bar=s1['r_bar'], q_max=10, sigma_pv=5000000, R_min=x[1], R_max=x[2],
eta=x[3], lambda_a=1e-12, L=x[4]) for j in
range(agent_count, agent_count + x[0])])
agent_types.extend(["HeuristicBeliefLearningAgent {}".format(strat_name) for j in range(x[0])])
agent_count += x[0]
for i, x in enumerate(hbl):
strat_name = "Type {} [{} <= R <= {}, eta={}, L={}]".format(i + 1, x[1], x[2], x[3], x[4])
agents.extend([HeuristicBeliefLearningAgent(j, "HBL Agent {} {}".format(j, strat_name),
"HeuristicBeliefLearningAgent {}".format(strat_name),
random_state=np.random.RandomState(
seed=np.random.randint(low=0, high=2 ** 32)), log_orders=log_orders,
symbol=symbol2, starting_cash=starting_cash, sigma_n=sigma_n,
r_bar=s2['r_bar'], q_max=10, sigma_pv=5000000, R_min=x[1], R_max=x[2],
eta=x[3], lambda_a=1e-12, L=x[4]) for j in
range(agent_count, agent_count + x[0])])
agent_types.extend(["HeuristicBeliefLearningAgent {}".format(strat_name) for j in range(x[0])])
agent_count += x[0]
# ----------------------------- Belief ---------------------------------- #
for i, x in enumerate(hbl):
strat_name = "Type {} [{} <= R <= {}, eta={}, L={}]".format(i + 1, x[1], x[2], x[3], x[4])
r_bar_etf = s2['r_bar'] + s1['r_bar']
agents.extend([HeuristicBeliefLearningAgent(j, "HBL Agent {} {}".format(j, strat_name),
"HeuristicBeliefLearningAgent {}".format(strat_name),
random_state=np.random.RandomState(
seed=np.random.randint(low=0, high=2 ** 32)), log_orders=log_orders,
symbol=symbol3, starting_cash=starting_cash, sigma_n=sigma_n,
r_bar=r_bar_etf,
#portfolio={'SYM1': s1['r_bar'], 'SYM2': s2['r_bar']},
q_max=10,
sigma_pv=5000000, R_min=x[1], R_max=x[2], eta=x[3], lambda_a=1e-12,
L=x[4]) for j in range(agent_count, agent_count + x[0])])
agent_types.extend(["HeuristicBeliefLearningAgent {}".format(strat_name) for j in range(x[0])])
agent_count += x[0]
# Trend followers agent
i = agent_count
lookback = 10
num_tf = 0
for j in range(num_tf):
agents.append(
MomentumAgent(i, "Momentum Agent {}".format(i), type=None, max_size=100, min_size=1,
symbol=symbol1, starting_cash=starting_cash, # lookback=lookback, -> al limite inserire in Trading Agent, per ora tolto
random_state=np.random.RandomState(seed=np.random.randint(low=0, high=2 ** 32)),
log_orders=log_orders))
agent_types.append("MomentumAgent {}".format(i))
i += 1
agent_count += num_tf
# for j in range(num_tf):
# agents.append(MomentumAgent(i, "Momentum Agent {}".format(i), symbol=symbol2, startingCash = starting_cash, lookback=lookback))
# agent_types.append("MomentumAgent {}".format(i))
# i+=1
# agent_count += num_tf
# for j in range(num_tf):
# agents.append(MomentumAgent(i, "Momentum Agent {}".format(i), symbol=symbol3, startingCash = starting_cash, lookback=lookback))
# agent_types.append("MomentumAgent {}".format(i))
# i+=1
# agent_count += num_tf
# ETF arbitrage agent
i = agent_count
gamma = 250
num_arb = 50
for j in range(num_arb):
agents.append(EtfArbAgent(i, "Etf Arb Agent {}".format(i), "EtfArbAgent", portfolio=['SYM1', 'SYM2'], gamma=gamma,
starting_cash=starting_cash, lambda_a=1e-9, log_orders=log_orders,
random_state=np.random.RandomState(seed=np.random.randint(low=0, high=2 ** 32))))
agent_types.append("EtfArbAgent {}".format(i))
i += 1
agent_count += num_arb
# ETF market maker agent
# i = agent_count
# gamma = 100
# num_mm = 10
mm = [(50, 250)] # (5, 50), (5, 100), (5, 200), (5, 300)]
# for j in range(num_mm):
# agents.append(EtfMarketMakerAgent(i, "Etf MM Agent {}".format(i), "EtfMarketMakerAgent", portfolio = ['IBM','GOOG'], gamma = gamma, starting_cash = starting_cash, lambda_a=1e-12, log_orders=log_orders, random_state = np.random.RandomState(seed=np.random.randint(low=0,high=2**32))))
# agent_types.append("EtfMarketMakerAgent {}".format(i))
# i+=1
# agent_count += num_mm
for i, x in enumerate(mm):
strat_name = "Type {} [gamma = {}]".format(i + 1, x[1])
print(strat_name)
agents.extend([EtfMarketMakerAgent(j, "Etf MM Agent {} {}".format(j, strat_name),
"EtfMarketMakerAgent {}".format(strat_name), portfolio=['SYM1', 'SYM2'],
gamma=x[1], starting_cash=starting_cash, lambda_a=1e-9, log_orders=log_orders,
random_state=np.random.RandomState(seed=np.random.randint(low=0, high=2 ** 32)))
for j in range(agent_count, agent_count + x[0])])
agent_types.extend(["EtfMarketMakerAgent {}".format(strat_name) for j in range(x[0])])
agent_count += x[0]
# Impact agent.
# 200 time steps in...
impacts = ['13:00:00', '13:00:06', '13:00:12', '13:00:18', '13:00:24', '13:00:30', '13:00:36', '13:00:42', '13:00:48','13:00:54', '13:01:00']
for itrades in impacts:
impact_time = midnight + pd.to_timedelta(itrades)
i = agent_count
agents.append(ImpactAgent(i, "Impact Agent1 {}".format(i), "ImpactAgent1", symbol="SYM1", starting_cash=starting_cash,
impact=impact, impact_time=impact_time, greed=greed,
random_state=np.random.RandomState(seed=np.random.randint(low=0, high=2 ** 32))))
agent_types.append("ImpactAgent 1 {}".format(i))
agent_count += 1
# i = agent_count
# agents.append(ImpactAgent(i, "Impact Agent3 {}".format(i), "ImpactAgent3", symbol = "ETF", starting_cash = starting_cash, greed = greed, impact = impact, impact_time = impact_time, random_state = np.random.RandomState(seed=np.random.randint(low=0,high=2**32))))
# agent_types.append("ImpactAgent 3 {}".format(i))
# agent_count += 1
### Configure a simple message latency matrix for the agents. Each entry is the minimum
### nanosecond delay on communication [from][to] agent ID.
# Square numpy array with dimensions equal to total agent count. Most agents are handled
# at init, drawn from a uniform distribution from:
# Times Square (3.9 miles from NYSE, approx. 21 microseconds at the speed of light) to:
# Pike Place Starbucks in Seattle, WA (2402 miles, approx. 13 ms at the speed of light).
# Other agents can be explicitly set afterward (and the mirror half of the matrix is also).
# This configures all agents to a starting latency as described above.
# latency = np.random.uniform(low = 21000, high = 13000000, size=(len(agent_types),len(agent_types)))
latency = np.random.uniform(low=10, high=100, size=(len(agent_types), len(agent_types)))
# Overriding the latency for certain agent pairs happens below, as does forcing mirroring
# of the matrix to be symmetric.
for i, t1 in zip(range(latency.shape[0]), agent_types):
for j, t2 in zip(range(latency.shape[1]), agent_types):
# Three cases for symmetric array. Set latency when j > i, copy it when i > j, same agent when i == j.
if j > i:
# Arb agents should be the fastest in the market.
if (("ExchangeAgent" in t1 and "EtfArbAgent" in t2)
or ("ExchangeAgent" in t2 and "EtfArbAgent" in t1)):
# latency[i,j] = 20000
latency[i, j] = 5
elif (("ExchangeAgent" in t1 and "EtfMarketMakerAgent" in t2)
or ("ExchangeAgent" in t2 and "EtfMarketMakerAgent" in t1)):
# latency[i,j] = 20000
latency[i, j] = 1
elif (("ExchangeAgent" in t1 and "ImpactAgent" in t2)
or ("ExchangeAgent" in t2 and "ImpactAgent" in t1)):
# latency[i,j] = 20000
latency[i, j] = 1
elif i > j:
# This "bottom" half of the matrix simply mirrors the top.
if (("ExchangeAgent" in t1 and "EtfArbAgent" in t2)
or ("ExchangeAgent" in t2 and "EtfArbAgent" in t1)):
# latency[i,j] = 20000
latency[i, j] = 5
elif (("ExchangeAgent" in t1 and "EtfMarketMakerAgent" in t2)
or ("ExchangeAgent" in t2 and "EtfMarketMakerAgent" in t1)):
# latency[i,j] = 20000
latency[i, j] = 1
elif (("ExchangeAgent" in t1 and "ImpactAgent" in t2)
or ("ExchangeAgent" in t2 and "ImpactAgent" in t1)):
# latency[i,j] = 20000
latency[i, j] = 1
else:
latency[i, j] = latency[j, i]
else:
# This is the same agent. How long does it take to reach localhost? In our data center, it actually
# takes about 20 microseconds.
# latency[i,j] = 10000
latency[i, j] = 1
# Configure a simple latency noise model for the agents.
# Index is ns extra delay, value is probability of this delay being applied.
# In this config, there is no latency (noisy or otherwise).
noise = [0.0]
# Start the kernel running.
kernel.runner(agents=agents, startTime=kernelStartTime,
stopTime=kernelStopTime, agentLatency=latency,
latencyNoise=noise,
defaultComputationDelay=defaultComputationDelay,
oracle=oracle, log_dir=log_dir)
# ABBIAMO PROBLEMI CON I TEMPI;
# python3 -u liquidity_telemetry.py ../../log/1599743361/ExchangeAgent0.bz2 ../../log/1599743361/ORDERBOOK_SYM2_FULL.bz2 -o ../../twosymbols.png -c configs/plot_2sym.json
| [
"pandas.to_timedelta",
"argparse.ArgumentParser",
"pandas.Timestamp.now",
"util.oracle.SparseMeanRevertingOracle.SparseMeanRevertingOracle",
"numpy.random.randint",
"numpy.random.seed",
"sys.exit",
"pandas.to_datetime"
] | [((923, 999), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Detailed options for momentum config."""'}), "(description='Detailed options for momentum config.')\n", (946, 999), False, 'import argparse\n'), ((2672, 2700), 'pandas.to_datetime', 'pd.to_datetime', (['"""2014-01-28"""'], {}), "('2014-01-28')\n", (2686, 2700), True, 'import pandas as pd\n'), ((3975, 3995), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3989, 3995), True, 'import numpy as np\n'), ((8614, 8669), 'util.oracle.SparseMeanRevertingOracle.SparseMeanRevertingOracle', 'SparseMeanRevertingOracle', (['mkt_open', 'mkt_close', 'symbols'], {}), '(mkt_open, mkt_close, symbols)\n', (8639, 8669), False, 'from util.oracle.SparseMeanRevertingOracle import SparseMeanRevertingOracle\n'), ((2579, 2589), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2587, 2589), False, 'import sys\n'), ((5486, 5513), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""20:00:00"""'], {}), "('20:00:00')\n", (5501, 5513), True, 'import pandas as pd\n'), ((8254, 8281), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""09:30:00"""'], {}), "('09:30:00')\n", (8269, 8281), True, 'import pandas as pd\n'), ((8437, 8464), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""15:30:00"""'], {}), "('15:30:00')\n", (8452, 8464), True, 'import pandas as pd\n'), ((9344, 9371), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""17:00:00"""'], {}), "('17:00:00')\n", (9359, 9371), True, 'import pandas as pd\n'), ((9427, 9454), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""17:00:01"""'], {}), "('17:00:01')\n", (9442, 9454), True, 'import pandas as pd\n'), ((22025, 22049), 'pandas.to_timedelta', 'pd.to_timedelta', (['itrades'], {}), '(itrades)\n', (22040, 22049), True, 'import pandas as pd\n'), ((6693, 6747), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 ** 32)', 'dtype': '"""uint64"""'}), "(low=0, high=2 ** 32, dtype='uint64')\n", (6710, 6747), True, 'import numpy as np\n'), 
((7093, 7147), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 ** 32)', 'dtype': '"""uint64"""'}), "(low=0, high=2 ** 32, dtype='uint64')\n", (7110, 7147), True, 'import numpy as np\n'), ((7492, 7546), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 ** 32)', 'dtype': '"""uint64"""'}), "(low=0, high=2 ** 32, dtype='uint64')\n", (7509, 7546), True, 'import numpy as np\n'), ((7946, 7984), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 ** 32)'}), '(low=0, high=2 ** 32)\n', (7963, 7984), True, 'import numpy as np\n'), ((3917, 3935), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (3933, 3935), True, 'import pandas as pd\n'), ((9069, 9107), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 ** 32)'}), '(low=0, high=2 ** 32)\n', (9086, 9107), True, 'import numpy as np\n'), ((9753, 9791), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 ** 32)'}), '(low=0, high=2 ** 32)\n', (9770, 9791), True, 'import numpy as np\n'), ((19375, 19413), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 ** 32)'}), '(low=0, high=2 ** 32)\n', (19392, 19413), True, 'import numpy as np\n'), ((20402, 20440), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 ** 32)'}), '(low=0, high=2 ** 32)\n', (20419, 20440), True, 'import numpy as np\n'), ((22347, 22385), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 ** 32)'}), '(low=0, high=2 ** 32)\n', (22364, 22385), True, 'import numpy as np\n'), ((13061, 13099), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 ** 32)'}), '(low=0, high=2 ** 32)\n', (13078, 13099), True, 'import numpy as np\n'), ((13984, 14022), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 ** 32)'}), '(low=0, high=2 ** 32)\n', (14001, 14022), True, 'import numpy as np\n'), ((15039, 15077), 
'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 ** 32)'}), '(low=0, high=2 ** 32)\n', (15056, 15077), True, 'import numpy as np\n'), ((16051, 16089), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 ** 32)'}), '(low=0, high=2 ** 32)\n', (16068, 16089), True, 'import numpy as np\n'), ((17078, 17116), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 ** 32)'}), '(low=0, high=2 ** 32)\n', (17095, 17116), True, 'import numpy as np\n'), ((18223, 18261), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 ** 32)'}), '(low=0, high=2 ** 32)\n', (18240, 18261), True, 'import numpy as np\n'), ((21563, 21601), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2 ** 32)'}), '(low=0, high=2 ** 32)\n', (21580, 21601), True, 'import numpy as np\n')] |
'''
Wrapper routine that reads in the segment bounds
- construction of fluorescence decay traces per segment, and fit
'''
def Segmented_crosscorrs(MakeCrossCorrs, MakeLifetimelst, Simulated_insteadof_MeasuredData, CPA_insteadof_binned):
    """Build and fit per-segment fluorescence decay traces for every dot.

    For each 'Dot_XX' folder in the timetag directory, this loads the photon
    timestamps, loads the previously computed segment bounds (changepoints or
    fixed bins), cross-correlates the detector channels with the laser
    reference pulses per segment (yielding decay traces), and optionally fits
    each merged trace with a single-exponential MLE.

    Parameters
    ----------
    MakeCrossCorrs : bool
        If True, compute the segmented cross-correlations and save them to
        disk; if False, load previously saved correlations instead.
    MakeLifetimelst : bool
        If True, prepare the decay traces, fit single-exponential decays and
        save the fit results.
    Simulated_insteadof_MeasuredData : bool
        Selects which preamble module (parameter/path set) is imported.
    CPA_insteadof_binned : bool
        Selects the output folder: CPA segmentation vs. fixed time binning.
    """
    #import matplotlib.pyplot as plt
    import numpy as np
    import os
    #import pyarrow.parquet as pq
    #import pandas as pd
    import loaddata_functions as load
    import CPA_functions as findjumps
    import fitexp_MLE as fitexp
    import correlate_jit
    #import acc_functions as acc
    # =============================================================================
    # import preamble
    # =============================================================================
    # The preamble module supplies all file paths and measurement parameters
    # (pre.dtau_ns, pre.minitimebin_ns, filenames, ...).
    if Simulated_insteadof_MeasuredData:
        import preamble_simulated as pre
    else:
        import preamble_measured as pre
    # =============================================================================
    # set outputfolder
    # =============================================================================
    if CPA_insteadof_binned:
        outputfolder = pre.outputfolder_1 + pre.outputfolder_2_CPA
    else:
        outputfolder = pre.outputfolder_1 + pre.outputfolder_2_binned
    # =============================================================================
    # get to work on the specified dots
    # =============================================================================
    # Only folders named 'Dot_...' whose name contains the configured signature
    # (pre.sig) are processed.
    Dotlist = [i for i in os.listdir(pre.timetags_filepath) if i.startswith('Dot_') and pre.sig in i]
    print('\nRunning CPA routine, start by reading in data\n')
    for dot_file in Dotlist:
        dot_idx = int(dot_file[4:6])  # two-digit index parsed from 'Dot_XX...'
        print('\n##################################################')
        print('Starting Dot', dot_idx)
        # =============================================================================
        # create the folder to save the data
        # =============================================================================
        savepath = outputfolder + 'Dot_%02d/' %dot_idx
        if not os.path.exists(savepath):
            os.makedirs(savepath)
        # =============================================================================
        # load the timestamps
        # =============================================================================
        # chA/chB are the two detector channels, chR the laser reference channel;
        # all timestamps are in counter-card resolution units ("bins").
        timestamps_chA_bin, timestamps_chB_bin, timestamps_chR_bin = load.LoadTimeStamps(pre.timetags_filepath+'Dot_%02d/' %dot_idx, pre.timetags_filenames, pre.timetags_headers)
        # Merged, time-sorted detector stream (currently unused downstream here).
        timestamps_bin = np.sort(np.concatenate((timestamps_chA_bin, timestamps_chB_bin)))
        # =============================================================================
        # Load metadata
        # =============================================================================
        # file_metadata provides, among others, the per-channel delay offsets
        # 'Tau0A_bin' / 'Tau0B_bin' used when preparing the decay traces.
        file_metadata = load.ReadDict(savepath+pre.metadata_filename, 20)
        # =============================================================================
        # Segment bounds (changepoints or bins)
        # =============================================================================
        '''
        seg_idx_extra : array of the indices of the photons at the found changepoints, Includes 0, and the total photon count
        seg_times_bin_extra : array of the time bin of the found changepoint. Unit is counter card resolution. Includes 0 and the end of the measurement
        segmentlengths_s : array of the length of the segments, in seconds
        segmentcounts : array of the number of counts in every segmens
        '''
        filepath_segbounds = savepath + pre.segments_filename
        seg_idx_extra, seg_times_bin_extra, segmentlengths,segmentcounts=load.LoadSegments(filepath_segbounds)
        segmentlengths_s=segmentlengths*pre.dtau_ns*1e-9 # in seconds [durations]
        # =============================================================================
        # Segmented cross-correlation for segment-decay traces
        # =============================================================================
        '''
        correlating the photon arrival times with the laser reference times
        this data will become the decay curves
        input
            fname_tcspc : the file name of the TCSPC file
            ccparams : the parameters for the cross-correlation calculation
            seg_times_bin_extra : array of the time bin of the found changepoint. Unit is counter card resolution. Includes 0 and the end of the measurement
            pre.dtau_ns : the tiing resolution of the counter card, in ns
            ccmeta : the parameters of the correlation calculation and the metadata
        output
            crosscorrs : 2D aray. The calculated correlation, per channel
            cctaus_ns : the shift times used for the correlation, in ns
        '''
        filepath_cc = savepath + pre.crosscorrs_segmented_filename
        if MakeCrossCorrs:
            print('\nCorrelating detector and laser in segments, for decay traces')
            # Correlation lag axis: one laser period (pre.minitimebin_ns) expressed
            # in counter-card bins, inclusive of the end point.
            cctaus_bin = np.arange(0,int(pre.minitimebin_ns/pre.dtau_ns)+1).astype(np.int64)
            cctaus_ns = cctaus_bin* pre.dtau_ns
            # crosscorrs[channel, segment, lag]; channel 0 = A, channel 1 = B.
            crosscorrs = np.zeros((2,len(seg_times_bin_extra)-1, int(pre.minitimebin_ns/pre.dtau_ns)+1))
            for a in range(len(seg_times_bin_extra)-1):
                # take only the timestamps inside the relevant segment
                timestamps_chA_bin_part = timestamps_chA_bin[(seg_times_bin_extra[a]<=timestamps_chA_bin)*(timestamps_chA_bin<seg_times_bin_extra[a+1])]
                timestamps_chB_bin_part = timestamps_chB_bin[(seg_times_bin_extra[a]<=timestamps_chB_bin)*(timestamps_chB_bin<seg_times_bin_extra[a+1])]
                timestamps_chR_bin_part = timestamps_chR_bin[(timestamps_chR_bin>=seg_times_bin_extra[a])*(timestamps_chR_bin<seg_times_bin_extra[a+1])]
                # Correlate each detector channel against the laser reference.
                crosscorrs[0,a] = correlate_jit.Corr(timestamps_chR_bin_part, timestamps_chA_bin_part, cctaus_bin)
                crosscorrs[1,a] = correlate_jit.Corr(timestamps_chR_bin_part, timestamps_chB_bin_part, cctaus_bin)
            # save the data
            load.SaveSegmentedCC(filepath_cc, crosscorrs, cctaus_ns)
        if not MakeCrossCorrs:
            # load the data (previously computed and saved correlations)
            crosscorrs, cctaus_ns = load.LoadSegmentedCC(filepath_cc)
        # =============================================================================
        # Segmented intensity lists, and fitted lifetimes/decay rates
        # =============================================================================
        '''
        Create a list of the lifetimes and intensities of the segmented data
        input
            crosscorrs : the calculated correlations, segmented by CPA
            cctaus_ns : the shift times used for the correlation, in ns
            mygap : a time interval to exclude from the fitting procedure, to remove the effects of electronic ringing
            rise_time_bin : the number of bins at the end of the trace to exclude from the fitting procedure. This accounts for the rise time of the measurement apparatus
        output
            crosscorrs_merged : 1D array, correlation of both channels with the reference pulses. Corrected for possible differences in cable length between detectors
            gamma_lst : 1D array of the single-exponential decay rates of the CPA segments
            gamma_err_lst : 1D array of the errors of the single-exponential decay rates of the CPA segments
            segmentlengths_s : 1D array of the time urations of the CPA segments
            segmentcounts : 1D array, number of photon events in each CPA segment
        '''
        filepath_seg = savepath + pre.segment_fit_filename
        if MakeLifetimelst:
            # Shift each channel by its cable-delay offset (Tau0A/Tau0B) and cut
            # the ringing gap; int(5/pre.dtau_ns) looks like a 5 ns rise-time
            # window converted to bins — TODO confirm the unit.
            crosscorrsA_prep, cctaus_ns_prep = fitexp.PrepCrossCorrs(crosscorrs[0], cctaus_ns, Tau0_bin=-file_metadata['Tau0A_bin'], gap=pre.mygap_bin, rise_time_bin=int(5/pre.dtau_ns))
            crosscorrsB_prep, cctaus_ns_prep = fitexp.PrepCrossCorrs(crosscorrs[1], cctaus_ns, Tau0_bin=-file_metadata['Tau0B_bin'], gap=pre.mygap_bin, rise_time_bin=int(5/pre.dtau_ns))
            # Merge both (now delay-corrected) detector channels into one trace.
            crosscorrsAB_prep = crosscorrsA_prep + crosscorrsB_prep
            #crosscorrsAB_prep=crosscorrs[0]+crosscorrs[1]
            #cctaus_ns_prep=100-cctaus_ns
            fitdata = fitexp.Make_seg_fit_single(crosscorrsAB_prep, cctaus_ns_prep)
            load.SaveFitData(filepath_seg, pre.ccmeta, fitdata)
        # if not MakeLifetimelst:
            # load the data
            # obsolete here, but left in as a reference of how to load the data
            # ccmeta, fitdata = load.LoadFitData(filepath_seg)
| [
"loaddata_functions.LoadTimeStamps",
"os.path.exists",
"os.listdir",
"os.makedirs",
"loaddata_functions.SaveFitData",
"loaddata_functions.LoadSegments",
"correlate_jit.Corr",
"loaddata_functions.LoadSegmentedCC",
"loaddata_functions.ReadDict",
"numpy.concatenate",
"fitexp_MLE.Make_seg_fit_single... | [((2503, 2620), 'loaddata_functions.LoadTimeStamps', 'load.LoadTimeStamps', (["(pre.timetags_filepath + 'Dot_%02d/' % dot_idx)", 'pre.timetags_filenames', 'pre.timetags_headers'], {}), "(pre.timetags_filepath + 'Dot_%02d/' % dot_idx, pre.\n timetags_filenames, pre.timetags_headers)\n", (2522, 2620), True, 'import loaddata_functions as load\n'), ((2933, 2984), 'loaddata_functions.ReadDict', 'load.ReadDict', (['(savepath + pre.metadata_filename)', '(20)'], {}), '(savepath + pre.metadata_filename, 20)\n', (2946, 2984), True, 'import loaddata_functions as load\n'), ((3821, 3858), 'loaddata_functions.LoadSegments', 'load.LoadSegments', (['filepath_segbounds'], {}), '(filepath_segbounds)\n', (3838, 3858), True, 'import loaddata_functions as load\n'), ((1515, 1548), 'os.listdir', 'os.listdir', (['pre.timetags_filepath'], {}), '(pre.timetags_filepath)\n', (1525, 1548), False, 'import os\n'), ((2145, 2169), 'os.path.exists', 'os.path.exists', (['savepath'], {}), '(savepath)\n', (2159, 2169), False, 'import os\n'), ((2183, 2204), 'os.makedirs', 'os.makedirs', (['savepath'], {}), '(savepath)\n', (2194, 2204), False, 'import os\n'), ((2646, 2702), 'numpy.concatenate', 'np.concatenate', (['(timestamps_chA_bin, timestamps_chB_bin)'], {}), '((timestamps_chA_bin, timestamps_chB_bin))\n', (2660, 2702), True, 'import numpy as np\n'), ((6341, 6397), 'loaddata_functions.SaveSegmentedCC', 'load.SaveSegmentedCC', (['filepath_cc', 'crosscorrs', 'cctaus_ns'], {}), '(filepath_cc, crosscorrs, cctaus_ns)\n', (6361, 6397), True, 'import loaddata_functions as load\n'), ((6498, 6531), 'loaddata_functions.LoadSegmentedCC', 'load.LoadSegmentedCC', (['filepath_cc'], {}), '(filepath_cc)\n', (6518, 6531), True, 'import loaddata_functions as load\n'), ((8646, 8707), 'fitexp_MLE.Make_seg_fit_single', 'fitexp.Make_seg_fit_single', (['crosscorrsAB_prep', 'cctaus_ns_prep'], {}), '(crosscorrsAB_prep, cctaus_ns_prep)\n', (8672, 8707), True, 'import fitexp_MLE as 
fitexp\n'), ((8733, 8784), 'loaddata_functions.SaveFitData', 'load.SaveFitData', (['filepath_seg', 'pre.ccmeta', 'fitdata'], {}), '(filepath_seg, pre.ccmeta, fitdata)\n', (8749, 8784), True, 'import loaddata_functions as load\n'), ((6100, 6185), 'correlate_jit.Corr', 'correlate_jit.Corr', (['timestamps_chR_bin_part', 'timestamps_chA_bin_part', 'cctaus_bin'], {}), '(timestamps_chR_bin_part, timestamps_chA_bin_part, cctaus_bin\n )\n', (6118, 6185), False, 'import correlate_jit\n'), ((6215, 6300), 'correlate_jit.Corr', 'correlate_jit.Corr', (['timestamps_chR_bin_part', 'timestamps_chB_bin_part', 'cctaus_bin'], {}), '(timestamps_chR_bin_part, timestamps_chB_bin_part, cctaus_bin\n )\n', (6233, 6300), False, 'import correlate_jit\n')] |
import unittest
import padertorch as pt
import numpy as np
import torch
from padertorch.contrib.examples.pit.model import PermutationInvariantTrainingModel
class TestDeepClusteringModel(unittest.TestCase):
    """Sanity checks for the Deep Clustering separation model."""
    # TODO: Test forward deterministic if not train

    def setUp(self):
        self.model = pt.models.bss.DeepClusteringModel()
        self.T = 100
        self.B = 4
        self.E = 20
        self.K = 2
        self.F = 257
        self.num_frames = [100, 90, 80, 70]

        def spectrogram(shape):
            # Non-negative float32 noise of the requested shape.
            return np.abs(np.random.normal(size=shape)).astype(np.float32)

        def binary_mask(shape):
            # {0, 1}-valued float32 mask of the requested shape.
            return np.abs(np.random.choice([0, 1], size=shape)).astype(np.float32)

        self.inputs = {
            'Y_abs': [
                spectrogram((frames, self.F)) for frames in self.num_frames
            ],
            'target_mask': [
                binary_mask((frames, self.K, self.F))
                for frames in self.num_frames
            ],
        }

    def test_signature(self):
        # The model must follow the padertorch forward/review protocol.
        for required in ('forward', 'review'):
            self.assertTrue(callable(getattr(self.model, required, None)))

    def test_forward(self):
        batch = pt.data.example_to_device(self.inputs)
        embeddings = self.model(batch)
        for embedding, frames in zip(embeddings, self.num_frames):
            self.assertEqual(
                tuple(embedding.shape), (frames, self.E, self.F),
                msg=str(embedding.shape))

    def test_review(self):
        batch = pt.data.example_to_device(self.inputs)
        review = self.model.review(batch, self.model(batch))
        self.assertIn('losses', review)
        self.assertIn('dc_loss', review['losses'])

    def test_minibatch_equal_to_single_example(self):
        # The batched loss must equal the mean of the per-example losses.
        batch = pt.data.example_to_device(self.inputs)
        batch_review = self.model.review(batch, self.model(batch))
        batch_loss = batch_review['losses']['dc_loss']

        per_example_losses = []
        for observation, target_mask in zip(
                self.inputs['Y_abs'], self.inputs['target_mask']):
            single = pt.data.example_to_device({
                'Y_abs': [observation],
                'target_mask': [target_mask],
            })
            single_review = self.model.review(single, self.model(single))
            per_example_losses.append(single_review['losses']['dc_loss'])
        mean_loss = torch.mean(torch.stack(per_example_losses))

        np.testing.assert_allclose(
            batch_loss.detach().numpy(),
            mean_loss.detach().numpy(),
            atol=1e-6
        )
class TestPermutationInvariantTrainingModel(unittest.TestCase):
    """Sanity checks for the permutation-invariant-training (PIT) model."""
    # TODO: Test forward deterministic if not train

    def setUp(self):
        self.model = PermutationInvariantTrainingModel(
            dropout_input=0.5,
            dropout_hidden=0.5,
            dropout_linear=0.5
        )
        self.T = 100
        self.B = 4
        self.K = 2
        self.F = 257
        self.num_frames = [100, 90, 80, 70]

        def noise(shape):
            # Non-negative float32 noise of the requested shape.
            return np.abs(np.random.normal(size=shape)).astype(np.float32)

        # Mixture features are (frames, F); per-speaker features are
        # (frames, K, F).
        self.inputs = {
            'Y_abs': [noise((t, self.F)) for t in self.num_frames],
            'X_abs': [noise((t, self.K, self.F)) for t in self.num_frames],
            'Y_norm': [noise((t, self.F)) for t in self.num_frames],
            'X_norm': [noise((t, self.K, self.F)) for t in self.num_frames],
            'cos_phase_difference': [
                noise((t, self.K, self.F)) for t in self.num_frames
            ],
        }

    def test_signature(self):
        # The model must follow the padertorch forward/review protocol.
        for required in ('forward', 'review'):
            self.assertTrue(callable(getattr(self.model, required, None)))

    def test_forward(self):
        batch = pt.data.example_to_device(self.inputs)
        estimates = self.model(batch)
        for estimate, target in zip(estimates, batch['X_abs']):
            np.testing.assert_equal(estimate.size(), target.size())

    def test_review(self):
        batch = pt.data.example_to_device(self.inputs)
        review = self.model.review(batch, self.model(batch))
        self.assertIn('losses', review)
        self.assertIn('pit_mse_loss', review['losses'])

    def test_minibatch_equal_to_single_example(self):
        # The batched loss must equal the mean of the per-example losses;
        # eval() disables dropout so both passes are comparable.
        batch = pt.data.example_to_device(self.inputs)
        self.model.eval()
        batch_review = self.model.review(batch, self.model(batch))
        batch_loss = batch_review['losses']['pit_mse_loss']

        keys = ('Y_abs', 'X_abs', 'Y_norm', 'X_norm', 'cos_phase_difference')
        per_example_losses = []
        for example in zip(*(self.inputs[key] for key in keys)):
            single = pt.data.example_to_device(
                {key: [value] for key, value in zip(keys, example)})
            self.model.eval()
            single_review = self.model.review(single, self.model(single))
            per_example_losses.append(single_review['losses']['pit_mse_loss'])
        mean_loss = torch.mean(torch.stack(per_example_losses))

        np.testing.assert_allclose(
            batch_loss.detach().numpy(),
            mean_loss.detach().numpy(),
            atol=1e-6
        )

    def test_evaluation_mode_deterministic(self):
        # In eval mode two forward passes on identical input must agree.
        self.model.eval()
        first = self.model(pt.data.example_to_device(self.inputs))[0]
        second = self.model(pt.data.example_to_device(self.inputs))[0]
        np.testing.assert_allclose(
            first.detach().numpy(),
            second.detach().numpy(),
            atol=1e-6
        )
| [
"numpy.random.normal",
"padertorch.models.bss.DeepClusteringModel",
"numpy.random.choice",
"torch.stack",
"padertorch.data.example_to_device",
"padertorch.contrib.examples.pit.model.PermutationInvariantTrainingModel"
] | [((303, 338), 'padertorch.models.bss.DeepClusteringModel', 'pt.models.bss.DeepClusteringModel', ([], {}), '()\n', (336, 338), True, 'import padertorch as pt\n'), ((1189, 1227), 'padertorch.data.example_to_device', 'pt.data.example_to_device', (['self.inputs'], {}), '(self.inputs)\n', (1214, 1227), True, 'import padertorch as pt\n'), ((1511, 1549), 'padertorch.data.example_to_device', 'pt.data.example_to_device', (['self.inputs'], {}), '(self.inputs)\n', (1536, 1549), True, 'import padertorch as pt\n'), ((1825, 1863), 'padertorch.data.example_to_device', 'pt.data.example_to_device', (['self.inputs'], {}), '(self.inputs)\n', (1850, 1863), True, 'import padertorch as pt\n'), ((2875, 2971), 'padertorch.contrib.examples.pit.model.PermutationInvariantTrainingModel', 'PermutationInvariantTrainingModel', ([], {'dropout_input': '(0.5)', 'dropout_hidden': '(0.5)', 'dropout_linear': '(0.5)'}), '(dropout_input=0.5, dropout_hidden=0.5,\n dropout_linear=0.5)\n', (2908, 2971), False, 'from padertorch.contrib.examples.pit.model import PermutationInvariantTrainingModel\n'), ((4488, 4526), 'padertorch.data.example_to_device', 'pt.data.example_to_device', (['self.inputs'], {}), '(self.inputs)\n', (4513, 4526), True, 'import padertorch as pt\n'), ((4711, 4749), 'padertorch.data.example_to_device', 'pt.data.example_to_device', (['self.inputs'], {}), '(self.inputs)\n', (4736, 4749), True, 'import padertorch as pt\n'), ((5030, 5068), 'padertorch.data.example_to_device', 'pt.data.example_to_device', (['self.inputs'], {}), '(self.inputs)\n', (5055, 5068), True, 'import padertorch as pt\n'), ((6342, 6380), 'padertorch.data.example_to_device', 'pt.data.example_to_device', (['self.inputs'], {}), '(self.inputs)\n', (6367, 6380), True, 'import padertorch as pt\n'), ((6437, 6475), 'padertorch.data.example_to_device', 'pt.data.example_to_device', (['self.inputs'], {}), '(self.inputs)\n', (6462, 6475), True, 'import padertorch as pt\n'), ((2304, 2337), 'padertorch.data.example_to_device', 
'pt.data.example_to_device', (['inputs'], {}), '(inputs)\n', (2329, 2337), True, 'import padertorch as pt\n'), ((2529, 2556), 'torch.stack', 'torch.stack', (['reference_loss'], {}), '(reference_loss)\n', (2540, 2556), False, 'import torch\n'), ((5800, 5833), 'padertorch.data.example_to_device', 'pt.data.example_to_device', (['inputs'], {}), '(inputs)\n', (5825, 5833), True, 'import padertorch as pt\n'), ((6062, 6089), 'torch.stack', 'torch.stack', (['reference_loss'], {}), '(reference_loss)\n', (6073, 6089), False, 'import torch\n'), ((554, 598), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(num_frames_, self.F)'}), '(size=(num_frames_, self.F))\n', (570, 598), True, 'import numpy as np\n'), ((775, 835), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': '(num_frames_, self.K, self.F)'}), '([0, 1], size=(num_frames_, self.K, self.F))\n', (791, 835), True, 'import numpy as np\n'), ((3209, 3253), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(num_frames_, self.F)'}), '(size=(num_frames_, self.F))\n', (3225, 3253), True, 'import numpy as np\n'), ((3424, 3476), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(num_frames_, self.K, self.F)'}), '(size=(num_frames_, self.K, self.F))\n', (3440, 3476), True, 'import numpy as np\n'), ((3648, 3692), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(num_frames_, self.F)'}), '(size=(num_frames_, self.F))\n', (3664, 3692), True, 'import numpy as np\n'), ((3864, 3916), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(num_frames_, self.K, self.F)'}), '(size=(num_frames_, self.K, self.F))\n', (3880, 3916), True, 'import numpy as np\n'), ((4102, 4154), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(num_frames_, self.K, self.F)'}), '(size=(num_frames_, self.K, self.F))\n', (4118, 4154), True, 'import numpy as np\n')] |
from datetime import date
import numpy as np
from ds_weather_api import *
class WeatherFeatureExtractor:
    """Serves hourly weather features from a pickled weather archive.

    The archive is loaded lazily on first use and cached at class level as
    ``date_to_hours``: {ISO date string -> {hour (0-23) -> hourly report}}.
    Each hourly report is extended with that day's daily report.
    """

    # {date.isoformat(): {hour: hourly report extended with daily data}}
    date_to_hours = None
    data_loaded = False
    DATA_PATH = '../Data/WeatherData_May2014-May2019.bin'

    @classmethod
    def load_data(cls):
        """Read the pickled archive, re-index it by date/hour, and impute gaps."""
        # Explicit import: previously `pickle` was only in scope if
        # `from ds_weather_api import *` happened to export it.
        import pickle

        class CustomUnpickler(pickle.Unpickler):
            # The archive was pickled from a script where the weather classes
            # lived in __main__; remap them to ds_weather_api so they resolve.
            def find_class(self, module, name):
                if module == "__main__":
                    module = 'ds_weather_api'
                return super().find_class(module, name)

        with open(cls.DATA_PATH, 'rb') as f:
            weather_data = CustomUnpickler(f).load()

        # Change format: {iso_date: {hour: hourly_report + daily_report}}.
        cls.date_to_hours = {}
        date_to_hours = cls.date_to_hours
        for data in weather_data:
            hourly_data = data[WeatherReportType.Hourly]
            # Use the first daily report of the entry to enrich every hour.
            for val in data[WeatherReportType.Daily].values():
                daily = val
                break
            for hour in sorted(hourly_data.keys()):
                # BUG FIX: the membership test previously checked the `date`
                # object against a dict keyed by ISO strings, so it was always
                # True, the per-date dict was recreated on every hour, and all
                # but the last processed hour of each date were lost.
                date_key = hour.date().isoformat()
                if date_key not in date_to_hours:
                    date_to_hours[date_key] = {}
                date_to_hours[date_key][hour.hour] = hourly_data[hour].extend_with(daily)

        # Imputation: a missing hour inherits the previous good hourly record;
        # a record with missing temperature inherits the previous good one.
        # NOTE(review): relies on dict insertion order being chronological —
        # confirm that `weather_data` is sorted by time.
        prev_good_hour_value = None
        prev_good_temperature = None
        for dt in date_to_hours:
            for hour in range(24):
                if hour not in date_to_hours[dt]:
                    date_to_hours[dt][hour] = prev_good_hour_value
                else:
                    prev_good_hour_value = date_to_hours[dt][hour]
                if prev_good_hour_value is None:
                    # No record seen yet at all; nothing to impute from.
                    continue
                if prev_good_hour_value.temperature is not None:
                    prev_good_temperature = prev_good_hour_value.temperature
                else:
                    prev_good_hour_value.temperature = prev_good_temperature
        cls.data_loaded = True

    @classmethod
    def get_feature_dict(cls, date_, time):
        """Return the weather attribute dict for `date_` at `time`.

        Args:
            date_: ISO-format date string (e.g. '2018-01-02').
            time: HHMM-style integer (e.g. 1430 -> hour 14).

        Returns:
            dict mapping each weather attribute name to its value, or to
            ``np.nan`` for every attribute when no record is available.
        """
        if not cls.data_loaded:
            cls.load_data()
        date_to_hours = cls.date_to_hours
        # HHMM integer -> hour of day; rounding absorbs the minutes part.
        hour = int(np.round(time / 100.))
        attrs = ['weather_label', 'precipitation_intensity', 'precipitation_probability', 'visibility',
                 'cloud_cover', 'humidity', 'wind_bearing', 'wind_speed', 'uv_index', 'temperature',
                 'moon_phase', 'sunrise_time', 'sunset_time', 'dew_point', 'pressure']
        if date_ not in date_to_hours or hour not in date_to_hours[date_]:
            return {at_name: np.nan for at_name in attrs}
        hour_data = date_to_hours[date_][hour]
        # Robustness: hours before the first good record are imputed as None;
        # previously this raised AttributeError on `__dict__`.
        if hour_data is None:
            return {at_name: np.nan for at_name in attrs}
        return {at_name: hour_data.__dict__[at_name] for at_name in attrs}
def get_weather_features_dict(date, time):
    """Look up hourly weather features for a flight record.

    :param date: the 'FL_DATE' column (ISO date string)
    :param time: the 'HOUR' column (HHMM-style integer)
    :return: dict keyed by the 15 weather attribute names
        ('weather_label', 'precipitation_intensity', 'precipitation_probability',
        'visibility', 'cloud_cover', 'humidity', 'wind_bearing', 'wind_speed',
        'uv_index', 'temperature', 'moon_phase', 'sunrise_time', 'sunset_time',
        'dew_point', 'pressure'); values are NaN when the date/hour is unknown.
    """
    features = WeatherFeatureExtractor.get_feature_dict(date, time)
    return features
| [
"numpy.round"
] | [((2161, 2183), 'numpy.round', 'np.round', (['(time / 100.0)'], {}), '(time / 100.0)\n', (2169, 2183), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import requests
import numpy as np
from multiprocessing import Process, Manager, Lock
import traceback
import signal
import sys
import argparse
import json
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
###############################################################################
# Simple script for testing site reponse time against concurrent requests #
# Developed with Python 2.7.6. Tested on OS X and Linux #
# Example Usage: #
# python spawn_api_requests.py --concurrency 15 #
###############################################################################
def signal_term_handler(signal, frame):
    """Handle SIGTERM from the user: announce the abort and exit.

    If this handler somehow fails and worker processes linger, clean up with:
    kill -9 `ps -ef | grep -i python | grep -i spawn_api_requests | head -n1 | awk '{ print $2 }'`
    """
    abort_message = "Script forced to aborted by user"
    print(abort_message)
    sys.exit()
def status(code):
    """Colorize an HTTP status code for unix terminals.

    2xx renders green, 4xx/5xx red, anything else yellow; the ANSI reset
    sequence is always appended.
    """
    text = str(code)
    leading_digit = text[0]
    if leading_digit == "2":
        color = "\033[92m"  # green
    elif leading_digit in ("4", "5"):
        color = "\033[91m"  # red
    else:
        color = "\033[93m"  # yellow
    return color + text + "\033[0m"
def safe_print(string, lock):
    """Print ``string`` atomically under ``lock``.

    print() is threadsafe by default, however newlines are not, so the whole
    call is serialized. The release is moved into a finally clause so the
    lock cannot leak if print raises (e.g. on a closed stdout pipe), which
    the original bare acquire/release pair did not guarantee.
    """
    lock.acquire()
    try:
        print(string)
    finally:
        lock.release()
# Method to authenticate and send requests for each worker
def send_http_request(results, times, pages, authentication, timeout, lock, number=1):
    """Worker entry point: optionally authenticate, then fetch every endpoint.

    results -- shared dict of outcome counters (mutated in place)
    times   -- shared list collecting response times in microseconds
    pages   -- unused here; endpoints come from the module-level url_details
    authentication -- parsed config/authentication.json contents
    timeout -- {'connect': seconds, 'read': seconds}
    lock    -- shared lock serializing console output
    number  -- 1-based worker id used in log messages

    Fixes relative to the original:
    * ``lock.relase()`` typo in the authentication error path raised
      AttributeError (masking the real failure) and left the lock held.
    * The ConnectTimeout handler wrapped safe_print in its own
      acquire/release of the same lock; safe_print acquires that lock
      itself, so a non-reentrant lock would deadlock. The redundant
      outer acquire/release is removed.
    """
    # Optional authentication step
    if authentication['api_authentication']['enabled']:
        try:
            print("\nAuthenticating thread number %s" %(number))
            request_type = authentication['api_authentication']['request_type']
            login_url = "%s://%s/%s" %(url_details['protocol'], url_details['url'], authentication['api_authentication']['endpoint'])
            header = {'Content-Type': authentication['api_authentication']['payload_format']}
            payload = authentication['api_authentication']['payload_as_string']
            r = requests.request(request_type, url=login_url, headers=header, data=payload, verify=False, allow_redirects=True)
        except Exception as e:
            lock.acquire()
            print("Failed to send Authentication Request. Failure Response:")
            traceback.print_exc()
            lock.release()  # was lock.relase() -- AttributeError hid the traceback context
            sys.exit()
        if r.status_code in [200, 201]:
            cookies = dict(r.cookies)
            safe_print("Authentication Succeeded\n\tSession Cookie: %s" % (dict(cookies)), lock)
            if sum(1 for _ in r.cookies) == 0:
                safe_print("\t\033[91mWarning:\033[0m Received 2XX status from server, but no Session Cookie was readable. You're probably NOT authenticated", lock)
        else:
            safe_print("Authentication Failure:\n\tStatus: %s\n\tResponse: %s" %(status(r.status_code), r.text), lock)
            sys.exit()
    else:
        cookies = {}
    # After authentication, traverse through each page
    try:
        for page in url_details['endpoints']:
            current_url = "%s://%s/%s" %(url_details['protocol'], url_details['url'], page)
            try:
                r = requests.request( 'get',
                                    url=current_url,
                                    cookies=cookies,
                                    verify=False,
                                    allow_redirects=True,
                                    timeout=(timeout['connect'], timeout['read']))
                times.append(r.elapsed.microseconds)
                results["Valid Response"] += 1
            except requests.exceptions.ReadTimeout as e:
                safe_print("Request Thread %s:\n\t\033[91mRead Timeout!\033[0m No server response in %s seconds" %(number, timeout['read']), lock)
                results["Read Timeout"] += 1
                return
            except requests.exceptions.ConnectTimeout as e:
                # safe_print serializes on the shared lock itself; do not
                # wrap it in another acquire/release of the same lock.
                safe_print("Request Thread %s:\n\t\033[91mConnect Timeout!\033[0m No server response in %s seconds" %(number, timeout['connect']), lock)
                results["Connect Timeout"] += 1
                return
            except requests.exceptions.ConnectionError as e:
                safe_print("Request Thread %s:\n\t\033[91mConnection Error!\033[0m %s" %(number, e), lock)
                results["Connection Error"] += 1
                return
            except Exception as e:
                safe_print("Request Thread %s:\n\t\033[91mUnexpected Error!\033[0m %s" %(number, e), lock)
                return
            if not r.status_code == 200:
                safe_print("Failed to get page:\n\tURL: %s\n\tStatus: %s" %(current_url, status(r.status_code)), lock)
            else:
                if r.history:
                    for redirect in r.history:
                        safe_print("Request Thread %s:\n\tStatus: %s\n\tTime: %s\n\tRedirects:\n\t\t%s : %s" %( number, status(r.status_code), float(r.elapsed.microseconds) / 1000000,
                                                                    status(redirect.status_code), redirect.url),
                                    lock)
                    safe_print("\tFinal Destination:\n\t\t%s : %s" %(status(r.status_code), r.url), lock)
                else:
                    safe_print("Request Thread %s:\n\tURL: %s\n\tStatus: %s\n\tTime: %s" %(number, r.url, status(r.status_code), float(r.elapsed.microseconds) / 1000000), lock)
    except KeyboardInterrupt:
        sys.exit()
if __name__ == "__main__":
    # Disable all SSL warnings
    # (wrapped in try/except: older requests/urllib3 builds may lack these names)
    try:
        requests.packages.urllib3.disable_warnings()
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
        requests.packages.urllib3.disable_warnings(InsecurePlatformWarning)
    except:
        pass
    # Catch Sigterm from User
    signal.signal(signal.SIGTERM, signal_term_handler)
    # Globals
    subprocesses = []
    count = 0
    # Manager-backed proxies so the worker processes can share the lock,
    # the response-time list, and the result counters.
    manager = Manager()
    lock = manager.Lock()
    times = manager.list()
    results = manager.dict()
    results["Valid Response"] = 0
    results["Connection Error"] = 0
    results["Read Timeout"] = 0
    results["Connect Timeout"] = 0
    # Import list of URLs:
    # protocol, base url, and list of endpoints
    with open('config/url_list.json', 'r') as url_endpoints:
        url_details = json.load(url_endpoints)
    # Import Authentication Details:
    # username, password, url, request type and url endpoint
    with open('config/authentication.json', 'r') as login_params:
        authentication = json.load(login_params)
    # Parse User Command Line Arguments
    parser = argparse.ArgumentParser(description='Spawn multiple HTTP request threads to request specified URL.')
    parser.add_argument("--concurrency", dest="concurrency", type=int, default=1, required=False, help='number of users simultaneously requesting pages (ex. --concurrency 15)')
    args = parser.parse_args()
    user_args = vars(args)
    # Configurable Parameter Defaults
    concurrency = user_args['concurrency']
    timeout = {"read": 5, "connect": 5}
    # Send Parallel URL Requests
    # Note: Number of worker processes is bound by host
    # Too many subprocesses yields OSOSError: [Errno 35] Resource temporarily unavailable
    # This should be configurable on your OS
    print("\nSpawning: \n\t%s subprocesses for %s simultaneous requests of page" %(concurrency, concurrency))
    # Spawn a process for every request instance
    for x in range(0,concurrency):
        count += 1
        p = Process(target=send_http_request, args=(results, times, url_details, authentication, timeout, lock, count,))
        subprocesses.append(p)
        p.start()
    # Wait for all processes to complete
    for subprocess in subprocesses:
        subprocess.join()
    # Calculate average response time
    # Average Time in seconds
    avg_time = "N/A"
    if len(times) > 0:
        avg_time = float(sum(times)/len(times))/1000000
    # Print Results to Console
    # JW: TODO clean these up so they don't throw exceptions for empty data setsg
    # NOTE(review): min()/max()/np.median()/np.std() below raise if `times`
    # is empty (every request failed) -- only avg_time is guarded above.
    print("\nAll Requests Sent:")
    print("\tValid Response: %s" %(results["Valid Response"]))
    print("\tConnection Error: %s" %(results["Connection Error"]))
    print("\tRead Timeout: %s" %(results["Read Timeout"]))
    print("\tConnect Timeout: %s\n" %(results["Connect Timeout"]))
    print("Average Response Time:\n\t%s seconds" %(str(avg_time)))
    print("Minimum Response Time:\n\t%s seconds" %(str(float(min(times))/1000000)))
    print("Maximum Response Time:\n\t%s seconds" %(str(float(max(times))/1000000)))
    print("Median Response Time:\n\t%s seconds" %(np.median(times)/1000000))
    print("Standard Deviation:\n\t%s seconds\n" %(np.std(times)/1000000))
| [
"signal.signal",
"numpy.median",
"requests.packages.urllib3.disable_warnings",
"argparse.ArgumentParser",
"multiprocessing.Process",
"numpy.std",
"requests.request",
"json.load",
"sys.exit",
"multiprocessing.Manager",
"traceback.print_exc"
] | [((1084, 1094), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1092, 1094), False, 'import sys\n'), ((5911, 5961), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'signal_term_handler'], {}), '(signal.SIGTERM, signal_term_handler)\n', (5924, 5961), False, 'import signal\n'), ((6021, 6030), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (6028, 6030), False, 'from multiprocessing import Process, Manager, Lock\n'), ((6681, 6786), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Spawn multiple HTTP request threads to request specified URL."""'}), "(description=\n 'Spawn multiple HTTP request threads to request specified URL.')\n", (6704, 6786), False, 'import argparse\n'), ((5673, 5717), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', ([], {}), '()\n', (5715, 5717), False, 'import requests\n'), ((5722, 5788), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', (['InsecureRequestWarning'], {}), '(InsecureRequestWarning)\n', (5764, 5788), False, 'import requests\n'), ((5793, 5860), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', (['InsecurePlatformWarning'], {}), '(InsecurePlatformWarning)\n', (5835, 5860), False, 'import requests\n'), ((6399, 6423), 'json.load', 'json.load', (['url_endpoints'], {}), '(url_endpoints)\n', (6408, 6423), False, 'import json\n'), ((6607, 6630), 'json.load', 'json.load', (['login_params'], {}), '(login_params)\n', (6616, 6630), False, 'import json\n'), ((7565, 7676), 'multiprocessing.Process', 'Process', ([], {'target': 'send_http_request', 'args': '(results, times, url_details, authentication, timeout, lock, count)'}), '(target=send_http_request, args=(results, times, url_details,\n authentication, timeout, lock, count))\n', (7572, 7676), False, 'from multiprocessing import Process, Manager, Lock\n'), ((2323, 2438), 'requests.request', 'requests.request', 
(['request_type'], {'url': 'login_url', 'headers': 'header', 'data': 'payload', 'verify': '(False)', 'allow_redirects': '(True)'}), '(request_type, url=login_url, headers=header, data=payload,\n verify=False, allow_redirects=True)\n', (2339, 2438), False, 'import requests\n'), ((3108, 3118), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3116, 3118), False, 'import sys\n'), ((5591, 5601), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5599, 5601), False, 'import sys\n'), ((2562, 2583), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2581, 2583), False, 'import traceback\n'), ((2610, 2620), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2618, 2620), False, 'import sys\n'), ((3364, 3508), 'requests.request', 'requests.request', (['"""get"""'], {'url': 'current_url', 'cookies': 'cookies', 'verify': '(False)', 'allow_redirects': '(True)', 'timeout': "(timeout['connect'], timeout['read'])"}), "('get', url=current_url, cookies=cookies, verify=False,\n allow_redirects=True, timeout=(timeout['connect'], timeout['read']))\n", (3380, 3508), False, 'import requests\n'), ((8638, 8654), 'numpy.median', 'np.median', (['times'], {}), '(times)\n', (8647, 8654), True, 'import numpy as np\n'), ((8713, 8726), 'numpy.std', 'np.std', (['times'], {}), '(times)\n', (8719, 8726), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""DQN_Keras_Cartpole.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1zquQYmlYsqKFfnUf7rxfnz7WH02VHA_T
"""
from statistics import mean
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from collections import deque
import os
import csv
import numpy as np
import random
import gym
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
SCORES_CSV_PATH = "scores.csv"  # per-run score log (appended each run)
SCORES_PNG_PATH = "scores.png"  # score plot regenerated from the CSV
SOLVED_CSV_PATH = "solved.csv"  # runs-until-solved log across trials
SOLVED_PNG_PATH = "solved.png"  # runs-until-solved plot
AVERAGE_SCORE_TO_SOLVE = 195  # rolling-average score that counts as "solved"
CONSECUTIVE_RUNS_TO_SOLVE = 100  # window length for that rolling average
class ScoreLogger:
    """Records per-run scores to CSV/PNG artifacts and terminates the program
    (via exit()) once the rolling average reaches AVERAGE_SCORE_TO_SOLVE over
    CONSECUTIVE_RUNS_TO_SOLVE runs."""
    def __init__(self, env_name):
        # Rolling window of recent scores; deque silently drops old entries.
        self.scores = deque(maxlen=CONSECUTIVE_RUNS_TO_SOLVE)
        self.env_name = env_name
        # Start each session with fresh score artifacts.
        if os.path.exists(SCORES_PNG_PATH):
            os.remove(SCORES_PNG_PATH)
        if os.path.exists(SCORES_CSV_PATH):
            os.remove(SCORES_CSV_PATH)
    def add_score(self, score, run):
        """Append one run's score, refresh the plot, and exit the process when
        the solve criterion is met."""
        self._save_csv(SCORES_CSV_PATH, score)
        self._save_png(input_path=SCORES_CSV_PATH,
                       output_path=SCORES_PNG_PATH,
                       x_label="runs",
                       y_label="scores",
                       average_of_n_last=CONSECUTIVE_RUNS_TO_SOLVE,
                       show_goal=True,
                       show_trend=True,
                       show_legend=True)
        self.scores.append(score)
        mean_score = mean(self.scores)
        print("Scores: (min: " + str(min(self.scores)) + ", avg: " + str(mean_score) + ", max: " + str(max(self.scores)) + ")\n")
        if mean_score >= AVERAGE_SCORE_TO_SOLVE and len(self.scores) >= CONSECUTIVE_RUNS_TO_SOLVE:
            # Solve run is reported as the first run of the qualifying window.
            solve_score = run-CONSECUTIVE_RUNS_TO_SOLVE
            print("Solved in " + str(solve_score) + " runs, " + str(run) + " total runs.")
            self._save_csv(SOLVED_CSV_PATH, solve_score)
            self._save_png(input_path=SOLVED_CSV_PATH,
                           output_path=SOLVED_PNG_PATH,
                           x_label="trials",
                           y_label="steps before solve",
                           average_of_n_last=None,
                           show_goal=False,
                           show_trend=False,
                           show_legend=False)
            exit()
    def _save_png(self, input_path, output_path, x_label, y_label, average_of_n_last, show_goal, show_trend, show_legend):
        """Re-plot the whole CSV: raw scores plus optional trailing-average,
        goal, and linear-trend lines."""
        x = []
        y = []
        with open(input_path, "r") as scores:
            reader = csv.reader(scores)
            data = list(reader)
            for i in range(0, len(data)):
                x.append(int(i))
                y.append(int(data[i][0]))
        plt.subplots()
        plt.plot(x, y, label="score per run")
        # None means "average over everything plotted so far".
        average_range = average_of_n_last if average_of_n_last is not None else len(x)
        plt.plot(x[-average_range:], [np.mean(y[-average_range:])] * len(y[-average_range:]), linestyle="--", label="last " + str(average_range) + " runs average")
        if show_goal:
            plt.plot(x, [AVERAGE_SCORE_TO_SOLVE] * len(x), linestyle=":", label=str(AVERAGE_SCORE_TO_SOLVE) + " score average goal")
        if show_trend and len(x) > 1:
            # Degree-1 polyfit of score vs. run index (first point skipped).
            trend_x = x[1:]
            z = np.polyfit(np.array(trend_x), np.array(y[1:]), 1)
            p = np.poly1d(z)
            plt.plot(trend_x, p(trend_x), linestyle="-.", label="trend")
        plt.title(self.env_name)
        plt.xlabel(x_label)
        plt.ylabel(y_label)
        if show_legend:
            plt.legend(loc="upper left")
        plt.savefig(output_path, bbox_inches="tight")
        plt.close()
    def _save_csv(self, path, score):
        """Append one score as a single-column CSV row, creating the file first
        if it does not exist."""
        if not os.path.exists(path):
            with open(path, "w"):
                pass
        scores_file = open(path, "a")
        with scores_file:
            writer = csv.writer(scores_file)
            writer.writerow([score])
ENV_NAME = "CartPole-v1"
GAMMA = 0.95  # discount factor applied to the next state's max Q-value
LEARNING_RATE = 0.001  # Adam step size
MEMORY_SIZE = 1000000  # replay buffer capacity
BATCH_SIZE = 20  # minibatch size sampled per experience_replay call
EXPLORATION_MAX = 1.0  # starting epsilon (fully random policy)
EXPLORATION_MIN = 0.01  # epsilon floor
EXPLORATION_DECAY = 0.995  # multiplicative epsilon decay per replay step
class DQNSolver:
    """Deep-Q agent: epsilon-greedy policy over a small MLP with replay memory."""
    def __init__(self, observation_space, action_space):
        self.exploration_rate = EXPLORATION_MAX
        self.action_space = action_space
        self.memory = deque(maxlen=MEMORY_SIZE)
        # Two hidden relu layers feeding a linear Q-value head, MSE loss.
        self.model = Sequential()
        self.model.add(Dense(24, input_shape=(observation_space,), activation="relu"))
        self.model.add(Dense(24, activation="relu"))
        self.model.add(Dense(self.action_space, activation="linear"))
        self.model.compile(loss="mse", optimizer=Adam(lr=LEARNING_RATE))
    def remember(self, state, action, reward, next_state, done):
        """Store one transition tuple in replay memory."""
        self.memory.append((state, action, reward, next_state, done))
    def act(self, state):
        """Epsilon-greedy action: random with probability exploration_rate,
        otherwise argmax of the predicted Q-values."""
        explore = np.random.rand() < self.exploration_rate
        if explore:
            return random.randrange(self.action_space)
        return np.argmax(self.model.predict(state)[0])
    def experience_replay(self):
        """Fit the network on a random minibatch, then decay exploration."""
        if len(self.memory) < BATCH_SIZE:
            return
        for state, action, reward, state_next, terminal in random.sample(self.memory, BATCH_SIZE):
            target = reward
            if not terminal:
                # Bootstrap from the best predicted Q-value of the next state.
                target = reward + GAMMA * np.amax(self.model.predict(state_next)[0])
            q_values = self.model.predict(state)
            q_values[0][action] = target
            self.model.fit(state, q_values, verbose=0)
        self.exploration_rate = max(EXPLORATION_MIN,
                                    self.exploration_rate * EXPLORATION_DECAY)
def cartpole():
    """Train the DQN agent on CartPole; ScoreLogger exits the process once
    the solve criterion is reached."""
    env = gym.make(ENV_NAME)
    score_logger = ScoreLogger(ENV_NAME)
    observation_space = env.observation_space.shape[0]
    action_space = env.action_space.n
    dqn_solver = DQNSolver(observation_space, action_space)
    run = 0
    while True:
        run += 1
        state = np.reshape(env.reset(), [1, observation_space])
        step = 0
        terminal = False
        while not terminal:
            step += 1
            #env.render()
            action = dqn_solver.act(state)
            state_next, reward, terminal, info = env.step(action)
            if terminal:
                # Penalize the terminal transition.
                reward = -reward
            state_next = np.reshape(state_next, [1, observation_space])
            dqn_solver.remember(state, action, reward, state_next, terminal)
            state = state_next
            if terminal:
                print("Run: " + str(run) + ", exploration: " + str(dqn_solver.exploration_rate) + ", score: " + str(step))
                score_logger.add_score(step, run)
            else:
                dqn_solver.experience_replay()
if __name__ == "__main__":
cartpole() | [
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.poly1d",
"keras.layers.Dense",
"gym.make",
"os.remove",
"os.path.exists",
"numpy.mean",
"collections.deque",
"numpy.reshape",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"csv.reade... | [((252, 273), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (266, 273), False, 'import matplotlib\n'), ((5753, 5771), 'gym.make', 'gym.make', (['ENV_NAME'], {}), '(ENV_NAME)\n', (5761, 5771), False, 'import gym\n'), ((796, 835), 'collections.deque', 'deque', ([], {'maxlen': 'CONSECUTIVE_RUNS_TO_SOLVE'}), '(maxlen=CONSECUTIVE_RUNS_TO_SOLVE)\n', (801, 835), False, 'from collections import deque\n'), ((881, 912), 'os.path.exists', 'os.path.exists', (['SCORES_PNG_PATH'], {}), '(SCORES_PNG_PATH)\n', (895, 912), False, 'import os\n'), ((964, 995), 'os.path.exists', 'os.path.exists', (['SCORES_CSV_PATH'], {}), '(SCORES_CSV_PATH)\n', (978, 995), False, 'import os\n'), ((1547, 1564), 'statistics.mean', 'mean', (['self.scores'], {}), '(self.scores)\n', (1551, 1564), False, 'from statistics import mean\n'), ((2814, 2828), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2826, 2828), True, 'import matplotlib.pyplot as plt\n'), ((2837, 2874), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""score per run"""'}), "(x, y, label='score per run')\n", (2845, 2874), True, 'import matplotlib.pyplot as plt\n'), ((3528, 3552), 'matplotlib.pyplot.title', 'plt.title', (['self.env_name'], {}), '(self.env_name)\n', (3537, 3552), True, 'import matplotlib.pyplot as plt\n'), ((3561, 3580), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (3571, 3580), True, 'import matplotlib.pyplot as plt\n'), ((3589, 3608), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (3599, 3608), True, 'import matplotlib.pyplot as plt\n'), ((3684, 3729), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_path'], {'bbox_inches': '"""tight"""'}), "(output_path, bbox_inches='tight')\n", (3695, 3729), True, 'import matplotlib.pyplot as plt\n'), ((3738, 3749), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3747, 3749), True, 'import matplotlib.pyplot as plt\n'), ((4389, 4414), 
'collections.deque', 'deque', ([], {'maxlen': 'MEMORY_SIZE'}), '(maxlen=MEMORY_SIZE)\n', (4394, 4414), False, 'from collections import deque\n'), ((4437, 4449), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4447, 4449), False, 'from keras.models import Sequential\n'), ((5064, 5086), 'numpy.argmax', 'np.argmax', (['q_values[0]'], {}), '(q_values[0])\n', (5073, 5086), True, 'import numpy as np\n'), ((5198, 5236), 'random.sample', 'random.sample', (['self.memory', 'BATCH_SIZE'], {}), '(self.memory, BATCH_SIZE)\n', (5211, 5236), False, 'import random\n'), ((6055, 6096), 'numpy.reshape', 'np.reshape', (['state', '[1, observation_space]'], {}), '(state, [1, observation_space])\n', (6065, 6096), True, 'import numpy as np\n'), ((926, 952), 'os.remove', 'os.remove', (['SCORES_PNG_PATH'], {}), '(SCORES_PNG_PATH)\n', (935, 952), False, 'import os\n'), ((1009, 1035), 'os.remove', 'os.remove', (['SCORES_CSV_PATH'], {}), '(SCORES_CSV_PATH)\n', (1018, 1035), False, 'import os\n'), ((2637, 2655), 'csv.reader', 'csv.reader', (['scores'], {}), '(scores)\n', (2647, 2655), False, 'import csv\n'), ((3432, 3444), 'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (3441, 3444), True, 'import numpy as np\n'), ((3646, 3674), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (3656, 3674), True, 'import matplotlib.pyplot as plt\n'), ((3804, 3824), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3818, 3824), False, 'import os\n'), ((3966, 3989), 'csv.writer', 'csv.writer', (['scores_file'], {}), '(scores_file)\n', (3976, 3989), False, 'import csv\n'), ((4473, 4535), 'keras.layers.Dense', 'Dense', (['(24)'], {'input_shape': '(observation_space,)', 'activation': '"""relu"""'}), "(24, input_shape=(observation_space,), activation='relu')\n", (4478, 4535), False, 'from keras.layers import Dense\n'), ((4560, 4588), 'keras.layers.Dense', 'Dense', (['(24)'], {'activation': '"""relu"""'}), "(24, activation='relu')\n", 
(4565, 4588), False, 'from keras.layers import Dense\n'), ((4613, 4658), 'keras.layers.Dense', 'Dense', (['self.action_space'], {'activation': '"""linear"""'}), "(self.action_space, activation='linear')\n", (4618, 4658), False, 'from keras.layers import Dense\n'), ((4907, 4923), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4921, 4923), True, 'import numpy as np\n'), ((4968, 5003), 'random.randrange', 'random.randrange', (['self.action_space'], {}), '(self.action_space)\n', (4984, 5003), False, 'import random\n'), ((6373, 6419), 'numpy.reshape', 'np.reshape', (['state_next', '[1, observation_space]'], {}), '(state_next, [1, observation_space])\n', (6383, 6419), True, 'import numpy as np\n'), ((3377, 3394), 'numpy.array', 'np.array', (['trend_x'], {}), '(trend_x)\n', (3385, 3394), True, 'import numpy as np\n'), ((3396, 3411), 'numpy.array', 'np.array', (['y[1:]'], {}), '(y[1:])\n', (3404, 3411), True, 'import numpy as np\n'), ((4709, 4731), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'LEARNING_RATE'}), '(lr=LEARNING_RATE)\n', (4713, 4731), False, 'from keras.optimizers import Adam\n'), ((3001, 3028), 'numpy.mean', 'np.mean', (['y[-average_range:]'], {}), '(y[-average_range:])\n', (3008, 3028), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import print_function
import glob
import gc
import numpy as np
from lmatools.stream.subset import coroutine
from lmatools.density_tools import unique_vectors
import logging
# Module-level logger; the NullHandler keeps the library silent unless the
# host application configures logging itself.
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
# --------------------------------------------------------------------------
# ----- This section could be replaced with stormdrain.pipeline imports ----
# --------------------------------------------------------------------------
# class map_projector(object):
# def __init__(self, ctr_lat, ctr_lon, proj_name='eqc'):
# self.mapProj = MapProjection(projection=proj_name, ctrLat=ctr_lat, ctrLon=ctr_lon, lat_ts=ctr_lat, lon_0=ctr_lon)
# self.geoProj = GeographicSystem()
#
# def __call__(self, lon, lat, alt):
# x,y,z = self.mapProj.fromECEF(
# *self.geoProj.toECEF(lon, lat, alt)
# )
# return x, y, z
#
# @coroutine
# def map_projector(ctr_lat, ctr_lon, target, proj_name='eqc'):
# mapProj = MapProjection(projection=proj_name, ctrLat=ctr_lat, ctrLon=ctr_lon, lat_ts=ctr_lat, lon_0=ctr_lon)
# geoProj = GeographicSystem()
# while True:
# lon, lat, alt = (yield)
# x,y,z = self.mapProj.fromECEF(
# *self.geoProj.toECEF(lon, lat, alt)
# )
# target.send((x,y,z))
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
@coroutine
def flash_count_log(logfile, format_string="%s flashes in frame starting at %s"):
    """Coroutine that tallies flashes per frame and, on close, writes one
    summary line per frame to logfile.

    logfile may be a file-like object (written via .write; the caller handles
    open/close) or a logger-like object (written via .info).
    """
    # Running flash count keyed by frame start time.
    frame_times = {}
    try:
        while True:
            # Receive list of flashes, frame start time
            flashes, frame_start_time = (yield)
            frame_times[frame_start_time] = frame_times.get(frame_start_time, 0) + len(flashes)
    except GeneratorExit:
        # Emit the summary in chronological order when the coroutine closes.
        for frame_start_time in sorted(frame_times.keys()):
            flash_count_status = format_string % (frame_times[frame_start_time], frame_start_time)
            if hasattr(logfile, 'write'):
                logfile.write(flash_count_status + '\n')
            else:
                logfile.info(flash_count_status)
@coroutine
def filter_flash(target, min_points=10):
    """ Filters flash by minimum number of points.

    BUG FIX: the threshold was hard-coded to 10, silently ignoring the
    min_points argument; the parameter is now honored (default unchanged).
    """
    while True:
        evs, flash = (yield)  # Receive a flash
        if (flash['n_points'] >= min_points):
            target.send((evs, flash))
        del evs, flash
def stack_chopped_arrays(chop_sequence):
    """ Combine a sequence of equal-length lists of arrays by position.

    Each inner list holds arrays for the same sequence of time windows
    (e.g., one list per file); the arrays at matching positions are
    concatenated, producing one combined array per window:

    ([a0, a1, a2, a3],
     [b0, b1, b2, b3],
     [c0, c1, c2, c3],)
    becomes
    [a0+b0+c0, a1+b1+c1, a2+b2+c2, a3+b3+c3]
    where plus indicates concatenation.
    """
    columns = zip(*chop_sequence)
    return [np.hstack(column) for column in columns]
class ArrayChopper(object):
    """ Initialized with an array of N+1 edges corresponding to N
        windows. The edges are assumed to be sorted.

        Methods
        window_masks(data, edge_key=None): given an array of data with a named dtype,
            return a list of boolean masks that can be used to index data,
            giving the subset of data which corresponds to each window.
            If an edge_key is provided, it is assumed to reference a named array
            and masking is performed on data[edge_key]
        chop(data, edge_key=None): Returns a list of arrays where the
            masks described above have been applied to chop the data

        Generator functions for each of the above are also available
        gen_window_masks, gen_chop
    """
    def __init__(self, edges):
        # Sorted array of N+1 window edges.
        self.edges = edges
    def _apply_edge_key(self, data, edge_key):
        """Return data itself, or the named column data[edge_key] if given."""
        if edge_key is not None:
            d = data[edge_key]
        else:
            d = data
        return d
    def gen_edge_pairs(self):
        """Yield consecutive (left, right) edge pairs, one per window."""
        for l, r in zip(self.edges[:-1], self.edges[1:]):
            yield l, r
    def window_masks(self, data, edge_key=None):
        # BUG FIX: previously called self.gen_window_masks(self, data, edge_key),
        # which passed self twice through the bound method and raised TypeError.
        masks = [w for w in self.gen_window_masks(data, edge_key)]
        return masks
    def gen_window_masks(self, data, edge_key=None):
        d = self._apply_edge_key(data, edge_key)
        for l, r in self.gen_edge_pairs():
            # make sure this is only one-side inclusive to eliminate double-counting
            within = (d >= l) & (d < r)
            yield within
    def chop(self, data, edge_key=None):
        chopped = [d for d in self.gen_chop(data, edge_key)]
        return chopped
    def gen_chop(self, data, edge_key=None):
        for mask in self.gen_window_masks(data, edge_key):
            yield data[mask]
@coroutine
def flashes_to_frames(time_edges, targets, time_key='start', do_events=False,
                      time_edges_datetime=None, flash_counter=None):
    """ time_edges_datetime is same len as time_edges but with datetime objects
        instead of floats.

        When paired with extract_events_for_flashes, and events=False, the
        flashes are placed in the correct time frame, and any events from that
        flash, including those that cross a time boundary, are included.

        if do_events='event_time_key', then also subset the events. This
        operation is naive, i.e., the events are selected by time with no
        attempt to keep events together with their parent flash. Therefore, it
        is important to ensure that events and flashes are sent together in
        chunks that do not cross time boundaries, which implies pre-aggregating
        and time-tagging the event data so that the events and flashes remain
        together when naively subset. If those conditions are met then this
        option allows one to set up a pipeline without an additional
        extract_events_for_flashes step.
    """
    if time_edges_datetime is None:
        # print "Datetime-style time edges not found, using time edges in seconds for flash count label"
        time_edges_datetime = time_edges
    flash_count_messages = []
    # One target per frame: N+1 edges bound N frames.
    assert len(time_edges) == (len(time_edges_datetime))
    assert len(time_edges) == (len(targets)+1)
    while True:
        events, flashes = (yield)
        # Sort flashes by start time, then locate the frame edges in the
        # sorted times; consecutive edge positions give per-frame slices.
        start_times = flashes[time_key]
        sort_idx = np.argsort(start_times) #, order=[time_key])
        idx = np.searchsorted(start_times[sort_idx], time_edges)
        slices = [slice(*i) for i in zip(idx[0:-1], idx[1:])]
        if do_events != False:
            # Same sort-and-slice subdivision for events, keyed by the
            # event time column named by do_events.
            ev_start_times = events[do_events]
            ev_sort_idx = np.argsort(ev_start_times)
            ev_idx = np.searchsorted(ev_start_times[ev_sort_idx], time_edges)
            ev_slices = [slice(*i) for i in zip(ev_idx[0:-1], ev_idx[1:])]
        else:
            ev_slices = range(len(time_edges))
        for target, s, ev_s, frame_start_time in zip(targets,
                        slices, ev_slices, time_edges_datetime[:-1]):
            these_flashes = flashes[sort_idx][s]
            if do_events != False:
                these_events = events[ev_sort_idx][ev_s]
            else:
                # Without event subsetting, every frame gets the full event table.
                these_events = events
            if flash_counter is not None:
                flash_counter.send((these_flashes, frame_start_time))
            # flash_count_status = "Sending %s flashes to frame starting at %s" % (len(these_flashes), frame_start_time)
            # flash_count_messages += flash_count_status
            # print flash_count_status
            target.send((these_events, these_flashes))
        del events, flashes, start_times, sort_idx, idx, slices
    # NOTE(review): this line sits after `while True` and GeneratorExit
    # propagates past it, so it appears to be unreachable dead code;
    # flash_count_messages is also never appended to above.
    log.info(flash_count_messages)
def event_yielder(evs, fls):
    """Yield every event row whose flash_id matches a flash in fls,
    grouped in flash order."""
    for fl in fls:
        matching = evs[evs['flash_id'] == fl['flash_id']]
        for ev in matching:
            yield ev
@coroutine
def extract_events_for_flashes(target, flashID_key='flash_id'):
    """ Takes a large table of events and grabs only the events belonging to the flashes.
    """
    while True:
        evs, fls = (yield)
        event_dtype = evs.dtype
        # Materialize, flash by flash, every event row whose flash_id
        # matches one of the received flashes. Going through the
        # event_yielder generator keeps this a single structured-array
        # construction; a per-flash dict lookup would be possible but is
        # not needed at this data volume.
        events = np.fromiter(event_yielder(evs, fls), dtype=event_dtype)
        target.send((events, fls))
        del events, evs, fls
# @coroutine
# def extract_events(target, flashID_key='flash_id'):
# """ Takes a large table of events and grabs only the events belonging to the flash.
# This is useful if you filter out a bunch of flashes before going to the trouble of
# reading the flashes in.
# """
# while True:
# evs, flash = (yield)
# flash_id = flash[flashID_key]
# event_dtype = evs[0].dtype
# # events = [ev[:] for ev in evs if ev[flashID_key] == flash_id]
# # events = np.asarray(events, dtype=event_dtype)
# # events = evs[:]
# events = evs[evs[flashID_key]==flash_id]
# # events = np.fromiter((ev[:] for ev in evs if ev[flashID_key] == flash_id), dtype=event_dtype)
# target.send((events, flash))
@coroutine
def no_projection(x_coord, y_coord, z_coord, target, use_flashes=False):
    """ Pass coordinates through without any map projection.

        Reads the named coordinate variables from the flashes (when
        use_flashes is True) or from the events, and sends
        (events, flashes, x, y, z) to the target.
    """
    while True:
        events, flashes = (yield)
        points = flashes if use_flashes == True else events
        x = points[x_coord]
        y = points[y_coord]
        z = points[z_coord]
        target.send((events, flashes, x, y, z))
        del events, flashes, x, y, z, points
@coroutine
def project(x_coord, y_coord, z_coord, mapProj, geoProj, target,
            use_flashes=False, transform=True):
    """ Adds projected coordinates to the flash and events stream"""
    while True:
        events, flashes = (yield)
        points = flashes if use_flashes == True else events
        coords = (points[x_coord], points[y_coord], points[z_coord])
        if transform:
            # Route through earth-centered coordinates: native -> ECEF -> map.
            x, y, z = mapProj.fromECEF(*geoProj.toECEF(*coords))
        else:
            x, y, z = coords
        target.send((events, flashes, np.atleast_1d(x),
                     np.atleast_1d(y), np.atleast_1d(z)))
        del events, flashes, x, y, z, points
@coroutine
def footprint_mean(flash_id_key='flash_id', area_key='area'):
    """ Takes x, y, z flash locations and gets
        Extent density unique pixels, average all flashes

        NOTE(review): this coroutine references x0, y0, dx, dy and target,
        none of which are parameters and none of which are defined in this
        part of the file — as written it would raise NameError when resumed
        (compare flash_std, which takes them as arguments). Verify whether
        this is dead code before relying on it.
    """
    while True:
        events, flash, x,y,z = (yield)
        # print 'Doing extent density',
        # Quantize points to integer grid-cell indices (origin x0, y0).
        x_i = np.floor( (x-x0)/dx ).astype('int32')
        y_i = np.floor( (y-y0)/dy ).astype('int32')
        if len(x_i) > 0:
            # flash_id -> flash footprint area, for this chunk of flashes.
            footprints = dict(list(zip(flash[flash_id_key], flash[area_key])))
            # print 'with points numbering', len(x_i)
            # Keep one event per unique (grid cell, flash) combination.
            unq_idx = unique_vectors(x_i, y_i, events['flash_id'])
            # if x[unq_idx].shape[0] > 1:
            fl_id = events['flash_id'][unq_idx]
            areas = [footprints[fi] for fi in fl_id] #puts areas in same order as x[unq_idx], y[unq_idx]
            # counts normalized by areas
            target.send((x[unq_idx],y[unq_idx],areas))
            del footprints, unq_idx, fl_id, areas
        # else:
            # print ''
        del events, flash, x, y, z, x_i, y_i
@coroutine
def footprint_mean_3d(flash_id_key='flash_id', area_key='area'):
    """ Takes x, y, z flash locations and gets
        Extent density unique pixels, average all flashes

        NOTE(review): like footprint_mean, this references x0, y0, z0, dx,
        dy, dz and target, none of which are parameters or defined in this
        part of the file — as written it would raise NameError when resumed
        (compare flash_std_3d, which takes them as arguments). Verify
        whether this is dead code before relying on it.
    """
    while True:
        events, flash, x,y,z = (yield)
        # print 'Doing extent density',
        # Quantize points to integer 3D grid-cell indices.
        x_i = np.floor( (x-x0)/dx ).astype('int32')
        y_i = np.floor( (y-y0)/dy ).astype('int32')
        z_i = np.floor( (z-z0)/dz ).astype('int32')
        if len(x_i) > 0:
            # flash_id -> flash footprint area, for this chunk of flashes.
            footprints = dict(list(zip(flash[flash_id_key], flash[area_key])))
            # print 'with points numbering', len(x_i)
            # Keep one event per unique (grid cell, flash) combination.
            unq_idx = unique_vectors(x_i, y_i, z_i, events['flash_id'])
            # if x[unq_idx].shape[0] > 1:
            fl_id = events['flash_id'][unq_idx]
            areas = [footprints[fi] for fi in fl_id] #puts areas in same order as x[unq_idx], y[unq_idx]
            # counts normalized by areas
            target.send((x[unq_idx],y[unq_idx],z[unq_idx],areas))
            del footprints, unq_idx, fl_id, areas
        # else:
            # print ''
        del events, flash, x, y, z, x_i, y_i, z_i
@coroutine
def point_density(target, weight_key=None, weight_flashes=True,
        flash_id_key='flash_id', event_grid_area_fraction_key=None):
    """ Sends event x, y, z location directly. If weight_key is provided
        also extract the weights from the flash data with variable name matching
        weight_key. if weight_flashes=False, use the event data instead of the
        flash data.

        event_grid_area_fraction_key is accepted for signature parity with
        extent_density but is not used here.
    """
    while True:
        events, flash, x, y, z = (yield)
        x = np.atleast_1d(x)
        y = np.atleast_1d(y)
        if len(x) > 0:
            if weight_key is not None:
                if weight_flashes:
                    # flash_id -> weight, replicated per event so the
                    # weights line up with x, y.
                    weight_lookup = dict(list(zip(flash[flash_id_key],
                        flash[weight_key])))
                    weights = np.fromiter((weight_lookup[fi] for fi in
                        events['flash_id']), dtype='float64')
                else:
                    weights = events[weight_key]
            else:
                weights = None
            # Bug fix: original used 'with points numbering %s'.format(len(x)),
            # which has no {} placeholder and so silently dropped the count.
            # Use logging's lazy %-style argument instead.
            log.debug('with points numbering %s', len(x))
            target.send((x, y, weights))
        del events, flash, x, y, z
@coroutine
def point_density_3d(target, weight_key=None, weight_flashes=True,
        flash_id_key='flash_id'):
    """ Sends event x, y, z location directly. If weight_key is provided
        also extract the weights from the flash data with variable name matching
        weight_key. if weight_flashes=False, use the event data instead of the
        flash data.
    """
    while True:
        events, flash, x, y, z = (yield)
        if len(x) > 0:
            if weight_key is not None:
                if weight_flashes:
                    # flash_id -> weight, replicated per event so the
                    # weights line up with x, y, z.
                    weight_lookup = dict(list(zip(flash[flash_id_key],
                        flash[weight_key])))
                    weights = np.fromiter((weight_lookup[fi] for fi in
                        events['flash_id']), dtype='float64')
                else:
                    weights = events[weight_key]
            else:
                weights = None
            # Bug fix: original used 'with points numbering %s'.format(len(x)),
            # which has no {} placeholder and so silently dropped the count.
            log.debug('with points numbering %s', len(x))
            target.send((x, y, z, weights))
        del events, flash, x, y, z
@coroutine
def flash_std(x0, y0, dx, dy, target, flash_id_key='flash_id', weight_key=None):
    """ This function assumes a regular grid in x and y with spacing dx, dy

        x0, y0 is the x coordinate of the lower left corner of the lower-left grid cell,
        i.e., the lower left node of the grid mesh in cartesian space

        Eliminates duplicate points in gridded space and sends the reduced
        set of points to the target.

        NOTE: Use of this function is to only find the standard deviation of
        flash size: the weights sent downstream are the *squared* flash
        values, in the same order as the unique x, y points.
    """
    while True:
        # assumes x, y, z are in the same order as events
        events, flash, x, y, z = (yield)
        x_i = np.floor( (x-x0)/dx ).astype('int32')
        y_i = np.floor( (y-y0)/dy ).astype('int32')
        if len(x_i) > 0:
            log.debug(('extent with points numbering', len(x_i), ' with weights', weight_key))
            # Keep one event per unique (grid cell, flash) combination.
            unq_idx = unique_vectors(x_i, y_i, events[flash_id_key])
            if weight_key is not None:  # identity test; was `!= None`
                # Squared weights so the accumulator can form a variance.
                weight_lookup = dict(list(zip(flash[flash_id_key], flash[weight_key]**2.)))
                # weights in the same order as x[unq_idx], y[unq_idx]
                weights = [weight_lookup[fi] for fi in events[unq_idx]['flash_id']]
                del weight_lookup
            else:
                weights = None
            target.send((x[unq_idx], y[unq_idx], weights))
            del weights, unq_idx
        del events, flash, x, y, z, x_i, y_i
@coroutine
def flash_std_3d(x0, y0, z0, dx, dy, dz, target, flash_id_key='flash_id', weight_key=None):
    """ This function assumes a regular grid in x and y with spacing dx, dy

        x0, y0 is the x coordinate of the lower left corner of the lower-left grid cell,
        i.e., the lower left node of the grid mesh in cartesian space

        Eliminates duplicate points in gridded space and sends the reduced
        set of points to the target. The weights sent downstream are the
        *squared* flash values, for standard-deviation accumulation.
    """
    while True:
        # assumes x, y, z are in the same order as events
        events, flash, x, y, z = (yield)
        x_i = np.floor( (x-x0)/dx ).astype('int32')
        y_i = np.floor( (y-y0)/dy ).astype('int32')
        z_i = np.floor( (z-z0)/dz ).astype('int32')
        log.debug(len(x_i))
        if len(x_i) > 0:
            log.info(('extent with points numbering', len(x_i), ' with weights', weight_key))
            # Keep one event per unique (grid cell, flash) combination.
            unq_idx = unique_vectors(x_i, y_i, z_i, events[flash_id_key])
            if weight_key is not None:  # identity test; was `!= None`
                # Squared weights so the accumulator can form a variance.
                weight_lookup = dict(list(zip(flash[flash_id_key], flash[weight_key]**2.)))
                # weights in the same order as x[unq_idx], y[unq_idx]
                weights = [weight_lookup[fi] for fi in events[unq_idx]['flash_id']]
                del weight_lookup
            else:
                weights = None
            target.send((x[unq_idx], y[unq_idx], z[unq_idx], weights))
            del weights, unq_idx
        del events, flash, x, y, z, x_i, y_i, z_i
@coroutine
def extent_density(x0, y0, dx, dy, target, flash_id_key='flash_id',
        weight_key=None, event_grid_area_fraction_key=None):
    """ This function assumes a regular grid in x and y with spacing dx, dy

        x0, y0 is the x coordinate of the lower left corner of the lower-left grid cell,
        i.e., the lower left node of the grid mesh in cartesian space

        Eliminates duplicate points in gridded space and sends the reduced
        set of points to the target.

        weight_key: if given, each unique (cell, flash) point carries the
            flash's value of that variable instead of an implicit +1.
        event_grid_area_fraction_key: if given, the per-event fraction of
            the underlying grid cell covered is sent alongside the weights
            so the downstream accumulator can scale counts.
    """
    while True:
        # assumes x, y, z are in the same order as events
        events, flash, x, y, z = (yield)
        x_i = np.floor( (x-x0)/dx ).astype('int32')
        y_i = np.floor( (y-y0)/dy ).astype('int32')
        if len(x_i) > 0:
            log.info(('extent with points numbering', len(x_i), ' with weights', weight_key))
            # Keep one representative event per unique (cell, flash) pair.
            unq_idx = unique_vectors(x_i, y_i, events[flash_id_key])
            if weight_key is not None:  # identity test; was `!= None`
                weight_lookup = dict(list(zip(flash[flash_id_key], flash[weight_key])))
                # weights in the same order as x[unq_idx], y[unq_idx]
                weights = np.fromiter((weight_lookup[fi] for fi in
                    events[unq_idx]['flash_id']), dtype='float64')
            else:
                weights = None
            if event_grid_area_fraction_key is not None:
                # Each event with a unique index is replicated above with the
                # representative value for the flash (weights = None implies a
                # weight of +1 for each flash). If there is knowledge of how
                # much of the underlying grid cell each event fills (e.g. from
                # a pixel-based event detector), then we can modify the
                # weights by how much of the grid cell is filled. The logic
                # here presumes that any of the events in the grid cell cover
                # as much area as any other, i.e., that the pixels doing the
                # event detection don't move during the time of the flash.
                grid_frac = events[unq_idx][event_grid_area_fraction_key]
            else:
                grid_frac = None
            target.send((x[unq_idx], y[unq_idx], weights, grid_frac))
            del weights, grid_frac, unq_idx
        del events, flash, x, y, z, x_i, y_i
@coroutine
def extent_density_3d(x0, y0, z0, dx, dy, dz, target, flash_id_key='flash_id', weight_key=None):
    """ This function assumes a regular grid in x and y with spacing dx, dy

        x0, y0 is the x coordinate of the lower left corner of the lower-left grid cell,
        i.e., the lower left node of the grid mesh in cartesian space

        Eliminates duplicate points in gridded space and sends the reduced
        set of points to the target.
    """
    while True:
        # assumes x, y, z are in the same order as events
        events, flash, x, y, z = (yield)
        x_i = np.floor( (x-x0)/dx ).astype('int32')
        y_i = np.floor( (y-y0)/dy ).astype('int32')
        z_i = np.floor( (z-z0)/dz ).astype('int32')
        log.debug(len(x_i))
        if len(x_i) > 0:
            log.info(('extent with points numbering', len(x_i), ' with weights', weight_key))
            # Keep one representative event per unique (cell, flash) pair.
            unq_idx = unique_vectors(x_i, y_i, z_i, events[flash_id_key])
            if weight_key is not None:  # identity test; was `!= None`
                weight_lookup = dict(list(zip(flash[flash_id_key], flash[weight_key])))
                # weights in the same order as x[unq_idx], y[unq_idx]
                weights = [weight_lookup[fi] for fi in events[unq_idx]['flash_id']]
                del weight_lookup
            else:
                weights = None
            target.send((x[unq_idx], y[unq_idx], z[unq_idx], weights))
            del weights, unq_idx
        del events, flash, x, y, z, x_i, y_i, z_i
@coroutine
def accumulate_points_on_grid(grid, xedge, yedge, out=None, label='',
        grid_frac_weights=False):
    """ Accumulate received points (and optional weights) onto grid in place.

        grid: 2D array shaped (len(xedge)-1, len(yedge)-1), updated in place
            as each chunk of points arrives.
        xedge, yedge: histogram bin edges.
        out: dict; the final grid is stored in out['out'] on GeneratorExit.
        grid_frac_weights: if True, each received tuple carries a fourth
            grid-fraction item used to scale the counts.

        When weights are present the grid holds the running mean of the
        weights per cell. The count and weight totals are tracked in
        separate accumulator grids so that a frame spanning multiple data
        chunks yields the true average rather than a sum of per-chunk
        averages.
    """
    assert xedge.shape[0] == grid.shape[0]+1
    assert yedge.shape[0] == grid.shape[1]+1
    if out is None:  # identity test; was `out == None`
        out = {}
    count_hist = grid.copy()
    total_hist = grid.copy()
    have_weights = False
    try:
        while True:
            if grid_frac_weights:
                x, y, weights, grid_frac = (yield)
            else:
                x, y, weights = (yield)
                grid_frac = None
            if len(x) > 0:
                x = np.atleast_1d(x)
                y = np.atleast_1d(y)
                log.info(('accumulating ', len(x), 'points for ', label))
                # density=False replaces the `normed` keyword, which was
                # removed from np.histogramdd in newer NumPy; both mean
                # "raw counts / summed weights".
                count, edges = np.histogramdd((x,y), bins=(xedge, yedge),
                    weights=grid_frac, density=False)
                count_hist += count.astype(count_hist.dtype)
                if weights is not None:
                    have_weights = True
                    # histogramdd sums up the weights in each bin
                    if grid_frac is not None:
                        weights = weights*grid_frac
                    total, edges = np.histogramdd((x,y), bins=(xedge, yedge),
                        weights=weights, density=False)
                    total_hist += total
                    del total, edges
                if grid is None:
                    grid = count
                    out['out'] = grid
                else:
                    if have_weights:
                        # Mean weight per cell; empty cells forced to zero.
                        bad = (count_hist <= 0)
                        avg = np.asarray(total_hist, dtype='float32')/count_hist
                        avg[bad] = 0.0
                        del bad
                    else:
                        avg = count_hist
                    # In-place assignment (not rebinding) is essential so a
                    # caller holding a reference to grid sees the update.
                    grid[:] = avg[:].astype(grid.dtype)
                    del count, avg
            del x, y, weights, grid_frac
            gc.collect()
    except GeneratorExit:
        out['out'] = grid
@coroutine
def accumulate_points_on_grid_3d(grid, xedge, yedge, zedge, out=None, label=''):
    """ Accumulate received 3D points (and optional weights) onto grid.

        grid: 3D array shaped (len(xedge)-1, len(yedge)-1, len(zedge)-1),
            updated in place (via +=) as chunks arrive.
        out: dict; the final grid is stored in out['out'] on GeneratorExit.
        When weights are present, each chunk contributes the per-cell mean
        of the weights instead of a raw count.
    """
    assert xedge.shape[0] == grid.shape[0]+1
    assert yedge.shape[0] == grid.shape[1]+1
    assert zedge.shape[0] == grid.shape[2]+1
    if out is None:  # identity test; was `out == None`
        out = {}
    try:
        while True:
            x, y, z, weights = (yield)
            if len(x) > 0:
                x = np.atleast_1d(x)
                y = np.atleast_1d(y)
                z = np.atleast_1d(z)
                log.info(('accumulating ', len(x), 'points for ', label))
                # density=False replaces the removed `normed` keyword.
                count, edges = np.histogramdd((x,y,z),
                    bins=(xedge, yedge, zedge), weights=None, density=False)
                # Bug fix: `weights != None` broadcasts elementwise when
                # weights is an ndarray, making the `if` test ambiguous.
                if weights is not None:
                    # histogramdd sums up the weights in each bin
                    total, edges = np.histogramdd((x,y,z),
                        bins=(xedge, yedge, zedge), weights=weights, density=False)
                    # Mean of the weights in each bin; empty cells -> 0.
                    bad = (count <= 0)
                    count = np.asarray(total, dtype='float32')/count
                    count[bad] = 0.0
                    del total, edges, bad
                # += (not rebinding) keeps the caller's grid reference valid
                if grid is None:
                    grid = count
                    out['out'] = grid
                else:
                    grid += count.astype(grid.dtype)
                    del count
            del x, y, z, weights
            gc.collect()
    except GeneratorExit:
        out['out'] = grid
##Repetition of functions below can probably be reduced to the original functions, however
##remain as this was the only way to get new gridded fields that were no the mean.
####FOR STANDARD DEVIATION OF A SINGLE FIELD:
@coroutine
def accumulate_points_on_grid_sdev(grid, grid2, xedge, yedge, out=None, label='', grid_frac_weights=True):
    """ Accumulate the standard deviation of the weights onto grid.

        grid2 appears to hold the corresponding mean field, so that the
        deviation is formed as sqrt(mean(w^2) - grid2^2) — TODO confirm
        against the caller that sets up this pipeline.

        NOTE(review): unlike accumulate_points_on_grid, the update
        `grid = np.sqrt(...)` below rebinds the local name instead of
        writing in place, so a caller's reference to grid is NOT updated
        until GeneratorExit stores the result in out['out'] — confirm
        this is intended.
    """
    assert xedge.shape[0] == grid.shape[0]+1
    assert yedge.shape[0] == grid.shape[1]+1
    if out == None:
        out = {}
    # grid = None
    try:
        while True:
            if grid_frac_weights:
                x, y, weights, grid_frac = (yield)
            else:
                x, y, weights = (yield)
                grid_frac=None
            if len(x) > 0:
                x = np.atleast_1d(x)
                y = np.atleast_1d(y)
                log.info(('accumulating ', len(x), 'points for ', label))
                count, edges = np.histogramdd((x,y), bins=(xedge, yedge), weights=grid_frac, normed=False)
                if weights is not None:
                    # histogramdd sums up weights in each bin for normed=False
                    # (weights arrive pre-squared from flash_std; squared
                    # again here via np.asarray(weights)**2. — verify the
                    # intended power against the producer)
                    total, edges = np.histogramdd((x,y), bins=(xedge, yedge), weights=np.asarray(weights)**2., normed=False)
                    # return the mean of the weights in each bin
                    bad = (count <= 0)
                    count = np.asarray(total, dtype='float32')/count
                    count[bad] = 0.0
                    del total, edges, bad
                # using += (as opposed to grid = grid + count) is essential
                # so that the user can retain a reference to the grid object
                # outside this routine.
                if grid is None:
                    grid = count
                    out['out'] = grid
                else:
                    grid += count.astype(grid.dtype)
                    # NOTE(review): rebinding, not in-place — see docstring.
                    grid = np.sqrt(grid - (grid2)**2.)
                    del count
            del x, y, weights
            gc.collect()
    except GeneratorExit:
        out['out'] = grid
@coroutine
def accumulate_points_on_grid_sdev_3d(grid, grid2, xedge, yedge, zedge, out=None, label=''):
    """ 3D version of accumulate_points_on_grid_sdev.

        NOTE(review): two hazards, kept as-is pending confirmation:
        (1) `weights != None` below compares elementwise when weights is an
        ndarray, which makes the `if` ambiguous (ValueError); it only works
        for None or a plain list. (2) `grid = np.sqrt(...)` rebinds the
        local name instead of writing in place, so a caller's reference to
        grid is only updated via out['out'] on GeneratorExit.
    """
    assert xedge.shape[0] == grid.shape[0]+1
    assert yedge.shape[0] == grid.shape[1]+1
    assert zedge.shape[0] == grid.shape[2]+1
    if out == None:
        out = {}
    # grid = None
    try:
        while True:
            x, y, z, weights = (yield)
            if len(x) > 0:
                x = np.atleast_1d(x)
                y = np.atleast_1d(y)
                z = np.atleast_1d(z)
                log.info(('accumulating ', len(x), 'points for ', label))
                count, edges = np.histogramdd((x,y,z), bins=(xedge, yedge, zedge), weights=None, normed=False)
                if weights != None:
                    # histogramdd sums up weights in each bin for normed=False
                    total, edges = np.histogramdd((x,y,z), bins=(xedge, yedge, zedge), weights=np.asarray(weights)**2., normed=False)
                    # return the mean of the weights in each bin
                    bad = (count <= 0)
                    count = np.asarray(total, dtype='float32')/count
                    count[bad] = 0.0
                    del total, edges, bad
                # using += (as opposed to grid = grid + count) is essential
                # so that the user can retain a reference to the grid object
                # outside this routine.
                if grid is None:
                    grid = count
                    out['out'] = grid
                else:
                    grid += count.astype(grid.dtype)
                    # NOTE(review): rebinding, not in-place — see docstring.
                    grid = np.sqrt(grid - (grid2)**2.)
                    del count
            del x, y, z, weights
            gc.collect()
    except GeneratorExit:
        out['out'] = grid
#####For Minima of extensive quantities:
@coroutine
def accumulate_minimum_on_grid(grid, xedge, yedge, out=None, label='', grid_frac_weights=True):
    """
    Instead of adding values from the counts produced from new blobs of data as
    they arrive, take the minimum of the previous value and the new value at
    each grid location. Logic prior to this function must eliminate all but one
    of the values at each grid cell, since the histogram process accumulates
    all of the values at that grid cell location for the blob of data that
    arrives.
    """
    assert xedge.shape[0] == grid.shape[0]+1
    assert yedge.shape[0] == grid.shape[1]+1
    if out is None:  # identity test; was `out == None`
        out = {}
    try:
        while True:
            if grid_frac_weights:
                x, y, weights, grid_frac = (yield)
            else:
                x, y, weights = (yield)
                grid_frac = None
            if len(x) > 0:
                x = np.atleast_1d(x)
                y = np.atleast_1d(y)
                log.info(('accumulating ', len(x), 'points for ', label))
                # density=False replaces the removed `normed` keyword.
                count, edges = np.histogramdd((x,y), bins=(xedge, yedge),
                    weights=grid_frac, density=False)
                if weights is not None:
                    # histogramdd sums up the weights in each bin
                    if grid_frac is not None:
                        weights = weights*grid_frac
                    total, edges = np.histogramdd((x,y), bins=(xedge, yedge),
                        weights=weights, density=False)
                    # Keep the summed weights (no normalization by count);
                    # cells with no points are forced to zero.
                    bad = (count <= 0)
                    count = np.asarray(total, dtype='float32')
                    count[bad] = 0.0
                    del total, edges, bad
                if grid is None:
                    grid = count
                    out['out'] = grid
                else:
                    # Cells populated in both: elementwise minimum.
                    # Cells still zero in grid: adopt the new value.
                    hascount, hasgrid = (count > 0), (grid > 0)
                    compboth = hasgrid & hascount
                    countonly = np.isclose(grid, 0) & hascount
                    minboth = np.minimum(grid[compboth],
                                          count[compboth].astype(grid.dtype))
                    grid[compboth] = minboth
                    grid[countonly] = count[countonly].astype(grid.dtype)
                    del count
            del x, y, weights
            gc.collect()
    except GeneratorExit:
        out['out'] = grid
#####FOR TOTAL ENERGY:
@coroutine
def accumulate_energy_on_grid(grid, xedge, yedge, out=None, label='', grid_frac_weights=True):
    """
    Like accumulate_points_on_grid, but doesn't normalize by the total count:
    each chunk adds the per-cell *sum* of the weights to the grid.
    """
    assert xedge.shape[0] == grid.shape[0]+1
    assert yedge.shape[0] == grid.shape[1]+1
    if out is None:  # identity test; was `out == None`
        out = {}
    try:
        while True:
            if grid_frac_weights:
                x, y, weights, grid_frac = (yield)
            else:
                x, y, weights = (yield)
                grid_frac = None
            if len(x) > 0:
                x = np.atleast_1d(x)
                y = np.atleast_1d(y)
                log.info(('accumulating ', len(x), 'points for ', label))
                # density=False replaces the removed `normed` keyword.
                count, edges = np.histogramdd((x,y), bins=(xedge, yedge),
                    weights=grid_frac, density=False)
                if weights is not None:
                    # histogramdd sums up the weights in each bin
                    if grid_frac is not None:
                        weights = weights*grid_frac
                    total, edges = np.histogramdd((x,y), bins=(xedge, yedge),
                        weights=weights, density=False)
                    # Summed weights (no division by count); empty cells -> 0.
                    bad = (count <= 0)
                    count = np.asarray(total, dtype='float32')
                    count[bad] = 0.0
                    del total, edges, bad
                # += (not rebinding) keeps the caller's grid reference valid
                if grid is None:
                    grid = count
                    out['out'] = grid
                else:
                    grid += count.astype(grid.dtype)
                    del count
            del x, y, weights
            gc.collect()
    except GeneratorExit:
        out['out'] = grid
@coroutine
def accumulate_energy_on_grid_3d(grid, xedge, yedge, zedge, out=None, label=''):
    """ 3D energy accumulation: adds the per-cell sum of |weights| to grid.

        grid: 3D array shaped (len(xedge)-1, len(yedge)-1, len(zedge)-1),
            updated in place (via +=) as chunks arrive.
        out: dict; the final grid is stored in out['out'] on GeneratorExit.
    """
    assert xedge.shape[0] == grid.shape[0]+1
    assert yedge.shape[0] == grid.shape[1]+1
    assert zedge.shape[0] == grid.shape[2]+1
    if out is None:  # identity test; was `out == None`
        out = {}
    try:
        while True:
            x, y, z, weights = (yield)
            if len(x) > 0:
                x = np.atleast_1d(x)
                y = np.atleast_1d(y)
                z = np.atleast_1d(z)
                log.info(('accumulating ', len(x), 'points for ', label))
                # density=False replaces the removed `normed` keyword.
                count, edges = np.histogramdd((x,y,z),
                    bins=(xedge, yedge, zedge), weights=None, density=False)
                # Bug fix: `weights != None` broadcasts elementwise when
                # weights is an ndarray, making the `if` test ambiguous.
                if weights is not None:
                    # histogramdd sums up the weights in each bin;
                    # magnitudes only, hence np.abs
                    total, edges = np.histogramdd((x,y,z),
                        bins=(xedge, yedge, zedge), weights=np.abs(weights), density=False)
                    # Summed |weights| (no division by count); empty cells -> 0.
                    bad = (count <= 0)
                    count = np.asarray(total, dtype='float32')
                    count[bad] = 0.0
                    del total, edges, bad
                # += (not rebinding) keeps the caller's grid reference valid
                if grid is None:
                    grid = count
                    out['out'] = grid
                else:
                    grid += count.astype(grid.dtype)
                    del count
            del x, y, z, weights
            gc.collect()
    except GeneratorExit:
        out['out'] = grid
# if __name__ == '__main__':
# do_profile=False
# if do_profile:
# import hotshot
# from hotshot import stats
# prof = hotshot.Profile("density_test_profile")
# prof.runcall(example)
# prof.close()
# s=stats.load("density_test_profile")
# s.sort_stats("time").print_stats()
# else:
# x_coord, y_coord, lons, lats, test_grid = example()
| [
"logging.getLogger",
"logging.NullHandler",
"numpy.abs",
"lmatools.density_tools.unique_vectors",
"numpy.fromiter",
"numpy.histogramdd",
"numpy.sqrt",
"numpy.hstack",
"numpy.searchsorted",
"numpy.isclose",
"numpy.floor",
"numpy.asarray",
"numpy.argsort",
"gc.collect",
"numpy.atleast_1d"
... | [((235, 262), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (252, 262), False, 'import logging\n'), ((278, 299), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (297, 299), False, 'import logging\n'), ((3917, 3929), 'numpy.hstack', 'np.hstack', (['a'], {}), '(a)\n', (3926, 3929), True, 'import numpy as np\n'), ((7484, 7507), 'numpy.argsort', 'np.argsort', (['start_times'], {}), '(start_times)\n', (7494, 7507), True, 'import numpy as np\n'), ((7543, 7593), 'numpy.searchsorted', 'np.searchsorted', (['start_times[sort_idx]', 'time_edges'], {}), '(start_times[sort_idx], time_edges)\n', (7558, 7593), True, 'import numpy as np\n'), ((14727, 14743), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (14740, 14743), True, 'import numpy as np\n'), ((14756, 14772), 'numpy.atleast_1d', 'np.atleast_1d', (['y'], {}), '(y)\n', (14769, 14772), True, 'import numpy as np\n'), ((7760, 7786), 'numpy.argsort', 'np.argsort', (['ev_start_times'], {}), '(ev_start_times)\n', (7770, 7786), True, 'import numpy as np\n'), ((7808, 7864), 'numpy.searchsorted', 'np.searchsorted', (['ev_start_times[ev_sort_idx]', 'time_edges'], {}), '(ev_start_times[ev_sort_idx], time_edges)\n', (7823, 7864), True, 'import numpy as np\n'), ((12639, 12683), 'lmatools.density_tools.unique_vectors', 'unique_vectors', (['x_i', 'y_i', "events['flash_id']"], {}), "(x_i, y_i, events['flash_id'])\n", (12653, 12683), False, 'from lmatools.density_tools import unique_vectors\n'), ((13733, 13782), 'lmatools.density_tools.unique_vectors', 'unique_vectors', (['x_i', 'y_i', 'z_i', "events['flash_id']"], {}), "(x_i, y_i, z_i, events['flash_id'])\n", (13747, 13782), False, 'from lmatools.density_tools import unique_vectors\n'), ((17513, 17559), 'lmatools.density_tools.unique_vectors', 'unique_vectors', (['x_i', 'y_i', 'events[flash_id_key]'], {}), '(x_i, y_i, events[flash_id_key])\n', (17527, 17559), False, 'from lmatools.density_tools import unique_vectors\n'), 
((19085, 19136), 'lmatools.density_tools.unique_vectors', 'unique_vectors', (['x_i', 'y_i', 'z_i', 'events[flash_id_key]'], {}), '(x_i, y_i, z_i, events[flash_id_key])\n', (19099, 19136), False, 'from lmatools.density_tools import unique_vectors\n'), ((20656, 20702), 'lmatools.density_tools.unique_vectors', 'unique_vectors', (['x_i', 'y_i', 'events[flash_id_key]'], {}), '(x_i, y_i, events[flash_id_key])\n', (20670, 20702), False, 'from lmatools.density_tools import unique_vectors\n'), ((24836, 24887), 'lmatools.density_tools.unique_vectors', 'unique_vectors', (['x_i', 'y_i', 'z_i', 'events[flash_id_key]'], {}), '(x_i, y_i, z_i, events[flash_id_key])\n', (24850, 24887), False, 'from lmatools.density_tools import unique_vectors\n'), ((29184, 29196), 'gc.collect', 'gc.collect', ([], {}), '()\n', (29194, 29196), False, 'import gc\n'), ((30903, 30915), 'gc.collect', 'gc.collect', ([], {}), '()\n', (30913, 30915), False, 'import gc\n'), ((33032, 33044), 'gc.collect', 'gc.collect', ([], {}), '()\n', (33042, 33044), False, 'import gc\n'), ((34868, 34880), 'gc.collect', 'gc.collect', ([], {}), '()\n', (34878, 34880), False, 'import gc\n'), ((37608, 37620), 'gc.collect', 'gc.collect', ([], {}), '()\n', (37618, 37620), False, 'import gc\n'), ((39645, 39657), 'gc.collect', 'gc.collect', ([], {}), '()\n', (39655, 39657), False, 'import gc\n'), ((41823, 41835), 'gc.collect', 'gc.collect', ([], {}), '()\n', (41833, 41835), False, 'import gc\n'), ((11951, 11967), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (11964, 11967), True, 'import numpy as np\n'), ((11990, 12006), 'numpy.atleast_1d', 'np.atleast_1d', (['y'], {}), '(y)\n', (12003, 12006), True, 'import numpy as np\n'), ((12008, 12024), 'numpy.atleast_1d', 'np.atleast_1d', (['z'], {}), '(z)\n', (12021, 12024), True, 'import numpy as np\n'), ((12368, 12391), 'numpy.floor', 'np.floor', (['((x - x0) / dx)'], {}), '((x - x0) / dx)\n', (12376, 12391), True, 'import numpy as np\n'), ((12420, 12443), 'numpy.floor', 
'np.floor', (['((y - y0) / dy)'], {}), '((y - y0) / dy)\n', (12428, 12443), True, 'import numpy as np\n'), ((13411, 13434), 'numpy.floor', 'np.floor', (['((x - x0) / dx)'], {}), '((x - x0) / dx)\n', (13419, 13434), True, 'import numpy as np\n'), ((13463, 13486), 'numpy.floor', 'np.floor', (['((y - y0) / dy)'], {}), '((y - y0) / dy)\n', (13471, 13486), True, 'import numpy as np\n'), ((13515, 13538), 'numpy.floor', 'np.floor', (['((z - z0) / dz)'], {}), '((z - z0) / dz)\n', (13523, 13538), True, 'import numpy as np\n'), ((17280, 17303), 'numpy.floor', 'np.floor', (['((x - x0) / dx)'], {}), '((x - x0) / dx)\n', (17288, 17303), True, 'import numpy as np\n'), ((17332, 17355), 'numpy.floor', 'np.floor', (['((y - y0) / dy)'], {}), '((y - y0) / dy)\n', (17340, 17355), True, 'import numpy as np\n'), ((18774, 18797), 'numpy.floor', 'np.floor', (['((x - x0) / dx)'], {}), '((x - x0) / dx)\n', (18782, 18797), True, 'import numpy as np\n'), ((18826, 18849), 'numpy.floor', 'np.floor', (['((y - y0) / dy)'], {}), '((y - y0) / dy)\n', (18834, 18849), True, 'import numpy as np\n'), ((18878, 18901), 'numpy.floor', 'np.floor', (['((z - z0) / dz)'], {}), '((z - z0) / dz)\n', (18886, 18901), True, 'import numpy as np\n'), ((20395, 20418), 'numpy.floor', 'np.floor', (['((x - x0) / dx)'], {}), '((x - x0) / dx)\n', (20403, 20418), True, 'import numpy as np\n'), ((20447, 20470), 'numpy.floor', 'np.floor', (['((y - y0) / dy)'], {}), '((y - y0) / dy)\n', (20455, 20470), True, 'import numpy as np\n'), ((20964, 21055), 'numpy.fromiter', 'np.fromiter', (["(weight_lookup[fi] for fi in events[unq_idx]['flash_id'])"], {'dtype': '"""float64"""'}), "((weight_lookup[fi] for fi in events[unq_idx]['flash_id']),\n dtype='float64')\n", (20975, 21055), True, 'import numpy as np\n'), ((24525, 24548), 'numpy.floor', 'np.floor', (['((x - x0) / dx)'], {}), '((x - x0) / dx)\n', (24533, 24548), True, 'import numpy as np\n'), ((24577, 24600), 'numpy.floor', 'np.floor', (['((y - y0) / dy)'], {}), '((y - y0) / 
dy)\n', (24585, 24600), True, 'import numpy as np\n'), ((24629, 24652), 'numpy.floor', 'np.floor', (['((z - z0) / dz)'], {}), '((z - z0) / dz)\n', (24637, 24652), True, 'import numpy as np\n'), ((27058, 27074), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (27071, 27074), True, 'import numpy as np\n'), ((27095, 27111), 'numpy.atleast_1d', 'np.atleast_1d', (['y'], {}), '(y)\n', (27108, 27111), True, 'import numpy as np\n'), ((27217, 27293), 'numpy.histogramdd', 'np.histogramdd', (['(x, y)'], {'bins': '(xedge, yedge)', 'weights': 'grid_frac', 'normed': '(False)'}), '((x, y), bins=(xedge, yedge), weights=grid_frac, normed=False)\n', (27231, 27293), True, 'import numpy as np\n'), ((29647, 29663), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (29660, 29663), True, 'import numpy as np\n'), ((29684, 29700), 'numpy.atleast_1d', 'np.atleast_1d', (['y'], {}), '(y)\n', (29697, 29700), True, 'import numpy as np\n'), ((29721, 29737), 'numpy.atleast_1d', 'np.atleast_1d', (['z'], {}), '(z)\n', (29734, 29737), True, 'import numpy as np\n'), ((29843, 29929), 'numpy.histogramdd', 'np.histogramdd', (['(x, y, z)'], {'bins': '(xedge, yedge, zedge)', 'weights': 'None', 'normed': '(False)'}), '((x, y, z), bins=(xedge, yedge, zedge), weights=None, normed=\n False)\n', (29857, 29929), True, 'import numpy as np\n'), ((31719, 31735), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (31732, 31735), True, 'import numpy as np\n'), ((31756, 31772), 'numpy.atleast_1d', 'np.atleast_1d', (['y'], {}), '(y)\n', (31769, 31772), True, 'import numpy as np\n'), ((31878, 31954), 'numpy.histogramdd', 'np.histogramdd', (['(x, y)'], {'bins': '(xedge, yedge)', 'weights': 'grid_frac', 'normed': '(False)'}), '((x, y), bins=(xedge, yedge), weights=grid_frac, normed=False)\n', (31892, 31954), True, 'import numpy as np\n'), ((33507, 33523), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (33520, 33523), True, 'import numpy as np\n'), ((33544, 33560), 
'numpy.atleast_1d', 'np.atleast_1d', (['y'], {}), '(y)\n', (33557, 33560), True, 'import numpy as np\n'), ((33581, 33597), 'numpy.atleast_1d', 'np.atleast_1d', (['z'], {}), '(z)\n', (33594, 33597), True, 'import numpy as np\n'), ((33703, 33789), 'numpy.histogramdd', 'np.histogramdd', (['(x, y, z)'], {'bins': '(xedge, yedge, zedge)', 'weights': 'None', 'normed': '(False)'}), '((x, y, z), bins=(xedge, yedge, zedge), weights=None, normed=\n False)\n', (33717, 33789), True, 'import numpy as np\n'), ((35897, 35913), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (35910, 35913), True, 'import numpy as np\n'), ((35934, 35950), 'numpy.atleast_1d', 'np.atleast_1d', (['y'], {}), '(y)\n', (35947, 35950), True, 'import numpy as np\n'), ((36056, 36132), 'numpy.histogramdd', 'np.histogramdd', (['(x, y)'], {'bins': '(xedge, yedge)', 'weights': 'grid_frac', 'normed': '(False)'}), '((x, y), bins=(xedge, yedge), weights=grid_frac, normed=False)\n', (36070, 36132), True, 'import numpy as np\n'), ((38300, 38316), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (38313, 38316), True, 'import numpy as np\n'), ((38337, 38353), 'numpy.atleast_1d', 'np.atleast_1d', (['y'], {}), '(y)\n', (38350, 38353), True, 'import numpy as np\n'), ((38459, 38535), 'numpy.histogramdd', 'np.histogramdd', (['(x, y)'], {'bins': '(xedge, yedge)', 'weights': 'grid_frac', 'normed': '(False)'}), '((x, y), bins=(xedge, yedge), weights=grid_frac, normed=False)\n', (38473, 38535), True, 'import numpy as np\n'), ((40108, 40124), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (40121, 40124), True, 'import numpy as np\n'), ((40145, 40161), 'numpy.atleast_1d', 'np.atleast_1d', (['y'], {}), '(y)\n', (40158, 40161), True, 'import numpy as np\n'), ((40182, 40198), 'numpy.atleast_1d', 'np.atleast_1d', (['z'], {}), '(z)\n', (40195, 40198), True, 'import numpy as np\n'), ((40304, 40390), 'numpy.histogramdd', 'np.histogramdd', (['(x, y, z)'], {'bins': '(xedge, yedge, zedge)', 'weights': 
'None', 'normed': '(False)'}), '((x, y, z), bins=(xedge, yedge, zedge), weights=None, normed=\n False)\n', (40318, 40390), True, 'import numpy as np\n'), ((15072, 15150), 'numpy.fromiter', 'np.fromiter', (["(weight_lookup[fi] for fi in events['flash_id'])"], {'dtype': '"""float64"""'}), "((weight_lookup[fi] for fi in events['flash_id']), dtype='float64')\n", (15083, 15150), True, 'import numpy as np\n'), ((16193, 16271), 'numpy.fromiter', 'np.fromiter', (["(weight_lookup[fi] for fi in events['flash_id'])"], {'dtype': '"""float64"""'}), "((weight_lookup[fi] for fi in events['flash_id']), dtype='float64')\n", (16204, 16271), True, 'import numpy as np\n'), ((27754, 27828), 'numpy.histogramdd', 'np.histogramdd', (['(x, y)'], {'bins': '(xedge, yedge)', 'weights': 'weights', 'normed': '(False)'}), '((x, y), bins=(xedge, yedge), weights=weights, normed=False)\n', (27768, 27828), True, 'import numpy as np\n'), ((30094, 30182), 'numpy.histogramdd', 'np.histogramdd', (['(x, y, z)'], {'bins': '(xedge, yedge, zedge)', 'weights': 'weights', 'normed': '(False)'}), '((x, y, z), bins=(xedge, yedge, zedge), weights=weights,\n normed=False)\n', (30108, 30182), True, 'import numpy as np\n'), ((32936, 32964), 'numpy.sqrt', 'np.sqrt', (['(grid - grid2 ** 2.0)'], {}), '(grid - grid2 ** 2.0)\n', (32943, 32964), True, 'import numpy as np\n'), ((34769, 34797), 'numpy.sqrt', 'np.sqrt', (['(grid - grid2 ** 2.0)'], {}), '(grid - grid2 ** 2.0)\n', (34776, 34797), True, 'import numpy as np\n'), ((36445, 36519), 'numpy.histogramdd', 'np.histogramdd', (['(x, y)'], {'bins': '(xedge, yedge)', 'weights': 'weights', 'normed': '(False)'}), '((x, y), bins=(xedge, yedge), weights=weights, normed=False)\n', (36459, 36519), True, 'import numpy as np\n'), ((36675, 36709), 'numpy.asarray', 'np.asarray', (['total'], {'dtype': '"""float32"""'}), "(total, dtype='float32')\n", (36685, 36709), True, 'import numpy as np\n'), ((38848, 38922), 'numpy.histogramdd', 'np.histogramdd', (['(x, y)'], {'bins': '(xedge, 
yedge)', 'weights': 'weights', 'normed': '(False)'}), '((x, y), bins=(xedge, yedge), weights=weights, normed=False)\n', (38862, 38922), True, 'import numpy as np\n'), ((39078, 39112), 'numpy.asarray', 'np.asarray', (['total'], {'dtype': '"""float32"""'}), "(total, dtype='float32')\n", (39088, 39112), True, 'import numpy as np\n'), ((40799, 40833), 'numpy.asarray', 'np.asarray', (['total'], {'dtype': '"""float32"""'}), "(total, dtype='float32')\n", (40809, 40833), True, 'import numpy as np\n'), ((30309, 30343), 'numpy.asarray', 'np.asarray', (['total'], {'dtype': '"""float32"""'}), "(total, dtype='float32')\n", (30319, 30343), True, 'import numpy as np\n'), ((32372, 32406), 'numpy.asarray', 'np.asarray', (['total'], {'dtype': '"""float32"""'}), "(total, dtype='float32')\n", (32382, 32406), True, 'import numpy as np\n'), ((34206, 34240), 'numpy.asarray', 'np.asarray', (['total'], {'dtype': '"""float32"""'}), "(total, dtype='float32')\n", (34216, 34240), True, 'import numpy as np\n'), ((37267, 37286), 'numpy.isclose', 'np.isclose', (['grid', '(0)'], {}), '(grid, 0)\n', (37277, 37286), True, 'import numpy as np\n'), ((28800, 28839), 'numpy.asarray', 'np.asarray', (['total_hist'], {'dtype': '"""float32"""'}), "(total_hist, dtype='float32')\n", (28810, 28839), True, 'import numpy as np\n'), ((40615, 40630), 'numpy.abs', 'np.abs', (['weights'], {}), '(weights)\n', (40621, 40630), True, 'import numpy as np\n'), ((32180, 32199), 'numpy.asarray', 'np.asarray', (['weights'], {}), '(weights)\n', (32190, 32199), True, 'import numpy as np\n'), ((34014, 34033), 'numpy.asarray', 'np.asarray', (['weights'], {}), '(weights)\n', (34024, 34033), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:Drawon
# Time:2020/11/6 16:01
# Version:python 3.7.6
import logging
import datetime
import numpy as np
import pandas as pd
from statsmodels.tsa.holtwinters import Holt
import warnings
warnings.filterwarnings('ignore')
def valueForecast(file):
"""
电费预测
:param data: 电量数据
格式为:用户 日期 使用电量值
:return: 预测电量值
"""
logging.debug('开始运行')
data = pd.read_excel(file)
if data.shape[0] == 0:
raise ValueError('相关性原始数据不存在')
data.iloc[:, 0] = data.iloc[:,0].astype(str)
users = set(data.iloc[:,0].values)
# 用电量预测
result_pre = pd.DataFrame(columns=['DATA_DATE', 'DATA_DATE1', 'DATA_DATE2', 'DATA_DATE3', 'DATA_DATE4', 'DATA_DATE5'])
for user in users:
subdata = data.loc[data.iloc[:,0]==user]
df_index = pd.MultiIndex.from_frame(subdata.iloc[:, 1:2])
df = pd.DataFrame(np.array(subdata.iloc[:,-1]).reshape(1,-1),columns=df_index)
df.dropna(axis=1,inplace=True)
df_values = df.values.flatten()
model = Holt(endog=df_values, initialization_method='estimated', ).fit()
pre = model.forecast(steps=5)
print(f'数据的预测 {pre}')
res2 = pd.DataFrame(pre).T
res2.columns = ['DATA_DATE1', 'DATA_DATE2', 'DATA_DATE3', 'DATA_DATE4', 'DATA_DATE5']
res2['DATA_DATE'] = datetime.date.today()
res2['USRE'] = user
print(f'RES2 {res2}')
result_pre = result_pre.append(res2, ignore_index=True)
print(result_pre)
return result_pre
| [
"pandas.MultiIndex.from_frame",
"statsmodels.tsa.holtwinters.Holt",
"logging.debug",
"numpy.array",
"pandas.read_excel",
"pandas.DataFrame",
"datetime.date.today",
"warnings.filterwarnings"
] | [((240, 273), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (263, 273), False, 'import warnings\n'), ((390, 411), 'logging.debug', 'logging.debug', (['"""开始运行"""'], {}), "('开始运行')\n", (403, 411), False, 'import logging\n'), ((423, 442), 'pandas.read_excel', 'pd.read_excel', (['file'], {}), '(file)\n', (436, 442), True, 'import pandas as pd\n'), ((627, 736), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['DATA_DATE', 'DATA_DATE1', 'DATA_DATE2', 'DATA_DATE3', 'DATA_DATE4',\n 'DATA_DATE5']"}), "(columns=['DATA_DATE', 'DATA_DATE1', 'DATA_DATE2', 'DATA_DATE3',\n 'DATA_DATE4', 'DATA_DATE5'])\n", (639, 736), True, 'import pandas as pd\n'), ((824, 870), 'pandas.MultiIndex.from_frame', 'pd.MultiIndex.from_frame', (['subdata.iloc[:, 1:2]'], {}), '(subdata.iloc[:, 1:2])\n', (848, 870), True, 'import pandas as pd\n'), ((1343, 1364), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1362, 1364), False, 'import datetime\n'), ((1201, 1218), 'pandas.DataFrame', 'pd.DataFrame', (['pre'], {}), '(pre)\n', (1213, 1218), True, 'import pandas as pd\n'), ((1053, 1109), 'statsmodels.tsa.holtwinters.Holt', 'Holt', ([], {'endog': 'df_values', 'initialization_method': '"""estimated"""'}), "(endog=df_values, initialization_method='estimated')\n", (1057, 1109), False, 'from statsmodels.tsa.holtwinters import Holt\n'), ((897, 926), 'numpy.array', 'np.array', (['subdata.iloc[:, -1]'], {}), '(subdata.iloc[:, -1])\n', (905, 926), True, 'import numpy as np\n')] |
import torch
from rbodo.models import MlpIn, MlpOut, CnnIn, CnnOut, RnnOut
from thop import profile
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
sb.set()
input_shape = (100, 64)
num_frames = 10
num_embed = 128
num_sweeps = 20
num_classes = 5
print('MLP')
ops_mlp = []
params_mlp = []
neurons_mlp = []
for s in range(num_sweeps):
_num_embed = num_embed * (s + 1) * 2
shape_in = (num_frames, int(np.prod(input_shape)),)
model_in = MlpIn(input_shape, _num_embed)
model_out = MlpOut(_num_embed, num_frames, num_classes)
macs_in, params_in, neurons_in, macs_per_layer_in, params_per_layer_in, neurons_per_layer_in = \
profile(model_in, (torch.randn(shape_in),))
shape_out = (1, _num_embed * num_frames)
macs_out, params_out, neurons_out, macs_per_layer_out, params_per_layer_out, neurons_per_layer_out = \
profile(model_out, (torch.randn(shape_out),))
ops_mlp.append(macs_in + macs_out)
params_mlp.append(params_in + params_out)
neurons_mlp.append(neurons_in + neurons_out)
print('CNN')
ops_cnn = []
params_cnn = []
neurons_cnn = []
for s in range(num_sweeps):
num_channels = 6 * (s + 1)
shape_in = (num_frames, 1, *input_shape)
model_in = CnnIn(num_embed, num_frames, num_channels)
model_out = CnnOut(num_embed, num_classes)
macs_in, params_in, neurons_in, macs_per_layer_in, params_per_layer_in, neurons_per_layer_in = \
profile(model_in, (torch.randn(shape_in),))
shape_out = (1, num_embed)
macs_out, params_out, neurons_out, macs_per_layer_out, params_per_layer_out, neurons_per_layer_out = \
profile(model_out, (torch.randn(shape_out),))
ops_cnn.append(macs_in + macs_out)
params_cnn.append(params_in + params_out)
neurons_cnn.append(neurons_in + neurons_out)
print('RNN')
ops_rnn = []
params_rnn = []
neurons_rnn = []
for s in range(num_sweeps):
shape_in = (num_frames, 1, *input_shape)
model_in = CnnIn(num_embed, num_frames, 6)
model_out = RnnOut(num_embed, num_classes, (s + 1) * 4)
macs_in, params_in, neurons_in, macs_per_layer_in, params_per_layer_in, neurons_per_layer_in = \
profile(model_in, (torch.randn(shape_in),))
shape_out = (1, num_frames, 3 * 42)
macs_out, params_out, neurons_out, macs_per_layer_out, params_per_layer_out, neurons_per_layer_out = \
profile(model_out, (torch.randn(shape_out),))
ops_rnn.append(macs_in + macs_out)
params_rnn.append(params_in + params_out)
neurons_rnn.append(neurons_in + neurons_out)
normalize = True
log = False
if normalize:
neurons_mlp = np.divide(neurons_mlp, np.min(neurons_mlp))
neurons_cnn = np.divide(neurons_cnn, np.min(neurons_cnn))
neurons_rnn = np.divide(neurons_rnn, np.min(neurons_rnn))
ops_mlp = np.divide(ops_mlp, np.min(ops_mlp))
ops_cnn = np.divide(ops_cnn, np.min(ops_cnn))
ops_rnn = np.divide(ops_rnn, np.min(ops_rnn))
params_mlp = np.divide(params_mlp, np.min(params_mlp))
params_cnn = np.divide(params_cnn, np.min(params_cnn))
params_rnn = np.divide(params_rnn, np.min(params_rnn))
# plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
plt.scatter(neurons_mlp, ops_mlp, label='mlp')
plt.scatter(neurons_cnn, ops_cnn, label='cnn')
plt.scatter(neurons_rnn, ops_rnn, label='rnn')
if log:
plt.xscale('log')
plt.yscale('log')
if normalize:
plt.xlabel('Scale factor neurons')
plt.ylabel('Scale factor parameters')
else:
plt.xlabel('# neurons')
plt.ylabel('# ops')
plt.legend()
plt.savefig('ops')
plt.close()
plt.scatter(neurons_mlp, params_mlp, label='mlp')
plt.scatter(neurons_cnn, params_cnn, label='cnn')
plt.scatter(neurons_rnn, params_rnn, label='rnn')
if log:
plt.xscale('log')
plt.yscale('log')
if normalize:
plt.xlabel('Scale factor neurons')
plt.ylabel('Scale factor parameters')
else:
plt.xlabel('# neurons')
plt.ylabel('# params')
plt.legend()
plt.savefig('params')
plt.close()
| [
"numpy.prod",
"seaborn.set",
"rbodo.models.MlpOut",
"matplotlib.pyplot.savefig",
"rbodo.models.CnnOut",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"rbodo.models.RnnOut",
"rbodo.models.CnnIn",
"matplotlib.pyplot.close",
"matplotlib.pyplot.scatter",
"nu... | [((172, 180), 'seaborn.set', 'sb.set', ([], {}), '()\n', (178, 180), True, 'import seaborn as sb\n'), ((3160, 3206), 'matplotlib.pyplot.scatter', 'plt.scatter', (['neurons_mlp', 'ops_mlp'], {'label': '"""mlp"""'}), "(neurons_mlp, ops_mlp, label='mlp')\n", (3171, 3206), True, 'import matplotlib.pyplot as plt\n'), ((3207, 3253), 'matplotlib.pyplot.scatter', 'plt.scatter', (['neurons_cnn', 'ops_cnn'], {'label': '"""cnn"""'}), "(neurons_cnn, ops_cnn, label='cnn')\n", (3218, 3253), True, 'import matplotlib.pyplot as plt\n'), ((3254, 3300), 'matplotlib.pyplot.scatter', 'plt.scatter', (['neurons_rnn', 'ops_rnn'], {'label': '"""rnn"""'}), "(neurons_rnn, ops_rnn, label='rnn')\n", (3265, 3300), True, 'import matplotlib.pyplot as plt\n'), ((3506, 3518), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3516, 3518), True, 'import matplotlib.pyplot as plt\n'), ((3519, 3537), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ops"""'], {}), "('ops')\n", (3530, 3537), True, 'import matplotlib.pyplot as plt\n'), ((3538, 3549), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3547, 3549), True, 'import matplotlib.pyplot as plt\n'), ((3551, 3600), 'matplotlib.pyplot.scatter', 'plt.scatter', (['neurons_mlp', 'params_mlp'], {'label': '"""mlp"""'}), "(neurons_mlp, params_mlp, label='mlp')\n", (3562, 3600), True, 'import matplotlib.pyplot as plt\n'), ((3601, 3650), 'matplotlib.pyplot.scatter', 'plt.scatter', (['neurons_cnn', 'params_cnn'], {'label': '"""cnn"""'}), "(neurons_cnn, params_cnn, label='cnn')\n", (3612, 3650), True, 'import matplotlib.pyplot as plt\n'), ((3651, 3700), 'matplotlib.pyplot.scatter', 'plt.scatter', (['neurons_rnn', 'params_rnn'], {'label': '"""rnn"""'}), "(neurons_rnn, params_rnn, label='rnn')\n", (3662, 3700), True, 'import matplotlib.pyplot as plt\n'), ((3909, 3921), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3919, 3921), True, 'import matplotlib.pyplot as plt\n'), ((3922, 3943), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['"""params"""'], {}), "('params')\n", (3933, 3943), True, 'import matplotlib.pyplot as plt\n'), ((3944, 3955), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3953, 3955), True, 'import matplotlib.pyplot as plt\n'), ((471, 501), 'rbodo.models.MlpIn', 'MlpIn', (['input_shape', '_num_embed'], {}), '(input_shape, _num_embed)\n', (476, 501), False, 'from rbodo.models import MlpIn, MlpOut, CnnIn, CnnOut, RnnOut\n'), ((518, 561), 'rbodo.models.MlpOut', 'MlpOut', (['_num_embed', 'num_frames', 'num_classes'], {}), '(_num_embed, num_frames, num_classes)\n', (524, 561), False, 'from rbodo.models import MlpIn, MlpOut, CnnIn, CnnOut, RnnOut\n'), ((1236, 1278), 'rbodo.models.CnnIn', 'CnnIn', (['num_embed', 'num_frames', 'num_channels'], {}), '(num_embed, num_frames, num_channels)\n', (1241, 1278), False, 'from rbodo.models import MlpIn, MlpOut, CnnIn, CnnOut, RnnOut\n'), ((1295, 1325), 'rbodo.models.CnnOut', 'CnnOut', (['num_embed', 'num_classes'], {}), '(num_embed, num_classes)\n', (1301, 1325), False, 'from rbodo.models import MlpIn, MlpOut, CnnIn, CnnOut, RnnOut\n'), ((1955, 1986), 'rbodo.models.CnnIn', 'CnnIn', (['num_embed', 'num_frames', '(6)'], {}), '(num_embed, num_frames, 6)\n', (1960, 1986), False, 'from rbodo.models import MlpIn, MlpOut, CnnIn, CnnOut, RnnOut\n'), ((2003, 2046), 'rbodo.models.RnnOut', 'RnnOut', (['num_embed', 'num_classes', '((s + 1) * 4)'], {}), '(num_embed, num_classes, (s + 1) * 4)\n', (2009, 2046), False, 'from rbodo.models import MlpIn, MlpOut, CnnIn, CnnOut, RnnOut\n'), ((3313, 3330), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (3323, 3330), True, 'import matplotlib.pyplot as plt\n'), ((3335, 3352), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (3345, 3352), True, 'import matplotlib.pyplot as plt\n'), ((3371, 3405), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Scale factor neurons"""'], {}), "('Scale factor neurons')\n", (3381, 3405), True, 'import 
matplotlib.pyplot as plt\n'), ((3410, 3447), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Scale factor parameters"""'], {}), "('Scale factor parameters')\n", (3420, 3447), True, 'import matplotlib.pyplot as plt\n'), ((3458, 3481), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""# neurons"""'], {}), "('# neurons')\n", (3468, 3481), True, 'import matplotlib.pyplot as plt\n'), ((3486, 3505), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# ops"""'], {}), "('# ops')\n", (3496, 3505), True, 'import matplotlib.pyplot as plt\n'), ((3713, 3730), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (3723, 3730), True, 'import matplotlib.pyplot as plt\n'), ((3735, 3752), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (3745, 3752), True, 'import matplotlib.pyplot as plt\n'), ((3771, 3805), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Scale factor neurons"""'], {}), "('Scale factor neurons')\n", (3781, 3805), True, 'import matplotlib.pyplot as plt\n'), ((3810, 3847), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Scale factor parameters"""'], {}), "('Scale factor parameters')\n", (3820, 3847), True, 'import matplotlib.pyplot as plt\n'), ((3858, 3881), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""# neurons"""'], {}), "('# neurons')\n", (3868, 3881), True, 'import matplotlib.pyplot as plt\n'), ((3886, 3908), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# params"""'], {}), "('# params')\n", (3896, 3908), True, 'import matplotlib.pyplot as plt\n'), ((2623, 2642), 'numpy.min', 'np.min', (['neurons_mlp'], {}), '(neurons_mlp)\n', (2629, 2642), True, 'import numpy as np\n'), ((2685, 2704), 'numpy.min', 'np.min', (['neurons_cnn'], {}), '(neurons_cnn)\n', (2691, 2704), True, 'import numpy as np\n'), ((2747, 2766), 'numpy.min', 'np.min', (['neurons_rnn'], {}), '(neurons_rnn)\n', (2753, 2766), True, 'import numpy as np\n'), ((2801, 2816), 'numpy.min', 'np.min', (['ops_mlp'], {}), '(ops_mlp)\n', (2807, 2816), True, 'import 
numpy as np\n'), ((2851, 2866), 'numpy.min', 'np.min', (['ops_cnn'], {}), '(ops_cnn)\n', (2857, 2866), True, 'import numpy as np\n'), ((2901, 2916), 'numpy.min', 'np.min', (['ops_rnn'], {}), '(ops_rnn)\n', (2907, 2916), True, 'import numpy as np\n'), ((2957, 2975), 'numpy.min', 'np.min', (['params_mlp'], {}), '(params_mlp)\n', (2963, 2975), True, 'import numpy as np\n'), ((3016, 3034), 'numpy.min', 'np.min', (['params_cnn'], {}), '(params_cnn)\n', (3022, 3034), True, 'import numpy as np\n'), ((3075, 3093), 'numpy.min', 'np.min', (['params_rnn'], {}), '(params_rnn)\n', (3081, 3093), True, 'import numpy as np\n'), ((432, 452), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (439, 452), True, 'import numpy as np\n'), ((690, 711), 'torch.randn', 'torch.randn', (['shape_in'], {}), '(shape_in)\n', (701, 711), False, 'import torch\n'), ((896, 918), 'torch.randn', 'torch.randn', (['shape_out'], {}), '(shape_out)\n', (907, 918), False, 'import torch\n'), ((1454, 1475), 'torch.randn', 'torch.randn', (['shape_in'], {}), '(shape_in)\n', (1465, 1475), False, 'import torch\n'), ((1646, 1668), 'torch.randn', 'torch.randn', (['shape_out'], {}), '(shape_out)\n', (1657, 1668), False, 'import torch\n'), ((2175, 2196), 'torch.randn', 'torch.randn', (['shape_in'], {}), '(shape_in)\n', (2186, 2196), False, 'import torch\n'), ((2376, 2398), 'torch.randn', 'torch.randn', (['shape_out'], {}), '(shape_out)\n', (2387, 2398), False, 'import torch\n')] |
import abc
from collections import OrderedDict
from pathlib import Path
import numpy as np
import pandas as pd
from ibllib.io import raw_data_loaders as raw
import logging
log = logging.getLogger("ibllib")
class BaseExtractor(abc.ABC):
"""
Base extractor class
:param session_path: Absolute path of session folder
:type session_path: str
"""
session_path = None
save_names = None
default_path = Path("alf") # relative to session
def __init__(self, session_path=None):
# If session_path is None Path(session_path) will fail
self.session_path = Path(session_path)
def extract(self, save=False, path_out=None, **kwargs):
"""
:return: numpy.ndarray or list of ndarrays, list of filenames
:rtype: dtype('float64')
"""
out = self._extract(**kwargs)
files = self._save(out, path_out=path_out) if save else None
return out, files
def _save(self, data, path_out=None):
# Chack if self.save_namesis of the same length of out
if not path_out:
path_out = self.session_path.joinpath(self.default_path)
path_out.mkdir(exist_ok=True, parents=True)
def _write_to_disk(file_path, data):
"""Implements different save calls depending on file extension"""
csv_separators = {
".csv": ",",
".ssv": " ",
".tsv": "\t",
}
file_path = Path(file_path)
if file_path.suffix == ".npy":
np.save(file_path, data)
elif file_path.suffix in [".parquet", ".pqt"]:
if not isinstance(data, pd.DataFrame):
log.error("Data is not a panda's DataFrame object")
raise TypeError("Data is not a panda's DataFrame object")
data.to_parquet(file_path)
elif file_path.suffix in [".csv", ".ssv", ".tsv"]:
sep = csv_separators[file_path.suffix]
data.to_csv(file_path, sep=sep)
# np.savetxt(file_path, data, delimiter=sep)
else:
log.error(f"Don't know how to save {file_path.suffix} files yet")
if isinstance(self.save_names, str):
file_paths = path_out.joinpath(self.save_names)
_write_to_disk(file_paths, data)
else: # Should be list or tuple...
assert len(data) == len(self.save_names)
file_paths = []
for data, fn in zip(data, self.save_names):
fpath = path_out.joinpath(fn)
_write_to_disk(fpath, data)
file_paths.append(fpath)
return file_paths
@abc.abstractmethod
def _extract(self):
pass
class BaseBpodTrialsExtractor(BaseExtractor):
"""
Base (abstract) extractor class for bpod jsonable data set
Wrps the _extract private method
:param session_path: Absolute path of session folder
:type session_path: str
:param bpod_trials
:param settings
"""
bpod_trials = None
settings = None
def extract(self, bpod_trials=None, settings=None, **kwargs):
"""
:param: bpod_trials (optional) bpod trials from jsonable in a dictionary
:param: settings (optional) bpod iblrig settings json file in a dictionary
:param: save (bool) write output ALF files, defaults to False
:param: path_out (pathlib.Path) output path (defaults to `{session_path}/alf`)
:return: numpy.ndarray or list of ndarrays, list of filenames
:rtype: dtype('float64')
"""
self.bpod_trials = bpod_trials
self.settings = settings
if self.bpod_trials is None:
self.bpod_trials = raw.load_data(self.session_path)
if not self.settings:
self.settings = raw.load_settings(self.session_path)
if self.settings is None:
self.settings = {"IBLRIG_VERSION_TAG": "100.0.0"}
elif self.settings["IBLRIG_VERSION_TAG"] == "":
self.settings["IBLRIG_VERSION_TAG"] = "100.0.0"
return super(BaseBpodTrialsExtractor, self).extract(**kwargs)
def run_extractor_classes(classes, session_path=None, **kwargs):
"""
Run a set of extractors with the same inputs
:param classes: list of Extractor class
:param save: True/False
:param path_out: (defaults to alf path)
:param kwargs: extractor arguments (session_path...)
:return: dictionary of arrays, list of files
"""
files = []
outputs = OrderedDict({})
assert session_path
# if a single class is passed, convert as a list
try:
iter(classes)
except TypeError:
classes = [classes]
for classe in classes:
out, fil = classe(session_path=session_path).extract(**kwargs)
if isinstance(fil, list):
files.extend(fil)
elif fil is not None:
files.append(fil)
if isinstance(classe.var_names, str):
outputs[classe.var_names] = out
else:
for i, k in enumerate(classe.var_names):
outputs[k] = out[i]
assert (len(files) == 0) or (len(files) == len(outputs.keys()))
return outputs, files
| [
"logging.getLogger",
"collections.OrderedDict",
"pathlib.Path",
"ibllib.io.raw_data_loaders.load_settings",
"numpy.save",
"ibllib.io.raw_data_loaders.load_data"
] | [((181, 208), 'logging.getLogger', 'logging.getLogger', (['"""ibllib"""'], {}), "('ibllib')\n", (198, 208), False, 'import logging\n'), ((434, 445), 'pathlib.Path', 'Path', (['"""alf"""'], {}), "('alf')\n", (438, 445), False, 'from pathlib import Path\n'), ((4547, 4562), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (4558, 4562), False, 'from collections import OrderedDict\n'), ((604, 622), 'pathlib.Path', 'Path', (['session_path'], {}), '(session_path)\n', (608, 622), False, 'from pathlib import Path\n'), ((1477, 1492), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (1481, 1492), False, 'from pathlib import Path\n'), ((3754, 3786), 'ibllib.io.raw_data_loaders.load_data', 'raw.load_data', (['self.session_path'], {}), '(self.session_path)\n', (3767, 3786), True, 'from ibllib.io import raw_data_loaders as raw\n'), ((3845, 3881), 'ibllib.io.raw_data_loaders.load_settings', 'raw.load_settings', (['self.session_path'], {}), '(self.session_path)\n', (3862, 3881), True, 'from ibllib.io import raw_data_loaders as raw\n'), ((1552, 1576), 'numpy.save', 'np.save', (['file_path', 'data'], {}), '(file_path, data)\n', (1559, 1576), True, 'import numpy as np\n')] |
"""
Warm start to choose regularisation strength
============================================
"""
###############################################################################
# Setup
# -----
import matplotlib.pyplot as plt
import numpy as np
from group_lasso import GroupLasso
np.random.seed(0)
GroupLasso.LOG_LOSSES = True
###############################################################################
# Set dataset parameters
# ----------------------
group_sizes = [np.random.randint(10, 20) for i in range(50)]
active_groups = [np.random.randint(2) for _ in group_sizes]
groups = np.concatenate(
[size * [i] for i, size in enumerate(group_sizes)]
).reshape(-1, 1)
num_coeffs = sum(group_sizes)
num_datapoints = 10000
noise_std = 20
###############################################################################
# Generate data matrix
# --------------------
X = np.random.standard_normal((num_datapoints, num_coeffs))
###############################################################################
# Generate coefficients
# ---------------------
w = np.concatenate(
[
np.random.standard_normal(group_size) * is_active
for group_size, is_active in zip(group_sizes, active_groups)
]
)
w = w.reshape(-1, 1)
true_coefficient_mask = w != 0
intercept = 2
###############################################################################
# Generate regression targets
# ---------------------------
y_true = X @ w + intercept
y = y_true + np.random.randn(*y_true.shape) * noise_std
###############################################################################
# Generate estimator and train it
# -------------------------------
num_regs = 10
regularisations = np.logspace(-0.5, 1.5, num_regs)
weights = np.empty((num_regs, w.shape[0],))
gl = GroupLasso(
groups=groups,
group_reg=5,
l1_reg=0,
frobenius_lipschitz=True,
scale_reg="inverse_group_size",
subsampling_scheme=1,
supress_warning=True,
n_iter=1000,
tol=1e-3,
warm_start=True, # Warm start to start each subsequent fit with previous weights
)
for i, group_reg in enumerate(regularisations[::-1]):
gl.group_reg = group_reg
gl.fit(X, y)
weights[-(i + 1)] = gl.sparsity_mask_.squeeze()
###############################################################################
# Visualise chosen covariate groups
# ---------------------------------
plt.figure()
plt.pcolormesh(np.arange(w.shape[0]), regularisations, -weights, cmap="gray")
plt.yscale("log")
plt.xlabel("Covariate number")
plt.ylabel("Regularisation strength")
plt.title("Active groups are black and inactive groups are white")
plt.show()
| [
"numpy.random.standard_normal",
"group_lasso.GroupLasso",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"numpy.empty",
"numpy.random.seed",
"matplotlib.pyplot.title",
"numpy.logspace",
"numpy.random.randn... | [((284, 301), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (298, 301), True, 'import numpy as np\n'), ((881, 936), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(num_datapoints, num_coeffs)'], {}), '((num_datapoints, num_coeffs))\n', (906, 936), True, 'import numpy as np\n'), ((1701, 1733), 'numpy.logspace', 'np.logspace', (['(-0.5)', '(1.5)', 'num_regs'], {}), '(-0.5, 1.5, num_regs)\n', (1712, 1733), True, 'import numpy as np\n'), ((1744, 1776), 'numpy.empty', 'np.empty', (['(num_regs, w.shape[0])'], {}), '((num_regs, w.shape[0]))\n', (1752, 1776), True, 'import numpy as np\n'), ((1783, 1983), 'group_lasso.GroupLasso', 'GroupLasso', ([], {'groups': 'groups', 'group_reg': '(5)', 'l1_reg': '(0)', 'frobenius_lipschitz': '(True)', 'scale_reg': '"""inverse_group_size"""', 'subsampling_scheme': '(1)', 'supress_warning': '(True)', 'n_iter': '(1000)', 'tol': '(0.001)', 'warm_start': '(True)'}), "(groups=groups, group_reg=5, l1_reg=0, frobenius_lipschitz=True,\n scale_reg='inverse_group_size', subsampling_scheme=1, supress_warning=\n True, n_iter=1000, tol=0.001, warm_start=True)\n", (1793, 1983), False, 'from group_lasso import GroupLasso\n'), ((2389, 2401), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2399, 2401), True, 'import matplotlib.pyplot as plt\n'), ((2480, 2497), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (2490, 2497), True, 'import matplotlib.pyplot as plt\n'), ((2498, 2528), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Covariate number"""'], {}), "('Covariate number')\n", (2508, 2528), True, 'import matplotlib.pyplot as plt\n'), ((2529, 2566), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Regularisation strength"""'], {}), "('Regularisation strength')\n", (2539, 2566), True, 'import matplotlib.pyplot as plt\n'), ((2567, 2633), 'matplotlib.pyplot.title', 'plt.title', (['"""Active groups are black and inactive groups are white"""'], {}), "('Active 
groups are black and inactive groups are white')\n", (2576, 2633), True, 'import matplotlib.pyplot as plt\n'), ((2634, 2644), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2642, 2644), True, 'import matplotlib.pyplot as plt\n'), ((478, 503), 'numpy.random.randint', 'np.random.randint', (['(10)', '(20)'], {}), '(10, 20)\n', (495, 503), True, 'import numpy as np\n'), ((541, 561), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (558, 561), True, 'import numpy as np\n'), ((2417, 2438), 'numpy.arange', 'np.arange', (['w.shape[0]'], {}), '(w.shape[0])\n', (2426, 2438), True, 'import numpy as np\n'), ((1476, 1506), 'numpy.random.randn', 'np.random.randn', (['*y_true.shape'], {}), '(*y_true.shape)\n', (1491, 1506), True, 'import numpy as np\n'), ((1101, 1138), 'numpy.random.standard_normal', 'np.random.standard_normal', (['group_size'], {}), '(group_size)\n', (1126, 1138), True, 'import numpy as np\n')] |
import os.path as osp
import chainer
import numpy as np
import instance_occlsegm_lib.data
from instance_occlsegm_lib.datasets.apc.arc2017.jsk import ItemDataDataset
from instance_occlsegm_lib.datasets.apc.arc2017_v2 import load_item_data
from instance_occlsegm_lib.datasets.apc import \
ARC2017ItemDataSyntheticInstanceSegmentationDataset
class ARC2017SyntheticDataset(ItemDataDataset):
    """Synthetic ARC2017 training data built from pre-rendered item data.

    The item data (with predicted masks) is downloaded on first use into
    ``item_data_dir``.
    """

    # Pre-rendered item data with predicted masks; fetched by download().
    # NOTE: the raw ItemDataARC2017 archive this was generated from lives at
    # https://drive.google.com/uc?id=1hJe4JZvqc2Ni1sjuKwXuBxgddHH2zNFa
    # (md5 c8ad2268b7f2d16accd716c0269d4e5f); run
    # generate_item_data_mask_pred.py on it to reproduce this dataset.
    item_data_dir = \
        osp.expanduser('~/data/instance_occlsegm_lib/synthetic2d/datasets/ItemDataARC2017_MaskPred')  # NOQA

    def __init__(self, do_aug=False, aug_level='all'):
        if not osp.exists(self.item_data_dir):
            self.download()
        super(ARC2017SyntheticDataset, self).__init__(
            split='train',
            ret_load_item_data=load_item_data(self.item_data_dir),
            do_aug=do_aug,
            aug_level=aug_level,
            from_scratch=True,
            skip_known=False,
            verbose=False,
        )

    def __len__(self):
        # Examples are synthesized on the fly, so the epoch length is a
        # fixed, arbitrary number.
        return 10000

    @classmethod
    def download(cls):
        instance_occlsegm_lib.data.download(
            url='https://drive.google.com/uc?id=1OYoLwsRuHKP8is-7JIE2ii5f-VeICjv1',  # NOQA
            md5='5a64bb03613589e5aeab41d8319bb945',
            path=cls.item_data_dir + '.zip',
            postprocess=instance_occlsegm_lib.data.extractall,
        )
class ARC2017SyntheticInstancesDataset(
    ARC2017ItemDataSyntheticInstanceSegmentationDataset
):
    """Instance-segmentation variant backed by the same synthetic item data."""

    def __init__(self, do_aug=False, aug_level='all'):
        # Reuse the item data location (and download helper) of the
        # pixel-wise synthetic dataset.
        data_dir = ARC2017SyntheticDataset.item_data_dir
        if not osp.exists(data_dir):
            ARC2017SyntheticDataset.download()
        super(ARC2017SyntheticInstancesDataset, self).__init__(
            data_dir, do_aug=do_aug, aug_level=aug_level
        )
# -----------------------------------------------------------------------------
# Deprecated
class ARC2017SyntheticCachedDataset(chainer.dataset.DatasetMixin):
    """Deprecated: serves pre-rendered synthetic examples from .npz caches.

    The cache directory holds 10k examples named ``%08d.npz``; the first 75%
    are the train split, the remainder the test split.
    """

    # Populated per instance in __init__ from the live synthetic dataset.
    class_names = None
    dataset_dir = osp.expanduser('~/data/instance_occlsegm_lib/synthetic2d/ARC2017SyntheticCachedDataset')  # NOQA

    def __init__(self, split):
        """Initialize the requested split ('train' or 'test')."""
        assert split in ['train', 'test']
        self._split = split
        # Class names come from the on-the-fly synthetic dataset so both
        # stay consistent.
        self.class_names = ARC2017SyntheticDataset().class_names
        # 75/25 train/test partition of the 10k cached examples.
        size_all = int(10e3)
        size_train = int(size_all * 0.75)
        size_test = size_all - size_train
        self._size = dict(train=size_train, test=size_test)

    def __len__(self):
        return self._size[self._split]

    def get_example(self, i):
        """Return ``(img, lbl)`` arrays for index *i* of the current split."""
        # Test examples are stored after all training examples.
        if self._split == 'train':
            j = i
        else:
            assert self._split == 'test'
            j = i + self._size['train']
        cache_file = osp.join(self.dataset_dir, '%08d.npz' % j)
        # Use the NpzFile as a context manager so the underlying zip file
        # handle is closed promptly instead of leaking until GC.
        with np.load(cache_file) as cache_data:
            return cache_data['img'], cache_data['lbl']
| [
"os.path.exists",
"os.path.join",
"numpy.load",
"os.path.expanduser",
"instance_occlsegm_lib.datasets.apc.arc2017_v2.load_item_data"
] | [((1618, 1720), 'os.path.expanduser', 'osp.expanduser', (['"""~/data/instance_occlsegm_lib/synthetic2d/datasets/ItemDataARC2017_MaskPred"""'], {}), "(\n '~/data/instance_occlsegm_lib/synthetic2d/datasets/ItemDataARC2017_MaskPred'\n )\n", (1632, 1720), True, 'import os.path as osp\n'), ((2717, 2810), 'os.path.expanduser', 'osp.expanduser', (['"""~/data/instance_occlsegm_lib/synthetic2d/ARC2017SyntheticCachedDataset"""'], {}), "(\n '~/data/instance_occlsegm_lib/synthetic2d/ARC2017SyntheticCachedDataset')\n", (2731, 2810), True, 'import os.path as osp\n'), ((557, 591), 'instance_occlsegm_lib.datasets.apc.arc2017_v2.load_item_data', 'load_item_data', (['self.item_data_dir'], {}), '(self.item_data_dir)\n', (571, 591), False, 'from instance_occlsegm_lib.datasets.apc.arc2017_v2 import load_item_data\n'), ((3419, 3461), 'os.path.join', 'osp.join', (['self.dataset_dir', "('%08d.npz' % j)"], {}), "(self.dataset_dir, '%08d.npz' % j)\n", (3427, 3461), True, 'import os.path as osp\n'), ((3483, 3502), 'numpy.load', 'np.load', (['cache_file'], {}), '(cache_file)\n', (3490, 3502), True, 'import numpy as np\n'), ((467, 497), 'os.path.exists', 'osp.exists', (['self.item_data_dir'], {}), '(self.item_data_dir)\n', (477, 497), True, 'import os.path as osp\n'), ((2301, 2326), 'os.path.exists', 'osp.exists', (['item_data_dir'], {}), '(item_data_dir)\n', (2311, 2326), True, 'import os.path as osp\n')] |
"""
- Run target skeletonization on the interaction between two point clouds.
- Compare empirical and analytic estimates for the proxy count.
"""
from functools import partial
import numpy as np
import numpy.linalg as la
import scipy.linalg.interpolative as sli # pylint: disable=no-name-in-module
import matplotlib.pyplot as mp
import logging
# Module-level logger; a handler is configured via logging.basicConfig in
# the __main__ guard at the bottom of this file.
logger = logging.getLogger(__name__)
# {{{ run
def compute_target_reconstruction_error(
        actx, interaction_mat, kernel,
        sources, targets, proxies, *,
        id_eps: float,
        verbose: bool = True) -> "tuple[float, int]":
    """Skeletonize *targets* against *proxies* and measure how well the
    resulting interpolation reconstructs *interaction_mat*.

    :arg actx: array context (unused here; kept for signature parity with
        the callers in this module).
    :arg interaction_mat: dense ``kernel(targets, sources)`` matrix.
    :arg kernel: callable evaluating the kernel between two point sets.
    :arg sources: source geometry (unused directly; it enters only through
        the precomputed *interaction_mat*).
    :arg id_eps: relative tolerance of the interpolative decomposition.
    :returns: a tuple ``(rec_error, nproxy)`` of the relative reconstruction
        error of *interaction_mat* and the skeleton size ``P.shape[1]``.
        (The original annotated ``-> float``, which did not match the
        returned tuple.)
    """
    # Build the target-to-proxy interaction matrix and compute a row
    # interpolative decomposition of it, i.e. a target skeletonization.
    proxy_mat = kernel(targets, proxies)
    k, idx, proj = sli.interp_decomp(proxy_mat.T, id_eps)
    P = sli.reconstruct_interp_matrix(idx, proj).T   # noqa: N806
    idx = idx[:k]
    # Self-consistency check: how well the ID reproduces the proxy matrix.
    id_error = la.norm(proxy_mat - P @ proxy_mat[idx, :]) / la.norm(proxy_mat)
    if verbose:
        logger.info("id_rank: %3d num_rank %3d nproxy %4d",
                idx.size, la.matrix_rank(proxy_mat, tol=id_eps), proxies.ndofs)
        logger.info("id_error: %.15e (eps %.5e)", id_error, id_eps)
    # Quantity of interest: reconstructing the true target/source
    # interaction through the skeleton targets only.
    rec_error = la.norm(
        interaction_mat - P @ interaction_mat[idx, :]
        ) / la.norm(interaction_mat)
    if verbose:
        logger.info("rec_error: %.15e", rec_error)
        logger.info("\n")
    return rec_error, P.shape[1]
def run_error_model(ctx_factory, visualize: bool = True) -> None:
    """Compare empirical vs. analytic proxy counts for target skeletonization.

    Builds a random target/source geometry, skeletonizes the targets against
    proxy spheres for a range of ID tolerances, records both the achieved
    reconstruction error and the proxy count needed, and writes the results
    to an ``.npz`` file (optionally plotting them via ``plot_error_model``).
    """
    import dsplayground as ds
    actx = ds.get_cl_array_context(ctx_factory)
    # Fixed seeds for reproducible geometry and ID sampling.
    np.random.seed(42)
    sli.seed(42)
    # {{{ parameters
    ambient_dim = 3
    nsources = 512
    ntargets = 512
    proxy_radius_factor = 1.5
    max_target_radius = 1.0
    min_source_radius = 2.5
    proxy_radius = proxy_radius_factor * max_target_radius
    # }}}
    # {{{ set up geometry
    # Targets in a ball at the origin; sources in a thin far shell.
    targets = ds.make_random_points_in_sphere(ambient_dim, ntargets,
            rmin=0.0, rmax=max_target_radius)
    sources = ds.make_random_points_in_sphere(ambient_dim, nsources,
            rmin=min_source_radius, rmax=min_source_radius + 0.5)
    source_radius, source_center = ds.get_point_radius_and_center(sources)
    logger.info("sources: radius %.5e center %s", source_radius, source_center)
    target_radius, target_center = ds.get_point_radius_and_center(targets)
    logger.info("targets: radius %.5e center %s", target_radius, target_center)
    def make_proxy_points(nproxies):
        if abs(proxy_radius - min_source_radius) < 0.1:
            # NOTE: if the sources are really close to the proxies / targets,
            # the skeletonization does a pretty bad job. to counter that, we
            # insert another ring of proxy points inside the first one
            return np.hstack([
                proxy_radius * ds.make_sphere(nproxies),
                0.85 * proxy_radius * ds.make_sphere(nproxies),
                ])
        else:
            return proxy_radius * ds.make_sphere(nproxies)
    sources = ds.as_source(actx, sources)
    targets = ds.as_target(actx, targets)
    # }}}
    # {{{ set up kernel evaluation
    from sumpy.kernel import LaplaceKernel
    kernel = LaplaceKernel(ambient_dim)
    # Bind the array context and kernel so kernel(targets, sources) yields a
    # dense interaction matrix.
    kernel = partial(ds.evaluate_p2p_simple, actx, kernel)
    interaction_mat = kernel(targets, sources)
    # }}}
    # {{{ plot error vs id_eps
    # Fix one (generously sized) proxy set for the whole tolerance sweep;
    # the +16 adds slack over the eps=1e-16 model prediction.
    nproxy_model = ds.estimate_proxies_from_id_eps(ambient_dim, 1.0e-16,
            target_radius, source_radius, proxy_radius,
            ntargets, nsources) + 16
    proxies = ds.as_source(actx, make_proxy_points(nproxy_model))
    id_eps_array = 10.0**(-np.arange(2, 16))
    rec_errors = np.empty((id_eps_array.size,))
    for i, id_eps in enumerate(id_eps_array):
        nproxy_model = ds.estimate_proxies_from_id_eps(ambient_dim, id_eps,
                target_radius, source_radius, proxy_radius,
                ntargets, nsources)
        rec_errors[i], _ = compute_target_reconstruction_error(
            actx, interaction_mat, kernel,
            sources, targets, proxies,
            id_eps=id_eps, verbose=False)
        logger.info("id_eps %.5e model nproxy %d rec error %.5e",
                id_eps, nproxy_model, rec_errors[i])
    # }}}
    # {{{ plot proxy count model vs estimate
    nproxy_empirical = np.empty(id_eps_array.size, dtype=np.int64)
    nproxy_model = np.empty(id_eps_array.size, dtype=np.int64)
    id_rank = np.empty(id_eps_array.size, dtype=np.int64)
    nproxies = 3
    for i, id_eps in enumerate(id_eps_array):
        # {{{ increase nproxies until the id_eps tolerance is reached
        # Warm-start slightly below the previous count (tolerances shrink
        # monotonically, so the required count only grows).
        nproxies = max(nproxies - 2, 3)
        while nproxies < 2 * max(ntargets, nsources):
            proxies = ds.as_source(actx, make_proxy_points(nproxies))
            rec_error, rank = compute_target_reconstruction_error(
                actx, interaction_mat, kernel,
                sources, targets, proxies,
                id_eps=id_eps, verbose=False)
            if rec_error < 5 * id_eps:
                break
            nproxies += 2
        # }}}
        nproxy_empirical[i] = nproxies
        nproxy_model[i] = ds.estimate_proxies_from_id_eps(ambient_dim, id_eps,
                target_radius, source_radius, proxy_radius,
                ntargets, nsources)
        id_rank[i] = rank
        logger.info("id_eps %.5e nproxy empirical %3d model %3d rank %3d / %3d",
                id_eps, nproxy_empirical[i], nproxy_model[i], rank, ntargets)
    # }}}
    # {{{ write and visualize
    filename = "p2p_model_{}d_{}.npz".format(ambient_dim, "_".join([
        str(v) for v in (
            "ntargets", ntargets,
            "nsources", nsources,
            "factor", proxy_radius_factor)
        ]).replace(".", "_"))
    np.savez_compressed(filename,
            # parameters
            ambient_dim=ambient_dim,
            nsources=nsources,
            ntargets=ntargets,
            proxy_radius_factor=proxy_radius_factor,
            max_target_radius=max_target_radius,
            min_source_radius=min_source_radius,
            # geometry
            sources=actx.to_numpy(sources.nodes()),
            targets=actx.to_numpy(targets.nodes()),
            proxies=make_proxy_points(32),
            # convergence
            id_eps=id_eps_array,
            rec_errors=rec_errors,
            rec_nproxy=nproxy_model,
            # model
            nproxy_empirical=nproxy_empirical,
            nproxy_model=nproxy_model,
            id_rank=id_rank,
            )
    if visualize:
        plot_error_model(filename)
    # }}}
# }}}
# {{{
def plot_error_model(datafile: str) -> None:
    """Render the figures for a result file written by ``run_error_model``.

    Produces ``<basename>_geometry``, ``<basename>_id_eps`` and
    ``<basename>_model_vs_empirical`` image files next to *datafile*.
    """
    import pathlib
    path = pathlib.Path(datafile)
    basename = path.with_suffix("")
    data = np.load(path)
    fig = mp.figure()
    # {{{ geometry: sources / targets / proxies in 3d
    src = data["sources"]
    tgt = data["targets"]
    prx = data["proxies"]
    axis = fig.add_subplot(111, projection="3d")
    axis.plot(src[0], src[1], src[2], "o", label="$Sources$")
    axis.plot(tgt[0], tgt[1], tgt[2], "o", label="$Targets$")
    axis.plot(prx[0], prx[1], prx[2], "ko", label="$Proxies$")
    axis.set_xlabel("$x$")
    axis.set_ylabel("$y$")
    axis.set_zlabel("$z$")
    axis.margins(0.05, 0.05, 0.05)
    legend = axis.legend(
        bbox_to_anchor=(0, 1.02, 1.0, 0.2),
        loc="lower left", mode="expand",
        borderaxespad=0, ncol=3)
    fig.savefig(f"{basename}_geometry",
            bbox_extra_artists=(legend,),
            bbox_inches="tight",
            )
    fig.clf()
    # }}}
    # {{{ reconstruction error vs id tolerance
    id_eps = data["id_eps"]
    rec_errors = data["rec_errors"]
    axis = fig.gca()
    axis.loglog(id_eps, rec_errors, "o-")
    # Reference line: error equal to the requested tolerance.
    axis.loglog(id_eps, id_eps, "k--")
    axis.set_xlabel(r"$\epsilon_{id}$")
    axis.set_ylabel(r"$Relative Error$")
    fig.savefig(f"{basename}_id_eps")
    fig.clf()
    # }}}
    # {{{ proxy counts: model prediction vs empirical requirement
    axis = fig.gca()
    axis.semilogx(id_eps, data["nproxy_empirical"], "o-", label="$Empirical$")
    # ax.semilogx(id_eps_array, id_rank, "o-", label="$Rank$")
    axis.semilogx(id_eps, data["nproxy_model"], "ko-", label="$Model$")
    axis.set_xlabel(r"$\epsilon$")
    axis.set_ylabel(r"$\#~proxy$")
    axis.legend()
    fig.savefig(f"{basename}_model_vs_empirical")
    fig.clf()
    # }}}
if __name__ == "__main__":
import sys
import pyopencl as cl
logging.basicConfig(level=logging.INFO)
if len(sys.argv) > 1:
exec(sys.argv[1])
else:
run_error_model(cl.create_some_context)
| [
"logging.getLogger",
"numpy.linalg.matrix_rank",
"dsplayground.get_point_radius_and_center",
"scipy.linalg.interpolative.reconstruct_interp_matrix",
"numpy.linalg.norm",
"dsplayground.as_target",
"numpy.arange",
"pathlib.Path",
"scipy.linalg.interpolative.interp_decomp",
"sumpy.kernel.LaplaceKerne... | [((361, 388), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (378, 388), False, 'import logging\n'), ((643, 681), 'scipy.linalg.interpolative.interp_decomp', 'sli.interp_decomp', (['proxy_mat.T', 'id_eps'], {}), '(proxy_mat.T, id_eps)\n', (660, 681), True, 'import scipy.linalg.interpolative as sli\n'), ((1439, 1475), 'dsplayground.get_cl_array_context', 'ds.get_cl_array_context', (['ctx_factory'], {}), '(ctx_factory)\n', (1462, 1475), True, 'import dsplayground as ds\n'), ((1481, 1499), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1495, 1499), True, 'import numpy as np\n'), ((1504, 1516), 'scipy.linalg.interpolative.seed', 'sli.seed', (['(42)'], {}), '(42)\n', (1512, 1516), True, 'import scipy.linalg.interpolative as sli\n'), ((1799, 1892), 'dsplayground.make_random_points_in_sphere', 'ds.make_random_points_in_sphere', (['ambient_dim', 'ntargets'], {'rmin': '(0.0)', 'rmax': 'max_target_radius'}), '(ambient_dim, ntargets, rmin=0.0, rmax=\n max_target_radius)\n', (1830, 1892), True, 'import dsplayground as ds\n'), ((1914, 2027), 'dsplayground.make_random_points_in_sphere', 'ds.make_random_points_in_sphere', (['ambient_dim', 'nsources'], {'rmin': 'min_source_radius', 'rmax': '(min_source_radius + 0.5)'}), '(ambient_dim, nsources, rmin=\n min_source_radius, rmax=min_source_radius + 0.5)\n', (1945, 2027), True, 'import dsplayground as ds\n'), ((2071, 2110), 'dsplayground.get_point_radius_and_center', 'ds.get_point_radius_and_center', (['sources'], {}), '(sources)\n', (2101, 2110), True, 'import dsplayground as ds\n'), ((2227, 2266), 'dsplayground.get_point_radius_and_center', 'ds.get_point_radius_and_center', (['targets'], {}), '(targets)\n', (2257, 2266), True, 'import dsplayground as ds\n'), ((2926, 2953), 'dsplayground.as_source', 'ds.as_source', (['actx', 'sources'], {}), '(actx, sources)\n', (2938, 2953), True, 'import dsplayground as ds\n'), ((2968, 2995), 'dsplayground.as_target', 
'ds.as_target', (['actx', 'targets'], {}), '(actx, targets)\n', (2980, 2995), True, 'import dsplayground as ds\n'), ((3100, 3126), 'sumpy.kernel.LaplaceKernel', 'LaplaceKernel', (['ambient_dim'], {}), '(ambient_dim)\n', (3113, 3126), False, 'from sumpy.kernel import LaplaceKernel\n'), ((3140, 3185), 'functools.partial', 'partial', (['ds.evaluate_p2p_simple', 'actx', 'kernel'], {}), '(ds.evaluate_p2p_simple, actx, kernel)\n', (3147, 3185), False, 'from functools import partial\n'), ((3574, 3604), 'numpy.empty', 'np.empty', (['(id_eps_array.size,)'], {}), '((id_eps_array.size,))\n', (3582, 3604), True, 'import numpy as np\n'), ((4225, 4268), 'numpy.empty', 'np.empty', (['id_eps_array.size'], {'dtype': 'np.int64'}), '(id_eps_array.size, dtype=np.int64)\n', (4233, 4268), True, 'import numpy as np\n'), ((4288, 4331), 'numpy.empty', 'np.empty', (['id_eps_array.size'], {'dtype': 'np.int64'}), '(id_eps_array.size, dtype=np.int64)\n', (4296, 4331), True, 'import numpy as np\n'), ((4346, 4389), 'numpy.empty', 'np.empty', (['id_eps_array.size'], {'dtype': 'np.int64'}), '(id_eps_array.size, dtype=np.int64)\n', (4354, 4389), True, 'import numpy as np\n'), ((6609, 6631), 'pathlib.Path', 'pathlib.Path', (['datafile'], {}), '(datafile)\n', (6621, 6631), False, 'import pathlib\n'), ((6681, 6698), 'numpy.load', 'np.load', (['datafile'], {}), '(datafile)\n', (6688, 6698), True, 'import numpy as np\n'), ((6709, 6720), 'matplotlib.pyplot.figure', 'mp.figure', ([], {}), '()\n', (6718, 6720), True, 'import matplotlib.pyplot as mp\n'), ((8402, 8441), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (8421, 8441), False, 'import logging\n'), ((691, 731), 'scipy.linalg.interpolative.reconstruct_interp_matrix', 'sli.reconstruct_interp_matrix', (['idx', 'proj'], {}), '(idx, proj)\n', (720, 731), True, 'import scipy.linalg.interpolative as sli\n'), ((787, 829), 'numpy.linalg.norm', 'la.norm', (['(proxy_mat - P @ proxy_mat[idx, :])'], {}), 
'(proxy_mat - P @ proxy_mat[idx, :])\n', (794, 829), True, 'import numpy.linalg as la\n'), ((832, 850), 'numpy.linalg.norm', 'la.norm', (['proxy_mat'], {}), '(proxy_mat)\n', (839, 850), True, 'import numpy.linalg as la\n'), ((1095, 1149), 'numpy.linalg.norm', 'la.norm', (['(interaction_mat - P @ interaction_mat[idx, :])'], {}), '(interaction_mat - P @ interaction_mat[idx, :])\n', (1102, 1149), True, 'import numpy.linalg as la\n'), ((1178, 1202), 'numpy.linalg.norm', 'la.norm', (['interaction_mat'], {}), '(interaction_mat)\n', (1185, 1202), True, 'import numpy.linalg as la\n'), ((3297, 3416), 'dsplayground.estimate_proxies_from_id_eps', 'ds.estimate_proxies_from_id_eps', (['ambient_dim', '(1e-16)', 'target_radius', 'source_radius', 'proxy_radius', 'ntargets', 'nsources'], {}), '(ambient_dim, 1e-16, target_radius,\n source_radius, proxy_radius, ntargets, nsources)\n', (3328, 3416), True, 'import dsplayground as ds\n'), ((3675, 3795), 'dsplayground.estimate_proxies_from_id_eps', 'ds.estimate_proxies_from_id_eps', (['ambient_dim', 'id_eps', 'target_radius', 'source_radius', 'proxy_radius', 'ntargets', 'nsources'], {}), '(ambient_dim, id_eps, target_radius,\n source_radius, proxy_radius, ntargets, nsources)\n', (3706, 3795), True, 'import dsplayground as ds\n'), ((5075, 5195), 'dsplayground.estimate_proxies_from_id_eps', 'ds.estimate_proxies_from_id_eps', (['ambient_dim', 'id_eps', 'target_radius', 'source_radius', 'proxy_radius', 'ntargets', 'nsources'], {}), '(ambient_dim, id_eps, target_radius,\n source_radius, proxy_radius, ntargets, nsources)\n', (5106, 5195), True, 'import dsplayground as ds\n'), ((955, 992), 'numpy.linalg.matrix_rank', 'la.matrix_rank', (['proxy_mat'], {'tol': 'id_eps'}), '(proxy_mat, tol=id_eps)\n', (969, 992), True, 'import numpy.linalg as la\n'), ((3539, 3555), 'numpy.arange', 'np.arange', (['(2)', '(16)'], {}), '(2, 16)\n', (3548, 3555), True, 'import numpy as np\n'), ((2886, 2910), 'dsplayground.make_sphere', 'ds.make_sphere', (['nproxies'], 
{}), '(nproxies)\n', (2900, 2910), True, 'import dsplayground as ds\n'), ((2729, 2753), 'dsplayground.make_sphere', 'ds.make_sphere', (['nproxies'], {}), '(nproxies)\n', (2743, 2753), True, 'import dsplayground as ds\n'), ((2793, 2817), 'dsplayground.make_sphere', 'ds.make_sphere', (['nproxies'], {}), '(nproxies)\n', (2807, 2817), True, 'import dsplayground as ds\n')] |
# Imports
import random
import numpy as np
import time as t
import torch.nn as nn
import torch.optim as optim
import torchvision.utils as vutils
import time as time
from torch import autograd
import model
from keijzer_exogan import *
# initialize random seeds
manualSeed = 999
random.seed(manualSeed)
torch.manual_seed(manualSeed)
"""
Local variables
"""
# NOTE(review): `torch` is never imported explicitly in this file -- it is
# pulled in by the star import from keijzer_exogan; an explicit
# `import torch` would be more robust.
workers = 0 # Number of workers for dataloader, 0 when to_vram is enabled
batch_size = 64 # 2**11
image_size = 32
nz = 100 # size of latent vector
num_epochs = 10*10**3
torch.backends.cudnn.benchmark=True # Uses cudnn auto-tuner to find the best algorithm to use for your hardware, speeds up training by almost 50%
lr = 1e-4
beta1 = 0
beta2 = 0.9
lambda_ = 10 # gradient-penalty weight for WGAN-GP
# NOTE(review): beta1 is assigned twice (0 above, 0.5 here); only the second
# value takes effect. The WGAN-GP paper uses beta1=0 -- confirm which is
# intended.
beta1 = 0.5 # Beta1 hyperparam for Adam optimizers
selected_gpus = [0] # Number of GPUs available. Use 0 for CPU mode.
path = '/datb/16011015/ExoGAN_data/selection//' #notice how you dont put the last folder in here...
images = np.load(path+'first_chunks_25_percent_images.npy').astype('float32')
# Training-schedule switches; several are defined here for experiments but
# not all are used by the loop below.
swap_labels_randomly = False
train_d_g_conditional = False # switch between training D and G based on set threshold
d_g_conditional_threshold = 0.55 # D_G_z1 < threshold, train G
train_d_g_conditional_per_epoch = False
train_d_g_conditional_per_n_iters = False
train_d_g_n_iters = 2 # When 2, train D 2 times before training G 1 time
use_saved_weights = True
g_iters = 2 # 5
d_iters = 1 # 1, discriminator is called critic in WGAN paper
print('Batch size: ', batch_size)
ngpu = len(selected_gpus)
print('Number of GPUs used: ', ngpu)
"""
Load data and prepare DataLoader
"""
shuffle = True
if shuffle:
    np.random.shuffle(images) # shuffles the images
images = images[:int(len(images)*1)] # use only first ... percent of the data (0.05)
print('Number of images: ', len(images))
dataset = numpy_dataset(data=images, to_vram=True) # to_vram pins it to all GPU's
#dataset = numpy_dataset(data=images, to_vram=True, transform=transforms.Compose([transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])) # to_vram pins it to all GPU's
# Create the dataloader
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                         shuffle=True, num_workers=workers, pin_memory=False)
"""
Load and setup models
"""
# Initialize cuda
device = torch.device("cuda:"+str(selected_gpus[0]) if (torch.cuda.is_available() and ngpu > 0) else "cpu")
# Load models
netG = model.Generator(ngpu).to(device)
netD = model.Discriminator(ngpu).to(device)
# Apply weights
# custom weights initialization called on netG and netD
def weights_init(m):
    """DCGAN-style layer initialization, applied module-wise via `net.apply`.

    Conv layers get N(0, 0.02) weights; BatchNorm layers get N(1, 0.02)
    scale and zero bias. All other module types are left untouched.
    """
    layer_type = type(m).__name__
    if 'Conv' in layer_type:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_type:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
netG.apply(weights_init) # It's not clean/efficient to load these ones first, but it works.
netD.apply(weights_init)
# Optionally resume from the checkpoints written by the training loop below.
if use_saved_weights:
    try:
        # Load saved weights
        netG.load_state_dict(torch.load('netG_state_dict3', map_location=device)) #net.module..load_... for parallel model , net.load_... for single gpu model
        netD.load_state_dict(torch.load('netD_state_dict3', map_location=device))
        print('Succesfully loaded saved weights.')
    except:
        # NOTE(review): bare except swallows every error, including
        # KeyboardInterrupt; catching (OSError, RuntimeError) would be safer.
        print('Could not load saved weights, using new ones.')
        pass
# Handle multi-gpu if desired
if (device.type == 'cuda') and (ngpu > 1):
    netG = nn.DataParallel(netG, device_ids=selected_gpus, output_device=device)
    netD = nn.DataParallel(netD, device_ids=selected_gpus, output_device=device)
"""
Define input training stuff (fancy this up)
"""
# Initialize BCELoss function
# NOTE(review): unused by the WGAN-GP loop below, which trains directly on
# critic scores.
criterion = nn.BCELoss()
# Create batch of latent vectors that we will use to visualize
# the progression of the generator
fixed_noise = torch.randn(64, nz, 1, 1, device=device)
# Establish convention for real and fake labels during training
real_label = 1
fake_label = 0
# Setup Adam optimizers for both G and D
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, beta2)) # should be sgd
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, beta2))
# Lists to keep track of progress
img_list = []
G_losses = []
D_losses = []
# Flags for the conditional D/G scheduling experiments (see switches above).
switch = True # condition switch, to switch between D and G per epoch
previous_switch = 0
train_D = True
train_G = True
def calc_gradient_penalty(netD, real_data, fake_data, b_size):
    """WGAN-GP gradient penalty (Gulrajani et al., 2017).

    Source: https://github.com/jalola/improved-wgan-pytorch/blob/master/gan_train.py

    Penalizes deviation of the critic's gradient norm from 1, evaluated at
    random interpolates between real and fake samples. Relies on the
    module-level globals ``device``, ``image_size`` and ``lambda_``.

    :param netD: the critic network.
    :param real_data: batch of real images; assumed view-compatible with
        (b_size, 1, image_size, image_size) -- inferred from the alpha view.
    :param fake_data: batch of generated images, same element count.
    :param b_size: batch size of both inputs.
    :returns: scalar penalty tensor, differentiable w.r.t. critic params.
    """
    # One interpolation coefficient per sample, broadcast over all pixels.
    alpha = torch.rand(b_size, 1, device=device)
    alpha = alpha.expand(b_size, int(real_data.nelement() / b_size)).contiguous()
    alpha = alpha.view(b_size, 1, image_size, image_size)
    # NOTE: the original passed a 0-dim tensor as the channel size here;
    # plain int literals are the supported way to specify view() sizes.
    fake_data = fake_data.view(b_size, 1, image_size, image_size)
    interpolates = alpha * real_data + (1 - alpha) * fake_data
    interpolates.requires_grad_(True)
    disc_interpolates = netD(interpolates)
    # d(critic)/d(interpolates); create_graph=True keeps the penalty itself
    # differentiable so it can join the critic loss.
    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=torch.ones(disc_interpolates.size(), device=device),
                              create_graph=True, retain_graph=True, only_inputs=True)[0]
    gradients = gradients.view(gradients.size(0), -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * lambda_
    return gradient_penalty
"""
Highly adapted from: https://github.com/jalola/improved-wgan-pytorch/blob/master/gan_train.py
"""
one = torch.FloatTensor([1]).to(device)
mone = one * -1
MSELoss = nn.MSELoss()
iters = 0
t1 = time.time()
for epoch in range(num_epochs):
for i, data in enumerate(dataloader, 0):
real = data.to(device)
b_size = real.size(0)
"""
Train G
"""
for p in netD.parameters():
p.requires_grad_(False)
for _ in range(g_iters):
netG.zero_grad()
noise = torch.randn(batch_size, nz, 1, 1, device=device)
noise.requires_grad_(True)
fake = netG(noise)
# Additional loss terms
mean_L = MSELoss(netG(noise).mean(), torch.tensor(0.46, device=device))*6
std_L = MSELoss(netG(noise).std(), torch.tensor(0.46, device=device))*6
#mean_L = 0
#std_L = 0
g_cost = netD(fake).mean() #- mean_L - std_L # mines mean and std loss, because those should get low, not high like netD(fake)
g_cost.backward(mone)
g_cost = -g_cost # -1 to maximize g_cost
optimizerG.step()
"""
Train D
"""
for p in netD.parameters():
p.requires_grad_(True)
for _ in range(d_iters):
netD.zero_grad()
# generate fake data
noise = torch.randn(b_size, nz, 1, 1, device=device)
with torch.no_grad():
noisev = noise # Freeze G, training D
fake = netG(noisev).detach()
# train with real data
d_real = netD(real).mean()
# train with fake data
d_fake = netD(fake).mean()
# train with interpolates data
gradient_penalty = calc_gradient_penalty(netD, real, fake, b_size)
# final disc cost
d_cost = d_fake - d_real + gradient_penalty
d_cost.backward()
optimizerD.step()
w_dist = d_fake - d_real # wasserstein distance
L = d_cost + g_cost
weights_saved = False
if (iters % 100 == 0): # save weights every % .... iters
#print('weights saved')
if ngpu > 1:
torch.save(netG.module.state_dict(), 'netG_state_dict3')
torch.save(netD.module.state_dict(), 'netD_state_dict3')
else:
torch.save(netG.state_dict(), 'netG_state_dict3')
torch.save(netD.state_dict(), 'netD_state_dict3')
if i % (16) == 0:
t2 = time.time()
print('[%d/%d][%d/%d] \t Total loss = %.3f \t d_cost = %.3f \t g_cost = %.3f \t Gradient pen. = %.3f \t D(G(z)) = %.3f \t D(x) = %.3f \t mu L: %.3f \t std L: %.3f \t t = %.3f'%
(epoch, num_epochs, i, len(dataloader), L, d_cost, g_cost, gradient_penalty, d_fake, d_real, mean_L, std_L, (t2-t1)))
t1 = time.time()
iters += i | [
"torch.nn.init.constant_",
"model.Discriminator",
"torch.nn.DataParallel",
"random.seed",
"torch.nn.MSELoss",
"torch.nn.BCELoss",
"model.Generator",
"numpy.load",
"time.time",
"torch.nn.init.normal_",
"numpy.random.shuffle"
] | [((281, 304), 'random.seed', 'random.seed', (['manualSeed'], {}), '(manualSeed)\n', (292, 304), False, 'import random\n'), ((3773, 3785), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (3783, 3785), True, 'import torch.nn as nn\n'), ((5763, 5775), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (5773, 5775), True, 'import torch.nn as nn\n'), ((5792, 5803), 'time.time', 'time.time', ([], {}), '()\n', (5801, 5803), True, 'import time as time\n'), ((1641, 1666), 'numpy.random.shuffle', 'np.random.shuffle', (['images'], {}), '(images)\n', (1658, 1666), True, 'import numpy as np\n'), ((3518, 3587), 'torch.nn.DataParallel', 'nn.DataParallel', (['netG'], {'device_ids': 'selected_gpus', 'output_device': 'device'}), '(netG, device_ids=selected_gpus, output_device=device)\n', (3533, 3587), True, 'import torch.nn as nn\n'), ((3599, 3668), 'torch.nn.DataParallel', 'nn.DataParallel', (['netD'], {'device_ids': 'selected_gpus', 'output_device': 'device'}), '(netD, device_ids=selected_gpus, output_device=device)\n', (3614, 3668), True, 'import torch.nn as nn\n'), ((955, 1007), 'numpy.load', 'np.load', (["(path + 'first_chunks_25_percent_images.npy')"], {}), "(path + 'first_chunks_25_percent_images.npy')\n", (962, 1007), True, 'import numpy as np\n'), ((2442, 2463), 'model.Generator', 'model.Generator', (['ngpu'], {}), '(ngpu)\n', (2457, 2463), False, 'import model\n'), ((2482, 2507), 'model.Discriminator', 'model.Discriminator', (['ngpu'], {}), '(ngpu)\n', (2501, 2507), False, 'import model\n'), ((2696, 2737), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight.data', '(0.0)', '(0.02)'], {}), '(m.weight.data, 0.0, 0.02)\n', (2711, 2737), True, 'import torch.nn as nn\n'), ((2790, 2831), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight.data', '(1.0)', '(0.02)'], {}), '(m.weight.data, 1.0, 0.02)\n', (2805, 2831), True, 'import torch.nn as nn\n'), ((2840, 2873), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias.data', '(0)'], {}), '(m.bias.data, 0)\n', 
(2857, 2873), True, 'import torch.nn as nn\n'), ((8308, 8319), 'time.time', 'time.time', ([], {}), '()\n', (8317, 8319), True, 'import time as time\n'), ((8667, 8678), 'time.time', 'time.time', ([], {}), '()\n', (8676, 8678), True, 'import time as time\n')] |
"""
===================
Cartesian Space DMP
===================
In a Cartesian Space DMP, the rotation are represented by quaternions. A
normal DMP cannot be used in this case because it requires that each
component can be linearly interpolated on its own, which is not the case
for three-dimensional orientations.
The following plot shows the trajectory generated by an imitated Cartesian
Space DMP, start and goal positions, and orientations. Note that
executing such a DMP on a robot requires an inverse kinematic that computes
the required joint angles to reach the given poses. It is not guaranteed that
a smooth trajectory in Cartesian space will result in a smooth trajectory in
joint space.
"""
print(__doc__)
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from bolero.representation import CartesianDMPBehavior
def matrix_from_quaternion(q):
    """Convert a unit quaternion ``(w, x, y, z)`` to a 3x3 rotation matrix."""
    w, x, y, z = q
    # Precompute the doubled products that appear in the standard
    # quaternion-to-matrix formula.
    xx, yy, zz = 2.0 * x * x, 2.0 * y * y, 2.0 * z * z
    xy, xz, yz = 2.0 * x * y, 2.0 * x * z, 2.0 * y * z
    xw, yw, zw = 2.0 * x * w, 2.0 * y * w, 2.0 * z * w
    return np.array([
        [1.0 - yy - zz, xy - zw, xz + yw],
        [xy + zw, 1.0 - xx - zz, yz - xw],
        [xz - yw, yz + xw, 1.0 - xx - yy],
    ])
def plot_pose(ax, x, s=1.0, **kwargs):
    """Draw the coordinate frame of pose *x* (position + quaternion) on *ax*.

    The three basis vectors are drawn with length *s* in red/green/blue;
    extra keyword arguments are forwarded to ``ax.plot``. Returns *ax*.
    """
    position = x[:3]
    rotation = matrix_from_quaternion(x[3:])
    for axis_idx, color in enumerate("rgb"):
        tip = position + s * rotation[:, axis_idx]
        ax.plot([position[0], tip[0]],
                [position[1], tip[1]],
                [position[2], tip[2]], color=color, **kwargs)
    return ax
def plot_trajectory(ax, X, color="k"):
ax.plot(X[:, 0], X[:, 1], X[:, 2], lw=2, color=color)
for x in X[50:-50:50]:
plot_pose(ax, x, s=0.03, lw=2, alpha=0.5)
plot_pose(ax, X[0], s=0.05, lw=3)
plot_pose(ax, X[-1], s=0.05, lw=3)
# Resolve the resource files relative to this script; fall back to the
# current directory when __file__ is undefined (e.g. interactive use).
try:
    dirname = os.path.dirname(os.path.realpath(__file__))
except NameError:
    dirname = "."
model = os.path.join(dirname, "cart_dmp_model.yaml")
config = os.path.join(dirname, "cart_dmp_config.yaml")
# Load the imitated Cartesian DMP and its start/goal configuration.
dmp = CartesianDMPBehavior(configuration_file=model)
dmp.init(7, 7)
dmp.load_config(config)
plt.figure(figsize=(18, 10))
# 3d view of the generated pose trajectory.
ax = plt.subplot(221, projection="3d", aspect="equal")
plt.setp(ax, xlim=(0.3, 0.6), ylim=(-0.15, 0.15), zlim=(0.7, 1.0),
         xlabel="X", ylabel="Y", zlabel="Z")
X = dmp.trajectory()
plot_trajectory(ax, X, "k")
# Position components over time.
ax = plt.subplot(223)
ax.plot(X[:, 0], label="X", c="r")
ax.plot(X[:, 1], label="Y", c="g")
ax.plot(X[:, 2], label="Z", c="b")
ax.legend(loc="upper right")
plt.setp(ax, xlabel="Step", ylabel="Position")
# Velocity components (finite differences of the positions).
ax = plt.subplot(224)
dt = dmp.dt
ax.plot(np.diff(X[:, 0]) / dt, label="X", c="r")
ax.plot(np.diff(X[:, 1]) / dt, label="Y", c="g")
ax.plot(np.diff(X[:, 2]) / dt, label="Z", c="b")
ax.legend(loc="upper right")
plt.setp(ax, xlabel="Step", ylabel="Velocity")
plt.show()
| [
"matplotlib.pyplot.setp",
"bolero.representation.CartesianDMPBehavior",
"os.path.join",
"numpy.diff",
"os.path.realpath",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((1994, 2038), 'os.path.join', 'os.path.join', (['dirname', '"""cart_dmp_model.yaml"""'], {}), "(dirname, 'cart_dmp_model.yaml')\n", (2006, 2038), False, 'import os\n'), ((2048, 2093), 'os.path.join', 'os.path.join', (['dirname', '"""cart_dmp_config.yaml"""'], {}), "(dirname, 'cart_dmp_config.yaml')\n", (2060, 2093), False, 'import os\n'), ((2101, 2147), 'bolero.representation.CartesianDMPBehavior', 'CartesianDMPBehavior', ([], {'configuration_file': 'model'}), '(configuration_file=model)\n', (2121, 2147), False, 'from bolero.representation import CartesianDMPBehavior\n'), ((2188, 2216), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 10)'}), '(figsize=(18, 10))\n', (2198, 2216), True, 'import matplotlib.pyplot as plt\n'), ((2222, 2271), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {'projection': '"""3d"""', 'aspect': '"""equal"""'}), "(221, projection='3d', aspect='equal')\n", (2233, 2271), True, 'import matplotlib.pyplot as plt\n'), ((2272, 2379), 'matplotlib.pyplot.setp', 'plt.setp', (['ax'], {'xlim': '(0.3, 0.6)', 'ylim': '(-0.15, 0.15)', 'zlim': '(0.7, 1.0)', 'xlabel': '"""X"""', 'ylabel': '"""Y"""', 'zlabel': '"""Z"""'}), "(ax, xlim=(0.3, 0.6), ylim=(-0.15, 0.15), zlim=(0.7, 1.0), xlabel=\n 'X', ylabel='Y', zlabel='Z')\n", (2280, 2379), True, 'import matplotlib.pyplot as plt\n'), ((2439, 2455), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (2450, 2455), True, 'import matplotlib.pyplot as plt\n'), ((2590, 2636), 'matplotlib.pyplot.setp', 'plt.setp', (['ax'], {'xlabel': '"""Step"""', 'ylabel': '"""Position"""'}), "(ax, xlabel='Step', ylabel='Position')\n", (2598, 2636), True, 'import matplotlib.pyplot as plt\n'), ((2643, 2659), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (2654, 2659), True, 'import matplotlib.pyplot as plt\n'), ((2848, 2894), 'matplotlib.pyplot.setp', 'plt.setp', (['ax'], {'xlabel': '"""Step"""', 'ylabel': '"""Velocity"""'}), "(ax, xlabel='Step', 
ylabel='Velocity')\n", (2856, 2894), True, 'import matplotlib.pyplot as plt\n'), ((2896, 2906), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2904, 2906), True, 'import matplotlib.pyplot as plt\n'), ((1127, 1246), 'numpy.array', 'np.array', (['[[1.0 - y2 - z2, xy - zw, xz + yw], [xy + zw, 1.0 - x2 - z2, yz - xw], [xz -\n yw, yz + xw, 1.0 - x2 - y2]]'], {}), '([[1.0 - y2 - z2, xy - zw, xz + yw], [xy + zw, 1.0 - x2 - z2, yz -\n xw], [xz - yw, yz + xw, 1.0 - x2 - y2]])\n', (1135, 1246), True, 'import numpy as np\n'), ((1922, 1948), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1938, 1948), False, 'import os\n'), ((2680, 2696), 'numpy.diff', 'np.diff', (['X[:, 0]'], {}), '(X[:, 0])\n', (2687, 2696), True, 'import numpy as np\n'), ((2729, 2745), 'numpy.diff', 'np.diff', (['X[:, 1]'], {}), '(X[:, 1])\n', (2736, 2745), True, 'import numpy as np\n'), ((2778, 2794), 'numpy.diff', 'np.diff', (['X[:, 2]'], {}), '(X[:, 2])\n', (2785, 2794), True, 'import numpy as np\n')] |
import datetime
import os
import tempfile
import numpy as np
import scipy.stats as st
import xarray as xr
from wildfire.data import goes_level_1
def test_goes_band(goes_level_1_mesoscale):
    """GoesBand exposes metadata, parsing, normalization, and netcdf export."""
    band = goes_level_1.GoesBand(dataset=goes_level_1_mesoscale)

    # Scan metadata parsed from the dataset attributes.
    assert band.region == "M1"
    assert band.satellite == "G16"
    assert band.band_id == 1
    assert band.scan_time_utc == datetime.datetime(2020, 1, 1, 0, 1, 26, 200000)
    np.testing.assert_almost_equal(band.band_wavelength_micrometers, 0.47, decimal=2)

    # Band 1 is reflective: parse() yields reflectance; brightness temp is all-NaN.
    assert band.parse().equals(band.reflectance_factor)
    assert np.isnan(band.reflectance_factor.data).sum() == 0
    nan_count = np.isnan(band.brightness_temperature.data).sum()
    assert nan_count == band.dataset.Rad.data.size

    # Normalization is a z-score over the parsed (or raw radiance) values.
    np.testing.assert_array_equal(
        band.normalize(), st.zscore(band.reflectance_factor, axis=None)
    )
    np.testing.assert_array_equal(
        band.normalize(use_radiance=True), st.zscore(band.dataset.Rad, axis=None),
    )

    # Round-trip through netcdf in a scratch directory.
    with tempfile.TemporaryDirectory() as temp_directory:
        saved_path = band.to_netcdf(directory=temp_directory)
        assert os.path.exists(saved_path)
        assert isinstance(xr.open_dataset(saved_path), xr.core.dataset.Dataset)
def test_reflective_band(goes_level_1_mesoscale):
    """A reflective band parses to reflectance and z-score-normalizes it."""
    band = goes_level_1.GoesBand(dataset=goes_level_1_mesoscale)
    assert band.parse().equals(band.reflectance_factor)
    assert np.isnan(band.reflectance_factor.data).sum() == 0
    expected = st.zscore(band.reflectance_factor, axis=None)
    np.testing.assert_array_equal(band.normalize(), expected)
def test_emissive_band(goes_level_1_channel_7):
    """An emissive band parses to brightness temperature and normalizes it."""
    band = goes_level_1.GoesBand(dataset=goes_level_1_channel_7)
    assert band.parse().equals(band.brightness_temperature)
    assert np.isnan(band.brightness_temperature.data).sum() == 0
    expected = st.zscore(band.brightness_temperature, axis=None)
    np.testing.assert_array_equal(band.normalize(), expected)
def test_filter_bad_pixels(goes_level_1_mesoscale):
    """Filtering bad pixels preserves metadata and removes every NaN."""
    original = goes_level_1.GoesBand(dataset=goes_level_1_mesoscale)
    filtered = original.filter_bad_pixels()
    assert isinstance(filtered, goes_level_1.GoesBand)
    assert filtered.scan_time_utc == original.scan_time_utc
    assert filtered.band_id == original.band_id
    assert np.isnan(filtered.reflectance_factor).sum() == 0
def test_rescale_to_2km(goes_level_1_mesoscale, goes_level_1_conus, goes_level_1_full):
    """Every scan region rescales to its 2 km grid shape, idempotently."""
    mesoscale = goes_level_1.GoesBand(dataset=goes_level_1_mesoscale).rescale_to_2km()
    assert isinstance(mesoscale, goes_level_1.GoesBand)
    cases = (
        (mesoscale, (500, 500)),
        (goes_level_1.GoesBand(dataset=goes_level_1_conus).rescale_to_2km(), (1500, 2500)),
        (goes_level_1.GoesBand(dataset=goes_level_1_full).rescale_to_2km(), (5424, 5424)),
    )
    for band, expected_shape in cases:
        assert band.dataset.Rad.shape == expected_shape
        # A second rescale is a no-op once already on the 2 km grid.
        assert band.rescale_to_2km().dataset.Rad.shape == expected_shape
def test_read_netcdf(goes_level_1_filepaths_no_wildfire):
    """read_netcdf wraps a local level-1 file in a GoesBand."""
    band = goes_level_1.read_netcdf(
        local_filepath=goes_level_1_filepaths_no_wildfire[0],
    )
    assert isinstance(band, goes_level_1.GoesBand)
def test_normalize():
    """normalize matches scipy's z-score on a plain array."""
    values = np.array([1, 2, 3, 4, 5])
    np.testing.assert_array_equal(
        goes_level_1.band.normalize(data=values), st.zscore(values)
    )
def test_get_goes_band_local(goes_level_1_filepaths_no_wildfire):
    """get_goes_band resolves a scan from the local directory when s3=False."""
    local_filepath = goes_level_1_filepaths_no_wildfire[0]
    region, channel, satellite, scan_time = goes_level_1.utilities.parse_filename(
        local_filepath
    )
    local_directory = os.path.join(
        "tests", "resources", "goes_level_1_scan_no_wildfire"
    )
    band = goes_level_1.get_goes_band(
        satellite=goes_level_1.utilities.SATELLITE_LONG_HAND[satellite],
        region=region,
        channel=channel,
        scan_time_utc=scan_time,
        local_directory=local_directory,
        s3=False,
    )
    assert isinstance(band, goes_level_1.GoesBand)
    # Metadata parsed from the filename round-trips onto the loaded band.
    assert band.band_id == channel
    assert band.region == region
    assert band.satellite == satellite
    assert band.scan_time_utc == scan_time
| [
"datetime.datetime",
"tempfile.TemporaryDirectory",
"os.path.exists",
"wildfire.data.goes_level_1.utilities.parse_filename",
"numpy.testing.assert_array_equal",
"wildfire.data.goes_level_1.read_netcdf",
"os.path.join",
"wildfire.data.goes_level_1.GoesBand",
"numpy.testing.assert_almost_equal",
"nu... | [((206, 259), 'wildfire.data.goes_level_1.GoesBand', 'goes_level_1.GoesBand', ([], {'dataset': 'goes_level_1_mesoscale'}), '(dataset=goes_level_1_mesoscale)\n', (227, 259), False, 'from wildfire.data import goes_level_1\n'), ((334, 421), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['actual.band_wavelength_micrometers', '(0.47)'], {'decimal': '(2)'}), '(actual.band_wavelength_micrometers, 0.47,\n decimal=2)\n', (364, 421), True, 'import numpy as np\n'), ((1313, 1366), 'wildfire.data.goes_level_1.GoesBand', 'goes_level_1.GoesBand', ([], {'dataset': 'goes_level_1_mesoscale'}), '(dataset=goes_level_1_mesoscale)\n', (1334, 1366), False, 'from wildfire.data import goes_level_1\n'), ((1670, 1723), 'wildfire.data.goes_level_1.GoesBand', 'goes_level_1.GoesBand', ([], {'dataset': 'goes_level_1_channel_7'}), '(dataset=goes_level_1_channel_7)\n', (1691, 1723), False, 'from wildfire.data import goes_level_1\n'), ((2046, 2099), 'wildfire.data.goes_level_1.GoesBand', 'goes_level_1.GoesBand', ([], {'dataset': 'goes_level_1_mesoscale'}), '(dataset=goes_level_1_mesoscale)\n', (2067, 2099), False, 'from wildfire.data import goes_level_1\n'), ((3181, 3259), 'wildfire.data.goes_level_1.read_netcdf', 'goes_level_1.read_netcdf', ([], {'local_filepath': 'goes_level_1_filepaths_no_wildfire[0]'}), '(local_filepath=goes_level_1_filepaths_no_wildfire[0])\n', (3205, 3259), False, 'from wildfire.data import goes_level_1\n'), ((3360, 3385), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (3368, 3385), True, 'import numpy as np\n'), ((3399, 3434), 'wildfire.data.goes_level_1.band.normalize', 'goes_level_1.band.normalize', ([], {'data': 'x'}), '(data=x)\n', (3426, 3434), False, 'from wildfire.data import goes_level_1\n'), ((3450, 3462), 'scipy.stats.zscore', 'st.zscore', (['x'], {}), '(x)\n', (3459, 3462), True, 'import scipy.stats as st\n'), ((3467, 3514), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', 
(['actual', 'expected'], {}), '(actual, expected)\n', (3496, 3514), True, 'import numpy as np\n'), ((3686, 3739), 'wildfire.data.goes_level_1.utilities.parse_filename', 'goes_level_1.utilities.parse_filename', (['local_filepath'], {}), '(local_filepath)\n', (3723, 3739), False, 'from wildfire.data import goes_level_1\n'), ((453, 500), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(1)', '(1)', '(0)', '(1)', '(26)', '(200000)'], {}), '(2020, 1, 1, 0, 1, 26, 200000)\n', (470, 500), False, 'import datetime\n'), ((828, 875), 'scipy.stats.zscore', 'st.zscore', (['actual.reflectance_factor'], {'axis': 'None'}), '(actual.reflectance_factor, axis=None)\n', (837, 875), True, 'import scipy.stats as st\n'), ((962, 1002), 'scipy.stats.zscore', 'st.zscore', (['actual.dataset.Rad'], {'axis': 'None'}), '(actual.dataset.Rad, axis=None)\n', (971, 1002), True, 'import scipy.stats as st\n'), ((1019, 1048), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1046, 1048), False, 'import tempfile\n'), ((1145, 1169), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (1159, 1169), False, 'import os\n'), ((1553, 1600), 'scipy.stats.zscore', 'st.zscore', (['actual.reflectance_factor'], {'axis': 'None'}), '(actual.reflectance_factor, axis=None)\n', (1562, 1600), True, 'import scipy.stats as st\n'), ((1918, 1969), 'scipy.stats.zscore', 'st.zscore', (['actual.brightness_temperature'], {'axis': 'None'}), '(actual.brightness_temperature, axis=None)\n', (1927, 1969), True, 'import scipy.stats as st\n'), ((1196, 1221), 'xarray.open_dataset', 'xr.open_dataset', (['filepath'], {}), '(filepath)\n', (1211, 1221), True, 'import xarray as xr\n'), ((2464, 2517), 'wildfire.data.goes_level_1.GoesBand', 'goes_level_1.GoesBand', ([], {'dataset': 'goes_level_1_mesoscale'}), '(dataset=goes_level_1_mesoscale)\n', (2485, 2517), False, 'from wildfire.data import goes_level_1\n'), ((2719, 2768), 'wildfire.data.goes_level_1.GoesBand', 
'goes_level_1.GoesBand', ([], {'dataset': 'goes_level_1_conus'}), '(dataset=goes_level_1_conus)\n', (2740, 2768), False, 'from wildfire.data import goes_level_1\n'), ((2921, 2969), 'wildfire.data.goes_level_1.GoesBand', 'goes_level_1.GoesBand', ([], {'dataset': 'goes_level_1_full'}), '(dataset=goes_level_1_full)\n', (2942, 2969), False, 'from wildfire.data import goes_level_1\n'), ((3974, 4041), 'os.path.join', 'os.path.join', (['"""tests"""', '"""resources"""', '"""goes_level_1_scan_no_wildfire"""'], {}), "('tests', 'resources', 'goes_level_1_scan_no_wildfire')\n", (3986, 4041), False, 'import os\n'), ((603, 643), 'numpy.isnan', 'np.isnan', (['actual.reflectance_factor.data'], {}), '(actual.reflectance_factor.data)\n', (611, 643), True, 'import numpy as np\n'), ((676, 720), 'numpy.isnan', 'np.isnan', (['actual.brightness_temperature.data'], {}), '(actual.brightness_temperature.data)\n', (684, 720), True, 'import numpy as np\n'), ((1438, 1478), 'numpy.isnan', 'np.isnan', (['actual.reflectance_factor.data'], {}), '(actual.reflectance_factor.data)\n', (1446, 1478), True, 'import numpy as np\n'), ((1799, 1843), 'numpy.isnan', 'np.isnan', (['actual.brightness_temperature.data'], {}), '(actual.brightness_temperature.data)\n', (1807, 1843), True, 'import numpy as np\n'), ((2314, 2349), 'numpy.isnan', 'np.isnan', (['actual.reflectance_factor'], {}), '(actual.reflectance_factor)\n', (2322, 2349), True, 'import numpy as np\n')] |
import gym
import retro
import numpy as np
import cv2
import random
# Discretize continuous action space
class Discretizer(gym.ActionWrapper):
    """Expose a small Discrete action space over a MultiBinary retro env.

    Each entry of ``combos`` is a list of button names; choosing discrete
    action ``i`` presses exactly the buttons listed in ``combos[i]``.
    """

    def __init__(self, env, combos):
        super().__init__(env)
        assert isinstance(env.action_space, gym.spaces.MultiBinary)
        buttons = env.unwrapped.buttons
        table = []
        for combo in combos:
            pressed = np.array([False] * env.action_space.n)
            for name in combo:
                pressed[buttons.index(name)] = True
            table.append(pressed)
        self._decode_discrete_action = table
        self.action_space = gym.spaces.Discrete(len(self._decode_discrete_action))

    def action(self, act):
        # Copy so callers cannot mutate the cached decode table.
        return self._decode_discrete_action[act].copy()
# Limit the episode length
class TimeLimit(gym.Wrapper):
    """Force episode termination after a fixed number of steps."""

    def __init__(self, env, max_episode_steps=None):
        super().__init__(env)
        self._max_episode_steps = max_episode_steps
        self._elapsed_steps = 0

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self._elapsed_steps += 1
        truncated = self._elapsed_steps >= self._max_episode_steps
        if truncated:
            # Flag the truncation so callers can distinguish it from a real end.
            info['TimeLimit.truncated'] = True
            done = True
        return obs, reward, done, info

    def reset(self, **kwargs):
        self._elapsed_steps = 0
        return self.env.reset(**kwargs)
# Skip frames
class SkipFrames(gym.Wrapper):
    """Repeat each action for ``n`` consecutive frames, summing the rewards."""

    def __init__(self, env, n = 4):
        super().__init__(env)
        self.n = n

    def step(self, action):
        accumulated = 0.0
        done = False
        for _ in range(self.n):
            obs, reward, done, info = self.env.step(action)
            accumulated += reward
            if done:
                # Stop repeating as soon as the episode ends.
                break
        return obs, accumulated, done, info
# Convert observation to greyscale
class Rgb2Gray(gym.ObservationWrapper):
    """Collapse RGB observations to a single greyscale channel."""

    def __init__(self, env):
        super().__init__(env)
        height, width, _ = env.observation_space.shape
        self.observation_space = gym.spaces.Box(
            low=0, high=255, shape=(height, width, 1), dtype=np.uint8
        )

    def observation(self, frame):
        grey = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        # Keep an explicit trailing channel axis for downstream stacking.
        return grey[:, :, None]
# Downsample the observation
class Downsample(gym.ObservationWrapper):
    """Shrink observations by an integer ratio along both image axes."""

    def __init__(self, env, ratio):
        super().__init__(env)
        oldh, oldw, oldc = env.observation_space.shape
        self.observation_space = gym.spaces.Box(
            low=0, high=255, shape=(oldh // ratio, oldw // ratio, oldc), dtype=np.uint8
        )

    def observation(self, frame):
        height, width, _ = self.observation_space.shape
        resized = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)
        # cv2.resize drops the channel axis for single-channel input; restore it.
        if resized.ndim == 2:
            resized = resized[:, :, None]
        return resized
#change observation space to return 4 stacked frames for temporal information
from collections import deque
class FrameStack(gym.Wrapper):
    """Stack the ``k`` most recent observations along the channel axis."""

    def __init__(self, env, k):
        super().__init__(env)
        oldh, oldw, _ = env.observation_space.shape
        self.observation_space = gym.spaces.Box(
            low=0, high=255, shape=(oldh, oldw, k), dtype=np.uint8
        )
        self.k = k
        self.frames = deque([], maxlen=k)

    def reset(self):
        # Seed the stack by repeating the first observation k times.
        first = self.env.reset()
        self.frames.extend([first] * self.k)
        return self._get_obs()

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.frames.append(obs)
        return self._get_obs(), reward, done, info

    def _get_obs(self):
        assert len(self.frames) == self.k
        return np.concatenate(self.frames, axis=2)
#normalize observation space
class ScaledFloatFrame(gym.ObservationWrapper):
    # Rescale uint8 pixel observations to float32 values in [0, 1].
    def __init__(self, env):
        gym.ObservationWrapper.__init__(self, env)
        self.observation_space = gym.spaces.Box(low=np.float32(0), high=np.float32(1),
                                            shape=env.observation_space.shape,
                                            dtype=np.float32)
    def observation(self, observation):
        # 255.0 is the max uint8 pixel value, so output lands in [0, 1].
        return np.array(observation).astype(np.float32) / 255.0 | [
"gym.ObservationWrapper.__init__",
"collections.deque",
"gym.spaces.Box",
"numpy.array",
"cv2.cvtColor",
"gym.Wrapper.__init__",
"numpy.concatenate",
"cv2.resize",
"numpy.float32"
] | [((1456, 1487), 'gym.Wrapper.__init__', 'gym.Wrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (1476, 1487), False, 'import gym\n'), ((1909, 1951), 'gym.ObservationWrapper.__init__', 'gym.ObservationWrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (1940, 1951), False, 'import gym\n'), ((2043, 2113), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(oldh, oldw, 1)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(oldh, oldw, 1), dtype=np.uint8)\n', (2057, 2113), False, 'import gym\n'), ((2273, 2312), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2GRAY'], {}), '(frame, cv2.COLOR_RGB2GRAY)\n', (2285, 2312), False, 'import cv2\n'), ((2460, 2502), 'gym.ObservationWrapper.__init__', 'gym.ObservationWrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (2491, 2502), False, 'import gym\n'), ((2645, 2708), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': 'newshape', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=newshape, dtype=np.uint8)\n', (2659, 2708), False, 'import gym\n'), ((2920, 2984), 'cv2.resize', 'cv2.resize', (['frame', '(width, height)'], {'interpolation': 'cv2.INTER_AREA'}), '(frame, (width, height), interpolation=cv2.INTER_AREA)\n', (2930, 2984), False, 'import cv2\n'), ((3252, 3283), 'gym.Wrapper.__init__', 'gym.Wrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (3272, 3283), False, 'import gym\n'), ((3415, 3483), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': 'newStackShape', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=newStackShape, dtype=np.uint8)\n', (3429, 3483), False, 'import gym\n'), ((3629, 3648), 'collections.deque', 'deque', (['[]'], {'maxlen': 'k'}), '([], maxlen=k)\n', (3634, 3648), False, 'from collections import deque\n'), ((4053, 4088), 'numpy.concatenate', 'np.concatenate', (['self.frames'], {'axis': '(2)'}), '(self.frames, axis=2)\n', (4067, 4088), True, 'import numpy as np\n'), ((4206, 
4248), 'gym.ObservationWrapper.__init__', 'gym.ObservationWrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (4237, 4248), False, 'import gym\n'), ((408, 446), 'numpy.array', 'np.array', (['([False] * env.action_space.n)'], {}), '([False] * env.action_space.n)\n', (416, 446), True, 'import numpy as np\n'), ((4301, 4314), 'numpy.float32', 'np.float32', (['(0)'], {}), '(0)\n', (4311, 4314), True, 'import numpy as np\n'), ((4321, 4334), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (4331, 4334), True, 'import numpy as np\n'), ((4541, 4562), 'numpy.array', 'np.array', (['observation'], {}), '(observation)\n', (4549, 4562), True, 'import numpy as np\n')] |
import abc
import numpy as np
import os
from ttools import tec as ttec, io, utils
class TroughLabelJobManager:
    """Factory/facade for a TroughLabelJob subclass and its hyperparameters.

    Holds a date-less template job for attribute access and stamps out dated
    jobs on demand via ``make_job``.
    """

    def __init__(self, job_class, **kwargs):
        self.job_class = job_class
        # Template job: date=None means no data is loaded.
        self._job = job_class(None, **kwargs)
        self._kwargs = kwargs
        self.save_dir = None

    @classmethod
    def get_random(cls, job_class, params_dir):
        """Sample random params for ``job_class``, persist them, return a manager."""
        params = job_class.get_random_params()
        io.write_yaml(os.path.join(params_dir, 'params.yaml'), **params)
        manager = cls(job_class, **params)
        manager.save_dir = params_dir
        return manager

    def make_job(self, date):
        """Instantiate a dated job with the stored hyperparameters."""
        return self.job_class(date, **self._kwargs)

    def __getattr__(self, item):
        # Unknown attributes are delegated to the template job.
        return getattr(self._job, item)
class TroughLabelJob(abc.ABC):
    """Abstract base class for one day of trough labelling.

    Concrete subclasses implement ``run`` (filling ``model_output`` and
    ``trough``), ``get_random_params`` and ``plot``.
    """

    def __init__(self, date, **kwargs):
        # `date` is a numpy datetime64 day; None skips data loading so the
        # instance can serve as a parameter-holding template (see manager).
        self.date = date
        self.bg_est_shape = kwargs['bg_est_shape']  # background-estimation window shape
        self.tec = None        # raw TEC maps over the padded interval
        self.tec_times = None  # timestamps matching self.tec
        self.x = None          # preprocessed TEC for the day
        self.times = None      # timestamps matching self.x
        self.ssmlon = None
        self.arb = None
        if date is not None:
            self.load_data()
        self.model_output = None
        self.trough = None

    def load_data(self):
        """Fetch TEC (padded by half the background window on each side),
        preprocess the day's interval, and fetch auroral-boundary data."""
        one_h = np.timedelta64(1, 'h')
        # Midnight at the start of self.date, and midnight the next day.
        start_time = self.date.astype('datetime64[D]').astype('datetime64[s]')
        end_time = start_time + np.timedelta64(1, 'D')
        tec_start = start_time - np.floor(self.bg_est_shape[0] / 2) * one_h
        tec_end = end_time + (np.floor(self.bg_est_shape[0] / 2)) * one_h
        self.tec, self.tec_times, ssmlon, n = io.get_tec_data(tec_start, tec_end)
        self.x, self.times = ttec.preprocess_interval(self.tec, self.tec_times, bg_est_shape=self.bg_est_shape)
        # Trim ssmlon to match the moving-window-trimmed time axis.
        self.ssmlon, = utils.moving_func_trim(self.bg_est_shape[0], ssmlon)
        self.arb, _ = io.get_arb_data(start_time, end_time)

    @abc.abstractmethod
    def run(self):
        """Run the labelling algorithm, populating model_output / trough."""
        ...

    @staticmethod
    @abc.abstractmethod
    def get_random_params():
        """Return a dict of randomly sampled hyperparameters for this job."""
        ...

    @abc.abstractmethod
    def plot(self, swarm_troughs, plot_dir):
        """Write diagnostic plots comparing results against swarm troughs."""
        ...
| [
"ttools.tec.preprocess_interval",
"ttools.io.get_tec_data",
"ttools.io.get_arb_data",
"ttools.utils.moving_func_trim",
"os.path.join",
"numpy.floor",
"numpy.timedelta64"
] | [((1198, 1220), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""h"""'], {}), "(1, 'h')\n", (1212, 1220), True, 'import numpy as np\n'), ((1552, 1587), 'ttools.io.get_tec_data', 'io.get_tec_data', (['tec_start', 'tec_end'], {}), '(tec_start, tec_end)\n', (1567, 1587), False, 'from ttools import tec as ttec, io, utils\n'), ((1617, 1704), 'ttools.tec.preprocess_interval', 'ttec.preprocess_interval', (['self.tec', 'self.tec_times'], {'bg_est_shape': 'self.bg_est_shape'}), '(self.tec, self.tec_times, bg_est_shape=self.\n bg_est_shape)\n', (1641, 1704), True, 'from ttools import tec as ttec, io, utils\n'), ((1723, 1775), 'ttools.utils.moving_func_trim', 'utils.moving_func_trim', (['self.bg_est_shape[0]', 'ssmlon'], {}), '(self.bg_est_shape[0], ssmlon)\n', (1745, 1775), False, 'from ttools import tec as ttec, io, utils\n'), ((1798, 1835), 'ttools.io.get_arb_data', 'io.get_arb_data', (['start_time', 'end_time'], {}), '(start_time, end_time)\n', (1813, 1835), False, 'from ttools import tec as ttec, io, utils\n'), ((435, 474), 'os.path.join', 'os.path.join', (['params_dir', '"""params.yaml"""'], {}), "(params_dir, 'params.yaml')\n", (447, 474), False, 'import os\n'), ((1332, 1354), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (1346, 1354), True, 'import numpy as np\n'), ((1388, 1422), 'numpy.floor', 'np.floor', (['(self.bg_est_shape[0] / 2)'], {}), '(self.bg_est_shape[0] / 2)\n', (1396, 1422), True, 'import numpy as np\n'), ((1461, 1495), 'numpy.floor', 'np.floor', (['(self.bg_est_shape[0] / 2)'], {}), '(self.bg_est_shape[0] / 2)\n', (1469, 1495), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
__author__ = 'fpajot'
import gzip
from io import StringIO, BytesIO
import logging
from os import path
import pickle
import boto3
import pandas
import numpy as np
# Module-wide logging: INFO level on the root logger, plus a named logger
# for this module.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def get_keys(s3: boto3.resources.base.ServiceResource,
             bucket: str, prefix: str = '',
             suffix: str = '',
             **kwargs):
    """
    Generate the keys in an S3 bucket.

    :param s3: S3 client
    :param bucket: S3 bucket name.
    :param prefix: Only fetch keys that start with this prefix (optional).
    :param suffix: Only fetch keys that end with this suffix (optional).
    :param '**kwargs': used for passing arguments to list_objects_v2 method
    :return: yields each matching object key (str), one at a time
    """
    kwargs.update({'Bucket': bucket})

    # Prefix filtering is done server-side by the S3 API;
    # suffix filtering happens client-side below.
    if isinstance(prefix, str):
        kwargs.update({'Prefix': prefix})

    done = False
    while not done:
        # The S3 API response is a large blob of metadata.
        # 'Contents' contains information about the listed objects.
        resp = s3.list_objects_v2(**kwargs)
        if 'Contents' in resp.keys():
            for obj in resp['Contents']:
                key = obj['Key']
                if key.endswith(suffix):
                    yield key
        else:
            logger.info('Nothing found for the given prefix and/or suffix')

        # The S3 listing is paginated (default MaxKeys is 1000): keep
        # requesting pages with the continuation token until complete.
        done = not resp['IsTruncated']
        if not done:
            kwargs.update({'ContinuationToken': resp['NextContinuationToken']})
def _get_splited_df_streams(df, parts, func, buffer_class, **kwargs):
    """
    Splits pandas.Dataframe into parts and returns list of corresponding stream objects

    :param df: pandas DataFrame to be split
    :param parts: number of output parts
    :param func: unbound writer, e.g. pandas.DataFrame.to_csv
    :param buffer_class: stream class to dump into (io.StringIO or io.BytesIO)
    :param sort_keys: list of column names to sort by before splitting (optional)
    :param '**kwargs': extra arguments forwarded to ``func``; arguments the
        writer does not accept are silently dropped
    :return: list of streams, each containing one serialized part of the DataFrame
    :rtype: list
    """
    import inspect

    sort_keys = kwargs.pop('sort_keys', None)
    if sort_keys is not None:
        assert len(sort_keys) > 0, 'Sort keys not accepted, it must be not empty list of strings'

    # Keep only the kwargs the writer actually accepts. The previous
    # implementation used func.__code__.co_varnames, which also contains
    # *local variable* names and raises AttributeError on C-implemented
    # callables (e.g. pickle.dump); inspect.signature handles both cases.
    try:
        signature_params = inspect.signature(func).parameters
    except (TypeError, ValueError):
        signature_params = None  # not introspectable: forward everything
    if signature_params is None or any(
        p.kind is inspect.Parameter.VAR_KEYWORD for p in signature_params.values()
    ):
        func_kwargs = dict(kwargs)
    else:
        func_kwargs = {k: v for k, v in kwargs.items() if k in signature_params}

    source_df = df if sort_keys is None else df.sort_values(sort_keys)
    buffers = []
    for part in np.array_split(source_df, parts):
        buffer = buffer_class()
        if func == pandas.DataFrame.to_excel:
            # Excel needs an ExcelWriter wrapping the binary buffer.
            writer = pandas.ExcelWriter(buffer, engine='xlsxwriter')
            func(part, writer, **func_kwargs)
            # close() flushes the workbook; ExcelWriter.save() was removed in
            # pandas 2.0, while close() exists in every pandas version.
            writer.close()
        else:
            func(part, buffer, **func_kwargs)
        buffers.append(buffer)
    return buffers
def put_df(s3: boto3.resources.base.ServiceResource,
           df: pandas.DataFrame,
           bucket: str,
           key: str,
           **kwargs
           ):
    """
    Put pandas.DataFrame object to s3 using a specific format

    :param s3: S3 client
    :param df: DataFrame to put into s3
    :param bucket: bucket name of the target file
    :param key: aws key of the target file
    :param format: file format to use ('csv', 'parquet', 'pickle' or 'xlsx'), default 'csv'
    :param compression: file compression applied (csv only: None or 'gzip')
    :param parts: number of output files, default 1
    :param sort_keys: list of column names (sort keys)
    :param '**kwargs': remaining arguments are forwarded to the pandas writer
    """
    if not isinstance(df, pandas.DataFrame):
        raise TypeError('Provided content must type pandas.DataFrame')

    # Options consumed here are popped so only writer arguments remain.
    format = kwargs.pop('format', 'csv')
    compression = kwargs.pop('compression', None)
    parts = kwargs.pop('parts', 1)

    assert parts > 0, 'Number of parts not accepted, it must be > 0'
    assert format in ['csv', 'parquet', 'pickle', 'xlsx'], \
        'provider format value not accepted'
    if format == 'csv':
        assert compression in [None, 'gzip'], \
            'provider compression value not accepted'

    content_type = 'text'
    content_encoding = 'default'
    if format == 'csv':
        kwargs['index_label'] = False
        kwargs['index'] = False
        buffers = _get_splited_df_streams(df, parts, pandas.DataFrame.to_csv, StringIO, **kwargs)
        if compression == 'gzip':
            logger.info('Using csv compression with gzip')
            content_type = 'text/csv'  # the original type
            content_encoding = 'gzip'  # MUST have or browsers will error
            gz_buffers = []
            for buffer in buffers:
                buffer.seek(0)
                gz_buffer = BytesIO()
                # compress string stream using gzip
                with gzip.GzipFile(mode='w', fileobj=gz_buffer) as gz_file:
                    gz_file.write(bytes(buffer.getvalue(), 'utf-8'))
                gz_buffers.append(gz_buffer)
            buffers = gz_buffers
    elif format == 'xlsx':
        kwargs['sheet_name'] = 'Sheet1'
        kwargs['index'] = False
        buffers = _get_splited_df_streams(df, parts, pandas.DataFrame.to_excel, BytesIO, **kwargs)
    elif format == 'parquet':
        # Pop the engine so it is not passed twice to the helper: the previous
        # code forwarded it both explicitly and inside **kwargs, raising
        # "got multiple values for keyword argument 'engine'".
        engine = kwargs.pop('engine', 'pyarrow')
        buffers = _get_splited_df_streams(df, parts, pandas.DataFrame.to_parquet, BytesIO, engine=engine, **kwargs)
    else:  # 'pickle' -- guaranteed by the format assert above
        buffers = _get_splited_df_streams(df, parts, pickle.dump, BytesIO)
        content_encoding = 'application/octet-stream'

    for bid, buffer in enumerate(buffers, start=1):
        if parts == 1:
            key_str = key
        else:
            # Multi-part: store parts under a folder named after the file
            # stem, numbering each object, e.g. a/b.csv -> a/b/b.1.csv.
            # NOTE(review): a key with no directory yields a leading '/' in
            # key_str -- confirm whether that is intended.
            dirname, basename = path.split(key)
            basename_parts = basename.split(sep='.')
            obj_name = '.'.join([basename_parts[0], str(bid)] + basename_parts[1:])
            key_str = '/'.join([dirname, basename_parts[0], obj_name])
        s3.put_object(
            Bucket=bucket,
            Key=key_str,
            ContentType=content_type,  # the original type
            ContentEncoding=content_encoding,  # MUST have or browsers will error
            Body=buffer.getvalue()
        )
    if compression is None:
        logger.info(f'File uploaded using format {format}')
    else:
        logger.info(f'File uploaded using format {format}, '
                    f'compression {compression}')
def get_df(s3: boto3.resources.base.ServiceResource,
           bucket: str,
           key: str,
           format: str,
           **kwargs):
    """
    Import object from s3 and convert to pandas_utils.DataFrame if possible

    :param s3: S3 client
    :param bucket: bucket name of the target file
    :param key: aws key of the target file
    :param format: file format to get DataFrame from, i.e csv
    :param '**kwargs': used for passing arguments to pandas reading methods
    :return: DataFrame built from the S3 object's body
    :rtype: pandas.DataFrame
    """
    assert format in ['csv', 'parquet', 'pickle', 'xlsx'], \
        'provider format value not accepted'
    response = s3.get_object(Bucket=bucket, Key=key)
    body = response['Body']
    if format == 'csv':
        return pandas.read_csv(body, **kwargs)
    if format == 'pickle':
        # NOTE: unpickling can execute arbitrary code; only read trusted objects.
        return pickle.loads(body.read(), **kwargs)
    if format == 'parquet':
        return pandas.read_parquet(BytesIO(body.read()), **kwargs)
    return pandas.read_excel(BytesIO(body.read()), **kwargs)
def get_df_from_keys(s3: boto3.resources.base.ServiceResource,
                     bucket: str,
                     prefix: str,
                     suffix: str = '',
                     **kwargs):
    """
    Build a DataFrame from multiple files in the same folder in S3

    :param s3: S3 client
    :param bucket: bucket name of the target files
    :param prefix: key prefix of the target files
    :param suffix: suffix to match when looking for files
    :param format: 'csv', 'parquet', 'xlsx', 'suffix' (detect from the first
        file extension) or 'mixed' (try every reader per file), default 'suffix'
    :param '**kwargs': forwarded to the pandas reading methods
    :return: concatenation of all readable files, or None if nothing matched
    :rtype: pandas.DataFrame
    """
    format = kwargs.pop('format', 'suffix')
    assert format in ["csv", "parquet", "xlsx", "suffix", "mixed"], f"{format} format not supported"
    if format == "mixed":
        logger.warning('Mixed format used, might discard files')
    l_df = list()
    for f in get_keys(s3, bucket, prefix=prefix, suffix=suffix):
        if f != prefix:
            if format == 'suffix':
                logger.warning('Auto format detection based on suffix used')
                # NOTE(review): the detected extension replaces 'suffix', so
                # every subsequent file is read with the FIRST file's format.
                format = f.split('.')[-1]
                l_df.append(get_df(s3, bucket, f, format, **kwargs))
            elif format == 'mixed':
                # Best effort: try each supported reader until one succeeds.
                # The break fixes a bug where a file parseable by more than
                # one reader was appended multiple times.
                processed = False
                for format_ in ['csv', 'parquet', 'xlsx']:
                    try:
                        l_df.append(get_df(s3, bucket, f, format_, **kwargs))
                        processed = True
                        break
                    except Exception:
                        pass
                if not processed:
                    logger.warning(f'No format matched for file {f}')
            else:
                l_df.append(get_df(s3, bucket, f, format, **kwargs))
    if l_df:
        return pandas.concat(l_df, axis=0, ignore_index=True) \
            .reset_index(drop=True)
    return None
| [
"logging.basicConfig",
"logging.getLogger",
"pandas.read_csv",
"io.BytesIO",
"os.path.split",
"numpy.array_split",
"gzip.GzipFile",
"pandas.ExcelWriter",
"pandas.concat"
] | [((190, 229), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (209, 229), False, 'import logging\n'), ((239, 266), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (256, 266), False, 'import logging\n'), ((2652, 2677), 'numpy.array_split', 'np.array_split', (['df', 'parts'], {}), '(df, parts)\n', (2666, 2677), True, 'import numpy as np\n'), ((2868, 2910), 'pandas.ExcelWriter', 'pandas.ExcelWriter', (['b'], {'engine': '"""xlsxwriter"""'}), "(b, engine='xlsxwriter')\n", (2886, 2910), False, 'import pandas\n'), ((6436, 6451), 'os.path.split', 'path.split', (['key'], {}), '(key)\n', (6446, 6451), False, 'from os import path\n'), ((8078, 8120), 'pandas.read_csv', 'pandas.read_csv', (["object_['Body']"], {}), "(object_['Body'], **kwargs)\n", (8093, 8120), False, 'import pandas\n'), ((5326, 5335), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (5333, 5335), False, 'from io import StringIO, BytesIO\n'), ((10212, 10258), 'pandas.concat', 'pandas.concat', (['l_df'], {'axis': '(0)', 'ignore_index': '(True)'}), '(l_df, axis=0, ignore_index=True)\n', (10225, 10258), False, 'import pandas\n'), ((5409, 5451), 'gzip.GzipFile', 'gzip.GzipFile', ([], {'mode': '"""w"""', 'fileobj': 'gz_buffer'}), "(mode='w', fileobj=gz_buffer)\n", (5422, 5451), False, 'import gzip\n')] |
import pandas as pd
import numpy as np
from typing import List, Optional
import matplotlib.pyplot as plt
import warnings
# Globally silence every warning for the whole process.
# NOTE(review): heavy-handed -- presumably meant to mute pandas/matplotlib
# chatter; confirm this should not be scoped more narrowly.
warnings.filterwarnings("ignore")
class Indices:
"""
Price Technical Indicators
"""
    def __init__(
        self, df: pd.DataFrame, date_col: str = "date", price_col: str = "price"
    ) -> None:
        """Store the price series and the column names used by the indicators.

        Args:
            df: DataFrame holding at least a date column and a price column.
            date_col: Name of the date column in ``df``.
            price_col: Name of the price column in ``df``.
        """
        self.df = df
        self.date_col = date_col
        self.price_col = price_col
def get_vola_index(
self, volatile_period: Optional[int] = 30
) -> pd.DataFrame:
"""
Volatility Index is a measure of market's expectation of volatility over
the near term.
Volatility is often described as the "rate and magnitude of changes in
prices" and in finance often referred to as risk.
Reference:
www.moneycontrol.com
Returns:
pd.DataFrame: Pandas DataFrame
"""
data = self.df.copy()
data = data.sort_values(by=self.date_col).reset_index(drop=True)
v = np.log(data[self.price_col]).diff().rolling(volatile_period).std() * np.sqrt(365)
df_bvol = pd.DataFrame(data={'BVOL_Index': v})
data = pd.concat([data, df_bvol], join="inner", axis=1)
data = data.dropna()
data = data.sort_values(by=self.date_col, ascending=False).reset_index(
drop=True
)
return data
    @staticmethod
    def get_vola_graph(
        data: pd.DataFrame, output_path: Optional[str] = "bvol_index.png"
    ) -> None:
        """
        Make a line graph of volatile index with respect to time: price in
        the top panel, BVOL index in the bottom panel, saved to output_path
        and shown interactively.

        Args:
            data(pd.DataFrame): Output of get_vola_index function
            output_path(str): Path to save plot
        """
        # `ax` is unused; the two panels below are created via plt.subplot.
        fig, ax = plt.subplots(figsize=(14, 12))
        rect = fig.patch
        rect.set_facecolor("yellow")
        # Top panel: price series (x tick labels suppressed).
        ax1 = plt.subplot(211)
        ax1.plot(data["date"], data["price"], color="blue", label="Price")
        plt.ylabel("Price", color="red", fontsize=20)
        ax1.axes.get_xaxis().set_ticks([])
        plt.legend()
        ax1.tick_params(axis="y", colors="b")
        ax1.grid(color="grey", linestyle="-", linewidth=0.25, alpha=0.5)
        # Bottom panel: volatility index with rotated date labels.
        ax2 = plt.subplot(212)
        ax2.plot(
            data["date"], data["BVOL_Index"], color="b", label="BVOL Index"
        )
        plt.xlabel("Time", color="red", fontsize=20)
        plt.ylabel("Volatility Index", color="r", fontsize=20)
        plt.legend()
        plt.setp(ax2.xaxis.get_majorticklabels(), rotation=90)
        ax2.grid(color="grey", linestyle="-", linewidth=0.25, alpha=0.5)
        ax2.tick_params(axis="x", colors="b")
        ax2.tick_params(axis="y", colors="b")
        plt.suptitle("Price and Volatility Index", color="red", fontsize=24)
        plt.savefig(output_path, bbox_inches="tight", facecolor="orange")
        plt.show()
def get_rsi(self) -> pd.DataFrame:
"""
Type:
Momentum indicator
Computation:
It is based on the average price increase during a period of
rising prices and average price fall during a period of
falling stock prices. Relative Strength Index (RSI) is
plotted between 0 and 100.
What it signals:
Usually, the market is treated as overbought when RSI
goes above 70 (80 for highly volatile stocks) and
oversold when it hits 30—20 for highly volatile stocks.
Reference:
https://economictimes.indiatimes.com/
Returns:
pd.DataFrame: Pandas DataFrame with RSI values
"""
data = self.df.copy()
data = data.sort_values(by=self.date_col).reset_index(drop=True)
data["price_change"] = data[self.price_col] - data[
self.price_col
].shift(1)
data.dropna(inplace=True)
data["gain"] = np.where(data["price_change"] >= 0, data["price_change"], 0)
data["loss"] = np.where(data["price_change"] <= 0, abs(data["price_change"]), 0)
data["gain_average"] = data["gain"].rolling(14).mean()
data["loss_average"] = data["loss"].rolling(14).mean()
data["RS"] = data["gain_average"] / data["loss_average"]
data["RSI_1"] = 100 * (1 - (1 / (1 + data["RS"])))
data["RS_Smooth"] = (
data["gain_average"].shift(1) * 13 + data["gain"]
) / (data["loss_average"].shift(1) * 13 + data["loss"])
data["RSI_2"] = 100 * (1 - (1 / (1 + data["RS_Smooth"])))
data = data.fillna(0).reset_index(drop=True)
data.drop(
[
"gain",
"loss",
"price_change",
"gain_average",
"loss_average",
"RS",
],
axis=1,
inplace=True,
)
data = data.sort_values(by=self.date_col, ascending=False).reset_index(
drop=True
)
return data
@staticmethod
def get_rsi_graph(data: pd.DataFrame) -> None:
"""
Plot RSI against date and price
Args:
data(pd.DataFrame): Output of get_rsi function.
"""
fig, ax = plt.subplots(figsize=(14, 12))
rect = fig.patch
rect.set_facecolor("yellow")
ax1 = plt.subplot(211)
ax1.plot(data["date"], data["price"], color="blue", label="Price")
plt.ylabel("Price ($)", color="red", fontsize=20)
ax1.axes.get_xaxis().set_ticks([])
plt.legend()
ax1.tick_params(axis="y", colors="b")
ax2 = plt.subplot(212)
ax2.plot(data["date"], data["RSI_2"], color="b", label="RSI")
plt.xlabel("Time", color="red", fontsize=20)
plt.ylabel("Relative Strength Index (RSI)", color="r", fontsize=20)
plt.text(
data["date"][int(len(data) / 2)],
80,
">70 OverBought",
fontsize=20,
color="black",
)
plt.text(
data["date"][int(len(data) / 2)],
15,
"<30 OverSold",
fontsize=20,
color="black",
)
plt.legend()
plt.setp(ax2.xaxis.get_majorticklabels(), rotation=90)
ax2.tick_params(axis="x", colors="b")
ax2.tick_params(axis="y", colors="b")
ax2.axhline(y=70, color="r")
ax2.axhline(y=30, color="r")
plt.suptitle(
"Price and Relative Strength Index", color="red", fontsize=24
)
plt.savefig("rsi.png", bbox_inches="tight", facecolor="orange")
plt.show()
def get_bollinger_bands(
self,
days: Optional[int] = 20,
plot: Optional[bool] = False,
out_path: Optional[str] = "bollinger_bands.png",
) -> pd.DataFrame:
"""
Type:
Trend, volatility, momentum indicator
Computation:
They comprise three lines: A 20-day moving average, an upper
band and lower band—the upper and lower bands are plotted as
two standard deviations from the moving average.
What it signals:
The moving average shows the trend, the gap between
upper and lower band shows volatility in the counter.
References:
1. https://economictimes.indiatimes.com/
2. https://www.bollingerbands.com/bollinger-bands
Args:
days (int): Number of days to calculate moving average
plot (bool): If plot bollinger bands
out_path (str): Save path for plot
Returns:
pd.DataFrame: A pandas DataFrame and save a plot to given path.
"""
data = self.df.copy()
data = data.sort_values(by=self.date_col).reset_index(drop=True)
data["SMA"] = data[self.price_col].rolling(days).mean()
data["SD"] = data[self.price_col].rolling(days).std()
data["BB_upper"] = data["SMA"] + data["SD"] * 2
data["BB_lower"] = data["SMA"] - data["SMA"] * 2
data.drop(["SD", "SMA"], axis=1, inplace=True)
data = data.sort_values(by=self.date_col, ascending=False).reset_index(
drop=True
)
while plot:
fig, ax = plt.subplots(figsize=(16, 12))
plt.plot(data[self.date_col], data["BB_upper"], color="g")
plt.plot(data[self.date_col], data["BB_lower"], color="g")
plt.plot(data[self.date_col], data[self.price_col], color="orange")
plt.legend()
plt.xlabel("Time", color="b", fontsize=22)
plt.ylabel("Price", color="b", fontsize=22)
plt.title("Bollinger Bands", color="b", fontsize=27)
plt.tick_params(labelsize=17)
fig.set_facecolor("yellow")
plt.grid()
plt.savefig(
out_path, bbox_inches="tight", facecolor="orange",
)
plt.show()
break
return data
def get_moving_average_convergence_divergence(
self, plot: Optional[bool] = False, out_path: Optional[str] = "macd.png"
) -> pd.DataFrame:
"""
Type
Trend and momentum indicator
Computation
The difference between 12 and 26-day moving averages.
What it signals
Rising Moving Average Convergence Divergence (MACD) indicates an
upward price trend and falling MACD indicates a downward price trend.
Reference:
https://economictimes.indiatimes.com/
Args:
plot (bool): If plot bollinger bands
out_path (str): Save path for plot
Returns:
pd.DataFrame: Pandas DataFrame with MACD values
"""
data = self.df.copy()
data["EMA_12"] = data[self.price_col].ewm(span=12, adjust=False).mean()
data["EMA_26"] = data[self.price_col].ewm(span=26, adjust=False).mean()
data["MACD"] = data["EMA_12"] - data["EMA_26"]
data.drop(["EMA_12", "EMA_26"], axis=1, inplace=True)
data = data.dropna()
while plot:
fig, ax = plt.subplots(figsize=(14, 9))
plt.plot(
data[self.date_col],
data[self.price_col],
color="r",
label="Price",
)
plt.plot(data[self.date_col], data["MACD"], color="b", label="MACD")
plt.legend()
plt.title("Price and MACD Plot", fontsize=28, color="b")
plt.xlabel("Time", color="b", fontsize=19)
plt.ylabel("Price", color="b", fontsize=19)
plt.savefig(out_path, bbox_inches="tight", facecolor="orange")
fig.set_facecolor("orange")
plt.show()
break
return data
def get_simple_moving_average(
self,
days: Optional[int] = 15,
plot: Optional[bool] = False,
out_path: Optional[str] = "sma.png",
):
"""
Simple moving average of given days
Args:
days (int): Number of days to calculate SMA
plot (bool): If plot bollinger bands
out_path (str): Save path for plot
Returns:
pd.DataFrame: Pandas DataFrame with SMA values
"""
data = self.df.copy()
data = data.sort_values(by=self.date_col).reset_index(drop=True)
data["SMA"] = data[self.price_col].rolling(days).mean()
data = data.dropna()
data = data.sort_values(by=self.date_col, ascending=False).reset_index(
drop=True
)
while plot:
fig, ax = plt.subplots(figsize=(14, 9))
plt.plot(
data[self.date_col],
data[self.price_col],
color="r",
label="Price",
)
plt.plot(data[self.date_col], data["SMA"], color="b", label="SMA")
plt.legend()
plt.title("Price and SMA Plot", fontsize=28, color="b")
plt.xlabel("Time", color="b", fontsize=19)
plt.ylabel("Price", color="b", fontsize=19)
plt.savefig(out_path, bbox_inches="tight", facecolor="orange")
fig.set_facecolor("orange")
plt.show()
break
return data
def get_exponential_moving_average(
self,
periods: List[int] = [20],
plot: Optional[bool] = False,
out_path: Optional[str] = "ema.png",
):
"""
The EMA is a moving average that places a greater weight and
significance on the most recent data points. Like all moving averages,
this technical indicator is used to produce buy and sell signals based
on crossovers and divergences from the historical average.
Traders often use several different EMA days, for instance, 20-day,
30-day, 90-day, and 200-day moving averages.
Reference:
https://www.investopedia.com/
Args:
periods (list): List of period to calculate EMA
days (int): Number of days to calculate SMA
plot (bool): If plot bollinger bands
out_path (str): Save path for plot
Returns:
pd.DataFrame: Pandas DataFrame with EMA values
"""
data = self.df.copy()
for period in periods:
data["EMA_{}".format(period)] = (
data[self.price_col].ewm(span=period, adjust=False).mean()
)
while plot:
fig, ax = plt.subplots(figsize=(14, 9))
plt.plot(
data[self.date_col],
data[self.price_col],
color="r",
label="Price",
)
for period in periods:
plt.plot(
data[self.date_col],
data["EMA_{}".format(period)],
label="EMA_{}".format(period),
)
plt.legend()
plt.title("Price and EMA Plot", fontsize=28, color="b")
plt.xlabel("Time", color="b", fontsize=19)
plt.ylabel("Price/EMA", color="b", fontsize=19)
plt.savefig(out_path, bbox_inches="tight", facecolor="orange")
fig.set_facecolor("orange")
plt.show()
break
return data
| [
"warnings.filterwarnings",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.tick_params",
"numpy.log",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.suptitl... | [((122, 155), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (145, 155), False, 'import warnings\n'), ((1115, 1151), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'BVOL_Index': v}"}), "(data={'BVOL_Index': v})\n", (1127, 1151), True, 'import pandas as pd\n'), ((1167, 1215), 'pandas.concat', 'pd.concat', (['[data, df_bvol]'], {'join': '"""inner"""', 'axis': '(1)'}), "([data, df_bvol], join='inner', axis=1)\n", (1176, 1215), True, 'import pandas as pd\n'), ((1745, 1775), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(14, 12)'}), '(figsize=(14, 12))\n', (1757, 1775), True, 'import matplotlib.pyplot as plt\n'), ((1852, 1868), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (1863, 1868), True, 'import matplotlib.pyplot as plt\n'), ((1952, 1997), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {'color': '"""red"""', 'fontsize': '(20)'}), "('Price', color='red', fontsize=20)\n", (1962, 1997), True, 'import matplotlib.pyplot as plt\n'), ((2049, 2061), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2059, 2061), True, 'import matplotlib.pyplot as plt\n'), ((2196, 2212), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (2207, 2212), True, 'import matplotlib.pyplot as plt\n'), ((2325, 2369), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'color': '"""red"""', 'fontsize': '(20)'}), "('Time', color='red', fontsize=20)\n", (2335, 2369), True, 'import matplotlib.pyplot as plt\n'), ((2378, 2432), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Volatility Index"""'], {'color': '"""r"""', 'fontsize': '(20)'}), "('Volatility Index', color='r', fontsize=20)\n", (2388, 2432), True, 'import matplotlib.pyplot as plt\n'), ((2441, 2453), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2451, 2453), True, 'import matplotlib.pyplot as plt\n'), ((2692, 2762), 'matplotlib.pyplot.suptitle', 
'plt.suptitle', (['"""Price and Volatility Index"""'], {'color': '"""red"""', 'fontsize': '(24)'}), "('Price and Volatility Index', color='red', fontsize=24)\n", (2704, 2762), True, 'import matplotlib.pyplot as plt\n'), ((2771, 2836), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_path'], {'bbox_inches': '"""tight"""', 'facecolor': '"""orange"""'}), "(output_path, bbox_inches='tight', facecolor='orange')\n", (2782, 2836), True, 'import matplotlib.pyplot as plt\n'), ((2845, 2855), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2853, 2855), True, 'import matplotlib.pyplot as plt\n'), ((3945, 4005), 'numpy.where', 'np.where', (["(data['price_change'] >= 0)", "data['price_change']", '(0)'], {}), "(data['price_change'] >= 0, data['price_change'], 0)\n", (3953, 4005), True, 'import numpy as np\n'), ((5255, 5285), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(14, 12)'}), '(figsize=(14, 12))\n', (5267, 5285), True, 'import matplotlib.pyplot as plt\n'), ((5362, 5378), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (5373, 5378), True, 'import matplotlib.pyplot as plt\n'), ((5462, 5511), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price ($)"""'], {'color': '"""red"""', 'fontsize': '(20)'}), "('Price ($)', color='red', fontsize=20)\n", (5472, 5511), True, 'import matplotlib.pyplot as plt\n'), ((5563, 5575), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5573, 5575), True, 'import matplotlib.pyplot as plt\n'), ((5637, 5653), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (5648, 5653), True, 'import matplotlib.pyplot as plt\n'), ((5732, 5776), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'color': '"""red"""', 'fontsize': '(20)'}), "('Time', color='red', fontsize=20)\n", (5742, 5776), True, 'import matplotlib.pyplot as plt\n'), ((5785, 5852), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative Strength Index (RSI)"""'], {'color': '"""r"""', 'fontsize': 
'(20)'}), "('Relative Strength Index (RSI)', color='r', fontsize=20)\n", (5795, 5852), True, 'import matplotlib.pyplot as plt\n'), ((6203, 6215), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6213, 6215), True, 'import matplotlib.pyplot as plt\n'), ((6456, 6534), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Price and Relative Strength Index"""'], {'color': '"""red"""', 'fontsize': '(24)'}), "('Price and Relative Strength Index', color='red', fontsize=24)\n", (6468, 6534), True, 'import matplotlib.pyplot as plt\n'), ((6565, 6628), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""rsi.png"""'], {'bbox_inches': '"""tight"""', 'facecolor': '"""orange"""'}), "('rsi.png', bbox_inches='tight', facecolor='orange')\n", (6576, 6628), True, 'import matplotlib.pyplot as plt\n'), ((6637, 6647), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6645, 6647), True, 'import matplotlib.pyplot as plt\n'), ((1084, 1096), 'numpy.sqrt', 'np.sqrt', (['(365)'], {}), '(365)\n', (1091, 1096), True, 'import numpy as np\n'), ((8339, 8369), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(16, 12)'}), '(figsize=(16, 12))\n', (8351, 8369), True, 'import matplotlib.pyplot as plt\n'), ((8382, 8440), 'matplotlib.pyplot.plot', 'plt.plot', (['data[self.date_col]', "data['BB_upper']"], {'color': '"""g"""'}), "(data[self.date_col], data['BB_upper'], color='g')\n", (8390, 8440), True, 'import matplotlib.pyplot as plt\n'), ((8453, 8511), 'matplotlib.pyplot.plot', 'plt.plot', (['data[self.date_col]', "data['BB_lower']"], {'color': '"""g"""'}), "(data[self.date_col], data['BB_lower'], color='g')\n", (8461, 8511), True, 'import matplotlib.pyplot as plt\n'), ((8524, 8591), 'matplotlib.pyplot.plot', 'plt.plot', (['data[self.date_col]', 'data[self.price_col]'], {'color': '"""orange"""'}), "(data[self.date_col], data[self.price_col], color='orange')\n", (8532, 8591), True, 'import matplotlib.pyplot as plt\n'), ((8604, 8616), 'matplotlib.pyplot.legend', 'plt.legend', 
([], {}), '()\n', (8614, 8616), True, 'import matplotlib.pyplot as plt\n'), ((8629, 8671), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'color': '"""b"""', 'fontsize': '(22)'}), "('Time', color='b', fontsize=22)\n", (8639, 8671), True, 'import matplotlib.pyplot as plt\n'), ((8684, 8727), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {'color': '"""b"""', 'fontsize': '(22)'}), "('Price', color='b', fontsize=22)\n", (8694, 8727), True, 'import matplotlib.pyplot as plt\n'), ((8740, 8792), 'matplotlib.pyplot.title', 'plt.title', (['"""Bollinger Bands"""'], {'color': '"""b"""', 'fontsize': '(27)'}), "('Bollinger Bands', color='b', fontsize=27)\n", (8749, 8792), True, 'import matplotlib.pyplot as plt\n'), ((8805, 8834), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(17)'}), '(labelsize=17)\n', (8820, 8834), True, 'import matplotlib.pyplot as plt\n'), ((8887, 8897), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8895, 8897), True, 'import matplotlib.pyplot as plt\n'), ((8910, 8972), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_path'], {'bbox_inches': '"""tight"""', 'facecolor': '"""orange"""'}), "(out_path, bbox_inches='tight', facecolor='orange')\n", (8921, 8972), True, 'import matplotlib.pyplot as plt\n'), ((9016, 9026), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9024, 9026), True, 'import matplotlib.pyplot as plt\n'), ((10208, 10237), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(14, 9)'}), '(figsize=(14, 9))\n', (10220, 10237), True, 'import matplotlib.pyplot as plt\n'), ((10250, 10327), 'matplotlib.pyplot.plot', 'plt.plot', (['data[self.date_col]', 'data[self.price_col]'], {'color': '"""r"""', 'label': '"""Price"""'}), "(data[self.date_col], data[self.price_col], color='r', label='Price')\n", (10258, 10327), True, 'import matplotlib.pyplot as plt\n'), ((10419, 10487), 'matplotlib.pyplot.plot', 'plt.plot', (['data[self.date_col]', "data['MACD']"], {'color': 
'"""b"""', 'label': '"""MACD"""'}), "(data[self.date_col], data['MACD'], color='b', label='MACD')\n", (10427, 10487), True, 'import matplotlib.pyplot as plt\n'), ((10500, 10512), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10510, 10512), True, 'import matplotlib.pyplot as plt\n'), ((10525, 10581), 'matplotlib.pyplot.title', 'plt.title', (['"""Price and MACD Plot"""'], {'fontsize': '(28)', 'color': '"""b"""'}), "('Price and MACD Plot', fontsize=28, color='b')\n", (10534, 10581), True, 'import matplotlib.pyplot as plt\n'), ((10594, 10636), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'color': '"""b"""', 'fontsize': '(19)'}), "('Time', color='b', fontsize=19)\n", (10604, 10636), True, 'import matplotlib.pyplot as plt\n'), ((10649, 10692), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {'color': '"""b"""', 'fontsize': '(19)'}), "('Price', color='b', fontsize=19)\n", (10659, 10692), True, 'import matplotlib.pyplot as plt\n'), ((10705, 10767), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_path'], {'bbox_inches': '"""tight"""', 'facecolor': '"""orange"""'}), "(out_path, bbox_inches='tight', facecolor='orange')\n", (10716, 10767), True, 'import matplotlib.pyplot as plt\n'), ((10820, 10830), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10828, 10830), True, 'import matplotlib.pyplot as plt\n'), ((11706, 11735), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(14, 9)'}), '(figsize=(14, 9))\n', (11718, 11735), True, 'import matplotlib.pyplot as plt\n'), ((11748, 11825), 'matplotlib.pyplot.plot', 'plt.plot', (['data[self.date_col]', 'data[self.price_col]'], {'color': '"""r"""', 'label': '"""Price"""'}), "(data[self.date_col], data[self.price_col], color='r', label='Price')\n", (11756, 11825), True, 'import matplotlib.pyplot as plt\n'), ((11917, 11983), 'matplotlib.pyplot.plot', 'plt.plot', (['data[self.date_col]', "data['SMA']"], {'color': '"""b"""', 'label': '"""SMA"""'}), "(data[self.date_col], 
data['SMA'], color='b', label='SMA')\n", (11925, 11983), True, 'import matplotlib.pyplot as plt\n'), ((11996, 12008), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12006, 12008), True, 'import matplotlib.pyplot as plt\n'), ((12021, 12076), 'matplotlib.pyplot.title', 'plt.title', (['"""Price and SMA Plot"""'], {'fontsize': '(28)', 'color': '"""b"""'}), "('Price and SMA Plot', fontsize=28, color='b')\n", (12030, 12076), True, 'import matplotlib.pyplot as plt\n'), ((12089, 12131), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'color': '"""b"""', 'fontsize': '(19)'}), "('Time', color='b', fontsize=19)\n", (12099, 12131), True, 'import matplotlib.pyplot as plt\n'), ((12144, 12187), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {'color': '"""b"""', 'fontsize': '(19)'}), "('Price', color='b', fontsize=19)\n", (12154, 12187), True, 'import matplotlib.pyplot as plt\n'), ((12200, 12262), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_path'], {'bbox_inches': '"""tight"""', 'facecolor': '"""orange"""'}), "(out_path, bbox_inches='tight', facecolor='orange')\n", (12211, 12262), True, 'import matplotlib.pyplot as plt\n'), ((12315, 12325), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12323, 12325), True, 'import matplotlib.pyplot as plt\n'), ((13594, 13623), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(14, 9)'}), '(figsize=(14, 9))\n', (13606, 13623), True, 'import matplotlib.pyplot as plt\n'), ((13636, 13713), 'matplotlib.pyplot.plot', 'plt.plot', (['data[self.date_col]', 'data[self.price_col]'], {'color': '"""r"""', 'label': '"""Price"""'}), "(data[self.date_col], data[self.price_col], color='r', label='Price')\n", (13644, 13713), True, 'import matplotlib.pyplot as plt\n'), ((14027, 14039), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (14037, 14039), True, 'import matplotlib.pyplot as plt\n'), ((14052, 14107), 'matplotlib.pyplot.title', 'plt.title', (['"""Price and EMA Plot"""'], 
{'fontsize': '(28)', 'color': '"""b"""'}), "('Price and EMA Plot', fontsize=28, color='b')\n", (14061, 14107), True, 'import matplotlib.pyplot as plt\n'), ((14120, 14162), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'color': '"""b"""', 'fontsize': '(19)'}), "('Time', color='b', fontsize=19)\n", (14130, 14162), True, 'import matplotlib.pyplot as plt\n'), ((14175, 14222), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price/EMA"""'], {'color': '"""b"""', 'fontsize': '(19)'}), "('Price/EMA', color='b', fontsize=19)\n", (14185, 14222), True, 'import matplotlib.pyplot as plt\n'), ((14235, 14297), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_path'], {'bbox_inches': '"""tight"""', 'facecolor': '"""orange"""'}), "(out_path, bbox_inches='tight', facecolor='orange')\n", (14246, 14297), True, 'import matplotlib.pyplot as plt\n'), ((14350, 14360), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14358, 14360), True, 'import matplotlib.pyplot as plt\n'), ((1015, 1043), 'numpy.log', 'np.log', (['data[self.price_col]'], {}), '(data[self.price_col])\n', (1021, 1043), True, 'import numpy as np\n')] |
import copy

import numpy as np
import torch.autograd
import torch.optim as optim
from torch.autograd import Variable

from ddpg.memory import Memory
from ddpg.model import Critic
class DDPGagent:
    """Deep Deterministic Policy Gradient (DDPG) agent.

    Holds an actor (the externally supplied `anf` network), a critic, their
    target copies, a replay memory, and SGD optimisers for both networks.
    """

    def __init__(self, num_inputs, num_outputs, anf, hidden_size=128, actor_learning_rate=1e-6 * 7,
                 critic_learning_rate=1e-4, gamma=0.99, tau=1e-3, max_memory_size=50000):
        """
        Args:
            num_inputs: dimensionality of the state vector.
            num_outputs: dimensionality of the action vector.
            anf: actor network (a torch module mapping states to actions).
            hidden_size: hidden width of the critic networks.
            actor_learning_rate: SGD learning rate for the actor.
            critic_learning_rate: SGD learning rate for the critic.
            gamma: discount factor for the TD target.
            tau: soft (Polyak) target-update coefficient.
            max_memory_size: replay memory capacity.
        """
        # Params
        self.num_states = num_inputs
        self.num_actions = num_outputs
        self.gamma = gamma
        self.tau = tau
        self.curr_states = np.array([0, 0, 0])
        # Networks
        self.actor = anf
        # BUG FIX: the target actor must be an independent copy. The previous
        # `self.actor_target = anf` aliased the same object, which made the
        # soft update below a no-op (p*tau + p*(1-tau) == p) and removed the
        # target-network lag that stabilises DDPG.
        self.actor_target = copy.deepcopy(anf)
        self.critic = Critic(self.num_states + self.num_actions, hidden_size, self.num_actions)
        self.critic_target = Critic(self.num_states + self.num_actions, hidden_size, self.num_actions)
        # Standard DDPG initialisation: start the target critic from the same
        # weights as the critic instead of an unrelated random init.
        self.critic_target.load_state_dict(self.critic.state_dict())
        # Training
        self.memory = Memory(max_memory_size)
        self.critic_criterion = torch.nn.MSELoss(reduction='sum')
        self.actor_optimizer = optim.SGD(self.actor.parameters(), lr=actor_learning_rate, momentum=0.99)
        self.critic_optimizer = optim.SGD(self.critic.parameters(), lr=critic_learning_rate, momentum=0.99)

    def get_action(self, state):
        """Return the actor's deterministic action for a single numpy state."""
        state = Variable(torch.from_numpy(state).float().unsqueeze(0))
        action = self.actor.forward(state)
        action = action.detach().numpy()[0, 0]
        return action

    def update(self, batch_size):
        """Sample a replay batch and update critic, actor and target networks."""
        states, actions, rewards, next_states, _ = self.memory.sample(batch_size)
        states = torch.FloatTensor(states)
        actions = torch.FloatTensor(actions)
        rewards = torch.FloatTensor(rewards)
        next_states = torch.FloatTensor(next_states)
        actions = torch.reshape(actions, (batch_size, 1))

        # Critic loss: TD target computed from the target networks.
        Qvals = self.critic.forward(states, actions)
        next_actions = self.actor_target.forward(next_states)
        next_Q = self.critic_target.forward(next_states, next_actions.detach())
        Qprime = rewards + self.gamma * next_Q
        # NOTE(review): the /5 scaling and the clamp to 20 below look like
        # ad-hoc stabilisation heuristics — preserved as-is.
        critic_loss = self.critic_criterion(Qvals, Qprime) / 5.
        if critic_loss.item() > 20:
            critic_loss = critic_loss / critic_loss.item() * 20.0

        # Actor loss (the double negation with /-10. is preserved as-is).
        policy_loss = -self.critic.forward(states, self.actor.forward(states)).mean() / -10.

        # update networks
        self.actor_optimizer.zero_grad()
        policy_loss.backward()
        self.actor_optimizer.step()

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        # Soft (Polyak) update of the target networks.
        for target_param, param in zip(self.actor_target.parameters(), self.actor.parameters()):
            target_param.data.copy_(param.data * self.tau + target_param.data * (1.0 - self.tau))

        for target_param, param in zip(self.critic_target.parameters(), self.critic.parameters()):
            target_param.data.copy_(param.data * self.tau + target_param.data * (1.0 - self.tau))
| [
"numpy.array",
"ddpg.model.Critic",
"ddpg.memory.Memory"
] | [((546, 565), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (554, 565), True, 'import numpy as np\n'), ((664, 737), 'ddpg.model.Critic', 'Critic', (['(self.num_states + self.num_actions)', 'hidden_size', 'self.num_actions'], {}), '(self.num_states + self.num_actions, hidden_size, self.num_actions)\n', (670, 737), False, 'from ddpg.model import Critic\n'), ((767, 840), 'ddpg.model.Critic', 'Critic', (['(self.num_states + self.num_actions)', 'hidden_size', 'self.num_actions'], {}), '(self.num_states + self.num_actions, hidden_size, self.num_actions)\n', (773, 840), False, 'from ddpg.model import Critic\n'), ((883, 906), 'ddpg.memory.Memory', 'Memory', (['max_memory_size'], {}), '(max_memory_size)\n', (889, 906), False, 'from ddpg.memory import Memory\n')] |
import subprocess
from mock import MagicMock
import nglview
import numpy as np
# local
from make_dummy_comm import *
# TODO : add more show_xxx
# (check test_widgets.py)
def _write(*args, **kargs):
    """Fake write method: copy the bundled sample PDB to ./tmp.pdb.

    All arguments are ignored. Uses shutil.copy instead of spawning the
    external `cp` binary, so it is portable (e.g. works on Windows) and
    avoids a subprocess per call.
    """
    import shutil
    shutil.copy(nglview.datafiles.PDB, 'tmp.pdb')
class MockStructure:
    """Minimal stand-in for an iotbx structure object."""

    def as_pdb_string(self):
        """Return the text of the bundled sample PDB file."""
        with open(nglview.datafiles.PDB) as pdb_file:
            return pdb_file.read()
# Minimal stand-in for a PyRosetta Pose object.
class MockRosettaPose:
    # dump_pdb ignores the requested output path; _write() just copies the
    # bundled sample PDB to ./tmp.pdb.
    def dump_pdb(self, _):
        _write()
def test_show_schrodinger():
    """show_schrodinger should accept any object exposing a write method."""
    structure = MagicMock()
    structure.write = _write
    nglview.show_schrodinger(structure)
def test_show_htmd():
    """show_htmd should handle a multi-frame, molecule-like mock."""
    n_frames = 10
    mol = MagicMock()
    mol.write = _write
    mol.coordinates = np.zeros((n_frames, 1000, 3))
    mol.numFrames = n_frames
    nglview.show_htmd(mol)
def test_show_rosetta():
    """show_rosetta should render a pose exposing dump_pdb."""
    nglview.show_rosetta(MockRosettaPose())
def test_show_iotbx():
    """show_iotbx should render a structure exposing as_pdb_string."""
    nglview.show_iotbx(MockStructure())
| [
"mock.MagicMock",
"subprocess.check_call",
"nglview.show_schrodinger",
"numpy.zeros",
"nglview.show_iotbx",
"nglview.show_rosetta",
"nglview.show_htmd"
] | [((230, 293), 'subprocess.check_call', 'subprocess.check_call', (["['cp', nglview.datafiles.PDB, 'tmp.pdb']"], {}), "(['cp', nglview.datafiles.PDB, 'tmp.pdb'])\n", (251, 293), False, 'import subprocess\n'), ((530, 541), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (539, 541), False, 'from mock import MagicMock\n'), ((567, 594), 'nglview.show_schrodinger', 'nglview.show_schrodinger', (['s'], {}), '(s)\n', (591, 594), False, 'import nglview\n'), ((629, 640), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (638, 640), False, 'from mock import MagicMock\n'), ((704, 733), 'numpy.zeros', 'np.zeros', (['(n_frames, 1000, 3)'], {}), '((n_frames, 1000, 3))\n', (712, 733), True, 'import numpy as np\n'), ((774, 796), 'nglview.show_htmd', 'nglview.show_htmd', (['mol'], {}), '(mol)\n', (791, 796), False, 'import nglview\n'), ((873, 899), 'nglview.show_rosetta', 'nglview.show_rosetta', (['pose'], {}), '(pose)\n', (893, 899), False, 'import nglview\n'), ((955, 978), 'nglview.show_iotbx', 'nglview.show_iotbx', (['mol'], {}), '(mol)\n', (973, 978), False, 'import nglview\n')] |
import torch
import numpy as np
from pathlib import Path
from torch.utils.data import Dataset, DataLoader, Sampler
from torch.nn.utils.rnn import pad_sequence
import src.monitor.logger as logger
import random
#TODO: remove
BUCKET_SIZE=1
ILEN_MIN = 2
ILEN_MAX = 10000
# def _seed_worker(worker_idx):
# seed = torch.initial_seed() & ((1 << 63) - 1)
# random.seed(seed)
# np.random.seed((seed >> 32, seed % (1 << 32)))
## Customized function
def collate_fn(batch):
    """Collate a list of sample dicts into a padded mini-batch.

    Args:
        batch (list[dict]): samples with keys 'feat' (T x D tensor),
            'ilen', 'label' and 'olen' (sorted in place, longest first).

    Returns:
        tuple: (padded feature tensor, input lengths, list of label tensors,
        output lengths), with samples ordered by decreasing input length.
    """
    batch.sort(key=lambda sample: sample['ilen'], reverse=True)
    feats = [sample['feat'] for sample in batch]
    xs_pad = pad_sequence(feats, batch_first=True)
    ilens = torch.stack([sample['ilen'] for sample in batch])
    ys = [sample['label'] for sample in batch]
    olens = torch.stack([sample['olen'] for sample in batch])
    return xs_pad, ilens, ys, olens
class BucketSampler(Sampler):
    """Batch sampler that buckets samples by input length.

    Samples whose input lengths fall into the same length bin (bins of width
    `bucket_size` between `min_ilen` and `max_ilen`) are batched together so
    each mini-batch contains similarly-sized sequences, minimising padding.
    Buckets past `half_batch_ilen` (in the sorting direction) use half the
    batch size to bound per-batch memory for long inputs.
    """

    def __init__(self, ilens, min_ilen, max_ilen, half_batch_ilen,
                 batch_size, bucket_size, bucket_reverse, drop_last):
        self.ilens = ilens
        self.min_ilen = min_ilen
        self.max_ilen = max_ilen
        # No threshold supplied means "never halve the batch size".
        self.half_batch_ilen = half_batch_ilen if half_batch_ilen else ILEN_MAX
        self.batch_size = batch_size
        self.bucket_size = bucket_size
        self.drop_last = drop_last
        self.bucket_reverse = bucket_reverse  # if True: long -> short
        self._create_buckets()

    def __iter__(self):
        """Yield batches (lists of indices); bucket contents are reshuffled each epoch."""
        for bin_idx, bucket in self.buckets:
            batch_size = self._get_batch_size(bin_idx)
            np.random.shuffle(bucket)
            batch = []
            for idx in bucket:
                batch.append(idx)
                if len(batch) == batch_size:
                    yield batch
                    batch = []
            if len(batch) > 0 and not self.drop_last:
                yield batch

    def __len__(self):
        """Total number of batches per epoch (independent of shuffling)."""
        num_batchs = 0
        for bin_idx, bucket in self.buckets:
            batch_size = self._get_batch_size(bin_idx)
            if self.drop_last:
                num_batchs += len(bucket) // batch_size
            else:
                num_batchs += (len(bucket) + batch_size - 1) // batch_size
        return num_batchs

    def _get_batch_size(self, bin_idx):
        """Half the batch size past the half_batch_ilen bin (direction-aware)."""
        if self.bucket_reverse:
            batch_size = max(1, self.batch_size // 2) if bin_idx < self.half_batch_size_bucket_idx else self.batch_size
        else:
            batch_size = max(1, self.batch_size // 2) if bin_idx > self.half_batch_size_bucket_idx else self.batch_size
        return batch_size

    def _create_buckets(self):
        """Bin sample indices by input length and shuffle bucket order."""
        lb = min(ILEN_MIN, self.bucket_size) if not self.min_ilen else self.min_ilen
        ub = max(ILEN_MAX, self.ilens.max()) if not self.max_ilen else self.max_ilen
        if self.bucket_reverse:
            bins = np.arange(ub, lb, -self.bucket_size)  # long -> short
        else:
            bins = np.arange(lb, ub, self.bucket_size)  # short -> long
        bucket_idx = np.digitize(self.ilens, bins, right=True)
        self.half_batch_size_bucket_idx = np.digitize(self.half_batch_ilen, bins, right=True)
        self.buckets = []
        # NOTE: bins 0 and len(bins)-1 are intentionally excluded (out-of-range
        # lengths) — behaviour preserved from the original implementation.
        for bin_idx in range(1, len(bins) - 1):
            bucket = np.where(bucket_idx == bin_idx)[0]
            if len(bucket) > 0:
                self.buckets.append((bin_idx, bucket))
        random.shuffle(self.buckets)
#TODO: In addition to npy and memmap choice, still need KaldiDataset
class ESPDataset(Dataset):
    """Dataset of preprocessed speech features and labels stored as flat arrays.

    Features (and labels) for all utterances are concatenated into single
    arrays on disk; per-sample slices are recovered through cumulative-length
    pointer arrays built from 'ilens.npy' / 'olens.npy'.

    Args:
        data_dir: directory (pathlib.Path) holding feat/ilens/label/olens.
        is_memmap (bool): memory-map 'feat.dat' instead of loading 'feat.npy'
            fully into RAM.
    """

    def __init__(self, data_dir, is_memmap):
        self.feat = self._load_features(data_dir, is_memmap)
        # Prefix sums over the lengths give each sample's start offset.
        self.ilens = np.load(data_dir.joinpath('ilens.npy'))
        self.iptr = np.zeros(len(self.ilens) + 1, dtype=int)
        self.ilens.cumsum(out=self.iptr[1:])
        self.label = np.load(data_dir.joinpath('label.npy'))
        self.olens = np.load(data_dir.joinpath('olens.npy'))
        self.optr = np.zeros(len(self.olens) + 1, dtype=int)
        self.olens.cumsum(out=self.optr[1:])
        assert len(self.ilens) == len(self.olens), \
            "Number of samples should be the same in features and labels"

    @staticmethod
    def _load_features(data_dir, is_memmap):
        """Load the concatenated feature matrix, memory-mapped if requested."""
        if is_memmap:
            feat_path = data_dir.joinpath('feat').with_suffix('.dat')
            logger.log(f"Loading {feat_path} from memmap...",prefix='info')
            return np.load(feat_path, mmap_mode='r')
        feat_path = data_dir.joinpath('feat').with_suffix('.npy')
        logger.warning(f"Loading whole data ({feat_path}) into RAM")
        return np.load(feat_path)

    def __len__(self):
        """Number of samples (utterances)."""
        return len(self.ilens)

    def __getitem__(self, idx):
        """Return one sample as tensors sliced out of the flat arrays."""
        feat_slice = slice(self.iptr[idx], self.iptr[idx + 1])
        label_slice = slice(self.optr[idx], self.optr[idx + 1])
        return {
            'feat': torch.as_tensor(self.feat[feat_slice, :]),
            'ilen': torch.as_tensor(self.ilens[idx]),
            'label': torch.as_tensor(self.label[label_slice]),
            'olen': torch.as_tensor(self.olens[idx]),
        }
def get_loader(data_dir, batch_size, is_memmap, is_bucket, num_workers=0,
               min_ilen=None, max_ilen=None, half_batch_ilen=None,
               bucket_reverse=False, shuffle=True, read_file=False,
               drop_last=False, pin_memory=True):
    """Build a DataLoader over an ESPDataset.

    Args:
        data_dir: dataset directory holding feat/ilens/label/olens files
        batch_size: batch size (in bucket mode, batching is done by the sampler)
        is_memmap: memory-map features instead of loading them into RAM
        is_bucket: use length-bucketed batching via BucketSampler
        num_workers: DataLoader workers (forced to 0 when data is in RAM)
        min_ilen, max_ilen, half_batch_ilen, bucket_reverse: BucketSampler options
        shuffle: shuffle samples (non-bucket mode only)
        read_file: unsupported; reading Kaldi ark is not implemented
        drop_last: drop the last incomplete batch
        pin_memory: pin host memory for faster GPU transfer
    Returns:
        torch.utils.data.DataLoader
    """
    assert not read_file, "Load from Kaldi ark haven't been implemented yet"
    dset = ESPDataset(data_dir, is_memmap)
    if not is_memmap:
        # data already lives in RAM; worker processes would only duplicate it
        num_workers = 0
    logger.notice(f"Loading data from {data_dir} with {num_workers} threads")
    if is_bucket:
        my_sampler = BucketSampler(dset.ilens,
                                   min_ilen=min_ilen,
                                   max_ilen=max_ilen,
                                   half_batch_ilen=half_batch_ilen,
                                   batch_size=batch_size,
                                   bucket_size=BUCKET_SIZE,
                                   bucket_reverse=bucket_reverse,
                                   drop_last=drop_last)
        # BUGFIX: `drop_last` belongs to the batch sampler. DataLoader raises
        # ValueError when a non-default drop_last is combined with
        # batch_sampler, so the original call crashed whenever drop_last=True.
        loader = DataLoader(dset, batch_size=1, num_workers=num_workers,
                            collate_fn=collate_fn, batch_sampler=my_sampler,
                            pin_memory=pin_memory)
    else:
        loader = DataLoader(dset, batch_size=batch_size, num_workers=num_workers,
                            collate_fn=collate_fn, shuffle=shuffle,
                            drop_last=drop_last, pin_memory=pin_memory)
    return loader
# Support multiple dataset co-training
# class DataContainer:
# def __init__(self, data_dirs, batch_size, dev_batch_size, is_memmap,
# is_bucket, num_workers=0, min_ilen=None, max_ilen=None,
# half_batch_ilen=None, bucket_reverse=False, shuffle=True,
# read_file=False, drop_last=False, pin_memory=True):
# self.data_dirs = data_dirs
# self.num_datasets = len(self.data_dirs)
# self.batch_size = batch_size
# self.is_memmap = is_memmap
# self.is_bucket = is_bucket
# self.num_workers = num_workers
# self.min_ilen = min_ilen
# self.max_ilen = max_ilen
# self.half_batch_ilen = half_batch_ilen
# self.bucket_reverse=bucket_reverse
# self.shuffle = shuffle
# self.read_file = read_file
# self.reload_cnt = 0
# self.loader_iters = list()
# self.dev_loaders = list()
# for data_dir in self.data_dirs:
# self.loader_iters.append(
# iter(get_loader(
# data_dir.joinpath('train'),
# batch_size = self.batch_size,
# is_memmap = self.is_memmap,
# is_bucket = self.is_bucket,
# num_workers = self.num_workers,
# min_ilen = self.min_ilen,
# max_ilen = self.max_ilen,
# half_batch_ilen = self.half_batch_ilen,
# bucket_reverse = self.bucket_reverse,
# shuffle = self.shuffle,
# read_file = self.read_file
# )))
# self.dev_loaders.append(
# get_loader(
# data_dir.joinpath('dev'),
# batch_size = dev_batch_size,
# is_memmap = self.is_memmap,
# is_bucket = False,
# num_workers = self.num_workers,
# shuffle =False,
# ))
# def get_item(self, accent_idx=None, num=1):
# ret_ls = []
# if accent_idx is None: # for MultiASR
# accent_ids = np.random.randint(self.num_datasets, size=num)
# else:
# accent_ids = np.repeat(accent_idx,num)
# for accent_id in accent_ids:
# try:
# ret = next(self.loader_iters[accent_id])
# ret_ls.append((accent_id,ret))
# except StopIteration:
# self.loader_iters[accent_id] = iter(get_loader(
# self.data_dirs[accent_id].joinpath('train'),
# batch_size = self.batch_size,
# is_memmap = self.is_memmap,
# is_bucket = self.is_bucket,
# num_workers = self.num_workers,
# min_ilen = self.min_ilen,
# max_ilen = self.max_ilen,
# half_batch_ilen = self.half_batch_ilen,
# bucket_reverse = self.bucket_reverse,
# shuffle = self.shuffle,
# read_file = self.read_file))
# self.reload_cnt += 1
# ret = next(self.loader_iters[accent_id])
# ret_ls.append((accent_id,ret))
# return ret_ls
# if __name__ == "__main__":
# data_dir = 'mydata/eval'
# loader = get_loader(data_dir, batch_size=128, is_memmap=True, num_workers=4)
# for data in loader:
# print(data['feats'][-1])
| [
"torch.as_tensor",
"random.shuffle",
"src.monitor.logger.notice",
"numpy.digitize",
"numpy.where",
"torch.stack",
"torch.nn.utils.rnn.pad_sequence",
"src.monitor.logger.log",
"torch.utils.data.DataLoader",
"numpy.load",
"src.monitor.logger.warning",
"numpy.arange",
"numpy.random.shuffle"
] | [((591, 649), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (["[d['feat'] for d in batch]"], {'batch_first': '(True)'}), "([d['feat'] for d in batch], batch_first=True)\n", (603, 649), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((662, 701), 'torch.stack', 'torch.stack', (["[d['ilen'] for d in batch]"], {}), "([d['ilen'] for d in batch])\n", (673, 701), False, 'import torch\n'), ((751, 790), 'torch.stack', 'torch.stack', (["[d['olen'] for d in batch]"], {}), "([d['olen'] for d in batch])\n", (762, 790), False, 'import torch\n'), ((5940, 6013), 'src.monitor.logger.notice', 'logger.notice', (['f"""Loading data from {data_dir} with {num_workers} threads"""'], {}), "(f'Loading data from {data_dir} with {num_workers} threads')\n", (5953, 6013), True, 'import src.monitor.logger as logger\n'), ((3042, 3083), 'numpy.digitize', 'np.digitize', (['self.ilens', 'bins'], {'right': '(True)'}), '(self.ilens, bins, right=True)\n', (3053, 3083), True, 'import numpy as np\n'), ((3126, 3177), 'numpy.digitize', 'np.digitize', (['self.half_batch_ilen', 'bins'], {'right': '(True)'}), '(self.half_batch_ilen, bins, right=True)\n', (3137, 3177), True, 'import numpy as np\n'), ((3879, 3907), 'random.shuffle', 'random.shuffle', (['self.buckets'], {}), '(self.buckets)\n', (3893, 3907), False, 'import random\n'), ((6529, 6687), 'torch.utils.data.DataLoader', 'DataLoader', (['dset'], {'batch_size': '(1)', 'num_workers': 'num_workers', 'collate_fn': 'collate_fn', 'batch_sampler': 'my_sampler', 'drop_last': 'drop_last', 'pin_memory': 'pin_memory'}), '(dset, batch_size=1, num_workers=num_workers, collate_fn=\n collate_fn, batch_sampler=my_sampler, drop_last=drop_last, pin_memory=\n pin_memory)\n', (6539, 6687), False, 'from torch.utils.data import Dataset, DataLoader, Sampler\n'), ((6761, 6914), 'torch.utils.data.DataLoader', 'DataLoader', (['dset'], {'batch_size': 'batch_size', 'num_workers': 'num_workers', 'collate_fn': 'collate_fn', 'shuffle': 'shuffle', 'drop_last': 
'drop_last', 'pin_memory': 'pin_memory'}), '(dset, batch_size=batch_size, num_workers=num_workers, collate_fn\n =collate_fn, shuffle=shuffle, drop_last=drop_last, pin_memory=pin_memory)\n', (6771, 6914), False, 'from torch.utils.data import Dataset, DataLoader, Sampler\n'), ((1619, 1644), 'numpy.random.shuffle', 'np.random.shuffle', (['bucket'], {}), '(bucket)\n', (1636, 1644), True, 'import numpy as np\n'), ((2882, 2918), 'numpy.arange', 'np.arange', (['ub', 'lb', '(-self.bucket_size)'], {}), '(ub, lb, -self.bucket_size)\n', (2891, 2918), True, 'import numpy as np\n'), ((2968, 3003), 'numpy.arange', 'np.arange', (['lb', 'ub', 'self.bucket_size'], {}), '(lb, ub, self.bucket_size)\n', (2977, 3003), True, 'import numpy as np\n'), ((4228, 4292), 'src.monitor.logger.log', 'logger.log', (['f"""Loading {feat_path} from memmap..."""'], {'prefix': '"""info"""'}), "(f'Loading {feat_path} from memmap...', prefix='info')\n", (4238, 4292), True, 'import src.monitor.logger as logger\n'), ((4316, 4349), 'numpy.load', 'np.load', (['feat_path'], {'mmap_mode': '"""r"""'}), "(feat_path, mmap_mode='r')\n", (4323, 4349), True, 'import numpy as np\n'), ((4446, 4506), 'src.monitor.logger.warning', 'logger.warning', (['f"""Loading whole data ({feat_path}) into RAM"""'], {}), "(f'Loading whole data ({feat_path}) into RAM')\n", (4460, 4506), True, 'import src.monitor.logger as logger\n'), ((4531, 4549), 'numpy.load', 'np.load', (['feat_path'], {}), '(feat_path)\n', (4538, 4549), True, 'import numpy as np\n'), ((5199, 5263), 'torch.as_tensor', 'torch.as_tensor', (['self.feat[self.iptr[idx]:self.iptr[idx + 1], :]'], {}), '(self.feat[self.iptr[idx]:self.iptr[idx + 1], :])\n', (5214, 5263), False, 'import torch\n'), ((5281, 5313), 'torch.as_tensor', 'torch.as_tensor', (['self.ilens[idx]'], {}), '(self.ilens[idx])\n', (5296, 5313), False, 'import torch\n'), ((5335, 5397), 'torch.as_tensor', 'torch.as_tensor', (['self.label[self.optr[idx]:self.optr[idx + 1]]'], {}), 
'(self.label[self.optr[idx]:self.optr[idx + 1]])\n', (5350, 5397), False, 'import torch\n'), ((5416, 5448), 'torch.as_tensor', 'torch.as_tensor', (['self.olens[idx]'], {}), '(self.olens[idx])\n', (5431, 5448), False, 'import torch\n'), ((3273, 3304), 'numpy.where', 'np.where', (['(bucket_idx == bin_idx)'], {}), '(bucket_idx == bin_idx)\n', (3281, 3304), True, 'import numpy as np\n')] |
"""
File Reader
This reads data from a file found in ../data/.
The data file must be set with setInput().
Author: <NAME>
Date: February 2021
"""
import numpy as np
import os
import glob
from readers.Reader import Reader
class FileReader(Reader):
    """Reader implementation that replays recorded EMG samples from a CSV file.

    The data file defaults to backend/data/emg1KT60.csv and can be switched
    with setInput().
    """

    def __init__(self, framesize=100, channels=8):
        """Create the reader.

        Args:
            framesize: number of data points returned per read (default 100)
            channels: number of channels returned during read (default 8)
        """
        base = os.path.realpath(__file__) + '/../data/emg1KT60.csv'
        base = base.split('backend')[0]
        samples = np.genfromtxt(base + 'backend/data/emg1KT60.csv',
                                delimiter=",", names=["x"])
        self.currentIndex = 0
        self.channels = channels
        self.framesize = framesize
        self.data = samples['x']

    def start(self):
        """Start the reader (no setup required)."""
        return True

    def stop(self):
        """Stop the reader (no teardown required)."""
        return True

    def read(self):
        """Return the next frame: `framesize` samples, replicated per channel.

        NOTE(review): every channel entry references the SAME list object,
        mirroring the original behavior — mutate one and all change.
        """
        print(self.data[self.currentIndex:self.currentIndex + self.framesize])
        frame = self.data[self.currentIndex:self.currentIndex +
                          self.framesize].tolist()
        self.currentIndex = self.currentIndex + self.framesize
        result = []
        for channel in range(0, int(self.channels)):
            result.insert(channel, frame)
        return result

    def setInput(self, inputFile):
        """Select the input file.

        Args:
            inputFile: file name (without extension) found in backend/data
        """
        base = os.path.realpath(__file__)
        base = base.split('backend')[0]
        print(inputFile)
        filepath = 'backend/data/' + inputFile + ".csv"
        samples = np.genfromtxt(base + filepath,
                                delimiter=",", names=["x"])
        print(base + filepath)
        self.currentIndex = 0
        self.data = samples['x']
| [
"os.path.realpath",
"numpy.genfromtxt"
] | [((687, 764), 'numpy.genfromtxt', 'np.genfromtxt', (["(file + 'backend/data/emg1KT60.csv')"], {'delimiter': '""","""', 'names': "['x']"}), "(file + 'backend/data/emg1KT60.csv', delimiter=',', names=['x'])\n", (700, 764), True, 'import numpy as np\n'), ((1762, 1788), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1778, 1788), False, 'import os\n'), ((1921, 1979), 'numpy.genfromtxt', 'np.genfromtxt', (['(file + filepath)'], {'delimiter': '""","""', 'names': "['x']"}), "(file + filepath, delimiter=',', names=['x'])\n", (1934, 1979), True, 'import numpy as np\n'), ((581, 607), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (597, 607), False, 'import os\n')] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General utils.
See GCS api for upload / download functionality:
https://github.com/googleapis/python-storage/blob/master/google/cloud/storage/blob.py # pylint: disable=line-too-long
"""
import datetime
import os
import random
import numpy as np
import torch
def make_reproducible(random_seed):
    """Seed every RNG (python, numpy, torch, CUDA) and force deterministic cuDNN."""
    print(f'Making reproducible on seed {random_seed}')
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed,
                    torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(random_seed)
    # cuDNN autotuning may pick nondeterministic kernels; pin it down
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # NOTE(review): setting PYTHONHASHSEED after interpreter start does not
    # re-seed this process's str hashing; it only affects child processes.
    os.environ['PYTHONHASHSEED'] = str(random_seed)
def get_timestamp(datetime_format='%YY_%mM_%dD-%Hh_%Mm_%Ss'):
    """Return the current local time rendered with `datetime_format`."""
    now = datetime.datetime.now()
    return now.strftime(datetime_format)
def save_metrics(metrics, output_dir, config):
    """Persist `metrics` as <output_dir>/metrics.pt; no-op when config.debug.

    NOTE(review): the original docstring mentioned a GCS upload, but only the
    local torch.save is performed here.
    """
    if config.debug:
        return
    torch.save(metrics, os.path.join(output_dir, 'metrics.pt'))
def save_model(model, optimizer, output_dir, epoch_i, config):
    """Checkpoint model and optimizer state; no-op when config.debug.

    Writes <output_dir>/ckpts/ckpt__epoch_<NNNN>.pt containing a dict with
    'model' and 'optimizer' state_dicts.

    Args:
        model: torch module whose state_dict is saved
        optimizer: torch optimizer whose state_dict is saved
        output_dir: run directory; a 'ckpts' subdirectory is created inside
        epoch_i: epoch index, zero-padded to 4 digits in the file name
        config: object with a boolean `debug` attribute
    """
    if config.debug:
        return
    save_dict = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
    }
    ckpt_dir = os.path.join(output_dir, 'ckpts')
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() / os.makedirs() pair
    os.makedirs(ckpt_dir, exist_ok=True)
    save_path = os.path.join(ckpt_dir, f'ckpt__epoch_{epoch_i:04d}.pt')
    torch.save(save_dict, save_path)
def save_model_config(model_config, output_dir, config):
    """Persist the model configuration to <output_dir>/model_config.pt; no-op when config.debug."""
    if config.debug:
        return
    torch.save(model_config, os.path.join(output_dir, 'model_config.pt'))
def save_flags(flags, output_dir, config):
    """Write flag values to <output_dir>/flagfile.txt; no-op when config.debug.

    `flags` is expected to expose append_flags_into_file (e.g. absl FLAGS —
    TODO confirm against the caller).
    """
    if config.debug:
        return
    flags.append_flags_into_file(os.path.join(output_dir, 'flagfile.txt'))
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"os.path.exists",
"os.makedirs",
"os.path.join",
"random.seed",
"datetime.datetime.now",
"numpy.random.seed",
"torch.save",
"torch.cuda.manual_seed"
] | [((1007, 1031), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (1018, 1031), False, 'import random\n'), ((1034, 1061), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (1048, 1061), True, 'import numpy as np\n'), ((1064, 1094), 'torch.manual_seed', 'torch.manual_seed', (['random_seed'], {}), '(random_seed)\n', (1081, 1094), False, 'import torch\n'), ((1097, 1132), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['random_seed'], {}), '(random_seed)\n', (1119, 1132), False, 'import torch\n'), ((1135, 1174), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['random_seed'], {}), '(random_seed)\n', (1161, 1174), False, 'import torch\n'), ((1619, 1657), 'os.path.join', 'os.path.join', (['output_dir', '"""metrics.pt"""'], {}), "(output_dir, 'metrics.pt')\n", (1631, 1657), False, 'import os\n'), ((1660, 1690), 'torch.save', 'torch.save', (['metrics', 'save_path'], {}), '(metrics, save_path)\n', (1670, 1690), False, 'import torch\n'), ((1940, 1973), 'os.path.join', 'os.path.join', (['output_dir', '"""ckpts"""'], {}), "(output_dir, 'ckpts')\n", (1952, 1973), False, 'import os\n'), ((2049, 2104), 'os.path.join', 'os.path.join', (['ckpt_dir', 'f"""ckpt__epoch_{epoch_i:04d}.pt"""'], {}), "(ckpt_dir, f'ckpt__epoch_{epoch_i:04d}.pt')\n", (2061, 2104), False, 'import os\n'), ((2107, 2139), 'torch.save', 'torch.save', (['save_dict', 'save_path'], {}), '(save_dict, save_path)\n', (2117, 2139), False, 'import torch\n'), ((2284, 2327), 'os.path.join', 'os.path.join', (['output_dir', '"""model_config.pt"""'], {}), "(output_dir, 'model_config.pt')\n", (2296, 2327), False, 'import os\n'), ((2330, 2365), 'torch.save', 'torch.save', (['model_config', 'save_path'], {}), '(model_config, save_path)\n', (2340, 2365), False, 'import torch\n'), ((2496, 2536), 'os.path.join', 'os.path.join', (['output_dir', '"""flagfile.txt"""'], {}), "(output_dir, 'flagfile.txt')\n", (2508, 2536), False, 'import os\n'), ((1983, 2007), 
'os.path.exists', 'os.path.exists', (['ckpt_dir'], {}), '(ckpt_dir)\n', (1997, 2007), False, 'import os\n'), ((2013, 2034), 'os.makedirs', 'os.makedirs', (['ckpt_dir'], {}), '(ckpt_dir)\n', (2024, 2034), False, 'import os\n'), ((1414, 1437), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1435, 1437), False, 'import datetime\n')] |
import cv2
import os
import numpy as np
def load_images_from_folder(folder):
    """Read every file in `folder` as a grayscale image, skipping unreadable ones."""
    # cv2.imread returns None for files it cannot decode; filter those out
    loaded = (cv2.imread(os.path.join(folder, name), 0)
              for name in os.listdir(folder))
    return [image for image in loaded if image is not None]
def data_crop(img, crop_size):
    """Randomly crop each 2-D image to a crop_size x crop_size window.

    Args:
        img: iterable of 2-D numpy arrays of shape (h, w), each with
            h >= crop_size and w >= crop_size
        crop_size: side length of the square crop
    Returns:
        list of crop_size x crop_size numpy arrays

    BUGFIX: an image whose height or width exactly equals crop_size made the
    original call np.random.randint(0), which raises ValueError; such an axis
    now simply uses offset 0.
    """
    ims = []
    for im in img:
        h, w = im.shape
        rand_range_h = h - crop_size
        rand_range_w = w - crop_size
        # np.random.randint requires a strictly positive upper bound
        x_offset = np.random.randint(rand_range_w) if rand_range_w > 0 else 0
        y_offset = np.random.randint(rand_range_h) if rand_range_h > 0 else 0
        ims.append(im[y_offset:y_offset + crop_size,
                      x_offset:x_offset + crop_size])
    return ims
def data_augment(img):
    """Randomly augment each 2-D image: horizontal flip with p=0.5, then
    rotation with p=0.5 by a uniform angle in (0, 10] degrees, negated with
    p=0.5.

    The np.random draws happen in a fixed order (flip coin, rotate coin,
    angle, sign coin), so results are reproducible only under a fixed seed.
    """
    ims = []
    for im in img:
        h, w = im.shape
        if np.random.rand() > 0.5:
            # horizontal mirror
            im = np.fliplr(im)
        if np.random.rand() > 0.5:
            angle = 10*np.random.rand()
            if np.random.rand() > 0.5:
                angle *= -1
            # rotate about the image centre, keeping the original frame size
            # (corners may be clipped / padded with zeros by warpAffine)
            M = cv2.getRotationMatrix2D((w/2,h/2),angle,1)
            im = cv2.warpAffine(im,M,(w,h))
        ims.append(im)
    return ims
| [
"os.listdir",
"cv2.warpAffine",
"numpy.random.rand",
"numpy.fliplr",
"os.path.join",
"numpy.random.randint",
"cv2.getRotationMatrix2D"
] | [((114, 132), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (124, 132), False, 'import os\n'), ((458, 489), 'numpy.random.randint', 'np.random.randint', (['rand_range_w'], {}), '(rand_range_w)\n', (475, 489), True, 'import numpy as np\n'), ((509, 540), 'numpy.random.randint', 'np.random.randint', (['rand_range_h'], {}), '(rand_range_h)\n', (526, 540), True, 'import numpy as np\n'), ((159, 189), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (171, 189), False, 'import os\n'), ((753, 769), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (767, 769), True, 'import numpy as np\n'), ((794, 807), 'numpy.fliplr', 'np.fliplr', (['im'], {}), '(im)\n', (803, 807), True, 'import numpy as np\n'), ((819, 835), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (833, 835), True, 'import numpy as np\n'), ((966, 1015), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(w / 2, h / 2)', 'angle', '(1)'], {}), '((w / 2, h / 2), angle, 1)\n', (989, 1015), False, 'import cv2\n'), ((1026, 1055), 'cv2.warpAffine', 'cv2.warpAffine', (['im', 'M', '(w, h)'], {}), '(im, M, (w, h))\n', (1040, 1055), False, 'import cv2\n'), ((866, 882), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (880, 882), True, 'import numpy as np\n'), ((898, 914), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (912, 914), True, 'import numpy as np\n')] |
# Copyright 2019-2021 Canaan Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
import pytest
import onnx
from onnx import helper
from onnx import AttributeProto, TensorProto, GraphProto
from onnx_test_runner import OnnxTestRunner
import numpy as np
def _make_bn_initializers(tag, channels, initializers):
    """Create the scale/bias/mean/var tensors for one BatchNormalization node.

    The four tensors are named '<kind><tag>' (e.g. 'scale1'), appended to
    `initializers` in BatchNormalization input order, and their names returned.
    """
    names = []
    # variance must stay positive, hence np.random.rand (not randn) for 'var'
    for kind, sampler in (('scale', np.random.randn), ('bias', np.random.randn),
                          ('mean', np.random.randn), ('var', np.random.rand)):
        name = kind + tag
        tensor = helper.make_tensor(
            name,
            TensorProto.FLOAT,
            dims=[channels],
            vals=sampler(channels).astype(np.float32).flatten().tolist())
        initializers.append(tensor)
        names.append(name)
    return names


def _make_module(in_shape):
    """Build an Add -> BatchNorm -> 1x1 Conv -> Add -> BatchNorm ONNX model.

    Args:
        in_shape: NCHW shape shared by both graph inputs and the output.
    Returns:
        an onnx ModelProto with randomly initialized weights/statistics.
    """
    input_1 = helper.make_tensor_value_info('input_1', TensorProto.FLOAT, in_shape)
    input_2 = helper.make_tensor_value_info('input_2', TensorProto.FLOAT, in_shape)
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT, in_shape)

    initializers = []
    channels = in_shape[1]

    # add1 = input_1 + input_2
    add1 = onnx.helper.make_node(
        'Add',
        inputs=['input_1', 'input_2'],
        outputs=['add1'],
    )

    # batchnorm1(add1)
    bn1_inputs = _make_bn_initializers('1', channels, initializers)
    batchnorm1 = onnx.helper.make_node(
        'BatchNormalization',
        inputs=['add1'] + bn1_inputs,
        outputs=['batchnorm1']
    )

    # 1x1 pointwise convolution keeping the channel count unchanged
    weight_shape = [channels, channels, 1, 1]
    weight = helper.make_tensor(
        'weight',
        TensorProto.FLOAT,
        dims=weight_shape,
        vals=np.random.randn(*weight_shape).astype(np.float32).flatten().tolist())
    initializers.append(weight)
    conv2d = onnx.helper.make_node(
        'Conv',
        inputs=['batchnorm1', 'weight'],
        outputs=['conv2d'],
        kernel_shape=[1, 1],
        pads=[0, 0, 0, 0],
    )

    # add2 = add1 + conv2d (residual-style reuse of add1)
    add2 = onnx.helper.make_node(
        'Add',
        inputs=['add1', 'conv2d'],
        outputs=['add2'],
    )

    # batchnorm2(add2) producing the graph output
    bn2_inputs = _make_bn_initializers('2', channels, initializers)
    batchnorm2 = onnx.helper.make_node(
        'BatchNormalization',
        inputs=['add2'] + bn2_inputs,
        outputs=['output']
    )

    graph_def = helper.make_graph([add1, batchnorm1, conv2d, add2, batchnorm2],
                                  'test-model', [input_1, input_2], [output],
                                  initializer=initializers)
    model_def = helper.make_model(graph_def, producer_name='kendryte')

    return model_def
# NCHW input shapes exercised by the fusion test: channel count varies,
# batch and spatial dimensions stay fixed.
in_shapes = [
    [1, 32, 56, 56],
    [1, 64, 56, 56],
    [1, 128, 56, 56],
    [1, 256, 56, 56]
]
@pytest.mark.parametrize('in_shape', in_shapes)
def test_act_load_psum_fuse(in_shape, request):
    """Build the Add/BN/Conv model and run it through the k510 ONNX test runner."""
    model_def = _make_module(in_shape)
    runner = OnnxTestRunner(request.node.name, ['k510'])
    model_file = runner.from_onnx_helper(model_def)
    runner.run(model_file)
if __name__ == "__main__":
    # Allow running this test file directly, outside a pytest session.
    pytest.main(['-vv', 'test_act_load_psum_fuse.py'])
| [
"onnx.helper.make_graph",
"onnx.helper.make_node",
"numpy.random.rand",
"onnx.helper.make_tensor_value_info",
"pytest.main",
"onnx.helper.make_model",
"pytest.mark.parametrize",
"onnx_test_runner.OnnxTestRunner",
"numpy.random.randn"
] | [((4356, 4402), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""in_shape"""', 'in_shapes'], {}), "('in_shape', in_shapes)\n", (4379, 4402), False, 'import pytest\n'), ((869, 938), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""input_1"""', 'TensorProto.FLOAT', 'in_shape'], {}), "('input_1', TensorProto.FLOAT, in_shape)\n", (898, 938), False, 'from onnx import helper\n'), ((953, 1022), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""input_2"""', 'TensorProto.FLOAT', 'in_shape'], {}), "('input_2', TensorProto.FLOAT, in_shape)\n", (982, 1022), False, 'from onnx import helper\n'), ((1036, 1104), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""output"""', 'TensorProto.FLOAT', 'in_shape'], {}), "('output', TensorProto.FLOAT, in_shape)\n", (1065, 1104), False, 'from onnx import helper\n'), ((1150, 1227), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Add"""'], {'inputs': "['input_1', 'input_2']", 'outputs': "['add1']"}), "('Add', inputs=['input_1', 'input_2'], outputs=['add1'])\n", (1171, 1227), False, 'import onnx\n'), ((2185, 2309), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""BatchNormalization"""'], {'inputs': "['add1', 'scale1', 'bias1', 'mean1', 'var1']", 'outputs': "['batchnorm1']"}), "('BatchNormalization', inputs=['add1', 'scale1',\n 'bias1', 'mean1', 'var1'], outputs=['batchnorm1'])\n", (2206, 2309), False, 'import onnx\n'), ((2636, 2763), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Conv"""'], {'inputs': "['batchnorm1', 'weight']", 'outputs': "['conv2d']", 'kernel_shape': '[1, 1]', 'pads': '[0, 0, 0, 0]'}), "('Conv', inputs=['batchnorm1', 'weight'], outputs=[\n 'conv2d'], kernel_shape=[1, 1], pads=[0, 0, 0, 0])\n", (2657, 2763), False, 'import onnx\n'), ((2829, 2902), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Add"""'], {'inputs': "['add1', 'conv2d']", 'outputs': "['add2']"}), "('Add', inputs=['add1', 'conv2d'], 
outputs=['add2'])\n", (2850, 2902), False, 'import onnx\n'), ((3860, 3980), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""BatchNormalization"""'], {'inputs': "['add2', 'scale2', 'bias2', 'mean2', 'var2']", 'outputs': "['output']"}), "('BatchNormalization', inputs=['add2', 'scale2',\n 'bias2', 'mean2', 'var2'], outputs=['output'])\n", (3881, 3980), False, 'import onnx\n'), ((4024, 4161), 'onnx.helper.make_graph', 'helper.make_graph', (['[add1, batchnorm1, conv2d, add2, batchnorm2]', '"""test-model"""', '[input_1, input_2]', '[output]'], {'initializer': 'initializers'}), "([add1, batchnorm1, conv2d, add2, batchnorm2],\n 'test-model', [input_1, input_2], [output], initializer=initializers)\n", (4041, 4161), False, 'from onnx import helper\n'), ((4174, 4228), 'onnx.helper.make_model', 'helper.make_model', (['graph_def'], {'producer_name': '"""kendryte"""'}), "(graph_def, producer_name='kendryte')\n", (4191, 4228), False, 'from onnx import helper\n'), ((4504, 4547), 'onnx_test_runner.OnnxTestRunner', 'OnnxTestRunner', (['request.node.name', "['k510']"], {}), "(request.node.name, ['k510'])\n", (4518, 4547), False, 'from onnx_test_runner import OnnxTestRunner\n'), ((4659, 4709), 'pytest.main', 'pytest.main', (["['-vv', 'test_act_load_psum_fuse.py']"], {}), "(['-vv', 'test_act_load_psum_fuse.py'])\n", (4670, 4709), False, 'import pytest\n'), ((1396, 1424), 'numpy.random.randn', 'np.random.randn', (['in_shape[1]'], {}), '(in_shape[1])\n', (1411, 1424), True, 'import numpy as np\n'), ((1620, 1648), 'numpy.random.randn', 'np.random.randn', (['in_shape[1]'], {}), '(in_shape[1])\n', (1635, 1648), True, 'import numpy as np\n'), ((1843, 1871), 'numpy.random.randn', 'np.random.randn', (['in_shape[1]'], {}), '(in_shape[1])\n', (1858, 1871), True, 'import numpy as np\n'), ((2064, 2091), 'numpy.random.rand', 'np.random.rand', (['in_shape[1]'], {}), '(in_shape[1])\n', (2078, 2091), True, 'import numpy as np\n'), ((2520, 2550), 'numpy.random.randn', 'np.random.randn', 
(['*weight_shape'], {}), '(*weight_shape)\n', (2535, 2550), True, 'import numpy as np\n'), ((3071, 3099), 'numpy.random.randn', 'np.random.randn', (['in_shape[1]'], {}), '(in_shape[1])\n', (3086, 3099), True, 'import numpy as np\n'), ((3295, 3323), 'numpy.random.randn', 'np.random.randn', (['in_shape[1]'], {}), '(in_shape[1])\n', (3310, 3323), True, 'import numpy as np\n'), ((3518, 3546), 'numpy.random.randn', 'np.random.randn', (['in_shape[1]'], {}), '(in_shape[1])\n', (3533, 3546), True, 'import numpy as np\n'), ((3739, 3766), 'numpy.random.rand', 'np.random.rand', (['in_shape[1]'], {}), '(in_shape[1])\n', (3753, 3766), True, 'import numpy as np\n')] |
#!\usr\bin\python
# coding=utf-8
# Author: youngfeng
# Update: 07/08/2018
"""
Progressive, concluded by Sarkar et al. (ase '15), is one of the basic sampling techiques in performance prediction.
It iteratively randomly select samples from train pool to train a cart model, and test on testing pool untill the
learning curve come to flatten/convergence point.
The details of Progressive are introduced in paper "Cost-Efficient Sampling for Performance Prediction of Configurable Systems".
"""
import pandas as pd
import random as rd
import numpy as np
from sklearn.tree import DecisionTreeRegressor
# for figures
import matplotlib.pyplot as plt
# Global learning-curve accumulator: each entry is [train_set_size, 1 - MMRE],
# appended by predict_by_progressive().
COLLECTOR = []
# Global placeholder for [train_pool, test_pool, validation_pool].
DATASETS = []
class config_node:
    """One measured configuration of the system under study.

    Attributes:
        index: actual rank (position after sorting by true performance)
        features: configuration option values
        perfs: measured performance values (the last entry is the target)
        predicted: predicted performance values (the last entry is written
            by predict_by_cart)
    """

    def __init__(self, index, features, perfs, predicted):
        self.index = index
        self.features = features
        self.perfs = perfs
        self.predicted = predicted
def find_lowest_rank(train_set, test_set):
    """Train a CART on `train_set` and return the best (lowest) actual rank
    found among the predicted top-10 configurations of `test_set`.
    """
    # order the test set by true performance so list position == actual rank
    ranked_actual = sorted(test_set, key=lambda c: c.perfs[-1])

    model = DecisionTreeRegressor()
    model.fit([c.features for c in train_set],
              [c.perfs[-1] for c in train_set])
    predictions = model.predict([c.features for c in ranked_actual])

    # pair each prediction with its actual rank, then order by predicted value
    by_predicted = sorted(enumerate(predictions), key=lambda pair: pair[-1])
    top_ten = by_predicted[:10]
    return np.min([actual_rank for actual_rank, _ in top_ten])
def predict_by_cart(train_set, test_set):
    """Train a CART on `train_set`, predict performances for `test_set`, and
    return the mean magnitude of relative error (MMRE).

    Side effect: writes each prediction into config.predicted[-1] for every
    config in `test_set`.

    NOTE(review): a test config with perfs[-1] == 0 raises ZeroDivisionError,
    as in the original.
    """
    train_features = [c.features for c in train_set]
    train_perfs = [c.perfs[-1] for c in train_set]
    test_features = [c.features for c in test_set]
    # removed: an unused `test_pef_vector` of true test performances and a
    # large commented-out minsplit/minbucket tuning block from the original
    cart_model = DecisionTreeRegressor()
    cart_model.fit(train_features, train_perfs)
    test_pef_predicted = cart_model.predict(test_features)

    for (config, predicted_perf) in zip(test_set, test_pef_predicted):
        config.predicted[-1] = predicted_perf
    mmre_lst = [abs(c.perfs[-1] - c.predicted[-1]) / abs(c.perfs[-1])
                for c in test_set]
    return np.mean(mmre_lst)
def split_data_by_fraction(csv_file, fraction, test_fraction=0.2):
    """Load configurations from `csv_file`, sort by true performance, shuffle,
    and split into train / test / validation pools.

    Feature columns are those whose name does not contain "$<"; performance
    columns contain "$<" (the last one is the sort/prediction target).

    Args:
        csv_file: path to the measurements csv
        fraction: fraction of configurations used as the training pool
        test_fraction: fraction used as the testing pool (default 0.2,
            matching the previously hard-coded split); the remainder
            becomes the validation pool
    Returns:
        [train_pool, test_pool, validation_pool] of config_node objects
    """
    pdcontent = pd.read_csv(csv_file)
    attr_list = pdcontent.columns  # all column names
    features = [i for i in attr_list if "$<" not in i]
    perfs = [i for i in attr_list if "$<" in i]
    sortedcontent = pdcontent.sort_values(perfs[-1])  # ascending performance

    # index c is the actual rank; perfs double as the initial "predicted" list
    configs = list()
    for c in range(len(pdcontent)):
        configs.append(config_node(c,
                                   sortedcontent.iloc[c][features].tolist(),
                                   sortedcontent.iloc[c][perfs].tolist(),
                                   sortedcontent.iloc[c][perfs].tolist(),
                                   ))

    rd.shuffle(configs)  # randomize pool membership
    n = len(configs)
    train_end = int(fraction * n)
    test_end = int((fraction + test_fraction) * n)
    train_pool = configs[:train_end]
    test_pool = configs[train_end:test_end]
    validation_pool = configs[test_end:]
    return [train_pool, test_pool, validation_pool]
def predict_by_progressive(train_pool, test_pool):
# rd.shuffle(train_pool)
train_set = train_pool[:10]
count = 10
lives = 3
last_mmre = -1
# step6: progressive cycle
while lives > 0 and count < len(train_pool):
# add sample to train set
train_set.append(train_pool[count])
count = count + 1
current_mmre = predict_by_cart(train_set, test_pool)
COLLECTOR.append([count, (1-current_mmre)])
if (1-current_mmre) <= last_mmre:
lives = lives - 1
else:
lives = 3
last_mmre = (1-current_mmre)
# if current_mmre < 0.1:
# break
return train_set
if __name__ == "__main__":
#######################################################################################
split_data = split_data_by_fraction("data/Apache_AllMeasurements.csv", 0.4)
train_pool = split_data[0]
test_pool = split_data[1]
validation_pool = split_data[2]
# apply progressive on proj
print("### Testing on Test Pool: ")
train_set = predict_by_progressive(train_pool, test_pool)
for config in train_set:
print(config.index, ",", end="")
print("\n--------------------")
# evaluate on validation pool
mmre = predict_by_cart(train_set, validation_pool)
print("### Evaulation on Validation Pool:")
print("[mmre]:", mmre)
#######################################################################################
# sort the validation pool by predicted_perf
rk = find_lowest_rank(train_set, validation_pool)
print("[min rank]:", rk)
| [
"numpy.mean",
"random.shuffle",
"sklearn.tree.DecisionTreeRegressor",
"pandas.read_csv",
"numpy.min"
] | [((1587, 1610), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (1608, 1610), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((2371, 2407), 'numpy.min', 'np.min', (['[sf[0] for sf in select_few]'], {}), '([sf[0] for sf in select_few])\n', (2377, 2407), True, 'import numpy as np\n'), ((3463, 3486), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (3484, 3486), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((3857, 3874), 'numpy.mean', 'np.mean', (['mmre_lst'], {}), '(mmre_lst)\n', (3864, 3874), True, 'import numpy as np\n'), ((3966, 3987), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (3977, 3987), True, 'import pandas as pd\n'), ((4791, 4810), 'random.shuffle', 'rd.shuffle', (['configs'], {}), '(configs)\n', (4801, 4810), True, 'import random as rd\n')] |
import argparse
import glob
import csv
import numpy as np
import pandas as pd
def parse_arguments():
parser = argparse.ArgumentParser("Trains a simple BiLSTM to detect sentential arguments across multiple topics.")
parser.add_argument("--data", type=str, help="The path to the folder containing the TSV files with the training data.")
return parser.parse_args()
def read_data(data_path):
data = pd.read_csv(data_path, sep="\t", names=["motion", "hypothesis", "evidence", "evidenceclass"], index_col=0)
return data
def extend_negative(links, hypotheses, evidences):
hyp2idx = {hyp: idx for idx, hyp in enumerate(hypotheses)}
ev2idx = {ev: idx for idx, ev in enumerate(evidences)}
adjacency = np.zeros((len(hypotheses), len(evidences)))
for link in links:
adjacency[hyp2idx[link[0]], ev2idx[link[1]]] = 1
random_hypotheses = np.random.randint(0, len(hypotheses), size=5*len(links))
random_evidences = np.random.randint(0, len(evidences), size=5*len(links))
random_links_indices = list(filter(lambda link: not adjacency[link[0], link[1]], zip(random_hypotheses, random_evidences)))[:len(links) * 1]
random_links = list(map(lambda link: (hypotheses[link[0]], evidences[link[1]]), random_links_indices))
data = np.concatenate((links, random_links))
labels = np.concatenate((["link"]*len(links), ["no-link"]*len(random_links)))
complete = np.concatenate((data, np.expand_dims(labels, axis=1)), axis=1)
return complete
if "__main__"==__name__:
args = parse_arguments()
data = read_data(args.data)
expanded_columns = ["motion", "hypothesis", "evidence", "label"]
expanded_data = list()
user_groups = data.groupby("motion")
for name, group in user_groups:
links = group[["hypothesis", "evidence"]]
link_tuples = [tuple(x) for x in links.values]
hypotheses = sorted(set(links["hypothesis"].tolist()))
evidences = sorted(set(links["evidence"].tolist()))
data = extend_negative(link_tuples, hypotheses, evidences)
user_column = np.expand_dims([name]*data.shape[0], axis=1)
user_specific_frame = pd.DataFrame(data=np.concatenate((user_column, data), axis=1), columns=expanded_columns)
expanded_data.append(user_specific_frame)
expanded_frame = pd.concat(expanded_data, ignore_index=True)
expanded_frame.to_csv(args.data + ".expanded", index=False, quotechar="'", quoting=csv.QUOTE_ALL)
| [
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.concatenate",
"numpy.expand_dims",
"pandas.concat"
] | [((117, 231), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Trains a simple BiLSTM to detect sentential arguments across multiple topics."""'], {}), "(\n 'Trains a simple BiLSTM to detect sentential arguments across multiple topics.'\n )\n", (140, 231), False, 'import argparse\n'), ((419, 529), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'sep': '"""\t"""', 'names': "['motion', 'hypothesis', 'evidence', 'evidenceclass']", 'index_col': '(0)'}), "(data_path, sep='\\t', names=['motion', 'hypothesis', 'evidence',\n 'evidenceclass'], index_col=0)\n", (430, 529), True, 'import pandas as pd\n'), ((1289, 1326), 'numpy.concatenate', 'np.concatenate', (['(links, random_links)'], {}), '((links, random_links))\n', (1303, 1326), True, 'import numpy as np\n'), ((2327, 2370), 'pandas.concat', 'pd.concat', (['expanded_data'], {'ignore_index': '(True)'}), '(expanded_data, ignore_index=True)\n', (2336, 2370), True, 'import pandas as pd\n'), ((2091, 2137), 'numpy.expand_dims', 'np.expand_dims', (['([name] * data.shape[0])'], {'axis': '(1)'}), '([name] * data.shape[0], axis=1)\n', (2105, 2137), True, 'import numpy as np\n'), ((1446, 1476), 'numpy.expand_dims', 'np.expand_dims', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (1460, 1476), True, 'import numpy as np\n'), ((2184, 2227), 'numpy.concatenate', 'np.concatenate', (['(user_column, data)'], {'axis': '(1)'}), '((user_column, data), axis=1)\n', (2198, 2227), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, confusion_matrix
from lifelines import CoxPHFitter
from datautils.dataset import Dataset
from datautils.data import Data
from datautils.helper import save_output
from tqdm import tqdm
import argparse
#%%
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--imputation_mode', default="mean")
arg_parser.add_argument('--seed', type=int, default=42)
arg_parser.add_argument('--positive_weight', type=int, default=56)
ARGS = arg_parser.parse_args()
#%%
print(f"Running Cox with imputation_mode = {ARGS.imputation_mode}, seed = {ARGS.seed}")
print('Arguments:', ARGS)
#%%
dataset = Dataset("data/challenge_data",
batchSize=100,
train_ratio=0.8,
normalize=True,
padding=False,
imputeForward=(False if ARGS.imputation_mode == "mean" else True),
calculateDelay=False,
seed=ARGS.seed)
#%%
columns = list(dataset.train_data.features.keys())[:-2]
# dataset.train_data.x.shape
# dataset.val_data.x.shape
# dataset.test_data.x.shape
#%%
# create windowing system here
T = 6
#idx = 10
def process_data(d: Data, T: int) -> (pd.DataFrame, np.array):
npa = d.x
target_npa = d.y
processed = []
labels = []
print("* Processing data...")
for idx in tqdm(range(npa.shape[0])):
if target_npa[idx].sum() == 0:
processed.extend([[row,7,1] for row in npa[idx]])
else:
sepsis_count = 0
for i in range(npa[idx].shape[0]):
t = (T + 1) - sepsis_count
t = t if t >= 1 else 1
s = 1 if t > T else 0
processed.append([npa[idx][i],t,s])
sepsis_count += 1 if target_npa[idx][i][0] == 1 else 0
labels.extend(target_npa[idx].flatten().tolist())
return (pd.DataFrame(processed, columns=["x","t","s"]), np.array(labels))
# Naive windowing:
# for i in range(df[idx].shape[0]):
# window = df[idx][i:i+T]
# matches = np.where(window[:,-1]==1)[0]
# if matches.size > 0:
# t = matches[0] + 1
# s = 0
# else:
# t = T + 1
# s = 1
# processed.append([df[idx][i][:-1],t,s])
#%%
X_train, y_train = process_data(dataset.train_data, T)
X_val, y_val = process_data(dataset.val_data, T)
X_test, y_test = process_data(dataset.test_data, T)
#%%
# X_train.head()
#%%
inverse_s = 1-X_train.s
X_train_cph = pd.DataFrame(X_train.x.values.tolist(), columns=columns)
X_train_cph["s"] = inverse_s
X_train_cph["w"] = (inverse_s * ARGS.positive_weight) + X_train.s
X_train_cph["t"] = X_train.t
#%%
cph = CoxPHFitter(penalizer=0.2)
cph.fit(X_train_cph, duration_col='t', event_col='s', weights_col='w', step_size=0.070, show_progress=True, robust=False)
#%%
#cph.check_assumptions(X_train_cph,show_plots=False,plot_n_bootstraps=0)
#cph.print_summary()
#%%
def get_metrics(ty, py, threshold=0.5):
print('-'*20)
auc = roc_auc_score(ty, py)
print(f"AUC = {auc}")
lst = [1 if i >=0.5 else 0 for i in py]
acc = ((lst == ty).sum() / ty.shape[0]) * 100
print(f"Accuracy = {acc}")
c_m = confusion_matrix(ty, np.array(py > threshold).astype(int))
print(c_m)
PPV = c_m[1,1] / (c_m[1,1] + c_m[0,1])
print(f"PPV/Precision = {PPV}")
TPR = c_m[1,1] / c_m[1].sum()
print(f"TPR/Sensitivity/Recall = {TPR}")
TNR = c_m[0,0] / c_m[0].sum()
print(f"TNR/Specificity = {TNR}")
print('-'*20)
#%%
def get_preds(df: pd.DataFrame, columns):
cph_df = pd.DataFrame(df.x.values.tolist(), columns=columns)
preds = 1-cph.predict_survival_function(cph_df,times=[6])
return preds
#%%
print("Train:")
train_preds = get_preds(X_train, columns)
get_metrics(y_train, train_preds, threshold=0.5)
print("Val:")
val_preds = get_preds(X_val, columns)
get_metrics(y_val, val_preds, threshold=0.5)
print("Test:")
test_preds = get_preds(X_test, columns)
get_metrics(y_test, test_preds, threshold=0.5)
#%%
test_preds = test_preds.values
#%%
grouped_preds = []
cur = 0
for x_length in dataset.test_data.x_lengths:
grouped_preds.append(list(test_preds[cur:cur+x_length].reshape((-1,1))))
cur += x_length
#%%
save_output(grouped_preds, list(dataset.test_data.files), "results", "COX", ARGS.imputation_mode, seed=ARGS.seed, threshold=0.5)
print('Finished!', '='*20)
| [
"argparse.ArgumentParser",
"lifelines.CoxPHFitter",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"pandas.DataFrame",
"datautils.dataset.Dataset"
] | [((354, 379), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (377, 379), False, 'import argparse\n'), ((733, 938), 'datautils.dataset.Dataset', 'Dataset', (['"""data/challenge_data"""'], {'batchSize': '(100)', 'train_ratio': '(0.8)', 'normalize': '(True)', 'padding': '(False)', 'imputeForward': "(False if ARGS.imputation_mode == 'mean' else True)", 'calculateDelay': '(False)', 'seed': 'ARGS.seed'}), "('data/challenge_data', batchSize=100, train_ratio=0.8, normalize=\n True, padding=False, imputeForward=False if ARGS.imputation_mode ==\n 'mean' else True, calculateDelay=False, seed=ARGS.seed)\n", (740, 938), False, 'from datautils.dataset import Dataset\n'), ((2929, 2955), 'lifelines.CoxPHFitter', 'CoxPHFitter', ([], {'penalizer': '(0.2)'}), '(penalizer=0.2)\n', (2940, 2955), False, 'from lifelines import CoxPHFitter\n'), ((3252, 3273), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['ty', 'py'], {}), '(ty, py)\n', (3265, 3273), False, 'from sklearn.metrics import roc_auc_score, confusion_matrix\n'), ((2025, 2073), 'pandas.DataFrame', 'pd.DataFrame', (['processed'], {'columns': "['x', 't', 's']"}), "(processed, columns=['x', 't', 's'])\n", (2037, 2073), True, 'import pandas as pd\n'), ((2073, 2089), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (2081, 2089), True, 'import numpy as np\n'), ((3456, 3480), 'numpy.array', 'np.array', (['(py > threshold)'], {}), '(py > threshold)\n', (3464, 3480), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.units import Quantity, Unit
from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap
from gammapy.utils.testing import mpl_plot_check, requires_dependency
pytest.importorskip("healpy")
map_axes = [
MapAxis.from_bounds(1.0, 10.0, 3, interp="log", name="energy"),
MapAxis.from_bounds(0.1, 1.0, 4, interp="log", name="time"),
]
mapbase_args = [
(0.1, 10.0, "wcs", SkyCoord(0.0, 30.0, unit="deg"), None, ""),
(0.1, 10.0, "wcs", SkyCoord(0.0, 30.0, unit="deg"), map_axes[:1], ""),
(0.1, 10.0, "wcs", SkyCoord(0.0, 30.0, unit="deg"), map_axes, "m^2"),
(0.1, 10.0, "hpx", SkyCoord(0.0, 30.0, unit="deg"), None, ""),
(0.1, 10.0, "hpx", SkyCoord(0.0, 30.0, unit="deg"), map_axes[:1], ""),
(0.1, 10.0, "hpx", SkyCoord(0.0, 30.0, unit="deg"), map_axes, "s^2"),
]
mapbase_args_with_axes = [_ for _ in mapbase_args if _[4] is not None]
@pytest.mark.parametrize(
("binsz", "width", "map_type", "skydir", "axes", "unit"), mapbase_args
)
def test_map_create(binsz, width, map_type, skydir, axes, unit):
m = Map.create(
binsz=binsz, width=width, map_type=map_type, skydir=skydir, axes=axes, unit=unit
)
assert m.unit == unit
@pytest.mark.parametrize(
("binsz", "width", "map_type", "skydir", "axes", "unit"), mapbase_args_with_axes
)
def test_map_copy(binsz, width, map_type, skydir, axes, unit):
m = Map.create(
binsz=binsz, width=width, map_type=map_type, skydir=skydir, axes=axes, unit=unit
)
m_copy = m.copy()
assert repr(m) == repr(m_copy)
m_copy = m.copy(unit="cm-2 s-1")
assert m_copy.unit == "cm-2 s-1"
assert m_copy.unit is not m.unit
m_copy = m.copy(meta={"is_copy": True})
assert m_copy.meta["is_copy"]
assert m_copy.meta is not m.meta
m_copy = m.copy(data=42 * np.ones(m.data.shape))
assert m_copy.data[(0,) * m_copy.data.ndim] == 42
assert m_copy.data is not m.data
def test_map_from_geom():
geom = WcsGeom.create(binsz=1.0, width=10.0)
m = Map.from_geom(geom)
assert isinstance(m, WcsNDMap)
assert m.geom.is_image
geom = HpxGeom.create(binsz=1.0, width=10.0)
m = Map.from_geom(geom)
assert isinstance(m, HpxNDMap)
assert m.geom.is_image
@pytest.mark.parametrize(
("binsz", "width", "map_type", "skydir", "axes", "unit"), mapbase_args_with_axes
)
def test_map_get_image_by_coord(binsz, width, map_type, skydir, axes, unit):
m = Map.create(
binsz=binsz, width=width, map_type=map_type, skydir=skydir, axes=axes, unit=unit
)
m.data = np.arange(m.data.size, dtype=float).reshape(m.data.shape)
coords = (3.456, 0.1234)[: len(m.geom.axes)]
m_image = m.get_image_by_coord(coords)
im_geom = m.geom.to_image()
skycoord = im_geom.get_coord().skycoord
m_vals = m.get_by_coord((skycoord,) + coords)
assert_equal(m_image.data, m_vals)
@pytest.mark.parametrize(
("binsz", "width", "map_type", "skydir", "axes", "unit"), mapbase_args_with_axes
)
def test_map_get_image_by_pix(binsz, width, map_type, skydir, axes, unit):
m = Map.create(
binsz=binsz, width=width, map_type=map_type, skydir=skydir, axes=axes, unit=unit
)
pix = (1.2345, 0.1234)[: len(m.geom.axes)]
m_image = m.get_image_by_pix(pix)
im_geom = m.geom.to_image()
idx = im_geom.get_idx()
m_vals = m.get_by_pix(idx + pix)
assert_equal(m_image.data, m_vals)
@pytest.mark.parametrize(
("binsz", "width", "map_type", "skydir", "axes", "unit"), mapbase_args_with_axes
)
def test_map_slice_by_idx(binsz, width, map_type, skydir, axes, unit):
m = Map.create(
binsz=binsz, width=width, map_type=map_type, skydir=skydir, axes=axes, unit=unit
)
data = np.arange(m.data.size, dtype=float)
m.data = data.reshape(m.data.shape)
# Test none slicing
sliced = m.slice_by_idx({})
assert_equal(m.geom.shape_axes, sliced.geom.shape_axes)
slices = {"energy": slice(0, 1), "time": slice(0, 2)}
sliced = m.slice_by_idx(slices)
assert not sliced.geom.is_image
slices = tuple([slices[ax.name] for ax in m.geom.axes])
assert_equal(m.data[slices[::-1]], sliced.data)
assert sliced.data.base is data
slices = {"energy": 0, "time": 1}
sliced = m.slice_by_idx(slices)
assert sliced.geom.is_image
slices = tuple([slices[ax.name] for ax in m.geom.axes])
assert_equal(m.data[slices[::-1]], sliced.data)
assert sliced.data.base is data
@pytest.mark.parametrize("map_type", ["wcs", "hpx"])
def test_map_meta_read_write(map_type):
meta = {"user": "test"}
m = Map.create(
binsz=0.1,
width=10.0,
map_type=map_type,
skydir=SkyCoord(0.0, 30.0, unit="deg"),
meta=meta,
)
hdulist = m.to_hdulist(hdu="COUNTS")
header = hdulist["COUNTS"].header
assert header["META"] == '{"user": "test"}'
m2 = Map.from_hdulist(hdulist)
assert m2.meta == meta
@pytest.mark.parametrize("map_type", ["wcs", "hpx"])
def test_map_time_axis_read_write(map_type):
time_axis = TimeMapAxis(
edges_min=[0, 2, 4] * u.d,
edges_max=[1, 3, 5] * u.d,
reference_time="2000-01-01",
)
energy_axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=5)
m = Map.create(
binsz=0.1,
width=10.0,
map_type=map_type,
skydir=SkyCoord(0.0, 30.0, unit="deg"),
axes=[energy_axis, time_axis],
)
hdulist = m.to_hdulist(hdu="COUNTS")
m2 = Map.from_hdulist(hdulist)
time_axis_new = m2.geom.axes["time"]
assert time_axis_new == time_axis
assert time_axis.reference_time.scale == "utc"
assert time_axis_new.reference_time.scale == "tt"
unit_args = [("wcs", "s"), ("wcs", ""), ("wcs", Unit("sr")), ("hpx", "m^2")]
@pytest.mark.parametrize(("map_type", "unit"), unit_args)
def test_map_quantity(map_type, unit):
m = Map.create(binsz=0.1, width=10.0, map_type=map_type, unit=unit)
# This is to test if default constructor with no unit performs as expected
if unit is None:
unit = ""
assert m.quantity.unit == Unit(unit)
m.quantity = Quantity(np.ones_like(m.data), "m2")
assert m.unit == "m2"
m1 = m.__class__(geom=m.geom, data=m.quantity)
assert m1.unit == "m2"
assert_allclose(m1.data, m.data)
@pytest.mark.parametrize(("map_type", "unit"), unit_args)
def test_map_unit_read_write(map_type, unit):
m = Map.create(binsz=0.1, width=10.0, map_type=map_type, unit=unit)
hdu_list = m.to_hdulist(hdu="COUNTS")
header = hdu_list["COUNTS"].header
assert Unit(header["BUNIT"]) == Unit(unit)
m2 = Map.from_hdulist(hdu_list)
assert m2.unit == unit
@pytest.mark.parametrize(("map_type", "unit"), unit_args)
def test_map_repr(map_type, unit):
m = Map.create(binsz=0.1, width=10.0, map_type=map_type, unit=unit)
assert m.__class__.__name__ in repr(m)
def test_map_properties():
# Test default values and types of all map properties,
# as well as the behaviour for the property get and set.
m = Map.create(npix=(2, 1))
assert isinstance(m.unit, u.CompositeUnit)
assert m._unit == u.one
m._unit = u.Unit("cm-2 s-1")
assert m.unit.to_string() == "1 / (cm2 s)"
assert isinstance(m.meta, dict)
m.meta = {"spam": 42}
assert isinstance(m.meta, dict)
# The rest of the tests are for the `data` property
assert isinstance(m.data, np.ndarray)
assert m.data.dtype == np.float32
assert m.data.shape == (1, 2)
assert_equal(m.data, 0)
# Assigning an array of matching shape stores it away
data = np.ones((1, 2))
m.data = data
assert m.data is data
# In-place modification += should work as expected
m.data = np.array([[42, 43]])
data = m.data
m.data += 1
assert m.data is data
assert_equal(m.data, [[43, 44]])
# Assigning to a slice of the map data should work as expected
data = m.data
m.data[:, :1] = 99
assert m.data is data
assert_equal(m.data, [[99, 44]])
# Assigning something that doesn't match raises an appropriate error
with pytest.raises(ValueError):
m.data = np.ones((1, 3))
map_arithmetics_args = [("wcs"), ("hpx")]
@pytest.mark.parametrize(("map_type"), map_arithmetics_args)
def test_map_arithmetics(map_type):
m1 = Map.create(binsz=0.1, width=1.0, map_type=map_type, skydir=(0, 0), unit="m2")
m2 = Map.create(binsz=0.1, width=1.0, map_type=map_type, skydir=(0, 0), unit="m2")
m2.data += 1.0
# addition
m1 += 1 * u.cm ** 2
assert m1.unit == u.Unit("m2")
assert_allclose(m1.data, 1e-4)
m3 = m1 + m2
assert m3.unit == u.Unit("m2")
assert_allclose(m3.data, 1.0001)
# subtraction
m3 -= 1 * u.cm ** 2
assert m3.unit == u.Unit("m2")
assert_allclose(m3.data, 1.0)
m3 = m2 - m1
assert m3.unit == u.Unit("m2")
assert_allclose(m3.data, 0.9999)
m4 = Map.create(binsz=0.1, width=1.0, map_type=map_type, skydir=(0, 0), unit="s")
m4.data += 1.0
# multiplication
m1 *= 1e4
assert m1.unit == u.Unit("m2")
assert_allclose(m1.data, 1)
m5 = m2 * m4
assert m5.unit == u.Unit("m2s")
assert_allclose(m5.data, 1)
# division
m5 /= 10 * u.s
assert m5.unit == u.Unit("m2")
assert_allclose(m5.data, 0.1)
# check unit consistency
with pytest.raises(u.UnitConversionError):
m5 += 1 * u.W
m1.data *= 0.0
m1._unit = u.one
m1 += 4
assert m1.unit == u.Unit("")
assert_allclose(m1.data, 4)
lt_m2 = m2 < 1.5 * u.m ** 2
assert lt_m2.data.dtype == bool
assert_allclose(lt_m2, True)
le_m2 = m2 <= 10000 * u.cm ** 2
assert_allclose(le_m2, True)
gt_m2 = m2 > 15000 * u.cm ** 2
assert_allclose(gt_m2, False)
ge_m2 = m2 >= m2
assert_allclose(ge_m2, True)
eq_m2 = m2 == 500 * u.cm ** 2
assert_allclose(eq_m2, False)
ne_m2 = m2 != 500 * u.cm ** 2
assert_allclose(ne_m2, True)
def test_boolean_arithmetics():
m_1 = Map.create(binsz=1, width=2)
m_1.data = True
m_2 = Map.create(binsz=1, width=2)
m_2.data = False
m_and = m_1 & m_2
assert not np.any(m_and.data)
m_or = m_1 | m_2
assert np.all(m_or.data)
m_not = ~m_2
assert np.all(m_not.data)
m_xor = m_1 ^ m_1
assert not np.any(m_xor.data)
def test_arithmetics_inconsistent_geom():
m_wcs = Map.create(binsz=0.1, width=1.0)
m_wcs_incorrect = Map.create(binsz=0.1, width=2.0)
with pytest.raises(ValueError):
m_wcs += m_wcs_incorrect
m_hpx = Map.create(binsz=0.1, width=1.0, map_type="hpx")
with pytest.raises(ValueError):
m_wcs += m_hpx
# TODO: correct serialization for lin axis for energy
# map_serialization_args = [("log"), ("lin")]
map_serialization_args = [("log")]
@pytest.mark.parametrize(("interp"), map_serialization_args)
def test_arithmetics_after_serialization(tmp_path, interp):
axis = MapAxis.from_bounds(
1.0, 10.0, 3, interp=interp, name="energy", node_type="center", unit="TeV"
)
m_wcs = Map.create(binsz=0.1, width=1.0, map_type="wcs", skydir=(0, 0), axes=[axis])
m_wcs += 1
m_wcs.write(tmp_path / "tmp.fits")
m_wcs_serialized = Map.read(tmp_path / "tmp.fits")
m_wcs += m_wcs_serialized
assert_allclose(m_wcs.data, 2.0)
def test_set_scalar():
m = Map.create(width=1)
m.data = 1
assert m.data.shape == (10, 10)
assert_allclose(m.data, 1)
def test_interp_to_geom():
energy = MapAxis.from_energy_bounds("1 TeV", "300 TeV", nbin=5, name="energy")
energy_target = MapAxis.from_energy_bounds(
"1 TeV", "300 TeV", nbin=7, name="energy"
)
value = 30
coords = {"skycoord": SkyCoord("0 deg", "0 deg"), "energy": energy_target.center[3]}
# WcsNDMap
geom_wcs = WcsGeom.create(
npix=(5, 3), proj="CAR", binsz=60, axes=[energy], skydir=(0, 0)
)
wcs_map = Map.from_geom(geom_wcs, unit="")
wcs_map.data = value * np.ones(wcs_map.data.shape)
wcs_geom_target = WcsGeom.create(
skydir=(0, 0), width=(10, 10), binsz=0.1 * u.deg, axes=[energy_target]
)
interp_wcs_map = wcs_map.interp_to_geom(wcs_geom_target, method="linear")
assert_allclose(interp_wcs_map.get_by_coord(coords)[0], value, atol=1e-7)
assert isinstance(interp_wcs_map, WcsNDMap)
assert interp_wcs_map.geom == wcs_geom_target
# HpxNDMap
geom_hpx = HpxGeom.create(binsz=60, axes=[energy], skydir=(0, 0))
hpx_map = Map.from_geom(geom_hpx, unit="")
hpx_map.data = value * np.ones(hpx_map.data.shape)
hpx_geom_target = HpxGeom.create(
skydir=(0, 0), width=10, binsz=0.1 * u.deg, axes=[energy_target]
)
interp_hpx_map = hpx_map.interp_to_geom(hpx_geom_target)
assert_allclose(interp_hpx_map.get_by_coord(coords)[0], value, atol=1e-7)
assert isinstance(interp_hpx_map, HpxNDMap)
assert interp_hpx_map.geom == hpx_geom_target
# Preserving the counts
geom_initial = WcsGeom.create(
skydir=(20, 20),
width=(5, 5),
binsz=0.2 * u.deg,
)
test_map = Map.from_geom(geom_initial, unit="")
test_map.data = value * np.ones(test_map.data.shape)
geom_target = WcsGeom.create(
skydir=(20, 20),
width=(5, 5),
binsz=0.1 * u.deg,
)
new_map = test_map.interp_to_geom(geom_target, preserve_counts=True)
assert np.floor(np.sum(new_map.data)) == np.sum(test_map.data)
@requires_dependency("matplotlib")
def test_map_plot_mask():
from regions import CircleSkyRegion
skydir = SkyCoord(0, 0, frame="galactic", unit="deg")
m_wcs = Map.create(
map_type="wcs",
binsz=0.02,
skydir=skydir,
width=2.0,
)
exclusion_region = CircleSkyRegion(
center=SkyCoord(0.0, 0.0, unit="deg", frame="galactic"), radius=0.6 * u.deg
)
mask = ~m_wcs.geom.region_mask([exclusion_region])
with mpl_plot_check():
mask.plot_mask()
| [
"numpy.testing.assert_equal",
"numpy.array",
"numpy.arange",
"astropy.units.Unit",
"numpy.testing.assert_allclose",
"gammapy.utils.testing.mpl_plot_check",
"gammapy.maps.Map.create",
"gammapy.maps.Map.from_hdulist",
"gammapy.maps.Map.from_geom",
"gammapy.maps.TimeMapAxis",
"gammapy.utils.testing... | [((421, 450), 'pytest.importorskip', 'pytest.importorskip', (['"""healpy"""'], {}), "('healpy')\n", (440, 450), False, 'import pytest\n'), ((1127, 1226), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('binsz', 'width', 'map_type', 'skydir', 'axes', 'unit')", 'mapbase_args'], {}), "(('binsz', 'width', 'map_type', 'skydir', 'axes',\n 'unit'), mapbase_args)\n", (1150, 1226), False, 'import pytest\n'), ((1438, 1547), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('binsz', 'width', 'map_type', 'skydir', 'axes', 'unit')", 'mapbase_args_with_axes'], {}), "(('binsz', 'width', 'map_type', 'skydir', 'axes',\n 'unit'), mapbase_args_with_axes)\n", (1461, 1547), False, 'import pytest\n'), ((2469, 2578), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('binsz', 'width', 'map_type', 'skydir', 'axes', 'unit')", 'mapbase_args_with_axes'], {}), "(('binsz', 'width', 'map_type', 'skydir', 'axes',\n 'unit'), mapbase_args_with_axes)\n", (2492, 2578), False, 'import pytest\n'), ((3108, 3217), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('binsz', 'width', 'map_type', 'skydir', 'axes', 'unit')", 'mapbase_args_with_axes'], {}), "(('binsz', 'width', 'map_type', 'skydir', 'axes',\n 'unit'), mapbase_args_with_axes)\n", (3131, 3217), False, 'import pytest\n'), ((3635, 3744), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('binsz', 'width', 'map_type', 'skydir', 'axes', 'unit')", 'mapbase_args_with_axes'], {}), "(('binsz', 'width', 'map_type', 'skydir', 'axes',\n 'unit'), mapbase_args_with_axes)\n", (3658, 3744), False, 'import pytest\n'), ((4674, 4725), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""map_type"""', "['wcs', 'hpx']"], {}), "('map_type', ['wcs', 'hpx'])\n", (4697, 4725), False, 'import pytest\n'), ((5149, 5200), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""map_type"""', "['wcs', 'hpx']"], {}), "('map_type', ['wcs', 'hpx'])\n", (5172, 5200), False, 'import pytest\n'), 
((5986, 6042), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('map_type', 'unit')", 'unit_args'], {}), "(('map_type', 'unit'), unit_args)\n", (6009, 6042), False, 'import pytest\n'), ((6514, 6570), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('map_type', 'unit')", 'unit_args'], {}), "(('map_type', 'unit'), unit_args)\n", (6537, 6570), False, 'import pytest\n'), ((6886, 6942), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('map_type', 'unit')", 'unit_args'], {}), "(('map_type', 'unit'), unit_args)\n", (6909, 6942), False, 'import pytest\n'), ((8409, 8466), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""map_type"""', 'map_arithmetics_args'], {}), "('map_type', map_arithmetics_args)\n", (8432, 8466), False, 'import pytest\n'), ((10995, 11052), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""interp"""', 'map_serialization_args'], {}), "('interp', map_serialization_args)\n", (11018, 11052), False, 'import pytest\n'), ((13620, 13653), 'gammapy.utils.testing.requires_dependency', 'requires_dependency', (['"""matplotlib"""'], {}), "('matplotlib')\n", (13639, 13653), False, 'from gammapy.utils.testing import mpl_plot_check, requires_dependency\n'), ((469, 531), 'gammapy.maps.MapAxis.from_bounds', 'MapAxis.from_bounds', (['(1.0)', '(10.0)', '(3)'], {'interp': '"""log"""', 'name': '"""energy"""'}), "(1.0, 10.0, 3, interp='log', name='energy')\n", (488, 531), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((537, 596), 'gammapy.maps.MapAxis.from_bounds', 'MapAxis.from_bounds', (['(0.1)', '(1.0)', '(4)'], {'interp': '"""log"""', 'name': '"""time"""'}), "(0.1, 1.0, 4, interp='log', name='time')\n", (556, 596), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((1302, 1399), 'gammapy.maps.Map.create', 'Map.create', ([], {'binsz': 'binsz', 'width': 'width', 'map_type': 'map_type', 'skydir': 'skydir', 'axes': 'axes', 
'unit': 'unit'}), '(binsz=binsz, width=width, map_type=map_type, skydir=skydir, axes\n =axes, unit=unit)\n', (1312, 1399), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((1621, 1718), 'gammapy.maps.Map.create', 'Map.create', ([], {'binsz': 'binsz', 'width': 'width', 'map_type': 'map_type', 'skydir': 'skydir', 'axes': 'axes', 'unit': 'unit'}), '(binsz=binsz, width=width, map_type=map_type, skydir=skydir, axes\n =axes, unit=unit)\n', (1631, 1718), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((2198, 2235), 'gammapy.maps.WcsGeom.create', 'WcsGeom.create', ([], {'binsz': '(1.0)', 'width': '(10.0)'}), '(binsz=1.0, width=10.0)\n', (2212, 2235), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((2244, 2263), 'gammapy.maps.Map.from_geom', 'Map.from_geom', (['geom'], {}), '(geom)\n', (2257, 2263), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((2338, 2375), 'gammapy.maps.HpxGeom.create', 'HpxGeom.create', ([], {'binsz': '(1.0)', 'width': '(10.0)'}), '(binsz=1.0, width=10.0)\n', (2352, 2375), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((2384, 2403), 'gammapy.maps.Map.from_geom', 'Map.from_geom', (['geom'], {}), '(geom)\n', (2397, 2403), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((2666, 2763), 'gammapy.maps.Map.create', 'Map.create', ([], {'binsz': 'binsz', 'width': 'width', 'map_type': 'map_type', 'skydir': 'skydir', 'axes': 'axes', 'unit': 'unit'}), '(binsz=binsz, width=width, map_type=map_type, skydir=skydir, axes\n =axes, unit=unit)\n', (2676, 2763), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((3070, 3104), 'numpy.testing.assert_equal', 'assert_equal', (['m_image.data', 
'm_vals'], {}), '(m_image.data, m_vals)\n', (3082, 3104), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3303, 3400), 'gammapy.maps.Map.create', 'Map.create', ([], {'binsz': 'binsz', 'width': 'width', 'map_type': 'map_type', 'skydir': 'skydir', 'axes': 'axes', 'unit': 'unit'}), '(binsz=binsz, width=width, map_type=map_type, skydir=skydir, axes\n =axes, unit=unit)\n', (3313, 3400), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((3597, 3631), 'numpy.testing.assert_equal', 'assert_equal', (['m_image.data', 'm_vals'], {}), '(m_image.data, m_vals)\n', (3609, 3631), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3826, 3923), 'gammapy.maps.Map.create', 'Map.create', ([], {'binsz': 'binsz', 'width': 'width', 'map_type': 'map_type', 'skydir': 'skydir', 'axes': 'axes', 'unit': 'unit'}), '(binsz=binsz, width=width, map_type=map_type, skydir=skydir, axes\n =axes, unit=unit)\n', (3836, 3923), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((3944, 3979), 'numpy.arange', 'np.arange', (['m.data.size'], {'dtype': 'float'}), '(m.data.size, dtype=float)\n', (3953, 3979), True, 'import numpy as np\n'), ((4081, 4136), 'numpy.testing.assert_equal', 'assert_equal', (['m.geom.shape_axes', 'sliced.geom.shape_axes'], {}), '(m.geom.shape_axes, sliced.geom.shape_axes)\n', (4093, 4136), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4332, 4379), 'numpy.testing.assert_equal', 'assert_equal', (['m.data[slices[::-1]]', 'sliced.data'], {}), '(m.data[slices[::-1]], sliced.data)\n', (4344, 4379), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4587, 4634), 'numpy.testing.assert_equal', 'assert_equal', (['m.data[slices[::-1]]', 'sliced.data'], {}), '(m.data[slices[::-1]], sliced.data)\n', (4599, 4634), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5093, 5118), 
'gammapy.maps.Map.from_hdulist', 'Map.from_hdulist', (['hdulist'], {}), '(hdulist)\n', (5109, 5118), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((5262, 5360), 'gammapy.maps.TimeMapAxis', 'TimeMapAxis', ([], {'edges_min': '([0, 2, 4] * u.d)', 'edges_max': '([1, 3, 5] * u.d)', 'reference_time': '"""2000-01-01"""'}), "(edges_min=[0, 2, 4] * u.d, edges_max=[1, 3, 5] * u.d,\n reference_time='2000-01-01')\n", (5273, 5360), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((5407, 5460), 'gammapy.maps.MapAxis.from_energy_bounds', 'MapAxis.from_energy_bounds', (['"""1 TeV"""', '"""10 TeV"""'], {'nbin': '(5)'}), "('1 TeV', '10 TeV', nbin=5)\n", (5433, 5460), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((5693, 5718), 'gammapy.maps.Map.from_hdulist', 'Map.from_hdulist', (['hdulist'], {}), '(hdulist)\n', (5709, 5718), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((6090, 6153), 'gammapy.maps.Map.create', 'Map.create', ([], {'binsz': '(0.1)', 'width': '(10.0)', 'map_type': 'map_type', 'unit': 'unit'}), '(binsz=0.1, width=10.0, map_type=map_type, unit=unit)\n', (6100, 6153), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((6478, 6510), 'numpy.testing.assert_allclose', 'assert_allclose', (['m1.data', 'm.data'], {}), '(m1.data, m.data)\n', (6493, 6510), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6625, 6688), 'gammapy.maps.Map.create', 'Map.create', ([], {'binsz': '(0.1)', 'width': '(10.0)', 'map_type': 'map_type', 'unit': 'unit'}), '(binsz=0.1, width=10.0, map_type=map_type, unit=unit)\n', (6635, 6688), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((6829, 6855), 'gammapy.maps.Map.from_hdulist', 'Map.from_hdulist', 
(['hdu_list'], {}), '(hdu_list)\n', (6845, 6855), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((6986, 7049), 'gammapy.maps.Map.create', 'Map.create', ([], {'binsz': '(0.1)', 'width': '(10.0)', 'map_type': 'map_type', 'unit': 'unit'}), '(binsz=0.1, width=10.0, map_type=map_type, unit=unit)\n', (6996, 7049), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((7251, 7274), 'gammapy.maps.Map.create', 'Map.create', ([], {'npix': '(2, 1)'}), '(npix=(2, 1))\n', (7261, 7274), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((7365, 7383), 'astropy.units.Unit', 'u.Unit', (['"""cm-2 s-1"""'], {}), "('cm-2 s-1')\n", (7371, 7383), True, 'import astropy.units as u\n'), ((7706, 7729), 'numpy.testing.assert_equal', 'assert_equal', (['m.data', '(0)'], {}), '(m.data, 0)\n', (7718, 7729), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7800, 7815), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (7807, 7815), True, 'import numpy as np\n'), ((7929, 7949), 'numpy.array', 'np.array', (['[[42, 43]]'], {}), '([[42, 43]])\n', (7937, 7949), True, 'import numpy as np\n'), ((8014, 8046), 'numpy.testing.assert_equal', 'assert_equal', (['m.data', '[[43, 44]]'], {}), '(m.data, [[43, 44]])\n', (8026, 8046), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((8186, 8218), 'numpy.testing.assert_equal', 'assert_equal', (['m.data', '[[99, 44]]'], {}), '(m.data, [[99, 44]])\n', (8198, 8218), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((8515, 8592), 'gammapy.maps.Map.create', 'Map.create', ([], {'binsz': '(0.1)', 'width': '(1.0)', 'map_type': 'map_type', 'skydir': '(0, 0)', 'unit': '"""m2"""'}), "(binsz=0.1, width=1.0, map_type=map_type, skydir=(0, 0), unit='m2')\n", (8525, 8592), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, 
WcsGeom, WcsNDMap\n'), ((8603, 8680), 'gammapy.maps.Map.create', 'Map.create', ([], {'binsz': '(0.1)', 'width': '(1.0)', 'map_type': 'map_type', 'skydir': '(0, 0)', 'unit': '"""m2"""'}), "(binsz=0.1, width=1.0, map_type=map_type, skydir=(0, 0), unit='m2')\n", (8613, 8680), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((8779, 8811), 'numpy.testing.assert_allclose', 'assert_allclose', (['m1.data', '(0.0001)'], {}), '(m1.data, 0.0001)\n', (8794, 8811), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((8867, 8899), 'numpy.testing.assert_allclose', 'assert_allclose', (['m3.data', '(1.0001)'], {}), '(m3.data, 1.0001)\n', (8882, 8899), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((8982, 9011), 'numpy.testing.assert_allclose', 'assert_allclose', (['m3.data', '(1.0)'], {}), '(m3.data, 1.0)\n', (8997, 9011), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9069, 9101), 'numpy.testing.assert_allclose', 'assert_allclose', (['m3.data', '(0.9999)'], {}), '(m3.data, 0.9999)\n', (9084, 9101), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9112, 9188), 'gammapy.maps.Map.create', 'Map.create', ([], {'binsz': '(0.1)', 'width': '(1.0)', 'map_type': 'map_type', 'skydir': '(0, 0)', 'unit': '"""s"""'}), "(binsz=0.1, width=1.0, map_type=map_type, skydir=(0, 0), unit='s')\n", (9122, 9188), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((9283, 9310), 'numpy.testing.assert_allclose', 'assert_allclose', (['m1.data', '(1)'], {}), '(m1.data, 1)\n', (9298, 9310), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9369, 9396), 'numpy.testing.assert_allclose', 'assert_allclose', (['m5.data', '(1)'], {}), '(m5.data, 1)\n', (9384, 9396), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9471, 9500), 'numpy.testing.assert_allclose', 'assert_allclose', 
(['m5.data', '(0.1)'], {}), '(m5.data, 0.1)\n', (9486, 9500), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9690, 9717), 'numpy.testing.assert_allclose', 'assert_allclose', (['m1.data', '(4)'], {}), '(m1.data, 4)\n', (9705, 9717), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9791, 9819), 'numpy.testing.assert_allclose', 'assert_allclose', (['lt_m2', '(True)'], {}), '(lt_m2, True)\n', (9806, 9819), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9861, 9889), 'numpy.testing.assert_allclose', 'assert_allclose', (['le_m2', '(True)'], {}), '(le_m2, True)\n', (9876, 9889), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9930, 9959), 'numpy.testing.assert_allclose', 'assert_allclose', (['gt_m2', '(False)'], {}), '(gt_m2, False)\n', (9945, 9959), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9986, 10014), 'numpy.testing.assert_allclose', 'assert_allclose', (['ge_m2', '(True)'], {}), '(ge_m2, True)\n', (10001, 10014), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((10054, 10083), 'numpy.testing.assert_allclose', 'assert_allclose', (['eq_m2', '(False)'], {}), '(eq_m2, False)\n', (10069, 10083), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((10123, 10151), 'numpy.testing.assert_allclose', 'assert_allclose', (['ne_m2', '(True)'], {}), '(ne_m2, True)\n', (10138, 10151), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((10196, 10224), 'gammapy.maps.Map.create', 'Map.create', ([], {'binsz': '(1)', 'width': '(2)'}), '(binsz=1, width=2)\n', (10206, 10224), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((10256, 10284), 'gammapy.maps.Map.create', 'Map.create', ([], {'binsz': '(1)', 'width': '(2)'}), '(binsz=1, width=2)\n', (10266, 10284), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), 
((10396, 10413), 'numpy.all', 'np.all', (['m_or.data'], {}), '(m_or.data)\n', (10402, 10413), True, 'import numpy as np\n'), ((10443, 10461), 'numpy.all', 'np.all', (['m_not.data'], {}), '(m_not.data)\n', (10449, 10461), True, 'import numpy as np\n'), ((10575, 10607), 'gammapy.maps.Map.create', 'Map.create', ([], {'binsz': '(0.1)', 'width': '(1.0)'}), '(binsz=0.1, width=1.0)\n', (10585, 10607), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((10630, 10662), 'gammapy.maps.Map.create', 'Map.create', ([], {'binsz': '(0.1)', 'width': '(2.0)'}), '(binsz=0.1, width=2.0)\n', (10640, 10662), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((10746, 10794), 'gammapy.maps.Map.create', 'Map.create', ([], {'binsz': '(0.1)', 'width': '(1.0)', 'map_type': '"""hpx"""'}), "(binsz=0.1, width=1.0, map_type='hpx')\n", (10756, 10794), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((11126, 11226), 'gammapy.maps.MapAxis.from_bounds', 'MapAxis.from_bounds', (['(1.0)', '(10.0)', '(3)'], {'interp': 'interp', 'name': '"""energy"""', 'node_type': '"""center"""', 'unit': '"""TeV"""'}), "(1.0, 10.0, 3, interp=interp, name='energy', node_type=\n 'center', unit='TeV')\n", (11145, 11226), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((11248, 11324), 'gammapy.maps.Map.create', 'Map.create', ([], {'binsz': '(0.1)', 'width': '(1.0)', 'map_type': '"""wcs"""', 'skydir': '(0, 0)', 'axes': '[axis]'}), "(binsz=0.1, width=1.0, map_type='wcs', skydir=(0, 0), axes=[axis])\n", (11258, 11324), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((11403, 11434), 'gammapy.maps.Map.read', 'Map.read', (["(tmp_path / 'tmp.fits')"], {}), "(tmp_path / 'tmp.fits')\n", (11411, 11434), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, 
TimeMapAxis, WcsGeom, WcsNDMap\n'), ((11471, 11503), 'numpy.testing.assert_allclose', 'assert_allclose', (['m_wcs.data', '(2.0)'], {}), '(m_wcs.data, 2.0)\n', (11486, 11503), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11537, 11556), 'gammapy.maps.Map.create', 'Map.create', ([], {'width': '(1)'}), '(width=1)\n', (11547, 11556), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((11612, 11638), 'numpy.testing.assert_allclose', 'assert_allclose', (['m.data', '(1)'], {}), '(m.data, 1)\n', (11627, 11638), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11681, 11750), 'gammapy.maps.MapAxis.from_energy_bounds', 'MapAxis.from_energy_bounds', (['"""1 TeV"""', '"""300 TeV"""'], {'nbin': '(5)', 'name': '"""energy"""'}), "('1 TeV', '300 TeV', nbin=5, name='energy')\n", (11707, 11750), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((11771, 11840), 'gammapy.maps.MapAxis.from_energy_bounds', 'MapAxis.from_energy_bounds', (['"""1 TeV"""', '"""300 TeV"""'], {'nbin': '(7)', 'name': '"""energy"""'}), "('1 TeV', '300 TeV', nbin=7, name='energy')\n", (11797, 11840), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((11990, 12069), 'gammapy.maps.WcsGeom.create', 'WcsGeom.create', ([], {'npix': '(5, 3)', 'proj': '"""CAR"""', 'binsz': '(60)', 'axes': '[energy]', 'skydir': '(0, 0)'}), "(npix=(5, 3), proj='CAR', binsz=60, axes=[energy], skydir=(0, 0))\n", (12004, 12069), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((12098, 12130), 'gammapy.maps.Map.from_geom', 'Map.from_geom', (['geom_wcs'], {'unit': '""""""'}), "(geom_wcs, unit='')\n", (12111, 12130), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((12209, 12300), 'gammapy.maps.WcsGeom.create', 'WcsGeom.create', ([], 
{'skydir': '(0, 0)', 'width': '(10, 10)', 'binsz': '(0.1 * u.deg)', 'axes': '[energy_target]'}), '(skydir=(0, 0), width=(10, 10), binsz=0.1 * u.deg, axes=[\n energy_target])\n', (12223, 12300), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((12596, 12650), 'gammapy.maps.HpxGeom.create', 'HpxGeom.create', ([], {'binsz': '(60)', 'axes': '[energy]', 'skydir': '(0, 0)'}), '(binsz=60, axes=[energy], skydir=(0, 0))\n', (12610, 12650), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((12665, 12697), 'gammapy.maps.Map.from_geom', 'Map.from_geom', (['geom_hpx'], {'unit': '""""""'}), "(geom_hpx, unit='')\n", (12678, 12697), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((12776, 12861), 'gammapy.maps.HpxGeom.create', 'HpxGeom.create', ([], {'skydir': '(0, 0)', 'width': '(10)', 'binsz': '(0.1 * u.deg)', 'axes': '[energy_target]'}), '(skydir=(0, 0), width=10, binsz=0.1 * u.deg, axes=[energy_target]\n )\n', (12790, 12861), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((13157, 13221), 'gammapy.maps.WcsGeom.create', 'WcsGeom.create', ([], {'skydir': '(20, 20)', 'width': '(5, 5)', 'binsz': '(0.2 * u.deg)'}), '(skydir=(20, 20), width=(5, 5), binsz=0.2 * u.deg)\n', (13171, 13221), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((13269, 13305), 'gammapy.maps.Map.from_geom', 'Map.from_geom', (['geom_initial'], {'unit': '""""""'}), "(geom_initial, unit='')\n", (13282, 13305), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((13381, 13445), 'gammapy.maps.WcsGeom.create', 'WcsGeom.create', ([], {'skydir': '(20, 20)', 'width': '(5, 5)', 'binsz': '(0.1 * u.deg)'}), '(skydir=(20, 20), width=(5, 5), binsz=0.1 * u.deg)\n', (13395, 13445), False, 'from gammapy.maps 
import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((13734, 13778), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(0)', '(0)'], {'frame': '"""galactic"""', 'unit': '"""deg"""'}), "(0, 0, frame='galactic', unit='deg')\n", (13742, 13778), False, 'from astropy.coordinates import SkyCoord\n'), ((13792, 13856), 'gammapy.maps.Map.create', 'Map.create', ([], {'map_type': '"""wcs"""', 'binsz': '(0.02)', 'skydir': 'skydir', 'width': '(2.0)'}), "(map_type='wcs', binsz=0.02, skydir=skydir, width=2.0)\n", (13802, 13856), False, 'from gammapy.maps import HpxGeom, HpxNDMap, Map, MapAxis, TimeMapAxis, WcsGeom, WcsNDMap\n'), ((641, 672), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(0.0)', '(30.0)'], {'unit': '"""deg"""'}), "(0.0, 30.0, unit='deg')\n", (649, 672), False, 'from astropy.coordinates import SkyCoord\n'), ((708, 739), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(0.0)', '(30.0)'], {'unit': '"""deg"""'}), "(0.0, 30.0, unit='deg')\n", (716, 739), False, 'from astropy.coordinates import SkyCoord\n'), ((783, 814), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(0.0)', '(30.0)'], {'unit': '"""deg"""'}), "(0.0, 30.0, unit='deg')\n", (791, 814), False, 'from astropy.coordinates import SkyCoord\n'), ((857, 888), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(0.0)', '(30.0)'], {'unit': '"""deg"""'}), "(0.0, 30.0, unit='deg')\n", (865, 888), False, 'from astropy.coordinates import SkyCoord\n'), ((924, 955), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(0.0)', '(30.0)'], {'unit': '"""deg"""'}), "(0.0, 30.0, unit='deg')\n", (932, 955), False, 'from astropy.coordinates import SkyCoord\n'), ((999, 1030), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(0.0)', '(30.0)'], {'unit': '"""deg"""'}), "(0.0, 30.0, unit='deg')\n", (1007, 1030), False, 'from astropy.coordinates import SkyCoord\n'), ((5954, 5964), 'astropy.units.Unit', 'Unit', (['"""sr"""'], {}), "('sr')\n", (5958, 5964), False, 'from astropy.units import Quantity, Unit\n'), ((6303, 6313), 
'astropy.units.Unit', 'Unit', (['unit'], {}), '(unit)\n', (6307, 6313), False, 'from astropy.units import Quantity, Unit\n'), ((6341, 6361), 'numpy.ones_like', 'np.ones_like', (['m.data'], {}), '(m.data)\n', (6353, 6361), True, 'import numpy as np\n'), ((6783, 6804), 'astropy.units.Unit', 'Unit', (["header['BUNIT']"], {}), "(header['BUNIT'])\n", (6787, 6804), False, 'from astropy.units import Quantity, Unit\n'), ((6808, 6818), 'astropy.units.Unit', 'Unit', (['unit'], {}), '(unit)\n', (6812, 6818), False, 'from astropy.units import Quantity, Unit\n'), ((8302, 8327), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8315, 8327), False, 'import pytest\n'), ((8346, 8361), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (8353, 8361), True, 'import numpy as np\n'), ((8762, 8774), 'astropy.units.Unit', 'u.Unit', (['"""m2"""'], {}), "('m2')\n", (8768, 8774), True, 'import astropy.units as u\n'), ((8850, 8862), 'astropy.units.Unit', 'u.Unit', (['"""m2"""'], {}), "('m2')\n", (8856, 8862), True, 'import astropy.units as u\n'), ((8965, 8977), 'astropy.units.Unit', 'u.Unit', (['"""m2"""'], {}), "('m2')\n", (8971, 8977), True, 'import astropy.units as u\n'), ((9052, 9064), 'astropy.units.Unit', 'u.Unit', (['"""m2"""'], {}), "('m2')\n", (9058, 9064), True, 'import astropy.units as u\n'), ((9266, 9278), 'astropy.units.Unit', 'u.Unit', (['"""m2"""'], {}), "('m2')\n", (9272, 9278), True, 'import astropy.units as u\n'), ((9351, 9364), 'astropy.units.Unit', 'u.Unit', (['"""m2s"""'], {}), "('m2s')\n", (9357, 9364), True, 'import astropy.units as u\n'), ((9454, 9466), 'astropy.units.Unit', 'u.Unit', (['"""m2"""'], {}), "('m2')\n", (9460, 9466), True, 'import astropy.units as u\n'), ((9540, 9576), 'pytest.raises', 'pytest.raises', (['u.UnitConversionError'], {}), '(u.UnitConversionError)\n', (9553, 9576), False, 'import pytest\n'), ((9675, 9685), 'astropy.units.Unit', 'u.Unit', (['""""""'], {}), "('')\n", (9681, 9685), True, 'import astropy.units as 
u\n'), ((10344, 10362), 'numpy.any', 'np.any', (['m_and.data'], {}), '(m_and.data)\n', (10350, 10362), True, 'import numpy as np\n'), ((10500, 10518), 'numpy.any', 'np.any', (['m_xor.data'], {}), '(m_xor.data)\n', (10506, 10518), True, 'import numpy as np\n'), ((10673, 10698), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10686, 10698), False, 'import pytest\n'), ((10804, 10829), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10817, 10829), False, 'import pytest\n'), ((11896, 11922), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['"""0 deg"""', '"""0 deg"""'], {}), "('0 deg', '0 deg')\n", (11904, 11922), False, 'from astropy.coordinates import SkyCoord\n'), ((12158, 12185), 'numpy.ones', 'np.ones', (['wcs_map.data.shape'], {}), '(wcs_map.data.shape)\n', (12165, 12185), True, 'import numpy as np\n'), ((12725, 12752), 'numpy.ones', 'np.ones', (['hpx_map.data.shape'], {}), '(hpx_map.data.shape)\n', (12732, 12752), True, 'import numpy as np\n'), ((13334, 13362), 'numpy.ones', 'np.ones', (['test_map.data.shape'], {}), '(test_map.data.shape)\n', (13341, 13362), True, 'import numpy as np\n'), ((13595, 13616), 'numpy.sum', 'np.sum', (['test_map.data'], {}), '(test_map.data)\n', (13601, 13616), True, 'import numpy as np\n'), ((14093, 14109), 'gammapy.utils.testing.mpl_plot_check', 'mpl_plot_check', ([], {}), '()\n', (14107, 14109), False, 'from gammapy.utils.testing import mpl_plot_check, requires_dependency\n'), ((2786, 2821), 'numpy.arange', 'np.arange', (['m.data.size'], {'dtype': 'float'}), '(m.data.size, dtype=float)\n', (2795, 2821), True, 'import numpy as np\n'), ((4896, 4927), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(0.0)', '(30.0)'], {'unit': '"""deg"""'}), "(0.0, 30.0, unit='deg')\n", (4904, 4927), False, 'from astropy.coordinates import SkyCoord\n'), ((5563, 5594), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(0.0)', '(30.0)'], {'unit': '"""deg"""'}), "(0.0, 30.0, unit='deg')\n", (5571, 5594), 
False, 'from astropy.coordinates import SkyCoord\n'), ((13570, 13590), 'numpy.sum', 'np.sum', (['new_map.data'], {}), '(new_map.data)\n', (13576, 13590), True, 'import numpy as np\n'), ((13952, 14000), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(0.0)', '(0.0)'], {'unit': '"""deg"""', 'frame': '"""galactic"""'}), "(0.0, 0.0, unit='deg', frame='galactic')\n", (13960, 14000), False, 'from astropy.coordinates import SkyCoord\n'), ((2045, 2066), 'numpy.ones', 'np.ones', (['m.data.shape'], {}), '(m.data.shape)\n', (2052, 2066), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from sklearn.metrics import *
import matplotlib.pyplot as plt
import sugeno_integral
def getfile(filename, root="../", n_class0=376, n_class1=369):
    """Load a classifier-probability CSV and build the matching label vector.

    The rows of the CSV are assumed to be ordered class-0 samples first,
    then class-1 samples, so the ground-truth labels are implied by row
    position rather than stored in the file.

    Args:
        filename: CSV base name (without the '.csv' extension).
        root: directory prefix prepended to the filename.
        n_class0: number of leading rows labelled 0 (default 376, the
            split used in the original experiments).
        n_class1: number of trailing rows labelled 1 (default 369).

    Returns:
        (df, labels): the CSV contents as a numpy array, and an integer
        label vector of length ``n_class0 + n_class1``.
    """
    path = root + filename + '.csv'
    df = np.asarray(pd.read_csv(path, header=None))
    # Build [0]*n_class0 + [1]*n_class1 without a Python-level append loop.
    labels = np.concatenate([np.zeros(n_class0, dtype=int),
                             np.ones(n_class1, dtype=int)])
    return df, labels
def predicting(ensemble_prob):
    """Return the predicted class index for each row of class probabilities.

    Equivalent to the original per-row ``np.where(temp == np.max(temp))``
    scan: ties resolve to the lowest index, exactly as ``np.argmax`` does.
    The result is cast to float for backward compatibility with the
    original implementation (which filled an ``np.zeros`` float array).

    Args:
        ensemble_prob: 2-D array of shape (n_samples, n_classes).

    Returns:
        1-D float array of length n_samples holding class indices.
    """
    return np.argmax(ensemble_prob, axis=1).astype(float)
def metrics(labels, predictions, classes):
    """Print a per-class classification report, the confusion matrix,
    class-wise accuracy and the balanced accuracy score.

    Args:
        labels: ground-truth class indices.
        predictions: predicted class indices.
        classes: display names for the classes, in index order.
    """
    report = classification_report(labels, predictions, target_names=classes, digits=4)
    print("Classification Report:")
    print(report)
    cm = confusion_matrix(labels, predictions)
    print("Confusion matrix:")
    print(cm)
    # Per-class accuracy = correct predictions for a class / true class count.
    per_class_acc = cm.diagonal() / cm.sum(axis=1)
    print("\nClasswise Accuracy :{}".format(per_class_acc))
    print("\nBalanced Accuracy Score: ", balanced_accuracy_score(labels, predictions))
#Sugeno Integral
def ensemble_sugeno(labels, prob1, prob2, prob3, prob4):
    """Fuse four classifiers' probability matrices with the Sugeno fuzzy
    integral, then print accuracy and classification metrics.

    Args:
        labels: ground-truth label vector, shape (n_samples,).
        prob1..prob4: per-classifier probability matrices, each of
            shape (n_samples, n_classes).
    """
    num_classes = prob1.shape[1]
    Y = np.zeros(prob1.shape, dtype=float)
    # Fixed fuzzy measure (per-classifier confidence weights) from the
    # original experiments; hoisted out of the loop since it never changes.
    measure = np.array([1.5, 1.5, 0.01, 1.2])
    for sample in range(prob1.shape[0]):
        for cls in range(num_classes):
            X = np.array([prob1[sample][cls], prob2[sample][cls],
                          prob3[sample][cls], prob4[sample][cls]])
            # Bug fix: the module is imported as `sugeno_integral`; the
            # original referenced an undefined name `integrals`.
            Y[sample][cls] = sugeno_integral.sugeno_fuzzy_integral_generalized(X, measure)
    sugeno_pred = predicting(Y)
    correct = np.where(sugeno_pred == labels)[0].shape[0]
    total = labels.shape[0]
    print("Accuracy = ", correct / total)
    classes = ['COVID', 'Non-COVID']
    # Bug fix: metrics() expects (labels, predictions, classes); the
    # original call passed them in swapped order.
    metrics(labels, sugeno_pred, classes)
| [
"pandas.read_csv",
"numpy.where",
"numpy.asarray",
"numpy.max",
"numpy.array",
"numpy.zeros"
] | [((209, 239), 'pandas.read_csv', 'pd.read_csv', (['file'], {'header': 'None'}), '(file, header=None)\n', (220, 239), True, 'import pandas as pd\n'), ((249, 263), 'numpy.asarray', 'np.asarray', (['df'], {}), '(df)\n', (259, 263), True, 'import numpy as np\n'), ((399, 417), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (409, 417), True, 'import numpy as np\n'), ((492, 527), 'numpy.zeros', 'np.zeros', (['(ensemble_prob.shape[0],)'], {}), '((ensemble_prob.shape[0],))\n', (500, 527), True, 'import numpy as np\n'), ((1269, 1303), 'numpy.zeros', 'np.zeros', (['prob1.shape'], {'dtype': 'float'}), '(prob1.shape, dtype=float)\n', (1277, 1303), True, 'import numpy as np\n'), ((1410, 1525), 'numpy.array', 'np.array', (['[prob1[samples][classes], prob2[samples][classes], prob3[samples][classes],\n prob4[samples][classes]]'], {}), '([prob1[samples][classes], prob2[samples][classes], prob3[samples][\n classes], prob4[samples][classes]])\n', (1418, 1525), True, 'import numpy as np\n'), ((1545, 1576), 'numpy.array', 'np.array', (['[1.5, 1.5, 0.01, 1.2]'], {}), '([1.5, 1.5, 0.01, 1.2])\n', (1553, 1576), True, 'import numpy as np\n'), ((1746, 1777), 'numpy.where', 'np.where', (['(sugeno_pred == labels)'], {}), '(sugeno_pred == labels)\n', (1754, 1777), True, 'import numpy as np\n'), ((636, 648), 'numpy.max', 'np.max', (['temp'], {}), '(temp)\n', (642, 648), True, 'import numpy as np\n')] |
import numpy as np
import math
import re
import os
import colorsys
import pandas as pd
import itertools
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rcParams
from scipy.optimize import curve_fit, leastsq
import seaborn as sns
#########################################################################################
# Dictionaries to be used in functions below
#########################################################################################
# Antibody clone number (string) -> clone name shown in plot legends
# (used as `label_dict` by the plotting functions below).
IgG_dict = {'27': 'NCR1.11', '72': 'NCR3.12', '78': 'NCR3.18', '3': 'CD16.03',
            '8': 'NCR1.01', '20': 'NCR1.05', '79': 'NCR3.19', '98': 'TNFRSF9.01',
            '108': 'CD16.08'}
# Antibody clone number -> RGB color tuple used for its markers and fit curve.
Color_dict = {'27': (0, 0, 1), '72': (1, 0, 0), '78': (0.125, 0.25, 1),
              '3': (0.25, 0.5, 1), '8': (1, 0.2, 0.083), '20': (1, 0.4, 0.167),
              '79': (0.325, 0.75, 1), '98': (1, 0.6, 0.25), '108': (1, 0.8, 0.33)}
# Antibody clone number -> (matplotlib marker, fill flag). The plotting code
# treats a positive second element as a filled marker and a non-positive one
# as an open (unfilled) marker.
Marker_dict = {'27': ('o', 1), '72': ('o', -1), '78': ('s', 1), '3': ('^', 1),
               '8': ('s', -1), '20': ('^', -1), '79': ('v', 1), '98': ('v', -1),
               '108': ('d', -1)}
#########################################################################################
# Get file path
#########################################################################################
def getPath(root="", foldername=""):
    """Join a root directory and a file/folder name with a '/'.

    Generalized fix: the original only special-cased exactly "" and " ",
    so roots like "  " (two spaces) produced nonsense paths. Any blank or
    whitespace-only root now falls back to the current working directory.

    Args:
        root: base directory; blank/whitespace means "use the cwd".
        foldername: name appended after a '/' separator.

    Returns:
        The combined path string.
    """
    if not root.strip():
        root = os.getcwd()
    return root + "/" + foldername
#########################################################################################
# Create a df based on a .csv file
#########################################################################################
def readCSV(filepath):
    """Read a CSV and reorder its antibody-clone columns into canonical order.

    Columns whose names appear in the canonical antibody list are permuted
    so they occur in that canonical order; every other column keeps its
    original position.

    Args:
        filepath: path to a '\\r'-terminated CSV file.

    Returns:
        The DataFrame with antibody columns reordered.
    """
    # NOTE: `on_bad_lines='skip'` replaces `error_bad_lines=False`, which
    # was deprecated in pandas 1.3 and removed in 2.0; both skip bad rows.
    df = pd.read_csv(filepath, lineterminator='\r', on_bad_lines='skip')
    ab_order = ['3', '27', '78', '79', '108', '8', '20', '72', '98']
    present = set(df.columns).intersection(ab_order)
    # Antibody columns present in this file, in canonical order.
    # (Original code bound this list to the name `sorted`, shadowing the builtin.)
    canonical = [ab for ab in ab_order if ab in present]
    slots = iter(canonical)
    # Walk the original column order, substituting each antibody slot with
    # the next canonical ID while leaving non-antibody columns untouched.
    reordered_cols = [next(slots) if col in present else col for col in df.columns]
    return df[reordered_cols]
##########################################################################################
# Plot cytotoxicity curves for redirected lysis assays (with IgG)
##########################################################################################
def plot_cytotox(df, color_dict, marker_dict, label_dict):
    """Plot redirected-lysis cytotoxicity curves, one per antibody column.

    For each non-concentration column of `df`, plots mean +/- SEM
    cytotoxicity against antibody concentration ('ug/mL' column), fits a
    3-parameter logistic (3PL) curve to the means, and shows the figure.

    Args:
        df: replicate data with a 'ug/mL' column plus one column per antibody.
        color_dict: antibody id -> RGB color tuple.
        marker_dict: antibody id -> (marker, fill flag); positive flag = filled.
        label_dict: antibody id -> legend label.
    """
    # Global matplotlib styling for publication-quality output
    # (fonttype 42 keeps text editable in exported PDFs).
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['Helvetica']
    rcParams['mathtext.fontset'] = 'custom'
    rcParams['mathtext.rm'] = 'sans'
    rcParams['pdf.fonttype'] = 42
    # Collapse replicates: mean and standard error per concentration.
    mean_df = df.groupby('ug/mL').mean().reset_index()
    sem_df = df.groupby('ug/mL').sem().reset_index()
    # 3-parameter logistic: c = top plateau, b = EC50, a = Hill slope.
    def PL3_func(x, a, b, c):
        return c/(1.0 +(x/b)**(-a))
    # Residuals of actual and fit values
    def residuals(parameters, func, y, x):
        err = y - func(x, *parameters)
        return err
    # Sum of squared residuals (manual, element-wise).
    def sum_of_squares(ls):
        s = 0
        for i in ls:
            s += abs(i*i)
        return s
    sns.set(style = "white", palette = "bright")
    f, ax = plt.subplots(figsize = (3.8, 3))
    # Get range of concentrations (get more points to make a nice curve)
    plot_range = np.arange(mean_df['ug/mL'].min(), mean_df['ug/mL'].max(), 0.01)
    # NOTE(review): r_counter and b_counter are never used below.
    r_counter = 0
    b_counter = 0
    # For each IgG
    for column in mean_df.columns:
        # Determine if Ab is activating (based on functional screen)
        print(column)
        if column != 'ug/mL':
            # Choose color/marker from the lookup tables; a positive fill
            # flag means a filled marker, otherwise an open marker.
            if column in color_dict:
                color = color_dict[column]
                marker = marker_dict[column][0]
                if marker_dict[column][1] > 0:
                    mfc = color
                    mec = color
                    mew = 0
                else:
                    mfc = 'None'
                    mec = color
                    mew = 2
            else:
                # Fallback styling for antibodies missing from the tables.
                color = 'black'
                marker = 'o'
                mfc = color
                mec = color
                mew = 0
            # Plot means for each concentration
            if column in label_dict:
                label = label_dict[column]
            else:
                label = column
            ax.errorbar(x = mean_df['ug/mL'], y = mean_df[column], ms = 8,
                        xerr = None, yerr = sem_df[column],
                        mfc = mfc, mec = mec, mew = mew,
                        ecolor = color, fmt = marker, label = label)
            # Get x and y range
            curr_xrange = mean_df['ug/mL'].tolist()
            curr_yrange = mean_df[column].tolist()
            min0 = 0.02
            max0 = max(curr_yrange)
            # Generate fit to means
            ssr = np.inf
            popt = [np.nan, np.nan, np.nan]
            # Retry the fit, dropping the highest concentration each time it
            # fails, until it succeeds or too few points remain.
            while len(curr_xrange) > 5 and np.isinf(ssr):
                try:
                    # Initial guess of parameters
                    p0 = [0.1, 0.02, max0]
                    # Fit to data with 3PL function
                    popt, pcov = leastsq(residuals, p0, args = (PL3_func, curr_yrange, curr_xrange))
                    ssr = sum_of_squares(residuals(popt, PL3_func, curr_yrange, curr_xrange))
                except RuntimeError:
                    # NOTE(review): scipy.optimize.leastsq normally reports
                    # failure via its return status rather than raising
                    # RuntimeError (curve_fit raises it) — confirm this
                    # handler is ever reached.
                    curr_xrange = curr_xrange[:-1]
                    curr_yrange = curr_yrange[:-1]
            # Only draw the fitted curve if a fit converged.
            if not np.isinf(ssr):
                PL3_residuals = mean_df[column][:] - PL3_func(mean_df['ug/mL'][:], *popt)
                PL3_ssr = sum_of_squares(PL3_residuals)
                func_to_use = PL3_func
                popt_to_use = popt
                # Plot curve fit
                ax.plot(plot_range, func_to_use(plot_range, *popt_to_use),
                        color = color, linewidth = 3)
    # Hide the right and top spines
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    # Only show ticks on the left and bottom spines
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    font_dict = {'fontfamily': 'sans-serif', 'fontsize': 8, 'fontstyle': 'normal'}
    ax.set_xlim([0.02, mean_df['ug/mL'].max() + 0.1])
    ax.set_xlabel(r'Concentration ($\mu$g/mL)', fontdict = font_dict)
    # Pad the y-limits by 5 units around the observed mean cytotoxicity range.
    mean_cytotox = mean_df.drop('ug/mL', axis = 1)
    ax.set_ylim([min(mean_cytotox.min().tolist()) - 5, max(mean_cytotox.max().tolist()) + 5])
    ax.tick_params(labelsize = 8)
    ax.set_ylabel('% cytotoxicity', fontdict = font_dict)
    # Legend drawn outside the axes on the right.
    legend = ax.legend(bbox_to_anchor=(1.01, 1), loc = 2,
                       prop = {'family': 'sans-serif',
                              'size': 8, 'style': 'normal'},
                       labelspacing = 1.3, handletextpad = 0.25, borderaxespad=0.,
                       columnspacing = 0.5, frameon = False)
    plt.tight_layout(pad = 0.5)
    #plt.savefig('mIgG_cytotox.pdf')
    plt.show()
##########################################################################################
# Plot on cell titrations
##########################################################################################
def plot_titration(df, ax, cols, color_dict, marker_dict, label_dict):
    """Plot on-cell titration (MFI vs concentration) curves on a given axis.

    For each antibody id in `cols`, scatters MFI against concentration
    ('nM' column of `df`) and overlays a 4-parameter logistic (4PL) fit.

    Args:
        df: titration data with an 'nM' column plus one column per antibody.
        ax: matplotlib Axes to draw into.
        cols: antibody-id column names to plot on this axis.
        color_dict: antibody id -> RGB color tuple.
        marker_dict: antibody id -> (marker, fill flag); positive flag = filled.
        label_dict: antibody id -> legend label.
    """
    # Global matplotlib styling (fonttype 42 keeps PDF text editable).
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['Helvetica']
    rcParams['mathtext.fontset'] = 'custom'
    rcParams['mathtext.rm'] = 'sans'
    rcParams['pdf.fonttype'] = 42
    # 4-parameter logistic: a = bottom, d = top, c = EC50, b = Hill slope.
    def PL4_func(x, a, b, c, d):
        return d + (a-d)/(1.0 + (x/c)**b)
    # Residuals of actual and fit values
    def residuals(parameters, func, y, x):
        err = y - func(x, *parameters)
        return err
    # Sum of squared residuals (manual, element-wise).
    def sum_of_squares(ls):
        s = 0
        for i in ls:
            s += abs(i*i)
        return s
    # Get range of concentrations (get more points to make a nice curve)
    plot_range = np.arange(df['nM'].min(), df['nM'].max(), 0.01)
    # For each IgG
    # min_mfi/max_mfi track the overall MFI range for the y-axis limits;
    # -1 marks "not initialized yet".
    min_mfi = -1
    max_mfi = -1
    for column in cols:
        # Determine if Ab is activating (based on functional screen)
        print(column)
        if column != 'ug/mL':
            if min_mfi < 0:
                min_mfi = df[column].min()
                max_mfi = df[column].max()
            else:
                curr_min = df[column].min()
                curr_max = df[column].max()
                if curr_min < min_mfi:
                    min_mfi = curr_min
                if curr_max > max_mfi:
                    max_mfi = curr_max
            # Choose color/marker from the lookup tables; a positive fill
            # flag means a filled marker, otherwise an open marker.
            if column in color_dict:
                color = color_dict[column]
                marker = marker_dict[column][0]
                if marker_dict[column][1] > 0:
                    mfc = color
                    mec = color
                    mew = 3
                else:
                    mfc = 'None'
                    mec = color
                    mew = 3
            else:
                # Fallback styling for antibodies missing from the tables.
                color = 'black'
                marker = 'o'
                mfc = color
                mec = color
                mew = 3
            # Plot means for each concentration
            if column in label_dict:
                label = label_dict[column]
            else:
                label = column
            ax.scatter(x = df['nM'], y = df[column], s = 30,
                       facecolor = mfc, edgecolors = mec, linewidths = mew,
                       marker = marker, label = label)
            # Shorten yrange if there is hooking
            # (fit only from the peak MFI onward so the 4PL isn't distorted
            # by the hook-effect drop at high concentrations).
            curr_xrange = df['nM'].tolist()[:]
            curr_yrange = df[column].tolist()[:]
            min0 = 0
            max0 = max(curr_yrange)
            index_max0 = curr_yrange.index(max0)
            curr_xrange = curr_xrange[(index_max0):-1]
            curr_yrange = curr_yrange[(index_max0):-1]
            # Generate fit to means
            ssr = np.inf
            popt = [np.nan, np.nan, np.nan, np.nan]
            # Retry the fit, dropping the last point each time it fails,
            # until it succeeds or too few points remain.
            while len(curr_xrange) > 5 and np.isinf(ssr):
                try:
                    # Initial guess of parameters
                    p0 = [min0, 1, 1, max0]
                    # Fit to data with 4PL function
                    popt, pcov = leastsq(residuals, p0, args = (PL4_func, curr_yrange, curr_xrange))
                    ssr = sum_of_squares(residuals(popt, PL4_func, curr_yrange, curr_xrange))
                except RuntimeWarning:
                    # NOTE(review): RuntimeWarning is a warning class, not an
                    # exception — it is only raised if warnings are escalated
                    # to errors (warnings.filterwarnings('error')). Confirm
                    # this handler can ever trigger.
                    curr_xrange = curr_xrange[:-1]
                    curr_yrange = curr_yrange[:-1]
            # Only draw the fitted curve if a fit converged.
            if not np.isinf(ssr):
                PL4_residuals = df[column][1:] - PL4_func(df['nM'][1:], *popt)
                PL4_ssr = sum_of_squares(PL4_residuals)
                func_to_use = PL4_func
                popt_to_use = popt
                # Plot curve fit
                ax.plot(plot_range, func_to_use(plot_range, *popt_to_use),
                        color = color, linewidth = 3)
    font_dict = {'fontfamily': 'sans-serif', 'fontsize': 8, 'fontstyle': 'normal'}
    ax.set_xscale('log')
    ax.set_xlim([0.01, 1500])
    ax.set_xlabel(r'Concentration (nM)', fontdict = font_dict)
    mean_mfi = df.drop('nM', axis = 1)
    # Pad the y-limits around the observed MFI range.
    ax.set_ylim([min_mfi - 1500, max_mfi + 15000])
    ax.tick_params(labelsize = 8)
    ax.set_ylabel('MFI', fontdict = font_dict)
    legend = ax.legend(loc = 2,
                       prop = {'family': 'sans-serif',
                              'size': 8, 'style': 'normal'},
                       labelspacing = 1, handletextpad = 0.25,
                       columnspacing = 0.5)
def plot_all_titrations(df, color_dict, marker_dict, label_dict):
CD16 = ['3', '108']
NCR1 = ['27', '8', '20']
NCR3 = ['78', '79', '72']
TNFRSF9 = ['98']
sns.set(style = "ticks", palette = "bright")
f, ax = plt.subplots(nrows = 2, ncols = 2, figsize = (7, 5))
plot_titration(df, ax[0, 0], CD16, color_dict, marker_dict, label_dict)
plot_titration(df, ax[0, 1], NCR3, color_dict, marker_dict, label_dict)
plot_titration(df, ax[1, 0], NCR1, color_dict, marker_dict, label_dict)
plot_titration(df, ax[1, 1], TNFRSF9, color_dict, marker_dict, label_dict)
plt.tight_layout(pad = 0.5)
plt.savefig('NK_MFI.pdf')
##########################################################################################
##########################################################################################
# Run the program
# Uncomment the first block of code to plot cytotoxicity data
# Uncomment the second block of code to plot on cell titration data
##########################################################################################
##########################################################################################
file = '20190502.csv'
cytotox_df = readCSV(getPath(foldername = file))
plot_cytotox(cytotox_df, color_dict = Color_dict, marker_dict = Marker_dict, label_dict = IgG_dict)
"""
file = 'MFI.csv'
mfi_df = readCSV(getPath(foldername = file))
plot_all_titrations(mfi_df, color_dict = Color_dict, marker_dict = Marker_dict, label_dict = IgG_dict)
""" | [
"seaborn.set",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"os.getcwd",
"scipy.optimize.leastsq",
"matplotlib.pyplot.tight_layout",
"numpy.isinf",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((1610, 1675), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'lineterminator': "'\\r'", 'error_bad_lines': '(False)'}), "(filepath, lineterminator='\\r', error_bad_lines=False)\n", (1621, 1675), True, 'import pandas as pd\n'), ((3016, 3056), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""', 'palette': '"""bright"""'}), "(style='white', palette='bright')\n", (3023, 3056), True, 'import seaborn as sns\n'), ((3070, 3100), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3.8, 3)'}), '(figsize=(3.8, 3))\n', (3082, 3100), True, 'import matplotlib.pyplot as plt\n'), ((5961, 5986), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0.5)'}), '(pad=0.5)\n', (5977, 5986), True, 'import matplotlib.pyplot as plt\n'), ((6024, 6034), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6032, 6034), True, 'import matplotlib.pyplot as plt\n'), ((9748, 9788), 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""', 'palette': '"""bright"""'}), "(style='ticks', palette='bright')\n", (9755, 9788), True, 'import seaborn as sns\n'), ((9802, 9848), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(7, 5)'}), '(nrows=2, ncols=2, figsize=(7, 5))\n', (9814, 9848), True, 'import matplotlib.pyplot as plt\n'), ((10151, 10176), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0.5)'}), '(pad=0.5)\n', (10167, 10176), True, 'import matplotlib.pyplot as plt\n'), ((10180, 10205), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""NK_MFI.pdf"""'], {}), "('NK_MFI.pdf')\n", (10191, 10205), True, 'import matplotlib.pyplot as plt\n'), ((1307, 1318), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1316, 1318), False, 'import os\n'), ((4354, 4367), 'numpy.isinf', 'np.isinf', (['ssr'], {}), '(ssr)\n', (4362, 4367), True, 'import numpy as np\n'), ((4754, 4767), 'numpy.isinf', 'np.isinf', (['ssr'], {}), '(ssr)\n', (4762, 4767), True, 'import numpy as np\n'), ((8362, 8375), 'numpy.isinf', 
'np.isinf', (['ssr'], {}), '(ssr)\n', (8370, 8375), True, 'import numpy as np\n'), ((8765, 8778), 'numpy.isinf', 'np.isinf', (['ssr'], {}), '(ssr)\n', (8773, 8778), True, 'import numpy as np\n'), ((4496, 4561), 'scipy.optimize.leastsq', 'leastsq', (['residuals', 'p0'], {'args': '(PL3_func, curr_yrange, curr_xrange)'}), '(residuals, p0, args=(PL3_func, curr_yrange, curr_xrange))\n', (4503, 4561), False, 'from scipy.optimize import curve_fit, leastsq\n'), ((8505, 8570), 'scipy.optimize.leastsq', 'leastsq', (['residuals', 'p0'], {'args': '(PL4_func, curr_yrange, curr_xrange)'}), '(residuals, p0, args=(PL4_func, curr_yrange, curr_xrange))\n', (8512, 8570), False, 'from scipy.optimize import curve_fit, leastsq\n')] |
#!/usr/bin/env python3
## MIT License
##
## Copyright (c) 2019 <NAME>
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
# Simple RQRMI model example with lognormal record distribution.
import sys
import os
# Add the "bin" directory to import path
my_path=os.path.dirname(os.path.realpath(__file__))
bin_path=os.path.join(my_path, '..', 'bin')
if not os.path.exists(bin_path):
print('Bin directory was not found. Did you compile the library?')
exit(1)
sys.path.append(bin_path)
# Import the RQRMI library
try: from rqrmi_library import RQRMI, initialize_library
except ModuleNotFoundError as e:
# Cannot load a prerequisites module
print(e)
exit(1)
except ImportError:
# Cannot find the rqrmi_library module
print('RQRMI library was not found is bin directory. Did you compile the library?')
exit(1)
# Import dependencies
from timeit import default_timer as timer
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
# ======== Script Start ======== #
# Suppress TF warning
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
tf.logging.set_verbosity(tf.logging.ERROR)
# TF work on CPU only (faster for small NN)
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# Set no library debug printing
initialize_library(False)
# Parameters for dataset
num_of_records=int(1e6)
max_uint32=np.iinfo(np.uint32).max
print('** Generating random lognormal database with %d records' % num_of_records)
database = np.random.lognormal(0, 1, num_of_records)
# Normalize lognormal from 0 to max uint32
database -= np.min(database)
database /= np.max(database)
database *= max_uint32
# Remove non unique uint32 values
database = np.unique(database.astype(np.uint32))
num_of_records = database.shape[0]
print('** Generating dataset for plotting. Number of records: %d' % num_of_records)
dataset = np.arange(num_of_records) / num_of_records
dataset_indices = np.arange(num_of_records)
# Parameters for RQRMI
submodel_structure=[1, 8, 1]
stages_strcture=[1, 4, 16]
epochs=[10, 10, 20]
batch_size=32
optimizer=tf.train.AdamOptimizer(learning_rate=1e-3)
samples_per_bucket=int(1.5e3)
threshold_submodel_error=0.005
threshold_bucket_coverage = 0.5
retraining_multiplier = 1.5
retraining_times = 3
# The model
print('** Initializing RQRMI model')
model = RQRMI(database, stages_strcture)
# Used for illustrating outputs
fig, ax = plt.subplots(nrows=len(stages_strcture)+1, ncols=2, figsize=(10, 10))
print('** Start training')
traiaing_time_seconds=0
# Train all stages
for i,stage in enumerate(stages_strcture):
print('** Building stage %d' % i)
# Measure training time
time_start = timer()
print('** Calculating responsibilities')
model.calculate_responsibility(i, verbose=1)
submodels_to_train = np.arange(stage).astype(int)
buckets_to_plot = [None for _ in submodels_to_train]
current_samples_per_bucket = samples_per_bucket
# Repeat the training process
for _ in range(retraining_times):
buckets = model.generate_buckets(i, submodels_to_train, current_samples_per_bucket, verbose=1)
model.train_stage(i, submodel_structure, buckets, submodels_to_train, optimizer, epochs[i], batch_size, verbose=1)
model.calculate_transition_set(i, verbose=1)
# Update the buckets to plot
for j,x in enumerate(submodels_to_train):
buckets_to_plot[x] = buckets[j]
# Measure error for refinement
submodels_to_train = []
for j in range(stage):
max_error, coverage = model.calculate_submodel_error(i, j)
# Retrain shallow submodels based on their bucket coverage
if i < len(stages_strcture)-1 and (coverage < threshold_bucket_coverage or coverage > 1):
submodels_to_train.append(j)
# Retrain deep submodels based on their maximum error
if i == len(stages_strcture)-1 and (max_error/num_of_records > threshold_submodel_error):
submodels_to_train.append(j)
# In case no need to retrain
if (len(submodels_to_train) == 0): break
# Update number of samples (more samples = less error = more training time)
current_samples_per_bucket = int(current_samples_per_bucket*retraining_multiplier)
traiaing_time_seconds += (timer() - time_start)
# Plot the input buckets for the current stage
print('** Plot input for stage %d' % i)
ax[i][1].set_xlabel('Input')
ax[i][1].set_ylabel('Expected output')
ax[i][1].set_title('Input for stage %d Divided By Buckets' % i)
for j,b in enumerate(buckets_to_plot):
ax[i][1].scatter(b[:,0], b[:,1], s=3)
print('** Evaluating stage %d (with %d samples)' % (i, num_of_records))
output = model.evaluate(database, verbose=1)
# Plot figure for current stage
print('** Plotting figure for stage %d' % i)
ax[i][0].plot(database, dataset)
ax[i][0].plot(database, output)
ax[i][0].legend(('Database', 'RQRMI output'))
ax[i][0].set_xlabel('Input')
ax[i][0].set_ylabel('Output')
ax[i][0].set_title('Database vs. RQRMI outputs for stage %d' % i)
print('** Total training time: %.f seconds' % traiaing_time_seconds)
# Calculate misprediction error per record
print('** Calculating misprediction error per submodel')
submodel_error=[]
for j in range(stages_strcture[-1]):
max_error, _ = model.calculate_submodel_error(len(stages_strcture)-1, j)
submodel_error.append(100*max_error/num_of_records)
# Plot error chart
ax[-1][0].plot(np.arange(stages_strcture[-1]), submodel_error)
ax[-1][0].set_xlabel('Submodel')
ax[-1][0].set_ylabel('Misprediction Error (%%)')
ax[-1][0].set_title('Misprediction Error per Submodel')
max_error=np.max(submodel_error)
print('** Max prediction error: %.2f%% (%d records)' % (max_error, max_error/100*num_of_records))
# Save the figures to file
figure_name='rqrmi_output.png'
print('** Saving output figure to %s...' % os.path.join(os.getcwd(), figure_name))
plt.tight_layout()
fig.savefig(figure_name)
# Save the model to file
model_file='rqrmi_model.model'
print('** Saved model to %s...' % model_file)
with open(model_file, 'wb') as f:
f.write(model.pack())
print('** Done. It can be loaded with the RQRMI benchmark utility or with Python code')
| [
"os.path.exists",
"numpy.arange",
"timeit.default_timer",
"os.path.join",
"tensorflow.logging.set_verbosity",
"numpy.iinfo",
"numpy.max",
"os.path.realpath",
"rqrmi_library.initialize_library",
"os.getcwd",
"rqrmi_library.RQRMI",
"matplotlib.pyplot.tight_layout",
"numpy.min",
"tensorflow.t... | [((1336, 1370), 'os.path.join', 'os.path.join', (['my_path', '""".."""', '"""bin"""'], {}), "(my_path, '..', 'bin')\n", (1348, 1370), False, 'import os\n'), ((1487, 1512), 'sys.path.append', 'sys.path.append', (['bin_path'], {}), '(bin_path)\n', (1502, 1512), False, 'import sys\n'), ((2098, 2140), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (2122, 2140), True, 'import tensorflow as tf\n'), ((2261, 2286), 'rqrmi_library.initialize_library', 'initialize_library', (['(False)'], {}), '(False)\n', (2279, 2286), False, 'from rqrmi_library import RQRMI, initialize_library\n'), ((2466, 2507), 'numpy.random.lognormal', 'np.random.lognormal', (['(0)', '(1)', 'num_of_records'], {}), '(0, 1, num_of_records)\n', (2485, 2507), True, 'import numpy as np\n'), ((2563, 2579), 'numpy.min', 'np.min', (['database'], {}), '(database)\n', (2569, 2579), True, 'import numpy as np\n'), ((2592, 2608), 'numpy.max', 'np.max', (['database'], {}), '(database)\n', (2598, 2608), True, 'import numpy as np\n'), ((2906, 2931), 'numpy.arange', 'np.arange', (['num_of_records'], {}), '(num_of_records)\n', (2915, 2931), True, 'import numpy as np\n'), ((3057, 3100), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (3079, 3100), True, 'import tensorflow as tf\n'), ((3302, 3334), 'rqrmi_library.RQRMI', 'RQRMI', (['database', 'stages_strcture'], {}), '(database, stages_strcture)\n', (3307, 3334), False, 'from rqrmi_library import RQRMI, initialize_library\n'), ((6713, 6735), 'numpy.max', 'np.max', (['submodel_error'], {}), '(submodel_error)\n', (6719, 6735), True, 'import numpy as np\n'), ((6976, 6994), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6992, 6994), True, 'import matplotlib.pyplot as plt\n'), ((1299, 1325), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1315, 1325), False, 
'import os\n'), ((1378, 1402), 'os.path.exists', 'os.path.exists', (['bin_path'], {}), '(bin_path)\n', (1392, 1402), False, 'import os\n'), ((2348, 2367), 'numpy.iinfo', 'np.iinfo', (['np.uint32'], {}), '(np.uint32)\n', (2356, 2367), True, 'import numpy as np\n'), ((2845, 2870), 'numpy.arange', 'np.arange', (['num_of_records'], {}), '(num_of_records)\n', (2854, 2870), True, 'import numpy as np\n'), ((3648, 3655), 'timeit.default_timer', 'timer', ([], {}), '()\n', (3653, 3655), True, 'from timeit import default_timer as timer\n'), ((6516, 6546), 'numpy.arange', 'np.arange', (['stages_strcture[-1]'], {}), '(stages_strcture[-1])\n', (6525, 6546), True, 'import numpy as np\n'), ((5299, 5306), 'timeit.default_timer', 'timer', ([], {}), '()\n', (5304, 5306), True, 'from timeit import default_timer as timer\n'), ((3777, 3793), 'numpy.arange', 'np.arange', (['stage'], {}), '(stage)\n', (3786, 3793), True, 'import numpy as np\n'), ((6949, 6960), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6958, 6960), False, 'import os\n')] |
# Import necessary packages
from absl import logging
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
import os
import pandas as pd
import re
# Load the model
module_url = "https://tfhub.dev/google/universal-sentence-encoder/4" # @param ["https://tfhub.dev/google/universal-sentence-encoder/4", "https://tfhub.dev/google/universal-sentence-encoder-large/5"]
model = hub.load(module_url)
print("module %s loaded" % module_url)
def embed(input):
return model(input)
# Load the csv files
fake_news_df = pd.read_csv("database_fake.csv")
true_news_df = pd.read_csv("database_true.csv")
collected_news_df = pd.read_csv("database_collected.csv")
# Cleaning data
def clean_data(x):
text = x
text = text.lower()
text = re.sub("\[.*?\]", "", text) # remove square brackets
text = re.sub(r"[^\w\s]", "", text) # remove punctuation
text = re.sub("\w*\d\w*", "", text) # remove words containing numbers
text = re.sub(r"http\S+", "", text)
text = re.sub("\n", "", text)
return text
### Check for verified fake news
verified_fake = []
yet_to_check = []
for i in range(len(collected_news_df)):
found = 0
# Get message1 from collected data
msg1 = collected_news_df.iloc[i][1]
msg1 = clean_data(msg1)
for j in range(len(fake_news_df)):
# Get message2 from verified fake data
msg2 = fake_news_df.iloc[j][1]
msg2 = clean_data(msg2)
messages = [msg1, msg2]
# Reduce logging output.
logging.set_verbosity(logging.ERROR)
# Embed the messages
message_embeddings = embed(messages)
# Find Corelation
features = message_embeddings
corr = np.inner(features, features)
# Check for similarity
if corr[0][1] >= 0.95:
found = 1
break
if found == 1:
verified_fake.append(msg1)
else:
yet_to_check.append(msg1)
### Check for verified true news
probably_fake = []
for i in range(len(yet_to_check)):
found = 0
# Get message 1 from yet_to_check
msg1 = yet_to_check[i]
msg1 = clean_data(msg1)
for j in range(len(true_news_df)):
# Get message2 from verified true data
msg2 = true_news_df.iloc[j][1]
msg2 = clean_data(msg2)
messages = [msg1, msg2]
# Reduce logging output.
logging.set_verbosity(logging.ERROR)
# Embed the messages
message_embeddings = embed(messages)
# Find Corelation
features = message_embeddings
corr = np.inner(features, features)
# Check for similarity
if corr[0][1] >= 0.95:
found = 1
break
if found == 0:
probably_fake.append(msg1)
# The tweets in Probably fake are for further check either manual or using a ML model
# Store the Probably Fake
fake = pd.DataFrame(probably_fake, )
# Converting dataframe to CSV
fake.to_csv('database_probable.csv', sep=',') | [
"pandas.read_csv",
"absl.logging.set_verbosity",
"tensorflow_hub.load",
"numpy.inner",
"pandas.DataFrame",
"re.sub"
] | [((391, 411), 'tensorflow_hub.load', 'hub.load', (['module_url'], {}), '(module_url)\n', (399, 411), True, 'import tensorflow_hub as hub\n'), ((533, 565), 'pandas.read_csv', 'pd.read_csv', (['"""database_fake.csv"""'], {}), "('database_fake.csv')\n", (544, 565), True, 'import pandas as pd\n'), ((581, 613), 'pandas.read_csv', 'pd.read_csv', (['"""database_true.csv"""'], {}), "('database_true.csv')\n", (592, 613), True, 'import pandas as pd\n'), ((634, 671), 'pandas.read_csv', 'pd.read_csv', (['"""database_collected.csv"""'], {}), "('database_collected.csv')\n", (645, 671), True, 'import pandas as pd\n'), ((2845, 2872), 'pandas.DataFrame', 'pd.DataFrame', (['probably_fake'], {}), '(probably_fake)\n', (2857, 2872), True, 'import pandas as pd\n'), ((756, 785), 're.sub', 're.sub', (['"""\\\\[.*?\\\\]"""', '""""""', 'text'], {}), "('\\\\[.*?\\\\]', '', text)\n", (762, 785), False, 'import re\n'), ((821, 850), 're.sub', 're.sub', (['"""[^\\\\w\\\\s]"""', '""""""', 'text'], {}), "('[^\\\\w\\\\s]', '', text)\n", (827, 850), False, 'import re\n'), ((883, 914), 're.sub', 're.sub', (['"""\\\\w*\\\\d\\\\w*"""', '""""""', 'text'], {}), "('\\\\w*\\\\d\\\\w*', '', text)\n", (889, 914), False, 'import re\n'), ((958, 986), 're.sub', 're.sub', (['"""http\\\\S+"""', '""""""', 'text'], {}), "('http\\\\S+', '', text)\n", (964, 986), False, 'import re\n'), ((998, 1020), 're.sub', 're.sub', (['"""\n"""', '""""""', 'text'], {}), "('\\n', '', text)\n", (1004, 1020), False, 'import re\n'), ((1501, 1537), 'absl.logging.set_verbosity', 'logging.set_verbosity', (['logging.ERROR'], {}), '(logging.ERROR)\n', (1522, 1537), False, 'from absl import logging\n'), ((1691, 1719), 'numpy.inner', 'np.inner', (['features', 'features'], {}), '(features, features)\n', (1699, 1719), True, 'import numpy as np\n'), ((2348, 2384), 'absl.logging.set_verbosity', 'logging.set_verbosity', (['logging.ERROR'], {}), '(logging.ERROR)\n', (2369, 2384), False, 'from absl import logging\n'), ((2538, 2566), 
'numpy.inner', 'np.inner', (['features', 'features'], {}), '(features, features)\n', (2546, 2566), True, 'import numpy as np\n')] |
"""
2021 <NAME>, ETHZ, MPI IS
"""
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader
import wandb
from healthgen.generation.base_gen_model import BaseGenModel
from healthgen.generation.models import VAE
from absl import flags, app
FLAGS=flags.FLAGS
# Model
flags.DEFINE_enum('encoder', 'mlp', ['mlp', 'conv'], 'Which encoder/decoder architecture to use.')
flags.DEFINE_list('dense_x_z', [256,128], 'List of dimensions for dense x_h layers.')
flags.DEFINE_list('dense_z_x', [128,256], 'List of dimensions for hz_x layers.')
flags.DEFINE_list('conv_x_z', [128,64], 'List of dimensions for encoder conv layers.')
flags.DEFINE_list('conv_z_x', [64,128], 'List of dimensions for decoder conv layers.')
class VAEGenModel(BaseGenModel):
def __init__(self, seed, x_dim, z_dim, seq_len, activation, dropout, encoder, dense_x_z,
dense_z_x, conv_x_z, conv_z_x, beta, data_mode, mask_loss):
# Model parameters
self.x_dim = x_dim
self.z_dim = z_dim
self.seq_len = seq_len
self.activation = activation
self.dropout_p = dropout
self.encoder = encoder
self.data_mode = data_mode
# Inference
self.dense_x_z = [int(i) for i in dense_x_z]
self.conv_x_z = [int(i) for i in conv_x_z]
# Generation
self.dense_z_x = [int(i) for i in dense_z_x]
self.conv_z_x = [int(i) for i in conv_z_x]
# Beta
self.beta = beta
# Training
self.mask_loss = mask_loss
super().__init__(seed)
def build_model(self):
model = VAE(x_dim=self.x_dim, z_dim=self.z_dim, seq_len=self.seq_len, activation=self.activation,
dropout_p=self.dropout_p, encoder=self.encoder, dense_x_z=self.dense_x_z,
dense_z_x=self.dense_z_x, conv_x_z=self.conv_x_z, conv_z_x=self.conv_z_x,
beta=self.beta, data_mode=self.data_mode, mask_loss=self.mask_loss, device=self.device).to(self.device)
return model
def build_dataloader(self, X, y, batch_size):
if FLAGS.data_mode == 'feats':
X_train = X['X_train']
X_val = X['X_val']
elif FLAGS.data_mode == 'mask':
X_train = X['m_train']
X_val = X['m_val']
elif FLAGS.data_mode == 'feats_mask':
X_train = np.concatenate((X['X_train'], X['m_train']), axis=1)
X_val = np.concatenate((X['X_val'], X['m_val']), axis=1)
elif FLAGS.data_mode == 'all':
X_train = np.concatenate((X['X_train'], X['m_train'], X['delta_t_train']), axis=1)
X_val = np.concatenate((X['X_val'], X['m_val'], X['delta_t_val']), axis=1)
# Expand labels to all time steps
time_steps = X_train.shape[2]
y_train = np.expand_dims(np.tile(y['y_train'], (time_steps, 1)).transpose(), axis=1)
y_val = np.expand_dims(np.tile(y['y_val'], (time_steps, 1)).transpose(), axis=1)
# Check if x_dim of model fits data
assert self.x_dim == X_train.shape[1], F'x_dim specified in model does not match data!' \
F'Should be {X_train.shape[1]}!'
train_dataset = TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train))
val_dataset = TensorDataset(torch.Tensor(X_val), torch.Tensor(y_val))
dataloader_train = DataLoader(train_dataset, batch_size=batch_size,
shuffle=True, num_workers=6)
dataloader_val = DataLoader(val_dataset, batch_size=batch_size,
shuffle=False, num_workers=6)
return dataloader_train, dataloader_val | [
"numpy.tile",
"absl.flags.DEFINE_list",
"torch.Tensor",
"numpy.concatenate",
"torch.utils.data.DataLoader",
"absl.flags.DEFINE_enum",
"healthgen.generation.models.VAE"
] | [((294, 396), 'absl.flags.DEFINE_enum', 'flags.DEFINE_enum', (['"""encoder"""', '"""mlp"""', "['mlp', 'conv']", '"""Which encoder/decoder architecture to use."""'], {}), "('encoder', 'mlp', ['mlp', 'conv'],\n 'Which encoder/decoder architecture to use.')\n", (311, 396), False, 'from absl import flags, app\n'), ((393, 483), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""dense_x_z"""', '[256, 128]', '"""List of dimensions for dense x_h layers."""'], {}), "('dense_x_z', [256, 128],\n 'List of dimensions for dense x_h layers.')\n", (410, 483), False, 'from absl import flags, app\n'), ((479, 564), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""dense_z_x"""', '[128, 256]', '"""List of dimensions for hz_x layers."""'], {}), "('dense_z_x', [128, 256],\n 'List of dimensions for hz_x layers.')\n", (496, 564), False, 'from absl import flags, app\n'), ((560, 651), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""conv_x_z"""', '[128, 64]', '"""List of dimensions for encoder conv layers."""'], {}), "('conv_x_z', [128, 64],\n 'List of dimensions for encoder conv layers.')\n", (577, 651), False, 'from absl import flags, app\n'), ((647, 738), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""conv_z_x"""', '[64, 128]', '"""List of dimensions for decoder conv layers."""'], {}), "('conv_z_x', [64, 128],\n 'List of dimensions for decoder conv layers.')\n", (664, 738), False, 'from absl import flags, app\n'), ((3349, 3426), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(6)'}), '(train_dataset, batch_size=batch_size, shuffle=True, num_workers=6)\n', (3359, 3426), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((3490, 3566), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(6)'}), '(val_dataset, batch_size=batch_size, shuffle=False, num_workers=6)\n', (3500, 3566), False, 'from 
torch.utils.data import TensorDataset, DataLoader\n'), ((3197, 3218), 'torch.Tensor', 'torch.Tensor', (['X_train'], {}), '(X_train)\n', (3209, 3218), False, 'import torch\n'), ((3220, 3241), 'torch.Tensor', 'torch.Tensor', (['y_train'], {}), '(y_train)\n', (3232, 3241), False, 'import torch\n'), ((3279, 3298), 'torch.Tensor', 'torch.Tensor', (['X_val'], {}), '(X_val)\n', (3291, 3298), False, 'import torch\n'), ((3300, 3319), 'torch.Tensor', 'torch.Tensor', (['y_val'], {}), '(y_val)\n', (3312, 3319), False, 'import torch\n'), ((1606, 1950), 'healthgen.generation.models.VAE', 'VAE', ([], {'x_dim': 'self.x_dim', 'z_dim': 'self.z_dim', 'seq_len': 'self.seq_len', 'activation': 'self.activation', 'dropout_p': 'self.dropout_p', 'encoder': 'self.encoder', 'dense_x_z': 'self.dense_x_z', 'dense_z_x': 'self.dense_z_x', 'conv_x_z': 'self.conv_x_z', 'conv_z_x': 'self.conv_z_x', 'beta': 'self.beta', 'data_mode': 'self.data_mode', 'mask_loss': 'self.mask_loss', 'device': 'self.device'}), '(x_dim=self.x_dim, z_dim=self.z_dim, seq_len=self.seq_len, activation=\n self.activation, dropout_p=self.dropout_p, encoder=self.encoder,\n dense_x_z=self.dense_x_z, dense_z_x=self.dense_z_x, conv_x_z=self.\n conv_x_z, conv_z_x=self.conv_z_x, beta=self.beta, data_mode=self.\n data_mode, mask_loss=self.mask_loss, device=self.device)\n', (1609, 1950), False, 'from healthgen.generation.models import VAE\n'), ((2360, 2412), 'numpy.concatenate', 'np.concatenate', (["(X['X_train'], X['m_train'])"], {'axis': '(1)'}), "((X['X_train'], X['m_train']), axis=1)\n", (2374, 2412), True, 'import numpy as np\n'), ((2433, 2481), 'numpy.concatenate', 'np.concatenate', (["(X['X_val'], X['m_val'])"], {'axis': '(1)'}), "((X['X_val'], X['m_val']), axis=1)\n", (2447, 2481), True, 'import numpy as np\n'), ((2817, 2855), 'numpy.tile', 'np.tile', (["y['y_train']", '(time_steps, 1)'], {}), "(y['y_train'], (time_steps, 1))\n", (2824, 2855), True, 'import numpy as np\n'), ((2908, 2944), 'numpy.tile', 'np.tile', 
(["y['y_val']", '(time_steps, 1)'], {}), "(y['y_val'], (time_steps, 1))\n", (2915, 2944), True, 'import numpy as np\n'), ((2543, 2615), 'numpy.concatenate', 'np.concatenate', (["(X['X_train'], X['m_train'], X['delta_t_train'])"], {'axis': '(1)'}), "((X['X_train'], X['m_train'], X['delta_t_train']), axis=1)\n", (2557, 2615), True, 'import numpy as np\n'), ((2636, 2702), 'numpy.concatenate', 'np.concatenate', (["(X['X_val'], X['m_val'], X['delta_t_val'])"], {'axis': '(1)'}), "((X['X_val'], X['m_val'], X['delta_t_val']), axis=1)\n", (2650, 2702), True, 'import numpy as np\n')] |
import cv2
import matplotlib.pyplot as mt
import glob
import sys
import numpy as np
import pickle
from sklearn.neural_network import MLPClassifier
hog = cv2.HOGDescriptor()
features = []
labels=[]
for img in glob.glob("C:/Users/koki/Desktop/project/Train/green leaves/*.jpg"):
image= cv2.imread(img)
image=cv2.resize(image,(150, 150))
h = hog.compute(image)
features.append(h)
labels.append(0)
for img in glob.glob("C:/Users/koki/Desktop/project/Train/Brown spot/*.jpg"):
image= cv2.imread(img)
image=cv2.resize(image,(150, 150))
h = hog.compute(image)
features.append(h)
labels.append(1)
for img in glob.glob("C:/Users/koki/Desktop/project/Train/paddy blast/*.jpg"):
image= cv2.imread(img)
image=cv2.resize(image,(150, 150))
h = hog.compute(image)
features.append(h)
labels.append(2)
for img in glob.glob("C:/Users/koki/Desktop/project/Train/bacterial blight/*.jpg"):
image= cv2.imread(img)
image=cv2.resize(image,(150, 150))
h = hog.compute(image)
features.append(h)
labels.append(3)
fet= np.array( features )
features=[]
lb=np.array(labels)
lb=np.reshape(lb,[915, 1])
fet=np.reshape(fet,[915,124740])
print(np.shape(fet))
clf = MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(5, 2), random_state=1)
clf.fit(fet,lb)
pickle.dump(clf,open('neural.model', 'wb'))
| [
"numpy.shape",
"sklearn.neural_network.MLPClassifier",
"numpy.reshape",
"cv2.HOGDescriptor",
"numpy.array",
"cv2.resize",
"cv2.imread",
"glob.glob"
] | [((153, 172), 'cv2.HOGDescriptor', 'cv2.HOGDescriptor', ([], {}), '()\n', (170, 172), False, 'import cv2\n'), ((208, 275), 'glob.glob', 'glob.glob', (['"""C:/Users/koki/Desktop/project/Train/green leaves/*.jpg"""'], {}), "('C:/Users/koki/Desktop/project/Train/green leaves/*.jpg')\n", (217, 275), False, 'import glob\n'), ((429, 494), 'glob.glob', 'glob.glob', (['"""C:/Users/koki/Desktop/project/Train/Brown spot/*.jpg"""'], {}), "('C:/Users/koki/Desktop/project/Train/Brown spot/*.jpg')\n", (438, 494), False, 'import glob\n'), ((647, 713), 'glob.glob', 'glob.glob', (['"""C:/Users/koki/Desktop/project/Train/paddy blast/*.jpg"""'], {}), "('C:/Users/koki/Desktop/project/Train/paddy blast/*.jpg')\n", (656, 713), False, 'import glob\n'), ((868, 939), 'glob.glob', 'glob.glob', (['"""C:/Users/koki/Desktop/project/Train/bacterial blight/*.jpg"""'], {}), "('C:/Users/koki/Desktop/project/Train/bacterial blight/*.jpg')\n", (877, 939), False, 'import glob\n'), ((1088, 1106), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (1096, 1106), True, 'import numpy as np\n'), ((1124, 1140), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1132, 1140), True, 'import numpy as np\n'), ((1144, 1168), 'numpy.reshape', 'np.reshape', (['lb', '[915, 1]'], {}), '(lb, [915, 1])\n', (1154, 1168), True, 'import numpy as np\n'), ((1172, 1202), 'numpy.reshape', 'np.reshape', (['fet', '[915, 124740]'], {}), '(fet, [915, 124740])\n', (1182, 1202), True, 'import numpy as np\n'), ((1228, 1317), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': '"""lbfgs"""', 'alpha': '(1e-05)', 'hidden_layer_sizes': '(5, 2)', 'random_state': '(1)'}), "(solver='lbfgs', alpha=1e-05, hidden_layer_sizes=(5, 2),\n random_state=1)\n", (1241, 1317), False, 'from sklearn.neural_network import MLPClassifier\n'), ((288, 303), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (298, 303), False, 'import cv2\n'), ((314, 343), 'cv2.resize', 'cv2.resize', (['image', '(150, 150)'], 
{}), '(image, (150, 150))\n', (324, 343), False, 'import cv2\n'), ((507, 522), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (517, 522), False, 'import cv2\n'), ((533, 562), 'cv2.resize', 'cv2.resize', (['image', '(150, 150)'], {}), '(image, (150, 150))\n', (543, 562), False, 'import cv2\n'), ((726, 741), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (736, 741), False, 'import cv2\n'), ((752, 781), 'cv2.resize', 'cv2.resize', (['image', '(150, 150)'], {}), '(image, (150, 150))\n', (762, 781), False, 'import cv2\n'), ((952, 967), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (962, 967), False, 'import cv2\n'), ((978, 1007), 'cv2.resize', 'cv2.resize', (['image', '(150, 150)'], {}), '(image, (150, 150))\n', (988, 1007), False, 'import cv2\n'), ((1207, 1220), 'numpy.shape', 'np.shape', (['fet'], {}), '(fet)\n', (1215, 1220), True, 'import numpy as np\n')] |
'''
Collects male and female speech of healthy and dysphonic speakers
Calculates 40 log mel-filterbanks, their delta (rate of change) and delta delta (acceleration_of_change)
Puts these as features (total 120) into database
Speaker IDs, sex, and healthy or dysphonia is also saved
'''
import sqlite3
import glob
import numpy as np
import pandas as pd
import librosa
def create_data_table(num_features):
cols = []
for i in range(num_features):
cols.append("'{}' REAL".format(i))
cols_str = ", ".join(cols)
msg = '''CREATE TABLE IF NOT EXISTS speaker_data(sample_id INTEGER PRIMARY KEY, %s,speaker_id INT, sex TEXT,label INT)''' % cols_str
c.execute(msg)
conn.commit()
return None
def save_features_sql(prepped_features, num_features):
    """Bulk-insert prepared rows into speaker_data.

    Each row is (feature_0 .. feature_{num_features-1}, speaker_id, sex, label);
    sample_id autoincrements via the NULL placeholder. Uses module-level
    ``conn`` and ``c``.
    """
    placeholders = " ?," * num_features
    msg = ' INSERT INTO speaker_data VALUES(NULL,%s ?, ?, ?) ' % placeholders
    c.executemany(msg, prepped_features)
    conn.commit()
    return None
def get_filenames(group, gender):
    """Return the sentence .wav paths for one condition/sex combination."""
    pattern = "./speech_data/dataset/{}/{}/sentences/*.wav".format(group, gender)
    return list(glob.glob(pattern))
def match_condition_lengths(clinical, healthy):
    """Truncate the longer list so both cohorts end up the same length."""
    n = min(len(clinical), len(healthy))
    return clinical[:n], healthy[:n]
def get_mel_spectrogram_derivatives(filename,num_mels):
    '''
    get mel spectrogram at windows of 25 ms and shifts of 10 ms

    Returns three (n_frames, num_mels) arrays: the mel spectrogram, its
    delta (rate of change) and its delta-delta (acceleration of change).
    '''
    # NOTE(review): sr=1600 looks like a typo for 16000 -- at 1600 Hz the
    # 25 ms window is only int(0.025*1600) = 40 samples. TODO confirm.
    y, sr = librosa.load(filename, sr=1600)
    spect = librosa.feature.melspectrogram(y,sr=sr,hop_length=int(0.010*sr),n_fft=int(0.025*sr),n_mels=num_mels)
    # First- and second-order deltas of the spectrogram along the time axis.
    rate_of_change = librosa.feature.delta(spect)
    acceleration_of_change = librosa.feature.delta(spect,order=2)
    #transpose so features are columns and rows are frames
    spect = spect.transpose()
    rate_of_change = rate_of_change.transpose()
    acceleration_of_change = acceleration_of_change.transpose()
    return spect, rate_of_change, acceleration_of_change
def get_all_features(filename, num_mels):
    """Return per-frame features for one file: mel spectrogram, delta and
    delta-delta stacked column-wise into shape (n_frames, num_mels * 3).
    """
    spect, rate_of_change, acceleration_of_change = get_mel_spectrogram_derivatives(
        filename, num_mels=num_mels)
    # Column-stack the three (n_frames, num_mels) matrices in one vectorized
    # call instead of concatenating row by row into a preallocated buffer.
    # astype keeps the original float64 output dtype (np.empty's default).
    stacked = np.hstack((spect, rate_of_change, acceleration_of_change))
    return stacked.astype(np.float64, copy=False)
def prep_features_sql(filename, group, gender):
    """Build executemany-ready row tuples for one recording.

    The speaker id is parsed from the file name (text before the first '-').
    Each returned tuple is (feature..., speaker_id, sex, label).
    """
    speaker_id = filename.split("-")[0]
    speaker_id = speaker_id.split("/")[-1]
    features = get_all_features(filename, num_mels=40)
    df = pd.DataFrame(features)
    df["speaker_id"] = speaker_id
    df["sex"] = gender
    df["label"] = group
    # The dict keys (row indices) were unused; iterate the values directly
    # and build the tuples with a comprehension.
    return [tuple(row.values()) for row in df.to_dict(orient="index").values()]
def collect_features_save_sql(filename_list):
    """Extract features for every file and persist them to the database.

    The label (0 healthy / 1 dysphonia) and sex are inferred from the path.
    """
    for filename in filename_list:
        group = 0 if "healthy" in filename else 1
        gender = "female" if "female" in filename else "male"
        prepped_features = prep_features_sql(filename, group, gender)
        save_features_sql(prepped_features, num_features=120)
    return None
if __name__ == "__main__":
    conn = sqlite3.connect("healthy_dysphonia_speech.db")
    c = conn.cursor()
    # Fixed misspelled variable name (was `healhy_women`).
    healthy_women = get_filenames("healthy", "female")
    dysphonia_women = get_filenames("dysphonia", "female")
    healthy_men = get_filenames("healthy", "male")
    dysphonia_men = get_filenames("dysphonia", "male")
    # want to randomize this in the future
    # but for now, simply matches length of healthy and dysphonia speech recordings
    dysphonia_women, healthy_women = match_condition_lengths(dysphonia_women, healthy_women)
    dysphonia_men, healthy_men = match_condition_lengths(dysphonia_men, healthy_men)
    create_data_table(num_features=120)  # 40 mel-filterbanks * 3 (1st and 2nd derivatives are included as features)
    try:
        collect_features_save_sql(dysphonia_women)
        collect_features_save_sql(healthy_women)
        collect_features_save_sql(dysphonia_men)
        collect_features_save_sql(healthy_men)
    except Exception as e:
        print(e)
    finally:
        # Always release the connection, even on failure mid-extraction.
        conn.close()
| [
"sqlite3.connect",
"librosa.feature.delta",
"numpy.empty",
"numpy.concatenate",
"pandas.DataFrame",
"librosa.load"
] | [((1599, 1630), 'librosa.load', 'librosa.load', (['filename'], {'sr': '(1600)'}), '(filename, sr=1600)\n', (1611, 1630), False, 'import librosa\n'), ((1765, 1793), 'librosa.feature.delta', 'librosa.feature.delta', (['spect'], {}), '(spect)\n', (1786, 1793), False, 'import librosa\n'), ((1823, 1860), 'librosa.feature.delta', 'librosa.feature.delta', (['spect'], {'order': '(2)'}), '(spect, order=2)\n', (1844, 1860), False, 'import librosa\n'), ((2337, 2373), 'numpy.empty', 'np.empty', (['(len_values, num_mels * 3)'], {}), '((len_values, num_mels * 3))\n', (2345, 2373), True, 'import numpy as np\n'), ((2751, 2773), 'pandas.DataFrame', 'pd.DataFrame', (['features'], {}), '(features)\n', (2763, 2773), True, 'import pandas as pd\n'), ((3499, 3545), 'sqlite3.connect', 'sqlite3.connect', (['"""healthy_dysphonia_speech.db"""'], {}), "('healthy_dysphonia_speech.db')\n", (3514, 3545), False, 'import sqlite3\n'), ((2453, 2525), 'numpy.concatenate', 'np.concatenate', (['(spect[i], rate_of_change[i], acceleration_of_change[i])'], {}), '((spect[i], rate_of_change[i], acceleration_of_change[i]))\n', (2467, 2525), True, 'import numpy as np\n')] |
import sys
import re
from copy import deepcopy
import numpy as np
import pandas as pd
from sklearn.cluster import MiniBatchKMeans
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit import DataStructs
import numpy.linalg as LA
from typing import Union
from .fingerprint_utils import csc_drop_zerocols
class TargetProduct(object):
    """Similarity/distance of predicted product SMILES to a fixed target molecule.

    Fingerprints are radius-2 Morgan fingerprints without feature invariants.
    Invalid SMILES score as maximally distant (similarity 0 / distance 1 /
    norm 9999).
    """

    def __init__(self, smi, similarity='tanimoto', verbose=False):
        self.smi = smi
        self.mol = Chem.MolFromSmiles(smi)
        self.fp = AllChem.GetMorganFingerprint(self.mol, radius=2, useFeatures=False)
        self.l2 = LA.norm(list(self.fp.GetNonzeroElements().values()))
        self.l1 = LA.norm(list(self.fp.GetNonzeroElements().values()), 1)
        self.similarity = similarity
        self.verbose = verbose

    def calc_ts(self, smi, distance=False):
        """Tanimoto similarity (or distance, if ``distance``) to the target."""
        try:
            mol = Chem.MolFromSmiles(smi)
            fp = AllChem.GetMorganFingerprint(mol, radius=2, useFeatures=False)
            sim = DataStructs.TanimotoSimilarity(self.fp, fp, returnDistance=distance)
        except Exception:
            # Unparseable SMILES: worst possible score.
            sim = 1 if distance else 0
            if self.verbose:
                print('Original SMILES: {}'.format(smi), file=sys.stderr)
        return sim

    def _fp_diff_norm(self, smi, p):
        """Lp norm of (target fp - smi fp); 9999 for invalid SMILES.

        Shared implementation for calc_l2 / calc_l1, which previously
        duplicated this body.
        """
        try:
            mol = Chem.MolFromSmiles(smi)
            fp = self.fp - AllChem.GetMorganFingerprint(mol, radius=2, useFeatures=False)
            return LA.norm(list(fp.GetNonzeroElements().values()), p)
        except Exception:
            if self.verbose:
                print('Original SMILES: {}'.format(smi), file=sys.stderr)
            return 9999

    def calc_l2(self, smi):
        """Euclidean norm of the fingerprint difference to the target."""
        return self._fp_diff_norm(smi, 2)

    def calc_l1(self, smi):
        """Manhattan norm of the fingerprint difference to the target."""
        return self._fp_diff_norm(smi, 1)

    def distance(self, products_array):
        """Distance of every predicted product to the target.

        products_array: iterable of per-reaction SMILES lists.
        Returns a DataFrame, one row per reaction.
        """
        if self.similarity == 'tanimoto':
            metric = lambda smi: self.calc_ts(smi, distance=True)
        elif self.similarity == 'euclidean':
            metric = self.calc_l2
        elif self.similarity == 'manhattan':
            metric = self.calc_l1
        else:
            raise NotImplementedError
        products_distance = [[metric(smi) for smi in reaction]
                             for reaction in products_array]
        return pd.DataFrame(products_distance)

    def likelihood(self, products_array, scores_array):
        """Similarity-weighted likelihood per reaction (tanimoto only).

        scores_array holds log-scores; they are exponentiated before
        weighting the per-product similarities.
        """
        products_sim = pd.DataFrame(products_array)
        if self.similarity == 'tanimoto':
            products_sim = products_sim.applymap(self.calc_ts)
        else:
            raise NotImplementedError
        scores_array = np.exp(scores_array)
        likelihood = products_sim * scores_array
        return likelihood.sum(axis=1)
# SMILES tokenization pattern. Written as a raw string: the original relied
# on Python preserving invalid escape sequences (\[, \(, ...), which emits
# SyntaxWarning on modern Python. The string value is byte-identical.
# Matches bracket atoms, two-letter halogens, organic-subset atoms, bonds,
# stereo marks, ring-closure digits (%NN and single digits) and punctuation.
pattern = r"(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"
regex = re.compile(pattern)
class ParticalsTargetDistanceCalculator(object):
    """Runs the forward-prediction model on particle reactant sets and scores
    the predicted products by their distance to the target molecule.
    """

    def __init__(self, candidates_smis, predictor, target):
        self.candidates_smis = candidates_smis
        self.predictor = predictor
        self.target = target

    def prediction_smi(self, smis_list, **kwargs):
        """Predict products step by step for each particle.

        smis_list: one list of per-step reactant SMILES per particle; the
        top product of each step is fed into the next step's reactants.
        Returns (products, scores) of the final step.
        """
        smis_list_zipped = zip(*smis_list)
        product_previous_step = [""] * len(smis_list)
        for smi_list_step in smis_list_zipped:
            smi_list_step = zip(product_previous_step, smi_list_step)
            smi_list_step = [".".join(filter(None, smi)) for smi in smi_list_step]
            processed_smi = list()
            for s in smi_list_step:
                token = regex.findall(s)
                # Bug fix: check that the tokenization reconstructs the SMILES.
                # The original asserted s == ''.join(s), which is trivially
                # true and never caught dropped characters.
                assert s == ''.join(token)
                processed_smi.append(' '.join(token))
            step_score, step_product = self.predictor.translate(src_data_iter=processed_smi, **kwargs)
            product_previous_step = [step_product[i][0] for i in range(len(smis_list))]
        return step_product, step_score

    def distance_index(self, reactants_list, **kwargs):
        """Predict products for each particle and return likelihood-weighted
        distances to the target.

        Returns (products, scores, distances_adjusted) where each adjusted
        distance averages the per-product distances, weighted by normalized
        prediction likelihood; particles with no valid product fall back to
        the mean distance.
        """
        smis_list = [r.idx2smi(self.candidates_smis) for r in reactants_list]
        products, scores = self.prediction_smi(smis_list, **kwargs)
        distances = self.target.distance(products)
        print(distances.mask(distances == 9999).describe())
        distances = distances.values
        # 9999 marks unparseable product SMILES; mask them out of the average.
        distances_masked = np.ma.masked_values(distances, 9999)
        distances_likelihood = \
            loglikelihood_matrix_normalizer(scores, mask=distances_masked.mask)
        weighted = (distances_masked * distances_likelihood).sum(axis=1)
        distances_adjusted = weighted.filled(weighted.mean())
        return products, scores, distances_adjusted
def loglikelihood_matrix_normalizer(array, mask):
    """Row-normalize a masked log-likelihood matrix; returns a *likelihood*
    (softmax) matrix.

    Normalization now happens in log scale -- each row is shifted by its
    maximum before exponentiating -- so large-magnitude log-likelihoods no
    longer overflow/underflow, which the previous direct np.exp did (the
    original docstring asked for exactly this fix). Mathematically the
    result is unchanged: exp(x - m) / sum(exp(x - m)) == softmax(x).
    """
    loglikelihood_matrix = np.ma.array(array, mask=mask)
    row_max = loglikelihood_matrix.max(axis=1)
    shifted = loglikelihood_matrix - row_max[:, np.newaxis]
    likelihood_matrix = np.exp(shifted)
    norm = likelihood_matrix.sum(axis=1)
    return likelihood_matrix / norm[:, np.newaxis]
def reactant_random_sampling(n_reactants: int, n_particles: int,
                             n_candidates: int, candidates_prob: Union[list, None]) -> list:
    """Draw candidate indices for every reactant slot and return one
    index-tuple per particle.

    For each slot, n_particles indices are drawn without replacement
    (optionally weighted by candidates_prob).
    """
    if candidates_prob is not None:
        assert n_candidates == len(candidates_prob)
    draws = [
        np.random.choice(n_candidates, size=n_particles,
                         replace=False, p=candidates_prob)
        for _ in range(n_reactants)
    ]
    return list(zip(*draws))
def idx2smi(idx_list, candidates_smi):
    """Map each index group to one '.'-joined SMILES string."""
    return [".".join(candidates_smi[i] for i in index) for index in idx_list]
def idx2fp(idx_list, candidates_fp):
    """Accumulate candidate fingerprints position-wise across index tuples.

    Each column of idx_list (one reactant slot per particle) selects rows of
    candidates_fp; the selected matrices are summed, yielding one row per
    particle.
    """
    fp_matrix = 0
    for slot_indices in zip(*idx_list):
        fp_matrix = fp_matrix + candidates_fp[list(slot_indices)]
    return fp_matrix
class ReactantList(object):
    """One particle of the reactant search: candidate indices grouped per
    reaction step, plus a [step, position] cursor for Gibbs updates.
    """

    def __init__(self, reactant_num_list, n_candidates, candidates_prob, gibbs_index=None):
        """reactant_num_list: number of reactants per reaction step.
        Indices are drawn without replacement from range(n_candidates),
        optionally weighted by candidates_prob.
        gibbs_index: [step, position] cursor; defaults to [0, 0].
        """
        # None sentinel instead of a mutable default list ([0, 0]) that was
        # shared across every instance constructed with the default.
        if gibbs_index is None:
            gibbs_index = [0, 0]
        r_list = np.random.choice(n_candidates, sum(reactant_num_list), replace=False, p=candidates_prob)
        self.reactant_list = np.split(r_list, np.cumsum(reactant_num_list))[:-1]
        self.gibbs_index = gibbs_index
        self.immutable_list = self.immutable()

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.immutable_list == other.immutable_list
        return False

    def __hash__(self):
        return hash(self.immutable_list)

    def immutable(self):
        """Order-independent hashable form: tuple of sorted index tuples."""
        return tuple(tuple(sorted(reactant)) for reactant in self.reactant_list)

    def idx2smi(self, candidates_smis):
        """Per-step '.'-joined SMILES strings of the selected candidates."""
        smi_list = list()
        for reactant_step in self.reactant_list:
            smi_step = [candidates_smis[index] for index in reactant_step]
            smi_step = ".".join(smi_step)
            smi_list.append(smi_step)
        return smi_list

    def idx2fp(self, candidates_fp):
        """Summed fingerprint of all selected candidates (all steps)."""
        r_list = np.concatenate(self.reactant_list)
        fp = candidates_fp[r_list]
        return fp.sum(0)

    def nearest_neighbor(self, idx, exclude=None):
        """Return copies of this particle where the reactant under the Gibbs
        cursor is replaced by each of its lookup neighbors in ``idx``
        (optionally excluding one index); the cursor advances in the copies.
        """
        reactant_gibbs = self.reactant_list[self.gibbs_index[0]][self.gibbs_index[1]]
        reactant_next = idx[reactant_gibbs]
        if exclude in reactant_next:
            reactant_next = list(reactant_next)
            reactant_next.remove(exclude)
        # Advance the cursor: wrap the position first, then the step index.
        gibbs_index1 = (self.gibbs_index[1] + 1) % len(self.reactant_list[self.gibbs_index[0]])
        if (self.gibbs_index[1] + 1) // len(self.reactant_list[self.gibbs_index[0]]):
            gibbs_index0 = (self.gibbs_index[0] + 1) % len(self.reactant_list)
        else:
            gibbs_index0 = self.gibbs_index[0]
        new_reactantlist = deepcopy(self)
        new_reactantlist.gibbs_index = [gibbs_index0, gibbs_index1]
        nn_list = list()
        for i in reactant_next:
            nn = deepcopy(new_reactantlist)
            nn.reactant_list[self.gibbs_index[0]][self.gibbs_index[1]] = i
            nn.immutable_list = nn.immutable()
            nn_list.append(nn)
        return nn_list
# def test_ReactantList(reactant_num_list):
# test_reactantlist = ReactantList(reactant_num_list, n_candidates, candidates_prob)
# # global test_reactantlist
# print("SMILES:", test_reactantlist.idx2smi(candidates_smis))
# print("Fingerprint:", test_reactantlist.idx2fp(candidates_fp))
# print("Nearest neighbor:")
# for t in test_reactantlist.nearest_neighbor(idx)[:10]:
# print(t.__dict__)
def ga_clustering(reactants_candidates_fps, n_clusters):
    """Cluster candidate fingerprints with MiniBatchKMeans.

    All-zero fingerprint columns are dropped before clustering; the function
    prints diagnostics and returns the per-sample cluster labels.
    """
    clustering_fps = csc_drop_zerocols(reactants_candidates_fps)
    kmeans = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++', n_init=10,
                             init_size=1000, batch_size=1000)
    kmeans.fit(clustering_fps)
    labels = kmeans.labels_
    n_found = len(np.unique(labels))
    print('cluster number:', n_found,
          '\ninertia:', kmeans.inertia_)
    print('Dropped fingerprint matrix shape:',
          clustering_fps.shape)
    return labels
def group_uniform_sampling(reactants_candidates_df, top=100):
    """Select up to ``top`` rows, cycling through cluster labels so each
    cluster contributes its best-weighted rows in turn.

    Returns the selected rows' ['reactants', 'labels', 'distance_pred'].
    """
    df = reactants_candidates_df.sort_values('weights', ascending=False)
    grouped = df.groupby('labels')
    reactants_candidates_uniform = pd.DataFrame()
    i = 0
    while len(reactants_candidates_uniform) < top:
        step_list = list()
        for name, group in grouped:
            if i < len(group):
                step_list.append(group.iloc[i])
        # Bug fix: when every group is exhausted the original looped forever
        # (empty step_list never grows the result). Stop instead.
        if not step_list:
            break
        step_df = pd.DataFrame(step_list)
        reactants_candidates_uniform = pd.concat([reactants_candidates_uniform,
                                                   step_df.sort_values('weights', ascending=False)])
        i += 1
    return reactants_candidates_uniform[:top][['reactants', 'labels', 'distance_pred']]
def group_weight_sampling(reactants_candidates_df, size):
    """Sample about ``size`` rows with per-cluster quotas proportional to the
    cluster's mean weight; within a cluster, the best-weighted rows win.

    Returns the selected rows' ['reactants', 'labels', 'distance_pred'].
    """
    df = reactants_candidates_df.sort_values('weights', ascending=False)
    grouped = df.groupby('labels')
    # Mean weight per cluster, normalized to a probability-like share.
    group_weight = grouped['weights'].mean()
    group_weight = group_weight / group_weight.sum()
    # Ceiling so any cluster with a positive share gets at least one slot;
    # this can overshoot ``size``.
    group_sampling_size = np.ceil(group_weight*size).astype(int)
    def ordering(x):
        # Attach each group's quota and every row's rank within the group
        # (rows are already globally weight-sorted, so rank == weight order).
        x['sampling_size'] = group_sampling_size[x['labels'].iloc[0]]
        x['order'] = list(range(len(x)))
        return x
    df = grouped.apply(ordering)
    # A row is proposed when it ranks within its group's quota.
    df['proposal'] = df.apply(lambda x: x['order'] < x['sampling_size'], axis=1)
    if df['proposal'].sum() < size:
        # Rounding can still undershoot; top up with the next-best
        # unproposed rows (df is sorted by weight).
        n_reple = size - df['proposal'].sum()
        reple_index = df[~df['proposal']].index
        reple_index = reple_index[:n_reple]
        df.loc[reple_index, 'proposal'] = True
    return df[df['proposal']][['reactants', 'labels', 'distance_pred']]
def distance2weights(distance, temperature):
    """Boltzmann-style weights exp(-d/temperature), treating negative
    distances as zero (weight 1)."""
    return np.exp(-np.where(distance > 0, distance, 0) / temperature)
| [
"numpy.ma.masked_values",
"numpy.ceil",
"numpy.unique",
"numpy.ma.array",
"re.compile",
"numpy.where",
"sklearn.cluster.MiniBatchKMeans",
"numpy.random.choice",
"rdkit.Chem.MolFromSmiles",
"numpy.exp",
"numpy.cumsum",
"numpy.concatenate",
"copy.deepcopy",
"pandas.DataFrame",
"rdkit.DataS... | [((3934, 3953), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (3944, 3953), False, 'import re\n'), ((6407, 6436), 'numpy.ma.array', 'np.ma.array', (['array'], {'mask': 'mask'}), '(array, mask=mask)\n', (6418, 6436), True, 'import numpy as np\n'), ((6461, 6489), 'numpy.exp', 'np.exp', (['loglikelihood_matrix'], {}), '(loglikelihood_matrix)\n', (6467, 6489), True, 'import numpy as np\n'), ((10351, 10455), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'n_clusters', 'init': '"""k-means++"""', 'n_init': '(10)', 'init_size': '(1000)', 'batch_size': '(1000)'}), "(n_clusters=n_clusters, init='k-means++', n_init=10,\n init_size=1000, batch_size=1000)\n", (10366, 10455), False, 'from sklearn.cluster import MiniBatchKMeans\n'), ((10579, 10596), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (10588, 10596), True, 'import numpy as np\n'), ((11060, 11074), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11072, 11074), True, 'import pandas as pd\n'), ((12591, 12626), 'numpy.where', 'np.where', (['(distance > 0)', 'distance', '(0)'], {}), '(distance > 0, distance, 0)\n', (12599, 12626), True, 'import numpy as np\n'), ((12638, 12669), 'numpy.exp', 'np.exp', (['(-distance / temperature)'], {}), '(-distance / temperature)\n', (12644, 12669), True, 'import numpy as np\n'), ((455, 478), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (473, 478), False, 'from rdkit import Chem\n'), ((497, 564), 'rdkit.Chem.AllChem.GetMorganFingerprint', 'AllChem.GetMorganFingerprint', (['self.mol'], {'radius': '(2)', 'useFeatures': '(False)'}), '(self.mol, radius=2, useFeatures=False)\n', (525, 564), False, 'from rdkit.Chem import AllChem\n'), ((3379, 3410), 'pandas.DataFrame', 'pd.DataFrame', (['products_distance'], {}), '(products_distance)\n', (3391, 3410), True, 'import pandas as pd\n'), ((3491, 3519), 'pandas.DataFrame', 'pd.DataFrame', (['products_array'], {}), '(products_array)\n', 
(3503, 3519), True, 'import pandas as pd\n'), ((3700, 3720), 'numpy.exp', 'np.exp', (['scores_array'], {}), '(scores_array)\n', (3706, 3720), True, 'import numpy as np\n'), ((5844, 5880), 'numpy.ma.masked_values', 'np.ma.masked_values', (['distances', '(9999)'], {}), '(distances, 9999)\n', (5863, 5880), True, 'import numpy as np\n'), ((8654, 8688), 'numpy.concatenate', 'np.concatenate', (['self.reactant_list'], {}), '(self.reactant_list)\n', (8668, 8688), True, 'import numpy as np\n'), ((9407, 9421), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (9415, 9421), False, 'from copy import deepcopy\n'), ((11334, 11357), 'pandas.DataFrame', 'pd.DataFrame', (['step_list'], {}), '(step_list)\n', (11346, 11357), True, 'import pandas as pd\n'), ((963, 986), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (981, 986), False, 'from rdkit import Chem\n'), ((1004, 1066), 'rdkit.Chem.AllChem.GetMorganFingerprint', 'AllChem.GetMorganFingerprint', (['mol'], {'radius': '(2)', 'useFeatures': '(False)'}), '(mol, radius=2, useFeatures=False)\n', (1032, 1066), False, 'from rdkit.Chem import AllChem\n'), ((1085, 1153), 'rdkit.DataStructs.TanimotoSimilarity', 'DataStructs.TanimotoSimilarity', (['self.fp', 'fp'], {'returnDistance': 'distance'}), '(self.fp, fp, returnDistance=distance)\n', (1115, 1153), False, 'from rdkit import DataStructs\n'), ((1502, 1525), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (1520, 1525), False, 'from rdkit import Chem\n'), ((1986, 2009), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (2004, 2009), False, 'from rdkit import Chem\n'), ((6963, 7050), 'numpy.random.choice', 'np.random.choice', (['n_candidates'], {'size': 'n_particles', 'replace': '(False)', 'p': 'candidates_prob'}), '(n_candidates, size=n_particles, replace=False, p=\n candidates_prob)\n', (6979, 7050), True, 'import numpy as np\n'), ((9564, 9590), 'copy.deepcopy', 'deepcopy', (['new_reactantlist'], 
{}), '(new_reactantlist)\n', (9572, 9590), False, 'from copy import deepcopy\n'), ((11933, 11961), 'numpy.ceil', 'np.ceil', (['(group_weight * size)'], {}), '(group_weight * size)\n', (11940, 11961), True, 'import numpy as np\n'), ((1553, 1615), 'rdkit.Chem.AllChem.GetMorganFingerprint', 'AllChem.GetMorganFingerprint', (['mol'], {'radius': '(2)', 'useFeatures': '(False)'}), '(mol, radius=2, useFeatures=False)\n', (1581, 1615), False, 'from rdkit.Chem import AllChem\n'), ((2037, 2099), 'rdkit.Chem.AllChem.GetMorganFingerprint', 'AllChem.GetMorganFingerprint', (['mol'], {'radius': '(2)', 'useFeatures': '(False)'}), '(mol, radius=2, useFeatures=False)\n', (2065, 2099), False, 'from rdkit.Chem import AllChem\n'), ((7850, 7878), 'numpy.cumsum', 'np.cumsum', (['reactant_num_list'], {}), '(reactant_num_list)\n', (7859, 7878), True, 'import numpy as np\n')] |
from src.models.igre_test import igre_test
import yaml
import numpy as np
from copy import deepcopy
import os
OUT_FILENAME_FORMAT = "x{SHIFT_X}_y{SHIFT_Y}_modality_step{MODALITY_DIFF}_{CUSTOM}.result"
def __create_batch(config, shift, repeats):
    """ In principle we expect valid configuration for igre in batch config. But there is possible batch
    configuration in "batch" property. Each variable requires special care, therefore there will be an if
    block in this method for each batch configuration.

    Expands the "batch" section into one concrete run config per
    (swept value, repeat), encoding shift / modality step / repeat index
    into the output filename.
    """
    template = deepcopy(config)
    del template["batch"]
    template["output"] = (OUT_FILENAME_FORMAT
                          .replace("{SHIFT_X}", str(shift[0]))
                          .replace("{SHIFT_Y}", str(shift[1])))
    batch = []
    for param in config["batch"]["params"]:
        if param != "output_dimension":
            continue
        sweep = config["batch"][param]
        for repeat in range(repeats):
            for value in np.arange(sweep["min"], sweep["max"], sweep["step"]):
                run_cfg = deepcopy(template)
                run_cfg["output_dimensions"]["min"] = value
                run_cfg["output_dimensions"]["max"] = value
                modality_diff = value - run_cfg["input_dimensions"]["min"]
                run_cfg["output"] = (run_cfg["output"]
                                     .replace("{MODALITY_DIFF}", str(modality_diff))
                                     .replace("{CUSTOM}", str(repeat)))
                batch.append(run_cfg)
    return batch
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        type=str,
        default="./data/interim/examples/config-igre-batch-test.yaml",
        help="Config file for IGRE batch. For more see example file.",
    )
    parser.add_argument(
        "-d",
        "--batch_dir",
        type=str,
        default="data/processed/metacentrum/01-registration-experiment",
        help="yaml output file, where collected data will be placed",
    )
    parser.add_argument(
        "-x",
        "--x-shift",
        type=float,
        default=0,
        help="x-Shift of the input data",
    )
    parser.add_argument(
        "-y",
        "--y-shift",
        type=float,
        default=0,
        help="y-Shift of the input data",
    )
    parser.add_argument(
        "-s",
        "--repeats",
        type=int,
        default=20,
        help="Number of repeated computations with different random seed",
    )
    args = parser.parse_args()

    # exist_ok avoids the check-then-create race of the old exists()/makedirs pair.
    os.makedirs(args.batch_dir, exist_ok=True)

    with open(args.config) as config_file:
        # safe_load: batch configs are plain YAML; no need for FullLoader's
        # wider (and riskier) tag support.
        batch_config = yaml.safe_load(config_file)

    batch = __create_batch(batch_config, (args.x_shift, args.y_shift), args.repeats)
    print("done")
    for run_conf in batch:
        igre_test(run_conf, (args.x_shift, args.y_shift),
                  os.path.join(args.batch_dir, run_conf["output"]))
| [
"os.path.exists",
"os.makedirs",
"argparse.ArgumentParser",
"os.path.join",
"yaml.load",
"copy.deepcopy",
"numpy.arange"
] | [((538, 554), 'copy.deepcopy', 'deepcopy', (['config'], {}), '(config)\n', (546, 554), False, 'from copy import deepcopy\n'), ((1615, 1640), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1638, 1640), False, 'import argparse\n'), ((2610, 2640), 'os.path.exists', 'os.path.exists', (['args.batch_dir'], {}), '(args.batch_dir)\n', (2624, 2640), False, 'import os\n'), ((2650, 2677), 'os.makedirs', 'os.makedirs', (['args.batch_dir'], {}), '(args.batch_dir)\n', (2661, 2677), False, 'import os\n'), ((2744, 2790), 'yaml.load', 'yaml.load', (['config_file'], {'Loader': 'yaml.FullLoader'}), '(config_file, Loader=yaml.FullLoader)\n', (2753, 2790), False, 'import yaml\n'), ((2979, 3027), 'os.path.join', 'os.path.join', (['args.batch_dir', "run_conf['output']"], {}), "(args.batch_dir, run_conf['output'])\n", (2991, 3027), False, 'import os\n'), ((914, 1021), 'numpy.arange', 'np.arange', (["config['batch'][param]['min']", "config['batch'][param]['max']", "config['batch'][param]['step']"], {}), "(config['batch'][param]['min'], config['batch'][param]['max'],\n config['batch'][param]['step'])\n", (923, 1021), True, 'import numpy as np\n'), ((1127, 1145), 'copy.deepcopy', 'deepcopy', (['template'], {}), '(template)\n', (1135, 1145), False, 'from copy import deepcopy\n')] |
"""
DeepSEA architecture (Zhou & Troyanskaya, 2015).
"""
import numpy as np
import torch
import torch.nn as nn
class DeepSEA(nn.Module):
    """DeepSEA (Zhou & Troyanskaya, 2015): three conv/pool stages followed by
    a two-layer classifier with a sigmoid output per genomic feature.
    """

    def __init__(self, sequence_length, n_genomic_features):
        """
        Parameters
        ----------
        sequence_length : int
            Length of the one-hot (4-channel) input sequence.
        n_genomic_features : int
            Number of chromatin features to predict.
        """
        super(DeepSEA, self).__init__()
        conv_k = 8
        pool_k = 4
        self.conv_net = nn.Sequential(
            nn.Conv1d(4, 320, kernel_size=conv_k),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=pool_k, stride=pool_k),
            nn.Dropout(p=0.2),
            nn.Conv1d(320, 480, kernel_size=conv_k),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=pool_k, stride=pool_k),
            nn.Dropout(p=0.2),
            nn.Conv1d(480, 960, kernel_size=conv_k),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5))

        # Track the temporal length through two conv (-7) + pool (/4) stages
        # and the final conv (-7) to size the first fully connected layer.
        reduce_by = conv_k - 1
        length = float(sequence_length)
        for _ in range(2):
            length = np.floor((length - reduce_by) / float(pool_k))
        self.n_channels = int(length) - reduce_by
        self.classifier = nn.Sequential(
            nn.Linear(960 * self.n_channels, n_genomic_features),
            nn.ReLU(inplace=True),
            nn.Linear(n_genomic_features, n_genomic_features),
            nn.Sigmoid())

    def forward(self, x):
        """Run a (batch, 4, sequence_length) tensor through the network."""
        conv_out = self.conv_net(x)
        flat = conv_out.view(conv_out.size(0), 960 * self.n_channels)
        return self.classifier(flat)
def criterion():
    """Loss the model minimizes: binary cross-entropy over sigmoid outputs."""
    return nn.BCELoss()
def get_optimizer(lr):
    """Return (optimizer class, constructor kwargs) for the given learning rate.

    The optimizer itself is instantiated later, once the model (and thus
    `model.parameters()`) exists.
    """
    kwargs = {"lr": lr, "weight_decay": 1e-6, "momentum": 0.9}
    return torch.optim.SGD, kwargs
| [
"torch.nn.MaxPool1d",
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.nn.Dropout",
"numpy.floor",
"torch.nn.BCELoss",
"torch.nn.Linear",
"torch.nn.Conv1d"
] | [((1936, 1948), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (1946, 1948), True, 'import torch.nn as nn\n'), ((475, 522), 'torch.nn.Conv1d', 'nn.Conv1d', (['(4)', '(320)'], {'kernel_size': 'conv_kernel_size'}), '(4, 320, kernel_size=conv_kernel_size)\n', (484, 522), True, 'import torch.nn as nn\n'), ((536, 557), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (543, 557), True, 'import torch.nn as nn\n'), ((571, 638), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': 'pool_kernel_size', 'stride': 'pool_kernel_size'}), '(kernel_size=pool_kernel_size, stride=pool_kernel_size)\n', (583, 638), True, 'import torch.nn as nn\n'), ((669, 686), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.2)'}), '(p=0.2)\n', (679, 686), True, 'import torch.nn as nn\n'), ((701, 750), 'torch.nn.Conv1d', 'nn.Conv1d', (['(320)', '(480)'], {'kernel_size': 'conv_kernel_size'}), '(320, 480, kernel_size=conv_kernel_size)\n', (710, 750), True, 'import torch.nn as nn\n'), ((764, 785), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (771, 785), True, 'import torch.nn as nn\n'), ((799, 866), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': 'pool_kernel_size', 'stride': 'pool_kernel_size'}), '(kernel_size=pool_kernel_size, stride=pool_kernel_size)\n', (811, 866), True, 'import torch.nn as nn\n'), ((897, 914), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.2)'}), '(p=0.2)\n', (907, 914), True, 'import torch.nn as nn\n'), ((929, 978), 'torch.nn.Conv1d', 'nn.Conv1d', (['(480)', '(960)'], {'kernel_size': 'conv_kernel_size'}), '(480, 960, kernel_size=conv_kernel_size)\n', (938, 978), True, 'import torch.nn as nn\n'), ((992, 1013), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (999, 1013), True, 'import torch.nn as nn\n'), ((1027, 1044), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1037, 1044), True, 'import torch.nn as nn\n'), ((1417, 1469), 'torch.nn.Linear', 
'nn.Linear', (['(960 * self.n_channels)', 'n_genomic_features'], {}), '(960 * self.n_channels, n_genomic_features)\n', (1426, 1469), True, 'import torch.nn as nn\n'), ((1483, 1504), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1490, 1504), True, 'import torch.nn as nn\n'), ((1518, 1567), 'torch.nn.Linear', 'nn.Linear', (['n_genomic_features', 'n_genomic_features'], {}), '(n_genomic_features, n_genomic_features)\n', (1527, 1567), True, 'import torch.nn as nn\n'), ((1581, 1593), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1591, 1593), True, 'import torch.nn as nn\n'), ((1209, 1267), 'numpy.floor', 'np.floor', (['((sequence_length - reduce_by) / pool_kernel_size)'], {}), '((sequence_length - reduce_by) / pool_kernel_size)\n', (1217, 1267), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pylab as plt
import neuroglancer
from analysis_script.utils_format_convert import read_image_vol_from_h5
from ffn.inference.storage import subvolume_path
from neuroglancer_segment_visualize import neuroglancer_visualize
from run_consensus import run_save_consensus
import networkx
import ffn.inference.storage as storage
from ffn.inference.segmentation import relabel_volume
import os
#%%
# seg_dir = "/home/morganlab/Documents/ixP11LGN/p11_1_exp10" # "/Users/binxu/Connectomics_Code/results/LGN/p11_1_exp8" # "/home/morganlab/Documents/ixP11LGN/p11_1_exp8"
# f = np.load(subvolume_path(seg_dir, (0, 0, 0), 'npz'))
# v1 = f['segmentation']
# f.close()
# f = np.load(subvolume_path(seg_dir, (0, 448, 0), 'npz'))
# v2 = f['segmentation']
# f.close()
# f = np.load(subvolume_path(seg_dir, (0, 0, 448), 'npz'))
# v3 = f['segmentation']
# f.close()
# f = np.load(subvolume_path(seg_dir, (0, 448, 448), 'npz'))
# v4 = f['segmentation']
# f.close()
#
# #%%
# seg_dict = {"seg_dir": "/home/morganlab/Documents/ixP11LGN/p11_1_exp10",
# "seg_1": {"corner": (0, 0, 0)},
# "seg_2": {"corner": (0, 0, 448)},
# "seg_3": {"corner": (0, 448, 0)},
# "seg_4": {"corner": (0, 448, 448)}
# }
# image_dir = "/home/morganlab/Documents/ixP11LGN/grayscale_ixP11_1_norm.h5"
# neuroglancer_visualize(seg_dict, "/home/morganlab/Documents/ixP11LGN/grayscale_ixP11_1_norm.h5")
# #%%
# config = """
# segmentation1 {
# directory: "/home/morganlab/Documents/ixP11LGN/p11_1_exp10"
# threshold: 0.6
# split_cc: 1
# min_size: 5000
# }
# segmentation2 {
# directory: "/home/morganlab/Documents/ixP11LGN/p11_1_exp10_rev"
# threshold: 0.6
# split_cc: 1
# min_size: 5000
# }
# segmentation_output_dir: "/home/morganlab/Documents/ixP11LGN/p11_1_exp10_consensus_rev/"
# type: CONSENSUS_SPLIT
# split_min_size: 5000
# """
# run_save_consensus(config, [(0, 0, 0), (0, 0, 448), (0, 448, 0), (0, 448, 448)])
# #%%
# seg_dict = {"seg_dir": "/home/morganlab/Documents/ixP11LGN/p11_1_exp10_consensus_rev/",
# "seg_1": {"corner": (0, 0, 0)},
# "seg_2": {"corner": (0, 0, 448)},
# "seg_3": {"corner": (0, 448, 0)},
# "seg_4": {"corner": (0, 448, 448)}
# }
# image_dir = "/home/morganlab/Documents/ixP11LGN/grayscale_ixP11_1_norm.h5"
# neuroglancer_visualize(seg_dict, "/home/morganlab/Documents/ixP11LGN/grayscale_ixP11_1_norm.h5")
# #%%
#
# plt.imshow(v1[:,-33:-30,:])
# plt.show()
# plt.imshow(v2[:,31:34,:])
# plt.show()
# #%% volume slicing function
# corner1 = (0, 0, 0)
# corner2 = (0, 448, 0)
# size = (152, 512, 512)
# size2 = (152, 512, 512)
# overlap_d = 3
def _overlap_selection(corner1, corner2, size, size2=None, overlap_d=3):
'''Return the middle of overlap subvolume to do next overlap analysis
:parameter overlap_d : it's actually the overlap_r not d
:return : sel1 sel2 2 slice object that can send into v1 v2
'''
if size2==None:
size2 = size
if corner1[0] == corner2[0] and corner1[2] == corner2[2]: # junction in y axis
if corner2[1] > corner1[1] and corner1[1] + size[1] > corner2[1]:
assert ( corner1[1] + size[1] - corner2[1] )%2 == 0
halfwid = ( corner1[1] + size[1] - corner2[1] )//2
sel1 = (slice(None), slice(-halfwid - overlap_d, -halfwid + overlap_d + 1), slice(None))
sel2 = (slice(None), slice(halfwid - overlap_d, halfwid + overlap_d + 1), slice(None))
elif corner1[1] > corner2[1] and corner2[1] + size[1] > corner1[1]:
assert (corner2[1] + size[1] - corner1[1]) % 2 == 0
halfwid = (corner2[1] + size[1] - corner1[1]) // 2
sel1 = (slice(None), slice(halfwid - overlap_d, halfwid + overlap_d + 1), slice(None))
sel2 = (slice(None), slice(-halfwid - overlap_d, -halfwid + overlap_d+ 1), slice(None))
else:
return ([],[],[]), ([],[],[])
elif corner1[0] == corner2[0] and corner1[1] == corner2[1]: # junction in x axis
if corner2[2] > corner1[2] and corner1[2] + size[2] > corner2[2]:
assert ( corner1[2] + size[2] - corner2[2] )%2 == 0
halfwid = ( corner1[2] + size[2] - corner2[2] )//2
sel1 = (slice(None), slice(None), slice(-halfwid - overlap_d, -halfwid + overlap_d + 1))
sel2 = (slice(None), slice(None), slice(halfwid - overlap_d, halfwid + overlap_d + 1))
elif corner1[2] > corner2[2] and corner2[2] + size[2] > corner1[2]:
assert (corner2[2] + size[2] - corner1[2]) % 2 == 0
halfwid = (corner2[2] + size[2] - corner1[2]) // 2
sel1 = (slice(None), slice(None), slice(halfwid - overlap_d, halfwid + overlap_d + 1))
sel2 = (slice(None), slice(None), slice(-halfwid - overlap_d, -halfwid + overlap_d + 1))
else:
return ([],[],[]), ([],[],[])
elif corner1[1] == corner2[1] and corner1[2] == corner2[2]: # junction in z axis
if corner2[0] > corner1[0] and corner1[0] + size[0] > corner2[0]:
assert ( corner1[0] + size[0] - corner2[0] )%2 == 0
halfwid = ( corner1[0] + size[0] - corner2[0] )//2
sel1 = (slice(-halfwid - overlap_d, -halfwid + overlap_d + 1), slice(None), slice(None))
sel2 = (slice(halfwid - overlap_d, halfwid + overlap_d + 1), slice(None), slice(None))
elif corner1[0] > corner2[0] and corner2[0] + size[0] > corner1[0]:
assert (corner2[0] + size[0] - corner1[0]) % 2 == 0
halfwid = (corner2[0] + size[0] - corner1[0]) // 2
sel1 = (slice(halfwid - overlap_d, halfwid + overlap_d + 1), slice(None), slice(None))
sel2 = (slice(-halfwid - overlap_d, -halfwid + overlap_d + 1), slice(None), slice(None))
else:
return ([],[],[]), ([],[],[])
else:
return ([],[],[]), ([],[],[])
return sel1, sel2
#%%
def merge_segment(v1, v2, corner1, corner2, size, size2=None, overlap_d=3, threshold=100):
    """Match segment ids of two adjacent subvolumes by mutual-best overlap.

    Each voxel pair in the junction band is encoded as ``label1 + label2*BASE``;
    a pair (id1, id2) is kept only when id1 is the dominant partner of id2 AND
    id2 is the dominant partner of id1, with both overlap counts above
    ``threshold``.

    :return: (merge_array_filt, overlap_size_array, v2_new) where v2_new is v2
        shifted into a fresh id range with the consensus pairs merged back to
        their v1 labels, or (None, None, None) when the volumes are not
        adjacent.
    """
    v1 = np.uint64(v1)
    v2 = np.uint64(v2)  # wide dtype: the composite encoding below needs the headroom
    sel1, sel2 = _overlap_selection(corner1, corner2, size, size2=size2, overlap_d=overlap_d)
    BASE = int(v1.max() + 1)
    composite = v1[sel1] + v2[sel2] * BASE  # note the label width
    if composite.size == 0:
        print("Not adjacent, not mergeable!")
        return None, None, None
    compo_idx, counts = np.unique(composite, return_counts=True)
    idx2, idx1 = np.divmod(compo_idx, BASE)

    # For every v2 id, the v1 id with the largest overlap (and its size).
    merge_list_2, size_list_2 = [], []
    for id2 in set(idx2):
        pick = idx2 == id2
        best = counts[pick].argmax()
        merge_list_2.append((idx1[pick][best], id2))
        size_list_2.append(counts[pick][best])

    # Symmetric pass: for every v1 id, its dominant v2 partner.
    merge_list_1, size_list_1 = [], []
    for id1 in set(idx1):
        pick = idx1 == id1
        best = counts[pick].argmax()
        merge_list_1.append((id1, idx2[pick][best]))
        size_list_1.append(counts[pick][best])

    consensus_merge = list(set(merge_list_1) & set(merge_list_2))
    consensus_size_list = [(size_list_1[merge_list_1.index(pair)],
                            size_list_2[merge_list_2.index(pair)])
                           for pair in consensus_merge]
    keep = np.array([size_pair[0] > threshold and size_pair[1] > threshold
                     for size_pair in consensus_size_list], dtype=bool)
    merge_array = np.array(consensus_merge)
    merge_array_filt = merge_array[keep, :]
    overlap_size_array = np.array(consensus_size_list)[keep]
    overlap_size_array = overlap_size_array[:, 1]  # to 1d array
    v2_new = v2 + BASE  # remap every v2 label into a fresh id range
    for id1, id2 in merge_array_filt[:, :]:
        v2_new[v2_new == id2 + BASE] = id1  # merge (background is merged in this step)
    return merge_array_filt, overlap_size_array, v2_new
#%%
# merge_array, overlap_size_array, v2_new = merge_segment(v1, v2, (0,0,0),(0,448,0),size=(152,512,512))
#%% Generate segment list and merge_pair list!
def stitich_subvolume_grid(seg_dir, x_step, y_step, x_num, y_num, size, start_corner = (0,0,0), output_dir=None,
                           overlap_d=3, overlap_thr=100):
    """Stitch an (x_num, y_num) grid of segmentation subvolumes into one
    globally-relabelled volume.

    Segment ids that overlap consistently between neighbouring subvolumes are
    connected in a graph; each connected component is relabelled to its
    minimum global id.  Update on Feb.23rd allow non-exist patch.

    :param seg_dir: directory holding the per-subvolume ``.npz`` files
    :param x_step, y_step: grid stride in voxels along x / y
    :param x_num, y_num: number of subvolumes along x / y
    :param size: (z, y, x) shape of each subvolume (must be >= the steps)
    :param start_corner: (z, y, x) corner offset of the first subvolume
    :param output_dir: if given, the stitched volume is saved there
    :param overlap_d: half-width of the overlap band used for matching
    :param overlap_thr: minimum overlap (voxels) for two ids to be merged
    :return: (full_segment, segment_graph, seg_id_dict)
    """
    x_margin = (size[2] - x_step) // 2
    y_margin = (size[1] - y_step) // 2
    seg_id_dict = []       # global list of (i, j, local_label) triples
    merge_pair_list = []   # edges between indices into seg_id_dict
    # FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin bool is the documented replacement.
    exist_mat = np.zeros((y_num + 2, x_num + 2), dtype=bool)  # shape of the grid with 1 pad
    exist_mat[1:-1, 1:-1] = 1
    for i in range(x_num):
        for j in range(y_num):
            shift = (0, j*y_step, i*x_step)
            # (renamed comprehension variable so it no longer shadows loop i)
            corner = tuple(shift[a] + start_corner[a] for a in range(3))
            if os.path.exists(subvolume_path(seg_dir, corner, 'npz')):
                f = np.load(subvolume_path(seg_dir, corner, 'npz'))
                vol = f['segmentation']
                f.close()
                idx_list = np.unique(vol)
                seg_id_dict.extend([(i, j, label) for label in idx_list])
            else:
                exist_mat[j+1, i+1] = False
                vol = np.zeros(size, dtype=np.uint16)
                print("Warn: Subvolume at %s not exists." % str(corner))
                seg_id_dict.extend([(i, j, 0)])
            # Link to the left neighbour (previous column).
            if i == 0:
                pass
            elif not exist_mat[j+1, i] or not exist_mat[j+1, i+1]:
                merge_pair_list.extend(
                    [[seg_id_dict.index((i - 1, j, 0)), seg_id_dict.index((i, j, 0))]])  # equalize the 0 background
            else:
                shift1 = (0, j * y_step, (i - 1) * x_step)
                corner1 = tuple(shift1[a] + start_corner[a] for a in range(3))
                f = np.load(subvolume_path(seg_dir, corner1, 'npz'))
                v1 = f['segmentation']
                f.close()
                merge_array, overlap_size_array, _ = merge_segment(v1, vol, corner1, corner, size, overlap_d=overlap_d, threshold=overlap_thr)
                merge_pair_list.extend(
                    [[seg_id_dict.index((i - 1, j, id1)), seg_id_dict.index((i, j, id2))] for id1, id2 in merge_array])
            # Link to the bottom neighbour (previous row).
            if j == 0:
                pass
            elif not exist_mat[j, i+1] or not exist_mat[j+1, i+1]:
                merge_pair_list.extend(
                    [[seg_id_dict.index((i, j - 1, 0)), seg_id_dict.index((i, j, 0))]])
            else:
                shift1 = (0, (j - 1)*y_step, i*x_step)
                corner1 = tuple(shift1[a] + start_corner[a] for a in range(3))
                f = np.load(subvolume_path(seg_dir, corner1, 'npz'))
                v1 = f['segmentation']
                f.close()
                merge_array, overlap_size_array, _ = merge_segment(v1, vol, corner1, corner, size, overlap_d=overlap_d, threshold=overlap_thr)
                merge_pair_list.extend(
                    [[seg_id_dict.index((i, j - 1, id1)), seg_id_dict.index((i, j, id2))] for id1, id2 in merge_array])
    #%% find the network component in this global network!
    segment_graph = networkx.Graph()
    segment_graph.add_edges_from(merge_pair_list)
    segment_graph.add_nodes_from(range(len(seg_id_dict)))
    final_idx = []  # NOTE: representative ids per component (kept for inspection; unused below)
    for component in networkx.connected_components(segment_graph):
        final_idx.append(min(component))
    #%% Selection helpers: where a subvolume lands on the stitched canvas and
    # which part of it is copied; spans expand into the margin when the
    # neighbouring patch is missing.
    def global_x_sel(j, i):
        # Column span of patch (i, j) inside the stitched canvas.
        start = i * x_step + x_margin
        end = (i + 1) * x_step + x_margin
        if not exist_mat[j+1, i]:
            start -= x_margin
        if not exist_mat[j+1, i+2]:
            end += x_margin
        return slice(start, end)
    def global_y_sel(j, i):
        # Row span of patch (i, j) inside the stitched canvas.
        start = j * y_step + y_margin
        end = (j + 1) * y_step + y_margin
        if not exist_mat[j, i+1]:
            start -= y_margin
        if not exist_mat[j+2, i+1]:
            end += y_margin
        return slice(start, end)
    def local_x_sel(j, i):
        # Column span copied out of the patch itself.
        start = x_margin
        end = x_step + x_margin
        if not exist_mat[j + 1, i]:
            start -= x_margin
        if not exist_mat[j + 1, i + 2]:
            end += x_margin
        return slice(start, end)
    def local_y_sel(j, i):
        # Row span copied out of the patch itself.
        start = y_margin
        end = y_step + y_margin
        if not exist_mat[j, i+1]:
            start -= y_margin
        if not exist_mat[j+2, i+1]:
            end += y_margin
        return slice(start, end)
    full_segment = np.zeros((size[0], y_num * y_step + 2 * y_margin, x_num * x_step + 2 * x_margin), dtype=np.uint32)
    for i in range(x_num):
        for j in range(y_num):
            if not exist_mat[j+1, i+1]:
                continue
            shift = (0, j * y_step, i * x_step)
            corner = tuple(shift[a] + start_corner[a] for a in range(3))
            f = np.load(subvolume_path(seg_dir, corner, 'npz'))
            vol = f['segmentation']
            f.close()
            idx_list = np.unique(vol)
            idx_glob_list = [min( networkx.node_connected_component(segment_graph, seg_id_dict.index((i, j, id_loc))))
                             for id_loc in idx_list]
            # FIX bug!!!! Remapping in place label-by-label may change a voxel
            # multiple times and produce erroneous merges; relabel_volume maps
            # all labels at once instead.
            relabel_vol, _ = relabel_volume(vol, idx_glob_list)
            full_segment[:, global_y_sel(j, i), global_x_sel(j, i)] = relabel_vol[:, local_y_sel(j, i), local_x_sel(j, i)]
    if output_dir:
        seg_path = storage.segmentation_path(output_dir, start_corner)  # FIXED: Use beg_corner instead
        storage.save_subvolume(full_segment, start_corner, seg_path)
    return full_segment, segment_graph, seg_id_dict
if __name__=="__main__":
    # Example usage
    seg_dir = "/home/morganlab/Documents/ixP11LGN/p11_1_exp10_consensus_rev/"
    full_segment, segment_graph, seg_id_dict = stitich_subvolume_grid(seg_dir, x_step=448, y_step=448, x_num=2, y_num=2,
                                                         size=(152, 512, 512),
                                                         output_dir="/home/morganlab/Documents/ixP11LGN/p11_1_exp10_full")
    seg_dict = {"seg_full": {"corner": (0, 0, 0), "vol": full_segment}}
    image_dir = "/home/morganlab/Documents/ixP11LGN/grayscale_ixP11_1_norm.h5"
    neuroglancer_visualize(seg_dict, image_dir)
    #%% Experiment 1
    # NOTE(review): stitich_subvolume_grid returns THREE values; the 2-target
    # unpacks below will raise "too many values to unpack" if executed.
    seg_dir = "/home/morganlab/Documents/ixP11LGN/p11_3_exp1/"
    full_segment, segment_graph = stitich_subvolume_grid(seg_dir, x_step=448, y_step=448, x_num=2, y_num = 2, size=(152, 512, 512))
    #%%
    seg_dict = {"seg_full": {"corner": (0, 0, 0), "vol": full_segment}}
    image_dir = "/home/morganlab/Documents/ixP11LGN/grayscale_ixP11_3_norm.h5"
    neuroglancer_visualize(seg_dict, image_dir)
    #%% Experiment 2
    seg_dir = "/home/morganlab/Documents/ixP11LGN/p11_1_exp10_consensus_rev/"
    full_segment, segment_graph = stitich_subvolume_grid(seg_dir, x_step=448, y_step=448, x_num=2, y_num=2, size=(152, 512, 512),
                                                           output_dir="/home/morganlab/Documents/ixP11LGN/p11_1_exp10_full")
    seg_dict = {"seg_full": {"corner": (0, 0, 0), "vol": full_segment}}
    image_dir = "/home/morganlab/Documents/ixP11LGN/grayscale_ixP11_1_norm.h5"
    neuroglancer_visualize(seg_dict, image_dir)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%% Garbage sites!!!!!
# WARNING(review): everything below is interactive scratch code (#%% cells).
# It runs at MODULE level, so importing this file executes it and raises
# NameError immediately: v1, v2, size, y_num, y_step, y_margin, x_num,
# x_step, x_margin, seg_dir, global_y_sel etc. are all undefined here
# (they only exist inside the commented-out cells / functions above).
# Also note the malformed 4-index slice v1[:, -35,-29, :] two lines down.
# Consider deleting this section or moving it into a notebook.
BASE = int(v1.max()+1)
composite_map = v1[:, -35,-29, :] + v2[:, 29:35, :] * BASE # note the label width
compo_idx, cnt = np.unique(composite_map, return_counts=True)
#%%
idx2, idx1 = np.divmod(compo_idx, BASE)
#%%
# For each v2 id, pick the v1 id with the largest overlap count.
merge_list_2 = []
size_list_2 = []
idx2_set = set(idx2)
for id2 in idx2_set:
    overlap_cnt = cnt[idx2 == id2]
    overlap_label = idx1[idx2 == id2]
    i = overlap_cnt.argmax()
    id1 = overlap_label[i]
    overlap_size = overlap_cnt[i]
    merge_list_2.append((id1, id2))
    size_list_2.append(overlap_size)
#%%
# Symmetric pass: for each v1 id, its dominant v2 partner.
merge_list_1 = []
size_list_1 = []
idx1_set = set(idx1)
for id1 in idx1_set:
    overlap_cnt = cnt[idx1 == id1]
    overlap_label = idx2[idx1 == id1]
    i = overlap_cnt.argmax()
    id2 = overlap_label[i]
    overlap_size = overlap_cnt[i]
    merge_list_1.append((id1, id2))
    size_list_1.append(overlap_size)
#%%
# Keep only mutual-best pairs (same logic as merge_segment above).
consensus_merge = list(set(merge_list_1) & set(merge_list_2))
consensus_size_list = [(size_list_1[merge_list_1.index(pair)], size_list_2[merge_list_2.index(pair)] ) for pair in consensus_merge]
#%%
threshold = 100 # minimum size for threshold
mask = [1 if (size_pair[1] > threshold and size_pair[0]>threshold) else 0 for size_pair in consensus_size_list]
#%% merge and remap index
merge_array = np.array(consensus_merge)
merge_array_filt = merge_array[np.array(mask, dtype=bool), :]
overlap_size_array = np.array(consensus_size_list)[np.array(mask, dtype=bool)]
overlap_size_array = overlap_size_array[:, 1] # to 1d array
#%%
merge_array, overlap_size_array, v2_new = merge_segment(v1, v2, (0, 0, 0), (0, 448, 0), size=(152, 512, 512))
#%%
# Early (pre-graph) stitching draft, superseded by stitich_subvolume_grid.
full_segment = np.zeros((size[0], y_num * y_step + 2 * y_margin, x_num * x_step + 2 * x_margin),dtype=np.uint16)
for i in range(x_num):
    for j in range(y_num):
        corner = (0, j*y_step, i*x_step)
        f = np.load(subvolume_path(seg_dir, corner, 'npz'))
        vol = f['segmentation']
        f.close()
        if i==0:
            pass
        else:
            corner1 = (0, j*y_step, (i - 1)*x_step)
            f = np.load(subvolume_path(seg_dir, corner1, 'npz'))
            v1 = f['segmentation']
            f.close()
            merge_array, overlap_size_array, vol_new = merge_segment(v1, vol, corner1, corner, size, overlap_d=3, threshold=100)
            vol = vol_new
        if j==0:
            pass
        else:
            corner1 = (0, (j - 1)*y_step, i*x_step)
            f = np.load(subvolume_path(seg_dir, corner1, 'npz'))
            v1 = f['segmentation']
            f.close()
            merge_array, overlap_size_array, vol_new = merge_segment(v1, vol, corner1, corner, size, overlap_d=3, threshold=100)
            vol = vol_new
        full_segment[:, global_y_sel(j), global_x_sel(i)] = vol[:, local_y_sel(j), local_x_sel(i)]
| [
"neuroglancer_segment_visualize.neuroglancer_visualize",
"numpy.unique",
"ffn.inference.storage.save_subvolume",
"numpy.divmod",
"networkx.Graph",
"networkx.connected_components",
"ffn.inference.segmentation.relabel_volume",
"numpy.array",
"numpy.uint64",
"numpy.zeros",
"ffn.inference.storage.se... | [((6102, 6115), 'numpy.uint64', 'np.uint64', (['v1'], {}), '(v1)\n', (6111, 6115), True, 'import numpy as np\n'), ((6125, 6138), 'numpy.uint64', 'np.uint64', (['v2'], {}), '(v2)\n', (6134, 6138), True, 'import numpy as np\n'), ((6529, 6573), 'numpy.unique', 'np.unique', (['composite_map'], {'return_counts': '(True)'}), '(composite_map, return_counts=True)\n', (6538, 6573), True, 'import numpy as np\n'), ((6591, 6617), 'numpy.divmod', 'np.divmod', (['compo_idx', 'BASE'], {}), '(compo_idx, BASE)\n', (6600, 6617), True, 'import numpy as np\n'), ((7729, 7754), 'numpy.array', 'np.array', (['consensus_merge'], {}), '(consensus_merge)\n', (7737, 7754), True, 'import numpy as np\n'), ((8757, 8804), 'numpy.zeros', 'np.zeros', (['(y_num + 2, x_num + 2)'], {'dtype': 'np.bool'}), '((y_num + 2, x_num + 2), dtype=np.bool)\n', (8765, 8804), True, 'import numpy as np\n'), ((11482, 11498), 'networkx.Graph', 'networkx.Graph', ([], {}), '()\n', (11496, 11498), False, 'import networkx\n'), ((11647, 11691), 'networkx.connected_components', 'networkx.connected_components', (['segment_graph'], {}), '(segment_graph)\n', (11676, 11691), False, 'import networkx\n'), ((13980, 14082), 'numpy.zeros', 'np.zeros', (['(size[0], y_num * y_step + 2 * y_margin, x_num * x_step + 2 * x_margin)'], {'dtype': 'np.uint32'}), '((size[0], y_num * y_step + 2 * y_margin, x_num * x_step + 2 *\n x_margin), dtype=np.uint32)\n', (13988, 14082), True, 'import numpy as np\n'), ((16103, 16146), 'neuroglancer_segment_visualize.neuroglancer_visualize', 'neuroglancer_visualize', (['seg_dict', 'image_dir'], {}), '(seg_dict, image_dir)\n', (16125, 16146), False, 'from neuroglancer_segment_visualize import neuroglancer_visualize\n'), ((16527, 16570), 'neuroglancer_segment_visualize.neuroglancer_visualize', 'neuroglancer_visualize', (['seg_dict', 'image_dir'], {}), '(seg_dict, image_dir)\n', (16549, 16570), False, 'from neuroglancer_segment_visualize import neuroglancer_visualize\n'), 
((17082, 17125), 'neuroglancer_segment_visualize.neuroglancer_visualize', 'neuroglancer_visualize', (['seg_dict', 'image_dir'], {}), '(seg_dict, image_dir)\n', (17104, 17125), False, 'from neuroglancer_segment_visualize import neuroglancer_visualize\n'), ((17412, 17456), 'numpy.unique', 'np.unique', (['composite_map'], {'return_counts': '(True)'}), '(composite_map, return_counts=True)\n', (17421, 17456), True, 'import numpy as np\n'), ((17482, 17508), 'numpy.divmod', 'np.divmod', (['compo_idx', 'BASE'], {}), '(compo_idx, BASE)\n', (17491, 17508), True, 'import numpy as np\n'), ((18670, 18695), 'numpy.array', 'np.array', (['consensus_merge'], {}), '(consensus_merge)\n', (18678, 18695), True, 'import numpy as np\n'), ((19061, 19163), 'numpy.zeros', 'np.zeros', (['(size[0], y_num * y_step + 2 * y_margin, x_num * x_step + 2 * x_margin)'], {'dtype': 'np.uint16'}), '((size[0], y_num * y_step + 2 * y_margin, x_num * x_step + 2 *\n x_margin), dtype=np.uint16)\n', (19069, 19163), True, 'import numpy as np\n'), ((7846, 7875), 'numpy.array', 'np.array', (['consensus_size_list'], {}), '(consensus_size_list)\n', (7854, 7875), True, 'import numpy as np\n'), ((7876, 7902), 'numpy.array', 'np.array', (['mask'], {'dtype': 'bool'}), '(mask, dtype=bool)\n', (7884, 7902), True, 'import numpy as np\n'), ((15294, 15345), 'ffn.inference.storage.segmentation_path', 'storage.segmentation_path', (['output_dir', 'start_corner'], {}), '(output_dir, start_corner)\n', (15319, 15345), True, 'import ffn.inference.storage as storage\n'), ((15387, 15447), 'ffn.inference.storage.save_subvolume', 'storage.save_subvolume', (['full_segment', 'start_corner', 'seg_path'], {}), '(full_segment, start_corner, seg_path)\n', (15409, 15447), True, 'import ffn.inference.storage as storage\n'), ((18787, 18816), 'numpy.array', 'np.array', (['consensus_size_list'], {}), '(consensus_size_list)\n', (18795, 18816), True, 'import numpy as np\n'), ((18817, 18843), 'numpy.array', 'np.array', (['mask'], {'dtype': 
'bool'}), '(mask, dtype=bool)\n', (18825, 18843), True, 'import numpy as np\n'), ((7790, 7816), 'numpy.array', 'np.array', (['mask'], {'dtype': 'bool'}), '(mask, dtype=bool)\n', (7798, 7816), True, 'import numpy as np\n'), ((14470, 14484), 'numpy.unique', 'np.unique', (['vol'], {}), '(vol)\n', (14479, 14484), True, 'import numpy as np\n'), ((15097, 15131), 'ffn.inference.segmentation.relabel_volume', 'relabel_volume', (['vol', 'idx_glob_list'], {}), '(vol, idx_glob_list)\n', (15111, 15131), False, 'from ffn.inference.segmentation import relabel_volume\n'), ((18731, 18757), 'numpy.array', 'np.array', (['mask'], {'dtype': 'bool'}), '(mask, dtype=bool)\n', (18739, 18757), True, 'import numpy as np\n'), ((9074, 9112), 'ffn.inference.storage.subvolume_path', 'subvolume_path', (['seg_dir', 'corner', '"""npz"""'], {}), "(seg_dir, corner, 'npz')\n", (9088, 9112), False, 'from ffn.inference.storage import subvolume_path\n'), ((9276, 9290), 'numpy.unique', 'np.unique', (['vol'], {}), '(vol)\n', (9285, 9290), True, 'import numpy as np\n'), ((9449, 9480), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.uint16'}), '(size, dtype=np.uint16)\n', (9457, 9480), True, 'import numpy as np\n'), ((14349, 14387), 'ffn.inference.storage.subvolume_path', 'subvolume_path', (['seg_dir', 'corner', '"""npz"""'], {}), "(seg_dir, corner, 'npz')\n", (14363, 14387), False, 'from ffn.inference.storage import subvolume_path\n'), ((19286, 19324), 'ffn.inference.storage.subvolume_path', 'subvolume_path', (['seg_dir', 'corner', '"""npz"""'], {}), "(seg_dir, corner, 'npz')\n", (19300, 19324), False, 'from ffn.inference.storage import subvolume_path\n'), ((9143, 9181), 'ffn.inference.storage.subvolume_path', 'subvolume_path', (['seg_dir', 'corner', '"""npz"""'], {}), "(seg_dir, corner, 'npz')\n", (9157, 9181), False, 'from ffn.inference.storage import subvolume_path\n'), ((19528, 19567), 'ffn.inference.storage.subvolume_path', 'subvolume_path', (['seg_dir', 'corner1', '"""npz"""'], {}), "(seg_dir, 
corner1, 'npz')\n", (19542, 19567), False, 'from ffn.inference.storage import subvolume_path\n'), ((19941, 19980), 'ffn.inference.storage.subvolume_path', 'subvolume_path', (['seg_dir', 'corner1', '"""npz"""'], {}), "(seg_dir, corner1, 'npz')\n", (19955, 19980), False, 'from ffn.inference.storage import subvolume_path\n'), ((10058, 10097), 'ffn.inference.storage.subvolume_path', 'subvolume_path', (['seg_dir', 'corner1', '"""npz"""'], {}), "(seg_dir, corner1, 'npz')\n", (10072, 10097), False, 'from ffn.inference.storage import subvolume_path\n'), ((10888, 10927), 'ffn.inference.storage.subvolume_path', 'subvolume_path', (['seg_dir', 'corner1', '"""npz"""'], {}), "(seg_dir, corner1, 'npz')\n", (10902, 10927), False, 'from ffn.inference.storage import subvolume_path\n')] |
#<NAME>
# This program displays a plot of the functions f(x)=x ,g(x)=x2 and h(x)=x3 in the range [0,4]
import matplotlib.pyplot as plt
import numpy as np

# Ten evenly spaced sample points covering the interval [0, 4].
x = np.linspace(0, 4, 10)

# Object-oriented matplotlib API: one figure holding a single axes.
fig, ax = plt.subplots()

linear = x
squared = x ** 2
cubed = x ** 3

# Draw the three curves; the squared one uses a dashed red line style.
ax.plot(linear, linear, label="linear")
ax.plot(linear, squared, "r--", label=" squared")
ax.plot(linear, cubed, label=" cubed")

# Legend and title for the plot.
ax.legend()
ax.set_title("My first plot")
fig.show()
# Wait for the user to press Enter so the window stays open.
input()
| [
"numpy.linspace",
"matplotlib.pyplot.subplots"
] | [((253, 274), 'numpy.linspace', 'np.linspace', (['(0)', '(4)', '(10)'], {}), '(0, 4, 10)\n', (264, 274), True, 'import numpy as np\n'), ((285, 299), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (297, 299), True, 'import matplotlib.pyplot as plt\n')] |
import sys
import os
import argparse
import Pyro4
import random
import logging
import torch
import numpy as np
import matplotlib.pyplot as plt
from shutil import rmtree
from pathlib import Path
from ggcnn_torch import GGCNNTorch
from utils.dataset_processing.grasp import detect_grasps
# Project data root; assumes a gmdata checkout under ~/Project -- TODO confirm.
GMDATA_PATH = Path.home().joinpath('Project/gmdata')
# Trained GG-CNN checkpoints (file naming is parsed by get_model below).
MODEL_PATH = GMDATA_PATH.joinpath('datasets/models/gg/small_data')
# Saved depth images ('image.npy' per case) consumed by the offline test().
TEST_PATH = GMDATA_PATH.joinpath('datasets/test/test_poses')
# Output directory where test() dumps its per-case results.
TEST_OUTPUT = GMDATA_PATH.joinpath('ggtest')
class Grasp2D(object):
    """
    A 2D grasp: the gripper projected onto the depth image.

    All coordinates here are image (x, y) coordinates, which is the reverse
    of the numpy array axis order.
    """
    def __init__(self, center, angle, depth, width=0.0, z_depth=0.0, quality=None, coll=None, gh=None):
        """A 2D grasp with an oblique z factor; p1 is assumed to be deeper.

        center : grasp center, in pixel coordinates
        angle  : angle between the grasp axis and the camera x axis, pointing
                 from the shallower p0 to the deeper p1, in (-pi, pi)
        depth  : depth at the grasp center
        width  : gripper width, in pixels
        z_depth: z distance from a grasp endpoint to the grasp center,
                 in meters (not pixels)
        quality: grasp quality score
        coll   : whether the grasp collides
        gh     : grasp rectangle height (from the jacquard format)
        """
        self.center = center
        self.angle = angle
        self.depth = depth
        self.width_px = width
        self.z_depth = z_depth
        self.quality = quality
        self.coll = coll
        self.gh = gh

    @property
    def norm_angle(self):
        """Angle normalized into [-pi/2, pi/2) by adding/subtracting pi."""
        a = self.angle
        while a >= np.pi/2:
            a = a-np.pi
        while a < -np.pi/2:
            a = a+np.pi
        return a

    @property
    def axis(self):
        """Returns the grasp axis as a unit vector [cos(angle), sin(angle)]."""
        return np.array([np.cos(self.angle), np.sin(self.angle)])

    @property
    def endpoints(self):
        """Returns the two grasp endpoints (integer pixel coordinates)."""
        p0 = self.center - (float(self.width_px) / 2) * self.axis
        p1 = self.center + (float(self.width_px) / 2) * self.axis
        # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented drop-in replacement.
        p0 = p0.astype(int)
        p1 = p1.astype(int)
        return p0, p1

    @classmethod
    def from_jaq(cls, jaq_string):
        """Build a grasp from a jacquard-format string 'x;y;theta;w;h' plus a
        trailing character (stripped by the [:-1] below); theta is in degrees.
        """
        jaq_string = jaq_string.strip()
        x, y, theta, w, h = [float(v) for v in jaq_string[:-1].split(';')]
        return cls(np.array([x, y]), theta/180.0*np.pi, 0, w, gh=h)
def plot_output(depth_img, grasp_q_img, grasp_angle_img, no_grasps=1, grasp_width_img=None):
    """
    Plot the output of a GG-CNN on top of the depth image.
    :param depth_img: Depth Image
    :param grasp_q_img: Q output of GG-CNN
    :param grasp_angle_img: Angle output of GG-CNN
    :param no_grasps: Maximum number of grasps to detect and plot
    :param grasp_width_img: (optional) Width output of GG-CNN
    :return: list of detected grasps (also drawn onto the current figure)
    """
    # FIX: no_grasps was previously ignored (hard-coded to 1 in this call).
    gs = detect_grasps(grasp_q_img, grasp_angle_img,
                       width_img=grasp_width_img, no_grasps=no_grasps)
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(1, 1, 1)
    ax.imshow(depth_img, cmap='gray')
    for g in gs:
        g.plot(ax)
    ax.set_title('Depth')
    ax.axis('off')
    return gs
def get_model(model_path):
    """Return the checkpoint file with the largest iteration number.

    File names are expected to split on '_' into exactly four parts, the
    second being the integer iteration (e.g. 'epoch_120_iou_0.87').
    Returns None when no such file exists.
    """
    best_iter = 0
    best_file = None
    for entry in Path(model_path).resolve().iterdir():
        parts = entry.name.split('_')
        if len(parts) != 4:
            continue  # skip anything not matching the checkpoint pattern
        iteration = int(parts[1])
        if iteration > best_iter:
            best_iter, best_file = iteration, entry
    return best_file
@Pyro4.expose
class Planer(object):
    """Pyro4-exposed grasp planner wrapping a GG-CNN torch model."""
    def __init__(self, model_path):
        # Load the trained GG-CNN network from the given checkpoint path.
        self.ggcnn = GGCNNTorch(model_path)

    def plan(self, image, width):
        """Predict a grasp on a depth image.

        :param image: depth image (copied before prediction)
        :param width: gripper width in pixels, overriding the detected width
        :return: [p0, p1, depth, depth, q] for the best candidate, where
            p0/p1 are the integer endpoint coordinates and q the quality
            score; None when no grasp was found in any attempt.
        """
        # Fixed seeds so repeated calls on the same image are deterministic.
        random.seed(0)
        np.random.seed(0)
        im = image.copy()
        # Run the network several times and keep the best-scoring candidate.
        try_num = 5
        qs = []
        gs = []
        for _ in range(try_num):
            try:
                points_out, ang_out, width_out, depth_out = self.ggcnn.predict(im, 300)
                ggs = detect_grasps(points_out, ang_out, width_img=width_out, no_grasps=1)
                if len(ggs) == 0:
                    continue
                g = Grasp2D.from_jaq(ggs[0].to_jacquard(scale=1))
                g.width_px = width
                # Quality is read from the Q map at the grasp center
                # (center is (x, y), the array is indexed (row, col)).
                q = points_out[int(g.center[1]), int(g.center[0])]
            except Exception as e:
                print('--------------------出错了----------------------')
                print(e)
            else:
                qs.append(q)
                gs.append(g)
                # if q > 0.9:
                #     break
        if len(gs) == 0:
            return None
        # Pick the attempt with the highest quality score.
        g = gs[np.argmax(qs)]
        q = qs[np.argmax(qs)]
        p0, p1 = g.endpoints
        print('-------------------------')
        print([p0, p1, g.depth, g.depth, q])
        return [p0, p1, g.depth, g.depth, q]
def main(args):
    """Serve a grasp Planer over Pyro4 on port 6665 (blocking).

    The model is resolved from MODEL_PATH/<args.model_name> via get_model.
    """
    checkpoint = get_model(MODEL_PATH.joinpath(args.model_name))
    planner = Planer(checkpoint)
    # The clients serialize numpy arrays, so the pickle serializer is needed.
    Pyro4.config.SERIALIZERS_ACCEPTED.add('pickle')
    Pyro4.Daemon.serveSimple({planner: 'grasp'}, ns=False, host='', port=6665)
def test():
    """Offline smoke test: run the GG-CNN on every saved depth image under
    TEST_PATH and dump the raw network outputs, the detected grasps and the
    planner result into TEST_OUTPUT/<case>/.
    """
    model_path = get_model(MODEL_PATH.joinpath('gmd'))
    pp = Planer(model_path)
    ggcnn = GGCNNTorch(model_path)
    for p in TEST_PATH.iterdir():
        # Only directories containing a saved depth image are test cases.
        if p.joinpath('image.npy').exists():
            test_im = p.joinpath('image.npy')
            out_path = TEST_OUTPUT.joinpath(p.name)
            depth = np.load(test_im)
            points_out, ang_out, width_out, depth_out = ggcnn.predict(depth, 300)
            # Recreate the per-case output folder from scratch.
            if out_path.exists():
                rmtree(out_path)
            out_path.mkdir(parents=True)
            np.save((out_path.joinpath('points_out.npy')), points_out)
            np.save((out_path.joinpath('ang_out.npy')), ang_out)
            np.save((out_path.joinpath('width_out.npy')), width_out)
            np.save((out_path.joinpath('depth_out.npy')), depth_out)
            np.save((out_path.joinpath('depth.npy')), depth)
            # Visual check: draw the detected grasps over the depth output.
            gs = plot_output(depth_out, points_out, ang_out, 1, width_out)
            plt.savefig(out_path.joinpath('result.png'))
            gj = [g.to_jacquard() for g in gs]
            np.save((out_path.joinpath('gj.npy')), gj)
            # Also exercise the full planner path on the same depth image.
            rr = pp.plan(depth, 90)
            if rr is not None:
                np.save((out_path.joinpath('pp.npy')), np.array(rr))
if __name__ == "__main__":
    # CLI entry point: '-t' runs the offline test, otherwise the Pyro4
    # planning service is started with the selected model.
    parser = argparse.ArgumentParser(description='dataset to npz')
    parser.add_argument('-m', '--model-name', metavar='gmd', type=str, default='gmd',
                        help='使用的模型的名字')
    parser.add_argument('-t', '--test', action='store_true')
    args = parser.parse_args()
    if not args.test:
        main(args)
    else:
        test()
| [
"ggcnn_torch.GGCNNTorch",
"argparse.ArgumentParser",
"pathlib.Path",
"Pyro4.config.SERIALIZERS_ACCEPTED.add",
"pathlib.Path.home",
"numpy.argmax",
"random.seed",
"utils.dataset_processing.grasp.detect_grasps",
"shutil.rmtree",
"numpy.array",
"matplotlib.pyplot.figure",
"Pyro4.Daemon.serveSimpl... | [((2553, 2640), 'utils.dataset_processing.grasp.detect_grasps', 'detect_grasps', (['grasp_q_img', 'grasp_angle_img'], {'width_img': 'grasp_width_img', 'no_grasps': '(1)'}), '(grasp_q_img, grasp_angle_img, width_img=grasp_width_img,\n no_grasps=1)\n', (2566, 2640), False, 'from utils.dataset_processing.grasp import detect_grasps\n'), ((2671, 2699), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2681, 2699), True, 'import matplotlib.pyplot as plt\n'), ((4611, 4658), 'Pyro4.config.SERIALIZERS_ACCEPTED.add', 'Pyro4.config.SERIALIZERS_ACCEPTED.add', (['"""pickle"""'], {}), "('pickle')\n", (4648, 4658), False, 'import Pyro4\n'), ((4663, 4732), 'Pyro4.Daemon.serveSimple', 'Pyro4.Daemon.serveSimple', (["{pp: 'grasp'}"], {'ns': '(False)', 'host': '""""""', 'port': '(6665)'}), "({pp: 'grasp'}, ns=False, host='', port=6665)\n", (4687, 4732), False, 'import Pyro4\n'), ((4842, 4864), 'ggcnn_torch.GGCNNTorch', 'GGCNNTorch', (['model_path'], {}), '(model_path)\n', (4852, 4864), False, 'from ggcnn_torch import GGCNNTorch\n'), ((6016, 6069), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""dataset to npz"""'}), "(description='dataset to npz')\n", (6039, 6069), False, 'import argparse\n'), ((301, 312), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (310, 312), False, 'from pathlib import Path\n'), ((3287, 3309), 'ggcnn_torch.GGCNNTorch', 'GGCNNTorch', (['model_path'], {}), '(model_path)\n', (3297, 3309), False, 'from ggcnn_torch import GGCNNTorch\n'), ((3353, 3367), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (3364, 3367), False, 'import random\n'), ((3376, 3393), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3390, 3393), True, 'import numpy as np\n'), ((2064, 2080), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (2072, 2080), True, 'import numpy as np\n'), ((2915, 2931), 'pathlib.Path', 'Path', (['model_path'], {}), 
'(model_path)\n', (2919, 2931), False, 'from pathlib import Path\n'), ((4261, 4274), 'numpy.argmax', 'np.argmax', (['qs'], {}), '(qs)\n', (4270, 4274), True, 'import numpy as np\n'), ((4291, 4304), 'numpy.argmax', 'np.argmax', (['qs'], {}), '(qs)\n', (4300, 4304), True, 'import numpy as np\n'), ((5062, 5078), 'numpy.load', 'np.load', (['test_im'], {}), '(test_im)\n', (5069, 5078), True, 'import numpy as np\n'), ((1536, 1554), 'numpy.cos', 'np.cos', (['self.angle'], {}), '(self.angle)\n', (1542, 1554), True, 'import numpy as np\n'), ((1556, 1574), 'numpy.sin', 'np.sin', (['self.angle'], {}), '(self.angle)\n', (1562, 1574), True, 'import numpy as np\n'), ((3632, 3700), 'utils.dataset_processing.grasp.detect_grasps', 'detect_grasps', (['points_out', 'ang_out'], {'width_img': 'width_out', 'no_grasps': '(1)'}), '(points_out, ang_out, width_img=width_out, no_grasps=1)\n', (3645, 3700), False, 'from utils.dataset_processing.grasp import detect_grasps\n'), ((5211, 5227), 'shutil.rmtree', 'rmtree', (['out_path'], {}), '(out_path)\n', (5217, 5227), False, 'from shutil import rmtree\n'), ((5960, 5972), 'numpy.array', 'np.array', (['rr'], {}), '(rr)\n', (5968, 5972), True, 'import numpy as np\n')] |
import os
import json
import math
import numpy as np
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
class Plotter:
    """Aggregates simulation reroute results (tripinfo-style XML) and
    contextual metrics (JSON) across days and cities, computing 95%
    confidence intervals for plotting."""
    # Human-readable unit/label for each metric key, for use in plot axes.
    METRIC_UNIT = {'duration' : 'seconds',
                   'route_length': 'meters',
                   'time_loss': 'seconds',
                   'traffic' : 'traffic load score',
                   'crimes': 'insecurity level',
                   'crashes': 'accident probability'}
def read_xml_file(self, file):
f = open(file)
data = f.read()
soup = BeautifulSoup(data, "xml")
f.close()
return soup
def read_json_file(self, file):
with open(file, "r") as file:
return json.load(file)
def mean_confidence_interval(self, data, confidence=0.95):
# Ref: https://stackoverflow.com/questions/15033511/compute-a-confidence-interval-from-sample-data
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), np.std(a)
h = 1.96 * (se/math.sqrt(n))
return (m, h)
def get_reroute_metrics(self, ires):
duration, route_length, time_loss = [], [], []
tripinfos = ires.find('tripinfos')
for info in tripinfos.findAll('tripinfo'):
try:
dur = float(info['duration'])
rou = float(info['routeLength'])
tim = float(info['timeLoss'])
if dur > 10.00 and rou > 50.00:
duration.append(dur)
route_length.append(rou)
time_loss.append(tim)
except Exception:
pass
return np.mean(duration), np.mean(route_length), np.mean(time_loss)
def calculate_reroute_metrics(self, accumulated):
return {'duration': self.mean_confidence_interval(accumulated['duration']),
'route_length': self.mean_confidence_interval(accumulated['route_length']),
'time_loss': self.mean_confidence_interval(accumulated['time_loss'])}
def read_reroute_files(self, results, days, cities):
for city in cities:
for folder in os.listdir('./output/data/monday/{0}'.format(city)):
accumulated = {'duration': [],
'route_length': [],
'time_loss': []}
for day in days:
for iterate in range(20):
ires = self.read_xml_file('./output/data/{0}/{1}/{2}/{3}_reroute.xml'.format(day, city, folder, iterate))
dur, rou, tim = self.get_reroute_metrics(ires)
accumulated['duration'].append(dur)
accumulated['route_length'].append(rou)
accumulated['time_loss'].append(tim)
results['reroute_{0}_{1}'.format(city, folder)] = self.calculate_reroute_metrics(accumulated)
return results
def get_contextual_metrics(self, ires):
return float(ires['traffic']['mean']), float(ires['crimes']['mean']), float(ires['crashes']['mean'])
def calculate_contextual_metrics(self, accumulated):
return {'traffic': self.mean_confidence_interval(accumulated['traffic']),
'crimes': self.mean_confidence_interval(accumulated['crimes']),
'crashes': self.mean_confidence_interval(accumulated['crashes'])}
def read_contextual_files(self, results, days, cities):
for city in cities:
for folder in os.listdir('./output/data/monday/{0}'.format(city)):
accumulated = {'traffic': [],
'crimes': [],
'crashes': []}
for day in days:
for iterate in range(20):
ires = self.read_json_file('./output/data/{0}/{1}/{2}/{3}_metrics.json'.format(day, city, folder, iterate))
tra, cri, cra = self.get_contextual_metrics(ires)
accumulated['traffic'].append(tra)
accumulated['crimes'].append(cri)
accumulated['crashes'].append(cra)
results['context_{0}_{1}'.format(city, folder)] = self.calculate_contextual_metrics(accumulated)
return results
def save_calculation(self, results, file='all'):
if not os.path.exists('results'):
os.makedirs('results')
with open('results/{0}_results.json'.format(file), "w") as write_file:
json.dump(results, write_file, indent=4)
def read_calculation(self):
results = {}
for file in os.listdir('results/'):
with open('results/{0}'.format(file), "r") as write_file:
results[file] = json.load(write_file)
return results
def filter_keys(self, results, sfilter='context'):
filtered_keys = [x for x in results.keys() if sfilter in x]
filtered_dict = {}
for f in filtered_keys:
filtered_dict[f] = results[f]
metrics = results[filtered_keys[0]].keys()
return filtered_dict, metrics
def separate_mean_std(self, just_to_plot, metric, keys_order, cities):
means, stds = [], []
for city in cities:
for key in keys_order:
k = [x for x in just_to_plot if key in x and city in x][0]
means.append(just_to_plot[k][metric][0])
stds.append(just_to_plot[k][metric][1])
return means, stds
def plot_dots(self, just_to_plot, metric, file, cities):
if not os.path.exists('metric_plots'):
os.makedirs('metric_plots')
plt.clf()
ax = plt.subplot(111)
keys_order = ['traffic', 'crimes', 'crashes', 'mtraffic', 'mcrimes', 'mcrashes', 'same', 'traandcri', 'traandcra', 'craandtra', 'craandcri', 'criandtra', 'criandcra', 'none']
xlabels = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'None']
means, stds = self.separate_mean_std(just_to_plot, metric, keys_order, cities)
colorlist = ['#1d4484', '#7c0404', '#86a4ca', '#5dddd0', '#874a97', '#e6f0fc', '#424564']
for indx, city in enumerate(cities):
plt.errorbar([x for x in range(14)], means[indx*14:(indx+1)*14], yerr=stds[indx*14:(indx+1)*14], fmt='o-.', color=colorlist[indx], label=city.capitalize(), capsize=5)
plt.xlabel('Execution Configuration')
plt.ylabel('{0} ({1})'.format(metric.replace('_', ' ').capitalize(), self.METRIC_UNIT[metric]))
plt.xticks(np.arange(0, len(xlabels)), xlabels, rotation=50)
ax.legend()
plt.savefig('metric_plots/{0}_{1}.pdf'.format(file, metric), bbox_inches="tight", format='pdf')
def plot(self, results, file, cities):
contextual, cmetrics = self.filter_keys(results)
mobility, mmetrics = self.filter_keys(results, sfilter='reroute')
for metric in cmetrics:
self.plot_dots(contextual, metric, file, cities)
for metric in mmetrics:
self.plot_dots(mobility, metric, file, cities)
def main(self, cities=['austin']):
results = {}
days = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']
# print('Read reroute files')
# self.read_reroute_files(results, days, cities)
# print('Read contextual files')
# self.read_contextual_files(results, days, cities)
# print('Save calculations')
# self.save_calculation(results)
# for day in days:
# print(day)
# results = {}
# print('Read reroute files')
# self.read_reroute_files(results, [day], cities)
# print('Read contextual files')
# self.read_contextual_files(results, [day], cities)
# print('Save calculations')
# self.save_calculation(results, day)
print('Read calculation')
results = self.read_calculation()
print('Plot')
for res in results:
self.plot(results[res], res, cities)
| [
"numpy.mean",
"os.path.exists",
"os.listdir",
"os.makedirs",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"math.sqrt",
"bs4.BeautifulSoup",
"numpy.array",
"numpy.std",
"json.load",
"matplotlib.pyplot.subplot",
"json.dump"
] | [((428, 454), 'bs4.BeautifulSoup', 'BeautifulSoup', (['data', '"""xml"""'], {}), "(data, 'xml')\n", (441, 454), False, 'from bs4 import BeautifulSoup\n'), ((3766, 3788), 'os.listdir', 'os.listdir', (['"""results/"""'], {}), "('results/')\n", (3776, 3788), False, 'import os\n'), ((4650, 4659), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4657, 4659), True, 'import matplotlib.pyplot as plt\n'), ((4667, 4683), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (4678, 4683), True, 'import matplotlib.pyplot as plt\n'), ((5337, 5374), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Execution Configuration"""'], {}), "('Execution Configuration')\n", (5347, 5374), True, 'import matplotlib.pyplot as plt\n'), ((560, 575), 'json.load', 'json.load', (['file'], {}), '(file)\n', (569, 575), False, 'import json\n'), ((751, 765), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (759, 765), True, 'import numpy as np\n'), ((789, 799), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (796, 799), True, 'import numpy as np\n'), ((801, 810), 'numpy.std', 'np.std', (['a'], {}), '(a)\n', (807, 810), True, 'import numpy as np\n'), ((1306, 1323), 'numpy.mean', 'np.mean', (['duration'], {}), '(duration)\n', (1313, 1323), True, 'import numpy as np\n'), ((1325, 1346), 'numpy.mean', 'np.mean', (['route_length'], {}), '(route_length)\n', (1332, 1346), True, 'import numpy as np\n'), ((1348, 1366), 'numpy.mean', 'np.mean', (['time_loss'], {}), '(time_loss)\n', (1355, 1366), True, 'import numpy as np\n'), ((3533, 3558), 'os.path.exists', 'os.path.exists', (['"""results"""'], {}), "('results')\n", (3547, 3558), False, 'import os\n'), ((3563, 3585), 'os.makedirs', 'os.makedirs', (['"""results"""'], {}), "('results')\n", (3574, 3585), False, 'import os\n'), ((3663, 3703), 'json.dump', 'json.dump', (['results', 'write_file'], {'indent': '(4)'}), '(results, write_file, indent=4)\n', (3672, 3703), False, 'import json\n'), ((4581, 4611), 'os.path.exists', 
'os.path.exists', (['"""metric_plots"""'], {}), "('metric_plots')\n", (4595, 4611), False, 'import os\n'), ((4619, 4646), 'os.makedirs', 'os.makedirs', (['"""metric_plots"""'], {}), "('metric_plots')\n", (4630, 4646), False, 'import os\n'), ((828, 840), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (837, 840), False, 'import math\n'), ((3872, 3893), 'json.load', 'json.load', (['write_file'], {}), '(write_file)\n', (3881, 3893), False, 'import json\n')] |
#--------------------------------------------------------------#
import universe
from universe.spaces import PointerEvent
#--------------------------------------------------------------#
import numpy as np
import tensorflow as tf
#--------------------------------------------------------------#
from AC import Actor, Critic
from env import createEnv
from utils import preprocess_screen, visualize
from utils import transform_acton, repeat_action, env_wrapper
#--------------------------------------------------------------#
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
universe.configure_logging()
#--------------------------------------------------------------#
import os
if not os.path.exists('_data'): os.makedirs('_data')
#--------------------------------------------------------------#
# Create the Slither.io environment (one remote instance).
env = createEnv(env_id='internet.SlitherIO-v0', remotes=1)
#--------------------------------------------------------------#
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
actor = Actor(sess, n_features=[150, 250, 1], n_actions=8)
critic = Critic(sess, n_features=[150, 250, 1])
init = tf.global_variables_initializer()
sess.run(init)
saver = tf.train.Saver()
saver.restore(sess, '_data/model.ckpt') # load pretrained model
#--------------------------------------------------------------#
MAX_EPISODE = 500
PRINT = bool(0)   # verbose per-step logging
RENDER = bool(1)  # render the environment window
#--------------------------------------------------------------#
env.reset()
READY = False  # becomes True once a full 300x500 RGB observation arrives
t = 0          # step counter within the current episode
while True:
    if RENDER: env.render()
    if not READY:
        a = 4
        action = [transform_acton(a)] # dummy action
        s, r, done = env_wrapper(env.step(action))
        # when observation is ready, start playing
        if np.shape(s) == (300, 500, 3):
            screen = preprocess_screen(s)
            READY = True
    else:
        probs, a = actor.choose_action(screen, PLAY=True)
        action = [transform_acton(a)]
        s, r, done = env_wrapper(env.step(action))
        if done:
            # episode ended: reset the counter and wait for a fresh frame
            t = 0
            READY = False
            screen = np.zeros((150, 250, 1))
        else:
            screen = preprocess_screen(s)
    # Get screen image
    if PRINT:
        print('---------------')
        print('Time step:', t)
        print('Action :', a)
        print('Reward:', r)
    if (t % 5 == 0):
        visualize(screen=screen, time=t)
    # BUG FIX: advance the step counter.  It was never incremented, so
    # `t % 5 == 0` held on every iteration and visualize() ran each step.
    t += 1
#--------------------------------------------------------------#
#--------------------------------------------------------------#
| [
"logging.getLogger",
"os.path.exists",
"numpy.shape",
"os.makedirs",
"AC.Critic",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"numpy.zeros",
"utils.preprocess_screen",
"utils.visualize",
"AC.Actor",
"utils.transform_acton",
"tensorflow.ConfigProto",
"universe.confi... | [((548, 575), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (565, 575), False, 'import logging\n'), ((606, 634), 'universe.configure_logging', 'universe.configure_logging', ([], {}), '()\n', (632, 634), False, 'import universe\n'), ((836, 888), 'env.createEnv', 'createEnv', ([], {'env_id': '"""internet.SlitherIO-v0"""', 'remotes': '(1)'}), "(env_id='internet.SlitherIO-v0', remotes=1)\n", (845, 888), False, 'from env import createEnv\n'), ((1033, 1083), 'AC.Actor', 'Actor', (['sess'], {'n_features': '[150, 250, 1]', 'n_actions': '(8)'}), '(sess, n_features=[150, 250, 1], n_actions=8)\n', (1038, 1083), False, 'from AC import Actor, Critic\n'), ((1093, 1131), 'AC.Critic', 'Critic', (['sess'], {'n_features': '[150, 250, 1]'}), '(sess, n_features=[150, 250, 1])\n', (1099, 1131), False, 'from AC import Actor, Critic\n'), ((1139, 1172), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1170, 1172), True, 'import tensorflow as tf\n'), ((1196, 1212), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1210, 1212), True, 'import tensorflow as tf\n'), ((717, 740), 'os.path.exists', 'os.path.exists', (['"""_data"""'], {}), "('_data')\n", (731, 740), False, 'import os\n'), ((742, 762), 'os.makedirs', 'os.makedirs', (['"""_data"""'], {}), "('_data')\n", (753, 762), False, 'import os\n'), ((982, 1023), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'log_device_placement': '(True)'}), '(log_device_placement=True)\n', (996, 1023), True, 'import tensorflow as tf\n'), ((1556, 1574), 'utils.transform_acton', 'transform_acton', (['a'], {}), '(a)\n', (1571, 1574), False, 'from utils import transform_acton, repeat_action, env_wrapper\n'), ((1687, 1698), 'numpy.shape', 'np.shape', (['s'], {}), '(s)\n', (1695, 1698), True, 'import numpy as np\n'), ((1729, 1749), 'utils.preprocess_screen', 'preprocess_screen', (['s'], {}), '(s)\n', (1746, 1749), False, 'from utils import 
preprocess_screen, visualize\n'), ((1837, 1855), 'utils.transform_acton', 'transform_acton', (['a'], {}), '(a)\n', (1852, 1855), False, 'from utils import transform_acton, repeat_action, env_wrapper\n'), ((1952, 1975), 'numpy.zeros', 'np.zeros', (['(150, 250, 1)'], {}), '((150, 250, 1))\n', (1960, 1975), True, 'import numpy as np\n'), ((1996, 2016), 'utils.preprocess_screen', 'preprocess_screen', (['s'], {}), '(s)\n', (2013, 2016), False, 'from utils import preprocess_screen, visualize\n'), ((2176, 2208), 'utils.visualize', 'visualize', ([], {'screen': 'screen', 'time': 't'}), '(screen=screen, time=t)\n', (2185, 2208), False, 'from utils import preprocess_screen, visualize\n')] |
import numpy as np
import skimage.feature as ski
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from aniso.derivatives import apply_anisotropic_smoothing
from aniso.utils import as_ndarrays, check_2d, eigsorted
# class definitions
class StructureTensor2D(object):
    """Container for the structure tensor field of a 2D image.

    At each pixel (x, y) the structure tensor of an image I is the outer
    product grad(I) * grad(I).T:

        | Ixx  Ixy |
        | Ixy  Iyy |
    """

    def __init__(self, t11, t12, t22):
        """Validate and store the three independent tensor components.

        t11, t12, t22 follow the layout produced by
        skimage.feature.structure_tensor.
        """
        self.check(t11, t12, t22)
        self.t11 = t11
        self.t12 = t12
        self.t22 = t22
        # All three components share a common (ny, nx) shape.
        self.shape = t11.shape

    def check(self, t11, t12, t22):
        """Raise if the components are not valid 2D ndarrays."""
        t11, t12, t22 = as_ndarrays(t11, t12, t22)
        check_2d(t11, t12, t22)

    # alternate constructor methods.
    @classmethod
    def fromimage(cls, image, sigma=1.0):
        """Build a structure tensor field from an input image.

        Parameters
        ----------
        image: array_like
            Input image (2D grayscale or 3D, where only channel 0 is used).
        sigma: float, optional.
            Standard deviation of the Gaussian kernel.
            (See skimage.feature.structure_tensor)

        Returns
        -------
        output: StructureTensor2D
            Instance of structure tensor.
        """
        image = np.asarray(image)
        if image.ndim not in (2, 3):
            raise ValueError('Image must be 2 or 3-dimensional!')
        # Normalize to the image's peak magnitude before differentiating.
        image = image / abs(image.max())
        channel = image[:, :, 0] if image.ndim == 3 else image
        t11, t12, t22 = ski.structure_tensor(channel, sigma=sigma, mode='constant', cval=0.0)
        return cls(t11, t12, t22)

    @classmethod
    def isotropic(cls, nx, ny):
        """Return an isotropic, spatially invariant tensor field.

        Parameters
        ----------
        nx, ny: int, required.
            Dimensions of the 2D field.

        Returns
        -------
        output: StructureTensor2D
            Identity tensor (ones on the diagonal) at every pixel.
        """
        return cls(np.ones((ny, nx)), np.zeros((ny, nx)), np.ones((ny, nx)))

    @property
    def structure_tensor(self):
        """The components as a 3-tuple in order (Ixx, Ixy, Iyy)."""
        return self.t11, self.t12, self.t22

    def get_tensor_at(self, ix, iy):
        """Return the (t11, t12, t22) values at column *ix*, row *iy*."""
        return self.t11[iy, ix], self.t12[iy, ix], self.t22[iy, ix]

    def plot_tensor(self, image, ax=None, interval=0.05, color='g'):
        """Overlay orientation ellipses on *image*, sampled every
        ``interval`` fraction of the larger image dimension."""
        if ax is None:
            ax = plt.gca()
        image = np.asarray(image)
        if image.shape[:2] != self.shape:
            raise ValueError('Image does not match structure tensor dimensions.')
        ax.imshow(image)
        ny, nx = self.shape
        step = int(max(self.shape) * interval)
        for row in range(0, ny, step):
            for col in range(0, nx, step):
                tensor = [[self.t11[row][col], self.t12[row][col]],
                          [self.t12[row][col], self.t22[row][col]]]
                vals, vecs = eigsorted(tensor)
                ellipse = _draw_eig_ellipse([col, row], vals, vecs, scale=20.0,
                                             fill=False, color=color)
                ax.add_artist(ellipse)
        return ax
class DiffusionTensor2D(StructureTensor2D):
    """Spatially variant diffusion tensor field derived from an image.

    Wraps the (d11, d12, d22) components used to drive coherence-preserving
    anisotropic smoothing.
    """
    def __init__(self, t11, t12, t22):
        # Components are validated and stored by the parent class.
        super().__init__(t11, t12, t22)
    @classmethod
    def fromimage(cls, image, sigma=10.0, weighted=False):
        """ Construct a diffusion tensor from an input image.
        Parameters
        ----------
        image: array_like
            Input image. For 3D (RGB) input only channel 0 drives the tensor.
        sigma: float, optional.
            standard deviation of Gaussian kernel.
            (See skimage.feature.structure_tensor)
        weighted: boolean, optional
            Defines how eigenvalues of diffusion tensor are
            computed. weighted=False does not produce isotropic
            diffusion tensors.
        Returns
        -------
        output: DiffusionTensor2D
            Instance of diffusion tensor.
        """
        image = np.asarray(image)
        if not (image.ndim == 2 or image.ndim == 3):
            raise ValueError('Image must be 2 or 3-dimensional!')
        if image.ndim == 3:
            t11, t12, t22 = cls.diffusion_tensor(image[:, :, 0], sigma=sigma, weighted=weighted)
        else:
            t11, t12, t22 = cls.diffusion_tensor(image, sigma=sigma, weighted=weighted)
        return cls(t11, t12, t22)
    @staticmethod
    def diffusion_tensor(image, sigma=10.0, weighted=False):
        """ Compute spatially variant diffusion tensor.
        For a given image I, the structure tensor is given by
        | Ixx Ixy |
        | Ixy Iyy |
        with eigenvectors u and v. Eigenvectors v correspond to the smaller
        eigenvalue and point in directions of maximum coherence.
        The diffusion tensor D is given by, D = v * transpose(v).
        This tensor is designed for anisotropic smoothing.
        Local D tensors are singular.
        Parameters
        ----------
        image: array_like
            Input image (expected 2D).
        sigma: float, optional
            Standard deviation of Gaussian kernel.
            (See skimage.feature.structure_tensor)
        weighted: boolean, optional
            Defines how eigenvalues of diffusion tensor are
            computed. weighted=False does not produce isotropic
            diffusion tensors.
        Returns
        -------
        d11, d12, d22: ndarray
            Independent components of diffusion tensor.
        """
        image = np.asarray(image)
        if image.ndim != 2:
            # NOTE(review): this branch is a no-op (asarray of an ndarray);
            # it looks like a channel-selection step was intended -- confirm.
            image = np.asarray(image)
        # normalize image
        image = image / abs(image.max())
        # Compute gradient of scalar field using derivative filter
        Ixx, Ixy, Iyy = ski.structure_tensor(image, sigma=sigma, mode='nearest', cval=0.0)
        d11, d12, d22 = _get_diffusion_tensor(Ixx, Ixy, Iyy, weighted=weighted)
        return d11, d12, d22
    def smooth(self, image, alpha=10, maxiter=500):
        """ Anisotropically smooth *image* along the stored tensor field.
        RGB images are smoothed one channel at a time; grayscale images in
        a single pass.  The actual diffusion is delegated to
        apply_anisotropic_smoothing with this instance's (t11, t12, t22).
        Parameters
        ----------
        image: array_like
            Input image, 2D grayscale or 3D RGB, matching self.shape.
        alpha: float, optional
            Smoothing strength forwarded to apply_anisotropic_smoothing.
        maxiter: int, optional
            Maximum number of solver iterations.
        Returns
        -------
        smooth_image: ndarray
            Smoothed image with the same shape as *image*.
        """
        if image.ndim == 3:
            smooth_image = np.zeros_like(image)
            # loop over RGB channels
            for i in range(3):
                smooth_image[:, :, i] = apply_anisotropic_smoothing(image[:, :, i], self.t11, self.t12, self.t22, alpha=alpha, maxiter=maxiter)
        else:
            smooth_image = apply_anisotropic_smoothing(image, self.t11, self.t12, self.t22, alpha=alpha, maxiter=maxiter)
        return smooth_image
# plotting utilities
def _draw_eig_ellipse(pos, vals, vecs, scale, **kwargs):
    """Build a matplotlib Ellipse oriented along the eigenvectors *vecs*.

    Axis lengths are derived from the sorted eigenvalues *vals* and
    normalized so they sum to ``0.9 * scale``; extra *kwargs* are
    forwarded to matplotlib.patches.Ellipse.
    """
    eps = 1e-6  # guard against division by zero for vanishing eigenvalues
    inv_first = 1.0 / (np.sqrt(vals[0]) + eps)
    inv_second = 1.0 / (np.sqrt(vals[1]) + eps)
    total = inv_first + inv_second
    # Normalized, slightly shrunken (factor 0.9) axis lengths.
    ellipse_width = 0.9 * scale * (inv_first / total)
    ellipse_height = 0.9 * scale * (inv_second / total)
    # Orientation (degrees) of the second eigenvector.
    angle = np.degrees(np.arctan2(vecs[1, 1], vecs[0, 1]))
    return Ellipse(xy=pos, width=ellipse_width, height=ellipse_height,
                   angle=angle, **kwargs)
def _get_diffusion_tensor(Ixx, Ixy, Iyy, weighted=False):
""" Compute diffusion tensor with numpy broadcasting
"""
trace = Ixx + Iyy
det = Ixx*Iyy - Ixy*Ixy
# compute eigenvalues
val1 = 0.5 * (trace + np.sqrt(trace**2 - 4*det))
val2 = 0.5 * (trace - np.sqrt(trace**2 - 4*det))
# first eigenvector
u1 = val1 - Iyy
u2 = Ixy
u_mag = np.sqrt(u1**2 + u2**2)
u1 = u1 / u_mag
u2 = u2 / u_mag
# second eigenvector
v1 = val2 - Iyy
v2 = Ixy
v_mag = np.sqrt(v1**2 + v2**2)
v1 = v1 / v_mag
v2 = v2 / v_mag
if weighted:
mask = val1 == val2
linearity = val1-val2 / val1
alpha = 0.01
lam1 = alpha
lam2 = alpha + (1-alpha)*np.exp(-1./linearity**2)
lam_sum = lam1 + lam2
lam1 = lam1 / lam_sum
lam2 = lam2 / lam_sum
lam1[mask] = 0.5
lam2[mask] = 0.5
else:
lam1 = 0.05
lam2 = 0.95
d11 = lam1 * u1 * u1 + lam2 * v1 * v1
d12 = lam1 * u1 * u2 + lam2 * v1 * v2
d22 = lam1 * u2 * u2 + lam2 * v2 * v2
return d11, d12, d22
def _get_diffusion_tensor_ref(Ixx, Ixy, Iyy, weighted=False):
    """Reference (per-pixel loop) implementation of _get_diffusion_tensor.

    Slower than the vectorized version but easier to verify; kept for
    cross-checking.  Uses eigsorted() for the 2x2 eigendecomposition at
    every pixel and recombines the eigenvectors with weights lam1/lam2.
    """
    ny, nx = Ixx.shape
    # Initialize diffusion tensor components
    d11 = np.zeros((ny, nx))
    d12 = np.zeros((ny, nx))
    d22 = np.zeros((ny, nx))
    for i in range(ny):
        for j in range(nx):
            # Local 2x2 structure tensor at pixel (row i, col j).
            A = [[Ixx[i, j], Ixy[i, j]], [Ixy[i, j], Iyy[i, j]]]
            vals, vecs = eigsorted(A)
            alpha = 0.01
            if weighted:
                if vals[0] == vals[1]:
                    # Degenerate case: split weight evenly.
                    lam1 = 0.5
                    lam2 = 0.5
                else:
                    # NOTE(review): precedence yields vals[0] - (vals[1]/vals[0]);
                    # coherence measures are usually (l1 - l2)/l1.  The vectorized
                    # _get_diffusion_tensor matches this form -- confirm intent.
                    linearity = vals[0] - vals[1] / vals[0]
                    lam1 = alpha
                    lam2 = alpha + (1-alpha)*np.exp(-1./linearity**2)
                    lam_sum = lam1 + lam2
                    lam1 /= lam_sum
                    lam2 /= lam_sum
            else:
                lam1 = 0.05
                lam2 = 0.95
            # eigen-decomposition of diffusion tensor
            D = lam1 * np.outer(vecs[:, 0], vecs[:, 0]) + \
                lam2 * np.outer(vecs[:, -1], vecs[:, -1])
            # Assign components
            d11[i][j] = D[0][0]
            d12[i][j] = D[0][1]
            d22[i][j] = D[1][1]
    return d11, d12, d22
"aniso.derivatives.apply_anisotropic_smoothing",
"numpy.sqrt",
"numpy.ones",
"matplotlib.pyplot.gca",
"aniso.utils.as_ndarrays",
"numpy.asarray",
"numpy.exp",
"numpy.zeros",
"aniso.utils.check_2d",
"numpy.arctan2",
"numpy.outer",
"skimage.feature.structure_tensor",
"matplotlib.patches.Ellips... | [((8226, 8292), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'pos', 'width': 'height', 'height': 'width', 'angle': 'theta'}), '(xy=pos, width=height, height=width, angle=theta, **kwargs)\n', (8233, 8292), False, 'from matplotlib.patches import Ellipse\n'), ((8690, 8716), 'numpy.sqrt', 'np.sqrt', (['(u1 ** 2 + u2 ** 2)'], {}), '(u1 ** 2 + u2 ** 2)\n', (8697, 8716), True, 'import numpy as np\n'), ((8824, 8850), 'numpy.sqrt', 'np.sqrt', (['(v1 ** 2 + v2 ** 2)'], {}), '(v1 ** 2 + v2 ** 2)\n', (8831, 8850), True, 'import numpy as np\n'), ((9564, 9582), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {}), '((ny, nx))\n', (9572, 9582), True, 'import numpy as np\n'), ((9593, 9611), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {}), '((ny, nx))\n', (9601, 9611), True, 'import numpy as np\n'), ((9622, 9640), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {}), '((ny, nx))\n', (9630, 9640), True, 'import numpy as np\n'), ((1086, 1112), 'aniso.utils.as_ndarrays', 'as_ndarrays', (['t11', 't12', 't22'], {}), '(t11, t12, t22)\n', (1097, 1112), False, 'from aniso.utils import as_ndarrays, check_2d, eigsorted\n'), ((1121, 1144), 'aniso.utils.check_2d', 'check_2d', (['t11', 't12', 't22'], {}), '(t11, t12, t22)\n', (1129, 1144), False, 'from aniso.utils import as_ndarrays, check_2d, eigsorted\n'), ((1676, 1693), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1686, 1693), True, 'import numpy as np\n'), ((2997, 3014), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (3007, 3014), True, 'import numpy as np\n'), ((4470, 4487), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (4480, 4487), True, 'import numpy as np\n'), ((5989, 6006), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (5999, 6006), True, 'import numpy as np\n'), ((6234, 6300), 'skimage.feature.structure_tensor', 'ski.structure_tensor', (['image'], {'sigma': 'sigma', 'mode': '"""nearest"""', 'cval': '(0.0)'}), "(image, sigma=sigma, mode='nearest', 
cval=0.0)\n", (6254, 6300), True, 'import skimage.feature as ski\n'), ((8183, 8212), 'numpy.arctan2', 'np.arctan2', (['*vecs[:, 1][::-1]'], {}), '(*vecs[:, 1][::-1])\n', (8193, 8212), True, 'import numpy as np\n'), ((1912, 1988), 'skimage.feature.structure_tensor', 'ski.structure_tensor', (['image[:, :, 0]'], {'sigma': 'sigma', 'mode': '"""constant"""', 'cval': '(0.0)'}), "(image[:, :, 0], sigma=sigma, mode='constant', cval=0.0)\n", (1932, 1988), True, 'import skimage.feature as ski\n'), ((2031, 2098), 'skimage.feature.structure_tensor', 'ski.structure_tensor', (['image'], {'sigma': 'sigma', 'mode': '"""constant"""', 'cval': '(0.0)'}), "(image, sigma=sigma, mode='constant', cval=0.0)\n", (2051, 2098), True, 'import skimage.feature as ski\n'), ((2506, 2523), 'numpy.ones', 'np.ones', (['(ny, nx)'], {}), '((ny, nx))\n', (2513, 2523), True, 'import numpy as np\n'), ((2525, 2543), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {}), '((ny, nx))\n', (2533, 2543), True, 'import numpy as np\n'), ((2545, 2562), 'numpy.ones', 'np.ones', (['(ny, nx)'], {}), '((ny, nx))\n', (2552, 2562), True, 'import numpy as np\n'), ((2970, 2979), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2977, 2979), True, 'import matplotlib.pyplot as plt\n'), ((6056, 6073), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (6066, 6073), True, 'import numpy as np\n'), ((7339, 7359), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (7352, 7359), True, 'import numpy as np\n'), ((7615, 7714), 'aniso.derivatives.apply_anisotropic_smoothing', 'apply_anisotropic_smoothing', (['image', 'self.t11', 'self.t12', 'self.t22'], {'alpha': 'alpha', 'maxiter': 'maxiter'}), '(image, self.t11, self.t12, self.t22, alpha=\n alpha, maxiter=maxiter)\n', (7642, 7714), False, 'from aniso.derivatives import apply_anisotropic_smoothing\n'), ((7931, 7947), 'numpy.sqrt', 'np.sqrt', (['vals[0]'], {}), '(vals[0])\n', (7938, 7947), True, 'import numpy as np\n'), ((7976, 7992), 'numpy.sqrt', 'np.sqrt', 
(['vals[1]'], {}), '(vals[1])\n', (7983, 7992), True, 'import numpy as np\n'), ((8540, 8569), 'numpy.sqrt', 'np.sqrt', (['(trace ** 2 - 4 * det)'], {}), '(trace ** 2 - 4 * det)\n', (8547, 8569), True, 'import numpy as np\n'), ((8593, 8622), 'numpy.sqrt', 'np.sqrt', (['(trace ** 2 - 4 * det)'], {}), '(trace ** 2 - 4 * det)\n', (8600, 8622), True, 'import numpy as np\n'), ((9784, 9796), 'aniso.utils.eigsorted', 'eigsorted', (['A'], {}), '(A)\n', (9793, 9796), False, 'from aniso.utils import as_ndarrays, check_2d, eigsorted\n'), ((3440, 3452), 'aniso.utils.eigsorted', 'eigsorted', (['D'], {}), '(D)\n', (3449, 3452), False, 'from aniso.utils import as_ndarrays, check_2d, eigsorted\n'), ((7469, 7576), 'aniso.derivatives.apply_anisotropic_smoothing', 'apply_anisotropic_smoothing', (['image[:, :, i]', 'self.t11', 'self.t12', 'self.t22'], {'alpha': 'alpha', 'maxiter': 'maxiter'}), '(image[:, :, i], self.t11, self.t12, self.t22,\n alpha=alpha, maxiter=maxiter)\n', (7496, 7576), False, 'from aniso.derivatives import apply_anisotropic_smoothing\n'), ((9046, 9075), 'numpy.exp', 'np.exp', (['(-1.0 / linearity ** 2)'], {}), '(-1.0 / linearity ** 2)\n', (9052, 9075), True, 'import numpy as np\n'), ((10403, 10435), 'numpy.outer', 'np.outer', (['vecs[:, 0]', 'vecs[:, 0]'], {}), '(vecs[:, 0], vecs[:, 0])\n', (10411, 10435), True, 'import numpy as np\n'), ((10463, 10497), 'numpy.outer', 'np.outer', (['vecs[:, -1]', 'vecs[:, -1]'], {}), '(vecs[:, -1], vecs[:, -1])\n', (10471, 10497), True, 'import numpy as np\n'), ((10109, 10138), 'numpy.exp', 'np.exp', (['(-1.0 / linearity ** 2)'], {}), '(-1.0 / linearity ** 2)\n', (10115, 10138), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 11 15:56:17 2020

@author: Guido

Behavioral-cloning pipeline: reads the driving simulator's
driving_log.csv, builds an augmented image/steering-angle dataset
(center/left/right cameras plus horizontal flips) and trains an
NVIDIA-style CNN with Keras, saving the model to ../model.h5.
"""
import time
import os
import csv
import matplotlib.pyplot as plt
import cv2
import numpy as np
t0=time.time() #Set initial time
os.chdir("C:/Users/Guido/Desktop/beta_simulator_windows/Take_3") #Setting the directory
## Extracting data from the csv driving_log
# Each row: [center_path, left_path, right_path, steering, ...].
lines=[]
with open("driving_log.csv") as csvfile:
    reader=csv.reader(csvfile)
    for line in reader:
        lines.append(line)
print("txt read. %s lines"%(len(lines)))
t1=time.time()
## Importing the images and flipping them
images=[]
measurements=[]
i=0 #counter of lines
for line in lines:
    i+=1
    if i%100==0: print("%s lines"%i) #Counter of lines read
    if i%1000==0: print("Time: %s s"%(time.time()-t0)) #Display time
    # Columns 0..2 hold the center, left and right camera image paths.
    for j in range(3):
        path=line[j].split('data/')[-1]
        image=plt.imread(path)
        flip=cv2.flip(image, 1) #Mirroring the image
        #Setting the correction factor for center and side cameras
        # (left camera sees the road shifted, so steer right, and vice versa).
        if j==0: correction=0
        elif j==1:correction=0.3
        elif j==2:correction=-0.3
        measurement=float(line[3])
        #Append the images and measurements values on lists
        # The flipped image gets the negated steering angle.
        images.append(image)
        measurements.append(measurement+correction)
        images.append(flip)
        measurements.append(-1.0*(measurement+correction))
t2=time.time()
delta=t2-t0
print("Time: %f"%delta)
print("All set. Showtime!")
print("Number of images: %s"%len(measurements))
#################### Time for CNN! ####################
import keras
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D
from keras.layers.convolutional import Convolution2D as conv2d
from keras.layers.pooling import MaxPooling2D
#Creating the numpy arrays for CNN
images= np.array(images)
y_train= np.array(measurements)
#Building the CNN Architecture (NVIDIA end-to-end driving style)
#Initializing
model=Sequential()
# Normalize pixels to [-0.5, 0.5] and crop sky (top 70 px) / hood (bottom 25 px).
model.add(Lambda(lambda x: x/255-0.5,input_shape=(160,320,3)))
model.add(Cropping2D(cropping=((70,25),(0,0))))
#Convolutional part
model.add(conv2d(24,5,strides=(2,2)))
model.add(conv2d(36,5,strides=(2,2)))
model.add(conv2d(48,5,strides=(2,2)))
model.add(conv2d(64,3))
model.add(conv2d(64,3))
model.add(Flatten())
#Fully connected
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
# Single output: the predicted steering angle (regression, MSE loss).
model.add(Dense(1))
#Training the CNN
model.compile(optimizer='adam', loss='mse')
# NOTE(review): nb_epoch is the legacy Keras 1.x spelling; newer Keras
# expects epochs= -- confirm against the installed Keras version.
model.fit(images,y_train,validation_split=0.2,shuffle=True,nb_epoch=1)
print("Saving...")
model.save('../model.h5')
| [
"keras.layers.Flatten",
"keras.layers.convolutional.Convolution2D",
"cv2.flip",
"keras.layers.Lambda",
"matplotlib.pyplot.imread",
"keras.models.Sequential",
"os.chdir",
"numpy.array",
"keras.layers.Cropping2D",
"csv.reader",
"keras.layers.Dense",
"time.time"
] | [((184, 195), 'time.time', 'time.time', ([], {}), '()\n', (193, 195), False, 'import time\n'), ((215, 279), 'os.chdir', 'os.chdir', (['"""C:/Users/Guido/Desktop/beta_simulator_windows/Take_3"""'], {}), "('C:/Users/Guido/Desktop/beta_simulator_windows/Take_3')\n", (223, 279), False, 'import os\n'), ((531, 542), 'time.time', 'time.time', ([], {}), '()\n', (540, 542), False, 'import time\n'), ((1390, 1401), 'time.time', 'time.time', ([], {}), '()\n', (1399, 1401), False, 'import time\n'), ((1844, 1860), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1852, 1860), True, 'import numpy as np\n'), ((1870, 1892), 'numpy.array', 'np.array', (['measurements'], {}), '(measurements)\n', (1878, 1892), True, 'import numpy as np\n'), ((1947, 1959), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1957, 1959), False, 'from keras.models import Sequential\n'), ((411, 430), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (421, 430), False, 'import csv\n'), ((1970, 2028), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255 - 0.5, input_shape=(160, 320, 3))\n', (1976, 2028), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((2033, 2072), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (2043, 2072), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((2102, 2131), 'keras.layers.convolutional.Convolution2D', 'conv2d', (['(24)', '(5)'], {'strides': '(2, 2)'}), '(24, 5, strides=(2, 2))\n', (2108, 2131), True, 'from keras.layers.convolutional import Convolution2D as conv2d\n'), ((2140, 2169), 'keras.layers.convolutional.Convolution2D', 'conv2d', (['(36)', '(5)'], {'strides': '(2, 2)'}), '(36, 5, strides=(2, 2))\n', (2146, 2169), True, 'from keras.layers.convolutional import Convolution2D as conv2d\n'), ((2178, 2207), 'keras.layers.convolutional.Convolution2D', 'conv2d', 
(['(48)', '(5)'], {'strides': '(2, 2)'}), '(48, 5, strides=(2, 2))\n', (2184, 2207), True, 'from keras.layers.convolutional import Convolution2D as conv2d\n'), ((2216, 2229), 'keras.layers.convolutional.Convolution2D', 'conv2d', (['(64)', '(3)'], {}), '(64, 3)\n', (2222, 2229), True, 'from keras.layers.convolutional import Convolution2D as conv2d\n'), ((2240, 2253), 'keras.layers.convolutional.Convolution2D', 'conv2d', (['(64)', '(3)'], {}), '(64, 3)\n', (2246, 2253), True, 'from keras.layers.convolutional import Convolution2D as conv2d\n'), ((2265, 2274), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2272, 2274), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((2304, 2314), 'keras.layers.Dense', 'Dense', (['(100)'], {}), '(100)\n', (2309, 2314), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((2326, 2335), 'keras.layers.Dense', 'Dense', (['(50)'], {}), '(50)\n', (2331, 2335), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((2347, 2356), 'keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (2352, 2356), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((2368, 2376), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (2373, 2376), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((870, 886), 'matplotlib.pyplot.imread', 'plt.imread', (['path'], {}), '(path)\n', (880, 886), True, 'import matplotlib.pyplot as plt\n'), ((900, 918), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (908, 918), False, 'import cv2\n'), ((761, 772), 'time.time', 'time.time', ([], {}), '()\n', (770, 772), False, 'import time\n')] |
from sympy import *
import numpy as np
import matplotlib.pyplot as plt
def draw_plot(values, label, three_d=False):
    """
    Plot a function from its precomputed values.

    Parameters
    ----------
    values : 2-D numpy array or list of lists
        Groups of x, y, y', y'' ... values.  A numpy array is indexed
        column-wise (values[:, 1] ...); a plain list of lists is indexed
        row-wise (values[0], values[1], ...).
    label : str
        Legend label of the plotted function.
    three_d : bool, optional
        Draw the plot in 3D.  Default False.

    Raises
    ------
    ValueError
        When a 3D plot is requested with only two variables available.
    """
    if not three_d or len(values) < 3:
        if three_d:
            raise ValueError('Предупреждение, вы не можете построить 3D график используя только две переменные')
        # A list of lists raises TypeError on values[:, 1]; a 1-D/too-narrow
        # numpy array raises IndexError.  A bare except would hide real bugs.
        try:
            x_list = values[:, 1]
            y_list = values[:, 2]
        except (TypeError, IndexError):
            x_list = values[0]
            y_list = values[1]
        plt.plot(x_list, y_list, label=label)
        plt.legend()
        plt.show()
    else:
        try:
            x_list = values[:, 1]
            y_list = values[:, 2]
            z_list = values[:, 3]
        except (TypeError, IndexError):
            x_list = values[0]
            y_list = values[1]
            z_list = values[2]
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.plot(x_list, y_list, z_list, label=label)
        ax.legend(loc='upper left')
        plt.show()
def draw_plots(values, labels, colors=None, three_d=False):
    """
    Draw several functions on one plot.

    Parameters
    ----------
    values : list
        One entry per curve; each entry is a 2-D numpy array
        (column-indexed) or a list of coordinate lists (row-indexed).
    labels : [str, str, ...]
        Legend labels of the plotted functions.
    colors : [str, str, ...], optional
        Plot colors; matplotlib defaults are used when empty/None.
        Default None (was a mutable default ``[]``; ``None`` is falsy the
        same way, so callers are unaffected).
    three_d : bool, optional
        Draw the plot in 3D.  Default False.
    """
    if not three_d:
        for i in range(len(values)):
            # Lists raise TypeError on tuple indexing; narrow arrays raise
            # IndexError.  Avoid a bare except that would mask real bugs.
            try:
                x_list = values[i][:, 1]
                y_list = values[i][:, 2]
            except (TypeError, IndexError):
                x_list = values[i][0]
                y_list = values[i][1]
            if colors:
                plt.plot(x_list, y_list, colors[i], label=labels[i])
            else:
                plt.plot(x_list, y_list, label=labels[i])
        plt.legend()
        plt.show()
    else:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        for i in range(len(values)):
            try:
                x_list = values[i][:, 1]
                y_list = values[i][:, 2]
                z_list = values[i][:, 3]
            except (TypeError, IndexError):
                x_list = values[i][0]
                y_list = values[i][1]
                z_list = values[i][2]
            if colors:
                ax.plot(x_list, y_list, z_list, color=colors[i], label=labels[i])
            else:
                ax.plot(x_list, y_list, z_list, label=labels[i])
        ax.legend(loc='upper left')
        plt.show()
def input_expression():
    """
    Ask the user for one equation or a system of two equations.

    Returns
    -------
    expressions : tuple
        ``(exp,)`` for a single equation or ``(exp1, exp2)`` for a system.
    n : int
        Number of equations entered (1 or 2).
    """
    count = int(input('Введите кол-во уравнений, которые хотите задать (1|2)\n ->'))
    if count == 1:
        return (input('Введите уравнение\n ->'),), 1
    first = input("Введите первое уравнение\n ->y'=")
    second = input("Введите второе уравнение\n ->z'=")
    return (first, second), 2
def lambda_func(string, variables):
    """
    Build a callable numeric function from the entered equation string.

    Parameters
    ----------
    string : str
        The entered equation (may use '^' for powers).
    variables : [str, str, ...]
        Names of the variables appearing in the equation.

    Returns
    -------
    func : function
        Lambdified function based on the entered equation.
    """
    # Build the sympy symbols in an explicit namespace.  The previous
    # implementation exec()-ed assignments into function locals, which is
    # undefined behaviour in Python 3 (exec cannot reliably create locals).
    namespace = {name: Symbol(name) for name in variables}
    var = [namespace[name] for name in variables]
    # NOTE: eval() on user-entered text is inherently unsafe; acceptable here
    # only because the input comes from the interactive operator themselves.
    # globals() keeps sympy's functions (sin, cos, ...) available.
    expr = eval(string.replace('^', '**'), globals(), namespace)
    return lambdify(var, expr)
def parse_string(string):
    """
    Parser that normalizes any equation to the form
    y'(n) = x + y' + y'' + ... + y'(n-1), i.e. solves the equation for its
    highest-order derivative.

    Parameters
    ----------
    string : str
        The entered equation.

    Returns
    -------
    exp : str
        The transformed equation (right-hand side only).
    max_val : int
        Order of the differential equation.
    variables : [str, str, ...]
        Variables that appear in the equation, sorted by name then length
        (so y precedes y', y'', ...).
    """
    string = string.replace(' ', '')
    expression = []
    variable = ''
    # Tokenize: a token ends when the next character is not part of an
    # identifier (letter, digit, "'" or '^') or the current char is an operator.
    for i in range(len(string)):
        variable += string[i]
        if i == len(string) - 1:
            expression.append(variable)
            variable = ''
        elif not (string[i + 1] in "'^" or string[i + 1].isalpha() or string[i + 1].isdigit()) or string[i] in '=+*/-':
            expression.append(variable)
            variable = ''
    # Locate the token with the highest derivative order (most apostrophes).
    max_ind = 0
    max_val = -1
    for var in expression:
        if var.count("'") > max_val:
            max_val = var.count("'")
            max_ind = expression.index(var)
    val = expression[max_ind]
    eq = expression.index('=')
    # Ensure the highest-order derivative sits on the left of '='.
    if eq < max_ind:
        expression = expression[eq + 1:] + ['='] + expression[:eq]
        eq = expression.index('=')
    left = expression[:eq]
    # Give every leading term an explicit sign.
    if left[0] not in '+-' and ((val in left) == (len(left) > 2)) or len(left) == 1:
        left = ['+'] + left
    right = expression[eq + 1:]
    if right[0] not in '+-' and ((val in right) == (len(right) > 2)) or len(left) == 1:
        right = ['+'] + right
    # Remove the highest derivative (and its sign) from the left side and
    # move the remaining terms to the other side of the equation.
    new_ind = left.index(val)
    sign = left[new_ind - 1]
    left.pop(new_ind)
    left.pop(new_ind - 1)
    if sign == '+' and len(left) != 0:
        expression = right + ['-', '('] + left + [')']
    elif sign == '+':
        expression = right
    elif sign == '-' and len(right) != 0:
        expression = left + ['-', '('] + right + [')']
    else:
        expression = left
    if expression[0] == '+':
        expression.pop(0)
    # Collect the variable names (letters plus apostrophes) from the tokens.
    variables = []
    for v in expression:
        temp = []
        for s in v:
            temp.append(s.isalpha())
        if sum(temp):
            new_temp = []
            for s in v:
                if s.isalpha() or s == "'":
                    new_temp.append(s)
            variables.append(''.join(new_temp))
    # Sort by name first, then (stable) by length so y < y' < y'' ...
    variables = sorted(variables)
    variables = sorted(variables, key=lambda x: len(x))
    print(f"Функция : {' '.join(expression)}\nСтепень дифф. уравнения: {max_val}")
    return ' '.join(expression), max_val, variables
def eyler_cauchy(f, power, var, n=10000):
    """
    Solve a differential equation with the Euler-Cauchy (Heun) method.

    Parameters
    ----------
    f : str
        The entered function (right-hand side for the highest derivative).
    power : int
        Order of the differential equation.
    var : [str, str, ...]
        List of variables; mutated in place (y', y'' ... renamed to y0, y1 ...).
    n : int, optional
        Number of produced values.  Default 10000.

    Returns
    -------
    result : numpy.ndarray
        Rows of [index, x, y, y', y'', ..., f(...)] values.
    """
    # Rename derivative variables y', y'' ... to y0, y1 ... so they become
    # valid Python identifiers for lambdify.
    for i in range(len(var)):
        if "'" in var[i]:
            var[i] = "y" + str(var[i].count("'") - 1)
    for v in var[::-1]:
        if v[-1].isdigit():
            f = f.replace('y' + "'" * (int(v[-1]) + 1), v)
    func = lambda_func(f, var)
    values_dict = {}
    # Initial conditions for every variable, entered interactively.
    for i in range(len(var)):
        values_dict[(i, var[i])] = [float(input(f'Введите начальные условия ({var[i]}_0)'))]
    a = float(input('Введите начальные условия (a)'))
    b = float(input('Введите начальные условия (b)'))
    h = (b-a)/n
    for i in range(n):
        values_i = []
        values_i1 = []
        for v in values_dict.keys():
            if v[0] == 0:
                # Independent variable x advances by the step h.
                values_dict[v].append(values_dict[v][i] + h)
                values_i.append(values_dict[v][i])
                values_i1.append(values_dict[v][i + 1])
            elif v[0] != len(values_dict.keys()) - 1:
                # Intermediate derivatives: Euler step using the next-order value.
                values_dict[v].append(values_dict[v][i] + h * values_dict[(v[0] + 1, var[v[0] + 1])][i])
                values_i.append(values_dict[v][i])
                values_i1.append(values_dict[v][i + 1])
            else:
                # Highest derivative: predictor-corrector (Heun) step.
                values_i.append(values_dict[v][i])
                values_i1.append(values_dict[v][i] + h * func(*values_i))
                values_dict[v].append(values_dict[v][i] + h / 2 * (func(*values_i) + func(*values_i1)))
    result = []
    for i in range(n + 1):
        result.append([])
        result[i].append(i)
        for v in values_dict.keys():
            result[i].append(values_dict[v][i])
        result[i].append(func(*result[i][1:]))
    return np.array(result)
def runge_kutti(f, power, var, n=10000):
    """
    Solve a differential equation with the classic Runge-Kutta (RK4) method.

    Parameters
    ----------
    f : str
        The entered function (right-hand side for the highest derivative).
    power : int
        Order of the differential equation.
    var : [str, str, ...]
        List of variables; mutated in place (y', y'' ... renamed to y0, y1 ...).
    n : int, optional
        Number of produced values.  Default 10000.

    Returns
    -------
    result : numpy.ndarray
        Rows of [index, x, y, y', y'', ..., f(...)] values.
    """
    # Rename derivative variables y', y'' ... to y0, y1 ... so they become
    # valid Python identifiers for lambdify.
    for i in range(len(var)):
        if "'" in var[i]:
            var[i] = "y" + str(var[i].count("'") - 1)
    for v in var[::-1]:
        if v[-1].isdigit():
            f = f.replace('y' + "'" * (int(v[-1]) + 1), v)
    func = lambda_func(f, var)
    values_dict = {}
    for i in range(len(var)):
        values_dict[(i, var[i])] = [float(input(f'Введите начальные условия ({var[i]}_0)'))]
    a = float(input('Введите начальные условия (a)'))
    b = float(input('Введите начальные условия (b)'))
    h = (b-a)/n
    # One row of RK4 coefficients (k1..k4) per variable; the row for x is
    # constant because x advances uniformly by h.
    koefs = np.zeros((len(var), 4))
    koefs[0] = np.array([0, h, h, h])
    funcs = {}
    variables = []
    for v in var:
        exec(f'{v} = Symbol("{v}")')
        eval(f'variables.append({v})')
    # funcs[j] yields the derivative of variable j: intermediate derivatives
    # are simply the next-order variable, the last one is f itself.
    for v in values_dict.keys():
        if v[0] != 0 and v[0] != len(values_dict.keys()) - 1:
            funcs[v[0]] = lambdify(variables, var[v[0] + 1])
        elif v[0] == len(values_dict.keys()) - 1:
            funcs[v[0]] = func
    for i in range(n):
        values_dict[(0, 'x')].append(values_dict[(0, 'x')][i] + h)
        values_i = []
        for v1 in values_dict.keys():
            values_i.append(values_dict[v1][i])
        values_i = np.array(values_i)
        # Classic RK4 stages: evaluate at the start, twice at the midpoint
        # and once at the end of the interval.
        for j in range(1, len(values_i)):
            koefs[j][0] = h * funcs[j](*values_i)
        for j in range(1, len(values_i)):
            koefs[j][1] = h * funcs[j](*(values_i + koefs[:, 0]/2))
        for j in range(1, len(values_i)):
            koefs[j][2] = h * funcs[j](*(values_i + koefs[:, 1]/2))
        for j in range(1, len(values_i)):
            koefs[j][3] = h * funcs[j](*(values_i + koefs[:, 2]))
        for v in values_dict.keys():
            if v[0] != 0:
                values_dict[v].append(values_dict[v][i] + 1/6*(koefs[v[0]][0] + 2 * koefs[v[0]][1] + 2 * koefs[v[0]][2] + koefs[v[0]][3]))
    result = []
    for i in range(n + 1):
        result.append([])
        result[i].append(i)
        for v in values_dict.keys():
            result[i].append(values_dict[v][i])
        result[i].append(func(*result[i][1:]))
    return np.array(result)
def eyler_cauchy_system(expressions, var=['x', 'y', 'z'], n=1000):
    """
    Solve a system of two differential equations with the Euler-Cauchy method.

    Parameters
    ----------
    expressions : [str, str]
        Right-hand sides: y' = expressions[0], z' = expressions[1].
    var : [str, str, str], optional
        Variable names.  Default ['x', 'y', 'z'].
    n : int, optional
        Number of produced values.  Default 1000.

    Returns
    -------
    result : numpy.ndarray
        Rows of [index, x, y, z] values.
    """
    # BUG FIX: 'z' previously evaluated expressions[0] (the y equation)
    # instead of expressions[1].
    # NOTE: eval() on user-entered text is unsafe; acceptable only because
    # the input comes from the interactive operator themselves.
    funcs = {'y': lambda x, y, z: eval(expressions[0].replace('^', '**')),
             'z': lambda x, y, z: eval(expressions[1].replace('^', '**'))}
    x0 = float(input(f'Введите начальные условия (x0)\n ->'))
    y0 = float(input(f'Введите начальные условия (y0)\n ->'))
    z0 = float(input(f'Введите начальные условия (z0)\n ->'))
    a = float(input('Введите начальные условия (a)'))
    b = float(input('Введите начальные условия (b)'))
    h = (b-a)/n
    values_dict = {'x': [x0],
                   'y': [y0],
                   'z': [z0]}
    for i in range(n):
        values_dict['x'].append(values_dict['x'][i] + h)
        values_i = []
        for v1 in values_dict.keys():
            values_i.append(values_dict[v1][i])
        # Predictor: plain Euler step for both y and z.
        yw1, zw1 = values_dict['y'][i] + h * funcs['y'](*values_i),\
                   values_dict['z'][i] + h * funcs['z'](*values_i)
        values_i1 = [values_dict['x'][i + 1], yw1, zw1]
        # Corrector: average the slopes at both interval ends (Heun's method).
        values_dict['y'].append(values_dict['y'][i] + h / 2 * (funcs['y'](*values_i) + funcs['y'](*values_i1)))
        values_dict['z'].append(values_dict['z'][i] + h / 2 * (funcs['z'](*values_i) + funcs['z'](*values_i1)))
    result = []
    for i in range(n + 1):
        result.append([])
        result[i].append(i)
        for v in values_dict.keys():
            result[i].append(values_dict[v][i])
    return np.array(result)
def runge_kutti_system(expressions, var=['x', 'y', 'z'], n=1000):
    """
    Solve a system of two differential equations with the Runge-Kutta (RK4) method.

    Parameters
    ----------
    expressions : [str, str]
        Right-hand sides: y' = expressions[0], z' = expressions[1].
    var : [str, str, str], optional
        Variable names.  Default ['x', 'y', 'z'].
    n : int, optional
        Number of produced values.  Default 1000.

    Returns
    -------
    result : numpy.ndarray
        Rows of [index, x, y, z] values.
    """
    # BUG FIX: 'z' previously evaluated expressions[0] (the y equation)
    # instead of expressions[1].
    # NOTE: eval() on user-entered text is unsafe; acceptable only because
    # the input comes from the interactive operator themselves.
    funcs = {'y': lambda x, y, z: eval(expressions[0].replace('^', '**')),
             'z': lambda x, y, z: eval(expressions[1].replace('^', '**'))}
    x0 = float(input(f'Введите начальные условия (x0)\n ->'))
    y0 = float(input(f'Введите начальные условия (y0)\n ->'))
    z0 = float(input(f'Введите начальные условия (z0)\n ->'))
    a = float(input('Введите начальные условия (a)'))
    b = float(input('Введите начальные условия (b)'))
    h = (b-a)/n
    values_dict = {'x': [x0],
                   'y': [y0],
                   'z': [z0]}
    for i in range(n):
        values_i = []
        for v1 in values_dict.keys():
            values_i.append(values_dict[v1][i])
        values_i = np.array(values_i)
        # Classic RK4 stages for y (k coefficients) and z (l coefficients).
        k1 = h * funcs['y'](*values_i)
        l1 = h * funcs['z'](*values_i)
        k2 = h * funcs['y'](*(values_i + np.array([h/2, k1/2, l1/2])))
        l2 = h * funcs['z'](*(values_i + np.array([h/2, k1/2, l1/2])))
        k3 = h * funcs['y'](*(values_i + np.array([h/2, k2/2, l2/2])))
        l3 = h * funcs['z'](*(values_i + np.array([h/2, k2/2, l2/2])))
        k4 = h * funcs['y'](*(values_i + np.array([h, k3, l3])))
        l4 = h * funcs['z'](*(values_i + np.array([h, k3, l3])))
        values_dict['x'].append(values_dict['x'][i] + h)
        values_dict['y'].append(values_dict['y'][i] + 1 / 6 * (k1 + 2 * k2 + 2 * k3 + k4))
        values_dict['z'].append(values_dict['z'][i] + 1 / 6 * (l1 + 2 * l2 + 2 * l3 + l4))
    result = []
    for i in range(n + 1):
        result.append([])
        result[i].append(i)
        for v in values_dict.keys():
            result[i].append(values_dict[v][i])
    return np.array(result)
def solve_expressions(expressions, function):
    """
    Solve a differential equation or a system of differential equations.

    Parameters
    ----------
    expressions : tuple of str
        The entered equation(s): ``(str,)`` or ``(str1, str2)``.
    function : function
        Numerical method used to solve the equation(s).

    Returns
    -------
    result : list of lists of float
        Groups of x, y, y', y'' ... values.
    """
    if len(expressions) != 1:
        # Two equations: the system solvers take the expressions directly.
        return function(expressions)
    parsed, order, names = parse_string(expressions[0])
    return function(parsed, order, names, n=100)
| [
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((9273, 9289), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (9281, 9289), True, 'import numpy as np\n'), ((10410, 10432), 'numpy.array', 'np.array', (['[0, h, h, h]'], {}), '([0, h, h, h])\n', (10418, 10432), True, 'import numpy as np\n'), ((11923, 11939), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (11931, 11939), True, 'import numpy as np\n'), ((13909, 13925), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (13917, 13925), True, 'import numpy as np\n'), ((16199, 16215), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (16207, 16215), True, 'import numpy as np\n'), ((991, 1028), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list'], {'label': 'label'}), '(x_list, y_list, label=label)\n', (999, 1028), True, 'import matplotlib.pyplot as plt\n'), ((1037, 1049), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1047, 1049), True, 'import matplotlib.pyplot as plt\n'), ((1058, 1068), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1066, 1068), True, 'import matplotlib.pyplot as plt\n'), ((1317, 1329), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1327, 1329), True, 'import matplotlib.pyplot as plt\n'), ((1559, 1569), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1567, 1569), True, 'import matplotlib.pyplot as plt\n'), ((2677, 2689), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2687, 2689), True, 'import matplotlib.pyplot as plt\n'), ((2698, 2708), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2706, 2708), True, 'import matplotlib.pyplot as plt\n'), ((2733, 2745), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2743, 2745), True, 'import matplotlib.pyplot as plt\n'), ((3473, 3483), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3481, 3483), True, 'import matplotlib.pyplot as plt\n'), ((11024, 11042), 'numpy.array', 'np.array', (['values_i'], {}), '(values_i)\n', (11032, 11042), True, 'import numpy as np\n'), ((15242, 
15260), 'numpy.array', 'np.array', (['values_i'], {}), '(values_i)\n', (15250, 15260), True, 'import numpy as np\n'), ((2540, 2592), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list', 'colors[i]'], {'label': 'labels[i]'}), '(x_list, y_list, colors[i], label=labels[i])\n', (2548, 2592), True, 'import matplotlib.pyplot as plt\n'), ((2627, 2668), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list'], {'label': 'labels[i]'}), '(x_list, y_list, label=labels[i])\n', (2635, 2668), True, 'import matplotlib.pyplot as plt\n'), ((15380, 15413), 'numpy.array', 'np.array', (['[h / 2, k1 / 2, l1 / 2]'], {}), '([h / 2, k1 / 2, l1 / 2])\n', (15388, 15413), True, 'import numpy as np\n'), ((15451, 15484), 'numpy.array', 'np.array', (['[h / 2, k1 / 2, l1 / 2]'], {}), '([h / 2, k1 / 2, l1 / 2])\n', (15459, 15484), True, 'import numpy as np\n'), ((15522, 15555), 'numpy.array', 'np.array', (['[h / 2, k2 / 2, l2 / 2]'], {}), '([h / 2, k2 / 2, l2 / 2])\n', (15530, 15555), True, 'import numpy as np\n'), ((15593, 15626), 'numpy.array', 'np.array', (['[h / 2, k2 / 2, l2 / 2]'], {}), '([h / 2, k2 / 2, l2 / 2])\n', (15601, 15626), True, 'import numpy as np\n'), ((15664, 15685), 'numpy.array', 'np.array', (['[h, k3, l3]'], {}), '([h, k3, l3])\n', (15672, 15685), True, 'import numpy as np\n'), ((15729, 15750), 'numpy.array', 'np.array', (['[h, k3, l3]'], {}), '([h, k3, l3])\n', (15737, 15750), True, 'import numpy as np\n')] |
from permutation import Permutation
from itertools import permutations
from time import time
import numpy as np
import math
"""
This has several perfect hash functions to give each position of the cube
a unique coordinate. It can also reverse the hash function to give the
relevant cube information back from a coordinate.
"""
def co_ori(co_ori):
    '''
    Rank a corner-orientation vector as a base-3 number.
    Only the first 7 of the 8 ternary digits are used (the last one is
    determined by the others).
    '''
    return sum(digit * 3 ** (6 - i) for i, digit in enumerate(co_ori[:7]))
def co_ori_inv(rank):
    '''
    0 <= rank < 3^7

    Decode the base-3 rank back into an 8-entry orientation vector; the
    last entry is chosen so the total is a multiple of 3.
    '''
    co_ori = bytearray(8)
    for i in reversed(range(7)):
        rank, digit = divmod(rank, 3)
        co_ori[i] = digit
    co_ori[7] = -sum(co_ori) % 3
    return co_ori
def eg_ori(eg_ori):
    '''
    Rank an edge-orientation vector as a base-2 number.
    eg_ori has 12 binary digits, but only the first 11 are used.
    '''
    rank = 0
    # Horner evaluation of the 11 leading bits.
    for bit in eg_ori[:11]:
        rank = rank * 2 + bit
    return rank
def eg_ori_inv(rank):
    '''
    0 <= rank < 2^11

    Decode the binary rank into a 12-entry orientation vector; the last
    entry is the parity bit that makes the total even.
    '''
    eg_ori = bytearray(12)
    for i in reversed(range(11)):
        rank, bit = divmod(rank, 2)
        eg_ori[i] = bit
    eg_ori[11] = sum(eg_ori) % 2
    return eg_ori
def ud_edges(egs):
    '''
    Rank the positions of the UD-slice edges within egs.

    egs is a set of 12 numbers ranging from 0 to 11; only entries that are
    bigger than 7 (the four UD-slice edges) are of interest.  Returns the
    combinatorial rank of the 4-element subset of positions they occupy.
    '''
    start = False  # becomes True at the first UD-slice edge seen
    k = -1         # number of UD-slice edges seen so far, minus one
    rank = 0       # renamed from ``sum`` to avoid shadowing the builtin
    for n, eg in enumerate(egs):
        if eg >= 8:
            start = True
            k += 1
        elif start:
            rank += math.comb(n, k)
    return rank
def ud_edges_inv(rank):
    '''Invert ud_edges: rebuild a 12-slot layout with the UD edges placed.'''
    egs = [0] * 12
    k = 3
    # Greedy decode of the combinatorial rank, from the last slot downwards.
    for n in range(11, -1, -1):
        c = math.comb(n, k)
        if rank >= c:
            rank -= c
        else:
            egs[n] = 8 + k
            k -= 1
            if k < 0:
                break
    return egs
def co_perm(co_perm):
    '''
    co_perm is a permutation of 0-7
    '''
    # Permutation works on 1-based images, so shift the labels up first.
    one_based = [value + 1 for value in co_perm]
    return Permutation(*one_based).lehmer(8)
def co_perm_inv(rank):
    # Decode the Lehmer code, then shift the image back to 0-based labels.
    perm = Permutation.from_lehmer(rank, 8)
    return [i - 1 for i in perm.to_image(8)]
def eg_perm(eg_perm):
    '''
    eg_perm is a permutation of 0-7, so same as corners
    '''
    # Permutation works on 1-based images, so shift the labels up first.
    one_based = [value + 1 for value in eg_perm]
    return Permutation(*one_based).lehmer(8)
def eg_perm_inv(rank):
    # Decode the Lehmer code, then shift the image back to 0-based labels.
    perm = Permutation.from_lehmer(rank, 8)
    return [i - 1 for i in perm.to_image(8)]
def ud_perm(ud_perm):
    '''
    We treat ud_perm as a permutation of 0-3
    '''
    # The entries are 8-11; shift them down to the 1-based range 1-4.
    shifted = [value - 7 for value in ud_perm]
    return Permutation(*shifted).lehmer(4)
def ud_perm_inv(rank):
    # Decode the Lehmer code, then shift the 1-based image back up to 8-11.
    perm = Permutation.from_lehmer(rank, 4)
    return [i + 7 for i in perm.to_image(4)]
if __name__ == "__main__":
    # Quick sanity check of the corner-orientation ranking.
    print(co_ori([2, 1, 2, 1, 1, 0, 0, 2]))
pass | [
"permutation.Permutation",
"permutation.Permutation.from_lehmer",
"math.comb",
"numpy.base_repr"
] | [((591, 617), 'numpy.base_repr', 'np.base_repr', (['rank'], {'base': '(3)'}), '(rank, base=3)\n', (603, 617), True, 'import numpy as np\n'), ((1067, 1093), 'numpy.base_repr', 'np.base_repr', (['rank'], {'base': '(2)'}), '(rank, base=2)\n', (1079, 1093), True, 'import numpy as np\n'), ((1740, 1755), 'math.comb', 'math.comb', (['n', 'k'], {}), '(n, k)\n', (1749, 1755), False, 'import math\n'), ((2023, 2063), 'permutation.Permutation', 'Permutation', (['*[(i + 1) for i in co_perm]'], {}), '(*[(i + 1) for i in co_perm])\n', (2034, 2063), False, 'from permutation import Permutation\n'), ((2273, 2313), 'permutation.Permutation', 'Permutation', (['*[(i + 1) for i in eg_perm]'], {}), '(*[(i + 1) for i in eg_perm])\n', (2284, 2313), False, 'from permutation import Permutation\n'), ((2512, 2552), 'permutation.Permutation', 'Permutation', (['*[(i - 7) for i in ud_perm]'], {}), '(*[(i - 7) for i in ud_perm])\n', (2523, 2552), False, 'from permutation import Permutation\n'), ((1602, 1617), 'math.comb', 'math.comb', (['n', 'k'], {}), '(n, k)\n', (1611, 1617), False, 'import math\n'), ((2120, 2152), 'permutation.Permutation.from_lehmer', 'Permutation.from_lehmer', (['rank', '(8)'], {}), '(rank, 8)\n', (2143, 2152), False, 'from permutation import Permutation\n'), ((2370, 2402), 'permutation.Permutation.from_lehmer', 'Permutation.from_lehmer', (['rank', '(8)'], {}), '(rank, 8)\n', (2393, 2402), False, 'from permutation import Permutation\n'), ((2609, 2641), 'permutation.Permutation.from_lehmer', 'Permutation.from_lehmer', (['rank', '(4)'], {}), '(rank, 4)\n', (2632, 2641), False, 'from permutation import Permutation\n')] |
import numpy as np
import nibabel as nib
import scipy.ndimage as ND
import uncertify.data.preprocessing.histogram_matching.KernelDensityEstimation as KDE
def ComputeHistograms(I, nbins=50, skip=50, kernel_size=50.):
    """
    Estimate a smoothed intensity histogram via kernel density estimation.

    Parameters
    ----------
    I : numpy.ndarray
        Intensity values (flattened internally by integer indexing).
    nbins : int
        Number of evaluation points of the KDE.
    skip : int
        Subsampling stride over the intensities.
    kernel_size : float
        Multiplier on the Silverman bandwidth estimate.

    Returns
    -------
    (y, P) : bin centres and KDE values at those centres.
    """
    my = I.min()
    My = I.max()
    dy = My - my
    # Extend the range slightly below the minimum so the KDE is not clipped.
    y = np.linspace(my - dy / 20., My, nbins)
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    h = KDE.SilvermanWidthEstimate(float(I.size) / skip, 1) * kernel_size
    P = KDE.KernelDensityEstimate(KDE.GaussianKernel, I[range(0, I.size, skip), np.newaxis], y[:, np.newaxis], h)
    return y, P
def MeanIntensityThreshold(V):
    """Return only the intensities of V strictly above its mean."""
    threshold = V.mean()
    return V[V > threshold]
def ComputeImageHistogram(I, nbins=50, skip=50, mean_threshold=True, mask=None, kernel_size=50.):
    """
    Compute the KDE histogram of an image, optionally masked and
    mean-thresholded first.  Returns the (y, P) pair from ComputeHistograms.
    """
    # Apply the binary mask (if any) before computing the histogram.
    J = I if mask is None else I * mask
    if mean_threshold:
        W = MeanIntensityThreshold(J.astype(np.double))
    else:
        W = J.reshape(np.prod(J.shape)).astype(np.double)
    return ComputeHistograms(W, nbins=nbins, skip=skip, kernel_size=kernel_size)
def ComputePercentiles(H, L):
    """
    Map cumulative-probability levels to intensity values.

    H : (values, counts) histogram pair.
    L : list of percentile levels in [0, 1].
    """
    values, counts = H[0], H[1]
    cdf = np.cumsum(counts)
    cdf = cdf / cdf[-1]
    p = np.zeros(len(L))
    for n, level in enumerate(L):
        r = np.where(cdf >= level)[0][0]
        if r == 0 or r == len(values):
            p[n] = values[r]
        else:
            # Linear interpolation between bins to avoid jagged results.
            p[n] = values[r - 1] + (level - cdf[r - 1]) / (cdf[r] - cdf[r - 1]) * (values[r] - values[r - 1])
    return p
def MultiLinearMap(J, PJ, Target):
    """
    Piecewise-linear intensity mapping: values at percentile PJ[n] map to
    Target[n], with linear interpolation between and extrapolation outside.
    """
    K = J.copy().astype(np.double)
    # In-range values: interpolate within each percentile segment.
    for n in range(len(PJ) - 1):
        lo, hi = PJ[n], PJ[n + 1]
        seg = (J >= lo) & (J <= hi)
        K[seg] = (J[seg] - lo).astype(np.double) / (hi - lo) * (Target[n + 1] - Target[n]) + Target[n]
    # Values outside the percentile range can occur when the percentiles
    # were computed on a mask only; extend the first/last line segments.
    below = J < PJ[0]
    K[below] = (J[below] - PJ[0]).astype(np.double) / (PJ[1] - PJ[0]) * (Target[1] - Target[0]) + Target[0]
    above = J > PJ[-1]
    K[above] = (J[above] - PJ[-2]).astype(np.double) / (PJ[-1] - PJ[-2]) * (Target[-1] - Target[-2]) + Target[-2]
    return K
def MatchHistogramsTwoImages(I, J, L, nbins=50, skip=50, begval=0., finval=0.998, train_mask=None, test_mask=None):
    """
    Match the histogram of image J to that of reference image I.

    String arguments are treated as NIfTI paths and loaded from disk.
    L may be an int (number of equally spaced percentile levels) or an
    explicit list of levels.
    """
    def _load(x):
        # Arguments given as file paths are loaded; everything else passes through.
        return nib.load(x).get_data() if type(x) == str else x

    I, J = _load(I), _load(J)
    train_mask, test_mask = _load(train_mask), _load(test_mask)
    HI = ComputeImageHistogram(I, nbins=nbins, skip=skip, mask=train_mask)
    HJ = ComputeImageHistogram(J, nbins=nbins, skip=skip, mask=test_mask)
    if np.isscalar(L):
        levels = np.linspace(begval, 1, L + 1)
        levels[-1] = finval
        L = levels
    PI = ComputePercentiles(HI, L)
    PJ = ComputePercentiles(HJ, L)
    return MultiLinearMap(J, PJ, PI)
def MatchHistogramsWithMultipleTargets(I, J, L, nbins=50, skip=50, begval=0., finval=0.998, train_mask=None,
                                       test_mask=None):
    """Match test image J against population percentiles computed from images I."""
    print("Computing population histogram")
    pop = PopulationPercentiles(I, L, nbins=nbins, skip=skip, begval=begval, finval=finval, mask=train_mask)
    print("Matching test image")
    return MatchHistogramsWithPopulation(pop, J, nbins=nbins, skip=skip, begval=begval, finval=finval, mask=test_mask)
def MatchHistogramsWithMultipleTargetsMultipleMasks(I, J, L, train_masks, test_masks, nbins=50, skip=50, begval=0.,
                                                    finval=0.998, h=25.):
    """Per-mask histogram matching followed by distance-weighted interpolation."""
    print("Computing population histogram")
    pop = PopulationPercentilesMultipleMasks(I, L, train_masks, nbins=nbins, skip=skip, begval=begval, finval=finval)
    print("Matching test image to each mask")
    matched = MatchHistogramsWithPopulationMultipleMasks(pop, J, test_masks, nbins=nbins, skip=skip, begval=begval,
                                                        finval=finval)
    print("Interpolating the final image from individual ones")
    return InterpolateHistogramCorrection(matched, test_masks, h=h)[0]
def MatchHistogramsWithPopulation(P, J, nbins=50, skip=50, begval=0., finval=0.998, mask=None):
    """
    Map image J onto population percentiles P = (target values, levels).

    When a mask is given, the histogram is computed on the mask only, but
    the correction is applied to the whole image.
    """
    if type(J) == str:
        J = nib.load(J).get_data()
    if type(mask) == str:
        mask = nib.load(mask).get_data()
    histogram = ComputeImageHistogram(J, nbins=nbins, skip=skip, mask=mask)
    source_percentiles = ComputePercentiles(histogram, P[1])
    return MultiLinearMap(J, source_percentiles, P[0])
def MatchHistogramsWithPopulationMultipleMasks(P, J, masks, nbins=50, skip=50, begval=0., finval=0.998):
    """Match J against the population percentiles of each mask separately."""
    return [
        MatchHistogramsWithPopulation(P[l], J, nbins=nbins, skip=skip, begval=begval, finval=finval, mask=masks[l])
        for l in range(len(masks))
    ]
def PopulationPercentilesMultipleMasks(J, L, masks, nbins=50, skip=50, begval=0., finval=0.998):
    """Population percentiles computed independently for each mask."""
    return [
        PopulationPercentiles(J, L, nbins=nbins, skip=skip, begval=begval, mask=masks[l])
        for l in range(len(masks))
    ]
def PopulationPercentiles(I, L, nbins=50, skip=50, begval=0., finval=0.998, mask=None):
    """
    Compute population-average percentiles over a set of images.

    Parameters
    ----------
    I : list
        Images (arrays) or NIfTI file paths; entries given as paths are
        loaded in place.
    L : int or sequence of float
        Number of equally spaced percentile levels, or explicit levels.
    nbins, skip : int
        Histogram parameters.
    begval, finval : float
        First / last percentile levels used when L is an int.
    mask : list or None
        Optional per-image masks (arrays or file paths).

    Returns
    -------
    (PI, L) : (numpy.ndarray, array-like)
        Mean percentile values across the population and the levels used.
    """
    numIm = len(I)
    HI = []
    PI = []
    M = []
    if np.isscalar(L):
        L_ = np.linspace(begval, 1, L + 1)
        L_[-1] = finval
        L = L_
    # computing the percentiles for each image in the set
    for n in range(numIm):
        if type(I[n]) == str:
            I[n] = nib.load(I[n]).get_data()
        if mask is None:
            M = M + [None]
        elif type(mask[n]) == str:
            M = M + [nib.load(mask[n]).get_data()]
        else:
            # BUG FIX: previously `M[n] = mask[n]` assigned past the end of
            # a length-n list and raised IndexError; append instead.
            M = M + [mask[n]]
        HI = HI + [ComputeImageHistogram(I[n], nbins=nbins, skip=skip, mask=M[n])]
        PI = PI + [ComputePercentiles(HI[n], L)]
    # computing the average end points.
    PI = np.asarray(PI)
    m_begval = np.mean(PI[:, 0])
    m_finval = np.mean(PI[:, -1])
    # mapping the volumes to the average frame and computing the new values
    for n in range(numIm):
        K = MultiLinearMap(I[n], [PI[n, 0], PI[n, -1]], [m_begval, m_finval])
        HI[n] = ComputeImageHistogram(K, nbins=nbins, skip=skip, mask=M[n])
        PI[n, :] = ComputePercentiles(HI[n], L)
    PI = np.mean(PI, axis=0)
    return (PI, L)
def InterpolateHistogramCorrection(Kl, masks, h=25.):
    """
    Blend the per-mask corrected images Kl into one image, weighting each
    voxel by exp(-distance_to_mask / h).

    Returns
    -------
    (K, SW) : the blended image and the per-voxel sum of weights.
    """
    # reading masks:
    numMasks = len(masks)
    M = []
    for l in range(numMasks):
        if type(masks[l]) == str:
            M = M + [nib.load(masks[l]).get_data()]
        else:
            # BUG FIX: previously `M[l] = masks[l]` assigned past the end of
            # a length-l list and raised IndexError; append instead.
            M = M + [masks[l]]
    # computing the distance transforms for the masks:
    DM = DistanceTransformsForMasks(M)
    # computing the sum of distances
    SW = np.zeros(DM[0].shape)
    for l in range(numMasks):
        SW += np.exp(-DM[l] / h)
    # computing the interpolated and corrected image
    K = np.zeros(SW.shape)
    for l in range(numMasks):
        K += Kl[l] * np.exp(-DM[l] / h)
    # guard against division by zero where no mask has any support
    SW[SW == 0] = 1e-15
    K = K / SW
    return K, SW
def DistanceTransformsForMasks(masks):
numMasks = len(masks)
Dmask = []
for l in range(numMasks):
Dmask = Dmask + [ND.distance_transform_edt(1 - masks[l])]
return Dmask
def Map2UINT8(K):
    """Clip intensities into [0, 255] and convert to uint8."""
    return np.clip(K, 0., 255.).astype(np.uint8)
| [
"numpy.mean",
"scipy.ndimage.distance_transform_edt",
"numpy.prod",
"numpy.float",
"numpy.isscalar",
"nibabel.load",
"numpy.where",
"numpy.asarray",
"numpy.exp",
"numpy.linspace",
"numpy.zeros",
"numpy.cumsum"
] | [((277, 315), 'numpy.linspace', 'np.linspace', (['(my - dy / 20.0)', 'My', 'nbins'], {}), '(my - dy / 20.0, My, nbins)\n', (288, 315), True, 'import numpy as np\n'), ((1199, 1214), 'numpy.cumsum', 'np.cumsum', (['H[1]'], {}), '(H[1])\n', (1208, 1214), True, 'import numpy as np\n'), ((3065, 3079), 'numpy.isscalar', 'np.isscalar', (['L'], {}), '(L)\n', (3076, 3079), True, 'import numpy as np\n'), ((5753, 5767), 'numpy.isscalar', 'np.isscalar', (['L'], {}), '(L)\n', (5764, 5767), True, 'import numpy as np\n'), ((6374, 6388), 'numpy.asarray', 'np.asarray', (['PI'], {}), '(PI)\n', (6384, 6388), True, 'import numpy as np\n'), ((6404, 6421), 'numpy.mean', 'np.mean', (['PI[:, 0]'], {}), '(PI[:, 0])\n', (6411, 6421), True, 'import numpy as np\n'), ((6437, 6455), 'numpy.mean', 'np.mean', (['PI[:, -1]'], {}), '(PI[:, -1])\n', (6444, 6455), True, 'import numpy as np\n'), ((6771, 6790), 'numpy.mean', 'np.mean', (['PI'], {'axis': '(0)'}), '(PI, axis=0)\n', (6778, 6790), True, 'import numpy as np\n'), ((7222, 7243), 'numpy.zeros', 'np.zeros', (['DM[0].shape'], {}), '(DM[0].shape)\n', (7230, 7243), True, 'import numpy as np\n'), ((7368, 7386), 'numpy.zeros', 'np.zeros', (['SW.shape'], {}), '(SW.shape)\n', (7376, 7386), True, 'import numpy as np\n'), ((3094, 3123), 'numpy.linspace', 'np.linspace', (['begval', '(1)', '(L + 1)'], {}), '(begval, 1, L + 1)\n', (3105, 3123), True, 'import numpy as np\n'), ((5782, 5811), 'numpy.linspace', 'np.linspace', (['begval', '(1)', '(L + 1)'], {}), '(begval, 1, L + 1)\n', (5793, 5811), True, 'import numpy as np\n'), ((7288, 7306), 'numpy.exp', 'np.exp', (['(-DM[l] / h)'], {}), '(-DM[l] / h)\n', (7294, 7306), True, 'import numpy as np\n'), ((7438, 7456), 'numpy.exp', 'np.exp', (['(-DM[l] / h)'], {}), '(-DM[l] / h)\n', (7444, 7456), True, 'import numpy as np\n'), ((350, 366), 'numpy.float', 'np.float', (['I.size'], {}), '(I.size)\n', (358, 366), True, 'import numpy as np\n'), ((1298, 1317), 'numpy.where', 'np.where', (['(C >= L[n])'], {}), '(C 
>= L[n])\n', (1306, 1317), True, 'import numpy as np\n'), ((2660, 2671), 'nibabel.load', 'nib.load', (['I'], {}), '(I)\n', (2668, 2671), True, 'import nibabel as nib\n'), ((2718, 2729), 'nibabel.load', 'nib.load', (['J'], {}), '(J)\n', (2726, 2729), True, 'import nibabel as nib\n'), ((2794, 2814), 'nibabel.load', 'nib.load', (['train_mask'], {}), '(train_mask)\n', (2802, 2814), True, 'import nibabel as nib\n'), ((2877, 2896), 'nibabel.load', 'nib.load', (['test_mask'], {}), '(test_mask)\n', (2885, 2896), True, 'import nibabel as nib\n'), ((4738, 4749), 'nibabel.load', 'nib.load', (['J'], {}), '(J)\n', (4746, 4749), True, 'import nibabel as nib\n'), ((4802, 4816), 'nibabel.load', 'nib.load', (['mask'], {}), '(mask)\n', (4810, 4816), True, 'import nibabel as nib\n'), ((7650, 7689), 'scipy.ndimage.distance_transform_edt', 'ND.distance_transform_edt', (['(1 - masks[l])'], {}), '(1 - masks[l])\n', (7675, 7689), True, 'import scipy.ndimage as ND\n'), ((961, 977), 'numpy.prod', 'np.prod', (['J.shape'], {}), '(J.shape)\n', (968, 977), True, 'import numpy as np\n'), ((5985, 5999), 'nibabel.load', 'nib.load', (['I[n]'], {}), '(I[n])\n', (5993, 5999), True, 'import nibabel as nib\n'), ((7009, 7027), 'nibabel.load', 'nib.load', (['masks[l]'], {}), '(masks[l])\n', (7017, 7027), True, 'import nibabel as nib\n'), ((6119, 6136), 'nibabel.load', 'nib.load', (['mask[n]'], {}), '(mask[n])\n', (6127, 6136), True, 'import nibabel as nib\n')] |
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pytest
import numpy as np
from onnx import TensorProto, helper
import finn.core.onnx_exec as oxe
from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer
from finn.core.datatype import DataType
from finn.core.modelwrapper import ModelWrapper
from finn.custom_op.registry import getCustomOp
from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim
from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP
from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim
from finn.transformation.fpgadataflow.prepare_ip import PrepareIP
from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim
from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode
from finn.transformation.general import GiveUniqueNodeNames
from finn.util.basic import gen_finn_dt_tensor
def make_addstreams_modelwrapper(ch, pe, idt):
inp1 = helper.make_tensor_value_info("inp1", TensorProto.FLOAT, [1, ch])
inp2 = helper.make_tensor_value_info("inp2", TensorProto.FLOAT, [1, ch])
outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, ch])
addstreams_node = helper.make_node(
"AddStreams_Batch",
["inp1", "inp2"],
["outp"],
domain="finn.custom_op.fpgadataflow",
backend="fpgadataflow",
NumChannels=ch,
PE=pe,
inputDataType=idt.name,
)
graph = helper.make_graph(
nodes=[addstreams_node],
name="graph",
inputs=[inp1, inp2],
outputs=[outp],
)
model = helper.make_model(graph, producer_name="addstreams-model")
model = ModelWrapper(model)
model.set_tensor_datatype("inp1", idt)
model.set_tensor_datatype("inp2", idt)
return model
def prepare_inputs(input1, input2):
return {"inp1": input1, "inp2": input2}
# data types
@pytest.mark.parametrize("idt", [DataType["UINT4"], DataType["UINT8"]])
# channels
@pytest.mark.parametrize("ch", [1, 64])
# folding
@pytest.mark.parametrize("fold", [-1, 2, 1])
# execution mode
@pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
@pytest.mark.vivado
def test_fpgadataflow_addstreams(idt, ch, fold, exec_mode):
if fold == -1:
pe = 1
else:
pe = max(1, ch // fold)
assert ch % pe == 0
# generate input data
x1 = gen_finn_dt_tensor(idt, (1, ch))
x2 = gen_finn_dt_tensor(idt, (1, ch))
model = make_addstreams_modelwrapper(ch, pe, idt)
if exec_mode == "cppsim":
model = model.transform(PrepareCppSim())
model = model.transform(CompileCppSim())
model = model.transform(SetExecMode("cppsim"))
elif exec_mode == "rtlsim":
model = model.transform(SetExecMode("rtlsim"))
model = model.transform(GiveUniqueNodeNames())
model = model.transform(PrepareIP("xc7z020clg400-1", 5))
model = model.transform(HLSSynthIP())
model = model.transform(PrepareRTLSim())
else:
raise Exception("Unknown exec_mode")
# prepare input data
input_dict = prepare_inputs(x1, x2)
oshape = model.get_tensor_shape("outp")
y = x1 + x2
y_expected = y.reshape(oshape)
# execute model
y_produced = oxe.execute_onnx(model, input_dict)["outp"]
y_produced = y_produced.reshape(y_expected.shape)
assert (y_produced == y_expected).all(), exec_mode + " failed"
if exec_mode == "rtlsim":
node = model.get_nodes_by_op_type("AddStreams_Batch")[0]
inst = getCustomOp(node)
cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim")
exp_cycles_dict = model.analysis(exp_cycles_per_layer)
exp_cycles = exp_cycles_dict[node.name]
assert np.isclose(exp_cycles, cycles_rtlsim, atol=10)
assert exp_cycles != 0
| [
"onnx.helper.make_graph",
"onnx.helper.make_node",
"finn.core.onnx_exec.execute_onnx",
"numpy.isclose",
"finn.transformation.fpgadataflow.prepare_cppsim.PrepareCppSim",
"finn.util.basic.gen_finn_dt_tensor",
"onnx.helper.make_tensor_value_info",
"finn.transformation.fpgadataflow.prepare_rtlsim.PrepareR... | [((3375, 3445), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""idt"""', "[DataType['UINT4'], DataType['UINT8']]"], {}), "('idt', [DataType['UINT4'], DataType['UINT8']])\n", (3398, 3445), False, 'import pytest\n'), ((3458, 3496), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ch"""', '[1, 64]'], {}), "('ch', [1, 64])\n", (3481, 3496), False, 'import pytest\n'), ((3508, 3551), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fold"""', '[-1, 2, 1]'], {}), "('fold', [-1, 2, 1])\n", (3531, 3551), False, 'import pytest\n'), ((3570, 3628), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""exec_mode"""', "['cppsim', 'rtlsim']"], {}), "('exec_mode', ['cppsim', 'rtlsim'])\n", (3593, 3628), False, 'import pytest\n'), ((2435, 2500), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""inp1"""', 'TensorProto.FLOAT', '[1, ch]'], {}), "('inp1', TensorProto.FLOAT, [1, ch])\n", (2464, 2500), False, 'from onnx import TensorProto, helper\n'), ((2512, 2577), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""inp2"""', 'TensorProto.FLOAT', '[1, ch]'], {}), "('inp2', TensorProto.FLOAT, [1, ch])\n", (2541, 2577), False, 'from onnx import TensorProto, helper\n'), ((2589, 2654), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""outp"""', 'TensorProto.FLOAT', '[1, ch]'], {}), "('outp', TensorProto.FLOAT, [1, ch])\n", (2618, 2654), False, 'from onnx import TensorProto, helper\n'), ((2678, 2860), 'onnx.helper.make_node', 'helper.make_node', (['"""AddStreams_Batch"""', "['inp1', 'inp2']", "['outp']"], {'domain': '"""finn.custom_op.fpgadataflow"""', 'backend': '"""fpgadataflow"""', 'NumChannels': 'ch', 'PE': 'pe', 'inputDataType': 'idt.name'}), "('AddStreams_Batch', ['inp1', 'inp2'], ['outp'], domain=\n 'finn.custom_op.fpgadataflow', backend='fpgadataflow', NumChannels=ch,\n PE=pe, inputDataType=idt.name)\n", 
(2694, 2860), False, 'from onnx import TensorProto, helper\n'), ((2935, 3033), 'onnx.helper.make_graph', 'helper.make_graph', ([], {'nodes': '[addstreams_node]', 'name': '"""graph"""', 'inputs': '[inp1, inp2]', 'outputs': '[outp]'}), "(nodes=[addstreams_node], name='graph', inputs=[inp1, inp2\n ], outputs=[outp])\n", (2952, 3033), False, 'from onnx import TensorProto, helper\n'), ((3081, 3139), 'onnx.helper.make_model', 'helper.make_model', (['graph'], {'producer_name': '"""addstreams-model"""'}), "(graph, producer_name='addstreams-model')\n", (3098, 3139), False, 'from onnx import TensorProto, helper\n'), ((3152, 3171), 'finn.core.modelwrapper.ModelWrapper', 'ModelWrapper', (['model'], {}), '(model)\n', (3164, 3171), False, 'from finn.core.modelwrapper import ModelWrapper\n'), ((3845, 3877), 'finn.util.basic.gen_finn_dt_tensor', 'gen_finn_dt_tensor', (['idt', '(1, ch)'], {}), '(idt, (1, ch))\n', (3863, 3877), False, 'from finn.util.basic import gen_finn_dt_tensor\n'), ((3887, 3919), 'finn.util.basic.gen_finn_dt_tensor', 'gen_finn_dt_tensor', (['idt', '(1, ch)'], {}), '(idt, (1, ch))\n', (3905, 3919), False, 'from finn.util.basic import gen_finn_dt_tensor\n'), ((4715, 4750), 'finn.core.onnx_exec.execute_onnx', 'oxe.execute_onnx', (['model', 'input_dict'], {}), '(model, input_dict)\n', (4731, 4750), True, 'import finn.core.onnx_exec as oxe\n'), ((4992, 5009), 'finn.custom_op.registry.getCustomOp', 'getCustomOp', (['node'], {}), '(node)\n', (5003, 5009), False, 'from finn.custom_op.registry import getCustomOp\n'), ((5195, 5241), 'numpy.isclose', 'np.isclose', (['exp_cycles', 'cycles_rtlsim'], {'atol': '(10)'}), '(exp_cycles, cycles_rtlsim, atol=10)\n', (5205, 5241), True, 'import numpy as np\n'), ((4038, 4053), 'finn.transformation.fpgadataflow.prepare_cppsim.PrepareCppSim', 'PrepareCppSim', ([], {}), '()\n', (4051, 4053), False, 'from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim\n'), ((4087, 4102), 
'finn.transformation.fpgadataflow.compile_cppsim.CompileCppSim', 'CompileCppSim', ([], {}), '()\n', (4100, 4102), False, 'from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim\n'), ((4136, 4157), 'finn.transformation.fpgadataflow.set_exec_mode.SetExecMode', 'SetExecMode', (['"""cppsim"""'], {}), "('cppsim')\n", (4147, 4157), False, 'from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode\n'), ((4223, 4244), 'finn.transformation.fpgadataflow.set_exec_mode.SetExecMode', 'SetExecMode', (['"""rtlsim"""'], {}), "('rtlsim')\n", (4234, 4244), False, 'from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode\n'), ((4278, 4299), 'finn.transformation.general.GiveUniqueNodeNames', 'GiveUniqueNodeNames', ([], {}), '()\n', (4297, 4299), False, 'from finn.transformation.general import GiveUniqueNodeNames\n'), ((4333, 4364), 'finn.transformation.fpgadataflow.prepare_ip.PrepareIP', 'PrepareIP', (['"""xc7z020clg400-1"""', '(5)'], {}), "('xc7z020clg400-1', 5)\n", (4342, 4364), False, 'from finn.transformation.fpgadataflow.prepare_ip import PrepareIP\n'), ((4398, 4410), 'finn.transformation.fpgadataflow.hlssynth_ip.HLSSynthIP', 'HLSSynthIP', ([], {}), '()\n', (4408, 4410), False, 'from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP\n'), ((4444, 4459), 'finn.transformation.fpgadataflow.prepare_rtlsim.PrepareRTLSim', 'PrepareRTLSim', ([], {}), '()\n', (4457, 4459), False, 'from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim\n')] |
# -*- encoding:utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
import argparse
import json
import logging
import sys
import numpy as np
from easy_rec.python.inference.predictor import Predictor
logging.basicConfig(
level=logging.INFO, format='[%(asctime)s][%(levelname)s] %(message)s')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--saved_model_dir', type=str, default=None, help='saved model directory')
parser.add_argument(
'--input_path', type=str, default=None, help='input feature path')
parser.add_argument('--save_path', type=str, default=None, help='save path')
parser.add_argument(
'--cmp_res_path', type=str, default=None, help='compare result path')
parser.add_argument(
'--cmp_key', type=str, default='probs', help='compare key')
parser.add_argument('--tol', type=float, default=1e-5, help='tolerance')
parser.add_argument(
'--label_id',
nargs='*',
type=int,
help='the label column, which is to be excluded')
parser.add_argument(
'--separator', type=str, default='', help='separator between features')
parser.add_argument(
'--rtp_separator', type=str, default='', help='separator')
args = parser.parse_args()
if not args.saved_model_dir:
logging.error('saved_model_dir is not set')
sys.exit(1)
if not args.input_path:
logging.error('input_path is not set')
sys.exit(1)
if args.label_id is None:
args.label_id = []
logging.info('input_path: ' + args.input_path)
logging.info('save_path: ' + args.save_path)
logging.info('separator: ' + args.separator)
predictor = Predictor(args.saved_model_dir)
with open(args.input_path, 'r') as fin:
batch_input = []
for line_str in fin:
line_str = line_str.strip()
line_tok = line_str.split(args.rtp_separator)
feature = line_tok[-1]
feature = [
x for fid, x in enumerate(feature.split(args.separator))
if fid not in args.label_id
]
if len(predictor.input_names) == 1:
feature = args.separator.join(feature)
batch_input.append(feature)
output = predictor.predict(batch_input)
if args.save_path:
fout = open(args.save_path, 'w')
for one in output:
fout.write(str(one) + '\n')
fout.close()
if args.cmp_res_path:
logging.info('compare result path: ' + args.cmp_res_path)
logging.info('compare key: ' + args.cmp_key)
logging.info('tolerance: ' + str(args.tol))
with open(args.cmp_res_path, 'r') as fin:
for line_id, line_str in enumerate(fin):
line_str = line_str.strip()
line_pred = json.loads(line_str)
assert np.abs(
line_pred[args.cmp_key] -
output[line_id][args.cmp_key]) < args.tol, 'line[%d]: %.8f' % (
line_id,
np.abs(line_pred[args.cmp_key] - output[line_id][args.cmp_key]))
| [
"logging.basicConfig",
"numpy.abs",
"json.loads",
"argparse.ArgumentParser",
"sys.exit",
"logging.info",
"logging.error",
"easy_rec.python.inference.predictor.Predictor"
] | [((209, 304), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""[%(asctime)s][%(levelname)s] %(message)s"""'}), "(level=logging.INFO, format=\n '[%(asctime)s][%(levelname)s] %(message)s')\n", (228, 304), False, 'import logging\n'), ((344, 369), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (367, 369), False, 'import argparse\n'), ((1501, 1547), 'logging.info', 'logging.info', (["('input_path: ' + args.input_path)"], {}), "('input_path: ' + args.input_path)\n", (1513, 1547), False, 'import logging\n'), ((1550, 1594), 'logging.info', 'logging.info', (["('save_path: ' + args.save_path)"], {}), "('save_path: ' + args.save_path)\n", (1562, 1594), False, 'import logging\n'), ((1597, 1641), 'logging.info', 'logging.info', (["('separator: ' + args.separator)"], {}), "('separator: ' + args.separator)\n", (1609, 1641), False, 'import logging\n'), ((1657, 1688), 'easy_rec.python.inference.predictor.Predictor', 'Predictor', (['args.saved_model_dir'], {}), '(args.saved_model_dir)\n', (1666, 1688), False, 'from easy_rec.python.inference.predictor import Predictor\n'), ((1300, 1343), 'logging.error', 'logging.error', (['"""saved_model_dir is not set"""'], {}), "('saved_model_dir is not set')\n", (1313, 1343), False, 'import logging\n'), ((1348, 1359), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1356, 1359), False, 'import sys\n'), ((1391, 1429), 'logging.error', 'logging.error', (['"""input_path is not set"""'], {}), "('input_path is not set')\n", (1404, 1429), False, 'import logging\n'), ((1434, 1445), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1442, 1445), False, 'import sys\n'), ((2352, 2409), 'logging.info', 'logging.info', (["('compare result path: ' + args.cmp_res_path)"], {}), "('compare result path: ' + args.cmp_res_path)\n", (2364, 2409), False, 'import logging\n'), ((2414, 2458), 'logging.info', 'logging.info', (["('compare key: ' + args.cmp_key)"], {}), "('compare key: ' + 
args.cmp_key)\n", (2426, 2458), False, 'import logging\n'), ((2656, 2676), 'json.loads', 'json.loads', (['line_str'], {}), '(line_str)\n', (2666, 2676), False, 'import json\n'), ((2692, 2755), 'numpy.abs', 'np.abs', (['(line_pred[args.cmp_key] - output[line_id][args.cmp_key])'], {}), '(line_pred[args.cmp_key] - output[line_id][args.cmp_key])\n', (2698, 2755), True, 'import numpy as np\n'), ((2855, 2918), 'numpy.abs', 'np.abs', (['(line_pred[args.cmp_key] - output[line_id][args.cmp_key])'], {}), '(line_pred[args.cmp_key] - output[line_id][args.cmp_key])\n', (2861, 2918), True, 'import numpy as np\n')] |
from nltk.tokenize import word_tokenize
from tqdm.notebook import tqdm
from pathlib import Path
import numpy as np
import pickle
import nltk
import re
import os
nltk.download('punkt')
def load_glove(glove_path):
emmbed_dict = {}
glove_path = Path(glove_path)
dict_path = glove_path.with_name(f'{glove_path.stem}.pickle')
if os.path.exists(dict_path):
with open(dict_path, 'rb') as f:
glove_dict = pickle.load(f)
return glove_dict
with open(glove_path, 'r') as f:
n_words = len(f.readlines())
f.seek(0)
for line in tqdm(f, total = n_words, desc = 'loading glove embeddings'):
values = line.split()
vector = list(filter(lambda x: re.match('-?\d{1}\.\d+', x), values))
word = ''.join([v for v in values if v not in vector])
try:
vector = np.asarray(vector, 'float32')
emmbed_dict[word]=vector
except:
pass
with open(dict_path, 'wb') as f:
pickle.dump(emmbed_dict, f)
return emmbed_dict
def convert_glove_to_features(texts, emmbed_dict):
def mean_vectorizer(text):
tokens = word_tokenize(text)
len_tokens = len(tokens)
mean_vector = np.zeros(emb_size)
for token in tokens:
try:
mean_vector += emmbed_dict[token]
except:
pass
mean_vector /= len_tokens
return mean_vector
emb_size = len(list(emmbed_dict.values())[0])
features = tqdm(map(lambda x: mean_vectorizer(x), texts), total=len(texts), desc='converting glove to features')
features = np.vstack(list(features))
return features | [
"os.path.exists",
"pickle.dump",
"nltk.download",
"pathlib.Path",
"pickle.load",
"numpy.asarray",
"re.match",
"nltk.tokenize.word_tokenize",
"numpy.zeros",
"tqdm.notebook.tqdm"
] | [((161, 183), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (174, 183), False, 'import nltk\n'), ((252, 268), 'pathlib.Path', 'Path', (['glove_path'], {}), '(glove_path)\n', (256, 268), False, 'from pathlib import Path\n'), ((343, 368), 'os.path.exists', 'os.path.exists', (['dict_path'], {}), '(dict_path)\n', (357, 368), False, 'import os\n'), ((590, 645), 'tqdm.notebook.tqdm', 'tqdm', (['f'], {'total': 'n_words', 'desc': '"""loading glove embeddings"""'}), "(f, total=n_words, desc='loading glove embeddings')\n", (594, 645), False, 'from tqdm.notebook import tqdm\n'), ((1033, 1060), 'pickle.dump', 'pickle.dump', (['emmbed_dict', 'f'], {}), '(emmbed_dict, f)\n', (1044, 1060), False, 'import pickle\n'), ((1184, 1203), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (1197, 1203), False, 'from nltk.tokenize import word_tokenize\n'), ((1259, 1277), 'numpy.zeros', 'np.zeros', (['emb_size'], {}), '(emb_size)\n', (1267, 1277), True, 'import numpy as np\n'), ((436, 450), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (447, 450), False, 'import pickle\n'), ((875, 904), 'numpy.asarray', 'np.asarray', (['vector', '"""float32"""'], {}), "(vector, 'float32')\n", (885, 904), True, 'import numpy as np\n'), ((728, 758), 're.match', 're.match', (['"""-?\\\\d{1}\\\\.\\\\d+"""', 'x'], {}), "('-?\\\\d{1}\\\\.\\\\d+', x)\n", (736, 758), False, 'import re\n')] |
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from parameterized import parameterized
from monai.transforms.transforms import Orientation
TEST_CASES = [
[{'axcodes': 'RAS'},
np.ones((2, 10, 15, 20)), {'original_axcodes': 'ALS'}, (2, 15, 10, 20)],
[{'axcodes': 'AL'},
np.ones((2, 10, 15)), {'original_axcodes': 'AR'}, (2, 10, 15)],
[{'axcodes': 'L'},
np.ones((2, 10)), {'original_axcodes': 'R'}, (2, 10)],
]
class TestOrientationCase(unittest.TestCase):
@parameterized.expand(TEST_CASES)
def test_ornt(self, init_param, img, data_param, expected_shape):
res = Orientation(**init_param)(img, **data_param)
np.testing.assert_allclose(res[0].shape, expected_shape)
if __name__ == '__main__':
unittest.main()
| [
"numpy.ones",
"monai.transforms.transforms.Orientation",
"parameterized.parameterized.expand",
"numpy.testing.assert_allclose",
"unittest.main"
] | [((1055, 1087), 'parameterized.parameterized.expand', 'parameterized.expand', (['TEST_CASES'], {}), '(TEST_CASES)\n', (1075, 1087), False, 'from parameterized import parameterized\n'), ((1315, 1330), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1328, 1330), False, 'import unittest\n'), ((750, 774), 'numpy.ones', 'np.ones', (['(2, 10, 15, 20)'], {}), '((2, 10, 15, 20))\n', (757, 774), True, 'import numpy as np\n'), ((852, 872), 'numpy.ones', 'np.ones', (['(2, 10, 15)'], {}), '((2, 10, 15))\n', (859, 872), True, 'import numpy as np\n'), ((944, 960), 'numpy.ones', 'np.ones', (['(2, 10)'], {}), '((2, 10))\n', (951, 960), True, 'import numpy as np\n'), ((1225, 1281), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['res[0].shape', 'expected_shape'], {}), '(res[0].shape, expected_shape)\n', (1251, 1281), True, 'import numpy as np\n'), ((1172, 1197), 'monai.transforms.transforms.Orientation', 'Orientation', ([], {}), '(**init_param)\n', (1183, 1197), False, 'from monai.transforms.transforms import Orientation\n')] |
#!/usr/bin/python3
import matplotlib.pyplot as plt
import numpy as np
#N,C.E.,MultiP.0,T.0,TDS.1,MultiP.1,T.1,TDS.2,MultiP.2,T.2,time,arduino N,comm,.
#1,0,4.58,20.6,0,7.36,20,0,3.46,20.1,7:20 AM,14-Jan,,
#2,92.773,639,20.2,297.852,184.4,20.2,214.844,254,20.3,7:34,15-23,"Se agrega súp
SEP = ','
# `data`: N, C.E, MultiP.0, T.0, TDS.1, MultiP.1, T.1, TDS.2, MultiP.2, T.2, time, arduino N, comm
TYPE = [ int, float, float, float, float, float, float, float, float, float, str, str, str]
data = {}
header = None
with open('2020-10-30_datos_ce.csv', 'r') as f:
for line in f:
line = line.strip().replace('\ufeff','')
if header is None:
header = line.split(SEP)
data = dict([(x, []) for x in header])
continue
if line == '':
break
row = line.split(SEP)
for i, (t, d) in enumerate(zip(TYPE, row)):
try:
val = t(d)
except ValueError:
float('nan')
data[header[i]].append( val )
# EC.Gravity, TDS.Gravity.1, TDS.Gravity.2
SENSORS = ['C.E.', 'TDS.1', 'TDS.2']
def get_time():
time_column = data['time']
def hh_mm_to_min(hh_mm):
hh, mm = hh_mm.split(':')
return 60*int(hh) + int(mm) - 440 # 440 = '7:20'
return list(map(hh_mm_to_min, time_column))
# Returns the label of the Multiparameter column corresponding to
# a given sensor column
def label_multi(label):
return header[header.index(label)+1]
"""
plt.scatter(data['N'], data['TDS.1'], c='red', label='TDS.1')
plt.scatter(data['N'], data['TDS.2'], c='orange', label='TDS.2')
plt.ylabel('Voltage [mV]')
plt.xlabel('#')
plt.show()
"""
# Remove values according to criterion
def remove_values(filter_func):
to_remove = set()
for reference_column in [data[label_multi(label)] for label in SENSORS]:
for row_index, value in enumerate(reference_column):
if filter_func(value):
to_remove.add(row_index)
for index in sorted(to_remove, reverse=True): # Must be done in reverse order (we're removing data from a list)
for key in data:
data[key].pop(index)
# Fitting TDS.Gravity values
for k in data:
# N=15 looks like an outlier
data[k].pop(14)
# Last datum is out of range
data[k].pop()
# Remove values over 3500
remove_values(lambda ec: ec > 3500)
# Plot Reference EC vs TDS voltage
plt.plot(data['TDS.1'], data[label_multi('TDS.1')], 'o--', c='red',
label='TDS.1')
plt.plot(data['TDS.2'], data[label_multi('TDS.2')], 'o--', c='orange',
label='TDS.2')
"""
# Tag number of each measure
for sensor in ['TDS.1', 'TDS.2']:
for x,y,n in zip(data[sensor], data[label_multi(sensor)], data['N']):
plt.annotate("[{}]{}".format(sensor,n), (x,y), fontsize=6)
"""
plt.xlabel('Voltage [mV]')
plt.ylabel('Reference EC [$\mu$S/cm]')
plt.title('TDS characteristic')
plt.legend()
plt.show()
import scipy.optimize
# Fit to EC = ln(a*Voltage + b)
def tds_fit_function(v, a, b, c):
return a*v**2 + b*v + c#a*v**b + c*v #a*np.log(v + b)
fit_tds1, _ = scipy.optimize.curve_fit(tds_fit_function,
data['TDS.1'],
data[label_multi('TDS.1')],
maxfev=20000)
print("TDS1: {}".format(fit_tds1))
fit_tds2, _ = scipy.optimize.curve_fit(tds_fit_function,
data['TDS.2'],
data[label_multi('TDS.2')],
maxfev=20000)
print("TDS2: {}".format(fit_tds2))
"""
# a_TDS1, b_TDS1, c_TDS1, d_TDS1 = np.polyfit(data['TDS.1'], data[label_multi('TDS.1')],
fit_tds1 = np.polyfit(data['TDS.1'], data[label_multi('TDS.1')],
3)
print("TDS1: a=%.5f\tb=%.5f\tc=%.5f\td=%.5f" % (a_TDS1, b_TDS1, c_TDS1, d_TDS1))
fit_tds2 = np.polyfit(data[label_multi('TDS.2')], data['TDS.2'],
3)
print("TDS2: a=%.5f\tb=%.5f\tc=%.5f\td=%.5f" % (a_TDS2, b_TDS2, c_TDS2, d_TDS2))
"""
"""
plt.scatter(data[label_multi('TDS.1')], data['TDS.1'], c='red', label='TDS.1')
plt.scatter(data[label_multi('TDS.2')], data['TDS.2'], c='orange', label='TDS.2')
for sensor in ['TDS.1', 'TDS.2']:
for x,y,n in zip(data[label_multi(sensor)], data[sensor], data['N']):
plt.annotate("[{}]{}".format(sensor,n), (x,y), fontsize=6)
"""
plt.plot(data[label_multi('TDS.1')][:-1], list(map(lambda v :
tds_fit_function(v, *fit_tds1),
data['TDS.1'][:-1])), '*-', c='red', label='TDS.1 Fit')
plt.plot(data[label_multi('TDS.2')][:-1], list(map(lambda v :
tds_fit_function(v, *fit_tds2),
data['TDS.2'][:-1])), '*-', c='orange', label='TDS.2 Fit')
multi_labels = list(map(label_multi, ['TDS.1', 'TDS.2']))
multi_data = list(map(lambda label: data[label], multi_labels))
min_multi = min(map(min, multi_data))
max_multi = max(map(max, multi_data))
plt.plot([min_multi, max_multi],
[min_multi, max_multi],
c='b', label='real')
plt.xlabel('Reference EC (multiparameter) [$\mu S/cm$]')
plt.ylabel('Measured EC [$\mu S/cm$]')
plt.legend()
plt.suptitle('TDS EC(voltage) fit: $a v^2 + b v + c$', y=1.005) # $a v^{b} + c v$', y=1.005)
plt.title(('TDS.1: a={:.3e}, b={:.3e}, c={:.3e}\n'
'TDS.2: a={:.3e}, b={:.3e}, c={:.3e}').format(fit_tds1[0],fit_tds1[1],fit_tds1[2],
fit_tds2[0],fit_tds2[1],fit_tds2[2]),
fontsize=10)
plt.show()
# EC.Gravity, TDS.Gravity.1 and TDS.Gravity.2 need preprocessing
K_ECG_1413 = 5.67417
K_ECG_12880 = 5.86183
K_TDS1_1413 = 0.73822
K_TDS1_12880 = 5.30749
K_TDS2_1413 = 1.03721
K_TDS2_12880 = 6.51315
THRESHOLD = 2.5
def voltage_to_ec_ecg(v):
ec = K_ECG_1413*v
if ec > THRESHOLD:
ec = K_ECG_12880*v
return ec
"""
def voltage_to_ec_tds1(v):
ec = K_TDS1_1413*v
if ec > THRESHOLD:
ec = K_TDS1_12880*v
return ec
def voltage_to_ec_tds2(v):
ec = K_TDS2_1413*v
if ec > THRESHOLD:
ec = K_TDS2_12880*v
return ec
"""
def calibrated_K(v, EC=1413):
return EC/(133.42*v**3 - 255.86*v**2 + 857*v)
# only for gravity TDS
def calibrate_label(label, k):
# Using k-th data point
return calibrated_K(data[label][k], EC=data[label_multi(label)][k])
original_data = data.copy()
for calibration in ["USING_FIT", "USING_GRAVITY_FORMULA"]:
print(f"---{calibration}---")
if calibration == "USING_FIT":
def voltage_to_ec_tds1(v):
return tds_fit_function(v, *fit_tds1)
def voltage_to_ec_tds2(v):
return tds_fit_function(v, *fit_tds2)
elif calibration == "USING_GRAVITY_FORMULA":
K_TDS1 = calibrate_label('TDS.1', 1)
K_TDS2 = calibrate_label('TDS.2', 1)
print(f'K (TDS.1): {K_TDS1}')
print(f'K (TDS.2): {K_TDS2}')
def voltage_to_ec_tds1(v):
return (133.42*v**3 - 255.86*v**2 + 857.39*v)*K_TDS1
def voltage_to_ec_tds2(v):
return (133.42*v**3 - 255.86*v**2 + 857.39*v)*K_TDS2
## overwrite
# On C.E, this assumes a linear behaviour locally around 1413, and locally around 12880
# (each with it's own slope)
data['C.E.'] = list(map(voltage_to_ec_ecg, original_data['C.E.']))
data['TDS.1'] = list(map(voltage_to_ec_tds1, original_data['TDS.1']))
data['TDS.2'] = list(map(voltage_to_ec_tds2, original_data['TDS.2']))
# Ready to plot
TO_PLOT = SENSORS
COLORS = ['green', 'red', 'orange']
N = max(data['N'])
# First plot: EC vs EC
for color, label in zip(COLORS, TO_PLOT):
ys = data[label]
xs = data[label_multi(label)]
plt.scatter(xs, ys, c=color, s=20, label=label)
multi_labels = list(map(label_multi, TO_PLOT))
multi_data = list(map(lambda label: data[label], multi_labels))
min_multi = min(map(min, multi_data))
max_multi = max(map(max, multi_data))
plt.plot([min_multi, max_multi],
[min_multi, max_multi],
c='b', label='real')
plt.xlabel('Real EC (calibration solution) [$\mu S/cm$]')
plt.ylabel('Measured EC [$\mu S/cm$]')
plt.title(f'EC vs EC [{calibration}]')
plt.legend()
plt.show()
#sensors_data = list(map(lambda label: data[label], TO_PLOT))
# Second plot: EC over time
for color, label in zip(COLORS, TO_PLOT):
ys = data[label]
real_ys = data[label_multi(label)]
plt.scatter(get_time(), ys, c=color, label=label)
plt.plot(get_time(), real_ys, '--', c=color, label=label_multi(label))
## Real time
plt.xlabel('Time [min]')
plt.ylabel('EC [$\mu S$]')
plt.title('EC over time')
plt.legend()
plt.show()
# Third plot: Square error over time
for color, label in zip(COLORS, TO_PLOT):
ys = np.array(data[label])
indices = data['N']
real_ys = np.array(data[label_multi(label)])
abs_errors = abs(real_ys - ys)
plt.scatter(get_time(), abs_errors, c=color, label=label)
plt.xlabel('Time [min]')
plt.ylabel('Absolute error $\mid EC_{real} - EC_{meas} \mid$')
plt.title(f'Absolute error {calibration}')
plt.legend()
plt.show()
# Fourth plot: Square error over time
square_errors_all = {}
for color, label in zip(COLORS, TO_PLOT):
ys = np.array(data[label])
real_ys = np.array(data[label_multi(label)])
square_errors = (real_ys - ys)**2
square_errors_all[label] = square_errors
plt.scatter(get_time(), square_errors, c=color, label=label)
plt.xlabel('Time [min]')
plt.ylabel('Square error $(EC_{real} - EC_{meas})^2$')
plt.title(f'Square error {calibration}')
plt.legend()
plt.show()
print('Root-mean-square error (uS):')
for color, label in zip(COLORS, TO_PLOT):
ys = np.array(data[label])
N = len(ys)
real_ys = np.array(data[label_multi(label)])
square_errors = (real_ys - ys)**2
square_errors = square_errors[ np.where( np.logical_not( np.isnan(square_errors)) )]
rmse = np.sqrt( sum(square_errors) / len(square_errors) )
print('{sensor} ({n}):\t{error:.3f}'.format(sensor=label, n=N, error=rmse))
# Fifth plot: EC over time and SqErr over time comparison
fig, axs = plt.subplots(2,1)
## (2nd plot)
for color, label in zip(COLORS, TO_PLOT):
axs[0].scatter(get_time(), data[label], c=color, label=label)
axs[0].plot(get_time(), data[label_multi(label)], '--', c=color, label='ref. '+label)
axs[0].set_ylabel('EC [$\mu S$]')
axs[0].set_title('EC over time')
## (4th plot)
for color, label in zip(COLORS, TO_PLOT):
axs[1].scatter(get_time(), square_errors_all[label], c=color, label=label)
axs[1].set_xlabel('Time [min]')
axs[1].set_ylabel('Square error $(EC_{real} - EC_{meas})^2$')
axs[1].set_title(f'Square error {calibration}')
axs[1].legend()
fig.tight_layout()
plt.show()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.isnan",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((2866, 2892), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Voltage [mV]"""'], {}), "('Voltage [mV]')\n", (2876, 2892), True, 'import matplotlib.pyplot as plt\n'), ((2893, 2932), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Reference EC [$\\\\mu$S/cm]"""'], {}), "('Reference EC [$\\\\mu$S/cm]')\n", (2903, 2932), True, 'import matplotlib.pyplot as plt\n'), ((2932, 2963), 'matplotlib.pyplot.title', 'plt.title', (['"""TDS characteristic"""'], {}), "('TDS characteristic')\n", (2941, 2963), True, 'import matplotlib.pyplot as plt\n'), ((2964, 2976), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2974, 2976), True, 'import matplotlib.pyplot as plt\n'), ((2977, 2987), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2985, 2987), True, 'import matplotlib.pyplot as plt\n'), ((5167, 5244), 'matplotlib.pyplot.plot', 'plt.plot', (['[min_multi, max_multi]', '[min_multi, max_multi]'], {'c': '"""b"""', 'label': '"""real"""'}), "([min_multi, max_multi], [min_multi, max_multi], c='b', label='real')\n", (5175, 5244), True, 'import matplotlib.pyplot as plt\n'), ((5264, 5321), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Reference EC (multiparameter) [$\\\\mu S/cm$]"""'], {}), "('Reference EC (multiparameter) [$\\\\mu S/cm$]')\n", (5274, 5321), True, 'import matplotlib.pyplot as plt\n'), ((5321, 5360), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Measured EC [$\\\\mu S/cm$]"""'], {}), "('Measured EC [$\\\\mu S/cm$]')\n", (5331, 5360), True, 'import matplotlib.pyplot as plt\n'), ((5360, 5372), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5370, 5372), True, 'import matplotlib.pyplot as plt\n'), ((5373, 5436), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""TDS EC(voltage) fit: $a v^2 + b v + c$"""'], {'y': '(1.005)'}), "('TDS EC(voltage) fit: $a v^2 + b v + c$', y=1.005)\n", (5385, 5436), True, 'import matplotlib.pyplot as plt\n'), ((5729, 5739), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5737, 5739), True, 'import 
matplotlib.pyplot as plt\n'), ((8157, 8234), 'matplotlib.pyplot.plot', 'plt.plot', (['[min_multi, max_multi]', '[min_multi, max_multi]'], {'c': '"""b"""', 'label': '"""real"""'}), "([min_multi, max_multi], [min_multi, max_multi], c='b', label='real')\n", (8165, 8234), True, 'import matplotlib.pyplot as plt\n'), ((8265, 8323), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Real EC (calibration solution) [$\\\\mu S/cm$]"""'], {}), "('Real EC (calibration solution) [$\\\\mu S/cm$]')\n", (8275, 8323), True, 'import matplotlib.pyplot as plt\n'), ((8327, 8366), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Measured EC [$\\\\mu S/cm$]"""'], {}), "('Measured EC [$\\\\mu S/cm$]')\n", (8337, 8366), True, 'import matplotlib.pyplot as plt\n'), ((8370, 8408), 'matplotlib.pyplot.title', 'plt.title', (['f"""EC vs EC [{calibration}]"""'], {}), "(f'EC vs EC [{calibration}]')\n", (8379, 8408), True, 'import matplotlib.pyplot as plt\n'), ((8413, 8425), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8423, 8425), True, 'import matplotlib.pyplot as plt\n'), ((8430, 8440), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8438, 8440), True, 'import matplotlib.pyplot as plt\n'), ((8814, 8838), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [min]"""'], {}), "('Time [min]')\n", (8824, 8838), True, 'import matplotlib.pyplot as plt\n'), ((8843, 8870), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""EC [$\\\\mu S$]"""'], {}), "('EC [$\\\\mu S$]')\n", (8853, 8870), True, 'import matplotlib.pyplot as plt\n'), ((8874, 8899), 'matplotlib.pyplot.title', 'plt.title', (['"""EC over time"""'], {}), "('EC over time')\n", (8883, 8899), True, 'import matplotlib.pyplot as plt\n'), ((8904, 8916), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8914, 8916), True, 'import matplotlib.pyplot as plt\n'), ((8921, 8931), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8929, 8931), True, 'import matplotlib.pyplot as plt\n'), ((9246, 9270), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [min]"""'], {}), "('Time [min]')\n", (9256, 9270), True, 'import matplotlib.pyplot as plt\n'), ((9275, 9339), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Absolute error $\\\\mid EC_{real} - EC_{meas} \\\\mid$"""'], {}), "('Absolute error $\\\\mid EC_{real} - EC_{meas} \\\\mid$')\n", (9285, 9339), True, 'import matplotlib.pyplot as plt\n'), ((9342, 9384), 'matplotlib.pyplot.title', 'plt.title', (['f"""Absolute error {calibration}"""'], {}), "(f'Absolute error {calibration}')\n", (9351, 9384), True, 'import matplotlib.pyplot as plt\n'), ((9389, 9401), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9399, 9401), True, 'import matplotlib.pyplot as plt\n'), ((9406, 9416), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9414, 9416), True, 'import matplotlib.pyplot as plt\n'), ((9786, 9810), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [min]"""'], {}), "('Time [min]')\n", (9796, 9810), True, 'import matplotlib.pyplot as plt\n'), ((9815, 9869), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Square error $(EC_{real} - EC_{meas})^2$"""'], {}), "('Square error $(EC_{real} - EC_{meas})^2$')\n", (9825, 9869), True, 'import matplotlib.pyplot as plt\n'), ((9874, 9914), 'matplotlib.pyplot.title', 'plt.title', (['f"""Square error {calibration}"""'], {}), "(f'Square error {calibration}')\n", (9883, 9914), True, 'import matplotlib.pyplot as plt\n'), ((9919, 9931), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9929, 9931), True, 'import matplotlib.pyplot as plt\n'), ((9936, 9946), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9944, 9946), True, 'import matplotlib.pyplot as plt\n'), ((10507, 10525), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (10519, 10525), True, 'import matplotlib.pyplot as plt\n'), ((11176, 11186), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11184, 11186), True, 'import matplotlib.pyplot as plt\n'), 
((7901, 7948), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xs', 'ys'], {'c': 'color', 's': '(20)', 'label': 'label'}), '(xs, ys, c=color, s=20, label=label)\n', (7912, 7948), True, 'import matplotlib.pyplot as plt\n'), ((9033, 9054), 'numpy.array', 'np.array', (['data[label]'], {}), '(data[label])\n', (9041, 9054), True, 'import numpy as np\n'), ((9546, 9567), 'numpy.array', 'np.array', (['data[label]'], {}), '(data[label])\n', (9554, 9567), True, 'import numpy as np\n'), ((10049, 10070), 'numpy.array', 'np.array', (['data[label]'], {}), '(data[label])\n', (10057, 10070), True, 'import numpy as np\n'), ((10251, 10274), 'numpy.isnan', 'np.isnan', (['square_errors'], {}), '(square_errors)\n', (10259, 10274), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from collections import OrderedDict
from typing import Callable, Dict, List
# pyre-fixme[21]: Could not find module `gym`.
import gym
import numpy as np
import reagent.types as rlt
import torch
from reagent.core.dataclasses import dataclass
from reagent.gym.envs import RecSim
from reagent.gym.preprocessors.default_preprocessors import RecsimObsPreprocessor
from scipy import stats
logger = logging.getLogger(__name__)
# score function takes user and doc features, and outputs a score
SCORE_FUNCTION_T = Callable[[np.ndarray, np.ndarray], float]
def make_default_score_fn(fn_i: int) -> SCORE_FUNCTION_T:
    """Build the i-th default scoring function.

    The returned callable ignores the user features and simply reads the
    fn_i-th entry of the candidate (doc) feature vector.
    """
    def score(user: np.ndarray, doc: np.ndarray) -> float:
        # Previously-tried alternatives, kept for reference:
        #   user = user ** (fn_i + 1); doc = doc ** (fn_i + 1); np.inner(user, doc)
        #   user[fn_i] * doc[fn_i]
        return doc[fn_i]
    return score
VM_WEIGHT_LOW = -1.0
VM_WEIGHT_HIGH = 1.0
MATCH_REWARD_BOOST = 3.0
def get_default_score_fns(num_weights):
    """Return the list of default score functions, one per weight index."""
    fns = []
    for weight_idx in range(num_weights):
        fns.append(make_default_score_fn(weight_idx))
    return fns
def get_ground_truth_weights(num_weights):
    """Hidden oracle VM weights: an integer vector of ones of length num_weights."""
    return np.full(num_weights, 1)
@dataclass
class OraclePVM(RecSim):
    """
    Wrapper over RecSim for simulating (Personalized) VM Tuning.
    The state is the same as for RecSim (user feature + candidate features).
    There are num_weights VM weights to tune, and so action space is a vector
    of length num_weights.
    OraclePVM hides num_weights number of
    (1) score_fns (akin to VM models), that take in
    user + candidate_i feature and produces a score for candidate_i.
    (2) ground_truth_weights, that are used to produce "ground truth", a.k.a.
    "Oracle", rankings.
    Reward is the Kendall-Tau between ground truth and the ranking created from the
    weights given by action. If the rankings match exactly, the reward is boosted to 3.
    NOTE: This environment only tests if the Agent can learn the hidden ground
    truth weights, which may be far from optimal (in terms of RecSim's rewards,
    which we're ignoring). This is easier for unit tests, but in the real world
    we will be trying to learn the optimal weights, and the reward signal would
    reflect that.
    TODO: made environment easier to learn from by not using RecSim.
    """
    # dataclass-configured dimensions; defaults describe a tiny toy problem
    user_feat_dim: int = 1
    candidate_feat_dim: int = 3
    num_weights: int = 3
    def __post_init_post_parse__(self):
        """Validate the config and build the hidden score functions / oracle weights."""
        assert (
            self.slate_size == self.num_candidates
        ), f"Must be equal (slate_size) {self.slate_size} != (num_candidates) {self.num_candidates}"
        super().__post_init_post_parse__()
        # hidden scoring functions and the oracle's mixing weights (see class docstring)
        self.score_fns: List[SCORE_FUNCTION_T] = get_default_score_fns(self.num_weights)
        self.ground_truth_weights: List[float] = get_ground_truth_weights(
            self.num_weights
        )
        assert len(self.score_fns) == len(
            self.ground_truth_weights
        ), f"{len(self.score_fns)} != {len(self.ground_truth_weights)}"
        assert (
            len(self.ground_truth_weights) == self.num_weights
        ), f"{self.ground_truth_weights.shape} != {self.num_weights}"
    def reset(self):
        """Reset RecSim, then overwrite user/doc features with fresh random vectors."""
        self.prev_obs = super().reset()
        # replace RecSim's features with uniform random ones of the configured dims
        self.prev_obs.update(
            {
                "user": np.random.rand(self.user_feat_dim),
                "doc": OrderedDict(
                    [
                        (str(i), np.random.rand(self.candidate_feat_dim))
                        for i in range(self.num_candidates)
                    ]
                ),
            }
        )
        return self.prev_obs
    def step(self, action):
        """Rank candidates using `action` as VM weights; reward measures agreement
        with the oracle ranking (MATCH_REWARD_BOOST on an exact match, otherwise
        the Kendall-Tau correlation between the two rankings)."""
        user_feat = self.prev_obs["user"]
        doc_feats = self.prev_obs["doc"]
        scores = self._get_scores(user_feat, doc_feats)
        ground_truth_ranking = self._get_ranking(scores, self.ground_truth_weights)
        policy_ranking = self._get_ranking(scores, action)
        # episode always terminates after one step while super().step is disabled
        t = True
        # comment out to avoid non-stationary
        # self.prev_obs, _, t, i = super().step(policy_ranking)
        num_matches = (ground_truth_ranking == policy_ranking).sum()
        if num_matches == self.slate_size:
            reward = MATCH_REWARD_BOOST
        else:
            reward, _p_value = stats.kendalltau(ground_truth_ranking, policy_ranking)
        # NOTE(review): info is None rather than the usual gym info dict — confirm callers accept it
        return self.prev_obs, reward, t, None
    def is_match(self, reward):
        # for evaluation, return true iff the reward represents a match
        return reward > (MATCH_REWARD_BOOST - 1e-6)
    @property
    def action_space(self):
        # one continuous weight per hidden score function
        return gym.spaces.Box(
            low=VM_WEIGHT_LOW, high=VM_WEIGHT_HIGH, shape=(self.num_weights,)
        )
    @action_space.setter
    def action_space(self, val):
        # ignore attempts by the base class to overwrite the action space
        pass
    def _get_scores(
        self, user_feat: np.ndarray, doc_feats: Dict[str, np.ndarray]
    ) -> np.ndarray:
        # num_docs x num_scores where i,j coordinate is jth score for ith doc
        scores = np.array(
            [
                # pyre-fixme[16]: `OraclePVM` has no attribute `score_fns`.
                [score_fn(user_feat, doc_feat) for score_fn in self.score_fns]
                for _k, doc_feat in doc_feats.items()
            ]
        )
        return scores
    def _get_ranking(self, scores: np.ndarray, weights: np.ndarray):
        """Return the indices of the top slate_size docs by weighted score (descending)."""
        assert weights.shape == (scores.shape[1],), f"{weights.shape}, {scores.shape}"
        weighted_scores = scores * weights
        values = weighted_scores.sum(axis=1)
        # argsort of negated values gives a descending ranking
        indices = np.argsort(-values)
        return indices[: self.slate_size]
    def obs_preprocessor(self, obs: np.ndarray) -> rlt.FeatureData:
        """Convert a raw RecSim observation into embedded FeatureData for training."""
        preprocessor = RecsimObsPreprocessor.create_from_env(self)
        preprocessed_obs = preprocessor(obs)
        return rlt._embed_states(preprocessed_obs)
    def serving_obs_preprocessor(self, obs: np.ndarray):
        """Convert a raw observation into the (state, presence-mask) pair used at serving."""
        preprocessor = RecsimObsPreprocessor.create_from_env(self)
        x = preprocessor(obs)
        # user was batch_size x state_size, stack
        user = x.float_features.unsqueeze(1).repeat_interleave(
            self.num_candidates, dim=1
        )
        candidates = x.candidate_docs.float_features
        combined = torch.cat([user, candidates], dim=2).squeeze(0)
        return (combined, torch.ones_like(combined, dtype=torch.uint8))
| [
"logging.getLogger",
"torch.ones_like",
"numpy.random.rand",
"reagent.types._embed_states",
"gym.spaces.Box",
"numpy.argsort",
"numpy.array",
"reagent.gym.preprocessors.default_preprocessors.RecsimObsPreprocessor.create_from_env",
"torch.cat",
"scipy.stats.kendalltau"
] | [((506, 533), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (523, 533), False, 'import logging\n'), ((1264, 1291), 'numpy.array', 'np.array', (['([1] * num_weights)'], {}), '([1] * num_weights)\n', (1272, 1291), True, 'import numpy as np\n'), ((4693, 4779), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': 'VM_WEIGHT_LOW', 'high': 'VM_WEIGHT_HIGH', 'shape': '(self.num_weights,)'}), '(low=VM_WEIGHT_LOW, high=VM_WEIGHT_HIGH, shape=(self.\n num_weights,))\n', (4707, 4779), False, 'import gym\n'), ((5619, 5638), 'numpy.argsort', 'np.argsort', (['(-values)'], {}), '(-values)\n', (5629, 5638), True, 'import numpy as np\n'), ((5773, 5816), 'reagent.gym.preprocessors.default_preprocessors.RecsimObsPreprocessor.create_from_env', 'RecsimObsPreprocessor.create_from_env', (['self'], {}), '(self)\n', (5810, 5816), False, 'from reagent.gym.preprocessors.default_preprocessors import RecsimObsPreprocessor\n'), ((5877, 5912), 'reagent.types._embed_states', 'rlt._embed_states', (['preprocessed_obs'], {}), '(preprocessed_obs)\n', (5894, 5912), True, 'import reagent.types as rlt\n'), ((5994, 6037), 'reagent.gym.preprocessors.default_preprocessors.RecsimObsPreprocessor.create_from_env', 'RecsimObsPreprocessor.create_from_env', (['self'], {}), '(self)\n', (6031, 6037), False, 'from reagent.gym.preprocessors.default_preprocessors import RecsimObsPreprocessor\n'), ((4377, 4431), 'scipy.stats.kendalltau', 'stats.kendalltau', (['ground_truth_ranking', 'policy_ranking'], {}), '(ground_truth_ranking, policy_ranking)\n', (4393, 4431), False, 'from scipy import stats\n'), ((6377, 6421), 'torch.ones_like', 'torch.ones_like', (['combined'], {'dtype': 'torch.uint8'}), '(combined, dtype=torch.uint8)\n', (6392, 6421), False, 'import torch\n'), ((3419, 3453), 'numpy.random.rand', 'np.random.rand', (['self.user_feat_dim'], {}), '(self.user_feat_dim)\n', (3433, 3453), True, 'import numpy as np\n'), ((6303, 6339), 'torch.cat', 'torch.cat', (['[user, candidates]'], 
{'dim': '(2)'}), '([user, candidates], dim=2)\n', (6312, 6339), False, 'import torch\n'), ((3546, 3585), 'numpy.random.rand', 'np.random.rand', (['self.candidate_feat_dim'], {}), '(self.candidate_feat_dim)\n', (3560, 3585), True, 'import numpy as np\n')] |
import os
import os.path as op
import numpy as np
import settings
from config import opts
from tfrecords.tfrecord_reader import TfrecordGenerator
import utils.util_funcs as uf
def evaluate_by_user_interaction():
    """Collect evaluation options interactively on stdin, then run evaluate().

    Pressing enter keeps the defaults; any other input triggers prompts for
    the dataset id (1 or 2) and the model name.
    """
    options = {"data_dir_name": "kitti_raw_test",
               "model_name": "vode_model",
               }

    print("\n===== Select evaluation options")
    print("Default options:")
    for key, value in options.items():
        print(f"\t{key} = {value}")
    print("\nIf you are happy with default options, please press enter")
    print("Otherwise, please press any other key")

    if input() == "":
        print("You selected default options.")
    else:
        message = "Type 1 or 2 to specify dataset: 1) kitti_raw_test, 2) kitti_odom_test"
        ds_id = uf.input_integer(message, 1, 2)
        if ds_id == 2:
            options["data_dir_name"] = "kitti_odom_test"
        else:
            options["data_dir_name"] = "kitti_raw_test"
        print("Type model_name: dir name under opts.DATAPATH_CKP and opts.DATAPATH_PRD")
        options["model_name"] = input()

    print("Prediction options:", options)
    evaluate(**options)
def evaluate(data_dir_name, model_name):
    """Evaluate saved depth and pose predictions against ground truth.

    Loads the prediction arrays produced by the prediction stage, iterates the
    tfrecord dataset one sample at a time, computes per-sample depth,
    trajectory, and rotation errors, and writes them as text files under
    opts.DATAPATH_EVL/<model_name>.

    :param data_dir_name: tfrecord directory name under opts.DATAPATH_TFR
    :param model_name: model directory name under opts.DATAPATH_PRD (input)
                       and opts.DATAPATH_EVL (output)
    """
    total_depth_pred, total_pose_pred = load_predictions(model_name)
    dataset = TfrecordGenerator(op.join(opts.DATAPATH_TFR, data_dir_name), batch_size=1).get_generator()

    depth_errors = []
    trajectory_errors = []
    rotational_errors = []
    uf.print_progress(None, is_total=True)
    for i, (x, y) in enumerate(dataset):
        # predictions may cover fewer frames than the dataset provides
        if i >= total_pose_pred.shape[0]:
            break
        uf.print_progress(i)
        depth_true = x["depth_gt"].numpy()[0]
        pose_true = x["pose_gt"].numpy()[0]
        depth_pred = total_depth_pred[i]
        pose_pred = total_pose_pred[i]

        depth_err = evaluate_depth(depth_pred, depth_true)
        trj_err, rot_err = evaluate_pose(pose_pred, pose_true)
        depth_errors.append(depth_err)
        trajectory_errors.append(trj_err)
        rotational_errors.append(rot_err)

    depth_errors = np.array(depth_errors)
    trajectory_errors = np.array(trajectory_errors)
    rotational_errors = np.array(rotational_errors)
    print("depth error shape:", depth_errors.shape)
    print(depth_errors[:5])
    print("trajectory error shape:", trajectory_errors.shape)
    print(trajectory_errors[:5])
    print("rotational error shape:", rotational_errors.shape)
    print(rotational_errors[:5])

    os.makedirs(op.join(opts.DATAPATH_EVL, model_name), exist_ok=True)
    # BUG FIX: filename was "depthe_error.txt" (typo); corrected to follow the
    # "<metric>_error.txt" pattern used by the other two outputs below
    np.savetxt(op.join(opts.DATAPATH_EVL, model_name, "depth_error.txt"), depth_errors, fmt="%1.4f")
    np.savetxt(op.join(opts.DATAPATH_EVL, model_name, "trajectory_error.txt"), trajectory_errors, fmt="%1.4f")
    np.savetxt(op.join(opts.DATAPATH_EVL, model_name, "rotation_error.txt"), rotational_errors, fmt="%1.4f")
def load_predictions(model_name):
    """Load saved depth and pose prediction arrays for a model.

    :param model_name: directory name under opts.DATAPATH_PRD
    :return: (depth predictions, pose predictions) as numpy arrays
    """
    pred_dir_path = op.join(opts.DATAPATH_PRD, model_name)
    os.makedirs(pred_dir_path, exist_ok=True)
    depth_file = op.join(pred_dir_path, "depth.npy")
    depth_pred = np.load(depth_file)
    print(f"[load_predictions] load depth from {pred_dir_path}, shape={depth_pred.shape}")
    pose_file = op.join(pred_dir_path, "pose.npy")
    pose_pred = np.load(pose_file)
    print(f"[load_predictions] load pose from {pred_dir_path}, shape={pose_pred.shape}")
    return depth_pred, pose_pred
def evaluate_depth(depth_pred, depth_true):
    """Compute depth error metrics for a single frame.

    Applies a validity mask (depth range), the Eigen/Garg evaluation crop,
    median scale matching, and depth clipping before computing the standard
    error metrics via compute_errors.
    """
    valid = np.logical_and(depth_true > opts.MIN_DEPTH, depth_true < opts.MAX_DEPTH)
    # crop used by Garg ECCV16 to reproduce Eigen NIPS14 results
    # if used on gt_size 370x1224 produces a crop of [-218, -3, 44, 1180]
    height, width, _ = depth_true.shape
    crop = np.array([0.40810811 * height, 0.99189189 * height,
                     0.03594771 * width, 0.96405229 * width]).astype(np.int32)
    crop_region = np.zeros(valid.shape)
    crop_region[crop[0]:crop[1], crop[2]:crop[3]] = 1
    valid = np.logical_and(valid, crop_region)
    # scale matching: align the prediction's median depth with ground truth
    ratio = np.median(depth_true[valid]) / np.median(depth_pred[valid])
    depth_pred[valid] *= ratio
    # clip prediction and compute error metrics
    depth_pred = np.clip(depth_pred, opts.MIN_DEPTH, opts.MAX_DEPTH)
    return compute_errors(depth_true[valid], depth_pred[valid])
def compute_errors(gt, pred):
    """Standard monocular-depth error metrics between ground truth and prediction.

    :param gt: 1-D array of valid ground-truth depths
    :param pred: 1-D array of predicted depths, same shape as gt
    :return: [abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3]
    """
    # accuracy-under-threshold ratios at 1.25, 1.25^2, 1.25^3
    ratio = np.maximum(gt / pred, pred / gt)
    a1 = (ratio < 1.25).mean()
    a2 = (ratio < 1.25 ** 2).mean()
    a3 = (ratio < 1.25 ** 3).mean()

    diff = gt - pred
    rmse = np.sqrt(np.mean(diff ** 2))
    log_diff = np.log(gt) - np.log(pred)
    rmse_log = np.sqrt(np.mean(log_diff ** 2))

    abs_rel = np.mean(np.abs(diff) / gt)
    sq_rel = np.mean(diff ** 2 / gt)
    return [abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3]
def evaluate_pose(pose_pred, pose_true):
    """Compare predicted snippet poses with ground truth.

    :param pose_pred: predicted source poses that transforms points in target to source frame
                      format=(tx, ty, tz, ux, uy, uz) shape=[num_src, 6]
    :param pose_true: ground truth source poses that transforms points in target to source frame
                      format=(4x4 transformation), shape=[num_src, 4, 4]
    :return: (trajectory error, rotational error)
    """
    # express both snippets as 4x4 poses relative to the first source frame
    pred_mat = recover_pred_snippet_poses(pose_pred)
    true_mat = recover_true_snippet_poses(pose_true)
    return (calc_trajectory_error(pred_mat, true_mat),
            calc_rotational_error(pred_mat, true_mat))
def recover_pred_snippet_poses(poses):
    """Convert predicted pose vectors into snippet pose matrices.

    :param poses: source poses that transforms points in target to source frame
                  format=(tx, ty, tz, ux, uy, uz) shape=[num_src, 6]
    :return: snippet pose matrices that transforms points in source[i] frame to source[0] frame
             format=(4x4 transformation) shape=[snippet_len, 5, 4, 4]
             order=[source[0], source[1], target, source[2], source[3]]
    """
    # insert the (identity) target pose into the middle of the snippet
    target_rvec = np.zeros((1, 6), dtype=np.float32)
    snippet_vec = np.concatenate([poses[:2], target_rvec, poses[2:]], axis=0)
    snippet_mat = uf.pose_rvec2matr(snippet_vec)
    return relative_pose_from_first(snippet_mat)
def recover_true_snippet_poses(poses):
    """Convert ground-truth pose matrices into snippet pose matrices.

    :param poses: source poses that transforms points in target to source frame
                  format=(4x4 transformation), shape=[snippet_len, 4, 4, 4]
    :return: snippet pose matrices that transforms points in source[i] frame to source[0] frame
             format=(4x4 transformation) shape=[snippet_len, 5, 4, 4]
             order=[source[0], source[1], target, source[2], source[3]]
    """
    # insert the (identity) target pose into the middle of the snippet
    target_mat = np.identity(4, dtype=np.float32)[np.newaxis]
    snippet_mat = np.concatenate([poses[:2], target_mat, poses[2:]], axis=0)
    return relative_pose_from_first(snippet_mat)
def relative_pose_from_first(poses_mat):
    """Re-express all poses relative to the first pose.

    inv(source[0] to target) * (source[i] to target)
        = (target to source[0]) * (source[i] to target)
        = (source[i] to source[0])

    :param poses_mat: 4x4 transformation matrices, [N, 4, 4]
    :return: 4x4 transformation matrices with origin of poses_mat[0], [N, 4, 4]
    """
    origin_inv = np.linalg.inv(poses_mat[0])
    # broadcasting: (4, 4) against (N, 4, 4) multiplies every pose by origin_inv
    return np.matmul(origin_inv, poses_mat)
def calc_trajectory_error(pose_pred_mat, pose_true_mat):
    """
    :param pose_pred_mat: predicted snippet pose matrices w.r.t the first frame, [snippet_len, 5, 4, 4]
    :param pose_true_mat: ground truth snippet pose matrices w.r.t the first frame, [snippet_len, 5, 4, 4]
    :return: trajectory error in meter [snippet_len]
    """
    xyz_pred = pose_pred_mat[:, :3, 3]
    xyz_true = pose_true_mat[:, :3, 3]
    # least-squares optimal scale factor aligning prediction to ground truth
    scale = np.sum(xyz_true * xyz_pred) / np.sum(xyz_pred ** 2)
    residual = xyz_true - scale * xyz_pred
    # per-frame Euclidean distance, normalized by the number of frames
    per_frame_dist = np.linalg.norm(residual, axis=1)
    return per_frame_dist / len(residual)
def calc_rotational_error(pose_pred_mat, pose_true_mat):
    """
    :param pose_pred_mat: predicted snippet pose matrices w.r.t the first frame, [snippet_len, 5, 4, 4]
    :param pose_true_mat: ground truth snippet pose matrices w.r.t the first frame, [snippet_len, 5, 4, 4]
    :return: rotational error in rad [snippet_len]
    """
    rot_pred = pose_pred_mat[:, :3, :3]
    rot_true = pose_true_mat[:, :3, :3]
    # relative rotation from prediction to ground truth
    rel_rot = np.matmul(np.linalg.inv(rot_pred), rot_true)
    # rotation angle from the trace identity: tr(R) = 1 + 2*cos(theta)
    traces = np.trace(rel_rot, axis1=1, axis2=2)
    return np.arccos((traces - 1.0) / 2.0)
if __name__ == "__main__":
    # compact numpy printing for the error summaries printed during evaluation
    np.set_printoptions(precision=3, suppress=True, linewidth=100)
    evaluate_by_user_interaction()
| [
"numpy.clip",
"numpy.trace",
"numpy.arccos",
"numpy.log",
"numpy.array",
"numpy.mean",
"numpy.stack",
"numpy.concatenate",
"numpy.maximum",
"numpy.identity",
"numpy.abs",
"utils.util_funcs.pose_rvec2matr",
"utils.util_funcs.input_integer",
"utils.util_funcs.print_progress",
"numpy.set_pr... | [((1496, 1534), 'utils.util_funcs.print_progress', 'uf.print_progress', (['None'], {'is_total': '(True)'}), '(None, is_total=True)\n', (1513, 1534), True, 'import utils.util_funcs as uf\n'), ((2101, 2123), 'numpy.array', 'np.array', (['depth_errors'], {}), '(depth_errors)\n', (2109, 2123), True, 'import numpy as np\n'), ((2148, 2175), 'numpy.array', 'np.array', (['trajectory_errors'], {}), '(trajectory_errors)\n', (2156, 2175), True, 'import numpy as np\n'), ((2200, 2227), 'numpy.array', 'np.array', (['rotational_errors'], {}), '(rotational_errors)\n', (2208, 2227), True, 'import numpy as np\n'), ((2948, 2986), 'os.path.join', 'op.join', (['opts.DATAPATH_PRD', 'model_name'], {}), '(opts.DATAPATH_PRD, model_name)\n', (2955, 2986), True, 'import os.path as op\n'), ((2991, 3032), 'os.makedirs', 'os.makedirs', (['pred_dir_path'], {'exist_ok': '(True)'}), '(pred_dir_path, exist_ok=True)\n', (3002, 3032), False, 'import os\n'), ((3425, 3497), 'numpy.logical_and', 'np.logical_and', (['(depth_true > opts.MIN_DEPTH)', '(depth_true < opts.MAX_DEPTH)'], {}), '(depth_true > opts.MIN_DEPTH, depth_true < opts.MAX_DEPTH)\n', (3439, 3497), True, 'import numpy as np\n'), ((3853, 3873), 'numpy.zeros', 'np.zeros', (['mask.shape'], {}), '(mask.shape)\n', (3861, 3873), True, 'import numpy as np\n'), ((3937, 3968), 'numpy.logical_and', 'np.logical_and', (['mask', 'crop_mask'], {}), '(mask, crop_mask)\n', (3951, 3968), True, 'import numpy as np\n'), ((4157, 4208), 'numpy.clip', 'np.clip', (['depth_pred', 'opts.MIN_DEPTH', 'opts.MAX_DEPTH'], {}), '(depth_pred, opts.MIN_DEPTH, opts.MAX_DEPTH)\n', (4164, 4208), True, 'import numpy as np\n'), ((4338, 4370), 'numpy.maximum', 'np.maximum', (['(gt / pred)', '(pred / gt)'], {}), '(gt / pred, pred / gt)\n', (4348, 4370), True, 'import numpy as np\n'), ((4695, 4725), 'numpy.mean', 'np.mean', (['((gt - pred) ** 2 / gt)'], {}), '((gt - pred) ** 2 / gt)\n', (4702, 4725), True, 'import numpy as np\n'), ((6059, 6099), 'numpy.zeros', 
'np.zeros', ([], {'shape': '(1, 6)', 'dtype': 'np.float32'}), '(shape=(1, 6), dtype=np.float32)\n', (6067, 6099), True, 'import numpy as np\n'), ((6116, 6175), 'numpy.concatenate', 'np.concatenate', (['[poses[:2], target_pose, poses[2:]]'], {'axis': '(0)'}), '([poses[:2], target_pose, poses[2:]], axis=0)\n', (6130, 6175), True, 'import numpy as np\n'), ((6192, 6220), 'utils.util_funcs.pose_rvec2matr', 'uf.pose_rvec2matr', (['poses_vec'], {}), '(poses_vec)\n', (6209, 6220), True, 'import utils.util_funcs as uf\n'), ((6862, 6921), 'numpy.concatenate', 'np.concatenate', (['[poses[:2], target_pose, poses[2:]]'], {'axis': '(0)'}), '([poses[:2], target_pose, poses[2:]], axis=0)\n', (6876, 6921), True, 'import numpy as np\n'), ((7603, 7642), 'numpy.stack', 'np.stack', (['poses_mat_transformed'], {'axis': '(0)'}), '(poses_mat_transformed, axis=0)\n', (7611, 7642), True, 'import numpy as np\n'), ((8811, 8847), 'numpy.trace', 'np.trace', (['rot_rela'], {'axis1': '(1)', 'axis2': '(2)'}), '(rot_rela, axis1=1, axis2=2)\n', (8819, 8847), True, 'import numpy as np\n'), ((8860, 8890), 'numpy.arccos', 'np.arccos', (['((trace - 1.0) / 2.0)'], {}), '((trace - 1.0) / 2.0)\n', (8869, 8890), True, 'import numpy as np\n'), ((8939, 9001), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'suppress': '(True)', 'linewidth': '(100)'}), '(precision=3, suppress=True, linewidth=100)\n', (8958, 9001), True, 'import numpy as np\n'), ((811, 842), 'utils.util_funcs.input_integer', 'uf.input_integer', (['message', '(1)', '(2)'], {}), '(message, 1, 2)\n', (827, 842), True, 'import utils.util_funcs as uf\n'), ((1644, 1664), 'utils.util_funcs.print_progress', 'uf.print_progress', (['i'], {}), '(i)\n', (1661, 1664), True, 'import utils.util_funcs as uf\n'), ((2515, 2553), 'os.path.join', 'op.join', (['opts.DATAPATH_EVL', 'model_name'], {}), '(opts.DATAPATH_EVL, model_name)\n', (2522, 2553), True, 'import os.path as op\n'), ((2585, 2643), 'os.path.join', 'op.join', 
(['opts.DATAPATH_EVL', 'model_name', '"""depthe_error.txt"""'], {}), "(opts.DATAPATH_EVL, model_name, 'depthe_error.txt')\n", (2592, 2643), True, 'import os.path as op\n'), ((2687, 2749), 'os.path.join', 'op.join', (['opts.DATAPATH_EVL', 'model_name', '"""trajectory_error.txt"""'], {}), "(opts.DATAPATH_EVL, model_name, 'trajectory_error.txt')\n", (2694, 2749), True, 'import os.path as op\n'), ((2798, 2858), 'os.path.join', 'op.join', (['opts.DATAPATH_EVL', 'model_name', '"""rotation_error.txt"""'], {}), "(opts.DATAPATH_EVL, model_name, 'rotation_error.txt')\n", (2805, 2858), True, 'import os.path as op\n'), ((3058, 3093), 'os.path.join', 'op.join', (['pred_dir_path', '"""depth.npy"""'], {}), "(pred_dir_path, 'depth.npy')\n", (3065, 3093), True, 'import os.path as op\n'), ((3210, 3244), 'os.path.join', 'op.join', (['pred_dir_path', '"""pose.npy"""'], {}), "(pred_dir_path, 'pose.npy')\n", (3217, 3244), True, 'import os.path as op\n'), ((4003, 4030), 'numpy.median', 'np.median', (['depth_true[mask]'], {}), '(depth_true[mask])\n', (4012, 4030), True, 'import numpy as np\n'), ((4033, 4060), 'numpy.median', 'np.median', (['depth_pred[mask]'], {}), '(depth_pred[mask])\n', (4042, 4060), True, 'import numpy as np\n'), ((6804, 6836), 'numpy.identity', 'np.identity', (['(4)'], {'dtype': 'np.float32'}), '(4, dtype=np.float32)\n', (6815, 6836), True, 'import numpy as np\n'), ((8139, 8166), 'numpy.sum', 'np.sum', (['(xyz_true * xyz_pred)'], {}), '(xyz_true * xyz_pred)\n', (8145, 8166), True, 'import numpy as np\n'), ((8169, 8190), 'numpy.sum', 'np.sum', (['(xyz_pred ** 2)'], {}), '(xyz_pred ** 2)\n', (8175, 8190), True, 'import numpy as np\n'), ((8764, 8787), 'numpy.linalg.inv', 'np.linalg.inv', (['rot_pred'], {}), '(rot_pred)\n', (8777, 8787), True, 'import numpy as np\n'), ((3694, 3802), 'numpy.array', 'np.array', (['[0.40810811 * gt_height, 0.99189189 * gt_height, 0.03594771 * gt_width, \n 0.96405229 * gt_width]'], {}), '([0.40810811 * gt_height, 0.99189189 * gt_height, 
0.03594771 *\n gt_width, 0.96405229 * gt_width])\n', (3702, 3802), True, 'import numpy as np\n'), ((4562, 4572), 'numpy.log', 'np.log', (['gt'], {}), '(gt)\n', (4568, 4572), True, 'import numpy as np\n'), ((4575, 4587), 'numpy.log', 'np.log', (['pred'], {}), '(pred)\n', (4581, 4587), True, 'import numpy as np\n'), ((4657, 4674), 'numpy.abs', 'np.abs', (['(gt - pred)'], {}), '(gt - pred)\n', (4663, 4674), True, 'import numpy as np\n'), ((7485, 7511), 'numpy.linalg.inv', 'np.linalg.inv', (['pose_origin'], {}), '(pose_origin)\n', (7498, 7511), True, 'import numpy as np\n'), ((8255, 8286), 'numpy.sum', 'np.sum', (['(traj_error ** 2)'], {'axis': '(1)'}), '(traj_error ** 2, axis=1)\n', (8261, 8286), True, 'import numpy as np\n'), ((1343, 1384), 'os.path.join', 'op.join', (['opts.DATAPATH_TFR', 'data_dir_name'], {}), '(opts.DATAPATH_TFR, data_dir_name)\n', (1350, 1384), True, 'import os.path as op\n')] |
from typing import Dict, List, Tuple, Union, Any, TypeVar
from scipy.sparse.csr import csr_matrix
from numpy import memmap
from sqlitedict import SqliteDict
from tempfile import mkdtemp
from DocumentFeatureSelection.init_logger import logger
from numpy import ndarray, int32, int64
import pickle
import json
import csv
import os
import shutil
# this class is from https://code.activestate.com/recipes/576642/
class PersistentDict(dict):
    '''Persistent dictionary with an API compatible with shelve and anydbm.

    The dict is kept in memory, so the dictionary operations run as fast as
    a regular dictionary. Write to disk is delayed until close or sync
    (similar to gdbm's fast mode). Input file format is automatically
    discovered; output format is selectable between pickle, json, and csv,
    all backed by fast C implementations.
    '''

    def __init__(self, filename, flag='c', mode=None, format='pickle', *args, **kwds):
        self.flag = flag          # r=readonly, c=create, or n=new
        self.mode = mode          # None or an octal triple like 0644
        self.format = format      # 'csv', 'json', or 'pickle'
        self.filename = filename
        # 'n' means start fresh; otherwise load existing content if readable
        if flag != 'n' and os.access(filename, os.R_OK):
            with open(filename, 'rb' if format == 'pickle' else 'r') as fileobj:
                self.load(fileobj)
        dict.__init__(self, *args, **kwds)

    def sync(self):
        'Write dict to disk'
        if self.flag == 'r':
            return
        tempname = self.filename + '.tmp'
        try:
            with open(tempname, 'wb' if self.format == 'pickle' else 'w') as fileobj:
                self.dump(fileobj)
        except Exception:
            # do not leave a half-written temp file behind
            os.remove(tempname)
            raise
        shutil.move(tempname, self.filename)    # atomic commit
        if self.mode is not None:
            os.chmod(self.filename, self.mode)

    def close(self):
        self.sync()

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def dump(self, fileobj):
        """Serialize the dict to fileobj in the configured output format."""
        if self.format == 'pickle':
            pickle.dump(dict(self), fileobj, 2)
        elif self.format == 'json':
            json.dump(self, fileobj, separators=(',', ':'))
        elif self.format == 'csv':
            csv.writer(fileobj).writerows(self.items())
        else:
            raise NotImplementedError('Unknown format: ' + repr(self.format))

    def load(self, fileobj):
        """Populate the dict from fileobj, auto-detecting its format."""
        # try formats from most restrictive to least restrictive
        for parse in (pickle.load, json.load, csv.reader):
            fileobj.seek(0)
            try:
                return self.update(parse(fileobj))
            except Exception:
                pass
        raise ValueError('File not in a supported format')
class SetDocumentInformation(object):
    """Container for a document-feature matrix and its label / feature indexes.

    The input mapping must provide three keys:
        - matrix_object: Union[csr_matrix, ndarray]
        - label2id: Dict[str, str]
        - feature2id: Dict[str, str]
    """
    __slots__ = ['matrix_object', 'label2id', 'feature2id']

    def __init__(self, dict_matrix_index: Union[Dict[str, Any], SqliteDict, PersistentDict]):
        """
        * Keys
            - matrix_object: Union[csr_matrix, ndarray]
            - label2id: Dict[str, str]
            - feature2id: Dict[str, str]
        """
        # idiom fix: validate all required keys in one loop with `not in`
        # (was three copies of `if not "x" in ...`)
        for required_key in ('matrix_object', 'label2id', 'feature2id'):
            if required_key not in dict_matrix_index:
                raise Exception("dict_matrix_index must have key='{}'".format(required_key))
        self.matrix_object = dict_matrix_index['matrix_object']
        self.label2id = dict_matrix_index['label2id']
        self.feature2id = dict_matrix_index['feature2id']
        # flush disk-backed mappings; a plain dict needs no sync.
        # NOTE: PersistentDict subclasses dict, so it takes the first branch
        # and its sync() below never runs (behavior kept from the original).
        if isinstance(dict_matrix_index, dict):
            pass
        elif isinstance(dict_matrix_index, (PersistentDict, SqliteDict)):
            dict_matrix_index.sync()
        else:
            raise Exception()
class DataCsrMatrix(object):
"""* What you can do
- You can keep information for keeping matrix object.
"""
__slots__ = ['cache_backend', 'csr_matrix_',
'label2id_dict', 'vocabulary',
'n_docs_distribution', 'n_term_freq_distribution', 'path_working_dir']
def __init__(self,
csr_matrix_: csr_matrix,
label2id_dict: Dict[str, int],
vocabulary: Dict[str, int],
n_docs_distribution: ndarray,
n_term_freq_distribution: ndarray,
is_use_cache: bool=False,
is_use_memmap: bool=False,
cache_backend: str='PersistentDict',
path_working_dir: str=None):
"""* Parameters
-----------------
- csr_matrix_: Matrix object which saves term frequency or document frequency
- label2id_dict: Dict object whose key is label-name, value is row-index of the given matrix.
>>> {'label_b': 0, 'label_c': 1, 'label_a': 2}
- vocabulary: Dict object whose key is feature-name, value is column-index of the given matrix.
>>> {'label_b': 0, 'label_c': 1, 'label_a': 2}
- n_docs_distribution: Sequence object(list,ndarray). It saves a distribution of N(docs) in each label.
- n_term_freq_distribution: Sequence object(list,ndarray). It saves a distribution of N(all terms) in each label.
- is_use_cache: boolean. It True; the matrix object is saved on the disk. It saves memory of your machine.
- is_use_memmap: boolean. It True; the matrix object is saved on the disk. It saves memory of your machine.
- cache_backend: str. {PersistentDict, SqliteDict}, backend to save this object on the disk.
- path_working_dir: str. Path to save temporary cache objects.
"""
self.n_docs_distribution = n_docs_distribution
self.n_term_freq_distribution = n_term_freq_distribution
self.cache_backend = cache_backend
if (is_use_memmap or is_use_cache) and path_working_dir is None:
self.path_working_dir = mkdtemp()
logger.info("Temporary files are at {}".format(self.path_working_dir))
else:
self.path_working_dir = path_working_dir
if is_use_cache:
"""You use disk-drive for keeping object.
"""
path_vocabulary_cache_obj = os.path.join(self.path_working_dir, 'vocabulary.cache')
path_label_2_dict_cache_obj = os.path.join(self.path_working_dir, 'label_2_dict.cache')
self.vocabulary = self.initialize_cache_dict_object(path_vocabulary_cache_obj)
self.vocabulary = vocabulary
self.label2id_dict = self.initialize_cache_dict_object(path_label_2_dict_cache_obj)
logger.info("Now saving into local file...")
for k, v in label2id_dict.items():
self.label2id_dict[k] = v
if isinstance(self.label2id_dict, PersistentDict):
self.label2id_dict.sync()
else:
"""Keep everything on memory
"""
self.label2id_dict = label2id_dict
self.vocabulary = vocabulary
if is_use_memmap:
"""You use disk-drive for keeping object
"""
path_memmap_obj = os.path.join(self.path_working_dir, 'matrix.memmap')
self.csr_matrix_ = self.initialize_memmap_object(csr_matrix_, path_memmap_object=path_memmap_obj)
else:
self.csr_matrix_ = csr_matrix_
def initialize_cache_dict_object(self, path_cache_file):
    """Create the on-disk dict backend at *path_cache_file*.

    The concrete backend class is chosen by ``self.cache_backend``
    ('PersistentDict' or 'SqliteDict'); any other name raises ``Exception``.
    """
    backend = self.cache_backend
    if backend == 'PersistentDict':
        return PersistentDict(path_cache_file, flag='c', format='json')
    if backend == 'SqliteDict':
        return SqliteDict(path_cache_file, autocommit=True)
    raise Exception('No such cache_backend option named {}'.format(backend))
def initialize_memmap_object(self, matrix_object: csr_matrix, path_memmap_object: str)->memmap:
    """Materialise *matrix_object* into a writable float64 memmap file.

    The memmap is created at *path_memmap_object* with the same shape as the
    input matrix and filled with its dense contents.
    """
    mmap_array = memmap(path_memmap_object, dtype='float64', mode='w+', shape=matrix_object.shape)
    dense_matrix = matrix_object.todense()
    mmap_array[:] = dense_matrix[:]
    return mmap_array
def __str__(self):
    """Summarise the stored matrix (type and shape) and the working directory."""
    template = """matrix-type={}, matrix-size={}, path_working_dir={}"""
    return template.format(type(self.csr_matrix_),
                           self.csr_matrix_.shape,
                           self.path_working_dir)
class ROW_COL_VAL(object):
    """Lightweight record for one non-zero cell of a CSR matrix."""
    # __slots__ avoids a per-instance __dict__; many of these objects are
    # created (one per non-zero entry), so this keeps memory usage low.
    __slots__ = ('row', 'col', 'val')

    def __init__(self, row: int, col: int, val: int):
        """Store the matrix coordinates (*row*, *col*) and the cell value *val*."""
        self.row, self.col, self.val = row, col, val
class ScoredResultObject(object):
    """Container for the output of a feature-extraction computation.

    Holds the scored sparse matrix together with the label/feature index
    mappings, and converts the matrix into per-feature score records.
    """
    def __init__(self,
                 scored_matrix: csr_matrix,
                 label2id_dict: Union[Dict[str, Any], ndarray],
                 feature2id_dict: Union[Dict[str, Any], ndarray]=None,
                 method: str=None,
                 matrix_form: str=None,
                 frequency_matrix: csr_matrix=None):
        """*Parameters
        ------------
        - scored_matrix: Matrix object which saves result of feature-extraction
        - label2id_dict: Dict object whose key is label-name, value is row-index of the matrix.
        - feature2id_dict: Dict object whose key is feature-name, value is column-index of the matrix.
        - method: a name of feature-extraction method.
        - matrix_form: a type of the given matrix for feature-extraction computation. {term_freq, doc_freq}
        - frequency_matrix: Matrix object(term-frequency or document-frequency). The matrix is data-source of feature-extraction computation.
        """
        # BUGFIX: the original signature read ``feature2id_dict=Union[...]``,
        # i.e. the typing construct itself became the default VALUE. The
        # parameter is now annotated and defaults to None instead; callers that
        # passed it positionally or by keyword are unaffected.
        self.scored_matrix = scored_matrix
        self.label2id_dict = label2id_dict
        self.feature2id_dict = feature2id_dict
        self.method = method
        self.matrix_form = matrix_form
        self.frequency_matrix = frequency_matrix
        # Backwards-compatible alias for the old method name.
        self.ScoreMatrix2ScoreDictionary = self.convert_score_matrix2score_record

    def __conv_into_dict_format(self, word_score_items):
        """Group flat score records into {label: [{'feature':..., 'score':...}]}."""
        out_format_structure = {}
        for item in word_score_items:
            # BUGFIX: records built by SUB_FUNC_feature_extraction use the key
            # 'feature'; older records used 'word'. Accept both so that
            # outformat='dict' no longer raises KeyError on current records.
            feature = item['word'] if 'word' in item else item['feature']
            if item['label'] not in out_format_structure:
                out_format_structure[item['label']] = [{'feature': feature, 'score': item['score']}]
            else:
                out_format_structure[item['label']].append({'feature': feature, 'score': item['score']})
        return out_format_structure

    def convert_score_matrix2score_record(self,
                                          outformat: str='items',
                                          sort_desc: bool=True):
        """* What you can do
        - Get dictionary structure from weighted-featured scores.
        - You can choose 'dict' or 'items' for ```outformat``` parameter.

        * Output
        ---------------------
        - If outformat='dict', you get
        >>> {label_name:{feature: score}}
        Else if outformat='items', you get
        >>> [{feature: score}]
        """
        scored_objects = self.get_feature_dictionary(
            weighted_matrix=self.scored_matrix,
            vocabulary=self.feature2id_dict,
            label_group_dict=self.label2id_dict,
            frequency_matrix=self.frequency_matrix
        )
        if sort_desc:
            scored_objects = sorted(scored_objects, key=lambda x: x['score'], reverse=True)
        if outformat == 'dict':
            out_format_structure = self.__conv_into_dict_format(scored_objects)
        elif outformat == 'items':
            out_format_structure = scored_objects
        else:
            raise ValueError('outformat must be either of {dict, items}')
        return out_format_structure

    def __get_value_index(self, row_index, column_index, weight_csr_matrix, verbose=False):
        """Return the cell value at (row_index, column_index)."""
        assert isinstance(row_index, (int, int32, int64))
        assert isinstance(column_index, (int, int32, int64))
        assert isinstance(weight_csr_matrix, (ndarray, csr_matrix))
        value = weight_csr_matrix[row_index, column_index]
        return value

    # NOTE: ROW_COL_VAL annotations are quoted (forward-reference style) so
    # this class definition does not require ROW_COL_VAL to be evaluated at
    # class-creation time; runtime behavior is unchanged.
    def make_non_zero_information(self, weight_csr_matrix: csr_matrix)->List['ROW_COL_VAL']:
        """Construct Tuple of matrix value. Return value is array of ROW_COL_VAL namedtuple.
        :param weight_csr_matrix:
        :return:
        """
        assert isinstance(weight_csr_matrix, (csr_matrix, ndarray))
        row_col_index_array = weight_csr_matrix.nonzero()
        row_indexes = row_col_index_array[0]
        column_indexes = row_col_index_array[1]
        assert len(row_indexes) == len(column_indexes)

        value_index_items = [None] * len(row_indexes)  # type: List[ROW_COL_VAL]
        for i in range(0, len(row_indexes)):
            value_index_items[i] = ROW_COL_VAL(row_indexes[i],
                                               column_indexes[i],
                                               self.__get_value_index(row_indexes[i], column_indexes[i], weight_csr_matrix))
        return value_index_items

    def SUB_FUNC_feature_extraction(self,
                                    weight_row_col_val_obj: 'ROW_COL_VAL',
                                    dict_index_information: Dict[str, Dict[str, str]],
                                    dict_position2value: Dict[Tuple[int, int], float]=None)->Dict[str, Any]:
        """This function returns weighted score between label and words.
        Input csr matrix must be 'document-frequency' matrix, where records #document that word appears in document set.
        [NOTE] This is not TERM-FREQUENCY.
        For example,
        If 'iPhone' appears in 5 documents of 'IT' category document set, value must be 5.
        Even if 10 'iPhone' words in 'IT' category document set, value is still 5.
        """
        assert isinstance(weight_row_col_val_obj, ROW_COL_VAL)
        feature_score_record = {
            'score': weight_row_col_val_obj.val,
            'label': self.get_label(weight_row_col_val_obj, dict_index_information['id2label']),
            'feature': self.get_word(weight_row_col_val_obj, dict_index_information['id2vocab'])
        }
        if not dict_position2value is None:
            # dict_position2value is keyed (col, row); see get_feature_dictionary.
            if (weight_row_col_val_obj.col, weight_row_col_val_obj.row) in dict_position2value:
                frequency = dict_position2value[tuple([weight_row_col_val_obj.col, weight_row_col_val_obj.row])]
            else:
                # When a feature-extraction method is BNS, frequency=0 is possible.
                frequency = 0
            feature_score_record.update({"frequency": frequency})
        return feature_score_record

    def get_feature_dictionary(self,
                               weighted_matrix: csr_matrix,
                               vocabulary: Dict[str, int],
                               label_group_dict: Dict[str, int],
                               cache_backend: str = 'PersistentDict',
                               is_use_cache: bool=True,
                               frequency_matrix: csr_matrix=None)->List[Dict[str, Any]]:
        """* What you can do
        - Get dictionary structure from weighted-featured scores.
        """
        assert isinstance(weighted_matrix, csr_matrix)
        assert isinstance(vocabulary, dict)
        assert isinstance(label_group_dict, dict)
        logger.debug(msg='Start making scored dictionary object from scored matrix')
        logger.debug(msg='Input matrix size= {} * {}'.format(weighted_matrix.shape[0], weighted_matrix.shape[1]))

        weight_value_index_items = self.make_non_zero_information(weighted_matrix)
        if not frequency_matrix is None:
            frequency_value_index_items = self.make_non_zero_information(frequency_matrix)
            # Keyed on (col, row); consumed by SUB_FUNC_feature_extraction.
            dict_position2value = {(t_col_row.col, t_col_row.row): t_col_row.val for t_col_row in frequency_value_index_items}
        else:
            dict_position2value = None

        if is_use_cache:
            dict_index_information = self.initialize_cache_dict_object(cache_backend, file_name='dict_index_information')
        else:
            dict_index_information = {}
        dict_index_information['id2label'] = {value: key for key, value in label_group_dict.items()}
        dict_index_information['id2vocab'] = {value: key for key, value in vocabulary.items()}
        # Flush the cache backend, if one is in use.
        if isinstance(dict_index_information, SqliteDict):
            dict_index_information.commit()
        elif isinstance(dict_index_information, PersistentDict):
            dict_index_information.sync()
        else:
            pass

        # TODO may be this func takes too much time. consider cython.
        seq_score_objects = [None] * len(weight_value_index_items)  # type: List[Dict[str,Any]]
        for i, weight_row_col_val_tuple in enumerate(weight_value_index_items):
            seq_score_objects[i] = self.SUB_FUNC_feature_extraction(
                weight_row_col_val_tuple,
                dict_index_information,
                dict_position2value)
        logger.debug(msg='Finished making scored dictionary')
        return seq_score_objects

    def get_label(self, row_col_val_tuple, label_id)->str:
        """Resolve a matrix row index into its label name."""
        assert isinstance(row_col_val_tuple, ROW_COL_VAL)
        assert isinstance(label_id, dict)
        label = label_id[row_col_val_tuple.row]
        return label

    def get_word(self, row_col_val_tuple: 'ROW_COL_VAL', vocabulary: Dict[int, str])->Union[str, List[str], Tuple[str, ...]]:
        """* what u can do
        - It gets feature name from the given matrix object.
        - A feature is json serialized, thus this method tries to de-serialize json string into python object.
        - Original feature object is possibly string(word), list of str, list of str.
        """
        assert isinstance(row_col_val_tuple, ROW_COL_VAL)
        assert isinstance(vocabulary, dict)
        vocab = vocabulary[row_col_val_tuple.col]
        try:
            feature_object = json.loads(vocab)
            if len(feature_object) == 1:
                # When feature is word, the length is 1 #
                feature_object = feature_object[0]
        except (ValueError, TypeError):
            # BUGFIX: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit. json.loads raises ValueError
            # (JSONDecodeError) for non-JSON text and TypeError for non-string
            # input; in either case fall back to the raw vocabulary entry.
            feature_object = vocab
        return feature_object

    def initialize_cache_dict_object(self, cache_backend: str, file_name: str, path_cache_file: str=None):
        """Create an on-disk dict backend named *file_name* under *path_cache_file*.

        BUGFIX: the original default ``path_cache_file=mkdtemp()`` was
        evaluated once at function-definition time, so every call (in every
        process run) shared one directory created at import. A fresh temporary
        directory is now created per call when the caller does not supply one.
        """
        if path_cache_file is None:
            path_cache_file = mkdtemp()
        if cache_backend == 'PersistentDict':
            return PersistentDict(os.path.join(path_cache_file, file_name), flag='c', format='json')
        elif cache_backend == 'SqliteDict':
            return SqliteDict(os.path.join(path_cache_file, file_name), autocommit=True)
        else:
            raise Exception('No such cache_backend option named {}'.format(cache_backend))
FeatureType = TypeVar('T', str, Tuple[Any])
AvailableInputTypes = TypeVar('T', PersistentDict,
SqliteDict,
Dict[str,List[List[Union[str,Tuple[Any]]]]]) | [
"json.loads",
"DocumentFeatureSelection.init_logger.logger.info",
"shutil.move",
"numpy.memmap",
"os.access",
"os.path.join",
"csv.writer",
"os.chmod",
"os.remove",
"sqlitedict.SqliteDict",
"tempfile.mkdtemp",
"DocumentFeatureSelection.init_logger.logger.debug",
"json.dump",
"typing.TypeVa... | [((18883, 18912), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'str', 'Tuple[Any]'], {}), "('T', str, Tuple[Any])\n", (18890, 18912), False, 'from typing import Dict, List, Tuple, Union, Any, TypeVar\n'), ((18935, 19026), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'PersistentDict', 'SqliteDict', 'Dict[str, List[List[Union[str, Tuple[Any]]]]]'], {}), "('T', PersistentDict, SqliteDict, Dict[str, List[List[Union[str,\n Tuple[Any]]]]])\n", (18942, 19026), False, 'from typing import Dict, List, Tuple, Union, Any, TypeVar\n'), ((1901, 1937), 'shutil.move', 'shutil.move', (['tempname', 'self.filename'], {}), '(tempname, self.filename)\n', (1912, 1937), False, 'import shutil\n'), ((8250, 8336), 'numpy.memmap', 'memmap', (['path_memmap_object'], {'dtype': '"""float64"""', 'mode': '"""w+"""', 'shape': 'matrix_object.shape'}), "(path_memmap_object, dtype='float64', mode='w+', shape=matrix_object.\n shape)\n", (8256, 8336), False, 'from numpy import memmap\n'), ((15526, 15602), 'DocumentFeatureSelection.init_logger.logger.debug', 'logger.debug', ([], {'msg': '"""Start making scored dictionary object from scored matrix"""'}), "(msg='Start making scored dictionary object from scored matrix')\n", (15538, 15602), False, 'from DocumentFeatureSelection.init_logger import logger\n'), ((17194, 17247), 'DocumentFeatureSelection.init_logger.logger.debug', 'logger.debug', ([], {'msg': '"""Finished making scored dictionary"""'}), "(msg='Finished making scored dictionary')\n", (17206, 17247), False, 'from DocumentFeatureSelection.init_logger import logger\n'), ((18470, 18479), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (18477, 18479), False, 'from tempfile import mkdtemp\n'), ((1281, 1309), 'os.access', 'os.access', (['filename', 'os.R_OK'], {}), '(filename, os.R_OK)\n', (1290, 1309), False, 'import os\n'), ((2003, 2037), 'os.chmod', 'os.chmod', (['self.filename', 'self.mode'], {}), '(self.filename, self.mode)\n', (2011, 2037), False, 'import os\n'), ((6286, 6295), 
'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (6293, 6295), False, 'from tempfile import mkdtemp\n'), ((6582, 6637), 'os.path.join', 'os.path.join', (['self.path_working_dir', '"""vocabulary.cache"""'], {}), "(self.path_working_dir, 'vocabulary.cache')\n", (6594, 6637), False, 'import os\n'), ((6680, 6737), 'os.path.join', 'os.path.join', (['self.path_working_dir', '"""label_2_dict.cache"""'], {}), "(self.path_working_dir, 'label_2_dict.cache')\n", (6692, 6737), False, 'import os\n'), ((6979, 7023), 'DocumentFeatureSelection.init_logger.logger.info', 'logger.info', (['"""Now saving into local file..."""'], {}), "('Now saving into local file...')\n", (6990, 7023), False, 'from DocumentFeatureSelection.init_logger import logger\n'), ((7504, 7556), 'os.path.join', 'os.path.join', (['self.path_working_dir', '"""matrix.memmap"""'], {}), "(self.path_working_dir, 'matrix.memmap')\n", (7516, 7556), False, 'import os\n'), ((18127, 18144), 'json.loads', 'json.loads', (['vocab'], {}), '(vocab)\n', (18137, 18144), False, 'import json\n'), ((1810, 1829), 'os.remove', 'os.remove', (['tempname'], {}), '(tempname)\n', (1819, 1829), False, 'import os\n'), ((2350, 2397), 'json.dump', 'json.dump', (['self', 'fileobj'], {'separators': "(',', ':')"}), "(self, fileobj, separators=(',', ':'))\n", (2359, 2397), False, 'import json\n'), ((7981, 8025), 'sqlitedict.SqliteDict', 'SqliteDict', (['path_cache_file'], {'autocommit': '(True)'}), '(path_cache_file, autocommit=True)\n', (7991, 8025), False, 'from sqlitedict import SqliteDict\n'), ((18562, 18602), 'os.path.join', 'os.path.join', (['path_cache_file', 'file_name'], {}), '(path_cache_file, file_name)\n', (18574, 18602), False, 'import os\n'), ((2258, 2277), 'csv.writer', 'csv.writer', (['fileobj'], {}), '(fileobj)\n', (2268, 2277), False, 'import csv\n'), ((18703, 18743), 'os.path.join', 'os.path.join', (['path_cache_file', 'file_name'], {}), '(path_cache_file, file_name)\n', (18715, 18743), False, 'import os\n')] |
import pandas as pd
import numpy as np
import os
from tqdm import tqdm
from vtkplotter import ProgressBar, shapes, merge, load
from vtkplotter.mesh import Mesh as Actor
from morphapi.morphology.morphology import Neuron
import brainrender
from brainrender.Utils.data_io import load_mesh_from_file, load_json
from brainrender.Utils.data_manipulation import get_coords, flatten_list, is_any_item_in_list
from brainrender.morphology.utils import edit_neurons, get_neuron_actors_with_morphapi
from brainrender import STREAMLINES_RESOLUTION, INJECTION_VOLUME_SIZE
from brainrender.Utils.webqueries import request
from brainrender import *
from brainrender.Utils import actors_funcs
from brainrender.colors import _mapscales_cmaps, makePalette, get_random_colors, getColor, colors, colorMap, check_colors
from brainrender.colors import get_n_shades_of
from allensdk.core.mouse_connectivity_cache import MouseConnectivityCache
from allensdk.api.queries.ontologies_api import OntologiesApi
from allensdk.api.queries.reference_space_api import ReferenceSpaceApi
from allensdk.api.queries.mouse_connectivity_api import MouseConnectivityApi
from allensdk.api.queries.tree_search_api import TreeSearchApi
from allensdk.core.reference_space_cache import ReferenceSpaceCache
from brainrender.atlases.base import Atlas
class ABA(Atlas):
"""
This class handles interaction with the Allen Brain Atlas datasets and APIs to get structure trees,
experimental metadata and results, tractography data etc.
"""
ignore_regions = ['retina', 'brain', 'fiber tracts', 'grey'] # ignored when rendering
# useful vars for analysis
excluded_regions = ["fiber tracts"]
resolution = 25
_root_bounds = [[-17, 13193],
[ 134, 7564],
[486, 10891]]
_root_midpoint = [np.mean([-17, 13193]),
np.mean([134, 7564]),
np.mean([486, 10891])]
atlas_name = "ABA"
mesh_format = 'obj'
base_url = "https://neuroinformatics.nl/HBP/allen-connectivity-viewer/json/streamlines_NNN.json.gz"
# Used for streamlines
def __init__(self, base_dir=None, **kwargs):
    """
    Set up file paths and Allen SDKs
    :param base_dir: path to directory to use for saving data (default value None)
    :param kwargs: can be used to pass path to individual data folders. See brainrender/Utils/paths_manager.py
    """
    # Base Atlas sets up the data folders (self.mouse_meshes, etc.).
    Atlas.__init__(self, base_dir=base_dir, **kwargs)
    self.meshes_folder = self.mouse_meshes # where the .obj mesh for each region is saved
    # get mouse connectivity cache and structure tree
    self.mcc = MouseConnectivityCache(manifest_file=os.path.join(self.mouse_connectivity_cache, "manifest.json"))
    self.structure_tree = self.mcc.get_structure_tree()
    # get ontologies API and brain structures sets
    self.oapi = OntologiesApi()
    self.get_structures_sets()
    # get reference space
    self.space = ReferenceSpaceApi()
    self.spacecache = ReferenceSpaceCache(
        manifest=os.path.join(self.annotated_volume_fld, "manifest.json"), # downloaded files are stored relative to here
        resolution=self.resolution,
        reference_space_key="annotation/ccf_2017" # use the latest version of the CCF
        )
    self.annotated_volume, _ = self.spacecache.get_annotation_volume()
    # mouse connectivity API [used for tractography]
    self.mca = MouseConnectivityApi()
    # Get tree search api
    self.tree_search = TreeSearchApi()
    # Store all regions metadata [If there's internet connection]
    # NOTE(review): self.other_sets is presumably populated by
    # get_structures_sets() above and None when offline — confirm.
    if self.other_sets is not None:
        self.regions = self.other_sets["Structures whose surfaces are represented by a precomputed mesh"].sort_values('acronym')
        self.region_acronyms = list(self.other_sets["Structures whose surfaces are represented by a precomputed mesh"].sort_values(
                            'acronym').acronym.values)
# ---------------------------------------------------------------------------- #
# Methods to support Scene creation #
# ---------------------------------------------------------------------------- #
"""
These methods are used by brainrender.scene to populate a scene using
the Allen brain atlas meshes. They overwrite methods of the base atlas class
"""
# ------------------------- Adding elements to scene ------------------------- #
def get_brain_regions(self, brain_regions, VIP_regions=None, VIP_color=None,
                    add_labels=False,
                    colors=None, use_original_color=True,
                    alpha=None, hemisphere=None, verbose=False, **kwargs):
    """
    Gets brain regions meshes for rendering
    Many parameters can be passed to specify how the regions should be rendered.
    To treat a subset of the rendered regions, specify which regions are VIP.
    Use the kwargs to specify more detailes on how the regins should be rendered (e.g. wireframe look)
    :param brain_regions: str list of acronyms of brain regions
    :param VIP_regions: if a list of brain regions are passed, these are rendered differently compared to those in brain_regions (Default value = None)
    :param VIP_color: if passed, this color is used for the VIP regions (Default value = None)
    :param colors: str, color of rendered brian regions (Default value = None)
    :param use_original_color: bool, if True, the allen's default color for the region is used. (Default value = False)
    :param alpha: float, transparency of the rendered brain regions (Default value = None)
    :param hemisphere: str (Default value = None)
    :param add_labels: bool (default False). If true a label is added to each regions' actor. The label is visible when hovering the mouse over the actor
    :param **kwargs: used to determine a bunch of thigs, including the look and location of lables from scene.add_labels
    """
    # NOTE(review): add_labels is documented but not referenced in this body;
    # presumably it is consumed via **kwargs elsewhere — confirm.
    # Check that the atlas has brain regions data
    if self.region_acronyms is None:
        print(f"The atlas {self.atlas_name} has no brain regions data")
        return
    # Parse arguments
    if VIP_regions is None:
        VIP_regions = brainrender.DEFAULT_VIP_REGIONS
    if VIP_color is None:
        VIP_color = brainrender.DEFAULT_VIP_COLOR
    # Keep the resolved default in _alpha; the 'alpha' name is reused per
    # region inside the loop below.
    if alpha is None:
        _alpha = brainrender.DEFAULT_STRUCTURE_ALPHA
    else: _alpha = alpha
    # check that we have a list
    if not isinstance(brain_regions, list):
        brain_regions = [brain_regions]
    # check the colors input is correct
    if colors is not None:
        if isinstance(colors[0], (list, tuple)):
            # One color per region was passed: lengths must match.
            if not len(colors) == len(brain_regions):
                raise ValueError("when passing colors as a list, the number of colors must match the number of brain regions")
            for col in colors:
                if not check_colors(col): raise ValueError("Invalide colors in input: {}".format(col))
        else:
            if not check_colors(colors): raise ValueError("Invalide colors in input: {}".format(colors))
            # Broadcast the single color to every requested region.
            colors = [colors for i in range(len(brain_regions))]
    # loop over all brain regions
    actors = {}
    for i, region in enumerate(brain_regions):
        self._check_valid_region_arg(region)
        if region in self.ignore_regions: continue
        if verbose: print("Rendering: ({})".format(region))
        # get the structure and check if we need to download the object file
        if region not in self.region_acronyms:
            print(f"The region {region} doesn't seem to belong to the atlas being used: {self.atlas_name}. Skipping")
            continue
        obj_file = os.path.join(self.meshes_folder, "{}.{}".format(region, self.mesh_format))
        if not self._check_obj_file(region, obj_file):
            print("Could not render {}, maybe we couldn't get the mesh?".format(region))
            continue
        # check which color to assign to the brain region
        if use_original_color:
            # Allen color triplets are 0-255; vtkplotter wants 0-1 floats.
            color = [x/255 for x in self.get_region_color(region)]
        else:
            if region in VIP_regions:
                color = VIP_color
            else:
                if colors is None:
                    color = brainrender.DEFAULT_STRUCTURE_COLOR
                elif isinstance(colors, list):
                    color = colors[i]
                else:
                    color = colors
        # VIP regions are always fully opaque regardless of the alpha argument.
        if region in VIP_regions:
            alpha = 1
        else:
            alpha = _alpha
        # Load the object file as a mesh and store the actor
        if hemisphere is not None:
            if hemisphere.lower() == "left" or hemisphere.lower() == "right":
                obj = self.get_region_unilateral(region, hemisphere=hemisphere, color=color, alpha=alpha)
            else:
                raise ValueError(f'Invalid hemisphere argument: {hemisphere}')
        else:
            obj = load(obj_file, c=color, alpha=alpha)
        if obj is not None:
            actors_funcs.edit_actor(obj, **kwargs)
            actors[region] = obj
        else:
            print(f"Something went wrong while loading mesh data for {region}")
    return actors
@staticmethod # static method because this should inherit from scene
def add_neurons(self, neurons, color=None, display_axon=True, display_dendrites=True,
            alpha=1, neurite_radius=None):
    """
    Adds rendered morphological data of neurons reconstructions downloaded from the
    Mouse Light project at Janelia (or other sources).
    Accepts neurons argument as:
        - file(s) with morphological data
        - vtkplotter mesh actor(s) of entire neurons reconstructions
        - dictionary or list of dictionary with actors for different neuron parts

    :param self: instance of brainrender Scene to use to render neurons
    :param neurons: str, list, dict. File(s) with neurons data or list of rendered neurons.
    :param display_axon, display_dendrites: if set to False the corresponding neurite is not rendered
    :param color: default None. Can be:
            - None: each neuron is given a random color
            - color: rbg, hex etc. If a single color is passed all neurons will have that color
            - cmap: str with name of a colormap: neurons are colored based on their sequential order and cmap
            - dict: a dictionary specifying a color for soma, dendrites and axon actors, will be the same for all neurons
            - list: a list of length = number of neurons with either a single color for each neuron
                    or a dictionary of colors for each neuron
    :param alpha: float in range 0,1. Neurons transparency
    :param neurite_radius: float > 0 , radius of tube actor representing neurites
    """
    if not isinstance(neurons, (list, tuple)):
        neurons = [neurons]

    # ------------------------------ Prepare colors ------------------------------ #
    # After this section 'colors' maps each compartment name to a list of N
    # colors, one per neuron.
    N = len(neurons)
    colors = dict(
        soma = None,
        axon = None,
        dendrites = None,
    )

    # If no color is passed, get random colors
    if color is None:
        cols = get_random_colors(N)
        colors = dict(
            soma = cols.copy(),
            axon = cols.copy(),
            dendrites = cols.copy(),)
    else:
        if isinstance(color, str):
            # Deal with a a cmap being passed
            if color in _mapscales_cmaps:
                cols = [colorMap(n, name=color, vmin=-2, vmax=N+2) for n in np.arange(N)]
                colors = dict(
                    soma = cols.copy(),
                    axon = cols.copy(),
                    dendrites = cols.copy(),)
            else:
                # Deal with a single color being passed
                cols = [getColor(color) for n in np.arange(N)]
                colors = dict(
                    soma = cols.copy(),
                    axon = cols.copy(),
                    dendrites = cols.copy(),)
        elif isinstance(color, dict):
            # Deal with a dictionary with color for each component
            if not 'soma' in color.keys():
                raise ValueError(f"When passing a dictionary as color argument, \
                                soma should be one fo the keys: {color}")
            dendrites_color = color.pop('dendrites', color['soma'])
            axon_color = color.pop('axon', color['soma'])
            colors = dict(
                soma = [color['soma'] for n in np.arange(N)],
                axon = [axon_color for n in np.arange(N)],
                dendrites = [dendrites_color for n in np.arange(N)],)
        elif isinstance(color, (list, tuple)):
            # Check that the list content makes sense
            if len(color) != N:
                raise ValueError(f"When passing a list of color arguments, the list length"+
                                f" ({len(color)}) should match the number of neurons ({N}).")
            if len(set([type(c) for c in color])) > 1:
                raise ValueError(f"When passing a list of color arguments, all list elements"+
                                " should have the same type (e.g. str or dict)")
            if isinstance(color[0], dict):
                # Deal with a list of dictionaries
                soma_colors, dendrites_colors, axon_colors = [], [], []
                # BUGFIX: iterate the caller-supplied list 'color'. The
                # original looped over 'colors' (the accumulator dict), which
                # yields the key strings 'soma'/'axon'/'dendrites' and crashed
                # on col.keys() with an AttributeError.
                for col in color:
                    if not 'soma' in col.keys():
                        raise ValueError(f"When passing a dictionary as col argument, \
                                soma should be one fo the keys: {col}")
                    dendrites_colors.append(col.pop('dendrites', col['soma']))
                    axon_colors.append(col.pop('axon', col['soma']))
                    soma_colors.append(col['soma'])
                colors = dict(
                    soma = soma_colors,
                    axon = axon_colors,
                    dendrites = dendrites_colors,)
            else:
                # Deal with a list of colors
                colors = dict(
                    soma = color.copy(),
                    axon = color.copy(),
                    dendrites = color.copy(),)
        else:
            raise ValueError(f"Color argument passed is not valid. Should be a \
                    str, dict, list or None, not {type(color)}:{color}")

    # Check colors, if everything went well we should have N colors per entry
    for k, v in colors.items():
        if len(v) != N:
            raise ValueError(f"Something went wrong while preparing colors. Not all \
                    entries have right length. We got: {colors}")

    # ---------------------------------- Render ---------------------------------- #
    _neurons_actors = []
    for neuron in neurons:
        neuron_actors = {'soma': None, 'dendrites': None, 'axon': None}

        # Deal with neuron as filepath
        if isinstance(neuron, str):
            if os.path.isfile(neuron):
                if neuron.endswith('.swc'):
                    neuron_actors, _ = get_neuron_actors_with_morphapi(swcfile=neuron, neurite_radius=neurite_radius)
                else:
                    raise NotImplementedError('Currently we can only parse morphological reconstructions from swc files')
            else:
                raise ValueError(f"Passed neruon {neuron} is not a valid input. Maybe the file doesn't exist?")

        # Deal with neuron as single actor
        elif isinstance(neuron, Actor):
            # A single actor was passed, maybe it's the entire neuron
            neuron_actors['soma'] = neuron # store it as soma anyway
            pass

        # Deal with neuron as dictionary of actor
        elif isinstance(neuron, dict):
            neuron_actors['soma'] = neuron.pop('soma', None)
            neuron_actors['axon'] = neuron.pop('axon', None)

            # Get dendrites actors
            if 'apical_dendrites' in neuron.keys() or 'basal_dendrites' in neuron.keys():
                if 'apical_dendrites' not in neuron.keys():
                    neuron_actors['dendrites'] = neuron['basal_dendrites']
                elif 'basal_dendrites' not in neuron.keys():
                    neuron_actors['dendrites'] = neuron['apical_dendrites']
                else:
                    # BUGFIX: 'neuron_ctors' was a typo that raised NameError
                    # whenever both apical and basal dendrites were present.
                    neuron_actors['dendrites'] = merge(neuron['apical_dendrites'], neuron['basal_dendrites'])
            else:
                neuron_actors['dendrites'] = neuron.pop('dendrites', None)

        # Deal with neuron as instance of Neuron from morphapi
        elif isinstance(neuron, Neuron):
            neuron_actors, _ = get_neuron_actors_with_morphapi(neuron=neuron)

        # Deal with other inputs
        else:
            raise ValueError(f"Passed neuron {neuron} is not a valid input")

        # Check that we don't have anything weird in neuron_actors
        for key, act in neuron_actors.items():
            if act is not None:
                if not isinstance(act, Actor):
                    raise ValueError(f"Neuron actor {key} is {act.__type__} but should be a vtkplotter Mesh. Not: {act}")

        if not display_axon:
            neuron_actors['axon'] = None
        if not display_dendrites:
            neuron_actors['dendrites'] = None
        _neurons_actors.append(neuron_actors)

    # Color actors
    for n, neuron in enumerate(_neurons_actors):
        if neuron['axon'] is not None:
            neuron['axon'].c(colors['axon'][n])
        # Soma is assumed to always be present and is colored unconditionally.
        neuron['soma'].c(colors['soma'][n])
        if neuron['dendrites'] is not None:
            neuron['dendrites'].c(colors['dendrites'][n])

    # Add to actors storage
    self.actors["neurons"].extend(_neurons_actors)

    # Return
    if len(_neurons_actors) == 1:
        return _neurons_actors[0]
    elif not _neurons_actors:
        return None
    else:
        return _neurons_actors
@staticmethod
def add_tractography(self, tractography, color=None, display_injection_structure=False,
                    display_onlyVIP_injection_structure=False, color_by="manual", others_alpha=1, verbose=True,
                    VIP_regions=[], VIP_color=None, others_color="white", include_all_inj_regions=False,
                    extract_region_from_inj_coords=False, display_injection_volume=True):
    """
    Renders tractography data and adds it to the scene. A subset of tractography data can receive special treatment using the with VIP regions argument:
    if the injection site for the tractography data is in a VIP regions, this is colored differently.
    :param tractography: list of dictionaries with tractography data
    :param color: color of rendered tractography data
    :param display_injection_structure: Bool, if True the injection structure is rendered (Default value = False)
    :param display_onlyVIP_injection_structure: bool if true displays the injection structure only for VIP regions (Default value = False)
    :param color_by: str, specifies which criteria to use to color the tractography (Default value = "manual")
    :param others_alpha: float (Default value = 1)
    :param verbose: bool (Default value = True)
    :param VIP_regions: list of brain regions with VIP treatement (Default value = [])
    :param VIP_color: str, color to use for VIP data (Default value = None)
    :param others_color: str, color for not VIP data (Default value = "white")
    :param include_all_inj_regions: bool (Default value = False)
    :param extract_region_from_inj_coords: bool (Default value = False)
    :param display_injection_volume: float, if True a spehere is added to display the injection coordinates and volume (Default value = True)
    """
    # NOTE(review): VIP_regions=[] is a mutable default argument; it is only
    # read here, so it is harmless, but worth cleaning up upstream.
    # check argument
    if not isinstance(tractography, list):
        if isinstance(tractography, dict):
            tractography = [tractography]
        else:
            raise ValueError("the 'tractography' variable passed must be a list of dictionaries")
    else:
        if not isinstance(tractography[0], dict):
            raise ValueError("the 'tractography' variable passed must be a list of dictionaries")
    if not isinstance(VIP_regions, list):
        raise ValueError("VIP_regions should be a list of acronyms")
    # check coloring mode used and prepare a list COLORS to use for coloring stuff
    # COLORS ends up with one color per tractography trace.
    if color_by == "manual":
        # check color argument
        if color is None:
            color = TRACT_DEFAULT_COLOR
            COLORS = [color for i in range(len(tractography))]
        elif isinstance(color, list):
            if not len(color) == len(tractography):
                raise ValueError("If a list of colors is passed, it must have the same number of items as the number of tractography traces")
            else:
                for col in color:
                    if not check_colors(col): raise ValueError("Color variable passed to tractography is invalid: {}".format(col))
                COLORS = color
        else:
            if not check_colors(color):
                raise ValueError("Color variable passed to tractography is invalid: {}".format(color))
            else:
                COLORS = [color for i in range(len(tractography))]
    elif color_by == "region":
        # Color each trace by its injection structure's atlas color.
        COLORS = [self.atlas.get_region_color(t['structure-abbrev']) for t in tractography]
    elif color_by == "target_region":
        if VIP_color is not None:
            if not check_colors(VIP_color) or not check_colors(others_color):
                raise ValueError("Invalid VIP or other color passed")
            try:
                if include_all_inj_regions:
                    COLORS = [VIP_color if is_any_item_in_list( [x['abbreviation'] for x in t['injection-structures']], VIP_regions)\
                                else others_color for t in tractography]
                else:
                    COLORS = [VIP_color if t['structure-abbrev'] in VIP_regions else others_color for t in tractography]
            except:
                raise ValueError("Something went wrong while getting colors for tractography")
        else:
            COLORS = [self.atlas.get_region_color(t['structure-abbrev']) if t['structure-abbrev'] in VIP_regions else others_color for t in tractography]
    else:
        raise ValueError("Unrecognised 'color_by' argument {}".format(color_by))
    # add actors to represent tractography data
    actors, structures_acronyms = [], []
    if VERBOSE and verbose:
        print("Structures found to be projecting to target: ")
    # Loop over injection experiments
    for i, (t, color) in enumerate(zip(tractography, COLORS)):
        # Use allen metadata
        if include_all_inj_regions:
            inj_structures = [x['abbreviation'] for x in t['injection-structures']]
        else:
            inj_structures = [self.atlas.get_structure_parent(t['structure-abbrev'])['acronym']]
        # show brain structures in which injections happened
        if display_injection_structure:
            # Only add the region mesh if it is not already in the scene.
            if not is_any_item_in_list(inj_structures, list(self.actors['regions'].keys())):
                if display_onlyVIP_injection_structure and is_any_item_in_list(inj_structures, VIP_regions):
                    self.add_brain_regions([t['structure-abbrev']], colors=color)
                elif not display_onlyVIP_injection_structure:
                    self.add_brain_regions([t['structure-abbrev']], colors=color)
        if VERBOSE and verbose and not is_any_item_in_list(inj_structures, structures_acronyms):
            print(" -- ({})".format(t['structure-abbrev']))
            structures_acronyms.append(t['structure-abbrev'])
        # get tractography points and represent as list
        # Non-VIP traces get the caller's others_alpha when coloring by target.
        if color_by == "target_region" and not is_any_item_in_list(inj_structures, VIP_regions):
            alpha = others_alpha
        else:
            alpha = TRACTO_ALPHA
        if alpha == 0:
            continue # skip transparent ones
        # check if we need to manually check injection coords
        # NOTE(review): a non-empty list passed as extract_region_from_inj_coords
        # is truthy, so the elif branch below can only run for an empty list —
        # confirm this is the intended dispatch.
        if extract_region_from_inj_coords:
            try:
                region = self.atlas.get_structure_from_coordinates(t['injection-coordinates'],
                                                    just_acronym=False)
                if region is None: continue
                inj_structures = [self.atlas.get_structure_parent(region['acronym'])['acronym']]
            except:
                raise ValueError(self.atlas.get_structure_from_coordinates(t['injection-coordinates'],
                                                    just_acronym=False))
            if inj_structures is None: continue
        elif isinstance(extract_region_from_inj_coords, list):
            # check if injection coord are in one of the brain regions in list, otherwise skip
            if not is_any_item_in_list(inj_structures, extract_region_from_inj_coords):
                continue
        # represent injection site as sphere
        if display_injection_volume:
            actors.append(shapes.Sphere(pos=t['injection-coordinates'],
                            c=color, r=INJECTION_VOLUME_SIZE*t['injection-volume'], alpha=TRACTO_ALPHA))
        points = [p['coord'] for p in t['path']]
        actors.append(shapes.Tube(points, r=TRACTO_RADIUS, c=color, alpha=alpha, res=TRACTO_RES))
    self.actors['tracts'].extend(actors)
@staticmethod
def parse_streamline(*args, filepath=None, data=None, show_injection_site=True, color='ivory', alpha=.8, radius=10, **kwargs):
"""
Given a path to a .json file with streamline data (or the data themselves), render the streamline as tubes actors.
Either filepath or data should be passed
:param filepath: str, optional. Path to .json file with streamline data (Default value = None)
:param data: panadas.DataFrame, optional. DataFrame with streamline data. (Default value = None)
:param color: str color of the streamlines (Default value = 'ivory')
:param alpha: float transparency of the streamlines (Default value = .8)
:param radius: int radius of the streamlines actor (Default value = 10)
:param show_injection_site: bool, if True spheres are used to render the injection volume (Default value = True)
:param *args:
:param **kwargs:
"""
if filepath is not None and data is None:
data = load_json(filepath)
# data = {k:{int(k2):v2 for k2, v2 in v.items()} for k,v in data.items()}
elif filepath is None and data is not None:
pass
else:
raise ValueError("Need to pass eiteher a filepath or data argument to parse_streamline")
# create actors for streamlines
lines = []
if len(data['lines']) == 1:
lines_data = data['lines'][0]
else:
lines_data = data['lines']
for line in lines_data:
points = [[l['x'], l['y'], l['z']] for l in line]
lines.append(shapes.Tube(points, r=radius, c=color, alpha=alpha, res=STREAMLINES_RESOLUTION))
coords = []
if show_injection_site:
if len(data['injection_sites']) == 1:
injection_data = data['injection_sites'][0]
else:
injection_data = data['injection_sites']
for inj in injection_data:
coords.append(list(inj.values()))
spheres = [shapes.Spheres(coords, r=INJECTION_VOLUME_SIZE)]
else:
spheres = []
merged = merge(*lines, *spheres)
merged.color(color)
merged.alpha(alpha)
return [merged]
    @staticmethod
    def add_streamlines(self, sl_file, *args, colorby=None, color_each=False, **kwargs):
        """
        Render streamline data downloaded from https://neuroinformatics.nl/HBP/allen-connectivity-viewer/streamline-downloader.html
        :param sl_file: path to JSON file with streamliens data [or list of files]
        :param colorby: str, criteria for how to color the streamline data (Default value = None)
        :param color_each: bool, if True, the streamlines for each injection is colored differently (Default value = False)
        :param *args:
        :param **kwargs:
        :returns: the actors created for the last file processed (all actors are
            also appended to self.actors['tracts']).
        """
        # NOTE(review): declared @staticmethod yet takes `self` — calling this
        # through an instance would bind `sl_file` to the instance. Presumably it
        # is invoked with the scene object passed explicitly; confirm call sites.
        color = None
        if not color_each:
            # Single color for all streamlines: resolve it from the region acronym.
            if colorby is not None:
                try:
                    color = self.structure_tree.get_structures_by_acronym([colorby])[0]['rgb_triplet']
                    if "color" in kwargs.keys():
                        del kwargs["color"]
                except:
                    raise ValueError("Could not extract color for region: {}".format(colorby))
        else:
            # Per-injection coloring: take an explicit color and validate it.
            if colorby is not None:
                color = kwargs.pop("color", None)
                try:
                    get_n_shades_of(color, 1)
                except:
                    raise ValueError("Invalide color argument: {}".format(color))
        if isinstance(sl_file, list):
            if isinstance(sl_file[0], (str, pd.DataFrame)): # we have a list of files to add
                for slf in tqdm(sl_file):
                    if not color_each:
                        if color is not None:
                            if isinstance(slf, str):
                                streamlines = self.atlas.parse_streamline(filepath=slf, *args, color=color, **kwargs)
                            else:
                                streamlines = self.atlas.parse_streamline(data=slf, *args, color=color, **kwargs)
                        else:
                            if isinstance(slf, str):
                                streamlines = self.atlas.parse_streamline(filepath=slf, *args, **kwargs)
                            else:
                                streamlines = self.atlas.parse_streamline(data=slf, *args, **kwargs)
                    else:
                        # A fresh shade/color per file when color_each is requested.
                        if color is not None:
                            col = get_n_shades_of(color, 1)[0]
                        else:
                            col = get_random_colors(n_colors=1)
                        if isinstance(slf, str):
                            streamlines = self.atlas.parse_streamline(filepath=slf, color=col, *args, **kwargs)
                        else:
                            streamlines = self.atlas.parse_streamline(data= slf, color=col, *args, **kwargs)
                    self.actors['tracts'].extend(streamlines)
            else:
                raise ValueError("unrecognized argument sl_file: {}".format(sl_file))
        else:
            # Single file / DataFrame.
            # NOTE(review): this branch calls the module-level parse_streamline,
            # while the list branch uses self.atlas.parse_streamline — confirm
            # both resolve to the same implementation.
            if not isinstance(sl_file, (str, pd.DataFrame)):
                raise ValueError("unrecognized argument sl_file: {}".format(sl_file))
            if not color_each:
                if isinstance(sl_file, str):
                    streamlines = parse_streamline(filepath=sl_file, *args, **kwargs)
                else:
                    streamlines = parse_streamline(data=sl_file, *args, **kwargs)
            else:
                if color is not None:
                    col = get_n_shades_of(color, 1)[0]
                else:
                    col = get_random_colors(n_colors=1)
                if isinstance(sl_file, str):
                    streamlines = parse_streamline(filepath=sl_file, color=col, *args, **kwargs)
                else:
                    streamlines = parse_streamline(data=sl_file, color=col, *args, **kwargs)
            self.actors['tracts'].extend(streamlines)
        return streamlines
@staticmethod
def add_injection_sites(self, experiments, color=None):
"""
Creates Spherse at the location of injections with a volume proportional to the injected volume
:param experiments: list of dictionaries with tractography data
:param color: (Default value = None)
"""
# check arguments
if not isinstance(experiments, list):
raise ValueError("experiments must be a list")
if not isinstance(experiments[0], dict):
raise ValueError("experiments should be a list of dictionaries")
#c= cgeck color
if color is None:
color = INJECTION_DEFAULT_COLOR
injection_sites = []
for exp in experiments:
injection_sites.append(shapes.Sphere(pos=(exp["injection_x"], exp["injection_y"], exp["injection_z"]),
r = INJECTION_VOLUME_SIZE*exp["injection_volume"]*3,
c=color
))
self.actors['injection_sites'].extend(injection_sites)
# ---------------------------------------------------------------------------- #
# STRUCTURE TREE INTERACTION #
# ---------------------------------------------------------------------------- #
# ------------------------- Get/Print structures sets ------------------------ #
    def get_structures_sets(self):
        """
        Get the Allen's structure sets.
        Populates:
          - self.structures: DataFrame of summary structures (set 167587189),
            minus self.excluded_regions.
          - self.other_sets: dict of set name -> DataFrame (only on success).
          - self.all_avaliable_meshes: sorted acronyms of structures with a
            precomputed mesh (only on success).
        Requires an internet connection for the non-summary sets; on failure a
        warning is printed and only self.structures is set.
        """
        summary_structures = self.structure_tree.get_structures_by_set_id([167587189]) # main summary structures
        summary_structures = [s for s in summary_structures if s["acronym"] not in self.excluded_regions]
        self.structures = pd.DataFrame(summary_structures)
        # Other structures sets
        try:
            all_sets = pd.DataFrame(self.oapi.get_structure_sets())
        except:
            print("Could not retrieve data, possibly because there is no internet connection. Limited functionality available.")
        else:
            # Resolve each named set to its id, then fetch its structures.
            sets = ["Summary structures of the pons", "Summary structures of the thalamus",
                "Summary structures of the hypothalamus", "List of structures for ABA Fine Structure Search",
                "Structures representing the major divisions of the mouse brain", "Summary structures of the midbrain", "Structures whose surfaces are represented by a precomputed mesh"]
            self.other_sets = {}
            for set_name in sets:
                set_id = all_sets.loc[all_sets.description == set_name].id.values[0]
                self.other_sets[set_name] = pd.DataFrame(self.structure_tree.get_structures_by_set_id([set_id]))
            self.all_avaliable_meshes = sorted(self.other_sets["Structures whose surfaces are represented by a precomputed mesh"].acronym.values)
def print_structures_list_to_text(self):
"""
Saves the name of every brain structure for which a 3d mesh (.obj file) is available in a text file.
"""
s = self.other_sets["Structures whose surfaces are represented by a precomputed mesh"].sort_values('acronym')
with open('all_regions.txt', 'w') as o:
for acr, name in zip(s.acronym.values, s['name'].values):
o.write("({}) -- {}\n".format(acr, name))
def print_structures(self):
"""
Prints the name of every structure in the structure tree to the console.
"""
acronyms, names = self.structures.acronym.values, self.structures['name'].values
sort_idx = np.argsort(acronyms)
acronyms, names = acronyms[sort_idx], names[sort_idx]
[print("({}) - {}".format(a, n)) for a,n in zip(acronyms, names)]
# -------------------------- Parents and descendants ------------------------- #
def get_structure_ancestors(self, regions, ancestors=True, descendants=False):
"""
Get's the ancestors of the region(s) passed as arguments
:param regions: str, list of str with acronums of regions of interest
:param ancestors: if True, returns the ancestors of the region (Default value = True)
:param descendants: if True, returns the descendants of the region (Default value = False)
"""
if not isinstance(regions, list):
struct_id = self.structure_tree.get_structures_by_acronym([regions])[0]['id']
return pd.DataFrame(self.tree_search.get_tree('Structure', struct_id, ancestors=ancestors, descendants=descendants))
else:
ancestors = []
for region in regions:
struct_id = self.structure_tree.get_structures_by_acronym([region])[0]['id']
ancestors.append(pd.DataFrame(self.tree_search.get_tree('Structure', struct_id, ancestors=ancestors, descendants=descendants)))
return ancestors
def get_structure_descendants(self, regions):
return self.get_structure_ancestors(regions, ancestors=False, descendants=True)
def get_structure_parent(self, acronyms):
"""
Gets the parent of a brain region (or list of regions) from the hierarchical structure of the
Allen Brain Atals.
:param acronyms: list of acronyms of brain regions.
"""
if not isinstance(acronyms, list):
self._check_valid_region_arg(acronyms)
s = self.structure_tree.get_structures_by_acronym([acronyms])[0]
if s['id'] in self.structures.id.values:
return s
else:
return self.get_structure_ancestors(s['acronym']).iloc[-1]
else:
parents = []
for region in acronyms:
self._check_valid_region_arg(region)
s = self.structure_tree.get_structures_by_acronym(acronyms)[0]
if s['id'] in self.structures.id.values:
parents.append(s)
parents.append(self.get_structure_ancestors(s['acronym']).iloc[-1])
return parents
# ---------------------------------------------------------------------------- #
# UTILS #
# ---------------------------------------------------------------------------- #
def get_hemisphere_from_point(self, point):
if point[2] < self._root_midpoint[2]:
return 'left'
else:
return 'right'
def mirror_point_across_hemispheres(self, point):
delta = point[2] - self._root_midpoint[2]
point[2] = self._root_midpoint[2] - delta
return point
def get_region_color(self, regions):
"""
Gets the RGB color of a brain region from the Allen Brain Atlas.
:param regions: list of regions acronyms.
"""
if not isinstance(regions, list):
return self.structure_tree.get_structures_by_acronym([regions])[0]['rgb_triplet']
else:
return [self.structure_tree.get_structures_by_acronym([r])[0]['rgb_triplet'] for r in regions]
    def _check_obj_file(self, region, obj_file):
        """
        If the .obj file for a brain region hasn't been downloaded already, this function downloads it and saves it.
        :param region: string acronym of brain region, or a structure record dict with an 'acronym' key.
        :param obj_file: path to .obj file to save downloaded data.
        :returns: True when the file exists (already cached, or freshly downloaded),
            False when the download failed.
        :raises ValueError: if the region cannot be resolved in the structure tree.
        """
        # checks if the obj file has been downloaded already, if not it takes care of downloading it
        if not os.path.isfile(obj_file):
            try:
                # Accept either a structure record or a bare acronym.
                if isinstance(region, dict):
                    region = region['acronym']
                structure = self.structure_tree.get_structures_by_acronym([region])[0]
            except Exception as e:
                raise ValueError(f'Could not find region with name {region}, got error: {e}')
            try:
                self.space.download_structure_mesh(structure_id = structure["id"],
                                                    ccf_version ="annotation/ccf_2017",
                                                    file_name=obj_file)
                return True
            except:
                # Download failures (network, missing mesh, ...) are reported
                # but deliberately swallowed; the caller gets False.
                print("Could not get mesh for: {}".format(obj_file))
                return False
        else: return True
def _get_structure_mesh(self, acronym, **kwargs):
"""
Fetches the mesh for a brain region from the Allen Brain Atlas SDK.
:param acronym: string, acronym of brain region
:param **kwargs:
"""
structure = self.structure_tree.get_structures_by_acronym([acronym])[0]
obj_path = os.path.join(self.mouse_meshes, "{}.obj".format(acronym))
if self._check_obj_file(structure, obj_path):
mesh = load_mesh_from_file(obj_path, **kwargs)
return mesh
else:
return None
    def get_region_unilateral(self, region, hemisphere="both", color=None, alpha=None):
        """
        Regions meshes are loaded with both hemispheres' meshes by default.
        This function splits them in two.
        :param region: str, actors of brain region
        :param hemisphere: str, which hemisphere to return ['left', 'right' or 'both'] (Default value = "both")
        :param color: color of each side's mesh. (Default value = None)
        :param alpha: transparency of each side's mesh. (Default value = None)
        :returns: (left, right) meshes for "both", a single mesh otherwise,
            or None if the region mesh could not be loaded.
        """
        if color is None: color = ROOT_COLOR
        if alpha is None: alpha = ROOT_ALPHA
        bilateralmesh = self._get_structure_mesh(region, c=color, alpha=alpha)
        if bilateralmesh is None:
            print(f'Failed to get mesh for {region}, returning None')
            return None
        com = bilateralmesh.centerOfMass()   # this will always give a point that is on the midline
        # NOTE(review): `cut` is never used and the mesh is cut twice with the
        # same plane — presumably cutWithPlane mutates the mesh in place and the
        # second call is redundant; confirm against vtkplotter semantics before
        # simplifying.
        cut = bilateralmesh.cutWithPlane(origin=com, normal=(0, 0, 1))
        right = bilateralmesh.cutWithPlane( origin=com, normal=(0, 0, 1))
        # left is the mirror right # WIP
        com = self.get_region_CenterOfMass('root', unilateral=False)[2]
        left = actors_funcs.mirror_actor_at_point(right.clone(), com, axis='x')
        if hemisphere == "both":
            return left, right
        elif hemisphere == "left":
            return left
        else:
            return right
@staticmethod
def _check_valid_region_arg(region):
"""
Check that the string passed is a valid brain region name.
:param region: string, acronym of a brain region according to the Allen Brain Atlas.
"""
if not isinstance(region, int) and not isinstance(region, str):
raise ValueError("region must be a list, integer or string, not: {}".format(type(region)))
else:
return True
def get_hemispere_from_point(self, p0):
if p0[2] > self._root_midpoint[2]:
return 'right'
else:
return 'left'
def get_structure_from_coordinates(self, p0, just_acronym=True):
"""
Given a point in the Allen Mouse Brain reference space, returns the brain region that the point is in.
:param p0: list of floats with XYZ coordinates.
"""
voxel = np.round(np.array(p0) / self.resolution).astype(int)
try:
structure_id = self.annotated_volume[voxel[0], voxel[1], voxel[2]]
except:
return None
# Each voxel in the annotation volume is annotated as specifically as possible
structure = self.structure_tree.get_structures_by_id([structure_id])[0]
if structure is not None:
if just_acronym:
return structure['acronym']
return structure
def get_colors_from_coordinates(self, p0):
"""
Given a point or a list of points returns a list of colors where
each item is the color of the brain region each point is in
"""
if isinstance(p0[0], (float, int)):
struct = self.get_structure_from_coordinates(p0, just_acronym=False)
if struct is not None:
return struct['rgb_triplet']
else:
return None
else:
structures = [self.get_structure_from_coordinates(p, just_acronym=False) for p in p0]
colors = [struct['rgb_triplet'] if struct is not None else None
for struct in structures]
return colors
# ---------------------------------------------------------------------------- #
# TRACTOGRAPHY FETCHING #
# ---------------------------------------------------------------------------- #
def get_projection_tracts_to_target(self, p0=None, **kwargs):
"""
Gets tractography data for all experiments whose projections reach the brain region or location of iterest.
:param p0: list of 3 floats with XYZ coordinates of point to be used as seed (Default value = None)
:param **kwargs:
"""
# check args
if p0 is None:
raise ValueError("Please pass coordinates")
elif isinstance(p0, np.ndarray):
p0 = list(p0)
elif not isinstance(p0, (list, tuple)):
raise ValueError("Invalid argument passed (p0): {}".format(p0))
tract = self.mca.experiment_spatial_search(seed_point=p0, **kwargs)
if isinstance(tract, str):
raise ValueError('Something went wrong with query, query error message:\n{}'.format(tract))
else:
return tract
# ---------------------------------------------------------------------------- #
# STREAMLINES FETCHING #
# ---------------------------------------------------------------------------- #
def download_streamlines_for_region(self, region, *args, **kwargs):
"""
Using the Allen Mouse Connectivity data and corresponding API, this function finds expeirments whose injections
were targeted to the region of interest and downloads the corresponding streamlines data. By default, experiements
are selected for only WT mice and onl when the region was the primary injection target. Look at "ABA.experiments_source_search"
to see how to change this behaviour.
:param region: str with region to use for research
:param *args: arguments for ABA.experiments_source_search
:param **kwargs: arguments for ABA.experiments_source_search
"""
# Get experiments whose injections were targeted to the region
region_experiments = self.experiments_source_search(region, *args, **kwargs)
try:
return self.download_streamlines(region_experiments.id.values)
except:
print(f"Could not download streamlines for region {region}")
return [], [] # <- there were no experiments in the target region
def download_streamlines_to_region(self, p0, *args, mouse_line = "wt", **kwargs):
"""
Using the Allen Mouse Connectivity data and corresponding API, this function finds injection experiments
which resulted in fluorescence being found in the target point, then downloads the streamlines data.
:param p0: list of floats with XYZ coordinates
:param mouse_line: str with name of the mouse line to use(Default value = "wt")
:param *args:
:param **kwargs:
"""
experiments = pd.DataFrame(self.get_projection_tracts_to_target(p0=p0))
if mouse_line == "wt":
experiments = experiments.loc[experiments["transgenic-line"] == ""]
else:
if not isinstance(mouse_line, list):
experiments = experiments.loc[experiments["transgenic-line"] == mouse_line]
else:
raise NotImplementedError("ops, you've found a bug!. For now you can only pass one mouse line at the time, sorry.")
return self.download_streamlines(experiments.id.values)
@staticmethod
def make_url_given_id(expid):
"""
Get url of JSON file for an experiment, give it's ID number
:param expid: int with experiment ID number
"""
return "https://neuroinformatics.nl/HBP/allen-connectivity-viewer/json/streamlines_{}.json.gz".format(expid)
    def extract_ids_from_csv(self, csv_file, download=False, **kwargs):
        """
        Parse CSV file to extract experiments IDs and link to downloadable streamline data
        Given a CSV file with info about experiments downloaded from: http://connectivity.brain-map.org
        extract experiments ID and get links to download (compressed) streamline data from https://neuroinformatics.nl.
        Also return the experiments IDs to download data from: https://neuroinformatics.nl/HBP/allen-connectivity-viewer/streamline-downloader.html
        :param csv_file: str with path to csv file
        :param download: if True the data are downloaded automatically (Default value = False)
        :param **kwargs: forwarded to download_streamlines when download is True
        :returns: the experiment IDs (numpy array) when download is False,
            otherwise the (filepaths, data) result of download_streamlines.
        :raises FileNotFoundError: if the CSV file cannot be loaded.
        """
        try:
            data = pd.read_csv(csv_file)
        except:
            raise FileNotFoundError("Could not load: {}".format(csv_file))
        else:
            # NOTE(review): the `if not download` check is duplicated; the
            # first print could live inside the second branch.
            if not download:
                print("Found {} experiments.\n".format(len(data.id.values)))
            if not download:
                # Just report: print one download URL per experiment ...
                print("To download compressed data, click on the following URLs:")
                for eid in data.id.values:
                    url = self.make_url_given_id(eid)
                    print(url)
                print("\n")
                # ... plus a comma-separated ID list for the manual downloader page.
                string = ""
                for x in data.id.values:
                    string += "{},".format(x)
                print("To download JSON directly, go to: https://neuroinformatics.nl/HBP/allen-connectivity-viewer/streamline-downloader.html")
                print("and copy and paste the following experiments ID in the 'Enter the Allen Connectivity Experiment number:' field.")
                print("You can copy and paste each individually or a list of IDs separated by a comma")
                print("IDs: {}".format(string[:-1]))
                print("\n")
                return data.id.values
            else:
                return self.download_streamlines(data.id.values, **kwargs)
    def download_streamlines(self, eids, streamlines_folder=None):
        """
        Given a list of expeirmental IDs, it downloads the streamline data from the https://neuroinformatics.nl cache and saves them as
        json files.
        :param eids: list of integers with experiments IDs (a scalar is wrapped in a list)
        :param streamlines_folder: str path to the folder where the JSON files should be saved, if None the default is used (Default value = None)
        :returns: (filepaths, data) — one JSON path and one DataFrame per experiment.
        """
        if streamlines_folder is None:
            streamlines_folder = self.streamlines_cache
        if not isinstance(eids, (list, np.ndarray, tuple)): eids = [eids]
        filepaths, data = [], []
        for eid in tqdm(eids):
            url = self.make_url_given_id(eid)
            jsonpath = os.path.join(streamlines_folder, str(eid)+".json")
            filepaths.append(jsonpath)
            # Only hit the network when the file is not already cached locally.
            if not os.path.isfile(jsonpath):
                response = request(url)
                # Write the response content as a temporary compressed file
                temp_path = os.path.join(streamlines_folder, "temp.gz")
                with open(temp_path, "wb") as temp:
                    temp.write(response.content)
                # Open in pandas and delete temp
                url_data = pd.read_json(temp_path, lines=True, compression='gzip')
                os.remove(temp_path)
                # save json
                url_data.to_json(jsonpath)
                # append to lists and return
                data.append(url_data)
            else:
                # Cached: load the previously saved JSON.
                data.append(pd.read_json(jsonpath))
        return filepaths, data
| [
"brainrender.Utils.data_manipulation.is_any_item_in_list",
"pandas.read_csv",
"brainrender.Utils.webqueries.request",
"numpy.argsort",
"allensdk.api.queries.ontologies_api.OntologiesApi",
"numpy.array",
"brainrender.Utils.actors_funcs.edit_actor",
"brainrender.colors.get_n_shades_of",
"numpy.arange"... | [((1825, 1846), 'numpy.mean', 'np.mean', (['[-17, 13193]'], {}), '([-17, 13193])\n', (1832, 1846), True, 'import numpy as np\n'), ((1873, 1893), 'numpy.mean', 'np.mean', (['[134, 7564]'], {}), '([134, 7564])\n', (1880, 1893), True, 'import numpy as np\n'), ((1919, 1940), 'numpy.mean', 'np.mean', (['[486, 10891]'], {}), '([486, 10891])\n', (1926, 1940), True, 'import numpy as np\n'), ((2460, 2509), 'brainrender.atlases.base.Atlas.__init__', 'Atlas.__init__', (['self'], {'base_dir': 'base_dir'}), '(self, base_dir=base_dir, **kwargs)\n', (2474, 2509), False, 'from brainrender.atlases.base import Atlas\n'), ((2925, 2940), 'allensdk.api.queries.ontologies_api.OntologiesApi', 'OntologiesApi', ([], {}), '()\n', (2938, 2940), False, 'from allensdk.api.queries.ontologies_api import OntologiesApi\n'), ((3028, 3047), 'allensdk.api.queries.reference_space_api.ReferenceSpaceApi', 'ReferenceSpaceApi', ([], {}), '()\n', (3045, 3047), False, 'from allensdk.api.queries.reference_space_api import ReferenceSpaceApi\n'), ((3519, 3541), 'allensdk.api.queries.mouse_connectivity_api.MouseConnectivityApi', 'MouseConnectivityApi', ([], {}), '()\n', (3539, 3541), False, 'from allensdk.api.queries.mouse_connectivity_api import MouseConnectivityApi\n'), ((3600, 3615), 'allensdk.api.queries.tree_search_api.TreeSearchApi', 'TreeSearchApi', ([], {}), '()\n', (3613, 3615), False, 'from allensdk.api.queries.tree_search_api import TreeSearchApi\n'), ((15436, 15450), 'brainrender.colors.colors.items', 'colors.items', ([], {}), '()\n', (15448, 15450), False, 'from brainrender.colors import _mapscales_cmaps, makePalette, get_random_colors, getColor, colors, colorMap, check_colors\n'), ((29294, 29317), 'vtkplotter.merge', 'merge', (['*lines', '*spheres'], {}), '(*lines, *spheres)\n', (29299, 29317), False, 'from vtkplotter import ProgressBar, shapes, merge, load\n'), ((34999, 35031), 'pandas.DataFrame', 'pd.DataFrame', (['summary_structures'], {}), '(summary_structures)\n', (35011, 
35031), True, 'import pandas as pd\n'), ((36844, 36864), 'numpy.argsort', 'np.argsort', (['acronyms'], {}), '(acronyms)\n', (36854, 36864), True, 'import numpy as np\n'), ((52387, 52397), 'tqdm.tqdm', 'tqdm', (['eids'], {}), '(eids)\n', (52391, 52397), False, 'from tqdm import tqdm\n'), ((11821, 11841), 'brainrender.colors.get_random_colors', 'get_random_colors', (['N'], {}), '(N)\n', (11838, 11841), False, 'from brainrender.colors import _mapscales_cmaps, makePalette, get_random_colors, getColor, colors, colorMap, check_colors\n'), ((28155, 28174), 'brainrender.Utils.data_io.load_json', 'load_json', (['filepath'], {}), '(filepath)\n', (28164, 28174), False, 'from brainrender.Utils.data_io import load_mesh_from_file, load_json\n'), ((40773, 40797), 'os.path.isfile', 'os.path.isfile', (['obj_file'], {}), '(obj_file)\n', (40787, 40797), False, 'import os\n'), ((42028, 42067), 'brainrender.Utils.data_io.load_mesh_from_file', 'load_mesh_from_file', (['obj_path'], {}), '(obj_path, **kwargs)\n', (42047, 42067), False, 'from brainrender.Utils.data_io import load_mesh_from_file, load_json\n'), ((50540, 50561), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (50551, 50561), True, 'import pandas as pd\n'), ((2719, 2779), 'os.path.join', 'os.path.join', (['self.mouse_connectivity_cache', '"""manifest.json"""'], {}), "(self.mouse_connectivity_cache, 'manifest.json')\n", (2731, 2779), False, 'import os\n'), ((3116, 3172), 'os.path.join', 'os.path.join', (['self.annotated_volume_fld', '"""manifest.json"""'], {}), "(self.annotated_volume_fld, 'manifest.json')\n", (3128, 3172), False, 'import os\n'), ((9457, 9493), 'vtkplotter.load', 'load', (['obj_file'], {'c': 'color', 'alpha': 'alpha'}), '(obj_file, c=color, alpha=alpha)\n', (9461, 9493), False, 'from vtkplotter import ProgressBar, shapes, merge, load\n'), ((9543, 9581), 'brainrender.Utils.actors_funcs.edit_actor', 'actors_funcs.edit_actor', (['obj'], {}), '(obj, **kwargs)\n', (9566, 9581), False, 'from 
brainrender.Utils import actors_funcs\n'), ((15989, 16011), 'os.path.isfile', 'os.path.isfile', (['neuron'], {}), '(neuron)\n', (16003, 16011), False, 'import os\n'), ((26958, 27032), 'vtkplotter.shapes.Tube', 'shapes.Tube', (['points'], {'r': 'TRACTO_RADIUS', 'c': 'color', 'alpha': 'alpha', 'res': 'TRACTO_RES'}), '(points, r=TRACTO_RADIUS, c=color, alpha=alpha, res=TRACTO_RES)\n', (26969, 27032), False, 'from vtkplotter import ProgressBar, shapes, merge, load\n'), ((28755, 28834), 'vtkplotter.shapes.Tube', 'shapes.Tube', (['points'], {'r': 'radius', 'c': 'color', 'alpha': 'alpha', 'res': 'STREAMLINES_RESOLUTION'}), '(points, r=radius, c=color, alpha=alpha, res=STREAMLINES_RESOLUTION)\n', (28766, 28834), False, 'from vtkplotter import ProgressBar, shapes, merge, load\n'), ((29188, 29235), 'vtkplotter.shapes.Spheres', 'shapes.Spheres', (['coords'], {'r': 'INJECTION_VOLUME_SIZE'}), '(coords, r=INJECTION_VOLUME_SIZE)\n', (29202, 29235), False, 'from vtkplotter import ProgressBar, shapes, merge, load\n'), ((30871, 30884), 'tqdm.tqdm', 'tqdm', (['sl_file'], {}), '(sl_file)\n', (30875, 30884), False, 'from tqdm import tqdm\n'), ((34041, 34193), 'vtkplotter.shapes.Sphere', 'shapes.Sphere', ([], {'pos': "(exp['injection_x'], exp['injection_y'], exp['injection_z'])", 'r': "(INJECTION_VOLUME_SIZE * exp['injection_volume'] * 3)", 'c': 'color'}), "(pos=(exp['injection_x'], exp['injection_y'], exp[\n 'injection_z']), r=INJECTION_VOLUME_SIZE * exp['injection_volume'] * 3,\n c=color)\n", (34054, 34193), False, 'from vtkplotter import ProgressBar, shapes, merge, load\n'), ((52577, 52601), 'os.path.isfile', 'os.path.isfile', (['jsonpath'], {}), '(jsonpath)\n', (52591, 52601), False, 'import os\n'), ((52630, 52642), 'brainrender.Utils.webqueries.request', 'request', (['url'], {}), '(url)\n', (52637, 52642), False, 'from brainrender.Utils.webqueries import request\n'), ((52748, 52791), 'os.path.join', 'os.path.join', (['streamlines_folder', '"""temp.gz"""'], {}), 
"(streamlines_folder, 'temp.gz')\n", (52760, 52791), False, 'import os\n'), ((52970, 53025), 'pandas.read_json', 'pd.read_json', (['temp_path'], {'lines': '(True)', 'compression': '"""gzip"""'}), "(temp_path, lines=True, compression='gzip')\n", (52982, 53025), True, 'import pandas as pd\n'), ((53042, 53062), 'os.remove', 'os.remove', (['temp_path'], {}), '(temp_path)\n', (53051, 53062), False, 'import os\n'), ((7356, 7376), 'brainrender.colors.check_colors', 'check_colors', (['colors'], {}), '(colors)\n', (7368, 7376), False, 'from brainrender.colors import _mapscales_cmaps, makePalette, get_random_colors, getColor, colors, colorMap, check_colors\n'), ((25047, 25103), 'brainrender.Utils.data_manipulation.is_any_item_in_list', 'is_any_item_in_list', (['inj_structures', 'structures_acronyms'], {}), '(inj_structures, structures_acronyms)\n', (25066, 25103), False, 'from brainrender.Utils.data_manipulation import get_coords, flatten_list, is_any_item_in_list\n'), ((25351, 25399), 'brainrender.Utils.data_manipulation.is_any_item_in_list', 'is_any_item_in_list', (['inj_structures', 'VIP_regions'], {}), '(inj_structures, VIP_regions)\n', (25370, 25399), False, 'from brainrender.Utils.data_manipulation import get_coords, flatten_list, is_any_item_in_list\n'), ((26723, 26851), 'vtkplotter.shapes.Sphere', 'shapes.Sphere', ([], {'pos': "t['injection-coordinates']", 'c': 'color', 'r': "(INJECTION_VOLUME_SIZE * t['injection-volume'])", 'alpha': 'TRACTO_ALPHA'}), "(pos=t['injection-coordinates'], c=color, r=\n INJECTION_VOLUME_SIZE * t['injection-volume'], alpha=TRACTO_ALPHA)\n", (26736, 26851), False, 'from vtkplotter import ProgressBar, shapes, merge, load\n'), ((30580, 30605), 'brainrender.colors.get_n_shades_of', 'get_n_shades_of', (['color', '(1)'], {}), '(color, 1)\n', (30595, 30605), False, 'from brainrender.colors import get_n_shades_of\n'), ((32897, 32926), 'brainrender.colors.get_random_colors', 'get_random_colors', ([], {'n_colors': '(1)'}), '(n_colors=1)\n', (32914, 
32926), False, 'from brainrender.colors import _mapscales_cmaps, makePalette, get_random_colors, getColor, colors, colorMap, check_colors\n'), ((53265, 53287), 'pandas.read_json', 'pd.read_json', (['jsonpath'], {}), '(jsonpath)\n', (53277, 53287), True, 'import pandas as pd\n'), ((7235, 7252), 'brainrender.colors.check_colors', 'check_colors', (['col'], {}), '(col)\n', (7247, 7252), False, 'from brainrender.colors import _mapscales_cmaps, makePalette, get_random_colors, getColor, colors, colorMap, check_colors\n'), ((12160, 12204), 'brainrender.colors.colorMap', 'colorMap', (['n'], {'name': 'color', 'vmin': '(-2)', 'vmax': '(N + 2)'}), '(n, name=color, vmin=-2, vmax=N + 2)\n', (12168, 12204), False, 'from brainrender.colors import _mapscales_cmaps, makePalette, get_random_colors, getColor, colors, colorMap, check_colors\n'), ((12510, 12525), 'brainrender.colors.getColor', 'getColor', (['color'], {}), '(color)\n', (12518, 12525), False, 'from brainrender.colors import _mapscales_cmaps, makePalette, get_random_colors, getColor, colors, colorMap, check_colors\n'), ((16104, 16182), 'brainrender.morphology.utils.get_neuron_actors_with_morphapi', 'get_neuron_actors_with_morphapi', ([], {'swcfile': 'neuron', 'neurite_radius': 'neurite_radius'}), '(swcfile=neuron, neurite_radius=neurite_radius)\n', (16135, 16182), False, 'from brainrender.morphology.utils import edit_neurons, get_neuron_actors_with_morphapi\n'), ((22443, 22462), 'brainrender.colors.check_colors', 'check_colors', (['color'], {}), '(color)\n', (22455, 22462), False, 'from brainrender.colors import _mapscales_cmaps, makePalette, get_random_colors, getColor, colors, colorMap, check_colors\n'), ((24715, 24763), 'brainrender.Utils.data_manipulation.is_any_item_in_list', 'is_any_item_in_list', (['inj_structures', 'VIP_regions'], {}), '(inj_structures, VIP_regions)\n', (24734, 24763), False, 'from brainrender.Utils.data_manipulation import get_coords, flatten_list, is_any_item_in_list\n'), ((32820, 32845), 
'brainrender.colors.get_n_shades_of', 'get_n_shades_of', (['color', '(1)'], {}), '(color, 1)\n', (32835, 32845), False, 'from brainrender.colors import get_n_shades_of\n'), ((44495, 44507), 'numpy.array', 'np.array', (['p0'], {}), '(p0)\n', (44503, 44507), True, 'import numpy as np\n'), ((12212, 12224), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (12221, 12224), True, 'import numpy as np\n'), ((12535, 12547), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (12544, 12547), True, 'import numpy as np\n'), ((17795, 17841), 'brainrender.morphology.utils.get_neuron_actors_with_morphapi', 'get_neuron_actors_with_morphapi', ([], {'neuron': 'neuron'}), '(neuron=neuron)\n', (17826, 17841), False, 'from brainrender.morphology.utils import edit_neurons, get_neuron_actors_with_morphapi\n'), ((26500, 26567), 'brainrender.Utils.data_manipulation.is_any_item_in_list', 'is_any_item_in_list', (['inj_structures', 'extract_region_from_inj_coords'], {}), '(inj_structures, extract_region_from_inj_coords)\n', (26519, 26567), False, 'from brainrender.Utils.data_manipulation import get_coords, flatten_list, is_any_item_in_list\n'), ((31813, 31842), 'brainrender.colors.get_random_colors', 'get_random_colors', ([], {'n_colors': '(1)'}), '(n_colors=1)\n', (31830, 31842), False, 'from brainrender.colors import _mapscales_cmaps, makePalette, get_random_colors, getColor, colors, colorMap, check_colors\n'), ((22262, 22279), 'brainrender.colors.check_colors', 'check_colors', (['col'], {}), '(col)\n', (22274, 22279), False, 'from brainrender.colors import _mapscales_cmaps, makePalette, get_random_colors, getColor, colors, colorMap, check_colors\n'), ((22900, 22923), 'brainrender.colors.check_colors', 'check_colors', (['VIP_color'], {}), '(VIP_color)\n', (22912, 22923), False, 'from brainrender.colors import _mapscales_cmaps, makePalette, get_random_colors, getColor, colors, colorMap, check_colors\n'), ((22931, 22957), 'brainrender.colors.check_colors', 'check_colors', (['others_color'], 
{}), '(others_color)\n', (22943, 22957), False, 'from brainrender.colors import _mapscales_cmaps, makePalette, get_random_colors, getColor, colors, colorMap, check_colors\n'), ((31720, 31745), 'brainrender.colors.get_n_shades_of', 'get_n_shades_of', (['color', '(1)'], {}), '(color, 1)\n', (31735, 31745), False, 'from brainrender.colors import get_n_shades_of\n'), ((13279, 13291), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (13288, 13291), True, 'import numpy as np\n'), ((13346, 13358), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (13355, 13358), True, 'import numpy as np\n'), ((13423, 13435), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (13432, 13435), True, 'import numpy as np\n'), ((17473, 17533), 'vtkplotter.merge', 'merge', (["neuron['apical_dendrites']", "neuron['basal_dendrites']"], {}), "(neuron['apical_dendrites'], neuron['basal_dendrites'])\n", (17478, 17533), False, 'from vtkplotter import ProgressBar, shapes, merge, load\n'), ((23149, 23241), 'brainrender.Utils.data_manipulation.is_any_item_in_list', 'is_any_item_in_list', (["[x['abbreviation'] for x in t['injection-structures']]", 'VIP_regions'], {}), "([x['abbreviation'] for x in t['injection-structures']],\n VIP_regions)\n", (23168, 23241), False, 'from brainrender.Utils.data_manipulation import get_coords, flatten_list, is_any_item_in_list\n')] |
import pandas as pd
import numpy as np
import copy
import re
import string
# Note: this requires nltk.download() first as described in the README.
# from nltk.book import *
from nltk.corpus import stopwords
from nltk.tokenize import TreebankWordTokenizer
from collections import Counter, OrderedDict
from sklearn.model_selection import train_test_split
from app.lib.utils.jsonl import jsonl_to_df
"""
Sources:
Loading JSONL: https://medium.com/@galea/how-to-love-jsonl-using-json-line-format-in-your-workflow-b6884f65175b
NLTK Reference: http://www.nltk.org/book/ch01.html
NLTK word counter reference: https://www.strehle.de/tim/weblog/archives/2015/09/03/1569
"""
class WordTokenizer(object):
    """Groups tweets per user and turns them into term-frequency vectors.

    Pipeline: collapse each user's tweets into one document
    (``_user_grouper``), clean/tokenize text (``_parse_doc`` /
    ``_parse_words``), join in class labels (``_get_train_test_data`` /
    ``_get_all_classes``), and build fixed-length frequency vectors over the
    most common corpus words (``analyst_judgement``).
    """

    def __init__(self):
        pass

    def _user_grouper(self, filename):
        """Return a DataFrame with one row per user, with all of that user's
        tweets joined into a single string in the 'tweets' column."""
        db_cols = ['search_query', 'id_str', 'full_text', 'created_at', 'favorite_count', 'username', 'user_description']
        tweets_df = jsonl_to_df(filename, db_cols)
        users = list(tweets_df['username'].unique())
        rows = []
        for user in users:
            trunc_df = tweets_df[tweets_df['username'] == user]
            user_description = trunc_df['user_description'].tolist()[0]
            # Renamed from `string` to avoid shadowing the stdlib `string` module.
            joined_tweets = ' '.join(trunc_df['full_text'])
            rows.append({'username': user, 'user_description': user_description, 'tweets': joined_tweets})
        # Build the frame in one shot: DataFrame.append was deprecated (and
        # removed in pandas 2.0), and appending row-by-row is quadratic.
        return pd.DataFrame(rows, columns=['username', 'user_description', 'tweets'])

    def _parse_doc(self, text):
        """Normalize a raw document: lower-case, strip HTML-entity references,
        punctuation (except single quotes) and non-ASCII characters, drop
        all-digit documents, and collapse runs of whitespace."""
        text = text.lower()
        text = re.sub(r'&(.)+', "", text)  # no & references
        text = re.sub(r'pct', 'percent', text)  # replace pct abreviation
        text = re.sub(r"[^\w\d'\s]+", '', text)  # no punct except single quote
        text = re.sub(r'[^\x00-\x7f]', r'', text)  # no non-ASCII strings
        # Omit documents that are all digits
        if text.isdigit():
            text = ""
        # Replace multiple spaces with one space
        text = re.sub(r'\s+', ' ', text)
        return text

    def _parse_words(self, text):
        """Split *text* into cleaned tokens.

        Strips punctuation from each token, then keeps only alphabetic
        tokens of length 3..20.

        Returns
        -------
        (list[str], str)
            The token list and the tokens re-joined into a space-prefixed
            string.
        """
        tokens = text.split()
        re_punc = re.compile('[%s]' % re.escape(string.punctuation))
        # remove punctuation from each word
        tokens = [re_punc.sub('', w) for w in tokens]
        # keep alphabetic tokens whose length is in [3, 20]
        tokens = [word for word in tokens if word.isalpha() and 2 < len(word) < 21]
        # recreate the document string from parsed words
        text = ''.join(' ' + token for token in tokens)
        return tokens, text

    def _get_train_test_data(self, filename, only_known=True):
        """Return (train, test, train_target, test_target).

        Joins per-user tweet documents with their class labels and splits
        80/20 with a fixed random state for reproducibility.
        """
        tweets_by_user_df = self._user_grouper(filename)
        # Get user classes
        db_cols = ['class', 'user_description', 'username']
        user_class_df = jsonl_to_df('users', db_cols)
        user_class_df = user_class_df[['username', 'class']]
        tagged_df = pd.merge(tweets_by_user_df, user_class_df, left_on='username', right_on='username')
        if only_known:
            # Drop users whose class is unknown ('U').
            tagged_df = tagged_df[tagged_df['class'] != 'U']
        train, test = train_test_split(tagged_df, test_size=0.2, random_state=60)
        train_target = train['class']
        test_target = test['class']
        return train, test, train_target, test_target

    def _get_all_classes(self, filename, sample_ratio=1):
        """Return the labelled per-user dataframe, optionally down-sampled
        (without replacement, fixed random state)."""
        tweets_by_user_df = self._user_grouper(filename)
        # Get user classes
        db_cols = ['class', 'user_description', 'username']
        user_class_df = jsonl_to_df('users', db_cols)
        user_class_df = user_class_df[['username', 'class']]
        tagged_df = pd.merge(tweets_by_user_df, user_class_df, left_on='username', right_on='username')
        tagged_df = tagged_df.sample(frac=sample_ratio, replace=False, random_state=60)
        return tagged_df

    def _vectorize_docs(self, df, tokenizer, zero_vector):
        """Turn each row's 'tweets' text into a term-frequency vector.

        The vector has one slot per key of *zero_vector* (the top words);
        each slot holds count(word in doc) / len(doc tokens).
        """
        vectors = []
        for _, row in df.iterrows():
            vec = copy.copy(zero_vector)
            tokens = tokenizer.tokenize(row['tweets'].lower())
            token_counts = Counter(tokens)
            for key, value in token_counts.items():
                # BUG FIX: the original used `try/except KeyError`, but dict
                # assignment never raises KeyError, so out-of-vocabulary words
                # were silently appended and vectors became ragged. Only
                # update slots already present in the zero vector.
                if key in vec:
                    vec[key] = value / len(tokens)
            vectors.append(np.array([i[1] for i in vec.items()]))
        return vectors

    def analyst_judgement(self, filename, count_words):
        """Build frequency vectors over the *count_words* most common
        (non-stopword) training-corpus words for the train and test splits.

        Returns
        -------
        (top_words, train_vectors, test_vectors, train_target, test_target)
        """
        print('[WordTokenizer] Getting analyst judgement vectors...')
        # Get df, and list of all users' tweets.
        tweets_by_user_df, tweets_by_user_df_test, train_target, test_target = self._get_train_test_data(filename)

        # Tokenize whole corpus, where lexicon counts are counts of each word
        # across all tweets in the file.
        tokenizer = TreebankWordTokenizer()
        all_stopwords = list(stopwords.words('english'))
        # Manual exclusion of twitter specific stuff and punctuation
        all_stopwords.extend(['rt', '#', '\'', '@', '!', '``', '\'\'', '\'s', '?', '`', ':', ',', 'https'])
        all_tweets = ' '.join(tweets_by_user_df['tweets'])
        lexicon = sorted(tokenizer.tokenize(all_tweets.lower()))
        lexicon = [x for x in lexicon if x not in all_stopwords]

        # Get top X words
        lexicon_counts = Counter(lexicon)
        top_words = [w[0] for w in lexicon_counts.most_common(count_words)]

        # Vectorization: take the X most common words as the vector slots;
        # each slot holds (occurrences of word in doc) / (doc token count).
        # The same vectorization is applied to the train and test frames.
        zero_vector = OrderedDict((word, 0) for word in top_words)
        tweets_by_user_array = np.array(self._vectorize_docs(tweets_by_user_df, tokenizer, zero_vector))
        tweets_by_user_array_test = np.array(self._vectorize_docs(tweets_by_user_df_test, tokenizer, zero_vector))
        return top_words, tweets_by_user_array, tweets_by_user_array_test, train_target, test_target
| [
"collections.OrderedDict",
"re.escape",
"nltk.corpus.stopwords.words",
"sklearn.model_selection.train_test_split",
"pandas.merge",
"collections.Counter",
"numpy.array",
"app.lib.utils.jsonl.jsonl_to_df",
"pandas.DataFrame",
"re.sub",
"copy.copy",
"nltk.tokenize.TreebankWordTokenizer"
] | [((1000, 1030), 'app.lib.utils.jsonl.jsonl_to_df', 'jsonl_to_df', (['filename', 'db_cols'], {}), '(filename, db_cols)\n', (1011, 1030), False, 'from app.lib.utils.jsonl import jsonl_to_df\n'), ((1112, 1176), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['username', 'user_description', 'tweets']"}), "(columns=['username', 'user_description', 'tweets'])\n", (1124, 1176), True, 'import pandas as pd\n'), ((1799, 1824), 're.sub', 're.sub', (['"""&(.)+"""', '""""""', 'text'], {}), "('&(.)+', '', text)\n", (1805, 1824), False, 'import re\n'), ((1860, 1890), 're.sub', 're.sub', (['"""pct"""', '"""percent"""', 'text'], {}), "('pct', 'percent', text)\n", (1866, 1890), False, 'import re\n'), ((1934, 1968), 're.sub', 're.sub', (['"""[^\\\\w\\\\d\'\\\\s]+"""', '""""""', 'text'], {}), '("[^\\\\w\\\\d\'\\\\s]+", \'\', text)\n', (1940, 1968), False, 'import re\n'), ((2014, 2048), 're.sub', 're.sub', (['"""[^\\\\x00-\\\\x7f]"""', '""""""', 'text'], {}), "('[^\\\\x00-\\\\x7f]', '', text)\n", (2020, 2048), False, 'import re\n'), ((2344, 2369), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'text'], {}), "('\\\\s+', ' ', text)\n", (2350, 2369), False, 'import re\n'), ((3502, 3531), 'app.lib.utils.jsonl.jsonl_to_df', 'jsonl_to_df', (['"""users"""', 'db_cols'], {}), "('users', db_cols)\n", (3513, 3531), False, 'from app.lib.utils.jsonl import jsonl_to_df\n'), ((3614, 3702), 'pandas.merge', 'pd.merge', (['tweets_by_user_df', 'user_class_df'], {'left_on': '"""username"""', 'right_on': '"""username"""'}), "(tweets_by_user_df, user_class_df, left_on='username', right_on=\n 'username')\n", (3622, 3702), True, 'import pandas as pd\n'), ((3806, 3865), 'sklearn.model_selection.train_test_split', 'train_test_split', (['tagged_df'], {'test_size': '(0.2)', 'random_state': '(60)'}), '(tagged_df, test_size=0.2, random_state=60)\n', (3822, 3865), False, 'from sklearn.model_selection import train_test_split\n'), ((4271, 4300), 'app.lib.utils.jsonl.jsonl_to_df', 'jsonl_to_df', 
(['"""users"""', 'db_cols'], {}), "('users', db_cols)\n", (4282, 4300), False, 'from app.lib.utils.jsonl import jsonl_to_df\n'), ((4383, 4471), 'pandas.merge', 'pd.merge', (['tweets_by_user_df', 'user_class_df'], {'left_on': '"""username"""', 'right_on': '"""username"""'}), "(tweets_by_user_df, user_class_df, left_on='username', right_on=\n 'username')\n", (4391, 4471), True, 'import pandas as pd\n'), ((5000, 5023), 'nltk.tokenize.TreebankWordTokenizer', 'TreebankWordTokenizer', ([], {}), '()\n', (5021, 5023), False, 'from nltk.tokenize import TreebankWordTokenizer\n'), ((5500, 5516), 'collections.Counter', 'Counter', (['lexicon'], {}), '(lexicon)\n', (5507, 5516), False, 'from collections import Counter, OrderedDict\n'), ((5874, 5918), 'collections.OrderedDict', 'OrderedDict', (['((word, 0) for word in top_words)'], {}), '((word, 0) for word in top_words)\n', (5885, 5918), False, 'from collections import Counter, OrderedDict\n'), ((7573, 7601), 'numpy.array', 'np.array', (['tweets_by_user_vec'], {}), '(tweets_by_user_vec)\n', (7581, 7601), True, 'import numpy as np\n'), ((7638, 7671), 'numpy.array', 'np.array', (['tweets_by_user_vec_test'], {}), '(tweets_by_user_vec_test)\n', (7646, 7671), True, 'import numpy as np\n'), ((5053, 5079), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (5068, 5079), False, 'from nltk.corpus import stopwords\n'), ((6025, 6047), 'copy.copy', 'copy.copy', (['zero_vector'], {}), '(zero_vector)\n', (6034, 6047), False, 'import copy\n'), ((6138, 6153), 'collections.Counter', 'Counter', (['tokens'], {}), '(tokens)\n', (6145, 6153), False, 'from collections import Counter, OrderedDict\n'), ((6654, 6672), 'numpy.array', 'np.array', (['vec_list'], {}), '(vec_list)\n', (6662, 6672), True, 'import numpy as np\n'), ((6839, 6861), 'copy.copy', 'copy.copy', (['zero_vector'], {}), '(zero_vector)\n', (6848, 6861), False, 'import copy\n'), ((6952, 6967), 'collections.Counter', 'Counter', (['tokens'], {}), 
'(tokens)\n', (6959, 6967), False, 'from collections import Counter, OrderedDict\n'), ((7468, 7486), 'numpy.array', 'np.array', (['vec_list'], {}), '(vec_list)\n', (7476, 7486), True, 'import numpy as np\n'), ((2541, 2570), 're.escape', 're.escape', (['string.punctuation'], {}), '(string.punctuation)\n', (2550, 2570), False, 'import re\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import logging
import argparse
import random
from tqdm import tqdm, trange
import dill
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.optim import Adam
from tensorboardX import SummaryWriter
from utils import metric_report, t2n, get_n_params
from config import BertConfig
from predictive_models import GBERT_Predict_Side
# Configure root logging once at import time so every module logger shares
# the same timestamped format and INFO threshold.
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
class Voc(object):
    """Bidirectional vocabulary mapping tokens to contiguous integer ids."""

    def __init__(self):
        # id -> token and token -> id lookup tables, kept in sync.
        self.idx2word = {}
        self.word2idx = {}

    def add_sentence(self, sentence):
        """Register every previously unseen token in *sentence*, assigning
        ids in order of first appearance."""
        for token in sentence:
            if token in self.word2idx:
                continue
            new_id = len(self.word2idx)
            self.idx2word[new_id] = token
            self.word2idx[token] = new_id
class EHRTokenizer(object):
    """Runs end-to-end tokenization"""

    def __init__(self, data_dir, special_tokens=("[PAD]", "[CLS]", "[MASK]")):
        self.vocab = Voc()
        # Reserve ids for the special tokens before any real codes are added.
        self.vocab.add_sentence(special_tokens)

        self.rx_voc = self.add_vocab(os.path.join(data_dir, 'rx-vocab.txt'))
        self.dx_voc = self.add_vocab(os.path.join(data_dir, 'dx-vocab.txt'))

        # Vocabularies restricted to codes that occur in multi-visit data.
        self.rx_voc_multi = Voc()
        self.dx_voc_multi = Voc()
        self._read_codes_into(os.path.join(data_dir, 'rx-vocab-multi.txt'), self.rx_voc_multi)
        self._read_codes_into(os.path.join(data_dir, 'dx-vocab-multi.txt'), self.dx_voc_multi)

    @staticmethod
    def _read_codes_into(path, voc):
        """Add one code per line of *path* to *voc*."""
        with open(path, 'r') as fin:
            for line in fin:
                voc.add_sentence([line.rstrip('\n')])

    def add_vocab(self, vocab_file):
        """Load *vocab_file* into both the shared vocab and a fresh
        file-specific Voc; return the file-specific one."""
        shared_voc = self.vocab
        specific_voc = Voc()
        with open(vocab_file, 'r') as fin:
            for line in fin:
                code = line.rstrip('\n')
                shared_voc.add_sentence([code])
                specific_voc.add_sentence([code])
        return specific_voc

    def convert_tokens_to_ids(self, tokens):
        """Converts a sequence of tokens into ids using the vocab."""
        return [self.vocab.word2idx[token] for token in tokens]

    def convert_ids_to_tokens(self, ids):
        """Converts a sequence of ids in wordpiece tokens using the vocab."""
        return [self.vocab.idx2word[i] for i in ids]
class EHRDataset(Dataset):
    # Wraps the multi-visit EHR dataframe: one sample per patient covering all
    # of that patient's admissions (diagnosis + medication codes) plus the
    # per-admission side-information features.
    def __init__(self, data_pd, tokenizer: EHRTokenizer, max_seq_len):
        self.data_pd = data_pd
        self.tokenizer = tokenizer
        self.seq_len = max_seq_len
        # Counter used only to log the first few samples in __getitem__.
        self.sample_counter = 0
        # Columns from index 5 onward are treated as side-information features
        # — assumes a fixed column layout; TODO confirm against the loader.
        self.side_len = len(self.data_pd.iloc[0, 5:])
        logger.info('side len %d' % self.side_len)

        def transform_data(data):
            """
            :param data: raw data form
            :return: {subject_id, [adm, 2, codes]},
            """
            records = {}
            side_records = {}
            for subject_id in data['SUBJECT_ID'].unique():
                item_df = data[data['SUBJECT_ID'] == subject_id]
                patient = []
                sides = []
                for _, row in item_df.iterrows():
                    # Each admission carries [diagnosis codes, medication codes].
                    admission = [list(row['ICD9_CODE']), list(row['ATC4'])]
                    patient.append(admission)
                    sides.append(row[5:].values)
                # Patients with a single admission have no "next visit" target
                # and are skipped.
                if len(patient) < 2:
                    continue
                records[subject_id] = patient
                side_records[subject_id] = sides
            return records, side_records

        self.records, self.side_records = transform_data(data_pd)

    def __len__(self):
        return len(self.records)

    def __getitem__(self, item):
        cur_id = self.sample_counter
        self.sample_counter += 1
        subject_id = list(self.records.keys())[item]

        def fill_to_max(l, seq):
            # Pad the code list in place with '[PAD]' up to length `seq`.
            while len(l) < seq:
                l.append('[PAD]')
            return l

        """extract input and output tokens
        """
        input_tokens = []  # (2*max_len*adm)
        output_dx_tokens = []  # (adm-1, l)
        output_rx_tokens = []  # (adm-1, l)
        for idx, adm in enumerate(self.records[subject_id]):
            # Every admission contributes two fixed-length segments:
            # [CLS] + padded dx codes, then [CLS] + padded rx codes.
            input_tokens.extend(
                ['[CLS]'] + fill_to_max(list(adm[0]), self.seq_len - 1))
            input_tokens.extend(
                ['[CLS]'] + fill_to_max(list(adm[1]), self.seq_len - 1))
            # output_rx_tokens.append(list(adm[1]))
            # Targets exist only from the second admission onward.
            if idx != 0:
                output_rx_tokens.append(list(adm[1]))
                output_dx_tokens.append(list(adm[0]))

        """convert tokens to id
        """
        input_ids = self.tokenizer.convert_tokens_to_ids(input_tokens)
        output_dx_labels = []  # (adm-1, dx_voc_size)
        output_rx_labels = []  # (adm-1, rx_voc_size)

        dx_voc_size = len(self.tokenizer.dx_voc_multi.word2idx)
        rx_voc_size = len(self.tokenizer.rx_voc_multi.word2idx)
        # Build multi-hot label vectors over the multi-visit vocabularies.
        for tokens in output_dx_tokens:
            tmp_labels = np.zeros(dx_voc_size)
            tmp_labels[list(
                map(lambda x: self.tokenizer.dx_voc_multi.word2idx[x], tokens))] = 1
            output_dx_labels.append(tmp_labels)

        for tokens in output_rx_tokens:
            tmp_labels = np.zeros(rx_voc_size)
            tmp_labels[list(
                map(lambda x: self.tokenizer.rx_voc_multi.word2idx[x], tokens))] = 1
            output_rx_labels.append(tmp_labels)

        # Log the first few constructed samples for manual inspection.
        if cur_id < 5:
            logger.info("*** Example ***")
            logger.info("subject_id: %s" % subject_id)
            logger.info("input tokens: %s" % " ".join(
                [str(x) for x in input_tokens]))
            logger.info("input_ids: %s" %
                        " ".join([str(x) for x in input_ids]))

        # Sanity checks: two padded segments per admission, one label row per
        # admission after the first.
        assert len(input_ids) == (self.seq_len *
                                  2 * len(self.records[subject_id]))
        assert len(output_dx_labels) == (len(self.records[subject_id]) - 1)
        # assert len(output_rx_labels) == len(self.records[subject_id])-1

        """extract side
        """
        # Side features are aligned with the targets, i.e. admissions 1..N-1.
        sides = self.side_records[subject_id][1:]
        assert len(sides) == len(output_dx_labels)

        cur_tensors = (torch.tensor(input_ids).view(-1, self.seq_len),
                       torch.tensor(output_dx_labels, dtype=torch.float),
                       torch.tensor(output_rx_labels, dtype=torch.float),
                       torch.tensor(sides, dtype=torch.float))

        return cur_tensors
return cur_tensors
def load_dataset(args):
    """Build the tokenizer and the (train, eval, test) EHRDataset triple.

    Reads the multi-visit records and their side-information table from
    ``args.data_dir``, joins them on (SUBJECT_ID, HADM_ID), and splits the
    result according to the id files on disk.
    """
    data_dir = args.data_dir
    max_seq_len = args.max_seq_length

    # Tokenizer is built from the vocabulary files in the same directory.
    tokenizer = EHRTokenizer(data_dir)

    # Join visit records with their side information.
    visits = pd.read_pickle(os.path.join(data_dir, 'data-multi-visit.pkl'))
    sides = pd.read_pickle(os.path.join(data_dir, 'data-multi-side.pkl'))
    merged = visits.merge(sides, how='inner', on=['SUBJECT_ID', 'HADM_ID'])

    def subset_for(id_file):
        """Return the rows whose SUBJECT_ID is listed (one per line) in *id_file*."""
        with open(id_file, 'r') as f:
            wanted = [int(line.rstrip('\n')) for line in f]
        return merged[merged['SUBJECT_ID'].isin(wanted)].reset_index(drop=True)

    # Train / eval / test membership is defined by id files on disk.
    datasets = []
    for id_name in ('train-id.txt', 'eval-id.txt', 'test-id.txt'):
        subset = subset_for(os.path.join(data_dir, id_name))
        datasets.append(EHRDataset(subset, tokenizer, max_seq_len))
    return tokenizer, tuple(datasets)
def main():
    """Train/evaluate/test the side-information variant of the G-BERT
    medication prediction model; all behavior is driven by CLI flags."""
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument("--model_name", default='GBert-predict-side', type=str, required=False,
                        help="model name")
    parser.add_argument("--data_dir",
                        default='../data',
                        type=str,
                        required=False,
                        help="The input data dir.")
    parser.add_argument("--pretrain_dir", default='../saved/GBert-predict', type=str, required=False,
                        help="pretraining model dir.")
    parser.add_argument("--train_file", default='data-multi-visit.pkl', type=str, required=False,
                        help="training data file.")
    parser.add_argument("--output_dir",
                        default='../saved/',
                        type=str,
                        required=False,
                        help="The output directory where the model checkpoints will be written.")

    # Other parameters
    # NOTE(review): with action='store_true' and default=True this flag is
    # effectively always True — confirm whether a --no_pretrain path is needed.
    parser.add_argument("--use_pretrain",
                        default=True,
                        action='store_true',
                        help="is use pretrain")
    parser.add_argument("--graph",
                        default=False,
                        action='store_true',
                        help="if use ontology embedding")
    parser.add_argument("--therhold",
                        default=0.3,
                        type=float,
                        help="therhold.")
    parser.add_argument("--max_seq_length",
                        default=55,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                        "Sequences longer than this will be truncated, and sequences shorter \n"
                        "than this will be padded.")
    parser.add_argument("--do_train",
                        default=False,
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        default=True,
                        action='store_true',
                        help="Whether to run on the dev set.")
    parser.add_argument("--do_test",
                        default=True,
                        action='store_true',
                        help="Whether to run on the test set.")
    parser.add_argument("--train_batch_size",
                        default=1,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=40.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--seed',
                        type=int,
                        default=1203,
                        help="random seed for initialization")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                        "E.g., 0.1 = 10%% of training.")

    args = parser.parse_args()
    args.output_dir = os.path.join(args.output_dir, args.model_name)

    # Seed all RNGs for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available()
                          and not args.no_cuda else "cpu")

    if not args.do_train and not args.do_eval:
        raise ValueError(
            "At least one of `do_train` or `do_eval` must be True.")

    # if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
    #     raise ValueError(
    #         "Output directory ({}) already exists and is not empty.".format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)

    # Build datasets and per-split dataloaders (one patient per batch).
    print("Loading Dataset")
    tokenizer, (train_dataset, eval_dataset, test_dataset) = load_dataset(args)
    train_dataloader = DataLoader(train_dataset,
                                  sampler=RandomSampler(train_dataset),
                                  batch_size=1)
    eval_dataloader = DataLoader(eval_dataset,
                                 sampler=SequentialSampler(eval_dataset),
                                 batch_size=1)
    test_dataloader = DataLoader(test_dataset,
                                 sampler=SequentialSampler(test_dataset),
                                 batch_size=1)

    print('Loading Model: ' + args.model_name)
    # config = BertConfig(vocab_size_or_config_json_file=len(tokenizer.vocab.word2idx), side_len=train_dataset.side_len)
    # config.graph = args.graph
    # model = SeperateBertTransModel(config, tokenizer.dx_voc, tokenizer.rx_voc)
    if args.use_pretrain:
        logger.info("Use Pretraining model")
        model = GBERT_Predict_Side.from_pretrained(
            args.pretrain_dir, tokenizer=tokenizer, side_len=train_dataset.side_len)
    else:
        config = BertConfig(
            vocab_size_or_config_json_file=len(tokenizer.vocab.word2idx))
        config.graph = args.graph
        model = GBERT_Predict_Side(config, tokenizer, train_dataset.side_len)
    logger.info('# of model parameters: ' + str(get_n_params(model)))

    model.to(device)

    model_to_save = model.module if hasattr(
        model, 'module') else model  # Only save the model it-self
    rx_output_model_file = os.path.join(
        args.output_dir, "pytorch_model.bin")

    # Prepare optimizer
    # num_train_optimization_steps = int(
    #     len(train_dataset) / args.train_batch_size) * args.num_train_epochs
    # param_optimizer = list(model.named_parameters())
    # no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    # optimizer_grouped_parameters = [
    #     {'params': [p for n, p in param_optimizer if not any(
    #         nd in n for nd in no_decay)], 'weight_decay': 0.01},
    #     {'params': [p for n, p in param_optimizer if any(
    #         nd in n for nd in no_decay)], 'weight_decay': 0.0}
    # ]
    # optimizer = BertAdam(optimizer_grouped_parameters,
    #                      lr=args.learning_rate,
    #                      warmup=args.warmup_proportion,
    #                      t_total=num_train_optimization_steps)
    optimizer = Adam(model.parameters(), lr=args.learning_rate)

    global_step = 0
    if args.do_train:
        writer = SummaryWriter(args.output_dir)

        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_dataset))
        logger.info("  Batch size = %d", 1)

        dx_acc_best, rx_acc_best = 0, 0
        acc_name = 'prauc'
        dx_history = {'prauc': []}
        rx_history = {'prauc': []}

        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            print('')
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            prog_iter = tqdm(train_dataloader, leave=False, desc='Training')
            model.train()
            for _, batch in enumerate(prog_iter):
                batch = tuple(t.to(device) for t in batch)
                input_ids, dx_labels, rx_labels, input_sides = batch
                # Drop the batch dimension: each "batch" is one patient.
                input_ids, dx_labels, rx_labels, input_sides = input_ids.squeeze(
                    dim=0), dx_labels.squeeze(dim=0), rx_labels.squeeze(dim=0), input_sides.squeeze(dim=0)
                loss, rx_logits = model(input_ids, dx_labels=dx_labels, rx_labels=rx_labels,
                                        epoch=global_step, input_sides=input_sides)
                loss.backward()

                tr_loss += loss.item()
                nb_tr_examples += 1
                nb_tr_steps += 1

                # Display loss
                prog_iter.set_postfix(loss='%.4f' % (tr_loss / nb_tr_steps))

                optimizer.step()
                optimizer.zero_grad()

            writer.add_scalar('train/loss', tr_loss / nb_tr_steps, global_step)
            global_step += 1

            if args.do_eval:
                print('')
                logger.info("***** Running eval *****")
                model.eval()
                rx_y_preds = []
                rx_y_trues = []
                for eval_input in tqdm(eval_dataloader, desc="Evaluating"):
                    eval_input = tuple(t.to(device) for t in eval_input)
                    input_ids, dx_labels, rx_labels, input_sides = eval_input
                    input_ids, dx_labels, rx_labels, input_sides = input_ids.squeeze(
                    ), dx_labels.squeeze(), rx_labels.squeeze(dim=0), input_sides.squeeze(dim=0)
                    with torch.no_grad():
                        loss, rx_logits = model(
                            input_ids, dx_labels=dx_labels, rx_labels=rx_labels, input_sides=input_sides)
                        rx_y_preds.append(t2n(torch.sigmoid(rx_logits)))
                        rx_y_trues.append(t2n(rx_labels))

                print('')
                rx_acc_container = metric_report(np.concatenate(rx_y_preds, axis=0), np.concatenate(rx_y_trues, axis=0),
                                                args.therhold)

                writer.add_scalars(
                    'eval_rx', rx_acc_container, global_step)

                # Checkpoint only when the tracked metric (PR-AUC) improves.
                if rx_acc_container[acc_name] > rx_acc_best:
                    rx_acc_best = rx_acc_container[acc_name]
                    # save model
                    torch.save(model_to_save.state_dict(),
                               rx_output_model_file)

        with open(os.path.join(args.output_dir, 'bert_config.json'), 'w', encoding='utf-8') as fout:
            fout.write(model.config.to_json_string())

    if args.do_test:
        logger.info("***** Running test *****")
        logger.info("  Num examples = %d", len(test_dataset))
        logger.info("  Batch size = %d", 1)

        def test(task=0):
            # Load a trained model that you have fine-tuned
            model_state_dict = torch.load(rx_output_model_file)
            model.load_state_dict(model_state_dict)
            model.to(device)

            model.eval()
            y_preds = []
            y_trues = []
            for test_input in tqdm(test_dataloader, desc="Testing"):
                test_input = tuple(t.to(device) for t in test_input)
                input_ids, dx_labels, rx_labels, input_sides = test_input
                input_ids, dx_labels, rx_labels, input_sides = input_ids.squeeze(
                ), dx_labels.squeeze(), rx_labels.squeeze(dim=0), input_sides.squeeze(dim=0)
                with torch.no_grad():
                    loss, rx_logits = model(
                        input_ids, dx_labels=dx_labels, rx_labels=rx_labels, input_sides=input_sides)
                    y_preds.append(t2n(torch.sigmoid(rx_logits)))
                    y_trues.append(t2n(rx_labels))

            print('')
            acc_container = metric_report(np.concatenate(y_preds, axis=0), np.concatenate(y_trues, axis=0),
                                          args.therhold)

            # save report
            # NOTE(review): `writer` is only created when --do_train is set;
            # running with do_test but not do_train raises NameError here —
            # confirm intended usage.
            writer.add_scalars('test', acc_container, 0)

            return acc_container

        test(task=0)
# Script entry point: run training/evaluation with CLI-provided settings.
if __name__ == "__main__":
    main()
| [
"logging.getLogger",
"torch.cuda.is_available",
"utils.get_n_params",
"predictive_models.GBERT_Predict_Side.from_pretrained",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"numpy.random.seed",
"numpy.concatenate",
"torch.utils.data.SequentialSampler",
"predictive_models.GBERT_Predict_Si... | [((666, 809), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (685, 809), False, 'import logging\n'), ((849, 876), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (866, 876), False, 'import logging\n'), ((8071, 8096), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8094, 8096), False, 'import argparse\n'), ((11701, 11747), 'os.path.join', 'os.path.join', (['args.output_dir', 'args.model_name'], {}), '(args.output_dir, args.model_name)\n', (11713, 11747), False, 'import os\n'), ((11753, 11775), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (11764, 11775), False, 'import random\n'), ((11780, 11805), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (11794, 11805), True, 'import numpy as np\n'), ((11810, 11838), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (11827, 11838), False, 'import torch\n'), ((12325, 12368), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (12336, 12368), False, 'import os\n'), ((13932, 13982), 'os.path.join', 'os.path.join', (['args.output_dir', '"""pytorch_model.bin"""'], {}), "(args.output_dir, 'pytorch_model.bin')\n", (13944, 13982), False, 'import os\n'), ((7144, 7190), 'os.path.join', 'os.path.join', (['data_dir', '"""data-multi-visit.pkl"""'], {}), "(data_dir, 'data-multi-visit.pkl')\n", (7156, 7190), False, 'import os\n'), ((7237, 7282), 'os.path.join', 'os.path.join', (['data_dir', '"""data-multi-side.pkl"""'], {}), "(data_dir, 'data-multi-side.pkl')\n", (7249, 7282), False, 'import os\n'), ((7422, 7460), 'os.path.join', 
'os.path.join', (['data_dir', '"""train-id.txt"""'], {}), "(data_dir, 'train-id.txt')\n", (7434, 7460), False, 'import os\n'), ((7478, 7515), 'os.path.join', 'os.path.join', (['data_dir', '"""eval-id.txt"""'], {}), "(data_dir, 'eval-id.txt')\n", (7490, 7515), False, 'import os\n'), ((7533, 7570), 'os.path.join', 'os.path.join', (['data_dir', '"""test-id.txt"""'], {}), "(data_dir, 'test-id.txt')\n", (7545, 7570), False, 'import os\n'), ((13353, 13464), 'predictive_models.GBERT_Predict_Side.from_pretrained', 'GBERT_Predict_Side.from_pretrained', (['args.pretrain_dir'], {'tokenizer': 'tokenizer', 'side_len': 'train_dataset.side_len'}), '(args.pretrain_dir, tokenizer=tokenizer,\n side_len=train_dataset.side_len)\n', (13387, 13464), False, 'from predictive_models import GBERT_Predict_Side\n'), ((13637, 13698), 'predictive_models.GBERT_Predict_Side', 'GBERT_Predict_Side', (['config', 'tokenizer', 'train_dataset.side_len'], {}), '(config, tokenizer, train_dataset.side_len)\n', (13655, 13698), False, 'from predictive_models import GBERT_Predict_Side\n'), ((14914, 14944), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['args.output_dir'], {}), '(args.output_dir)\n', (14927, 14944), False, 'from tensorboardX import SummaryWriter\n'), ((1490, 1528), 'os.path.join', 'os.path.join', (['data_dir', '"""rx-vocab.txt"""'], {}), "(data_dir, 'rx-vocab.txt')\n", (1502, 1528), False, 'import os\n'), ((1567, 1605), 'os.path.join', 'os.path.join', (['data_dir', '"""dx-vocab.txt"""'], {}), "(data_dir, 'dx-vocab.txt')\n", (1579, 1605), False, 'import os\n'), ((5464, 5485), 'numpy.zeros', 'np.zeros', (['dx_voc_size'], {}), '(dx_voc_size)\n', (5472, 5485), True, 'import numpy as np\n'), ((5714, 5735), 'numpy.zeros', 'np.zeros', (['rx_voc_size'], {}), '(rx_voc_size)\n', (5722, 5735), True, 'import numpy as np\n'), ((6731, 6780), 'torch.tensor', 'torch.tensor', (['output_dx_labels'], {'dtype': 'torch.float'}), '(output_dx_labels, dtype=torch.float)\n', (6743, 6780), False, 'import torch\n'), 
((6805, 6854), 'torch.tensor', 'torch.tensor', (['output_rx_labels'], {'dtype': 'torch.float'}), '(output_rx_labels, dtype=torch.float)\n', (6817, 6854), False, 'import torch\n'), ((6879, 6917), 'torch.tensor', 'torch.tensor', (['sides'], {'dtype': 'torch.float'}), '(sides, dtype=torch.float)\n', (6891, 6917), False, 'import torch\n'), ((12570, 12598), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_dataset'], {}), '(train_dataset)\n', (12583, 12598), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, Dataset\n'), ((12736, 12767), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['eval_dataset'], {}), '(eval_dataset)\n', (12753, 12767), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, Dataset\n'), ((12904, 12935), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['test_dataset'], {}), '(test_dataset)\n', (12921, 12935), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, Dataset\n'), ((15428, 15480), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {'leave': '(False)', 'desc': '"""Training"""'}), "(train_dataloader, leave=False, desc='Training')\n", (15432, 15480), False, 'from tqdm import tqdm, trange\n'), ((18454, 18486), 'torch.load', 'torch.load', (['rx_output_model_file'], {}), '(rx_output_model_file)\n', (18464, 18486), False, 'import torch\n'), ((18674, 18711), 'tqdm.tqdm', 'tqdm', (['test_dataloader'], {'desc': '"""Testing"""'}), "(test_dataloader, desc='Testing')\n", (18678, 18711), False, 'from tqdm import tqdm, trange\n'), ((1734, 1778), 'os.path.join', 'os.path.join', (['data_dir', '"""rx-vocab-multi.txt"""'], {}), "(data_dir, 'rx-vocab-multi.txt')\n", (1746, 1778), False, 'import os\n'), ((1908, 1952), 'os.path.join', 'os.path.join', (['data_dir', '"""dx-vocab-multi.txt"""'], {}), "(data_dir, 'dx-vocab-multi.txt')\n", (1920, 1952), False, 'import os\n'), ((11876, 11901), 
'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11899, 11901), False, 'import torch\n'), ((13748, 13767), 'utils.get_n_params', 'get_n_params', (['model'], {}), '(model)\n', (13760, 13767), False, 'from utils import metric_report, t2n, get_n_params\n'), ((16722, 16762), 'tqdm.tqdm', 'tqdm', (['eval_dataloader'], {'desc': '"""Evaluating"""'}), "(eval_dataloader, desc='Evaluating')\n", (16726, 16762), False, 'from tqdm import tqdm, trange\n'), ((18023, 18072), 'os.path.join', 'os.path.join', (['args.output_dir', '"""bert_config.json"""'], {}), "(args.output_dir, 'bert_config.json')\n", (18035, 18072), False, 'import os\n'), ((19398, 19429), 'numpy.concatenate', 'np.concatenate', (['y_preds'], {'axis': '(0)'}), '(y_preds, axis=0)\n', (19412, 19429), True, 'import numpy as np\n'), ((19431, 19462), 'numpy.concatenate', 'np.concatenate', (['y_trues'], {'axis': '(0)'}), '(y_trues, axis=0)\n', (19445, 19462), True, 'import numpy as np\n'), ((6660, 6683), 'torch.tensor', 'torch.tensor', (['input_ids'], {}), '(input_ids)\n', (6672, 6683), False, 'import torch\n'), ((17502, 17536), 'numpy.concatenate', 'np.concatenate', (['rx_y_preds'], {'axis': '(0)'}), '(rx_y_preds, axis=0)\n', (17516, 17536), True, 'import numpy as np\n'), ((17538, 17572), 'numpy.concatenate', 'np.concatenate', (['rx_y_trues'], {'axis': '(0)'}), '(rx_y_trues, axis=0)\n', (17552, 17572), True, 'import numpy as np\n'), ((19052, 19067), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19065, 19067), False, 'import torch\n'), ((17123, 17138), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17136, 17138), False, 'import torch\n'), ((19317, 19331), 'utils.t2n', 't2n', (['rx_labels'], {}), '(rx_labels)\n', (19320, 19331), False, 'from utils import metric_report, t2n, get_n_params\n'), ((17410, 17424), 'utils.t2n', 't2n', (['rx_labels'], {}), '(rx_labels)\n', (17413, 17424), False, 'from utils import metric_report, t2n, get_n_params\n'), ((19255, 19279), 'torch.sigmoid', 
'torch.sigmoid', (['rx_logits'], {}), '(rx_logits)\n', (19268, 19279), False, 'import torch\n'), ((17341, 17365), 'torch.sigmoid', 'torch.sigmoid', (['rx_logits'], {}), '(rx_logits)\n', (17354, 17365), False, 'import torch\n')] |
import os
import dill
import numpy as np
def get_function_path(base_path=None, experiment_name=None, make=True):
    """
    Return the file path where the serialized representation function lives.

    Parameters
    ----------
    base_path : str, optional
        Root directory for experiments. Falls back to the AICROWD_OUTPUT_PATH
        environment variable, then to '../scratch/shared'.
    experiment_name : str, optional
        Name of the experiment. Falls back to AICROWD_EVALUATION_NAME, then
        to 'experiment_name'.
    make : bool
        If True, create the directory the returned path points into (plus a
        'results' subdirectory) when it does not already exist.

    Returns
    -------
    str
        Path to 'python_model.dill' for this experiment.
    """
    if base_path is None:
        base_path = os.getenv("AICROWD_OUTPUT_PATH", "../scratch/shared")
    if experiment_name is None:
        experiment_name = os.getenv("AICROWD_EVALUATION_NAME", "experiment_name")
    model_path = os.path.join(
        base_path, experiment_name, 'representation', 'python_model.dill')
    if make:
        model_dir = os.path.dirname(model_path)
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(os.path.join(model_dir, 'results'), exist_ok=True)
    return model_path
def export_function(fn, path=None):
    """
    Serialize a callable to disk with dill.

    The callable must accept a numpy tensor of shape NCHW, where N
    (batch-size) can be arbitrary, C is the number of input channels, and
    (H, W) are the image dimensions. Serialization is best-effort: there is
    no guarantee it round-trips, so double-check by importing the function.

    Parameters
    ----------
    fn : callable
        Function to be serialized.
    path : str, optional
        Destination file. Defaults to the value returned by
        `get_function_path`.

    Returns
    -------
    str
        Path to where the function is saved.
    """
    assert callable(fn), "Provided function should at least be callable..."
    if path is None:
        path = get_function_path()
    with open(path, 'wb') as dump_file:
        dill.dump(fn, dump_file, protocol=dill.HIGHEST_PROTOCOL)
    return path
def import_function(path=None):
    """
    Deserialize a function previously saved with `export_function`.

    Parameters
    ----------
    path : str, optional
        File to load from. Defaults to the value returned by
        `get_function_path`.

    Returns
    -------
    callable
    """
    if path is None:
        path = get_function_path()
    with open(path, 'rb') as load_file:
        # Here goes nothing...
        return dill.load(load_file)
def make_representor(fn, format='NCHW'):
    """
    Wrap `fn` in a callable usable by `disentanglement_lib`.

    Incoming arrays are always NHWC; when `format` is 'NCHW' they are
    transposed before being handed to `fn`.

    Parameters
    ----------
    fn : callable
        Representation function to wrap.
    format : str
        Layout expected by `fn`: 'NCHW' or 'NHWC', where N is batch,
        C channels, H height, W width.

    Returns
    -------
    callable
    """
    assert format in ['NCHW', 'NHWC'], f"format must either be NCHW or NHWC; got {format}."

    def _represent(x):
        assert isinstance(x, np.ndarray), \
            f"Input to the representation function must be a ndarray, got {type(x)} instead."
        assert x.ndim == 4, \
            f"Input to the representation function must be a four dimensional NHWC array, " \
            f"got a {x.ndim}-dimensional array of shape {x.shape} instead."
        if format == 'NCHW':
            # Convert from NHWC to NCHW before calling the wrapped function.
            x = np.moveaxis(x, 3, 1)
        # Call the function on the array and validate its output.
        y = fn(x)
        assert isinstance(y, np.ndarray), f"Output from the representation function " \
                                          f"should be a numpy array, got {type(y)} instead."
        assert y.ndim == 2, "Output from the representation function should be two dimensional."
        return y
    return _represent
| [
"os.getenv",
"os.path.join",
"os.path.dirname",
"numpy.moveaxis",
"dill.dump",
"dill.load"
] | [((1105, 1184), 'os.path.join', 'os.path.join', (['base_path', 'experiment_name', '"""representation"""', '"""python_model.dill"""'], {}), "(base_path, experiment_name, 'representation', 'python_model.dill')\n", (1117, 1184), False, 'import os\n'), ((853, 906), 'os.getenv', 'os.getenv', (['"""AICROWD_OUTPUT_PATH"""', '"""../scratch/shared"""'], {}), "('AICROWD_OUTPUT_PATH', '../scratch/shared')\n", (862, 906), False, 'import os\n'), ((974, 1029), 'os.getenv', 'os.getenv', (['"""AICROWD_EVALUATION_NAME"""', '"""experiment_name"""'], {}), "('AICROWD_EVALUATION_NAME', 'experiment_name')\n", (983, 1029), False, 'import os\n'), ((2337, 2385), 'dill.dump', 'dill.dump', (['fn', 'f'], {'protocol': 'dill.HIGHEST_PROTOCOL'}), '(fn, f, protocol=dill.HIGHEST_PROTOCOL)\n', (2346, 2385), False, 'import dill\n'), ((2832, 2844), 'dill.load', 'dill.load', (['f'], {}), '(f)\n', (2841, 2844), False, 'import dill\n'), ((1218, 1245), 'os.path.dirname', 'os.path.dirname', (['model_path'], {}), '(model_path)\n', (1233, 1245), False, 'import os\n'), ((3827, 3847), 'numpy.moveaxis', 'np.moveaxis', (['x', '(3)', '(1)'], {}), '(x, 3, 1)\n', (3838, 3847), True, 'import numpy as np\n'), ((1295, 1322), 'os.path.dirname', 'os.path.dirname', (['model_path'], {}), '(model_path)\n', (1310, 1322), False, 'import os\n')] |
"""
~~~ REDIMENSIONALIZE THE SIMULATION AND DATA FILES ~~~
Re-dimensionalizing and plotting the CQ data (simulations AND experimental)
will be done in a few steps:
1. Read in the 3 experimental data files
2. Read in the 3*4 = 12 simulation data files
3. Dimensionalize P, M, wp, wm, and sb for both the data and the simulations
4. Use the old plotting program to plot everything together in 2 figures.
5. Save the results to data files.
"""
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
from numpy.fft import fft, ifft
# Define something that will list directories that are not hidden
def listdirNH(path):
    """List entries of `path`, excluding hidden (dot-prefixed) names."""
    # glob's '*' pattern does not match names starting with a dot.
    pattern = os.path.join(path, '*')
    return glob.glob(pattern)
### STEP 1: READ IN THE EXPERIMENTAL DATA FILES
# Define the dictionaries; each maps a source name ('Data' or a simulation
# name) to a 2D array of that quantity.
P = {}
M = {}
PM = {}
sb = {}
wp = {}
# NOTE(review): files are assigned to these dicts purely by listing position,
# so the directory listing must come back in the order M, P, P/M, sb, wp --
# confirm the file naming guarantees this.
MasterDict = [M,P,PM,sb,wp]
# Start reading in the data
whichset = 'JulyData/'
dir = whichset+'Data CQs/NonDim CQ Values'  # NOTE(review): `dir` shadows the builtin
files = listdirNH(dir)
key1 = 'Data'
l = 0
for k in files:
    Dict = MasterDict[l]
    # Transpose so that each row holds one column of the text file.
    data = np.transpose(np.loadtxt(k).view(float))
    Dict[key1]=data
    l=l+1
### STEP 2: READ IN THE SIMULATION DATA
dir = whichset+'Simulations/Conserved Quantities'
key2 = ['dNLS CQ', 'Dysthe CQ', 'NLS CQ', 'vDysthe CQ']
dk = 0
for subdir in key2:
    # One subdirectory per simulation model, same 5-file layout as the data.
    files = listdirNH(dir+'/'+subdir)
    l = 0
    for k in files:
        Dict = MasterDict[l]
        data = np.transpose(np.loadtxt(k).view(float))
        Dict[key2[dk]]=data
        l=l+1
    dk = dk+1
### STEP 3: DIMENSIONALIZE THE DATA
# Define dimensionalization constants
g = 9.81  # gravitational acceleration (m/s^2)
epsilon = 7.84869668208853e-05  # nondimensional scaling parameter -- TODO confirm provenance
w0 = 0.05864306286700947  # carrier frequency scale; treated as Hz (converted to mHz below)
k0 = w0**2/g  # deep-water dispersion relation k0 = w0^2 / g
# Dim P
dim_P = {}
for key in P:
    ent = P[key]
    xi = ent[0]
    p = ent[1]
    # Row 0 is the nondimensional location xi; convert to meters.
    x = xi/(epsilon**2*k0)
    dim_p = epsilon**3*w0/k0**2*p
    dim_P[key] = np.append([x],[dim_p],axis = 0)
# Dim M
dim_M = {}
for key in M:
    ent = M[key]
    xi = ent[0]
    m = ent[1]
    x = xi/(epsilon**2*k0)
    dim_m = (epsilon/k0)**2*m
    dim_M[key] = np.append([x],[dim_m],axis = 0)
# Dim PM
dim_PM = {}
for key in PM:
    # P/M is rebuilt from the already-dimensionalized P and M curves.
    ent = PM[key]
    Pent = dim_P[key]
    Ment = dim_M[key]
    m = Ment[1]
    p = Pent[1]
    xi = ent[0]
    x = xi/(epsilon**2*k0)
    dim_pm = (p/m)*1000 # Gives mHz
    dim_PM[key] = np.append([x],[dim_pm],axis = 0)
# Dim sb
dim_sb = {}
for key in sb:
    ent = sb[key]
    xi = ent[0]
    x = xi/(epsilon**2*k0)
    # One row per sideband amplitude (rows 1..end of the input array).
    sbv = np.zeros((len(ent)-1,len(xi)))
    for j in range(1,len(ent)):
        sideband = ent[j]
        dim_sideband = (epsilon/k0)*sideband
        sbv[j-1]=dim_sideband
    dim_sb[key] = np.vstack([x,sbv])
# Dim wp
dim_wp = {}
for key in wp:
    ent = wp[key]
    xi = ent[0]
    peak = ent[1]
    x = xi/(epsilon**2*k0)
    # Shift the peak frequency by the carrier; w0*1000 converts Hz -> mHz.
    dim_peak = peak+w0*1000 # Gives mHz
    dim_wp[key] = np.append([x],[dim_peak],axis = 0)
### STEP 4: PLOT THE RESULTS
# Initialize for plotting
plotter1 = [dim_M,dim_P,dim_PM,dim_wp]
# Prepend the experimental key so 'Data' is drawn first (black dots below).
key2[:0] = [key1]
titles1 = ['CQ M', 'CQ P', r'$\omega_m$', r'$\omega_p$']
titles2 = np.loadtxt(os.getcwd()+'/'+whichset+'sidebandnums.txt').view(float)
y1 = ['M (m'+r'$^2$'+')','P (m'+r'$^2$'+'/s)',r'$\omega_m$'+' (mHz)',r'$\omega_p$'+' (mHz)']
#y2 = [r'$|a_{-3}|$'+' (m)',r'$|a_{-2}|$'+' (m)',r'$|a_{-1}|$'+' (m)',r'$|a_0|$'+' (m)',r'$|a_1|$'+' (m)',r'$|a_2|$'+' (m)',r'$|a_3|$'+' (m)']
# One marker/line style and size per data source, in key2 order.
disp = ['.k',':m','-.g','--r','-c']
sizes = [13,1,1,1,1]
# Begin plotting
fig1, ax1 = plt.subplots(4,1,figsize = (11,6.5))
fig1.suptitle('Quantities of Interest',fontsize=16)
dispind = 0
for key in key2:
    # flatten() is a no-op after the first pass; kept as in the original.
    ax1 = ax1.flatten()
    for i in range(len(plotter1)):
        dict = plotter1[i]  # NOTE(review): `dict` shadows the builtin
        VALUES = dict[key]
        x = VALUES[0]
        y = VALUES[1]
        ax1[i].plot(x,y,disp[dispind],markersize = sizes[dispind])
        ax1[i].set_title(titles1[i])
        ax1[i].set_ylabel(y1[i])
        ax1[i].set_xlabel('Location (m)')
        ax1[i].ticklabel_format(style='sci',scilimits=(-1,1),axis='both')
    dispind += 1
ax1[0].legend(key2,bbox_to_anchor=(1, 1))
fig1.tight_layout()
fig1.subplots_adjust(top=0.88)
plt.savefig(whichset+'Final Figures/CQResultFig.png',dpi=500)
# Second figure: one subplot per selected sideband index in sidebandnums.txt.
fig2, ax2 = plt.subplots(len(titles2),sharex=True,figsize = (7,1.625*len(titles2)))
fig2.suptitle('Select Fourier Amplitudes',fontsize=16)
dispind = 0
for key in key2:
    sbvals = dim_sb[key]
    x = sbvals[0,:]
    # Drop row 0 (the x locations) so only sideband amplitudes remain.
    sideband7=np.delete(sbvals, 0, 0)
    for po in range(len(titles2)):
        ax2[po].plot(x,sideband7[po],disp[dispind],markersize = sizes[dispind])
        ax2[po].set_ylabel('a'+ r'$_{'+str(int(titles2[po]))+'}$')
    fig2.tight_layout()
    fig2.subplots_adjust(top=0.97)
    dispind += 1
plt.savefig(whichset+'Final Figures/FAResultFig.png',dpi=500)
### STEP 5: SAVE THE RESULTS
# Save P, M, wp, wm
md =[dim_P,dim_M,dim_PM,dim_wp]
val = ['dimP','dimM','dimPM','dimwp','dimsb']
#key2 = ['dNLS CQ', 'Dysthe CQ', 'NLS CQ', 'vDysthe CQ']
o = 0
for cqval in md:
    # Save the Data Values
    for ky in cqval:
        if ky == 'Data':
            np.savetxt(whichset+'Data CQs/Dim CQ Values/'+val[o]+'.txt',np.transpose(cqval[ky]).view(float))
        else:
            # str(ky)[:-3] strips the trailing ' CQ' from the simulation key.
            np.savetxt(whichset+'Simulations/Dimensional Results/'+str(ky)[:-3]+' dimCQ/'+val[o]+'.txt',np.transpose(cqval[ky]).view(float))
    o=o+1
# Save sidebands
for ky in dim_sb:
    if ky == 'Data':
        np.savetxt(whichset+'Data CQs/Dim CQ Values/'+val[-1]+'.txt',np.transpose(dim_sb[ky]).view(float))
    else:
        np.savetxt(whichset+'Simulations/Dimensional Results/'+str(ky)[:-3]+' dimCQ/'+val[-1]+'.txt',np.transpose(dim_sb[ky]).view(float))
| [
"matplotlib.pyplot.savefig",
"numpy.delete",
"os.path.join",
"os.getcwd",
"numpy.append",
"numpy.vstack",
"numpy.loadtxt",
"numpy.transpose",
"matplotlib.pyplot.subplots"
] | [((3364, 3401), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'figsize': '(11, 6.5)'}), '(4, 1, figsize=(11, 6.5))\n', (3376, 3401), True, 'import matplotlib.pyplot as plt\n'), ((4015, 4079), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(whichset + 'Final Figures/CQResultFig.png')"], {'dpi': '(500)'}), "(whichset + 'Final Figures/CQResultFig.png', dpi=500)\n", (4026, 4079), True, 'import matplotlib.pyplot as plt\n'), ((4588, 4652), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(whichset + 'Final Figures/FAResultFig.png')"], {'dpi': '(500)'}), "(whichset + 'Final Figures/FAResultFig.png', dpi=500)\n", (4599, 4652), True, 'import matplotlib.pyplot as plt\n'), ((1782, 1813), 'numpy.append', 'np.append', (['[x]', '[dim_p]'], {'axis': '(0)'}), '([x], [dim_p], axis=0)\n', (1791, 1813), True, 'import numpy as np\n'), ((1974, 2005), 'numpy.append', 'np.append', (['[x]', '[dim_m]'], {'axis': '(0)'}), '([x], [dim_m], axis=0)\n', (1983, 2005), True, 'import numpy as np\n'), ((2234, 2266), 'numpy.append', 'np.append', (['[x]', '[dim_pm]'], {'axis': '(0)'}), '([x], [dim_pm], axis=0)\n', (2243, 2266), True, 'import numpy as np\n'), ((2557, 2576), 'numpy.vstack', 'np.vstack', (['[x, sbv]'], {}), '([x, sbv])\n', (2566, 2576), True, 'import numpy as np\n'), ((2755, 2789), 'numpy.append', 'np.append', (['[x]', '[dim_peak]'], {'axis': '(0)'}), '([x], [dim_peak], axis=0)\n', (2764, 2789), True, 'import numpy as np\n'), ((4306, 4329), 'numpy.delete', 'np.delete', (['sbvals', '(0)', '(0)'], {}), '(sbvals, 0, 0)\n', (4315, 4329), True, 'import numpy as np\n'), ((653, 676), 'os.path.join', 'os.path.join', (['path', '"""*"""'], {}), "(path, '*')\n", (665, 676), False, 'import os\n'), ((1024, 1037), 'numpy.loadtxt', 'np.loadtxt', (['k'], {}), '(k)\n', (1034, 1037), True, 'import numpy as np\n'), ((1382, 1395), 'numpy.loadtxt', 'np.loadtxt', (['k'], {}), '(k)\n', (1392, 1395), True, 'import numpy as np\n'), ((5333, 5357), 'numpy.transpose', 'np.transpose', 
(['dim_sb[ky]'], {}), '(dim_sb[ky])\n', (5345, 5357), True, 'import numpy as np\n'), ((5482, 5506), 'numpy.transpose', 'np.transpose', (['dim_sb[ky]'], {}), '(dim_sb[ky])\n', (5494, 5506), True, 'import numpy as np\n'), ((2984, 2995), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2993, 2995), False, 'import os\n'), ((5005, 5028), 'numpy.transpose', 'np.transpose', (['cqval[ky]'], {}), '(cqval[ky])\n', (5017, 5028), True, 'import numpy as np\n'), ((5160, 5183), 'numpy.transpose', 'np.transpose', (['cqval[ky]'], {}), '(cqval[ky])\n', (5172, 5183), True, 'import numpy as np\n')] |
"""Functionality for saving and loading Sklearn models as TileDB arrays"""
import pickle
from typing import Optional
import numpy as np
import sklearn
from sklearn import config_context
from sklearn.base import BaseEstimator
import tiledb
from .base import Meta, TileDBModel, Timestamp, current_milli_time
class SklearnTileDBModel(TileDBModel[BaseEstimator]):
    """
    Class that implements all functionality needed to save Sklearn models as
    TileDB arrays and load Sklearn models from TileDB arrays.
    """
    Framework = "SKLEARN"
    FrameworkVersion = sklearn.__version__
    def save(self, *, update: bool = False, meta: Optional[Meta] = None) -> None:
        """
        Save a Sklearn model as a TileDB array.
        :param update: Whether we should update any existing TileDB array model at the
            target location.
        :param meta: Extra metadata to save in a TileDB array.
        """
        # Serialize model
        serialized_model = self._serialize_model()
        # Create TileDB model array
        if not update:
            self._create_array()
        self._write_array(serialized_model=serialized_model, meta=meta)
    def load(self, *, timestamp: Optional[Timestamp] = None) -> BaseEstimator:
        """
        Load a Sklearn model from a TileDB array.
        :param timestamp: Range of timestamps to load fragments of the array which live
            in the specified time range.
        :return: A Sklearn model object.
        """
        # TODO: Change timestamp when issue in core is resolved
        model_array = tiledb.open(self.uri, ctx=self.ctx, timestamp=timestamp)
        model_array_results = model_array[:]
        # The whole pickled model lives in the single cell of 'model_params'.
        model = pickle.loads(model_array_results["model_params"].item(0))
        return model
    def preview(self, *, display: str = "text") -> str:
        """
        Create a text representation of the model.
        :param display: If 'diagram', estimators will be displayed as a diagram in an
            HTML format when shown in a jupyter notebook. If 'text', estimators will be
            displayed as text.
        :return: A string representation of the model's internal configuration, or an
            empty string when no model is attached.
        """
        if self.model:
            with config_context(display=display):
                return str(self.model)
        else:
            return ""
    def _create_array(self) -> None:
        """Create a TileDB array for a Sklearn model."""
        # Single-cell dense array: one dimension with domain (1, 1).
        dom = tiledb.Domain(
            tiledb.Dim(
                name="model", domain=(1, 1), tile=1, dtype=np.int32, ctx=self.ctx
            ),
        )
        attrs = [
            # Variable-length byte attribute holding the pickled model,
            # compressed with Zstd.
            tiledb.Attr(
                name="model_params",
                dtype="S1",
                var=True,
                filters=tiledb.FilterList([tiledb.ZstdFilter()]),
                ctx=self.ctx,
            ),
        ]
        schema = tiledb.ArraySchema(domain=dom, sparse=False, attrs=attrs, ctx=self.ctx)
        tiledb.Array.create(self.uri, schema, ctx=self.ctx)
        # In case we are on TileDB-Cloud we have to update model array's file properties
        if self.namespace:
            from tiledb.ml._cloud_utils import update_file_properties
            update_file_properties(self.uri, self._file_properties)
    def _write_array(self, serialized_model: bytes, meta: Optional[Meta]) -> None:
        """
        Write a Sklearn model to a TileDB array.
        :param serialized_model: A pickled sklearn model.
        :param meta: Extra metadata to save in a TileDB array.
        """
        # TODO: Change timestamp when issue in core is resolved
        with tiledb.open(
            self.uri, "w", timestamp=current_milli_time(), ctx=self.ctx
        ) as tf_model_tiledb:
            # Insertion in TileDB array
            tf_model_tiledb[:] = {"model_params": np.array([serialized_model])}
            self.update_model_metadata(array=tf_model_tiledb, meta=meta)
    def _serialize_model(self) -> bytes:
        """
        Serialize a Sklearn model with pickle.
        :return: Pickled Sklearn model.
        """
        # Protocol 4 keeps the serialized blob loadable on older Pythons.
        return pickle.dumps(self.model, protocol=4)
| [
"tiledb.Array.create",
"tiledb.ArraySchema",
"tiledb.ml._cloud_utils.update_file_properties",
"pickle.dumps",
"tiledb.ZstdFilter",
"numpy.array",
"sklearn.config_context",
"tiledb.open",
"tiledb.Dim"
] | [((1581, 1637), 'tiledb.open', 'tiledb.open', (['self.uri'], {'ctx': 'self.ctx', 'timestamp': 'timestamp'}), '(self.uri, ctx=self.ctx, timestamp=timestamp)\n', (1592, 1637), False, 'import tiledb\n'), ((2872, 2943), 'tiledb.ArraySchema', 'tiledb.ArraySchema', ([], {'domain': 'dom', 'sparse': '(False)', 'attrs': 'attrs', 'ctx': 'self.ctx'}), '(domain=dom, sparse=False, attrs=attrs, ctx=self.ctx)\n', (2890, 2943), False, 'import tiledb\n'), ((2953, 3004), 'tiledb.Array.create', 'tiledb.Array.create', (['self.uri', 'schema'], {'ctx': 'self.ctx'}), '(self.uri, schema, ctx=self.ctx)\n', (2972, 3004), False, 'import tiledb\n'), ((4095, 4131), 'pickle.dumps', 'pickle.dumps', (['self.model'], {'protocol': '(4)'}), '(self.model, protocol=4)\n', (4107, 4131), False, 'import pickle\n'), ((2479, 2556), 'tiledb.Dim', 'tiledb.Dim', ([], {'name': '"""model"""', 'domain': '(1, 1)', 'tile': '(1)', 'dtype': 'np.int32', 'ctx': 'self.ctx'}), "(name='model', domain=(1, 1), tile=1, dtype=np.int32, ctx=self.ctx)\n", (2489, 2556), False, 'import tiledb\n'), ((3205, 3260), 'tiledb.ml._cloud_utils.update_file_properties', 'update_file_properties', (['self.uri', 'self._file_properties'], {}), '(self.uri, self._file_properties)\n', (3227, 3260), False, 'from tiledb.ml._cloud_utils import update_file_properties\n'), ((2235, 2266), 'sklearn.config_context', 'config_context', ([], {'display': 'display'}), '(display=display)\n', (2249, 2266), False, 'from sklearn import config_context\n'), ((3823, 3851), 'numpy.array', 'np.array', (['[serialized_model]'], {}), '([serialized_model])\n', (3831, 3851), True, 'import numpy as np\n'), ((2776, 2795), 'tiledb.ZstdFilter', 'tiledb.ZstdFilter', ([], {}), '()\n', (2793, 2795), False, 'import tiledb\n')] |
from __future__ import absolute_import, division, print_function
import copy
import errno
import json
import os
import os.path as osp
import random
import sys
import time
import warnings
import yaml
import numpy as np
import PIL
import torch
from PIL import Image
# Public API of this utility module.
__all__ = [
    'mkdir_if_missing', 'check_isfile', 'read_json', 'write_json', 'read_yaml',
    'set_random_seed', "worker_init_fn", 'download_url', 'read_image', 'collect_env_info',
    'get_model_attr', 'StateCacher', 'random_image'
]
def mkdir_if_missing(dirname):
    """Create `dirname` (including parents) when it does not already exist."""
    if osp.exists(dirname):
        return
    try:
        os.makedirs(dirname)
    except OSError as err:
        # A concurrent creator may have won the race; that is fine.
        if err.errno != errno.EEXIST:
            raise
def check_isfile(fpath):
    """Return True when `fpath` is an existing file, warning otherwise.

    Args:
        fpath (str): file path.

    Returns:
        bool
    """
    found = osp.isfile(fpath)
    if not found:
        warnings.warn('No file found at "{}"'.format(fpath))
    return found
def read_json(fpath):
    """Read and return the JSON object stored at `fpath`."""
    with open(fpath, 'r') as fin:
        return json.load(fin)
def write_json(obj, fpath):
    """Serialize `obj` as pretty-printed JSON to `fpath`, creating parent dirs."""
    mkdir_if_missing(osp.dirname(fpath))
    with open(fpath, 'w') as fout:
        json.dump(obj, fout, indent=4, separators=(',', ': '))
def read_yaml(fpath):
    """Read and return the YAML document stored at `fpath`."""
    with open(fpath, 'r') as fin:
        return yaml.safe_load(fin)
def set_random_seed(seed, deterministic=False):
    """Seed every RNG used here (stdlib, numpy, torch).

    Args:
        seed (int): seed value.
        deterministic (bool): if True, additionally force deterministic cuDNN
            kernels (slower, but reproducible convolutions).
    """
    random.seed(seed)
    np.random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def worker_init_fn(worker_id):
    """Reseed numpy and stdlib RNGs per DataLoader worker.

    Offsets the current RNG state by `worker_id` so each worker draws a
    distinct stream instead of inheriting an identical forked state.
    """
    np_base = np.random.get_state()[1][0]
    np.random.seed(np_base + worker_id)
    py_base = random.getstate()[1][0]
    random.seed(py_base + worker_id)
def download_url(url, dst):
    """Downloads file from a url to a destination, printing progress.

    Args:
        url (str): url to download file.
        dst (str): destination path.
    """
    from six.moves import urllib
    print('* url="{}"'.format(url))
    print('* destination="{}"'.format(dst))

    # Closure cell instead of the previous `global start_time`, which leaked
    # a module-level global.
    start_time = [None]

    def _reporthook(count, block_size, total_size):
        if count == 0:
            start_time[0] = time.time()
            return
        # Guard against a zero duration (the first data callback can arrive
        # within the clock's resolution, which previously raised
        # ZeroDivisionError).
        duration = max(time.time() - start_time[0], 1e-6)
        progress_size = int(count * block_size)
        speed = int(progress_size / (1024*duration))
        # Servers without a Content-Length header report total_size <= 0.
        if total_size > 0:
            percent = int(count * block_size * 100 / total_size)
        else:
            percent = 0
        sys.stdout.write(
            '\r...%d%%, %d MB, %d KB/s, %d seconds passed' %
            (percent, progress_size / (1024*1024), speed, duration)
        )
        sys.stdout.flush()

    urllib.request.urlretrieve(url, dst, _reporthook)
    sys.stdout.write('\n')
def read_image(path, grayscale=False):
    """Reads image from path using ``PIL.Image``.

    Args:
        path (str): path to an image.
        grayscale (bool): load as a single-channel ('L') image instead of RGB.

    Returns:
        PIL image

    Raises:
        IOError: if the path does not exist, or the image cannot be decoded
            after several attempts.
    """
    if not osp.exists(path):
        raise IOError('"{}" does not exist'.format(path))
    mode = 'L' if grayscale else 'RGB'
    # Retry a few times to ride out transient read errors, but do not loop
    # forever: the previous implementation spun indefinitely on a
    # permanently unreadable/corrupt file.
    max_attempts = 3
    for _ in range(max_attempts):
        try:
            return Image.open(path).convert(mode)
        except IOError:
            print('IOError occurred when reading "{}".'.format(path))
    raise IOError(
        'Failed to read image "{}" after {} attempts'.format(path, max_attempts))
def random_image(height, width):
    """Return a random RGB PIL image of shape (height, width)."""
    noise = np.random.rand(height, width, 3).astype(np.float32)
    pixels = np.uint8(noise * 255)
    return Image.fromarray(pixels)
def collect_env_info():
    """Returns env info as a string.

    Code source: github.com/facebookresearch/maskrcnn-benchmark
    """
    from torch.utils.collect_env import get_pretty_env_info
    return get_pretty_env_info() + '\n Pillow ({})'.format(PIL.__version__)
def get_model_attr(model, attr):
    """Fetch `attr` from `model`, unwrapping a DataParallel-style `.module` wrapper."""
    target = model.module if hasattr(model, 'module') else model
    return getattr(target, attr)
class StateCacher(object):
    """Cache state dicts either in memory (deep copies) or on disk.

    When `in_memory` is False, each stored state is serialized with
    ``torch.save`` into `cache_dir` (the system temp dir by default), and the
    files are removed when the cacher is garbage-collected.
    """

    def __init__(self, in_memory, cache_dir=None):
        self.in_memory = in_memory
        if cache_dir is None:
            import tempfile
            self.cache_dir = tempfile.gettempdir()
        else:
            if not os.path.isdir(cache_dir):
                raise ValueError("Given `cache_dir` is not a valid directory.")
            self.cache_dir = cache_dir
        # key -> deep-copied state (in memory) or file path (on disk).
        self.cached = {}

    def store(self, key, state_dict):
        """Remember `state_dict` under `key` (deep copy, or torch file on disk)."""
        if self.in_memory:
            self.cached[key] = copy.deepcopy(state_dict)
            return
        # id(self) disambiguates files from different cacher instances.
        fn = os.path.join(self.cache_dir, "state_{}_{}.pt".format(key, id(self)))
        self.cached[key] = fn
        torch.save(state_dict, fn)

    def retrieve(self, key):
        """Return the state stored under `key`; raise KeyError when absent."""
        if key not in self.cached:
            raise KeyError("Target {} was not cached.".format(key))
        if self.in_memory:
            return self.cached.get(key)
        fn = self.cached.get(key)
        if not os.path.exists(fn):
            raise RuntimeError(
                "Failed to load state in {}. File doesn't exist anymore.".format(fn)
            )
        # map_location keeps tensors on CPU regardless of where they were saved.
        return torch.load(fn, map_location=lambda storage, location: storage)

    def __del__(self):
        """Check whether there are unused cached files existing in `cache_dir` before
        this instance being destroyed."""
        if self.in_memory:
            return
        for k in self.cached:
            if os.path.exists(self.cached[k]):
                os.remove(self.cached[k])
| [
"numpy.uint8",
"numpy.random.get_state",
"numpy.random.rand",
"copy.deepcopy",
"os.remove",
"os.path.exists",
"random.getstate",
"torch.utils.collect_env.get_pretty_env_info",
"os.path.isdir",
"numpy.random.seed",
"sys.stdout.flush",
"os.path.isfile",
"os.path.dirname",
"torch.save",
"ti... | [((908, 925), 'os.path.isfile', 'osp.isfile', (['fpath'], {}), '(fpath)\n', (918, 925), True, 'import os.path as osp\n'), ((1549, 1572), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1566, 1572), False, 'import torch\n'), ((1577, 1609), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (1603, 1609), False, 'import torch\n'), ((1733, 1753), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1747, 1753), True, 'import numpy as np\n'), ((1758, 1775), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1769, 1775), False, 'import random\n'), ((2817, 2866), 'six.moves.urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url', 'dst', '_reporthook'], {}), '(url, dst, _reporthook)\n', (2843, 2866), False, 'from six.moves import urllib\n'), ((2871, 2893), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (2887, 2893), False, 'import sys\n'), ((3612, 3631), 'numpy.uint8', 'np.uint8', (['(img * 255)'], {}), '(img * 255)\n', (3620, 3631), True, 'import numpy as np\n'), ((3647, 3667), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (3662, 3667), False, 'from PIL import Image\n'), ((3898, 3919), 'torch.utils.collect_env.get_pretty_env_info', 'get_pretty_env_info', ([], {}), '()\n', (3917, 3919), False, 'from torch.utils.collect_env import get_pretty_env_info\n'), ((591, 610), 'os.path.exists', 'osp.exists', (['dirname'], {}), '(dirname)\n', (601, 610), True, 'import os.path as osp\n'), ((1132, 1144), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1141, 1144), False, 'import json\n'), ((1243, 1261), 'os.path.dirname', 'osp.dirname', (['fpath'], {}), '(fpath)\n', (1254, 1261), True, 'import os.path as osp\n'), ((1303, 1354), 'json.dump', 'json.dump', (['obj', 'f'], {'indent': '(4)', 'separators': "(',', ': ')"}), "(obj, f, indent=4, separators=(',', ': '))\n", (1312, 1354), False, 'import json\n'), ((1463, 1480), 'yaml.safe_load', 
'yaml.safe_load', (['f'], {}), '(f)\n', (1477, 1480), False, 'import yaml\n'), ((2628, 2756), 'sys.stdout.write', 'sys.stdout.write', (["('\\r...%d%%, %d MB, %d KB/s, %d seconds passed' % (percent, progress_size /\n (1024 * 1024), speed, duration))"], {}), "('\\r...%d%%, %d MB, %d KB/s, %d seconds passed' % (percent,\n progress_size / (1024 * 1024), speed, duration))\n", (2644, 2756), False, 'import sys\n'), ((2793, 2811), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2809, 2811), False, 'import sys\n'), ((3153, 3169), 'os.path.exists', 'osp.exists', (['path'], {}), '(path)\n', (3163, 3169), True, 'import os.path as osp\n'), ((637, 657), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (648, 657), False, 'import os\n'), ((2383, 2394), 'time.time', 'time.time', ([], {}), '()\n', (2392, 2394), False, 'import time\n'), ((2433, 2444), 'time.time', 'time.time', ([], {}), '()\n', (2442, 2444), False, 'import time\n'), ((3555, 3582), 'numpy.random.rand', 'np.random.rand', (['*input_size'], {}), '(*input_size)\n', (3569, 3582), True, 'import numpy as np\n'), ((4403, 4424), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (4422, 4424), False, 'import tempfile\n'), ((4880, 4906), 'torch.save', 'torch.save', (['state_dict', 'fn'], {}), '(state_dict, fn)\n', (4890, 4906), False, 'import torch\n'), ((5367, 5429), 'torch.load', 'torch.load', (['fn'], {'map_location': '(lambda storage, location: storage)'}), '(fn, map_location=lambda storage, location: storage)\n', (5377, 5429), False, 'import torch\n'), ((5705, 5735), 'os.path.exists', 'os.path.exists', (['self.cached[k]'], {}), '(self.cached[k])\n', (5719, 5735), False, 'import os\n'), ((4458, 4487), 'os.path.isdir', 'os.path.isdir', (['self.cache_dir'], {}), '(self.cache_dir)\n', (4471, 4487), False, 'import os\n'), ((5179, 5197), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (5193, 5197), False, 'import os\n'), ((5753, 5778), 'os.remove', 'os.remove', 
(['self.cached[k]'], {}), '(self.cached[k])\n', (5762, 5778), False, 'import os\n'), ((1872, 1893), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (1891, 1893), True, 'import numpy as np\n'), ((1929, 1946), 'random.getstate', 'random.getstate', ([], {}), '()\n', (1944, 1946), False, 'import random\n'), ((3284, 3300), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (3294, 3300), False, 'from PIL import Image\n'), ((4698, 4723), 'copy.deepcopy', 'copy.deepcopy', (['state_dict'], {}), '(state_dict)\n', (4711, 4723), False, 'import copy\n')] |
# -*- coding:utf-8 -*-
import trdb2py.trading2_pb2
from trdb2py.utils import str2asset, asset2str
from datetime import datetime
import time
import pandas as pd
import numpy as np
import math
from trdb2py.timeutils import str2timestamp, getDayInYear, getYearDays, calcYears
# Human-readable names for trading control (ctrl) types; presumably indexed by
# the protobuf ctrl-type enum value -- TODO confirm against trading2_pb2.
CtrlTypeStr = [
    'INIT',
    'BUY',
    'SELL',
    'STOPLOSS',
    'TAKEPROFIT',
    'WITHDRAW',
    'DEPOSIT',
]
def buildPNLReport(lstpnl: list) -> pd.DataFrame:
    """
    buildPNLReport - convert a list of PNL entries into a pandas.DataFrame
    for easier analysis.

    Each element of `lstpnl` is a dict with a 'title' string and a 'pnl'
    PNLAssetData message; one DataFrame row is built per element.
    Timestamps are rendered as local-time 'YYYY-MM-DD' strings
    (datetime.fromtimestamp uses the local timezone).
    """
    # Column -> list-of-values accumulator; one append per PNL entry below.
    fv0 = {
        'title': [],
        'asset': [],
        'maxDrawdown': [],
        'maxDrawdownStart': [],
        'maxDrawdownEnd': [],
        'maxDrawup': [],
        'maxDrawupStart': [],
        'maxDrawupEnd': [],
        'sharpe': [],
        'annualizedReturns': [],
        'annualizedVolatility': [],
        'totalReturns': [],
        'variance': [],
        'buyTimes': [],
        'sellTimes': [],
        'stoplossTimes': [],
        'maxUpDay': [],
        'maxPerUpDay': [],
        'maxDownDay': [],
        'maxPerDownDay': [],
        'maxUpWeek': [],
        'maxPerUpWeek': [],
        'maxDownWeek': [],
        'maxPerDownWeek': [],
        'maxUpMonth': [],
        'maxPerUpMonth': [],
        'maxDownMonth': [],
        'maxPerDownMonth': [],
        'maxUpYear': [],
        'maxPerUpYear': [],
        'maxDownYear': [],
        'maxPerDownYear': [],
        'perWinRate': [],
        'values': [],
    }
    for v in lstpnl:
        fv0['title'].append(v['title'])
        fv0['asset'].append(asset2str(v['pnl'].asset))
        fv0['maxDrawdown'].append(v['pnl'].maxDrawdown)
        fv0['maxDrawdownStart'].append(datetime.fromtimestamp(
            v['pnl'].maxDrawdownStartTs).strftime('%Y-%m-%d'))
        fv0['maxDrawdownEnd'].append(datetime.fromtimestamp(
            v['pnl'].maxDrawdownEndTs).strftime('%Y-%m-%d'))
        fv0['maxDrawup'].append(v['pnl'].maxDrawup)
        fv0['maxDrawupStart'].append(datetime.fromtimestamp(
            v['pnl'].maxDrawupStartTs).strftime('%Y-%m-%d'))
        fv0['maxDrawupEnd'].append(datetime.fromtimestamp(
            v['pnl'].maxDrawupEndTs).strftime('%Y-%m-%d'))
        fv0['sharpe'].append(v['pnl'].sharpe)
        fv0['annualizedReturns'].append(v['pnl'].annualizedReturns)
        fv0['annualizedVolatility'].append(v['pnl'].annualizedVolatility)
        fv0['totalReturns'].append(v['pnl'].totalReturns)
        fv0['variance'].append(v['pnl'].variance)
        fv0['buyTimes'].append(v['pnl'].buyTimes)
        fv0['sellTimes'].append(v['pnl'].sellTimes)
        fv0['stoplossTimes'].append(v['pnl'].stoplossTimes)
        fv0['maxUpDay'].append(datetime.fromtimestamp(
            v['pnl'].maxUpDayTs).strftime('%Y-%m-%d'))
        fv0['maxPerUpDay'].append(v['pnl'].maxPerUpDay)
        fv0['maxDownDay'].append(datetime.fromtimestamp(
            v['pnl'].maxDownDayTs).strftime('%Y-%m-%d'))
        fv0['maxPerDownDay'].append(v['pnl'].maxPerDownDay)
        fv0['maxUpWeek'].append(datetime.fromtimestamp(
            v['pnl'].maxUpWeekTs).strftime('%Y-%m-%d'))
        fv0['maxPerUpWeek'].append(v['pnl'].maxPerUpWeek)
        fv0['maxDownWeek'].append(datetime.fromtimestamp(
            v['pnl'].maxDownWeekTs).strftime('%Y-%m-%d'))
        fv0['maxPerDownWeek'].append(v['pnl'].maxPerDownWeek)
        fv0['maxUpMonth'].append(datetime.fromtimestamp(
            v['pnl'].maxUpMonthTs).strftime('%Y-%m-%d'))
        fv0['maxPerUpMonth'].append(v['pnl'].maxPerUpMonth)
        fv0['maxDownMonth'].append(datetime.fromtimestamp(
            v['pnl'].maxDownMonthTs).strftime('%Y-%m-%d'))
        fv0['maxPerDownMonth'].append(v['pnl'].maxPerDownMonth)
        fv0['maxUpYear'].append(datetime.fromtimestamp(
            v['pnl'].maxUpYearTs).strftime('%Y-%m-%d'))
        fv0['maxPerUpYear'].append(v['pnl'].maxPerUpYear)
        fv0['maxDownYear'].append(datetime.fromtimestamp(
            v['pnl'].maxDownYearTs).strftime('%Y-%m-%d'))
        fv0['maxPerDownYear'].append(v['pnl'].maxPerDownYear)
        fv0['values'].append(len(v['pnl'].values))
        # Win rate over closed trades (sells + stoplosses); 0 when no trades closed.
        if v['pnl'].sellTimes + v['pnl'].stoplossTimes == 0:
            fv0['perWinRate'].append(0)
        else:
            fv0['perWinRate'].append(
                v['pnl'].winTimes * 1.0 / (v['pnl'].sellTimes + v['pnl'].stoplossTimes))
    return pd.DataFrame(fv0)
def getPNLLastTs(pnl: trdb2py.trading2_pb2.PNLAssetData):
    """Return the timestamp of the final ctrl record in `pnl`, or -1 if empty."""
    total = len(pnl.lstCtrl)
    if total <= 0:
        return -1
    return pnl.lstCtrl[total - 1].ts
def getPNLLastCtrl(pnl: trdb2py.trading2_pb2.PNLAssetData) -> trdb2py.trading2_pb2.CtrlNode:
    """Return the final ctrl record in `pnl`, or None when there is none."""
    total = len(pnl.lstCtrl)
    if total <= 0:
        return None
    return pnl.lstCtrl[total - 1]
def getPNLValueWithTimestamp(ts, pnl: trdb2py.trading2_pb2.PNLAssetData, isAdd: bool = True) -> int:
    """
    Locate the index of the PNL value whose timestamp equals `ts`.

    The values list is scanned in order (assumed sorted by timestamp --
    TODO confirm with callers). When `isAdd` is True and no exact match
    exists, a new empty PNLDataValue carrying `ts` is inserted at the sorted
    position (or appended at the end) and its index is returned. When
    `isAdd` is False and no match exists, -1 is returned.
    """
    if isAdd:
        for i in range(0, len(pnl.values)):
            if ts == pnl.values[i].ts:
                return i
            if ts < pnl.values[i].ts:
                pnl.values.insert(i, trdb2py.trading2_pb2.PNLDataValue(ts=ts))
                return i
        pnl.values.append(trdb2py.trading2_pb2.PNLDataValue(ts=ts))
        return len(pnl.values) - 1
    for i in range(0, len(pnl.values)):
        if ts == pnl.values[i].ts:
            return i
        # (a redundant manual `i = i + 1` was removed here; the for loop
        # already advances `i`)
    return -1
def mergePNL(lstpnl: list) -> trdb2py.trading2_pb2.PNLAssetData:
    """Merge several asset PNL curves (each item a dict with key 'pnl') into
    one aggregated PNLAssetData, summing value/cost per timestamp."""
    merged = trdb2py.trading2_pb2.PNLAssetData()
    for item in lstpnl:
        for src in item['pnl'].values:
            di = getPNLValueWithTimestamp(src.ts, merged)
            dst = merged.values[di]
            dst.value += src.value
            dst.cost += src.cost
            # perValue falls back to 1 while no cost has accumulated yet
            dst.perValue = dst.value / dst.cost if dst.cost > 0 else 1
    return merged
def mergePNLEx(pnldest: trdb2py.trading2_pb2.PNLAssetData, pnlsrc: trdb2py.trading2_pb2.PNLAssetData, inmoney):
    """Accumulate ``pnlsrc`` into ``pnldest`` in place, subtracting
    ``inmoney`` from every source sample's value before adding it."""
    for src in pnlsrc.values:
        di = getPNLValueWithTimestamp(src.ts, pnldest)
        dst = pnldest.values[di]
        dst.value += src.value - inmoney
        # perValue falls back to 1 while the destination has no cost
        dst.perValue = dst.value / dst.cost if dst.cost > 0 else 1
def rmPNLValuesWithTimestamp(ts, pnl: trdb2py.trading2_pb2.PNLAssetData):
    """Truncate ``pnl.values`` so the entry at timestamp ``ts`` becomes last.

    Note: the lookup uses the default isAdd=True, so a missing timestamp is
    first inserted at its sorted position before truncation.
    """
    keep_idx = getPNLValueWithTimestamp(ts, pnl)
    del pnl.values[keep_idx + 1:]
def getPNLTimestampLowInMonth(pnl: trdb2py.trading2_pb2.PNLAssetData) -> list:
    """Return, for each calendar month (UTC), the timestamp of that month's
    lowest perValue sample."""
    ts = 0            # timestamp of the running minimum in the current month
    dt = None         # datetime of `ts`, used to detect month boundaries
    lastPerValue = 0  # perValue at `ts`
    arr = []
    for i in range(0, len(pnl.values)):
        v = pnl.values[i]
        if ts == 0:
            # first sample initialises the running minimum
            ts = v.ts
            dt = datetime.utcfromtimestamp(ts)
            lastPerValue = v.perValue
        else:
            cdt = datetime.utcfromtimestamp(v.ts)
            if dt.year == cdt.year and dt.month == cdt.month:
                # same month: keep the sample with the lower perValue
                if lastPerValue > v.perValue:
                    ts = v.ts
                    dt = cdt
                    lastPerValue = v.perValue
                if i == len(pnl.values) - 1:
                    # last sample overall: flush the current month
                    arr.append(ts)
            else:
                # month changed: record the finished month's minimum and
                # restart tracking from this sample
                # NOTE(review): if the very last sample opens a new month,
                # that one-sample month is never appended — confirm intended
                arr.append(ts)
                ts = v.ts
                dt = cdt
                lastPerValue = v.perValue
    return arr
def getPNLTimestampHighInMonth(pnl: trdb2py.trading2_pb2.PNLAssetData) -> list:
    """Return, for each calendar month (UTC), the timestamp of that month's
    highest perValue sample."""
    ts = 0            # timestamp of the running maximum in the current month
    dt = None         # datetime of `ts`, used to detect month boundaries
    lastPerValue = 0  # perValue at `ts`
    arr = []
    for i in range(0, len(pnl.values)):
        v = pnl.values[i]
        if ts == 0:
            # first sample initialises the running maximum
            ts = v.ts
            dt = datetime.utcfromtimestamp(ts)
            lastPerValue = v.perValue
        else:
            cdt = datetime.utcfromtimestamp(v.ts)
            if dt.year == cdt.year and dt.month == cdt.month:
                # same month: keep the sample with the higher perValue
                if lastPerValue < v.perValue:
                    ts = v.ts
                    dt = cdt
                    lastPerValue = v.perValue
                if i == len(pnl.values) - 1:
                    # last sample overall: flush the current month
                    arr.append(ts)
            else:
                # month changed: record the finished month's maximum and
                # restart tracking from this sample
                # NOTE(review): same edge case as the "Low" variant — a final
                # one-sample month is never appended; confirm intended
                arr.append(ts)
                ts = v.ts
                dt = cdt
                lastPerValue = v.perValue
    return arr
def countTradingDays4Year(pnl: trdb2py.trading2_pb2.PNLAssetData):
    """Estimate the average number of trading samples per year, 0 if empty."""
    if not pnl.values:
        return 0
    years = calcYears(pnl.values[0].ts, pnl.values[-1].ts)
    return int(len(pnl.values) / years)
def calcAnnualizedVolatility(pnl: trdb2py.trading2_pb2.PNLAssetData):
    """Compute annualized volatility of the log returns and store it on
    ``pnl.annualizedVolatility``.

    https://www.zhihu.com/question/19770602
    https://wiki.mbalib.com/wiki/%E5%8E%86%E5%8F%B2%E6%B3%A2%E5%8A%A8%E7%8E%87

    Returns 0 (leaving the field untouched) when there are no samples.
    """
    if len(pnl.values) == 0:
        return 0
    # log return between each pair of consecutive samples
    # (manual append loop replaced by a comprehension)
    log_returns = [
        math.log(pnl.values[i].perValue / pnl.values[i - 1].perValue)
        for i in range(1, len(pnl.values))
    ]
    # scale per-sample std-dev by sqrt(samples per year)
    pnl.annualizedVolatility = np.std(log_returns) * \
        math.sqrt(countTradingDays4Year(pnl))
def rebuildPNL(pnl: trdb2py.trading2_pb2.PNLAssetData):
    """Recompute all derived statistics (total/annualized returns, drawdown,
    volatility, Sharpe) from the value series."""
    if not pnl.values:
        # empty curve: neutral defaults
        pnl.totalReturns = 1.0
        pnl.annualizedVolatility = 0
        return
    pnl.totalReturns = pnl.values[-1].perValue
    calcAnnualizedVolatility(pnl)
    rebuildDrawdown(pnl)
    calcAnnualizedReturns(pnl)
    calcSharpe(pnl)
def rebuildDrawdown(pnl: trdb2py.trading2_pb2.PNLAssetData):
    """Recompute per-sample drawdowns and the maximum-drawdown window."""
    peak = 0          # high-water mark of perValue so far
    worst = 0         # deepest relative drawdown seen
    peak_ts = 0       # timestamp where the current peak was set
    worst_start = 0
    worst_end = 0
    for v in pnl.values:
        if v.perValue > peak:
            # new high-water mark: drawdown resets here
            peak = v.perValue
            v.drawdown = 0
            peak_ts = v.ts
        else:
            v.drawdown = (peak - v.perValue) / peak
            if v.drawdown > worst:
                worst = v.drawdown
                worst_start = peak_ts
                worst_end = v.ts
    pnl.maxDrawdown = worst
    pnl.maxDrawdownStartTs = worst_start
    pnl.maxDrawdownEndTs = worst_end
def calcAnnualizedReturns(pnl: trdb2py.trading2_pb2.PNLAssetData):
    """Compute ``pnl.annualizedReturns`` from the final perValue sample."""
    if not pnl.values:
        return
    total_return = pnl.values[-1].perValue - 1
    years = calcYears(pnl.values[0].ts, pnl.values[-1].ts)
    if years <= 1:
        # shorter than a year: report the raw return
        pnl.annualizedReturns = total_return
    else:
        # scale the per-sample return up to a full trading year
        pnl.annualizedReturns = total_return / len(pnl.values) * countTradingDays4Year(pnl)
def calcSharpe(pnl: trdb2py.trading2_pb2.PNLAssetData, riskFree: float = 0.03):
    """Compute the Sharpe ratio and store it on ``pnl.sharpe``.

    https://www.zhihu.com/question/27264526

    riskFree: annual risk-free rate; defaults to the previously hard-coded 3%
    so existing callers are unaffected.
    """
    if pnl.annualizedVolatility == 0:
        # flat curve: avoid ZeroDivisionError — a zero-volatility series has
        # no meaningful Sharpe ratio
        pnl.sharpe = 0
        return
    pnl.sharpe = (pnl.annualizedReturns - riskFree) / pnl.annualizedVolatility
def clonePNLWithTs(pnl: trdb2py.trading2_pb2.PNLAssetData, startTs) -> trdb2py.trading2_pb2.PNLAssetData:
    """Clone the tail of a PNL curve starting at ``startTs``, renormalising
    perValue so the first kept sample becomes 1."""
    clone = trdb2py.trading2_pb2.PNLAssetData()
    base_pervalue = 1
    seen_first = False
    for src in pnl.values:
        if src.ts < startTs:
            continue
        if not seen_first:
            # remember the first kept sample's perValue as the new baseline
            seen_first = True
            base_pervalue = src.perValue
        di = getPNLValueWithTimestamp(src.ts, clone)
        dst = clone.values[di]
        dst.value += src.value
        dst.cost += src.cost
        dst.perValue = src.perValue / base_pervalue
    return clone
def genCtrlData(pnl: trdb2py.trading2_pb2.PNLAssetData, ctrlType, isPerValue: bool = True, dtFormat: str = '%Y-%m-%d', defVal=1) -> dict:
    """Collect {'date': [...], 'value': [...]} for every control record of
    the given type; ``defVal`` is used when no PNL sample matches the ts."""
    out = {'date': [], 'value': []}
    for ctrl in pnl.lstCtrl:
        if ctrl.type != ctrlType:
            continue
        out['date'].append(datetime.fromtimestamp(ctrl.ts).strftime(dtFormat))
        vi = getPNLValueWithTimestamp(ctrl.ts, pnl, isAdd=False)
        if vi < 0:
            out['value'].append(defVal)
        elif isPerValue:
            out['value'].append(pnl.values[vi].perValue)
        else:
            out['value'].append(pnl.values[vi].value - pnl.values[vi].cost)
    return out
def buildPNLCtrlData(pnl: trdb2py.trading2_pb2.PNLAssetData, isPerValue: bool = True, dtFormat: str = '%Y-%m-%d', defVal=1) -> pd.DataFrame:
    """Build a DataFrame with one row per control record (trade action),
    including its value on the PNL curve and its trade details."""
    # column order of this dict defines the DataFrame column order
    rows = {
        'date': [], 'type': [], 'value': [], 'src': [], 'dst': [], 'fee': [],
        'averageHoldingPrice': [], 'sellPrice': [], 'moneyParts': [],
        'lastMoneyParts': [],
    }
    for ctrl in pnl.lstCtrl:
        rows['date'].append(datetime.fromtimestamp(ctrl.ts).strftime(dtFormat))
        rows['type'].append(CtrlTypeStr[ctrl.type])
        vi = getPNLValueWithTimestamp(ctrl.ts, pnl, isAdd=False)
        if vi < 0:
            rows['value'].append(defVal)
        elif isPerValue:
            rows['value'].append(pnl.values[vi].perValue)
        else:
            rows['value'].append(pnl.values[vi].value - pnl.values[vi].cost)
        rows['src'].append(ctrl.volumeSrc)
        rows['dst'].append(ctrl.volumeDst)
        rows['fee'].append(ctrl.fee)
        rows['averageHoldingPrice'].append(ctrl.averageHoldingPrice)
        rows['sellPrice'].append(ctrl.sellPrice)
        rows['moneyParts'].append(ctrl.moneyParts)
        rows['lastMoneyParts'].append(ctrl.lastMoneyParts)
    return pd.DataFrame(rows)
"datetime.datetime.utcfromtimestamp",
"datetime.datetime.fromtimestamp",
"trdb2py.utils.asset2str",
"math.log",
"numpy.std",
"pandas.DataFrame"
] | [((4332, 4349), 'pandas.DataFrame', 'pd.DataFrame', (['fv0'], {}), '(fv0)\n', (4344, 4349), True, 'import pandas as pd\n'), ((13152, 13169), 'pandas.DataFrame', 'pd.DataFrame', (['fv1'], {}), '(fv1)\n', (13164, 13169), True, 'import pandas as pd\n'), ((8943, 8954), 'numpy.std', 'np.std', (['arr'], {}), '(arr)\n', (8949, 8954), True, 'import numpy as np\n'), ((1544, 1569), 'trdb2py.utils.asset2str', 'asset2str', (["v['pnl'].asset"], {}), "(v['pnl'].asset)\n", (1553, 1569), False, 'from trdb2py.utils import str2asset, asset2str\n'), ((6867, 6896), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['ts'], {}), '(ts)\n', (6892, 6896), False, 'from datetime import datetime\n'), ((6967, 6998), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['v.ts'], {}), '(v.ts)\n', (6992, 6998), False, 'from datetime import datetime\n'), ((7720, 7749), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['ts'], {}), '(ts)\n', (7745, 7749), False, 'from datetime import datetime\n'), ((7820, 7851), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['v.ts'], {}), '(v.ts)\n', (7845, 7851), False, 'from datetime import datetime\n'), ((8864, 8925), 'math.log', 'math.log', (['(pnl.values[i].perValue / pnl.values[i - 1].perValue)'], {}), '(pnl.values[i].perValue / pnl.values[i - 1].perValue)\n', (8872, 8925), False, 'import math\n'), ((1667, 1718), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["v['pnl'].maxDrawdownStartTs"], {}), "(v['pnl'].maxDrawdownStartTs)\n", (1689, 1718), False, 'from datetime import datetime\n'), ((1791, 1840), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["v['pnl'].maxDrawdownEndTs"], {}), "(v['pnl'].maxDrawdownEndTs)\n", (1813, 1840), False, 'from datetime import datetime\n'), ((1966, 2015), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["v['pnl'].maxDrawupStartTs"], {}), "(v['pnl'].maxDrawupStartTs)\n", (1988, 2015), False, 'from datetime 
import datetime\n'), ((2086, 2133), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["v['pnl'].maxDrawupEndTs"], {}), "(v['pnl'].maxDrawupEndTs)\n", (2108, 2133), False, 'from datetime import datetime\n'), ((2661, 2704), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["v['pnl'].maxUpDayTs"], {}), "(v['pnl'].maxUpDayTs)\n", (2683, 2704), False, 'from datetime import datetime\n'), ((2829, 2874), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["v['pnl'].maxDownDayTs"], {}), "(v['pnl'].maxDownDayTs)\n", (2851, 2874), False, 'from datetime import datetime\n'), ((3003, 3047), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["v['pnl'].maxUpWeekTs"], {}), "(v['pnl'].maxUpWeekTs)\n", (3025, 3047), False, 'from datetime import datetime\n'), ((3175, 3221), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["v['pnl'].maxDownWeekTs"], {}), "(v['pnl'].maxDownWeekTs)\n", (3197, 3221), False, 'from datetime import datetime\n'), ((3353, 3398), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["v['pnl'].maxUpMonthTs"], {}), "(v['pnl'].maxUpMonthTs)\n", (3375, 3398), False, 'from datetime import datetime\n'), ((3529, 3576), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["v['pnl'].maxDownMonthTs"], {}), "(v['pnl'].maxDownMonthTs)\n", (3551, 3576), False, 'from datetime import datetime\n'), ((3709, 3753), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["v['pnl'].maxUpYearTs"], {}), "(v['pnl'].maxUpYearTs)\n", (3731, 3753), False, 'from datetime import datetime\n'), ((3881, 3927), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["v['pnl'].maxDownYearTs"], {}), "(v['pnl'].maxDownYearTs)\n", (3903, 3927), False, 'from datetime import datetime\n'), ((12375, 12403), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['v.ts'], {}), '(v.ts)\n', (12397, 12403), False, 'from datetime import datetime\n'), ((11580, 11608), 'datetime.datetime.fromtimestamp', 
'datetime.fromtimestamp', (['v.ts'], {}), '(v.ts)\n', (11602, 11608), False, 'from datetime import datetime\n')] |
"""
Zipf's law
This program fits data ranked along certain dimension (e.g. city population
and word appearance) to Zipfian distribution. The probability mass function
for zipf is: pmf(x, a) = 1/(zeta(a) * x**a), for x >= 1 and a > 1.
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.zipf.html
It's clear that fitting data to zipf is essentially find a.
HOWEVER, the above function fails to characterize zipf if a <= 1. Therefore,
we resort to more original maths expression:
f(x) = (1/x**a) / sum_1^N (1/x**a), where N is the number of elements.
https://en.wikipedia.org/wiki/Zipf%27s_law
The right most part: sum_1^N (1/x**a) ~ (N**(1-a)-1) / (1-a)
https://en.wikipedia.org/wiki/Euler%E2%80%93Maclaurin_formula
This step significantly reduce computational complexity.
"""
import os.path
import numpy as np
import matplotlib.pyplot as plt
from scipy import special
import seaborn as sns
# import data
source_data_name = 'example_zipf.csv'
cwd = os.getcwd()
data_file = cwd + os.sep + source_data_name
data_original = np.genfromtxt(data_file, delimiter=',')
data_whole = data_original[~np.isnan(data_original)]
data_unique = np.trim_zeros(np.unique(data_whole))
# remove duplicates and rank the frequencies (sort them in descending order)
frequency = np.sort(data_unique)[::-1]
# truncate data if only part of the data is interested
frequency = frequency[0:1000]
rank = np.arange(1, len(frequency)+1)
pmf = frequency / sum(frequency)
# Zipf pmf(or normalized frequency) fitting with rank and frequency
# Maths: f(x) = 1 / (c * x**a) => log(f(x)) = - log(c) - a*log(x)
# Use numpy.polyfit (or scipy.polyfit) to find a and then we get f(x) easily
x = np.log(rank)
y = np.log(frequency / sum(frequency))
p = np.polyfit(x, y, 1)
a = -p[0]
if a > 1:
c1 = special.zeta(a)
c2 = rank ** a
pmf_z = 1 / (special.zeta(a) * rank ** a), a
else:
n = len(frequency)
pmf_z = (1-a) / ((n**(1-a) - 1) * rank ** a)
a = round(a, 3) # keep the three two decimal
# plot fitting result
log_plot = 1 # 0 - normal plot, 1 - log plot
format_on = 1 # 0 off, 1 on
sns.set()
if log_plot == 1:
plt.loglog(rank, pmf, 'o')
plt.loglog(rank, pmf_z, 'red', linewidth=2)
else:
plt.plot(rank, pmf, 'o')
plt.plot(rank, pmf_z, 'red', linewidth=2)
if format_on == 1:
plt.xlabel('Ranking of videos in terms of number of shares')
plt.ylabel('PMF')
lbs = ['Orignal data', 'Zipf distribution ($\\alpha$={})'.format(a)]
plt.legend(lbs, loc='upper right', bbox_to_anchor=(1, 1), frameon=False)
plt.tight_layout()
plt.show()
| [
"seaborn.set",
"numpy.unique",
"matplotlib.pyplot.loglog",
"numpy.polyfit",
"scipy.special.zeta",
"matplotlib.pyplot.ylabel",
"numpy.sort",
"numpy.log",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.isnan",
"matplotlib.pyplot.tight_layout",
"numpy.genfromtxt",
"matplotlib.py... | [((1065, 1104), 'numpy.genfromtxt', 'np.genfromtxt', (['data_file'], {'delimiter': '""","""'}), "(data_file, delimiter=',')\n", (1078, 1104), True, 'import numpy as np\n'), ((1710, 1722), 'numpy.log', 'np.log', (['rank'], {}), '(rank)\n', (1716, 1722), True, 'import numpy as np\n'), ((1768, 1787), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (1778, 1787), True, 'import numpy as np\n'), ((2139, 2148), 'seaborn.set', 'sns.set', ([], {}), '()\n', (2146, 2148), True, 'import seaborn as sns\n'), ((2619, 2629), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2627, 2629), True, 'import matplotlib.pyplot as plt\n'), ((1188, 1209), 'numpy.unique', 'np.unique', (['data_whole'], {}), '(data_whole)\n', (1197, 1209), True, 'import numpy as np\n'), ((1302, 1322), 'numpy.sort', 'np.sort', (['data_unique'], {}), '(data_unique)\n', (1309, 1322), True, 'import numpy as np\n'), ((1820, 1835), 'scipy.special.zeta', 'special.zeta', (['a'], {}), '(a)\n', (1832, 1835), False, 'from scipy import special\n'), ((2173, 2199), 'matplotlib.pyplot.loglog', 'plt.loglog', (['rank', 'pmf', '"""o"""'], {}), "(rank, pmf, 'o')\n", (2183, 2199), True, 'import matplotlib.pyplot as plt\n'), ((2205, 2248), 'matplotlib.pyplot.loglog', 'plt.loglog', (['rank', 'pmf_z', '"""red"""'], {'linewidth': '(2)'}), "(rank, pmf_z, 'red', linewidth=2)\n", (2215, 2248), True, 'import matplotlib.pyplot as plt\n'), ((2261, 2285), 'matplotlib.pyplot.plot', 'plt.plot', (['rank', 'pmf', '"""o"""'], {}), "(rank, pmf, 'o')\n", (2269, 2285), True, 'import matplotlib.pyplot as plt\n'), ((2291, 2332), 'matplotlib.pyplot.plot', 'plt.plot', (['rank', 'pmf_z', '"""red"""'], {'linewidth': '(2)'}), "(rank, pmf_z, 'red', linewidth=2)\n", (2299, 2332), True, 'import matplotlib.pyplot as plt\n'), ((2358, 2418), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ranking of videos in terms of number of shares"""'], {}), "('Ranking of videos in terms of number of shares')\n", (2368, 2418), 
True, 'import matplotlib.pyplot as plt\n'), ((2424, 2441), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""PMF"""'], {}), "('PMF')\n", (2434, 2441), True, 'import matplotlib.pyplot as plt\n'), ((2521, 2593), 'matplotlib.pyplot.legend', 'plt.legend', (['lbs'], {'loc': '"""upper right"""', 'bbox_to_anchor': '(1, 1)', 'frameon': '(False)'}), "(lbs, loc='upper right', bbox_to_anchor=(1, 1), frameon=False)\n", (2531, 2593), True, 'import matplotlib.pyplot as plt\n'), ((2599, 2617), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2615, 2617), True, 'import matplotlib.pyplot as plt\n'), ((1134, 1157), 'numpy.isnan', 'np.isnan', (['data_original'], {}), '(data_original)\n', (1142, 1157), True, 'import numpy as np\n'), ((1874, 1889), 'scipy.special.zeta', 'special.zeta', (['a'], {}), '(a)\n', (1886, 1889), False, 'from scipy import special\n')] |
from blocks import *
from vector import *
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import openmesh as om
import numpy as np
bb = []    # tetrakaidecahedron cells placed so far
bbd = []   # dodecahedron cells placed so far
bb_c = []  # centroids of every placed cell (passed to move2surface;
           # presumably used for overlap checking — TODO confirm in blocks.py)
b1 = Tetrakaidecahedron()
# pose vector 6: translation x, y, z, rotation about x, y, z axis
initial_pose(b1, [0, 0, 0, 2.0*np.pi/7, np.pi/7, np.pi/3])
#initial_pose(b1, [0, 0, 0, 0, 0, 0])
bb_c.append(b1.get_centroid())
bb.append(b1)
# grow the foam outwards for 5 generations: every existing cell tries to
# attach new tetrakaidecahedra/dodecahedra on each of its surfaces
for i in range(5):
    bb1 = []    # tetrakaidecahedra added this generation
    bb_c1 = []  # centroids of all cells added this generation
    for b1 in bb:
        # grow tetrakaidecahedron
        cci = b1.get_tetrakaidecahedron_surface_list()
        cc = b1.get_tetrakaidecahedron_surface_coords()
        for ci, c1 in zip(cci, cc):
            n1 = Tetrakaidecahedron()
            # only keep the new cell when move2surface succeeds (truthy)
            if n1.move2surface(ci, c1, bb_c):
                bb1.append(n1)
                bb_c1.append(n1.get_centroid())
                b1.interface_list.append(ci)
        # grow dodecahedron
        cci = b1.get_dodecahedron_surface_list()
        cc = b1.get_dodecahedron_surface_coords()
        for ci, c1 in zip(cci, cc):
            n1 = Dodecahedron()
            if n1.move2surface(c1, bb_c):
                bbd.append(n1)
                bb_c1.append(n1.get_centroid())
                b1.interface_list.append(ci)
    bb = bb + bb1
    bb_c = bb_c + bb_c1
mesh = om.TriMesh()
frame_r = 1.0  # radius of the cylinders used to render frame edges
# box: xmin, xmax, ymin, ymax, zmin, zmax
box = [-30, 30, -30, 30, -15, 15]
# make 6 planes (one per box face): first three numbers are a point on the
# plane, last three its inward normal — order: xmin, xmax, ymin, ymax, zmin, zmax
planes = [
    [box[0], 0, 0, 1, 0, 0],
    [box[1], 0, 0, -1, 0, 0],
    [0, box[2], 0, 0, 1, 0],
    [0, box[3], 0, 0, -1, 0],
    [0, 0, box[4], 0, 0, 1],
    [0, 0, box[5], 0, 0, -1]]
# create mesh for all cutting edges
for b1 in bb + bbd:
v1 = b1.vertex
f1 = b1.face
## find all cutting points
cut_pp = []
for e in b1.edge:
p0 = v1[e[0]]
p1 = v1[e[1]]
pv = np.array([p1[0] - p0[0], p1[1] - p0[1], p1[2] - p0[2]])
for ai, a1 in enumerate(planes):
a0 = np.array(a1[:3])
av = np.array(a1[3:])
av = av / np.linalg.norm(av)
pav = np.dot(pv, av)
# line is parallel to the plane
if abs(pav) < 0.000001:
continue
# check cutting edge
d = np.dot((a0 - p0), av) / pav
# if the intersection point is in the line
if d >= 0 and d <= 1:
cut_p = p0 + d * pv
cut_pp.append([cut_p, e[0], e[1], ai])
# make mesh if the two cutting point is in the same surface
for i in range(len(cut_pp)):
for j in range(i + 1, len(cut_pp)):
b1 = cut_pp[i]
b2 = cut_pp[j]
# b1 and b2 are not in same plane
if b1[3] != b2[3]:
continue
# b1 and b2 are in same surface
if are_points_same_face([b1[1], b1[2], b2[1], b2[2]], f1):
#print b1[0], b2[0]
p0 = b1[0]
p1 = b2[0]
pv = np.array([p1[0] - p0[0], p1[1] - p0[1], p1[2] - p0[2]])
# move the outside point on border
# planes xmin, xmax
if b1[3] == 0 or b1[3] == 1:
p0[1] = box[2] if p0[1] < box[2] else p0[1]
p0[1] = box[3] if p0[1] > box[3] else p0[1]
p0[2] = box[4] if p0[2] < box[4] else p0[2]
p0[2] = box[5] if p0[2] > box[5] else p0[2]
p1[1] = box[2] if p1[1] < box[2] else p1[1]
p1[1] = box[3] if p1[1] > box[3] else p1[1]
p1[2] = box[4] if p1[2] < box[4] else p1[2]
p1[2] = box[5] if p1[2] > box[5] else p1[2]
# planes ymin, ymax
if b1[3] == 2 or b1[3] == 3:
p0[0] = box[0] if p0[0] < box[0] else p0[0]
p0[0] = box[1] if p0[0] > box[1] else p0[0]
p0[2] = box[4] if p0[2] < box[4] else p0[2]
p0[2] = box[5] if p0[2] > box[5] else p0[2]
p1[0] = box[0] if p1[0] < box[0] else p1[0]
p1[0] = box[1] if p1[0] > box[1] else p1[0]
p1[2] = box[4] if p1[2] < box[4] else p1[2]
p1[2] = box[5] if p1[2] > box[5] else p1[2]
# planes zmin, zmax
if b1[3] == 4 or b1[3] == 5:
p0[0] = box[0] if p0[0] < box[0] else p0[0]
p0[0] = box[1] if p0[0] > box[1] else p0[0]
p0[1] = box[2] if p0[1] < box[2] else p0[1]
p0[1] = box[3] if p0[1] > box[3] else p0[1]
p1[0] = box[0] if p1[0] < box[0] else p1[0]
p1[0] = box[1] if p1[0] > box[1] else p1[0]
p1[1] = box[2] if p1[1] < box[2] else p1[1]
p1[1] = box[3] if p1[1] > box[3] else p1[1]
make_mesh(mesh, p0, p1, p2=[1.0, 1.0, 1.0], r=frame_r, n=6)
# make box frame: draw the 12 edges of the bounding cuboid
# (replaces 12 copy-pasted make_mesh calls; original edge order preserved)
_frame_edges = []
for _x in (box[0], box[1]):
    # per x face: the two edges parallel to z, then the two parallel to y
    for _y in (box[2], box[3]):
        _frame_edges.append(([_x, _y, box[4]], [_x, _y, box[5]]))
    for _z in (box[4], box[5]):
        _frame_edges.append(([_x, box[2], _z], [_x, box[3], _z]))
# the four edges parallel to the x axis
for _y in (box[2], box[3]):
    for _z in (box[4], box[5]):
        _frame_edges.append(([box[0], _y, _z], [box[1], _y, _z]))
for _p0, _p1 in _frame_edges:
    make_mesh(mesh, _p0, _p1, p2=[1.0, 1.0, 1.0], r=frame_r, n=6)
# Disabled debug branch: when flipped to True it additionally meshes every raw
# cell edge (clipped to the box) with a thinner magenta cylinder, not just the
# plane-cut lines. Kept for manual inspection of the uncut foam.
if False:
    # create mesh for all edges for tetrakaidecahedron and dodecahedron
    for b1 in bb + bbd:
        v1 = b1.vertex
        for e in b1.edge:
            p0 = v1[e[0]]
            p1 = v1[e[1]]
            # move the outside point on border
            # planes xmin, xmax
            if p0[0] < box[0] and p1[0] < box[0]:
                continue
            if p0[1] < box[2] and p1[1] < box[2]:
                continue
            if p0[2] < box[4] and p1[2] < box[4]:
                continue
            if p0[0] > box[1] and p1[0] > box[1]:
                continue
            if p0[1] > box[3] and p1[1] > box[3]:
                continue
            if p0[2] > box[5] and p1[2] > box[5]:
                continue
            # both inside
            if p0[0] > box[0] and p0[0] < box[1] and p0[1] > box[2] and p0[1] < box[3] and p0[2] > box[4] and p0[2] < box[5]:
                if p1[0] > box[0] and p1[0] < box[1] and p1[1] > box[2] and p1[1] < box[3] and p1[2] > box[4] and p1[2] < box[5]:
                    make_mesh(mesh, p0, p1, p2=[1.0, 0.1, 1.0], r=frame_r*0.5, n=6)
                    continue
            # inner clamping additionally disabled (nested `if False`)
            if False:
                p0[0] = box[0] if p0[0] < box[0] else p0[0]
                p0[0] = box[1] if p0[0] > box[1] else p0[0]
                p0[1] = box[2] if p0[1] < box[2] else p0[1]
                p0[1] = box[3] if p0[1] > box[3] else p0[1]
                p0[2] = box[4] if p0[2] < box[4] else p0[2]
                p0[2] = box[5] if p0[2] > box[5] else p0[2]
                p1[0] = box[0] if p1[0] < box[0] else p1[0]
                p1[0] = box[1] if p1[0] > box[1] else p1[0]
                p1[1] = box[2] if p1[1] < box[2] else p1[1]
                p1[1] = box[3] if p1[1] > box[3] else p1[1]
                p1[2] = box[4] if p1[2] < box[4] else p1[2]
                p1[2] = box[5] if p1[2] > box[5] else p1[2]
            make_mesh(mesh, p0, p1, p2=[1.0, 0.1, 1.0], r=frame_r*0.5, n=6)
# output the final bubble-box mesh to disk
om.write_mesh("bubble_cut3.obj", mesh)
| [
"openmesh.TriMesh",
"numpy.array",
"numpy.dot",
"numpy.linalg.norm",
"openmesh.write_mesh"
] | [((1300, 1312), 'openmesh.TriMesh', 'om.TriMesh', ([], {}), '()\n', (1310, 1312), True, 'import openmesh as om\n'), ((8218, 8256), 'openmesh.write_mesh', 'om.write_mesh', (['"""bubble_cut3.obj"""', 'mesh'], {}), "('bubble_cut3.obj', mesh)\n", (8231, 8256), True, 'import openmesh as om\n'), ((1885, 1940), 'numpy.array', 'np.array', (['[p1[0] - p0[0], p1[1] - p0[1], p1[2] - p0[2]]'], {}), '([p1[0] - p0[0], p1[1] - p0[1], p1[2] - p0[2]])\n', (1893, 1940), True, 'import numpy as np\n'), ((1999, 2015), 'numpy.array', 'np.array', (['a1[:3]'], {}), '(a1[:3])\n', (2007, 2015), True, 'import numpy as np\n'), ((2033, 2049), 'numpy.array', 'np.array', (['a1[3:]'], {}), '(a1[3:])\n', (2041, 2049), True, 'import numpy as np\n'), ((2109, 2123), 'numpy.dot', 'np.dot', (['pv', 'av'], {}), '(pv, av)\n', (2115, 2123), True, 'import numpy as np\n'), ((2072, 2090), 'numpy.linalg.norm', 'np.linalg.norm', (['av'], {}), '(av)\n', (2086, 2090), True, 'import numpy as np\n'), ((2278, 2297), 'numpy.dot', 'np.dot', (['(a0 - p0)', 'av'], {}), '(a0 - p0, av)\n', (2284, 2297), True, 'import numpy as np\n'), ((3010, 3065), 'numpy.array', 'np.array', (['[p1[0] - p0[0], p1[1] - p0[1], p1[2] - p0[2]]'], {}), '([p1[0] - p0[0], p1[1] - p0[1], p1[2] - p0[2]])\n', (3018, 3065), True, 'import numpy as np\n')] |
"""
Module Docstring
"""
__author__ = "<NAME>"
__version__ = "0.1.0"
__license__ = "MIT"
import argparse
import logging
import sys
sys.path.append('../')
from funcs import utils
import networkx as nx
import random
import pandas as pd
import numpy as np
from tqdm.autonotebook import trange
parser = argparse.ArgumentParser(description='Extract random paths between source and destination nodes, with destination nodes being genes associated to disease_id in gda file')
parser.add_argument('srcnodes_file', type=str, help='Source nodes IDs in one column')
parser.add_argument('gda_file', type=str, help='Gene-disease association file')
parser.add_argument('expr_file', type=str, help='Filename of expression file')
parser.add_argument('gsm_file', type=str, help='Filename of GSM IDs to consider')
parser.add_argument('net_path', type=str, default=None, help='Network filepath')
parser.add_argument('disease_id', type=str, help='diseaseId to consider as destination in gda file')
parser.add_argument('out_seqcorr_file', type=str, help='Output directory of sequential correlation files')
parser.add_argument('-s','--spearman', action='store_true', help='Whether to use Spearman correlation')
parser.add_argument('--N_samples', type=int, default=500, help='Number of random paths to extract')
parser.add_argument('--N_cores', type=int, default=1, help='Number of cores')
parser.add_argument('--seed', type=int, default=100, help='Random seed')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO,
format='%(module)s:%(levelname)s:%(asctime)s:%(message)s',
handlers=[logging.FileHandler("../logs/report.log"),
logging.StreamHandler()])
logging.info(args)
random.seed(args.seed)
srcnodes = utils.read_gene_list(args.srcnodes_file)
gda = pd.read_csv(args.gda_file,sep='\t')
expr = utils.read_expr(args.expr_file)
gsm = utils.read_text(args.gsm_file)
net = utils.read_network(args.net_path)
expr = expr[gsm]
destnodes = gda[gda.diseaseId==args.disease_id].geneId.unique().tolist()
logging.info("Number of nodes: {}".format(len(destnodes)))
def get_abscorr(i, j, corrdata):
    """Absolute correlation between genes i and j; when a gene maps to
    several probes, collapse duplicates by taking the max over both axes."""
    method = 'spearman' if args.spearman else 'pearson'
    corrmatr = corrdata.loc[[i, j]].T.corr(method)
    collapsed = corrmatr.abs().groupby('ENTREZ_GENE_ID').apply(lambda x: x.max())
    collapsed = collapsed.T.groupby('ENTREZ_GENE_ID').apply(lambda x: x.max())
    return collapsed.values[0, 1]
def get_seq_corr(path):
    """Mean absolute co-expression over consecutive gene pairs of a path.

    Returns NaN when any gene on the path is absent from the expression
    matrix index.
    """
    # membership test against the index directly (hash lookup) instead of
    # rebuilding expr.index.tolist() for every gene (was O(genes * index))
    if not all(gene in expr.index for gene in path):
        return np.nan
    return np.mean([get_abscorr(path[k], path[k + 1], expr) for k in range(len(path) - 1)])
def get_random_coexpr(i):
    """Pick a random source/destination pair, choose one of their shortest
    paths at random, and return its sequential co-expression score.
    The argument is an unused job index."""
    src = random.choice(srcnodes)
    dest = random.choice(destnodes)
    candidate_paths = list(nx.all_shortest_paths(net, src, dest))
    return get_seq_corr(random.choice(candidate_paths))
# run the sampling either in parallel or sequentially with a progress bar
if args.N_cores > 1:
    seqs = utils.parallel_process(get_random_coexpr, range(args.N_samples), n_jobs=args.N_cores)
else:
    seqs = []
    for sample_idx in trange(args.N_samples):
        seqs.append(get_random_coexpr(sample_idx))
np.savetxt(args.out_seqcorr_file, seqs)
"tqdm.autonotebook.trange",
"random.choice",
"logging.StreamHandler",
"argparse.ArgumentParser",
"pandas.read_csv",
"funcs.utils.read_network",
"logging.info",
"funcs.utils.read_gene_list",
"random.seed",
"networkx.all_shortest_paths",
"funcs.utils.read_text",
"logging.FileHandler",
"numpy.s... | [((135, 157), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (150, 157), False, 'import sys\n'), ((304, 483), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Extract random paths between source and destination nodes, with destination nodes being genes associated to disease_id in gda file"""'}), "(description=\n 'Extract random paths between source and destination nodes, with destination nodes being genes associated to disease_id in gda file'\n )\n", (327, 483), False, 'import argparse\n'), ((1721, 1739), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (1733, 1739), False, 'import logging\n'), ((1740, 1762), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (1751, 1762), False, 'import random\n'), ((1774, 1814), 'funcs.utils.read_gene_list', 'utils.read_gene_list', (['args.srcnodes_file'], {}), '(args.srcnodes_file)\n', (1794, 1814), False, 'from funcs import utils\n'), ((1821, 1857), 'pandas.read_csv', 'pd.read_csv', (['args.gda_file'], {'sep': '"""\t"""'}), "(args.gda_file, sep='\\t')\n", (1832, 1857), True, 'import pandas as pd\n'), ((1864, 1895), 'funcs.utils.read_expr', 'utils.read_expr', (['args.expr_file'], {}), '(args.expr_file)\n', (1879, 1895), False, 'from funcs import utils\n'), ((1902, 1932), 'funcs.utils.read_text', 'utils.read_text', (['args.gsm_file'], {}), '(args.gsm_file)\n', (1917, 1932), False, 'from funcs import utils\n'), ((1939, 1972), 'funcs.utils.read_network', 'utils.read_network', (['args.net_path'], {}), '(args.net_path)\n', (1957, 1972), False, 'from funcs import utils\n'), ((3034, 3073), 'numpy.savetxt', 'np.savetxt', (['args.out_seqcorr_file', 'seqs'], {}), '(args.out_seqcorr_file, seqs)\n', (3044, 3073), True, 'import numpy as np\n'), ((2655, 2678), 'random.choice', 'random.choice', (['srcnodes'], {}), '(srcnodes)\n', (2668, 2678), False, 'import random\n'), ((2690, 2714), 'random.choice', 'random.choice', (['destnodes'], {}), 
'(destnodes)\n', (2703, 2714), False, 'import random\n'), ((2967, 2989), 'tqdm.autonotebook.trange', 'trange', (['args.N_samples'], {}), '(args.N_samples)\n', (2973, 2989), False, 'from tqdm.autonotebook import trange\n'), ((1622, 1663), 'logging.FileHandler', 'logging.FileHandler', (['"""../logs/report.log"""'], {}), "('../logs/report.log')\n", (1641, 1663), False, 'import logging\n'), ((1695, 1718), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1716, 1718), False, 'import logging\n'), ((2745, 2782), 'networkx.all_shortest_paths', 'nx.all_shortest_paths', (['net', 'src', 'dest'], {}), '(net, src, dest)\n', (2766, 2782), True, 'import networkx as nx\n')] |
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib
#matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from matplotlib.colors import colorConverter
import os
import ast
from scipy import ndimage
import paras_dorsoventral as dors
import paras_rostrocaudal as ros
import colourmix as colmix
matplotlib.rcParams.update({'font.size': 18})
time = 4000.0  # simulation time to plot (NOTE: name clashes with the stdlib `time` module)
slicenr = 5    # slice index along the chosen axis
tstep=50.0     # time step between stored snapshots
axis = 'dorso'  # which cut to show: 'dorso' or 'rostro'
runThrough = 'space'
scale = 0.5
StaticDataPath = 'cooltube_0.5_1'
# figure aspect depends on the chosen cut direction
if axis == 'dorso':
    fig = plt.figure(figsize = [6.5, 8])
if axis == 'rostro':
    fig = plt.figure(figsize = [4, 14])
ax1 = fig.add_subplot(111)
white = 'white'

def _transparency_cmap(name, rgba, alphas):
    """Build a white->rgba colormap whose per-entry alpha follows `alphas`.

    Uses the private _init()/_lut matplotlib API to inject the alpha ramp,
    exactly as the original copy-pasted blocks did (6x deduplicated).
    """
    cmap = matplotlib.colors.LinearSegmentedColormap.from_list(name, [white, rgba], 256)
    cmap._init()  # create the _lut array, with rgba values
    cmap._lut[:, -1] = alphas
    return cmap

# DORSOVENTRAL colours
colorP = colorConverter.to_rgba(dors.colours[0])
colorO = colorConverter.to_rgba(dors.colours[1])
colorN = colorConverter.to_rgba(dors.colours[2])
# progressive alpha 0 -> 0.8 over the 256 lut entries plus the 3 extra
# under/over/bad entries (previously written as cmapO.N + 3 with N == 256)
dAlphas = np.linspace(0, 0.8, 256 + 3)
cmapP = _transparency_cmap('my_cmapP', colorP, dAlphas)
cmapO = _transparency_cmap('my_cmapO', colorO, dAlphas)
cmapN = _transparency_cmap('my_cmapN', colorN, dAlphas)

# ROSTROCAUDAL colours
colorFB = colorConverter.to_rgba(ros.colours[0])
colorMB = colorConverter.to_rgba(ros.colours[1])
colorHB = colorConverter.to_rgba(ros.colours[2])
rAlphas = np.linspace(0, 0.8, 256 + 3)
cmapFB = _transparency_cmap('my_cmapFB', colorFB, rAlphas)
cmapMB = _transparency_cmap('my_cmapMB', colorMB, rAlphas)
cmapHB = _transparency_cmap('my_cmapHB', colorHB, rAlphas)
def compare(matrices):
    """Keep, at every voxel, only the strongest signal among `matrices`.

    Parameters
    ----------
    matrices : sequence of equally-shaped 3-D arrays (or nested lists),
        one per gene domain.

    Returns
    -------
    numpy.ndarray of shape (len(matrices), *matrices[0].shape) where, at
    each (i, j, k), only the winning layer keeps its value and every other
    layer holds zero.  Ties go to the first layer, matching np.argmax
    (and the original element-wise loop).

    Replaces the original O(dimy*dimx*dimz) Python triple loop with
    vectorized numpy operations; results are identical.
    """
    stacked = np.asarray(matrices)
    winners = np.argmax(stacked, axis=0)  # index of the strongest layer per voxel
    peaks = stacked.max(axis=0)          # value of that strongest layer
    show = np.zeros_like(stacked)
    # Scatter each voxel's peak back into the winning layer only.
    np.put_along_axis(show, winners[np.newaxis, ...], peaks[np.newaxis, ...],
                      axis=0)
    return show
def getCut(axis,t=0,s=0,dataPath=StaticDataPath):
    """Return the three 2-D winner slices for one axis at time `t`.

    Parameters
    ----------
    axis : str
        'dorso'   -> slice ``[s, :, :]`` of the P/O/N comparison,
        'rostro'  -> slice ``[:, :, s]`` of the FB/MB/HB comparison,
        'rostro2' -> slice ``[:, s, :]`` of the FB/MB/HB comparison.
    t : float
        Simulation time stamp used in the data file names.
    s : int
        Index of the slice along the fixed axis.
    dataPath : str
        Root directory containing the 'dorso'/'rostro' sub-directories.

    Returns
    -------
    (arrA, arrB, arrC) : three 2-D numpy arrays, one per gene domain.

    Raises
    ------
    ValueError
        If `axis` is not one of the three recognised values (the original
        code fell through to a NameError instead).
    """
    def _winner_components(subdir, cache_suffix, tags):
        # Load the cached per-voxel comparison if present; otherwise build
        # it from the raw per-gene arrays and cache it for the next call.
        base = dataPath + subdir + 'T%1.1f' % t
        cacheFile = base + cache_suffix
        if os.path.isfile(cacheFile):
            return np.load(cacheFile)
        comp = compare([np.load(base + tag + '.npy') for tag in tags])
        np.save(cacheFile, comp)
        return comp

    if axis == 'dorso':
        comp = _winner_components('/dorso/', '_dComp.npy', ('_P', '_O', '_N'))
        return comp[0][s, :, :], comp[1][s, :, :], comp[2][s, :, :]
    if axis == 'rostro':
        comp = _winner_components('/rostro/', '_rComp.npy',
                                  ('_FB', '_MB', '_HB'))
        return comp[0][:, :, s], comp[1][:, :, s], comp[2][:, :, s]
    if axis == 'rostro2':
        # BUG FIX: the original built the raw file names in this branch
        # without the '.npy' extension, so a cache miss made np.load raise
        # FileNotFoundError.  The unified helper always appends '.npy'.
        comp = _winner_components('/rostro/', '_rComp.npy',
                                  ('_FB', '_MB', '_HB'))
        return comp[0][:, s, :], comp[1][:, s, :], comp[2][:, s, :]
    raise ValueError('unknown axis: %r' % axis)
def getTS(ts, rate, t=time, s=slicenr):
    """Translate the animation frame counter into a (time, slice) pair.

    `ts` selects what the animation sweeps over: 'time' advances the time
    stamp in steps of the module-level `tstep` at the default slice,
    'space' advances the slice index at a fixed time `t`.
    """
    if ts == 'space':
        t_ret, s_ret = t, rate
    elif ts == 'time':
        t_ret, s_ret = rate * tstep, slicenr
    return t_ret, s_ret
def update(rate):
    """Animation callback: redraw the module-level axis `ax1` for frame `rate`.

    `rate` is mapped by `getTS` to a (time, slice) pair depending on the
    module-level `runThrough` setting, and the corresponding data slice is
    drawn as coloured scatter points (one colormap per gene domain).
    """
    ax1.clear()
    t,s = getTS(runThrough,rate)
    #print(rate,t,s)
    cut = getCut(axis,t,s)
    ax1.set_title("slice nr %d time %1.1f" %(s,t))
    #if t < len(data[0][0]):
        #ax1.matshow(data[:,t,:])
        #t+=1
    #else:
        #t=0
#    ax1.imshow(arrFB[rate,:,:],interpolation='bilinear',cmap=cmap1)
#    ax1.imshow(arrMB[rate,:,:],interpolation='bilinear',cmap=cmap2)
#    ax1.imshow(arrHB[rate,:,:],interpolation='bilinear',cmap=cmap3)
    # Pick the colormap triple and marker size for the configured axis.
    # NOTE(review): cmap1/2/3 stay unbound if `axis` is neither value.
    if axis == 'dorso':
        cmap1,cmap2,cmap3 = cmapP,cmapO,cmapN
        size = 500
    if axis == 'rostro':
        cmap1,cmap2,cmap3 = cmapFB,cmapMB,cmapHB
        size =100
#    ax1.imshow(cut[0],cmap=cmap1)
#    ax1.imshow(cut[1],cmap=cmap2)
#    ax1.imshow(cut[2],cmap=cmap3)
    """
    ax1.imshow(cut[0],interpolation='nearest',cmap=cmap1)
    ax1.imshow(cut[1],interpolation='nearest',cmap=cmap2)
    ax1.imshow(cut[2],interpolation='nearest',cmap=cmap3)
    """
    # Map each domain's values to RGBA and scatter only the non-zero cells.
    mapper1 = matplotlib.cm.ScalarMappable(cmap=cmap1)
    mapper2 = matplotlib.cm.ScalarMappable(cmap=cmap2)
    mapper3 = matplotlib.cm.ScalarMappable(cmap=cmap3)
    c1= np.where(cut[0])
    colors1 = mapper1.to_rgba(cut[0][c1])
    c2= np.where(cut[1])
    colors2 = mapper2.to_rgba(cut[1][c2])
    c3= np.where(cut[2])
    colors3 = mapper3.to_rgba(cut[2][c3])
    ax1.set_aspect('auto')
    ax1.set_xlim([-1,16])
    ax1.scatter(c1[0],c1[1],c=colors1,s=size)
    ax1.scatter(c2[0],c2[1],c=colors2,s=size)
    ax1.scatter(c3[0],c3[1],c=colors3, s=size)
    #plt.savefig('unsinnfig/t%d'% rate)
def plotSlices(time, dorsnr, rosnr, rosnr2, plotmethod='circle',save=True, dataPath=StaticDataPath):
    """Render one DV slice and two RC slices of the winner arrays at `time`.

    Parameters
    ----------
    time : float
        Simulation time stamp (shadows the module-level `time` constant).
    dorsnr : int
        DV slice index (x position).
    rosnr : int
        RC slice index along z.
    rosnr2 : int
        RC slice index along y.
    plotmethod : str
        'circle' -> coloured scatter points; 'square' -> imshow rasters.
    save : bool
        Write PNGs under dataPath + '/allPictures/' when True.
    dataPath : str
        Root directory with the saved simulation arrays.
    """
    # fug = plt.figure(figsize=(8, 6))
    # gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
    # axDors = fug.add_subplot(gs[0])
    # axRos = fug.add_subplot(gs[1])
    plt.close("all")
    fug = plt.figure(figsize = [7.5, 8])
    fag = plt.figure(figsize = [10, 14])
    axDors = fug.add_subplot(1,1,1)
    axRos = fag.add_subplot(1,2,1)
    axRos2 = fag.add_subplot(1,2,2)
    # Tick labels convert grid indices to micrometres (1 cell = 10/scale um).
    axDors.set_title("DV slice at \n x = %d µm, t = %1.1f " %(dorsnr*10/scale, time))
    axDors.set_xlabel("y [µm]")
    dxticks = np.arange(0,20,5)
    axDors.xaxis.set_ticks(dxticks)
    axDors.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in dxticks])
    dyticks = np.arange(0,65,10)
    axDors.yaxis.set_ticks(dyticks)
    axDors.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in dyticks])
    axDors.set_ylabel("z [µm]")
    axRos.set_title("RC slice at \n z = %d µm, t = %1.1f " %(rosnr*10/scale, time))
    rxticks = dxticks
    axRos.xaxis.set_ticks(rxticks)
    axRos.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in rxticks])
    ryticks = np.arange(0,65,10)
    axRos.yaxis.set_ticks(ryticks)
    axRos.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in ryticks])
    axRos.set_xlabel("y [µm]")
    axRos.set_ylabel("x [µm]")
    # NOTE(review): title uses `rosnr`, but this panel is sliced at `rosnr2`
    # below -- the position label is probably wrong; confirm intent.
    axRos2.set_title("RC slice at \n y = %d µm, t = %1.1f " %(rosnr*10/scale, time))
    r2xticks = np.arange(0,65,10)
    axRos2.xaxis.set_ticks(r2xticks)
    axRos2.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in r2xticks])
    r2yticks = np.arange(0,65,10)
    axRos2.yaxis.set_ticks(r2yticks)
    axRos2.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in r2yticks])
    axRos2.set_xlabel("z [µm]")
    axRos2.set_ylabel("x [µm]")
    # Load/compute the three winner slices for each panel.
    dataDors = getCut('dorso', t= time, s=dorsnr, dataPath = dataPath)
    dataRos = getCut('rostro', t= time, s=rosnr, dataPath = dataPath)
    dataRos2 = getCut('rostro2', t= time, s=rosnr2,dataPath = dataPath)
    for axtype in ['rostro','dorso']:
        if axtype == 'dorso':
            cmap1,cmap2,cmap3 = cmapP,cmapO,cmapN
            size = 500
            ax = axDors
            cut =dataDors
        if axtype == 'rostro':
            cmap1,cmap2,cmap3 = cmapFB,cmapMB,cmapHB
            size =100
            ax=axRos
            ax2=axRos2
            cut= dataRos
            cut2=dataRos2
        if plotmethod == 'circle':
            # Scatter only the non-zero cells, coloured by value.
            mapper1 = matplotlib.cm.ScalarMappable(cmap=cmap1)
            mapper2 = matplotlib.cm.ScalarMappable(cmap=cmap2)
            mapper3 = matplotlib.cm.ScalarMappable(cmap=cmap3)
            c1= np.where(cut[0])
            colors1 = mapper1.to_rgba(cut[0][c1])
            c2= np.where(cut[1])
            colors2 = mapper2.to_rgba(cut[1][c2])
            c3= np.where(cut[2])
            colors3 = mapper3.to_rgba(cut[2][c3])
            ax.set_aspect('auto')
            #ax.set_xlim([-1,16])
            ax.scatter(c1[0],c1[1],c=colors1,s=size)
            ax.scatter(c2[0],c2[1],c=colors2,s=size)
            ax.scatter(c3[0],c3[1],c=colors3, s=size)
        if plotmethod == 'square':
#            ax1.imshow(cut[0],cmap=cmap1)
#            ax1.imshow(cut[1],cmap=cmap2)
#            ax1.imshow(cut[2],cmap=cmap3)
            if axtype == 'rostro':
                # Trim the last row/column and overlay the three domains;
                # the second panel is rotated -90 deg to match orientation.
                ax.imshow(cut[0][:-1,:-1],interpolation='nearest',cmap=cmap1,origin = 'lower')
                ax.imshow(cut[1][:-1,:-1],interpolation='nearest',cmap=cmap2,origin = 'lower')
                ax.imshow(cut[2][:-1,:-1],interpolation='nearest',cmap=cmap3,origin = 'lower')
                ax2.imshow(ndimage.rotate(cut2[0][:-1,:-1],-90)[:,:-1],interpolation='nearest',cmap=cmap1,origin = 'lower')
                ax2.imshow(ndimage.rotate(cut2[1][:-1,:-1],-90)[:,:-1],interpolation='nearest',cmap=cmap2,origin = 'lower')
                ax2.imshow(ndimage.rotate(cut2[2][:-1,:-1],-90)[:,:-1],interpolation='nearest',cmap=cmap3,origin = 'lower')
#                rcut0 = ndimage.rotate(cut[0], 90)
#                rcut1 = ndimage.rotate(cut[1], 90)
#                rcut2 = ndimage.rotate(cut[2], 90)
#                ax.imshow(rcut0,interpolation='nearest',cmap=cmap1)
#                ax.imshow(rcut1,interpolation='nearest',cmap=cmap2)
#                ax.imshow(rcut2,interpolation='nearest',cmap=cmap3)
            if axtype == 'dorso':
                rcut0 = ndimage.rotate(cut[0], -90)
                rcut1 = ndimage.rotate(cut[1], -90)
                rcut2 = ndimage.rotate(cut[2], -90)
                ax.imshow(rcut0[:-1,1:],interpolation='nearest',cmap=cmap1,origin = 'lower')
                ax.imshow(rcut1[:-1,1:],interpolation='nearest',cmap=cmap2,origin = 'lower')
                ax.imshow(rcut2[:-1,1:],interpolation='nearest',cmap=cmap3,origin = 'lower')
    if save ==True:
        fug.savefig(dataPath + '/allPictures/T%1.1f_DV%d.png' %(time,dorsnr) )
        fag.savefig(dataPath + '/allPictures/T%1.1f_RC%d_%d.png' %(time,rosnr,rosnr2) )
def plotSliceMix(plotFrom, time, dorsnr, rosnr, rosnr2,save=True):
    """Plot gene combinations with a different colour for each combination.

    Every pairing of a DV winner domain (P/O/N) with an RC winner domain
    (FB/MB/HB) gets its own binary mask, coloured via `colmix.colours`,
    and the same three slices as `plotSlices` are drawn from them.

    Parameters
    ----------
    plotFrom : str
        Root data directory (also used as the save path).
    time : float
        Simulation time stamp (shadows the module-level `time` constant).
    dorsnr, rosnr, rosnr2 : int
        DV (x), RC (z) and RC (y) slice indices.
    save : bool
        Write PNGs under plotFrom + '/allPictures/' when True.
    """
    dataPath = plotFrom
    # NOTE(review): wntDir/shhDir/mixDir/allDir and baseLevels are assigned
    # but never used below.
    wntDir = plotFrom + '/Wnt/'
    shhDir = plotFrom + '/Shh/'
    rostroDir = plotFrom + '/rostro/'
    dorsoDir = plotFrom + '/dorso/'
    mixDir = plotFrom + '/Mix/'
    baseLevels = np.load(plotFrom + '/BaseLevels.npy')
    allDir = plotFrom + '/allPictures/'
    # Dorso-ventral winner arrays: load cached comparison or rebuild it.
    pFile = dorsoDir + 'T%1.1f' %time + '_P'
    oFile = dorsoDir + 'T%1.1f' %time + '_O'
    nFile = dorsoDir + 'T%1.1f' %time + '_N'
    dcFile = dorsoDir + 'T%1.1f' %time + '_dComp.npy'
    pArray =np.load(pFile +'.npy')
    oArray =np.load(oFile +'.npy')
    nArray =np.load(nFile +'.npy')
    if os.path.isfile(dcFile):
        dComp = np.load(dcFile)
    else:
        dComp = compare([pArray,oArray,nArray])
        np.save(dcFile,dComp)
    # Rostro-caudal winner arrays, same caching scheme.
    fbFile = rostroDir + 'T%1.1f' %time + '_FB'
    mbFile = rostroDir + 'T%1.1f' %time + '_MB'
    hbFile = rostroDir + 'T%1.1f' %time + '_HB'
    rcFile = rostroDir + 'T%1.1f' %time + '_rComp.npy'
    fbArray =np.load(fbFile +'.npy')
    mbArray =np.load(mbFile +'.npy')
    hbArray =np.load(hbFile +'.npy')
    if os.path.isfile(rcFile):
        rComp = np.load(rcFile)
    else:
        rComp = compare([fbArray,mbArray,hbArray])
        np.save(rcFile,rComp)
    dimX = len(rComp[0])
    dimY = len(rComp[0][0])
    dimZ = len(rComp[0][0][0])
    # One binary 3-D mask per (DV domain, RC domain) combination.
    mixArray = np.zeros((len(colmix.colours),dimX,dimY,dimZ))
    i=0
    for pon in dComp:
        for fbmbhb in rComp:
            # Coordinates where each domain is non-zero, intersected via
            # their string representations.
            an = np.transpose(np.nonzero(pon))
            bn = np.transpose(np.nonzero(fbmbhb))
            anl = an.tolist()
            bnl = bn.tolist()
            incommon = set(str(x) for x in anl) & set(str(y) for y in bnl)
            # The comprehension variable `i` shadows the mix-layer counter
            # only inside the comprehension scope (safe in Python 3).
            incommon = np.asarray([ast.literal_eval(i) for i in incommon])
            for coord in incommon:
                #print(coord)
                mixArray[i][coord[0]][coord[1]][coord[2]] = 1
            i+=1
    # NaN marks "not in this combination" so masked plotting skips it.
    mixArray[mixArray==0] = np.nan
    #plt.close("all")
    fug = plt.figure(figsize = [7.5, 8])
    fag = plt.figure(figsize = [10, 14])
    axDors = fug.add_subplot(1,1,1)
    axRos = fag.add_subplot(1,2,1)
    axRos2 = fag.add_subplot(1,2,2)
    # Tick labels convert grid indices to micrometres (1 cell = 10/scale um).
    axDors.set_title("DV slice at \n x = %d µm, t = %1.1f " %(dorsnr*10/scale, time))
    axDors.set_xlabel("y [µm]")
    dxticks = np.arange(0,20,5)
    axDors.xaxis.set_ticks(dxticks)
    axDors.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in dxticks])
    dyticks = np.arange(0,65,10)
    axDors.yaxis.set_ticks(dyticks)
    axDors.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in dyticks])
    axDors.set_ylabel("z [µm]")
    axRos.set_title("RC slice at \n z = %d µm, t = %1.1f " %(rosnr*10/scale, time))
    rxticks = dxticks
    axRos.xaxis.set_ticks(rxticks)
    axRos.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in rxticks])
    ryticks = np.arange(0,65,10)
    axRos.yaxis.set_ticks(ryticks)
    axRos.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in ryticks])
    axRos.set_xlabel("y [µm]")
    axRos.set_ylabel("x [µm]")
    # NOTE(review): title uses `rosnr`, but this panel is sliced at `rosnr2`
    # below -- the position label is probably wrong; confirm intent.
    axRos2.set_title("RC slice at \n y = %d µm, t = %1.1f " %(rosnr*10/scale, time))
    r2xticks = np.arange(0,65,10)
    axRos2.xaxis.set_ticks(r2xticks)
    axRos2.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in r2xticks])
    r2yticks = np.arange(0,65,10)
    axRos2.yaxis.set_ticks(r2yticks)
    axRos2.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in r2yticks])
    axRos2.set_xlabel("z [µm]")
    axRos2.set_ylabel("x [µm]")
    for axtype in ['rostro','dorso']:
        for i in range(len(mixArray)):
            #for i in range(3):
            colours = colmix.colours[i]
            #colours2 = colmix.colours[i+1]
            # Flat (single-colour) colormap for this combination.
            myCmap = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapP',[colours,colours],256)
            #myCmap2 = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapP',['white',colours2],256)
            print(i, colours)
            if axtype == 'dorso':
                size = 500
                ax = axDors
                arr = getMixCut(axtype,mixArray[i],s=dorsnr)
                arr=(np.flip(np.transpose(arr),axis=1))[:-1,1:]
                cut = np.ma.masked_where(np.isnan(arr),arr)
                #cut= np.flip(cut)
                ax.set_aspect('equal')
            if axtype == 'rostro':
                size =100
                ax=axRos
                ax2=axRos2
                ax.set_aspect('equal')
                ax2.set_aspect('equal')
                arr= getMixCut('rostro',mixArray[i],s=rosnr)
                arr2=getMixCut('rostro2',mixArray[i],s=rosnr2)
                cut= np.ma.masked_where(np.isnan(arr),arr)
                cut2 = np.ma.masked_where(np.isnan(arr2),arr2)
                cut2 = (np.flip(np.transpose(cut2),axis=1))
                cut2= cut2[:,1:]
#            ax1.imshow(cut[0],cmap=cmap1)
#            ax1.imshow(cut[1],cmap=cmap2)
#            ax1.imshow(cut[2],cmap=cmap3)
            if axtype == 'rostro':
                print(cut[:-1,:-1])
                ax.pcolor(cut[:-1,:-1],cmap=myCmap)
                ax2.pcolor(cut2[:-1,:-1],cmap=myCmap)
#                rcut0 = ndimage.rotate(cut[0], 90)
#                rcut1 = ndimage.rotate(cut[1], 90)
#                rcut2 = ndimage.rotate(cut[2], 90)
#                ax.imshow(rcut0,interpolation='nearest',cmap=cmap1)
#                ax.imshow(rcut1,interpolation='nearest',cmap=cmap2)
#                ax.imshow(rcut2,interpolation='nearest',cmap=cmap3)
            if axtype == 'dorso':
                print("DORSO")
                # NOTE(review): `rcut` is computed but never used -- the
                # unrotated `cut` is plotted below; confirm which is intended.
                rcut = ndimage.rotate(cut, -90)
                ax.pcolor(cut,cmap=myCmap)
    if save ==True:
        fug.savefig(dataPath + '/allPictures/T%1.1f_DV%d_Mix.png' %(time,dorsnr) )
        fag.savefig(dataPath + '/allPictures/T%1.1f_RC%d_%d_Mix.png' %(time,rosnr,rosnr2) )
def getMixCut(axis,mixArray_i,s=0):
    """Slice one mix 3-D array down to 2-D along the requested axis.

    'dorso' fixes the first index, 'rostro' the last, 'rostro2' the middle
    one.  Prints the slice index / array size and the resulting shape,
    exactly like the original (kept for parity).
    """
    print(s, mixArray_i.size)
    if axis == 'dorso':
        arrA = mixArray_i[s, :, :]
    elif axis == 'rostro':
        arrA = mixArray_i[:, :, s]
    elif axis == 'rostro2':
        arrA = mixArray_i[:, s, :]
    print(arrA.shape)
    return arrA
def test():
    """Smoke test: overlay two complementary masked pcolor layers.

    Splits a random 10x10 field at 0.5, masks each half with NaNs, and
    draws the halves separately (rows 1 and 2) and overlaid (row 3) using
    the module-level cmapP/cmapO colormaps.
    """
    plt.close("all")
    figure = plt.figure()
    top = figure.add_subplot(3, 1, 1)
    bottom = figure.add_subplot(3, 1, 3)
    middle = figure.add_subplot(3, 1, 2)
    values = np.random.rand(10, 10)
    complement = np.copy(values)
    values[values <= 0.5] = np.nan
    complement[complement > 0.5] = np.nan
    print(values)
    high_mask = np.ma.masked_where(np.isnan(values), values)
    low_mask = np.ma.masked_where(np.isnan(complement), complement)
    top.pcolor(high_mask, cmap=cmapP)
    middle.pcolor(low_mask, cmap=cmapO)
    bottom.pcolor(high_mask, cmap=cmapP)
    bottom.pcolor(low_mask, cmap=cmapO)
#animation = FuncAnimation(fig, update, interval=700)
# Script entry: render the static slice figures for one time point.
myt= 8000
dnr=10
rnr=5
rnr2=4
# plotSlices args: (time, DV slice index, RC-z index, RC-y index).
plotSlices(myt,dnr,rnr,rnr2,save=True,plotmethod='square')
plotSliceMix(StaticDataPath,myt,dnr,rnr,rnr2)
#(StaticDataPath, 4000.0, 10,5,4)
#test()
plt.show()
"numpy.random.rand",
"matplotlib.colors.colorConverter.to_rgba",
"scipy.ndimage.rotate",
"numpy.save",
"numpy.arange",
"numpy.where",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.linspace",
"matplotlib.cm.ScalarMappable",
"matplotlib.rcParams.update",
"numpy.argmax",
"os.path.isfile",
"a... | [((370, 415), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 18}"], {}), "({'font.size': 18})\n", (396, 415), False, 'import matplotlib\n'), ((759, 798), 'matplotlib.colors.colorConverter.to_rgba', 'colorConverter.to_rgba', (['dors.colours[0]'], {}), '(dors.colours[0])\n', (781, 798), False, 'from matplotlib.colors import colorConverter\n'), ((808, 847), 'matplotlib.colors.colorConverter.to_rgba', 'colorConverter.to_rgba', (['dors.colours[1]'], {}), '(dors.colours[1])\n', (830, 847), False, 'from matplotlib.colors import colorConverter\n'), ((857, 896), 'matplotlib.colors.colorConverter.to_rgba', 'colorConverter.to_rgba', (['dors.colours[2]'], {}), '(dors.colours[2])\n', (879, 896), False, 'from matplotlib.colors import colorConverter\n'), ((941, 1030), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'matplotlib.colors.LinearSegmentedColormap.from_list', (['"""my_cmapP"""', '[white, colorP]', '(256)'], {}), "('my_cmapP', [white,\n colorP], 256)\n", (992, 1030), False, 'import matplotlib\n'), ((1032, 1121), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'matplotlib.colors.LinearSegmentedColormap.from_list', (['"""my_cmapO"""', '[white, colorO]', '(256)'], {}), "('my_cmapO', [white,\n colorO], 256)\n", (1083, 1121), False, 'import matplotlib\n'), ((1123, 1212), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'matplotlib.colors.LinearSegmentedColormap.from_list', (['"""my_cmapN"""', '[white, colorN]', '(256)'], {}), "('my_cmapN', [white,\n colorN], 256)\n", (1174, 1212), False, 'import matplotlib\n'), ((1426, 1458), 'numpy.linspace', 'np.linspace', (['(0)', '(0.8)', '(cmapO.N + 3)'], {}), '(0, 0.8, cmapO.N + 3)\n', (1437, 1458), True, 'import numpy as np\n'), ((1563, 1601), 'matplotlib.colors.colorConverter.to_rgba', 'colorConverter.to_rgba', (['ros.colours[0]'], {}), '(ros.colours[0])\n', (1585, 1601), False, 'from matplotlib.colors import colorConverter\n'), ((1612, 1650), 
'matplotlib.colors.colorConverter.to_rgba', 'colorConverter.to_rgba', (['ros.colours[1]'], {}), '(ros.colours[1])\n', (1634, 1650), False, 'from matplotlib.colors import colorConverter\n'), ((1661, 1699), 'matplotlib.colors.colorConverter.to_rgba', 'colorConverter.to_rgba', (['ros.colours[2]'], {}), '(ros.colours[2])\n', (1683, 1699), False, 'from matplotlib.colors import colorConverter\n'), ((1745, 1836), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'matplotlib.colors.LinearSegmentedColormap.from_list', (['"""my_cmapFB"""', '[white, colorFB]', '(256)'], {}), "('my_cmapFB', [white,\n colorFB], 256)\n", (1796, 1836), False, 'import matplotlib\n'), ((1839, 1930), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'matplotlib.colors.LinearSegmentedColormap.from_list', (['"""my_cmapMB"""', '[white, colorMB]', '(256)'], {}), "('my_cmapMB', [white,\n colorMB], 256)\n", (1890, 1930), False, 'import matplotlib\n'), ((1933, 2024), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'matplotlib.colors.LinearSegmentedColormap.from_list', (['"""my_cmapHB"""', '[white, colorHB]', '(256)'], {}), "('my_cmapHB', [white,\n colorHB], 256)\n", (1984, 2024), False, 'import matplotlib\n'), ((2241, 2274), 'numpy.linspace', 'np.linspace', (['(0)', '(0.8)', '(cmapMB.N + 3)'], {}), '(0, 0.8, cmapMB.N + 3)\n', (2252, 2274), True, 'import numpy as np\n'), ((19612, 19622), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19620, 19622), True, 'import matplotlib.pyplot as plt\n'), ((576, 604), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[6.5, 8]'}), '(figsize=[6.5, 8])\n', (586, 604), True, 'import matplotlib.pyplot as plt\n'), ((638, 665), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[4, 14]'}), '(figsize=[4, 14])\n', (648, 665), True, 'import matplotlib.pyplot as plt\n'), ((2492, 2515), 'numpy.zeros_like', 'np.zeros_like', (['matrices'], {}), '(matrices)\n', (2505, 2515), True, 'import numpy as np\n'), ((6523, 6563), 
'matplotlib.cm.ScalarMappable', 'matplotlib.cm.ScalarMappable', ([], {'cmap': 'cmap1'}), '(cmap=cmap1)\n', (6551, 6563), False, 'import matplotlib\n'), ((6578, 6618), 'matplotlib.cm.ScalarMappable', 'matplotlib.cm.ScalarMappable', ([], {'cmap': 'cmap2'}), '(cmap=cmap2)\n', (6606, 6618), False, 'import matplotlib\n'), ((6633, 6673), 'matplotlib.cm.ScalarMappable', 'matplotlib.cm.ScalarMappable', ([], {'cmap': 'cmap3'}), '(cmap=cmap3)\n', (6661, 6673), False, 'import matplotlib\n'), ((6683, 6699), 'numpy.where', 'np.where', (['cut[0]'], {}), '(cut[0])\n', (6691, 6699), True, 'import numpy as np\n'), ((6750, 6766), 'numpy.where', 'np.where', (['cut[1]'], {}), '(cut[1])\n', (6758, 6766), True, 'import numpy as np\n'), ((6817, 6833), 'numpy.where', 'np.where', (['cut[2]'], {}), '(cut[2])\n', (6825, 6833), True, 'import numpy as np\n'), ((7395, 7411), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (7404, 7411), True, 'import matplotlib.pyplot as plt\n'), ((7427, 7455), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[7.5, 8]'}), '(figsize=[7.5, 8])\n', (7437, 7455), True, 'import matplotlib.pyplot as plt\n'), ((7468, 7496), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[10, 14]'}), '(figsize=[10, 14])\n', (7478, 7496), True, 'import matplotlib.pyplot as plt\n'), ((7744, 7763), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(5)'], {}), '(0, 20, 5)\n', (7753, 7763), True, 'import numpy as np\n'), ((7887, 7907), 'numpy.arange', 'np.arange', (['(0)', '(65)', '(10)'], {}), '(0, 65, 10)\n', (7896, 7907), True, 'import numpy as np\n'), ((8276, 8296), 'numpy.arange', 'np.arange', (['(0)', '(65)', '(10)'], {}), '(0, 65, 10)\n', (8285, 8296), True, 'import numpy as np\n'), ((8572, 8592), 'numpy.arange', 'np.arange', (['(0)', '(65)', '(10)'], {}), '(0, 65, 10)\n', (8581, 8592), True, 'import numpy as np\n'), ((8716, 8736), 'numpy.arange', 'np.arange', (['(0)', '(65)', '(10)'], {}), '(0, 65, 10)\n', (8725, 8736), True, 'import 
numpy as np\n'), ((12663, 12700), 'numpy.load', 'np.load', (["(plotFrom + '/BaseLevels.npy')"], {}), "(plotFrom + '/BaseLevels.npy')\n", (12670, 12700), True, 'import numpy as np\n'), ((12957, 12980), 'numpy.load', 'np.load', (["(pFile + '.npy')"], {}), "(pFile + '.npy')\n", (12964, 12980), True, 'import numpy as np\n'), ((12992, 13015), 'numpy.load', 'np.load', (["(oFile + '.npy')"], {}), "(oFile + '.npy')\n", (12999, 13015), True, 'import numpy as np\n'), ((13027, 13050), 'numpy.load', 'np.load', (["(nFile + '.npy')"], {}), "(nFile + '.npy')\n", (13034, 13050), True, 'import numpy as np\n'), ((13067, 13089), 'os.path.isfile', 'os.path.isfile', (['dcFile'], {}), '(dcFile)\n', (13081, 13089), False, 'import os\n'), ((13437, 13461), 'numpy.load', 'np.load', (["(fbFile + '.npy')"], {}), "(fbFile + '.npy')\n", (13444, 13461), True, 'import numpy as np\n'), ((13474, 13498), 'numpy.load', 'np.load', (["(mbFile + '.npy')"], {}), "(mbFile + '.npy')\n", (13481, 13498), True, 'import numpy as np\n'), ((13511, 13535), 'numpy.load', 'np.load', (["(hbFile + '.npy')"], {}), "(hbFile + '.npy')\n", (13518, 13535), True, 'import numpy as np\n'), ((13552, 13574), 'os.path.isfile', 'os.path.isfile', (['rcFile'], {}), '(rcFile)\n', (13566, 13574), False, 'import os\n'), ((14478, 14506), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[7.5, 8]'}), '(figsize=[7.5, 8])\n', (14488, 14506), True, 'import matplotlib.pyplot as plt\n'), ((14519, 14547), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[10, 14]'}), '(figsize=[10, 14])\n', (14529, 14547), True, 'import matplotlib.pyplot as plt\n'), ((14795, 14814), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(5)'], {}), '(0, 20, 5)\n', (14804, 14814), True, 'import numpy as np\n'), ((14938, 14958), 'numpy.arange', 'np.arange', (['(0)', '(65)', '(10)'], {}), '(0, 65, 10)\n', (14947, 14958), True, 'import numpy as np\n'), ((15327, 15347), 'numpy.arange', 'np.arange', (['(0)', '(65)', '(10)'], {}), '(0, 65, 10)\n', 
(15336, 15347), True, 'import numpy as np\n'), ((15623, 15643), 'numpy.arange', 'np.arange', (['(0)', '(65)', '(10)'], {}), '(0, 65, 10)\n', (15632, 15643), True, 'import numpy as np\n'), ((15767, 15787), 'numpy.arange', 'np.arange', (['(0)', '(65)', '(10)'], {}), '(0, 65, 10)\n', (15776, 15787), True, 'import numpy as np\n'), ((18900, 18916), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (18909, 18916), True, 'import matplotlib.pyplot as plt\n'), ((18927, 18939), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18937, 18939), True, 'import matplotlib.pyplot as plt\n'), ((19046, 19068), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (19060, 19068), True, 'import numpy as np\n'), ((19079, 19091), 'numpy.copy', 'np.copy', (['arr'], {}), '(arr)\n', (19086, 19091), True, 'import numpy as np\n'), ((3136, 3158), 'os.path.isfile', 'os.path.isfile', (['dcFile'], {}), '(dcFile)\n', (3150, 3158), False, 'import os\n'), ((3911, 3933), 'os.path.isfile', 'os.path.isfile', (['rcFile'], {}), '(rcFile)\n', (3925, 3933), False, 'import os\n'), ((4767, 4789), 'os.path.isfile', 'os.path.isfile', (['rcFile'], {}), '(rcFile)\n', (4781, 4789), False, 'import os\n'), ((13107, 13122), 'numpy.load', 'np.load', (['dcFile'], {}), '(dcFile)\n', (13114, 13122), True, 'import numpy as np\n'), ((13189, 13211), 'numpy.save', 'np.save', (['dcFile', 'dComp'], {}), '(dcFile, dComp)\n', (13196, 13211), True, 'import numpy as np\n'), ((13592, 13607), 'numpy.load', 'np.load', (['rcFile'], {}), '(rcFile)\n', (13599, 13607), True, 'import numpy as np\n'), ((13677, 13699), 'numpy.save', 'np.save', (['rcFile', 'rComp'], {}), '(rcFile, rComp)\n', (13684, 13699), True, 'import numpy as np\n'), ((19195, 19208), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (19203, 19208), True, 'import numpy as np\n'), ((19242, 19256), 'numpy.isnan', 'np.isnan', (['arr2'], {}), '(arr2)\n', (19250, 19256), True, 'import numpy as np\n'), ((3180, 3195), 
'numpy.load', 'np.load', (['dcFile'], {}), '(dcFile)\n', (3187, 3195), True, 'import numpy as np\n'), ((3231, 3245), 'numpy.load', 'np.load', (['pFile'], {}), '(pFile)\n', (3238, 3245), True, 'import numpy as np\n'), ((3267, 3281), 'numpy.load', 'np.load', (['oFile'], {}), '(oFile)\n', (3274, 3281), True, 'import numpy as np\n'), ((3303, 3317), 'numpy.load', 'np.load', (['nFile'], {}), '(nFile)\n', (3310, 3317), True, 'import numpy as np\n'), ((3382, 3404), 'numpy.save', 'np.save', (['dcFile', 'dComp'], {}), '(dcFile, dComp)\n', (3389, 3404), True, 'import numpy as np\n'), ((3955, 3970), 'numpy.load', 'np.load', (['rcFile'], {}), '(rcFile)\n', (3962, 3970), True, 'import numpy as np\n'), ((4007, 4022), 'numpy.load', 'np.load', (['FBFile'], {}), '(FBFile)\n', (4014, 4022), True, 'import numpy as np\n'), ((4045, 4060), 'numpy.load', 'np.load', (['MBFile'], {}), '(MBFile)\n', (4052, 4060), True, 'import numpy as np\n'), ((4083, 4098), 'numpy.load', 'np.load', (['HBFile'], {}), '(HBFile)\n', (4090, 4098), True, 'import numpy as np\n'), ((4166, 4188), 'numpy.save', 'np.save', (['rcFile', 'rComp'], {}), '(rcFile, rComp)\n', (4173, 4188), True, 'import numpy as np\n'), ((4811, 4826), 'numpy.load', 'np.load', (['rcFile'], {}), '(rcFile)\n', (4818, 4826), True, 'import numpy as np\n'), ((4863, 4878), 'numpy.load', 'np.load', (['FBFile'], {}), '(FBFile)\n', (4870, 4878), True, 'import numpy as np\n'), ((4901, 4916), 'numpy.load', 'np.load', (['MBFile'], {}), '(MBFile)\n', (4908, 4916), True, 'import numpy as np\n'), ((4939, 4954), 'numpy.load', 'np.load', (['HBFile'], {}), '(HBFile)\n', (4946, 4954), True, 'import numpy as np\n'), ((5022, 5044), 'numpy.save', 'np.save', (['rcFile', 'rComp'], {}), '(rcFile, rComp)\n', (5029, 5044), True, 'import numpy as np\n'), ((9593, 9633), 'matplotlib.cm.ScalarMappable', 'matplotlib.cm.ScalarMappable', ([], {'cmap': 'cmap1'}), '(cmap=cmap1)\n', (9621, 9633), False, 'import matplotlib\n'), ((9656, 9696), 'matplotlib.cm.ScalarMappable', 
'matplotlib.cm.ScalarMappable', ([], {'cmap': 'cmap2'}), '(cmap=cmap2)\n', (9684, 9696), False, 'import matplotlib\n'), ((9719, 9759), 'matplotlib.cm.ScalarMappable', 'matplotlib.cm.ScalarMappable', ([], {'cmap': 'cmap3'}), '(cmap=cmap3)\n', (9747, 9759), False, 'import matplotlib\n'), ((9785, 9801), 'numpy.where', 'np.where', (['cut[0]'], {}), '(cut[0])\n', (9793, 9801), True, 'import numpy as np\n'), ((9868, 9884), 'numpy.where', 'np.where', (['cut[1]'], {}), '(cut[1])\n', (9876, 9884), True, 'import numpy as np\n'), ((9951, 9967), 'numpy.where', 'np.where', (['cut[2]'], {}), '(cut[2])\n', (9959, 9967), True, 'import numpy as np\n'), ((16190, 16282), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'matplotlib.colors.LinearSegmentedColormap.from_list', (['"""my_cmapP"""', '[colours, colours]', '(256)'], {}), "('my_cmapP', [colours,\n colours], 256)\n", (16241, 16282), False, 'import matplotlib\n'), ((2691, 2715), 'numpy.argmax', 'np.argmax', (['comparevalues'], {}), '(comparevalues)\n', (2700, 2715), True, 'import numpy as np\n'), ((2754, 2775), 'numpy.max', 'np.max', (['comparevalues'], {}), '(comparevalues)\n', (2760, 2775), True, 'import numpy as np\n'), ((11683, 11710), 'scipy.ndimage.rotate', 'ndimage.rotate', (['cut[0]', '(-90)'], {}), '(cut[0], -90)\n', (11697, 11710), False, 'from scipy import ndimage\n'), ((11735, 11762), 'scipy.ndimage.rotate', 'ndimage.rotate', (['cut[1]', '(-90)'], {}), '(cut[1], -90)\n', (11749, 11762), False, 'from scipy import ndimage\n'), ((11787, 11814), 'scipy.ndimage.rotate', 'ndimage.rotate', (['cut[2]', '(-90)'], {}), '(cut[2], -90)\n', (11801, 11814), False, 'from scipy import ndimage\n'), ((13949, 13964), 'numpy.nonzero', 'np.nonzero', (['pon'], {}), '(pon)\n', (13959, 13964), True, 'import numpy as np\n'), ((13996, 14014), 'numpy.nonzero', 'np.nonzero', (['fbmbhb'], {}), '(fbmbhb)\n', (14006, 14014), True, 'import numpy as np\n'), ((18260, 18284), 'scipy.ndimage.rotate', 'ndimage.rotate', (['cut', '(-90)'], {}), 
'(cut, -90)\n', (18274, 18284), False, 'from scipy import ndimage\n'), ((14187, 14206), 'ast.literal_eval', 'ast.literal_eval', (['i'], {}), '(i)\n', (14203, 14206), False, 'import ast\n'), ((16702, 16715), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (16710, 16715), True, 'import numpy as np\n'), ((17200, 17213), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (17208, 17213), True, 'import numpy as np\n'), ((17262, 17276), 'numpy.isnan', 'np.isnan', (['arr2'], {}), '(arr2)\n', (17270, 17276), True, 'import numpy as np\n'), ((17315, 17333), 'numpy.transpose', 'np.transpose', (['cut2'], {}), '(cut2)\n', (17327, 17333), True, 'import numpy as np\n'), ((10860, 10898), 'scipy.ndimage.rotate', 'ndimage.rotate', (['cut2[0][:-1, :-1]', '(-90)'], {}), '(cut2[0][:-1, :-1], -90)\n', (10874, 10898), False, 'from scipy import ndimage\n'), ((10984, 11022), 'scipy.ndimage.rotate', 'ndimage.rotate', (['cut2[1][:-1, :-1]', '(-90)'], {}), '(cut2[1][:-1, :-1], -90)\n', (10998, 11022), False, 'from scipy import ndimage\n'), ((11108, 11146), 'scipy.ndimage.rotate', 'ndimage.rotate', (['cut2[2][:-1, :-1]', '(-90)'], {}), '(cut2[2][:-1, :-1], -90)\n', (11122, 11146), False, 'from scipy import ndimage\n'), ((16626, 16643), 'numpy.transpose', 'np.transpose', (['arr'], {}), '(arr)\n', (16638, 16643), True, 'import numpy as np\n')] |
"""Sub-package containing the matrix class and related functions."""
from defmatrix import *
__all__ = defmatrix.__all__
from numpy.testing import Tester
test = Tester(__file__).test
bench = Tester(__file__).bench
| [
"numpy.testing.Tester"
] | [((163, 179), 'numpy.testing.Tester', 'Tester', (['__file__'], {}), '(__file__)\n', (169, 179), False, 'from numpy.testing import Tester\n'), ((193, 209), 'numpy.testing.Tester', 'Tester', (['__file__'], {}), '(__file__)\n', (199, 209), False, 'from numpy.testing import Tester\n')] |
from abc import ABCMeta, abstractmethod
from time import time
import numpy as np
from copy import deepcopy, copy
from .kpp import KPP, KPPExtension
from .separation import YCliqueSeparator, YZCliqueSeparator, ZCliqueSeparator, ProjectedCliqueSeparator
from .graph import decompose_graph
class KPPAlgorithmResults:
    """Read-only convenience wrapper around the raw output dict of a KPP run."""

    def __init__(self, output):
        # `output` is the dictionary assembled by the algorithm's run().
        self.output = output

    def __getitem__(self, key):
        return self.output[key]

    def algorithm_params(self):
        """Return the parameter dictionary the algorithm ran with."""
        return self.output['params']

    def preprocess_stats(self):
        """Return graph-preprocessing statistics, or None when disabled."""
        if not self.output['params']['preprocess']:
            return None
        wanted = ('preprocess time', 'preprocess components',
                  'largest components')
        return {key: self.output[key] for key in wanted}

    def branch_and_bound_stats(self):
        """Aggregate branch-and-bound statistics (summed over components).

        `optimality` is True only when every solved component reports
        solver status 2 (treated as "solved to optimality").
        """
        wanted = ('optimal value', 'branch and bound time', 'ub', 'lb')
        stats = {}
        if not self.output['params']['preprocess']:
            solution = self.output['solution']
            for key in wanted:
                stats[key] = solution[key]
            stats['optimality'] = solution['status'] == 2
            return stats
        if self.output['preprocess components'] == 0:
            # Preprocessing removed everything: nothing had to be solved.
            stats = {key: 0.0 for key in wanted}
            stats['optimality'] = True
            return stats
        solution = self.output['solution']
        for key in wanted:
            stats[key] = sum(solution[key])
        stats['optimality'] = bool(np.all(np.asarray(solution['status']) == 2))
        return stats
class KPPAlgorithmBase(metaclass=ABCMeta):
    """Template for k-partition algorithms: option handling, the y-cut
    separation phase, and the solve loop with optional graph preprocessing.

    Subclasses implement `solve_single_problem` for one (sub)graph.
    """
    def __init__(self, G, k, **kwargs):
        # Options are popped from a *deepcopy*, so the caller's kwargs dict
        # is never mutated and unknown keys are silently ignored here.
        kwargs = deepcopy(kwargs)
        self.output = dict()
        self.G = G
        self.k = k
        self.params = dict()
        self.params['preprocess'] = kwargs.pop('preprocess', False)
        self.params['y-cut'] = kwargs.pop('y-cut', [])
        self.params['y-cut removal'] = kwargs.pop('y-cut removal', 0)
        self.params['removal slack'] = kwargs.pop('removal slack', 1e-3)
        self.params['symmetry breaking'] = kwargs.pop('symmetry breaking', False)
        self.params['fractional y-cut'] = kwargs.pop('fractional y-cut', False)
        self.verbosity = kwargs.pop('verbosity', 1)
    @abstractmethod
    def solve_single_problem(self, g):
        """Solve the KPP on one (sub)graph; return a dict of statistics."""
        pass
    def y_cut_phase(self, kpp, max_cliques, results):
        """Run the y-clique cutting-plane phase on `kpp`, recording stats
        (constraints added/removed, wall time, LP bound) into `results`."""
        # One separator per configured clique size p.
        for p in self.params['y-cut']:
            kpp.add_separator(YCliqueSeparator(max_cliques, p, self.k))
        start = time()
        results['y-cut constraints added'] = kpp.cut()
        end = time()
        results['y-cut time'] = end - start
        results['y-cut lb'] = kpp.model.objVal
        if self.params['fractional y-cut']:
            res = kpp.add_fractional_cut()
            if self.verbosity > 0:
                if res:
                    print(" Added fractional y-cut")
                else:
                    print(" Fractional y-cut not appropriate")
        if self.params['y-cut removal']:
            # 'y-cut removal' > 1 requests hard removal.
            results["y-cut constraints removed"] = kpp.remove_redundant_constraints(hard=(
                self.params['y-cut removal'] > 1), allowed_slack=self.params['removal slack'])
        # Separators are one-shot; clear them for any later phase.
        kpp.sep_algs.clear()
    def run(self):
        """Solve the problem (optionally per preprocessed component) and
        return a KPPAlgorithmResults wrapping the collected output."""
        self.output['params'] = copy(self.params)
        if self.verbosity > 1:
            print('Solving 2-Level KPP')
            print('Input graph has %d nodes and %d edges' %
                  (self.G.vcount(), self.G.ecount()))
        if self.params['preprocess']:
            start = time()
            graphs = decompose_graph(self.G, self.k)
            end = time()
            self.output['preprocess time'] = end - start
            self.output['preprocess components'] = len(graphs)
            if len(graphs) > 0:
                self.output['largest components'] = max(g.vcount() for g in graphs)
            else:
                self.output['largest components'] = 0
            self.output['solution'] = dict()
            if self.verbosity:
                print('Graph preprocessing yields %d components' % len(graphs))
            for i, g in enumerate(graphs):
                if self.verbosity > 0:
                    print(25 * '-')
                    print('Solving for component %d' % i)
                    print(25 * '-')
                res = self.solve_single_problem(g)
                # Per-key aggregation: the first component initialises the
                # lists, later ones append.  Assumes every component's result
                # dict has the same keys.
                if not self.output['solution']:
                    for k, val in res.items():
                        self.output['solution'][k] = [val]
                else:
                    for k, val in res.items():
                        self.output['solution'][k].append(val)
        else:
            res = self.solve_single_problem(self.G)
            self.output['solution'] = res
        return KPPAlgorithmResults(self.output)
class KPPBasicAlgorithm(KPPAlgorithmBase):
def __init__(self, G, k, x_coefs=None, **kwargs):
KPPAlgorithmBase.__init__(self, G, k, **kwargs)
self.x_coefs = x_coefs
if x_coefs and self.params['preprocess']:
raise ArgumentError(
'Cannot set x coefficients when preprocessing is enabled')
self.params['x-cut'] = kwargs.pop('x-cut', [])
self.params['x-cut colours'] = kwargs.pop('x-cut colours', [])
self.params['x-cut removal'] = kwargs.pop('x-cut removal', 0)
# Gurobi parameters
self.gurobi_params = kwargs
def x_cut_phase(self, kpp, max_cliques, results):
for p in self.params['x-cut']:
for colours in self.params['x-cut colours']:
kpp.add_separator(ProjectedCliqueSeparator(max_cliques, p,
kpp.num_colours(), colours))
start = time()
results['x-cut constraints added'] = kpp.cut()
end = time()
results['x-cut time'] = end - start
results['x-cut lb'] = kpp.model.objVal
if self.params['x-cut removal']:
results["x-cut constraints removed"] = kpp.remove_redundant_constraints(hard=(
self.params['x-cut removal'] > 1), allowed_slack=self.params['removal slack'])
kpp.sep_algs.clear()
def solve_single_problem(self, g):
if self.verbosity > 0:
print("Running exact solution algorithm")
results = dict()
results['nodes'] = g.vcount()
results['edges'] = g.ecount()
kpp = KPP(g, self.k, x_coefs=self.x_coefs, verbosity=self.verbosity)
for (key, val) in self.gurobi_params.items():
kpp.model.setParam(key, val)
if self.params['y-cut']:
max_cliques = g.maximal_cliques()
results["clique number"] = max(len(nodes) for nodes in max_cliques)
self.y_cut_phase(kpp, max_cliques, results)
kpp.add_node_variables()
if self.params['x-cut']:
self.x_cut_phase(kpp, max_cliques, results)
if self.params['symmetry breaking']:
kpp.break_symmetry()
kpp.solve()
if self.verbosity > 0:
print('')
results["optimality gap"] = kpp.model.MIPGap
results["status"] = kpp.model.Status
results["branch and bound time"] = kpp.model.Runtime
if results["status"] == 2:
results["optimal value"] = kpp.model.objVal
else:
results["optimal value"] = np.NaN
if kpp.model.SolCount > 0:
results["ub"] = kpp.model.objVal
else:
results["ub"] = np.Inf
results["lb"] = kpp.model.objBound
results["branch and bound nodes"] = int(kpp.model.NodeCount)
return results
class KPPAlgorithm(KPPAlgorithmBase):
def __init__(self, G, k, k2, **kwargs):
KPPAlgorithmBase.__init__(self, G, k, **kwargs)
self.k2 = k2
self.params['yz-cut'] = kwargs.pop('yz-cut', [])
self.params['yz-cut removal'] = kwargs.pop('yz-cut removal', 0)
self.params['z-cut'] = kwargs.pop('z-cut', [])
self.params['z-cut removal'] = kwargs.pop('z-cut removal', 0)
# Gurobi parameters
self.gurobi_params = kwargs
def solve_single_problem(self, g):
if self.verbosity > 0:
print("Running exact solution algorithm")
results = dict()
results['nodes'] = g.vcount()
results['edges'] = g.ecount()
kpp = KPPExtension(g, self.k, self.k2, verbosity=self.verbosity)
for (key, val) in self.gurobi_params.items():
kpp.model.setParam(key, val)
if self.params['y-cut'] or self.params['yz-cut'] or self.params['z-cut']:
max_cliques = g.maximal_cliques()
results["clique number"] = max(len(nodes) for nodes in max_cliques)
if self.params['y-cut']:
self.y_cut_phase(kpp, max_cliques, results)
else:
results['y-cut time'] = 0.0
results['y-cut lb'] = 0.0
results['y-cut constraints added'] = 0
results['y-cut constraints removed'] = 0
kpp.add_z_variables()
if self.params['yz-cut']:
for p in self.params['yz-cut']:
kpp.add_separator(YZCliqueSeparator(max_cliques, p, self.k, self.k2))
start = time()
results['yz-cut constraints added'] = kpp.cut()
end = time()
results['yz-cut time'] = end - start
results['yz-cut lb'] = kpp.model.objVal
if self.params['yz-cut removal']:
results["yz-cut constraints removed"] = kpp.remove_redundant_constraints(
hard=(self.params['yz-cut removal']), allowed_slack=self.params['removal slack'])
kpp.sep_algs.clear()
else:
results['yz-cut time'] = 0.0
results['yz-cut lb'] = results['y-cut lb']
results['yz-cut constraints added'] = 0
results['yz-cut constraints removed'] = 0
if self.params['z-cut']:
for p in self.params['z-cut']:
kpp.add_separator(ZCliqueSeparator(max_cliques, p, self.k, self.k2))
start = time()
results['z-cut constraints added'] = kpp.cut()
end = time()
results['z-cut time'] = end - start
results['z-cut lb'] = kpp.model.objVal
if self.params['z-cut removal']:
results["z-cut constraints removed"] = kpp.remove_redundant_constraints(
hard=(self.params['z-cut removal']), allowed_slack=self.params['removal slack'])
kpp.sep_algs.clear()
else:
results['z-cut time'] = 0.0
results['z-cut lb'] = results['yz-cut lb']
results['z-cut constraints added'] = 0
results['z-cut constraints removed'] = 0
kpp.add_node_variables()
if self.params['symmetry breaking']:
kpp.break_symmetry()
kpp.solve()
if self.verbosity > 0:
print('')
results["optimality gap"] = kpp.model.MIPGap
results["status"] = kpp.model.Status
if results["status"] == 2:
results["optimal value"] = kpp.model.objVal
results["branch and bound time"] = kpp.model.Runtime
else:
results["optimal value"] = np.NaN
results["branch and bound time"] = np.NaN
results["branch and bound nodes"] = int(kpp.model.NodeCount)
return results
| [
"numpy.array",
"copy.copy",
"time.time",
"copy.deepcopy"
] | [((1504, 1520), 'copy.deepcopy', 'deepcopy', (['kwargs'], {}), '(kwargs)\n', (1512, 1520), False, 'from copy import deepcopy, copy\n'), ((2286, 2292), 'time.time', 'time', ([], {}), '()\n', (2290, 2292), False, 'from time import time\n'), ((2354, 2360), 'time.time', 'time', ([], {}), '()\n', (2358, 2360), False, 'from time import time\n'), ((2958, 2975), 'copy.copy', 'copy', (['self.params'], {}), '(self.params)\n', (2962, 2975), False, 'from copy import deepcopy, copy\n'), ((5100, 5106), 'time.time', 'time', ([], {}), '()\n', (5104, 5106), False, 'from time import time\n'), ((5168, 5174), 'time.time', 'time', ([], {}), '()\n', (5172, 5174), False, 'from time import time\n'), ((3188, 3194), 'time.time', 'time', ([], {}), '()\n', (3192, 3194), False, 'from time import time\n'), ((3254, 3260), 'time.time', 'time', ([], {}), '()\n', (3258, 3260), False, 'from time import time\n'), ((8228, 8234), 'time.time', 'time', ([], {}), '()\n', (8232, 8234), False, 'from time import time\n'), ((8301, 8307), 'time.time', 'time', ([], {}), '()\n', (8305, 8307), False, 'from time import time\n'), ((8987, 8993), 'time.time', 'time', ([], {}), '()\n', (8991, 8993), False, 'from time import time\n'), ((9059, 9065), 'time.time', 'time', ([], {}), '()\n', (9063, 9065), False, 'from time import time\n'), ((1114, 1157), 'numpy.array', 'np.array', (["self.output['solution']['status']"], {}), "(self.output['solution']['status'])\n", (1122, 1157), True, 'import numpy as np\n')] |
###
#
# Usage: python script.py /path/to/parameters/file.json /path/to/model/output/directory
#
# Description: Script for generating a basic fully convolutional network with blocks but no pass through or gates. We
# use padding='valid' instead of 'causal' to avoid non-contextual learning within training samples.
#
# The output model will be saved to
#
# /path/to/model/output/directory
# Model parameters should include either a 'filters' key with a list or list of lists describing the absolute numbers
# of filters in each block's layer, or a filter_increments list or list of lists describing the additional filters
# relative to the last layer.
# 'default_activation' should be used to specify the default activation function to use, such as 'relu' or 'elu', when
# a filter entry does not specifically specify one.
###
import pickle
import sys
import numpy as np
np.random.seed(0) # should ensure consistent model weight initializations
from tensorflow.keras.layers import Input, Conv1D, Activation, LayerNormalization
from tensorflow.keras.models import Model
from tensorflow.keras.initializers import Constant, he_normal
def main():
arguments = sys.argv
if len(arguments) != 3:
print("Rerun with the proper arguments. Example usage:\n")
print(" $ python script.py /path/to/parameters/file.json /path/to/model/output/directory")
print()
return
print(sys.argv)
parameters_file_path = sys.argv[1]
model_output_path = sys.argv[2]
with open(parameters_file_path, 'rb') as params_file:
model_params = pickle.load(params_file)
if 'filters' in model_params:
filters = model_params['filters']
if not isinstance(filters[0], list):
filters = [filters]
else:
filters = []
filter_increments = model_params['filter_increments']
if not isinstance(filter_increments[0], list):
filter_increments = [filter_increments]
filter_count = 0
for filter_increments_list in filter_increments:
filters_list = []
for filter_increment in filter_increments_list:
filter_count += filter_increment
filters_list.append(filter_count)
filters.append(filters_list)
default_activation = model_params.get('default_activation', 'relu')
def default_dilation_function(layer_index):
return 2 ** layer_index
use_calibrated_output_bias = model_params.get('use_calibrated_output_bias', False)
if use_calibrated_output_bias:
output_bias_initializer = Constant(-3.2) # This will default to P(note=1) = 0.04, which is a normal base rate
else:
output_bias_initializer = "zeros"
default_kernel_initializer = model_params.get('default_kernel_initializer', 'glorot_uniform')
if default_kernel_initializer == 'he_normal':
default_initializer = he_normal
use_layer_normalization = model_params.get("use_layer_normalization", False)
######################
### MODEL BUILDING ###
######################
inputs = Input(shape=(None, 1))
conv = inputs
for block_id in range(0, len(filters)):
block_filters = filters[block_id]
for i in range(0, len(block_filters)):
conv = Conv1D(filters=block_filters[i],
kernel_size=2,
strides=1,
dilation_rate=default_dilation_function(i),
padding='valid',
kernel_initializer=default_kernel_initializer)(conv)
if use_layer_normalization:
conv = LayerNormalization(axis=-1)(conv)
conv = Activation(default_activation)(conv)
outputs = Conv1D(filters=1,
kernel_size=1,
strides=1,
dilation_rate=1,
padding='valid',
kernel_initializer=default_kernel_initializer,
bias_initializer=output_bias_initializer)(conv)
outputs = Activation('sigmoid')(outputs)
model = Model(inputs=inputs, outputs=outputs)
model.save(model_output_path)
if __name__ == '__main__':
main()
| [
"tensorflow.keras.layers.Input",
"pickle.load",
"tensorflow.keras.initializers.Constant",
"numpy.random.seed",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.layers.LayerNormalization"
] | [((917, 934), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (931, 934), True, 'import numpy as np\n'), ((3133, 3155), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, 1)'}), '(shape=(None, 1))\n', (3138, 3155), False, 'from tensorflow.keras.layers import Input, Conv1D, Activation, LayerNormalization\n'), ((4159, 4196), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), '(inputs=inputs, outputs=outputs)\n', (4164, 4196), False, 'from tensorflow.keras.models import Model\n'), ((1622, 1646), 'pickle.load', 'pickle.load', (['params_file'], {}), '(params_file)\n', (1633, 1646), False, 'import pickle\n'), ((2629, 2643), 'tensorflow.keras.initializers.Constant', 'Constant', (['(-3.2)'], {}), '(-3.2)\n', (2637, 2643), False, 'from tensorflow.keras.initializers import Constant, he_normal\n'), ((3802, 3977), 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(1)', 'kernel_size': '(1)', 'strides': '(1)', 'dilation_rate': '(1)', 'padding': '"""valid"""', 'kernel_initializer': 'default_kernel_initializer', 'bias_initializer': 'output_bias_initializer'}), "(filters=1, kernel_size=1, strides=1, dilation_rate=1, padding=\n 'valid', kernel_initializer=default_kernel_initializer,\n bias_initializer=output_bias_initializer)\n", (3808, 3977), False, 'from tensorflow.keras.layers import Input, Conv1D, Activation, LayerNormalization\n'), ((4115, 4136), 'tensorflow.keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (4125, 4136), False, 'from tensorflow.keras.layers import Input, Conv1D, Activation, LayerNormalization\n'), ((3749, 3779), 'tensorflow.keras.layers.Activation', 'Activation', (['default_activation'], {}), '(default_activation)\n', (3759, 3779), False, 'from tensorflow.keras.layers import Input, Conv1D, Activation, LayerNormalization\n'), ((3695, 3722), 'tensorflow.keras.layers.LayerNormalization', 'LayerNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', 
(3713, 3722), False, 'from tensorflow.keras.layers import Input, Conv1D, Activation, LayerNormalization\n')] |
import numpy as np
from scipy.sparse import linalg
def weighted_mean(x, w):
# numpy.average can do the same computation
assert(x.shape == w.shape)
s = w.sum()
if s == 0:
raise ValueError("Sum of weights is zero")
return (x * w).sum() / s
def get_solver_(method, **kwargs):
def lstsq(A, b):
x, _, _, _ = np.linalg.lstsq(A, b, **kwargs)
return x
def cg(A, b):
x, _ = linalg.cg(np.dot(A.T, A), np.dot(A.T, b), **kwargs)
return x
if method == "lstsq":
return lstsq
if method == "cg":
return cg
def solve_linear_equation(A, b, weights=None, method="lstsq", **kwargs):
solve = get_solver_(method)
assert(A.shape[0] == b.shape[0])
if weights is None:
return solve(A, b)
assert(A.shape[0] == weights.shape[0])
w = np.sqrt(weights)
b = b * w
A = A * w.reshape(-1, 1)
return solve(A, b)
| [
"numpy.dot",
"numpy.sqrt",
"numpy.linalg.lstsq"
] | [((841, 857), 'numpy.sqrt', 'np.sqrt', (['weights'], {}), '(weights)\n', (848, 857), True, 'import numpy as np\n'), ((349, 380), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'b'], {}), '(A, b, **kwargs)\n', (364, 380), True, 'import numpy as np\n'), ((442, 456), 'numpy.dot', 'np.dot', (['A.T', 'A'], {}), '(A.T, A)\n', (448, 456), True, 'import numpy as np\n'), ((458, 472), 'numpy.dot', 'np.dot', (['A.T', 'b'], {}), '(A.T, b)\n', (464, 472), True, 'import numpy as np\n')] |
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2 # 导入需要的库
import numpy as np
camera = PiCamera()
camera.resolution = (640, 480) # 设置分辨率
camera.framerate = 32 # 设置帧率
rawCapture = PiRGBArray(camera, size=(640, 480))
time.sleep(0.1) # 等待摄像头模块初始化
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
frame = frame.array
hsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV) # 转换颜色空间
print(hsv[240][320])
# 通过颜色设计模板
image_mask=cv2.inRange(hsv,np.array([0,0,0]), np.array([50,255,255]))
# 计算输出图像
output=cv2.bitwise_and(frame,frame,mask=image_mask)
cv2.imshow('Original',frame) # 显示原始图像
cv2.imshow('Output',output) # 显示输出图像
key = cv2.waitKey(1) & 0xFF # 等待按键
rawCapture.truncate(0) # 准备下一副图像
if key == ord("q"):
break | [
"cv2.bitwise_and",
"picamera.PiCamera",
"time.sleep",
"cv2.imshow",
"numpy.array",
"cv2.cvtColor",
"picamera.array.PiRGBArray",
"cv2.waitKey"
] | [((129, 139), 'picamera.PiCamera', 'PiCamera', ([], {}), '()\n', (137, 139), False, 'from picamera import PiCamera\n'), ((221, 256), 'picamera.array.PiRGBArray', 'PiRGBArray', (['camera'], {'size': '(640, 480)'}), '(camera, size=(640, 480))\n', (231, 256), False, 'from picamera.array import PiRGBArray\n'), ((257, 272), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (267, 272), False, 'import time\n'), ((406, 444), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (418, 444), False, 'import cv2\n'), ((591, 637), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'image_mask'}), '(frame, frame, mask=image_mask)\n', (606, 637), False, 'import cv2\n'), ((640, 669), 'cv2.imshow', 'cv2.imshow', (['"""Original"""', 'frame'], {}), "('Original', frame)\n", (650, 669), False, 'import cv2\n'), ((682, 710), 'cv2.imshow', 'cv2.imshow', (['"""Output"""', 'output'], {}), "('Output', output)\n", (692, 710), False, 'import cv2\n'), ((524, 543), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (532, 543), True, 'import numpy as np\n'), ((543, 567), 'numpy.array', 'np.array', (['[50, 255, 255]'], {}), '([50, 255, 255])\n', (551, 567), True, 'import numpy as np\n'), ((729, 743), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (740, 743), False, 'import cv2\n')] |
import argparse
import gc
from pathlib import Path
import h5py
import numpy as np
import torch
import torch.nn.functional as F
from torchvision import transforms
from tqdm import tqdm
from model import MultiHeadResNet, MultiHeadEffNet, MultiHeadMaskEncoder
from utils import ImageDataset, norm
def parse_args():
parser = argparse.ArgumentParser(description="Extract global features from image to form a feature database.")
parser.add_argument("--model-name", type=str, default='resnet152', help="model name (default:resnet152)")
parser.add_argument("--dataset", type=str, default='/data/wangjiawei/dataset/AIMeetsBeauty',
help='The path of training set (default: /data/wangjiawei/dataset/AIMeetsBeauty)')
parser.add_argument("--model-ckpt", type=str, default='./ckpts/resnet152.pkl',
help="The path of feature set (default:./ckpts/resnet152.pkl")
parser.add_argument("--output-feature-path", type=str, default='./feature/resnet152.hdf5',
help="The path of feature set (default:./feature/resnet152.hdf5)")
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print(args)
# Load models
print('Loading Model')
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model_name = args.model_name
if model_name.startswith('efficientnet'):
embed = MultiHeadEffNet(model=model_name, pretrained=False)
elif model_name.startswith('resnet'):
embed = MultiHeadResNet(model=model_name, pretrained=False)
mask_encoder = MultiHeadMaskEncoder(model=embed.model, name=model_name)
if args.model_ckpt:
checkpoint = torch.load(args.model_ckpt, map_location='cpu')
embed.load_state_dict(checkpoint['embed'])
mask_encoder.load_state_dict(checkpoint['mask_encoder'])
print('Load state dict.')
embed.to(device)
embed.eval()
mask_encoder.to(device)
mask_encoder.eval()
print('Done')
# Load dataset
print('Loading dataset...')
SIZE = 224
data_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225))])
def collate_fn(data):
img_names, imgs = list(zip(*data))
imgs = torch.stack(imgs)
return img_names, imgs
BATCH_SIZE = 4
dataset = ImageDataset(dataset_path=Path(args.dataset), size=SIZE, istrain=True, transforms=data_transforms)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=6,
pin_memory=True, collate_fn=collate_fn)
print('Done')
# Acquire the database features.
if model_name.startswith('efficientnet'):
valid_layer = [2, 3]
vec_length = np.array(embed.out_channels)[valid_layer].sum()
img_name_np = []
z_att_np = np.zeros([len(dataset), vec_length], dtype='float32')
z_att_max_np = np.zeros([len(dataset), vec_length], dtype='float32')
z_max_np = np.zeros([len(dataset), embed.last_channels], dtype='float32')
gc.disable()
with tqdm(total=len(dataset)) as pbar:
for i, (img_names, imgs) in enumerate(dataloader):
pbar.update(BATCH_SIZE)
with torch.no_grad():
img_name_np.extend(img_names)
xs = embed(imgs.to(device)) # x_representation:[b, 2048, h, w]
masks = mask_encoder(xs) # [b, 1, h, w]
z_ATT = np.concatenate(
[norm(mask_encoder.attention_pooling(x, mask).squeeze(3).squeeze(2).detach().cpu().numpy()) for
i, (x, mask) in enumerate(zip(xs, masks)) if i in valid_layer], axis=1)
z_ATTMAX = np.concatenate([norm(
F.adaptive_max_pool2d(x * mask, output_size=1).squeeze(3).squeeze(2).detach().cpu().numpy()) for
i, (x, mask) in enumerate(zip(xs, masks)) if i in valid_layer],
axis=1)
z_MAX = norm(
F.adaptive_max_pool2d(xs[-1], output_size=1).squeeze(3).squeeze(2).detach().cpu().numpy())
for idx, (z_ATT_, z_ATTMAX_, z_MAX_) in enumerate(zip(z_ATT, z_ATTMAX, z_MAX)):
z_att_np[i * BATCH_SIZE + idx, :] = z_ATT_
z_att_max_np[i * BATCH_SIZE + idx, :] = z_ATTMAX_
z_max_np[i * BATCH_SIZE + idx, :] = z_MAX_
gc.enable()
# Save the features
img_name_np = np.array(img_name_np, dtype='object')
with h5py.File(args.output_feature_path, 'w') as f:
f.create_dataset('img_name_ds', shape=img_name_np.shape, data=img_name_np,
dtype=h5py.special_dtype(vlen=str))
f.create_dataset('z_att_ds', shape=z_att_np.shape, data=z_att_np)
f.create_dataset('z_att_max_ds', shape=z_att_max_np.shape, data=z_att_max_np)
f.create_dataset('z_max_ds', shape=z_max_np.shape, data=z_max_np)
elif model_name.startswith('resnet'):
valid_layer = [3]
vec_length = np.array(embed.out_channels)[valid_layer].sum()
img_name_np = []
z_att_np = np.zeros([len(dataset), vec_length], dtype='float32')
gc.disable()
with tqdm(total=len(dataset)) as pbar:
for i, (img_names, imgs) in enumerate(dataloader):
pbar.update(BATCH_SIZE)
with torch.no_grad():
img_name_np.extend(img_names)
xs = embed(imgs.to(device)) # x_representation:[b, 2048, h, w]
masks = mask_encoder(xs) # [b, 1, h, w]
z_ATT = np.concatenate(
[norm(mask_encoder.attention_pooling(x, mask).squeeze(3).squeeze(2).detach().cpu().numpy()) for
i, (x, mask) in enumerate(zip(xs, masks)) if i in valid_layer], axis=1)
for idx, (z_ATT_) in enumerate(z_ATT):
z_att_np[i * BATCH_SIZE + idx, :] = z_ATT_
gc.enable()
# Save the features
img_name_np = np.array(img_name_np, dtype='object')
with h5py.File(args.output_feature_path, 'w') as f:
f.create_dataset('img_name_ds', shape=img_name_np.shape, data=img_name_np,
dtype=h5py.special_dtype(vlen=str))
f.create_dataset('z_att_ds', shape=z_att_np.shape, data=z_att_np)
| [
"model.MultiHeadResNet",
"gc.enable",
"argparse.ArgumentParser",
"pathlib.Path",
"gc.disable",
"torch.load",
"torch.stack",
"model.MultiHeadMaskEncoder",
"h5py.File",
"torch.nn.functional.adaptive_max_pool2d",
"numpy.array",
"torch.cuda.is_available",
"torchvision.transforms.Normalize",
"t... | [((329, 435), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Extract global features from image to form a feature database."""'}), "(description=\n 'Extract global features from image to form a feature database.')\n", (352, 435), False, 'import argparse\n'), ((1617, 1673), 'model.MultiHeadMaskEncoder', 'MultiHeadMaskEncoder', ([], {'model': 'embed.model', 'name': 'model_name'}), '(model=embed.model, name=model_name)\n', (1637, 1673), False, 'from model import MultiHeadResNet, MultiHeadEffNet, MultiHeadMaskEncoder\n'), ((2568, 2701), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(False)', 'num_workers': '(6)', 'pin_memory': '(True)', 'collate_fn': 'collate_fn'}), '(dataset, batch_size=BATCH_SIZE, shuffle=False,\n num_workers=6, pin_memory=True, collate_fn=collate_fn)\n', (2595, 2701), False, 'import torch\n'), ((1435, 1486), 'model.MultiHeadEffNet', 'MultiHeadEffNet', ([], {'model': 'model_name', 'pretrained': '(False)'}), '(model=model_name, pretrained=False)\n', (1450, 1486), False, 'from model import MultiHeadResNet, MultiHeadEffNet, MultiHeadMaskEncoder\n'), ((1719, 1766), 'torch.load', 'torch.load', (['args.model_ckpt'], {'map_location': '"""cpu"""'}), "(args.model_ckpt, map_location='cpu')\n", (1729, 1766), False, 'import torch\n'), ((2368, 2385), 'torch.stack', 'torch.stack', (['imgs'], {}), '(imgs)\n', (2379, 2385), False, 'import torch\n'), ((3208, 3220), 'gc.disable', 'gc.disable', ([], {}), '()\n', (3218, 3220), False, 'import gc\n'), ((4627, 4638), 'gc.enable', 'gc.enable', ([], {}), '()\n', (4636, 4638), False, 'import gc\n'), ((4690, 4727), 'numpy.array', 'np.array', (['img_name_np'], {'dtype': '"""object"""'}), "(img_name_np, dtype='object')\n", (4698, 4727), True, 'import numpy as np\n'), ((1301, 1326), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1324, 1326), False, 'import torch\n'), ((1545, 1596), 
'model.MultiHeadResNet', 'MultiHeadResNet', ([], {'model': 'model_name', 'pretrained': '(False)'}), '(model=model_name, pretrained=False)\n', (1560, 1596), False, 'from model import MultiHeadResNet, MultiHeadEffNet, MultiHeadMaskEncoder\n'), ((2144, 2165), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2163, 2165), False, 'from torchvision import transforms\n'), ((2175, 2250), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '(0.485, 0.456, 0.406)', 'std': '(0.229, 0.224, 0.225)'}), '(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\n', (2195, 2250), False, 'from torchvision import transforms\n'), ((2478, 2496), 'pathlib.Path', 'Path', (['args.dataset'], {}), '(args.dataset)\n', (2482, 2496), False, 'from pathlib import Path\n'), ((4741, 4781), 'h5py.File', 'h5py.File', (['args.output_feature_path', '"""w"""'], {}), "(args.output_feature_path, 'w')\n", (4750, 4781), False, 'import h5py\n'), ((5430, 5442), 'gc.disable', 'gc.disable', ([], {}), '()\n', (5440, 5442), False, 'import gc\n'), ((6224, 6235), 'gc.enable', 'gc.enable', ([], {}), '()\n', (6233, 6235), False, 'import gc\n'), ((6287, 6324), 'numpy.array', 'np.array', (['img_name_np'], {'dtype': '"""object"""'}), "(img_name_np, dtype='object')\n", (6295, 6324), True, 'import numpy as np\n'), ((6338, 6378), 'h5py.File', 'h5py.File', (['args.output_feature_path', '"""w"""'], {}), "(args.output_feature_path, 'w')\n", (6347, 6378), False, 'import h5py\n'), ((2895, 2923), 'numpy.array', 'np.array', (['embed.out_channels'], {}), '(embed.out_channels)\n', (2903, 2923), True, 'import numpy as np\n'), ((3393, 3408), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3406, 3408), False, 'import torch\n'), ((4910, 4938), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'str'}), '(vlen=str)\n', (4928, 4938), False, 'import h5py\n'), ((5276, 5304), 'numpy.array', 'np.array', (['embed.out_channels'], {}), '(embed.out_channels)\n', (5284, 5304), True, 
'import numpy as np\n'), ((5615, 5630), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5628, 5630), False, 'import torch\n'), ((6507, 6535), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'str'}), '(vlen=str)\n', (6525, 6535), False, 'import h5py\n'), ((4218, 4262), 'torch.nn.functional.adaptive_max_pool2d', 'F.adaptive_max_pool2d', (['xs[-1]'], {'output_size': '(1)'}), '(xs[-1], output_size=1)\n', (4239, 4262), True, 'import torch.nn.functional as F\n'), ((3943, 3989), 'torch.nn.functional.adaptive_max_pool2d', 'F.adaptive_max_pool2d', (['(x * mask)'], {'output_size': '(1)'}), '(x * mask, output_size=1)\n', (3964, 3989), True, 'import torch.nn.functional as F\n')] |
import numpy as np
def get_random_number_generator(seed):
"""Turn seed into a np.random.Generator instance."""
return np.random.default_rng(seed)
| [
"numpy.random.default_rng"
] | [((129, 156), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (150, 156), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Selecting Mouginot catchments that are part of the contiguous GrIS.
+ mapping catchment limits
Based on code from Lizz Ultee: catchment-plot.py
@author: vincent
"""
import os
import sys
import shapefile
from netCDF4 import Dataset
import numpy as np
# import pandas as pd
# import pyproj as pyproj
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from shapely.geometry import Polygon
import pyproj
MapSurfaceElev = True #choose between surface elevation and bed topography map
MapBedTopo = False #choose between surface elevation and bed topography map
### Read BedMachine ###
pathBM = '/media/vincent/TOSH4TB/GeorgiaTech/DataSearch/BedMachine/BedMachineGreenland-2021-04-20.nc'
ds = Dataset(pathBM,mode='r') #open dataset in reading mode
xx = ds.variables['x'][:].copy() #x-coord (polar stereo (70, 45))
yy = ds.variables['y'][:].copy() #y-coord
if MapSurfaceElev:
surfBM = ds.variables['surface'][:].copy() #surface elevation
maskBM = ds.variables['mask'][:].copy() #BM mask values: 0=ocean, 1=ice-free land, 2=grounded ice, 3=floating ice, 4=non-Greenland land
surfproc = np.ma.masked_where(maskBM!=2,surfBM) #Surface elev for grounded ice
if MapBedTopo:
bedBM = ds.variables['bed'][:].copy() #bed topo
ds.close()
## Down-sampling BedMachine (original resolution: 150m) ##
x1 = xx[::20]
y1 = yy[::20]
if MapSurfaceElev:
surf1 = surfproc[::20,::20]
del(surfBM,surfproc,maskBM)
if MapBedTopo:
bed1 = bedBM[::20,::20]
del(bedBM)
del(xx,yy)
### Read Mouginot catchments from shapefile ###
pathMoug = '/media/vincent/TOSH4TB/GeorgiaTech/DataSearch/CatchmentsMouginot/Greenland_Basins_PS_v1.4.2.shp'
sf = shapefile.Reader(pathMoug)
### Mapping ###
fig1 = plt.figure(figsize=[8,9])
ax = fig1.subplots(1)
ax = plt.axes(projection=ccrs.NorthPolarStereo(central_longitude=-45.0))
ax.set(xlim=(min(x1)-10, max(x1)+10), ylim=(min(y1)-10, max(y1)+10))
if MapSurfaceElev: #Mapping the surface elevation
surflevels = np.linspace(np.min(surf1)-1,np.max(surf1)+1,101)
ax.contourf(x1,y1,surf1,cmap='jet',levels=surflevels)
elif MapBedTopo: #Mapping the bed topography
bedlevels = np.linspace(-3500,3500,101)
ax.contourf(x1,y1,bed1,cmap='jet',levels=bedlevels)
catchID = 0 #keeping track of catchment ID number
allAreas0 = [] #areas of the catchments
allCentroids0 = [] #centroids of the catchments
allxvertices0 = [] #all x-coordinates of the vertices
allyvertices0 = [] #all y-coordinates of the vertices
allIDs0 = [] #all catchment IDs
# NOTE(review): sf (shapefile reader), ax, fig1, catchID, allAreas0 and
# allCentroids0 are defined earlier in the file, outside this excerpt --
# confirm their initialization before reusing this section standalone.
for shape in sf.shapeRecords():
    if 'ICE_CAPS' not in shape.record['NAME']: #excluding Ice Caps catchments
        if len(shape.shape.parts)>1:
            # Disjointed catchments have shape.shape.parts>1
            catchment_color='grey'
        else:
            # Unified catchments have shape.shape.parts==1
            catchment_color='k'
        for i in range(len(shape.shape.parts)): ## plot disjointed parts separately
            i_start = shape.shape.parts[i] #index starting this part of the catchment
            if i==len(shape.shape.parts)-1: #at last part of the catchment
                i_end = len(shape.shape.points) #last point to plot is the overall last of the catchment
            else:
                i_end = shape.shape.parts[i+1] #otherwise,plot until index starting next part of the catchment
            # The comprehensions below reuse the name i; in Python 3 a
            # comprehension variable has its own scope, so the outer loop
            # index is unaffected (still worth renaming for clarity).
            x = [i[0] for i in shape.shape.points[i_start:i_end]] #x is first element of the sublist
            y = [i[1] for i in shape.shape.points[i_start:i_end]] #y is second element of the sublist
            allxvertices0.append(x)
            allyvertices0.append(y)
            allAreas0.append(Polygon(shape.shape.points[i_start:i_end]).area)
            allCentroids0.append(list(Polygon(shape.shape.points[i_start:i_end]).centroid.coords)[0])
            allIDs0.append(catchID)
            #ax.plot(x,y, color=catchment_color)
        catchID += 1 #all parts of one shape record share the same ID
# Conversion to numpy arrays #
allAreas0 = np.array(allAreas0)
allCentroids0 = np.array(allCentroids0) #centroids of the catchments
allxvertices0 = np.array(allxvertices0) #all x-coordinates of the vertices
allyvertices0 = np.array(allyvertices0) #all y-coordinates of the vertices
allIDs0 = np.array(allIDs0) #all catchment IDs
# Find largest part of the catchments with multiple parts #
# Parts of one split catchment occupy consecutive entries with the same ID,
# so a zero first-difference between neighbouring IDs flags those catchments.
multiplecatch = np.unique(np.array(allIDs0)[np.where(np.diff(allIDs0)==0)[0]])
todel = np.array([]) #indices of smaller parts that we want to remove
for idnb in multiplecatch:
    inds = np.where(np.array(allIDs0)==idnb)[0] #indices having the same catchment ID
    maxarea = 0 #largest area among the parts of the catchment
    for ii in inds:
        if allAreas0[ii]>maxarea:
            maxarea = allAreas0[ii]
    #Add the smaller parts to the indices to delete
    todel = np.append(todel,[index for index in inds if allAreas0[index]<maxarea]).astype(int)
allAreas = np.delete(allAreas0,todel) #areas of the catchments
allCentroids = np.delete(allCentroids0,todel,axis=0) #centroids of the catchments
allxvertices = np.delete(allxvertices0,todel,axis=0) #all x-coordinates of the vertices
allyvertices = np.delete(allyvertices0,todel,axis=0) #all y-coordinates of the vertices
allIDs = np.delete(allIDs0,todel) #all catchment IDs
# Plot kept parts in black, removed (smaller duplicate) parts in red.
for ii,ctr in enumerate(allCentroids0):
    if ii not in todel:
        ax.plot(allxvertices0[ii],allyvertices0[ii],'k',label='Contiguous GrIS')
        #ax.text(ctr[0],ctr[1],str(allIDs[ii]))
    else:
        ax.plot(allxvertices0[ii],allyvertices0[ii],'r',label='Excluded')
        #ax.text(ctr[0],ctr[1],str(allIDs[ii]),color='r')
#plt.legend(fontsize=11,loc='lower right')
plt.show()
fig1.tight_layout()
### Projecting to lat-lon ###
#Projection info is from Lizz Ultee (could not find info about projection for Mouginot catchments)
wgs84 = pyproj.Proj("+init=EPSG:4326") # LatLon with WGS84 data (equiv. to EPSG4326)
psn_M = pyproj.Proj("+init=epsg:3413") # Polar Stereographic North
allCentroids0_lonlat = []
for ii in range(len(allCentroids0)):
    allCentroids0_lonlat.append(pyproj.transform(psn_M,wgs84,allCentroids0[ii][0],allCentroids0[ii][1]))
allCentroids_lonlat = []
for ii in range(len(allCentroids)):
    allCentroids_lonlat.append(pyproj.transform(psn_M,wgs84,allCentroids[ii][0],allCentroids[ii][1]))
| [
"shapefile.Reader",
"cartopy.crs.NorthPolarStereo",
"numpy.delete",
"netCDF4.Dataset",
"pyproj.transform",
"numpy.diff",
"numpy.ma.masked_where",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.append",
"shapely.geometry.Polygon",
"pyproj.Proj",
"numpy.mi... | [((754, 779), 'netCDF4.Dataset', 'Dataset', (['pathBM'], {'mode': '"""r"""'}), "(pathBM, mode='r')\n", (761, 779), False, 'from netCDF4 import Dataset\n'), ((1713, 1739), 'shapefile.Reader', 'shapefile.Reader', (['pathMoug'], {}), '(pathMoug)\n', (1729, 1739), False, 'import shapefile\n'), ((1765, 1791), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[8, 9]'}), '(figsize=[8, 9])\n', (1775, 1791), True, 'import matplotlib.pyplot as plt\n'), ((4056, 4075), 'numpy.array', 'np.array', (['allAreas0'], {}), '(allAreas0)\n', (4064, 4075), True, 'import numpy as np\n'), ((4092, 4115), 'numpy.array', 'np.array', (['allCentroids0'], {}), '(allCentroids0)\n', (4100, 4115), True, 'import numpy as np\n'), ((4161, 4184), 'numpy.array', 'np.array', (['allxvertices0'], {}), '(allxvertices0)\n', (4169, 4184), True, 'import numpy as np\n'), ((4236, 4259), 'numpy.array', 'np.array', (['allyvertices0'], {}), '(allyvertices0)\n', (4244, 4259), True, 'import numpy as np\n'), ((4311, 4328), 'numpy.array', 'np.array', (['allIDs0'], {}), '(allIDs0)\n', (4319, 4328), True, 'import numpy as np\n'), ((4496, 4508), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4504, 4508), True, 'import numpy as np\n'), ((4989, 5016), 'numpy.delete', 'np.delete', (['allAreas0', 'todel'], {}), '(allAreas0, todel)\n', (4998, 5016), True, 'import numpy as np\n'), ((5057, 5096), 'numpy.delete', 'np.delete', (['allCentroids0', 'todel'], {'axis': '(0)'}), '(allCentroids0, todel, axis=0)\n', (5066, 5096), True, 'import numpy as np\n'), ((5140, 5179), 'numpy.delete', 'np.delete', (['allxvertices0', 'todel'], {'axis': '(0)'}), '(allxvertices0, todel, axis=0)\n', (5149, 5179), True, 'import numpy as np\n'), ((5229, 5268), 'numpy.delete', 'np.delete', (['allyvertices0', 'todel'], {'axis': '(0)'}), '(allyvertices0, todel, axis=0)\n', (5238, 5268), True, 'import numpy as np\n'), ((5318, 5343), 'numpy.delete', 'np.delete', (['allIDs0', 'todel'], {}), '(allIDs0, todel)\n', (5327, 5343), 
True, 'import numpy as np\n'), ((5742, 5752), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5750, 5752), True, 'import matplotlib.pyplot as plt\n'), ((5912, 5942), 'pyproj.Proj', 'pyproj.Proj', (['"""+init=EPSG:4326"""'], {}), "('+init=EPSG:4326')\n", (5923, 5942), False, 'import pyproj\n'), ((5998, 6028), 'pyproj.Proj', 'pyproj.Proj', (['"""+init=epsg:3413"""'], {}), "('+init=epsg:3413')\n", (6009, 6028), False, 'import pyproj\n'), ((1157, 1196), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(maskBM != 2)', 'surfBM'], {}), '(maskBM != 2, surfBM)\n', (1175, 1196), True, 'import numpy as np\n'), ((1838, 1884), 'cartopy.crs.NorthPolarStereo', 'ccrs.NorthPolarStereo', ([], {'central_longitude': '(-45.0)'}), '(central_longitude=-45.0)\n', (1859, 1884), True, 'import cartopy.crs as ccrs\n'), ((2191, 2220), 'numpy.linspace', 'np.linspace', (['(-3500)', '(3500)', '(101)'], {}), '(-3500, 3500, 101)\n', (2202, 2220), True, 'import numpy as np\n'), ((4435, 4452), 'numpy.array', 'np.array', (['allIDs0'], {}), '(allIDs0)\n', (4443, 4452), True, 'import numpy as np\n'), ((6152, 6226), 'pyproj.transform', 'pyproj.transform', (['psn_M', 'wgs84', 'allCentroids0[ii][0]', 'allCentroids0[ii][1]'], {}), '(psn_M, wgs84, allCentroids0[ii][0], allCentroids0[ii][1])\n', (6168, 6226), False, 'import pyproj\n'), ((6317, 6389), 'pyproj.transform', 'pyproj.transform', (['psn_M', 'wgs84', 'allCentroids[ii][0]', 'allCentroids[ii][1]'], {}), '(psn_M, wgs84, allCentroids[ii][0], allCentroids[ii][1])\n', (6333, 6389), False, 'import pyproj\n'), ((2034, 2047), 'numpy.min', 'np.min', (['surf1'], {}), '(surf1)\n', (2040, 2047), True, 'import numpy as np\n'), ((2050, 2063), 'numpy.max', 'np.max', (['surf1'], {}), '(surf1)\n', (2056, 2063), True, 'import numpy as np\n'), ((4888, 4961), 'numpy.append', 'np.append', (['todel', '[index for index in inds if allAreas0[index] < maxarea]'], {}), '(todel, [index for index in inds if allAreas0[index] < maxarea])\n', (4897, 4961), True, 'import 
numpy as np\n'), ((4605, 4622), 'numpy.array', 'np.array', (['allIDs0'], {}), '(allIDs0)\n', (4613, 4622), True, 'import numpy as np\n'), ((3739, 3781), 'shapely.geometry.Polygon', 'Polygon', (['shape.shape.points[i_start:i_end]'], {}), '(shape.shape.points[i_start:i_end])\n', (3746, 3781), False, 'from shapely.geometry import Polygon\n'), ((4462, 4478), 'numpy.diff', 'np.diff', (['allIDs0'], {}), '(allIDs0)\n', (4469, 4478), True, 'import numpy as np\n'), ((3830, 3872), 'shapely.geometry.Polygon', 'Polygon', (['shape.shape.points[i_start:i_end]'], {}), '(shape.shape.points[i_start:i_end])\n', (3837, 3872), False, 'from shapely.geometry import Polygon\n')] |
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from copy import deepcopy
import numpy as np
from miplearn.classifiers import Classifier
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
import logging
logger = logging.getLogger(__name__)
class CrossValidatedClassifier(Classifier):
    """
    A meta-classifier that, upon training, evaluates the performance of another
    classifier on the training data set using k-fold cross validation, then
    either adopts the other classifier if the cv-score is high enough, or
    returns a constant label for every x_test otherwise.

    The threshold is specified in comparison to a dummy classifier trained
    on the same dataset. For example, a threshold of 0.0 indicates that any
    classifier as good as the dummy predictor is acceptable. A threshold of 1.0
    indicates that only classifiers with a perfect cross-validation score are
    acceptable. Other numbers are a linear interpolation of these two extremes.
    """

    def __init__(
        self,
        classifier=None,
        threshold=0.75,
        constant=0.0,
        cv=5,
        scoring="accuracy",
    ):
        """
        Parameters
        ----------
        classifier : object, optional
            Prototype classifier to evaluate through cross validation.
            Defaults to a freshly constructed ``LogisticRegression``.
            The prototype is deep-copied before fitting, so the object
            passed in is never modified.
        threshold : float
            Relative cv-score threshold (0.0 = as good as a dummy
            classifier, 1.0 = perfect score) above which the prototype
            is adopted.
        constant : float
            Label predicted for every sample when the prototype is rejected.
        cv : int
            Number of cross-validation folds.
        scoring : str
            Scoring metric name passed to ``cross_val_score``.
        """
        # Instantiate the default lazily: a mutable default argument such
        # as ``classifier=LogisticRegression()`` is evaluated once at
        # import time, so a single estimator object would be shared by
        # every CrossValidatedClassifier created with the default.
        if classifier is None:
            classifier = LogisticRegression()
        self.classifier = None
        self.classifier_prototype = classifier
        self.constant = constant
        self.threshold = threshold
        self.cv = cv
        self.scoring = scoring

    def fit(self, x_train, y_train):
        """Fit either a copy of the prototype classifier or a constant
        dummy predictor, depending on the prototype's cross-validation
        score relative to the configured threshold."""
        # Score a majority-class dummy predictor would achieve; it anchors
        # the low end (threshold=0) of the acceptance scale.
        y_train_avg = np.average(y_train)
        dummy_score = max(y_train_avg, 1 - y_train_avg)
        # Linear interpolation between dummy score and a perfect score.
        absolute_threshold = 1.0 * self.threshold + dummy_score * (1 - self.threshold)

        # Calculate cross validation score and decide which classifier to
        # use. Work on a deep copy so the prototype itself is never fitted.
        clf = deepcopy(self.classifier_prototype)
        cv_score = float(
            np.mean(
                cross_val_score(
                    clf,
                    x_train,
                    y_train,
                    cv=self.cv,
                    scoring=self.scoring,
                )
            )
        )
        if cv_score >= absolute_threshold:
            logger.debug(
                "cv_score is above threshold (%.2f >= %.2f); keeping"
                % (cv_score, absolute_threshold)
            )
            self.classifier = clf
        else:
            logger.debug(
                "cv_score is below threshold (%.2f < %.2f); discarding"
                % (cv_score, absolute_threshold)
            )
            self.classifier = DummyClassifier(
                strategy="constant",
                constant=self.constant,
            )

        # Train chosen classifier
        self.classifier.fit(x_train, y_train)

    def predict_proba(self, x_test):
        """Delegate to the adopted classifier; ``fit`` must be called first
        (otherwise ``self.classifier`` is still None)."""
        return self.classifier.predict_proba(x_test)
| [
"logging.getLogger",
"numpy.average",
"sklearn.linear_model.LogisticRegression",
"copy.deepcopy",
"sklearn.dummy.DummyClassifier",
"sklearn.model_selection.cross_val_score"
] | [((490, 517), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (507, 517), False, 'import logging\n'), ((1311, 1331), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1329, 1331), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1747, 1766), 'numpy.average', 'np.average', (['y_train'], {}), '(y_train)\n', (1757, 1766), True, 'import numpy as np\n'), ((2003, 2038), 'copy.deepcopy', 'deepcopy', (['self.classifier_prototype'], {}), '(self.classifier_prototype)\n', (2011, 2038), False, 'from copy import deepcopy\n'), ((2759, 2819), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'strategy': '"""constant"""', 'constant': 'self.constant'}), "(strategy='constant', constant=self.constant)\n", (2774, 2819), False, 'from sklearn.dummy import DummyClassifier\n'), ((2102, 2174), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['clf', 'x_train', 'y_train'], {'cv': 'self.cv', 'scoring': 'self.scoring'}), '(clf, x_train, y_train, cv=self.cv, scoring=self.scoring)\n', (2117, 2174), False, 'from sklearn.model_selection import cross_val_score\n')] |
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, BatchNormalization, Activation, MaxPooling2D, DepthwiseConv2D, AveragePooling2D, SeparableConv2D
from tensorflow.keras.constraints import max_norm
from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
import numpy as np
import os
import time
from sklearn.metrics import classification_report, f1_score
from min2net.utils import TimeHistory, compute_class_weight
class EEGNet:
    """EEGNet (compact convolutional network for EEG decoding) built with
    tf.keras, bundled with training (``fit``) and evaluation (``predict``)
    helpers that write logs and best weights under ``log_path``.

    Any attribute set in ``__init__`` (e.g. ``kernLength``, ``F1``, ``D``,
    ``seed``, ``class_balancing``) can be overridden via ``**kwargs``.
    """

    def __init__(self,
                 input_shape=(1,20,400),
                 num_class=2,
                 loss='sparse_categorical_crossentropy',
                 epochs=200,
                 batch_size=100,
                 optimizer=None,
                 lr=0.01,
                 min_lr=0.01,
                 factor=0.25,
                 patience=10,
                 es_patience=20,
                 verbose=1,
                 log_path='logs',
                 model_name='EEGNet',
                 data_format='channels_first',
                 **kwargs):
        """Configure the network and training hyper-parameters.

        ``optimizer`` defaults to ``Adam(beta_1=0.9, beta_2=0.999,
        epsilon=1e-08)``; pass your own optimizer instance to override.
        """
        # Create the default optimizer per instance. The previous default
        # argument ``optimizer=Adam(...)`` was evaluated once at class
        # definition time, so every EEGNet relying on the default silently
        # shared one stateful Adam object.
        if optimizer is None:
            optimizer = Adam(beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        self.input_shape = input_shape
        self.num_class = num_class
        self.loss = loss
        self.epochs = epochs
        self.batch_size = batch_size
        self.optimizer = optimizer
        self.optimizer.lr = lr
        self.lr = lr
        self.min_lr = min_lr
        self.factor = factor
        self.patience = patience
        self.es_patience = es_patience
        self.verbose = verbose
        self.log_path = log_path
        self.model_name = model_name
        self.data_format = data_format
        # Output artifact locations, derived from log_path/model_name.
        self.weights_dir = log_path+'/'+model_name+'_out_weights.h5'
        self.csv_dir = log_path+'/'+model_name+'_out_log.log'
        self.time_log = log_path+'/'+model_name+'_time_log.csv'

        # use **kwargs to set the new value of below args.
        self.kernLength = 200
        self.F1 = 8
        self.D = 2
        self.F2 = int(self.F1*self.D)
        self.norm_rate = 0.25
        self.dropout_rate = 0.5
        self.f1_average = 'binary' if self.num_class == 2 else 'macro'
        self.shuffle = False
        self.metrics = 'accuracy'
        self.monitor = 'val_loss'
        self.mode = 'min'
        self.save_best_only = True
        self.save_weight_only = True
        self.seed = 1234
        self.class_balancing = False
        self.class_weight = None
        for k in kwargs.keys():
            self.__setattr__(k, kwargs[k])

        # Channels/samples depend on whether the channel axis comes first.
        if self.data_format == 'channels_first':
            self.Chans = self.input_shape[1]
            self.Samples = self.input_shape[2]
        else:
            self.Chans = self.input_shape[0]
            self.Samples = self.input_shape[1]

        # Seed numpy and TF for reproducible weight init / shuffling.
        np.random.seed(self.seed)
        tf.random.set_seed(self.seed)
        K.set_image_data_format(self.data_format)
        if not os.path.exists(self.log_path):
            os.makedirs(self.log_path)

    def build(self):
        """Assemble and return the (uncompiled) Keras EEGNet model:
        temporal conv -> depthwise spatial conv -> separable conv ->
        softmax over ``num_class`` outputs."""
        input1 = Input(shape=self.input_shape)
        ##################################################################
        # Block 1: temporal convolution followed by a depthwise spatial
        # filter across all channels (depth_multiplier = D).
        block1 = Conv2D(self.F1, (1, self.kernLength), padding='same',
                        input_shape=self.input_shape,
                        use_bias=False)(input1)
        block1 = BatchNormalization()(block1)
        block1 = DepthwiseConv2D((self.Chans, 1), use_bias=False,
                                 depth_multiplier=self.D,
                                 depthwise_constraint=max_norm(1.))(block1)
        block1 = BatchNormalization()(block1)
        block1 = Activation('elu')(block1)
        block1 = AveragePooling2D((1, 4))(block1)
        block1 = Dropout(self.dropout_rate)(block1)

        # Block 2: separable convolution mixing the learned feature maps.
        block2 = SeparableConv2D(self.F2, (1, self.kernLength//4),
                                 use_bias=False, padding='same')(block1)
        block2 = BatchNormalization()(block2)
        block2 = Activation('elu')(block2)
        block2 = AveragePooling2D((1, 8))(block2)
        block2 = Dropout(self.dropout_rate)(block2)

        flatten = Flatten(name='flatten')(block2)
        dense = Dense(self.num_class, name='dense',
                      kernel_constraint=max_norm(self.norm_rate))(flatten)
        softmax = Activation('softmax', name='softmax')(dense)
        return Model(inputs=input1, outputs=softmax)

    def fit(self, X_train, y_train, X_val, y_val):
        """Train the model with checkpointing, CSV logging, LR reduction
        and early stopping; the best weights go to ``self.weights_dir``.

        Raises an Exception when X_train/X_val are not 4-dimensional.
        """
        if X_train.ndim != 4:
            raise Exception('ValueError: `X_train` is incompatible: expected ndim=4, found ndim='+str(X_train.ndim))
        elif X_val.ndim != 4:
            raise Exception('ValueError: `X_val` is incompatible: expected ndim=4, found ndim='+str(X_val.ndim))

        # Re-derive the geometry from the actual training data.
        self.input_shape = X_train.shape[1:]
        if self.data_format == 'channels_first':
            self.Chans = self.input_shape[1]
            self.Samples = self.input_shape[2]
        else:
            self.Chans = self.input_shape[0]
            self.Samples = self.input_shape[1]

        csv_logger = CSVLogger(self.csv_dir)
        time_callback = TimeHistory(self.time_log)
        # Keras spells this kwarg 'save_weights_only'; the previous
        # 'save_weight_only' name is not a valid ModelCheckpoint argument.
        checkpointer = ModelCheckpoint(monitor=self.monitor, filepath=self.weights_dir, verbose=self.verbose,
                                       save_best_only=self.save_best_only,
                                       save_weights_only=self.save_weight_only)
        reduce_lr = ReduceLROnPlateau(monitor=self.monitor, patience=self.patience, factor=self.factor,
                                      mode=self.mode, verbose=self.verbose, min_lr=self.min_lr)
        es = EarlyStopping(monitor=self.monitor, mode=self.mode, verbose=self.verbose, patience=self.es_patience)

        model = self.build()
        model.compile(optimizer=self.optimizer, loss=self.loss, metrics=self.metrics)
        model.summary()
        print("The first kernel size is (1, {})".format(self.kernLength))

        if self.class_balancing: # compute_class_weight if class_balancing is True
            self.class_weight = compute_class_weight(y_train)
        else:
            self.class_weight = None

        model.fit(X_train, y_train,
                  batch_size=self.batch_size, shuffle=self.shuffle,
                  epochs=self.epochs, validation_data=(X_val, y_val), class_weight=self.class_weight,
                  callbacks=[checkpointer, csv_logger,reduce_lr,es, time_callback])

    def predict(self, X_test, y_test):
        """Load the best weights, evaluate on (X_test, y_test) and return
        ``(Y, evaluation)`` where Y holds true/predicted labels and
        evaluation holds loss, accuracy, f1-score and prediction time.

        Raises an Exception when X_test is not 4-dimensional.
        """
        if X_test.ndim != 4:
            raise Exception('ValueError: `X_test` is incompatible: expected ndim=4, found ndim='+str(X_test.ndim))

        model = self.build()
        model.load_weights(self.weights_dir)
        model.compile(optimizer=self.optimizer, loss=self.loss, metrics=self.metrics)

        # Wall-clock time of the forward pass only.
        start = time.time()
        y_pred = model.predict(X_test)
        end = time.time()
        loss, accuracy = model.evaluate(x=X_test, y=y_test, batch_size=self.batch_size, verbose=self.verbose)
        y_pred_argm = np.argmax(y_pred, axis=1)
        print(classification_report(y_test, y_pred_argm))
        print("F1-score is computed based on {}".format(self.f1_average))
        f1 = f1_score(y_test, y_pred_argm, average=self.f1_average)
        evaluation = {'loss': loss,
                      'accuracy': accuracy,
                      'f1-score': f1,
                      'prediction_time': end-start}
        Y = {'y_true': y_test,
             'y_pred': y_pred_argm}
        return Y, evaluation
return Y, evaluation | [
"sklearn.metrics.classification_report",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.AveragePooling2D",
"min2net.utils.compute_class_weight",
"tensorflow.keras.backend.set_image_data_format",
"tensorflow.keras.layers.Input",
"os.pa... | [((889, 934), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-08)'}), '(beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n', (893, 934), False, 'from tensorflow.keras.optimizers import Adam\n'), ((2904, 2929), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (2918, 2929), True, 'import numpy as np\n'), ((2938, 2967), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['self.seed'], {}), '(self.seed)\n', (2956, 2967), True, 'import tensorflow as tf\n'), ((2976, 3017), 'tensorflow.keras.backend.set_image_data_format', 'K.set_image_data_format', (['self.data_format'], {}), '(self.data_format)\n', (2999, 3017), True, 'from tensorflow.keras import backend as K\n'), ((3148, 3177), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'self.input_shape'}), '(shape=self.input_shape)\n', (3153, 3177), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, BatchNormalization, Activation, MaxPooling2D, DepthwiseConv2D, AveragePooling2D, SeparableConv2D\n'), ((4580, 4617), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'input1', 'outputs': 'softmax'}), '(inputs=input1, outputs=softmax)\n', (4585, 4617), False, 'from tensorflow.keras.models import Model\n'), ((5284, 5307), 'tensorflow.keras.callbacks.CSVLogger', 'CSVLogger', (['self.csv_dir'], {}), '(self.csv_dir)\n', (5293, 5307), False, 'from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((5332, 5358), 'min2net.utils.TimeHistory', 'TimeHistory', (['self.time_log'], {}), '(self.time_log)\n', (5343, 5358), False, 'from min2net.utils import TimeHistory, compute_class_weight\n'), ((5382, 5554), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'monitor': 'self.monitor', 'filepath': 'self.weights_dir', 'verbose': 'self.verbose', 'save_best_only': 'self.save_best_only', 'save_weight_only': 'self.save_weight_only'}), 
'(monitor=self.monitor, filepath=self.weights_dir, verbose=\n self.verbose, save_best_only=self.save_best_only, save_weight_only=self\n .save_weight_only)\n', (5397, 5554), False, 'from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((5605, 5751), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': 'self.monitor', 'patience': 'self.patience', 'factor': 'self.factor', 'mode': 'self.mode', 'verbose': 'self.verbose', 'min_lr': 'self.min_lr'}), '(monitor=self.monitor, patience=self.patience, factor=self\n .factor, mode=self.mode, verbose=self.verbose, min_lr=self.min_lr)\n', (5622, 5751), False, 'from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((5799, 5903), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': 'self.monitor', 'mode': 'self.mode', 'verbose': 'self.verbose', 'patience': 'self.es_patience'}), '(monitor=self.monitor, mode=self.mode, verbose=self.verbose,\n patience=self.es_patience)\n', (5812, 5903), False, 'from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((6994, 7005), 'time.time', 'time.time', ([], {}), '()\n', (7003, 7005), False, 'import time\n'), ((7059, 7070), 'time.time', 'time.time', ([], {}), '()\n', (7068, 7070), False, 'import time\n'), ((7203, 7228), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (7212, 7228), True, 'import numpy as np\n'), ((7374, 7428), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'y_pred_argm'], {'average': 'self.f1_average'}), '(y_test, y_pred_argm, average=self.f1_average)\n', (7382, 7428), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((3033, 3062), 'os.path.exists', 'os.path.exists', (['self.log_path'], {}), '(self.log_path)\n', (3047, 3062), False, 'import os\n'), ((3076, 3102), 'os.makedirs', 'os.makedirs', (['self.log_path'], 
{}), '(self.log_path)\n', (3087, 3102), False, 'import os\n'), ((3277, 3381), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['self.F1', '(1, self.kernLength)'], {'padding': '"""same"""', 'input_shape': 'self.input_shape', 'use_bias': '(False)'}), "(self.F1, (1, self.kernLength), padding='same', input_shape=self.\n input_shape, use_bias=False)\n", (3283, 3381), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, BatchNormalization, Activation, MaxPooling2D, DepthwiseConv2D, AveragePooling2D, SeparableConv2D\n'), ((3468, 3488), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3486, 3488), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, BatchNormalization, Activation, MaxPooling2D, DepthwiseConv2D, AveragePooling2D, SeparableConv2D\n'), ((3738, 3758), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3756, 3758), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, BatchNormalization, Activation, MaxPooling2D, DepthwiseConv2D, AveragePooling2D, SeparableConv2D\n'), ((3790, 3807), 'tensorflow.keras.layers.Activation', 'Activation', (['"""elu"""'], {}), "('elu')\n", (3800, 3807), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, BatchNormalization, Activation, MaxPooling2D, DepthwiseConv2D, AveragePooling2D, SeparableConv2D\n'), ((3839, 3863), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(1, 4)'], {}), '((1, 4))\n', (3855, 3863), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, BatchNormalization, Activation, MaxPooling2D, DepthwiseConv2D, AveragePooling2D, SeparableConv2D\n'), ((3895, 3921), 'tensorflow.keras.layers.Dropout', 'Dropout', (['self.dropout_rate'], {}), '(self.dropout_rate)\n', (3902, 3921), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, BatchNormalization, 
Activation, MaxPooling2D, DepthwiseConv2D, AveragePooling2D, SeparableConv2D\n'), ((3954, 4042), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['self.F2', '(1, self.kernLength // 4)'], {'use_bias': '(False)', 'padding': '"""same"""'}), "(self.F2, (1, self.kernLength // 4), use_bias=False, padding\n ='same')\n", (3969, 4042), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, BatchNormalization, Activation, MaxPooling2D, DepthwiseConv2D, AveragePooling2D, SeparableConv2D\n'), ((4106, 4126), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4124, 4126), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, BatchNormalization, Activation, MaxPooling2D, DepthwiseConv2D, AveragePooling2D, SeparableConv2D\n'), ((4158, 4175), 'tensorflow.keras.layers.Activation', 'Activation', (['"""elu"""'], {}), "('elu')\n", (4168, 4175), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, BatchNormalization, Activation, MaxPooling2D, DepthwiseConv2D, AveragePooling2D, SeparableConv2D\n'), ((4207, 4231), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(1, 8)'], {}), '((1, 8))\n', (4223, 4231), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, BatchNormalization, Activation, MaxPooling2D, DepthwiseConv2D, AveragePooling2D, SeparableConv2D\n'), ((4263, 4289), 'tensorflow.keras.layers.Dropout', 'Dropout', (['self.dropout_rate'], {}), '(self.dropout_rate)\n', (4270, 4289), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, BatchNormalization, Activation, MaxPooling2D, DepthwiseConv2D, AveragePooling2D, SeparableConv2D\n'), ((4322, 4345), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {'name': '"""flatten"""'}), "(name='flatten')\n", (4329, 4345), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, BatchNormalization, 
Activation, MaxPooling2D, DepthwiseConv2D, AveragePooling2D, SeparableConv2D\n'), ((4519, 4556), 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {'name': '"""softmax"""'}), "('softmax', name='softmax')\n", (4529, 4556), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, BatchNormalization, Activation, MaxPooling2D, DepthwiseConv2D, AveragePooling2D, SeparableConv2D\n'), ((6238, 6267), 'min2net.utils.compute_class_weight', 'compute_class_weight', (['y_train'], {}), '(y_train)\n', (6258, 6267), False, 'from min2net.utils import TimeHistory, compute_class_weight\n'), ((7243, 7285), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred_argm'], {}), '(y_test, y_pred_argm)\n', (7264, 7285), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((3693, 3706), 'tensorflow.keras.constraints.max_norm', 'max_norm', (['(1.0)'], {}), '(1.0)\n', (3701, 3706), False, 'from tensorflow.keras.constraints import max_norm\n'), ((4461, 4485), 'tensorflow.keras.constraints.max_norm', 'max_norm', (['self.norm_rate'], {}), '(self.norm_rate)\n', (4469, 4485), False, 'from tensorflow.keras.constraints import max_norm\n')] |
import numpy as np
from MagniPy.lensdata import Data
from MagniPy.LensBuild.defaults import get_default_SIE_random, get_default_SIE
from MagniPy.util import approx_theta_E
from MagniPy.Workflow.grism_lenses.quad import Quad
class Lens1608(Quad):
    """Quad lens system B1608+656: observed image positions, flux ratios
    and time delays, plus fitting helpers built on the Quad base class."""

    # Positions of the two lens galaxies G1 and G2 (arcsec); G2 is then
    # re-expressed relative to G1, which serves as the coordinate origin.
    g1x, g1y = 0.4161, -1.0581
    g2x, g2y = -0.2897, -0.9243
    g2x -= g1x
    g2y -= g1y

    # Image positions and flux ratios from Fassnacht et al. 2002,
    # shifted so that G1 sits at the origin.
    x = np.array([-0.000, -0.738, -0.7446, 1.1284]) - g1x
    y = np.array([0.000, -1.961, -0.4537, -1.2565]) - g1y
    m = np.array([1., 0.5, 0.51, 0.35])
    # Astrometric uncertainties (arcsec) for each of the four images.
    sigma_x = np.array([0.005]*4)
    sigma_y = np.array([0.005]*4)

    # Time delays (days) from Koopmans et al. 2003.
    time_delay_AB, delta_AB = -31.5, 1.5
    time_delay_AC, delta_AC = 4.5, 1.5
    time_delay_AD, delta_AD = 45.5, 2.
    # delta_time_delay = np.array([delta_AB, delta_AC, delta_AD])
    # relative_arrival_times = np.array([time_delay_AB, time_delay_AC, time_delay_AD])
    relative_arrival_times = np.array([31., 36., 76.])
    delta_time_delay = np.array([2., 1.5, 2.])

    sigma_m = np.zeros_like(sigma_x)

    # Starting point for the macromodel optimization (SIE + external shear).
    kwargs_lens_init = [{'theta_E': 0.9036453181989341, 'center_x': 0.007600860009089998, 'center_y': -0.057685769153856994, 'e1': 0.33480085295892065, 'e2': -0.05097223221504117, 'gamma': 2.08},
                        {'gamma1': 0.06096918564904592, 'gamma2': 0.07721907911829631}]

    # Sersic light profiles for the deflector, source and satellite.
    amp_scale = 1000.
    kwargs_lens_light = [{'amp': amp_scale * 1.1, 'R_sersic': 0.4, 'n_sersic': 4., 'center_x': 0., 'center_y': 0.}]
    kwargs_source_light = [{'amp': amp_scale * 1.6, 'R_sersic': 0.12, 'n_sersic': 3., 'center_x': None, 'center_y': None,
                            'e1': -0.1, 'e2': 0.3}]

    # Lens and source redshifts.
    zlens, zsrc = 0.61, 1.4
    data = Data(x, y, m, None, None,
                sigma_x = sigma_x, sigma_y = sigma_y,
                sigma_m=sigma_m)

    identifier = 'lens1608'

    # Flux ratios are measured with respect to image A.
    flux_ratio_index = 0
    fluximg = ['A', 'B', 'C', 'D'][flux_ratio_index]

    _macromodel = get_default_SIE(zlens)
    _macromodel.lenstronomy_args['theta_E'] = approx_theta_E(x, y)

    # Satellite galaxy G2 modeled as a singular isothermal sphere.
    has_satellite = True
    satellite_mass_model = ['SIS']
    satellite_redshift = [zlens]
    satellite_convention = ['phys']
    satellite_pos_mass = [g2x, g2y]
    kwargs_satellite_light = [{'amp': amp_scale * 0.7, 'R_sersic': 0.2, 'n_sersic': 4.,
                               'center_x': g2x, 'center_y': g2y}]
    # satellite einstein radius from Koopmans et al. 2003
    satellite_kwargs = [{'theta_E': 0.26, 'center_x': satellite_pos_mass[0], 'center_y': satellite_pos_mass[1]}]

    # Priors for the macromodel power-law slope and source size.
    gamma_min = 1.95
    gamma_max = 2.2
    srcmin = 0.02
    srcmax = 0.05

    @staticmethod
    def relative_time_delays(arrival_times):
        """Convert absolute arrival times [tA, tB, tC, tD] into the
        positive delays [|AB|, |AB|+BC, |AB|+|AD|] used for fitting."""
        trel = arrival_times[1:] - arrival_times[0]
        trel = [abs(trel[0]), abs(trel[0]) + trel[1], abs(trel[0]) + abs(trel[2])]
        return np.array(trel)

    def optimize_fit(self, kwargs_fit=None, macro_init=None, print_output=False):
        """Run the solver on this lens and return (optimized_data,
        optimized_model); pass data to fit via kwargs_fit['datatofit'],
        otherwise the class-level observed data is used."""
        # Never use a mutable default argument (it is shared across calls),
        # and copy the caller's dict before deleting keys from it.
        kwargs_fit = {} if kwargs_fit is None else dict(kwargs_fit)
        if 'datatofit' in kwargs_fit.keys():
            data = kwargs_fit['datatofit']
            del kwargs_fit['datatofit']
        else:
            data = self.data

        optdata, optmodel = self._fit(data, self.solver, kwargs_fit, macromodel_init=macro_init)

        if print_output:
            self._print_output(optdata[0], optmodel[0])

        return optdata[0], optmodel[0]

    def optimize_fit_lensmodel(self, kwargs_fit=None, macro_init=None, print_output=False):
        """Same as optimize_fit but through the lenstronomy-based fitter;
        the lens identifier is injected into the fit kwargs."""
        # Copy before updating so the shared default / caller's dict is
        # never mutated.
        kwargs_fit = {} if kwargs_fit is None else dict(kwargs_fit)
        kwargs_fit.update({'identifier': self.identifier})
        optdata, optmodel = self._fit_lensmodel(self.data, self.solver, kwargs_fit, macromodel_init=macro_init)

        if print_output:
            self._print_output(optdata[0], optmodel[0])

        return optdata[0], optmodel[0]

    def _print_output(self, optdata, optmodel):
        """Print a human-readable summary of one optimization result."""
        macromodel = optmodel.lens_components[0]

        print('optimized mags: ', optdata.m)
        print('observed mags: ', self.data.m)
        print('lensmodel fit: ')
        print('Einstein radius: ', macromodel.lenstronomy_args['theta_E'])
        print('shear, shear_theta:', macromodel.shear, macromodel.shear_theta)
        print('ellipticity, PA:', macromodel.ellip_PA_polar()[0], macromodel.ellip_PA_polar()[1])
        print('centroid: ', macromodel.lenstronomy_args['center_x'],
              macromodel.lenstronomy_args['center_y'])
        print('\n')
        print('flux ratios w.r.t. image '+str(self.fluximg)+':')
        print('observed: ', self.data.compute_flux_ratios(index=self.flux_ratio_index))
        print('recovered: ', optdata.compute_flux_ratios(index=self.flux_ratio_index))
"MagniPy.lensdata.Data",
"MagniPy.util.approx_theta_E",
"numpy.array",
"MagniPy.LensBuild.defaults.get_default_SIE",
"numpy.zeros_like"
] | [((499, 531), 'numpy.array', 'np.array', (['[1.0, 0.5, 0.51, 0.35]'], {}), '([1.0, 0.5, 0.51, 0.35])\n', (507, 531), True, 'import numpy as np\n'), ((545, 566), 'numpy.array', 'np.array', (['([0.005] * 4)'], {}), '([0.005] * 4)\n', (553, 566), True, 'import numpy as np\n'), ((579, 600), 'numpy.array', 'np.array', (['([0.005] * 4)'], {}), '([0.005] * 4)\n', (587, 600), True, 'import numpy as np\n'), ((934, 962), 'numpy.array', 'np.array', (['[31.0, 36.0, 76.0]'], {}), '([31.0, 36.0, 76.0])\n', (942, 962), True, 'import numpy as np\n'), ((983, 1008), 'numpy.array', 'np.array', (['[2.0, 1.5, 2.0]'], {}), '([2.0, 1.5, 2.0])\n', (991, 1008), True, 'import numpy as np\n'), ((1022, 1044), 'numpy.zeros_like', 'np.zeros_like', (['sigma_x'], {}), '(sigma_x)\n', (1035, 1044), True, 'import numpy as np\n'), ((1684, 1760), 'MagniPy.lensdata.Data', 'Data', (['x', 'y', 'm', 'None', 'None'], {'sigma_x': 'sigma_x', 'sigma_y': 'sigma_y', 'sigma_m': 'sigma_m'}), '(x, y, m, None, None, sigma_x=sigma_x, sigma_y=sigma_y, sigma_m=sigma_m)\n', (1688, 1760), False, 'from MagniPy.lensdata import Data\n'), ((1943, 1965), 'MagniPy.LensBuild.defaults.get_default_SIE', 'get_default_SIE', (['zlens'], {}), '(zlens)\n', (1958, 1965), False, 'from MagniPy.LensBuild.defaults import get_default_SIE_random, get_default_SIE\n'), ((2013, 2033), 'MagniPy.util.approx_theta_E', 'approx_theta_E', (['x', 'y'], {}), '(x, y)\n', (2027, 2033), False, 'from MagniPy.util import approx_theta_E\n'), ((383, 424), 'numpy.array', 'np.array', (['[-0.0, -0.738, -0.7446, 1.1284]'], {}), '([-0.0, -0.738, -0.7446, 1.1284])\n', (391, 424), True, 'import numpy as np\n'), ((441, 482), 'numpy.array', 'np.array', (['[0.0, -1.961, -0.4537, -1.2565]'], {}), '([0.0, -1.961, -0.4537, -1.2565])\n', (449, 482), True, 'import numpy as np\n'), ((2819, 2833), 'numpy.array', 'np.array', (['trel'], {}), '(trel)\n', (2827, 2833), True, 'import numpy as np\n')] |
#!/bin/env python3
import sys
def eprint(*args, **kwargs):
    """Echo *args* to standard error; accepts the same keyword
    arguments as the built-in print()."""
    print(*args, file=sys.stderr, **kwargs)
import os
import json
import ast
import re
import time
from datetime import datetime
import subprocess
import traceback
from collections import Counter
import numpy as np
import threading
from response import Response
from query_graph_info import QueryGraphInfo
from knowledge_graph_info import KnowledgeGraphInfo
from actions_parser import ActionsParser
from ARAX_filter import ARAXFilter
from ARAX_resultify import ARAXResultify
from ARAX_query_graph_interpreter import ARAXQueryGraphInterpreter
from ARAX_messenger import ARAXMessenger
from ARAX_ranker import ARAXRanker
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../UI/OpenAPI/python-flask-server/")
from swagger_server.models.message import Message
from swagger_server.models.knowledge_graph import KnowledgeGraph
from swagger_server.models.query_graph import QueryGraph
from swagger_server.models.q_node import QNode
from swagger_server.models.q_edge import QEdge
from swagger_server.models.previous_message_processing_plan import PreviousMessageProcessingPlan
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../..")
from RTXConfiguration import RTXConfiguration
from swagger_server.models.message import Message
from swagger_server.models.q_node import QNode
from swagger_server.models.q_edge import QEdge
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../reasoningtool/QuestionAnswering")
from ParseQuestion import ParseQuestion
from Q0Solution import Q0
#import ReasoningUtilities
from QueryGraphReasoner import QueryGraphReasoner
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../UI/Feedback/")
from RTXFeedback import RTXFeedback
class ARAXQuery:
    """Top-level dispatcher for an incoming ARAX query.

    Examines the incoming query envelope, decides whether it contains a
    query_graph, a previous_message_processing_plan (ARAXi DSL), or a canned
    query_type_id/terms question, and routes it to the appropriate handler.
    The resulting Message object is stored on self.message and a Response
    object (log + status) on self.response.
    """

    #### Constructor
    def __init__(self):
        # Response (log/status) and Message (result) are populated by query()
        self.response = None
        self.message = None

    def query_return_stream(self,query):
        """Run the query in a background thread and stream log messages.

        Generator: yields one JSON-encoded log record per line while the
        query executes, then yields the final Message as a single JSON line.
        """
        main_query_thread = threading.Thread(target=self.asynchronous_query, args=(query,))
        main_query_thread.start()

        if self.response is None or "DONE" not in self.response.status:
            # Sleep until a response object has been created
            while self.response is None:
                time.sleep(0.1)
            i_message = 0
            n_messages = len(self.response.messages)
            # Stream log messages as they accumulate until the worker signals DONE
            while "DONE" not in self.response.status:
                n_messages = len(self.response.messages)
                while i_message < n_messages:
                    yield(json.dumps(self.response.messages[i_message])+"\n")
                    i_message += 1
                time.sleep(0.2)

            # #### If there are any more logging messages in the queue, send them first
            n_messages = len(self.response.messages)
            while i_message < n_messages:
                yield(json.dumps(self.response.messages[i_message])+"\n")
                i_message += 1

        # Remove the little DONE flag the other thread used to signal this thread that it is done
        self.response.status = re.sub('DONE,','',self.response.status)

        # Stream the resulting message back to the client
        yield(json.dumps(ast.literal_eval(repr(self.message)))+"\n")

        # Wait until both threads rejoin here and the return
        main_query_thread.join()
        return { 'DONE': True }

    def asynchronous_query(self,query):
        """Worker entry point for query_return_stream(): run the query and
        flag completion by prefixing 'DONE,' onto the response status."""
        #### Define a new response object if one does not yet exist
        if self.response is None:
            self.response = Response()
        result = self.query(query)

        message = self.message
        if message is None:
            message = Message()
            self.message = message
        message.message_code = result.error_code
        message.code_description = result.message
        message.log = result.messages

        # Insert a little flag into the response status to denote that this thread is done
        self.response.status = f"DONE,{self.response.status}"
        return

    def query_return_message(self,query):
        """Run the query synchronously and return the completed Message."""
        result = self.query(query)

        message = self.message
        if message is None:
            message = Message()
            self.message = message
        message.message_code = result.error_code
        message.code_description = result.message
        message.log = result.messages
        return message

    def query(self,query):
        """Main dispatch: route the query to the query_graph interpreter, the
        processing-plan executor, or the legacy canned-query handlers.

        Returns the Response object; the Message (if any) is on self.message.
        """
        #### Define a default response
        response = Response()
        self.response = response
        #Response.output = 'STDERR'
        response.info(f"ARAXQuery launching on incoming Message")

        #### Determine a plan for what to do based on the input
        result = self.examine_incoming_query(query)
        if result.status != 'OK':
            return response
        query_attributes = result.data

        # #### If we have a query_graph in the input query
        if "have_query_graph" in query_attributes:

            # Then if there is also a processing plan, assume they go together. Leave the query_graph intact
            # and then will later execute the processing plan
            if "have_previous_message_processing_plan" in query_attributes:
                pass
            else:
                # Translate the bare query_graph into an ARAXi plan so it can be
                # executed by the same processing-plan machinery below
                response.info(f"Found input query_graph. Interpreting it and generating ARAXi processing plan to answer it")
                interpreter = ARAXQueryGraphInterpreter()
                query['message'] = ARAXMessenger().from_dict(query['message'])
                result = interpreter.translate_to_araxi(query['message'])
                response.merge(result)
                if result.status != 'OK':
                    return response
                query['previous_message_processing_plan'] = {}
                query['previous_message_processing_plan']['processing_actions'] = result.data['araxi_commands']
                query_attributes['have_previous_message_processing_plan'] = True

            #response.info(f"Found input query_graph. Sending to the QueryGraphReasoner")
            #qgr = QueryGraphReasoner()
            #message = qgr.answer(query["message"]["query_graph"], TxltrApiFormat=True)
            ##self.log_query(query,message,'new')
            #rtxFeedback = RTXFeedback()
            #rtxFeedback.connect()
            #rtxFeedback.addNewMessage(message,query)
            #rtxFeedback.disconnect()
            #self.limit_message(message,query)
            #self.message = message
            #return response

        #### If we have a previous message processing plan, handle that
        if "have_previous_message_processing_plan" in query_attributes:
            response.info(f"Found input processing plan. Sending to the ProcessingPlanExecutor")
            result = self.executeProcessingPlan(query)
            return response

        #### Otherwise extract the id and the terms from the incoming parameters
        else:
            response.info(f"Found id and terms from canned query")
            id = query["message"]["query_type_id"]
            terms = query["message"]["terms"]

            #### Create an RTX Feedback management object
            response.info(f"Try to find a cached message for this canned query")
            rtxFeedback = RTXFeedback()
            rtxFeedback.connect()
            cachedMessage = rtxFeedback.getCachedMessage(query)

            #### If we can find a cached message for this query and this version of RTX, then return the cached message
            if ( cachedMessage is not None ):
                response.info(f"Loaded cached message for return")
                apiMessage = Message().from_dict(cachedMessage)
                rtxFeedback.disconnect()
                self.limit_message(apiMessage,query)

                # Backfill message_code from the legacy result_code field if needed
                if apiMessage.message_code is None:
                    if apiMessage.result_code is not None:
                        apiMessage.message_code = apiMessage.result_code
                    else:
                        apiMessage.message_code = "wha??"

                #self.log_query(query,apiMessage,'cached')
                self.message = apiMessage
                return response

            #### Still have special handling for Q0
            if id == 'Q0':
                response.info(f"Answering 'what is' question with Q0 handler")
                q0 = Q0()
                message = q0.answer(terms["term"],use_json=True)
                if 'original_question' in query["message"]:
                    message.original_question = query["message"]["original_question"]
                    message.restated_question = query["message"]["restated_question"]
                message.query_type_id = query["message"]["query_type_id"]
                message.terms = query["message"]["terms"]
                id = message.id
                #self.log_query(query,message,'new')
                rtxFeedback.addNewMessage(message,query)
                rtxFeedback.disconnect()
                self.limit_message(message,query)
                self.message = message
                return response

            #### Else call out to original solution scripts for an answer
            else:
                response.info(f"Entering legacy handler for a canned query")

                #### Use the ParseQuestion system to determine what the execution_string should be
                txltr = ParseQuestion()
                eprint(terms)
                command = "python3 " + txltr.get_execution_string(id,terms)

                #### Set CWD to the QuestioningAnswering area and then invoke from the shell the Q1Solution code
                cwd = os.getcwd()
                os.chdir(os.path.dirname(os.path.abspath(__file__))+"/../../reasoningtool/QuestionAnswering")
                eprint(command)
                returnedText = subprocess.run( [ command ], stdout=subprocess.PIPE, shell=True )
                os.chdir(cwd)

                #### reformat the stdout result of the shell command into a string
                reformattedText = returnedText.stdout.decode('utf-8')
                #eprint(reformattedText)

                #### Try to decode that string into a message object
                try:
                    #data = ast.literal_eval(reformattedText)
                    data = json.loads(reformattedText)
                    message = Message.from_dict(data)
                    if message.message_code is None:
                        if message.result_code is not None:
                            message.message_code = message.result_code
                        else:
                            message.message_code = "wha??"

                #### If it fails, the just create a new Message object with a notice about the failure
                # (was a bare `except:`; narrowed to Exception so KeyboardInterrupt/SystemExit propagate)
                except Exception:
                    response.error("Error parsing the message from the reasoner. This is an internal bug that needs to be fixed. Unable to respond to this question at this time. The unparsable message was: " + reformattedText, error_code="InternalError551")
                    return response

                #print(query)
                if 'original_question' in query["message"]:
                    message.original_question = query["message"]["original_question"]
                    message.restated_question = query["message"]["restated_question"]
                message.query_type_id = query["message"]["query_type_id"]
                message.terms = query["message"]["terms"]

                #### Log the result and return the Message object
                #self.log_query(query,message,'new')
                rtxFeedback.addNewMessage(message,query)
                rtxFeedback.disconnect()

                #### Limit message
                self.limit_message(message,query)
                self.message = message
                return response

            #### If the query type id is not triggered above, then return an error
            # NOTE(review): both branches above return, so this appears unreachable; kept for safety
            response.error(f"The specified query id '{id}' is not supported at this time", error_code="UnsupportedQueryTypeID")
            rtxFeedback.disconnect()
            return response

    def examine_incoming_query(self,query):
        """Validate the incoming query envelope and record flags in
        response.data describing what it contains (message, query_graph,
        query_type_id/terms, processing plan)."""
        response = self.response
        response.info(f"Examine input query for needed information for dispatch")
        #eprint(query)

        #### Check to see if there's a processing plan
        if "previous_message_processing_plan" in query:
            response.data["have_previous_message_processing_plan"] = 1

        #### Check to see if the pre-0.9.2 query_message has come through
        if "query_message" in query:
            response.error("Query specified 'query_message' instead of 'message', which is pre-0.9.2 style. Please update.", error_code="Pre0.9.2Query")
            return response

        #### Check to see if there's a query message to process
        if "message" in query:
            response.data["have_message"] = 1

            #### Check the query_type_id and terms to make sure there is information in both
            if "query_type_id" in query["message"] and query["message"]["query_type_id"] is not None:
                if "terms" in query["message"] is not None:
                    response.data["have_query_type_id_and_terms"] = 1
                else:
                    response.error("query_type_id was provided but terms is empty", error_code="QueryTypeIdWithoutTerms")
                    return response
            elif "terms" in query["message"] and query["message"]["terms"] is not None:
                response.error("terms hash was provided without a query_type_id", error_code="TermsWithoutQueryTypeId")
                return response

            #### Check if there is a query_graph
            if "query_graph" in query["message"] and query["message"]["query_graph"] is not None:
                response.data["have_query_graph"] = 1

            #### If there is both a query_type_id and a query_graph, then return an error
            if "have_query_graph" in response.data and "have_query_type_id_and_terms" in response.data:
                response.error("Message contains both a query_type_id and a query_graph, which is disallowed", error_code="BothQueryTypeIdAndQueryGraph")
                return response

        #### Check to see if there is at least a message or a previous_message_processing_plan
        if "have_message" not in response.data and "have_previous_message_processing_plan" not in response.data:
            response.error("No message or previous_message_processing_plan present in Query", error_code="NoQueryMessageOrPreviousMessageProcessingPlan")
            return response

        # #### FIXME Need to do more validation and tidying of the incoming message here or somewhere

        #### If we got this far, then everything seems to be good enough to proceed
        return response

    def limit_message(self,message,query):
        """Truncate message.results to query['max_results'] (if set) and note
        the truncation in message.code_description."""
        if "max_results" in query and query["max_results"] is not None:
            if message.results is not None:
                if len(message.results) > query["max_results"]:
                    del message.results[query["max_results"]:]
                    message.code_description += " (output is limited to "+str(query["max_results"]) + " results)"

    #### Given an input query with a processing plan, execute that processing plan on the input
    def executeProcessingPlan(self,inputEnvelope):
        """Execute an ARAXi processing plan: load/merge any referenced prior
        Messages, then run each DSL action in order, and finally honor the
        explicit or implicit return() action (store and/or transmit)."""
        response = self.response
        response.debug(f"Entering executeProcessingPlan")
        messages = []
        message = None

        # If there is already a message (perhaps with a query_graph) already in the query, preserve it
        if 'message' in inputEnvelope and inputEnvelope['message'] is not None:
            message = inputEnvelope['message']
            messages = [ message ]

        message_id = None
        query = None

        #### Pull out the main processing plan envelope
        envelope = PreviousMessageProcessingPlan.from_dict(inputEnvelope["previous_message_processing_plan"])

        #### Connect to the message store just once, even if we won't use it
        rtxFeedback = RTXFeedback()
        rtxFeedback.connect()

        #### Create a messenger object for basic message processing
        messenger = ARAXMessenger()

        #### If there are URIs provided, try to load them
        if envelope.previous_message_uris is not None:
            response.debug(f"Found previous_message_uris")
            for uri in envelope.previous_message_uris:
                response.debug(f"  messageURI={uri}")
                matchResult = re.match( r'http[s]://arax.rtx.ai/.*api/rtx/.+/message/(\d+)',uri,re.M|re.I )
                if matchResult:
                    referenced_message_id = matchResult.group(1)
                    response.debug(f"Found local RTX identifier corresponding to respond_id {referenced_message_id}")
                    response.debug(f"Loading message_id {referenced_message_id}")
                    referenced_message = rtxFeedback.getMessage(referenced_message_id)
                    #eprint(type(message))
                    # getMessage returns a tuple on failure; a dict-like on success
                    if not isinstance(referenced_message,tuple):
                        referenced_message = ARAXMessenger().from_dict(referenced_message)
                        response.debug(f"Original question was: {referenced_message.original_question}")
                        messages.append(referenced_message)
                        message_id = referenced_message_id
                        query = { "query_type_id": referenced_message.query_type_id, "restated_question": referenced_message.restated_question, "terms": referenced_message.terms }
                    else:
                        response.error(f"Unable to load message_id {referenced_message_id}", error_code="CannotLoadMessageById")
                        return response

        #### If there are one or more previous_messages embedded in the POST, process them
        if envelope.previous_messages is not None:
            response.debug(f"Received previous_messages")
            for uploadedMessage in envelope.previous_messages:
                response.debug(f"uploadedMessage is a "+str(uploadedMessage.__class__))
                if str(uploadedMessage.__class__) == "<class 'swagger_server.models.message.Message'>":
                    uploadedMessage = ARAXMessenger().from_dict(uploadedMessage)
                    messages.append(uploadedMessage)

                    if uploadedMessage.results:
                        pass
                        #if message["terms"] is None:
                        #    message["terms"] = { "dummyTerm": "giraffe" }
                        #if message["query_type_id"] is None:
                        #    message["query_type_id"] = "UnknownQ"
                        #if message["restated_question"] is None:
                        #    message["restated_question"] = "Unknown question"
                        #if message["original_question"] is None:
                        #    message["original_question"] = "Unknown question"
                        #query = { "query_type_id": message["query_type_id"], "restated_question": message["restated_question"], "original_question": message["original_question"], "terms": message["terms"] }
                    else:
                        #response.error(f"Uploaded message does not contain a results. May be the wrong format")
                        #return response
                        response.warning(f"There are no results in this uploaded message, but maybe that's okay")
                else:
                    response.error(f"Uploaded message is not of type Message. It is of type"+str(uploadedMessage.__class__))
                    return response

        #### Take different actions based on the number of messages we now have in hand
        n_messages = len(messages)
        if n_messages == 0:
            response.debug(f"No starting messages were referenced. Will start with a blank template Message")
            result = messenger.create_message()
            message = result.data['message']
        elif n_messages == 1:
            response.debug(f"A single Message is ready and in hand")
            message = messages[0]
        else:
            response.debug(f"Multiple Messages were uploaded or imported by reference. However, proper merging code has not been implmented yet! Will use just the first Message for now.")
            message = messages[0]

        #### Examine the options that were provided and act accordingly
        optionsDict = {}
        if envelope.options:
            response.debug(f"Processing options were provided, but these are not implemented at the moment and will be ignored")
            for option in envelope.options:
                response.debug(f"  option="+option)
                optionsDict[option] = 1

        #### If there are processing_actions, then fulfill those
        if envelope.processing_actions:
            response.debug(f"Found processing_actions")
            actions_parser = ActionsParser()
            result = actions_parser.parse(envelope.processing_actions)
            response.merge(result)
            if result.error_code != 'OK':
                return response

            #### Import the individual ARAX processing modules and process DSL commands
            # Imported here (not at module top) to avoid circular imports
            from ARAX_expander import ARAXExpander
            from ARAX_overlay import ARAXOverlay
            from ARAX_filter_kg import ARAXFilterKG
            from ARAX_resultify import ARAXResultify
            from ARAX_filter_results import ARAXFilterResults
            expander = ARAXExpander()
            filter = ARAXFilter()
            overlay = ARAXOverlay()
            filter_kg = ARAXFilterKG()
            resultifier = ARAXResultify()
            filter_results = ARAXFilterResults()
            self.message = message

            #### Process each action in order
            action_stats = { }
            actions = result.data['actions']
            for action in actions:
                response.info(f"Processing action '{action['command']}' with parameters {action['parameters']}")
                nonstandard_result = False
                # expand/overlay merge their own sub-responses; don't merge twice
                skip_merge = False

                # Catch a crash
                try:
                    if action['command'] == 'create_message':
                        result = messenger.create_message()
                        message = result.data['message']
                        self.message = message
                    elif action['command'] == 'add_qnode':
                        result = messenger.add_qnode(message,action['parameters'])
                    elif action['command'] == 'add_qedge':
                        result = messenger.add_qedge(message,action['parameters'])
                    elif action['command'] == 'expand':
                        result = expander.apply(message,action['parameters'], response=response)
                        skip_merge = True
                    elif action['command'] == 'filter':
                        result = filter.apply(message,action['parameters'])
                    elif action['command'] == 'resultify':
                        result = resultifier.apply(message, action['parameters'])
                    elif action['command'] == 'overlay':  # recognize the overlay command
                        result = overlay.apply(message, action['parameters'], response=response)
                        skip_merge = True
                    elif action['command'] == 'filter_kg':  # recognize the filter_kg command
                        result = filter_kg.apply(message, action['parameters'])
                    elif action['command'] == 'filter_results':  # recognize the filter_kg command
                        result = filter_results.apply(message, action['parameters'])
                    elif action['command'] == 'query_graph_reasoner':
                        response.info(f"Sending current query_graph to the QueryGraphReasoner")
                        qgr = QueryGraphReasoner()
                        message = qgr.answer(ast.literal_eval(repr(message.query_graph)), TxltrApiFormat=True)
                        self.message = message
                        nonstandard_result = True
                    elif action['command'] == 'return':
                        action_stats['return_action'] = action
                        break
                    else:
                        response.error(f"Unrecognized command {action['command']}", error_code="UnrecognizedCommand")
                        return response
                except Exception as error:
                    exception_type, exception_value, exception_traceback = sys.exc_info()
                    response.error(f"An uncaught error occurred: {error}: {repr(traceback.format_exception(exception_type, exception_value, exception_traceback))}", error_code="UncaughtARAXiError")
                    return response

                #### Merge down this result and end if we're in an error state
                if nonstandard_result is False:
                    if not skip_merge:
                        response.merge(result)
                    if result.status != 'OK':
                        message.message_code = response.error_code
                        message.code_description = response.message
                        message.log = response.messages
                        return response

                #### Immediately after resultify, run the experimental ranker
                if action['command'] == 'resultify':
                    response.info(f"Running experimental reranker on results")
                    try:
                        ranker = ARAXRanker()
                        ranker.aggregate_scores(message, response=response)
                    except Exception as error:
                        exception_type, exception_value, exception_traceback = sys.exc_info()
                        response.error(f"An uncaught error occurred: {error}: {repr(traceback.format_exception(exception_type, exception_value, exception_traceback))}", error_code="UncaughtARAXiError")
                        return response

            #### At the end, process the explicit return() action, or implicitly perform one
            return_action = { 'command': 'return', 'parameters': { 'message': 'true', 'store': 'true' } }
            if action is not None and action['command'] == 'return':
                return_action = action

            #### If an explicit one left out some parameters, set the defaults
            # BUGFIX: these two lines previously used '==' (a no-op comparison), so a
            # user return() missing a parameter caused a KeyError in the lookups below
            if 'store' not in return_action['parameters']:
                return_action['parameters']['store'] = 'false'
            if 'message' not in return_action['parameters']:
                return_action['parameters']['message'] = 'false'

            # Fill out the message with data
            message.message_code = response.error_code
            message.code_description = response.message
            message.log = response.messages
            if message.query_options is None:
                message.query_options = {}
            message.query_options['processing_actions'] = envelope.processing_actions

            # If store=true, then put the message in the database
            if return_action['parameters']['store'] == 'true':
                response.debug(f"Storing resulting Message")
                message_id = rtxFeedback.addNewMessage(message,query)

            #### If asking for the full message back
            if return_action['parameters']['message'] == 'true':
                response.info(f"Processing is complete. Transmitting resulting Message back to client.")
                return response

            #### Else just the id is returned
            else:
                if message_id is None:
                    message_id = 0
                response.info(f"Processing is complete. Resulting Message id is {message_id} and is available to fetch via /message endpoint.")
                return( { "status": 200, "message_id": str(message_id), "n_results": message.n_results, "url": "https://arax.rtx.ai/api/rtx/v1/message/"+str(message_id) }, 200)
##################################################################################################
def stringify_dict(inputDict):
    """Render inputDict as a compact single-line string with keys in sorted
    order, e.g. {'a':'1','b':'2'}; an empty dict yields '{}'."""
    pairs = [
        "'" + str(key) + "':'" + str(value) + "'"
        for key, value in sorted(inputDict.items(), key=lambda item: item[0])
    ]
    return "{" + ",".join(pairs) + "}"
##################################################################################################
def main():
#### Parse command line options
import argparse
argparser = argparse.ArgumentParser(description='Primary interface to the ARAX system')
argparser.add_argument('--verbose', action='count', help='If set, print more information about ongoing processing' )
argparser.add_argument('example_number', type=int, help='Integer number of the example query to execute')
params = argparser.parse_args()
#### Set verbose
verbose = params.verbose
if verbose is None: verbose = 1
#### Create a response and ARAXQuery
response = Response()
araxq = ARAXQuery()
#### For debugging purposes, you can send all messages as they are logged to STDERR
#Response.output = 'STDERR'
#### Set the query based on the supplied example_number
if params.example_number == 1:
query = { 'message': { 'query_type_id': 'Q0', 'terms': { 'term': 'lovastatin' } } }
#query = { "query_type_id": "Q0", "terms": { "term": "lovastatin" }, "bypass_cache": "true" } # Use bypass_cache if the cache if bad for this question
elif params.example_number == 2:
query = { "message": { "query_graph": { "edges": [
{ "id": "qg2", "source_id": "qg1", "target_id": "qg0", "type": "physically_interacts_with" }
],
"nodes": [
{ "id": "qg0", "name": "acetaminophen", "curie": "CHEMBL.COMPOUND:CHEMBL112", "type": "chemical_substance" },
{ "id": "qg1", "name": None, "desc": "Generic protein", "curie": None, "type": "protein" }
] } } }
elif params.example_number == 3: # FIXME: Don't fix me, this is our planned demo example 1.
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(name=acetaminophen, id=n0)",
"add_qnode(type=protein, id=n1)",
"add_qedge(source_id=n0, target_id=n1, id=e0)",
"expand(edge_id=e0)",
"resultify(ignore_edge_direction=true)",
"filter_results(action=limit_number_of_results, max_results=10)",
"return(message=true, store=false)",
]}}
elif params.example_number == 301: # Variant of 3 with NGD
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(name=acetaminophen, id=n0)",
"add_qnode(type=protein, id=n1)",
"add_qedge(source_id=n0, target_id=n1, id=e0)",
"expand(edge_id=e0)",
"overlay(action=compute_ngd, virtual_relation_label=N1, source_qnode_id=n0, target_qnode_id=n1)",
"resultify(ignore_edge_direction=true)",
"return(message=true, store=true)",
]}}
elif params.example_number == 4:
query = { "previous_message_processing_plan": { "processing_actions": [
"add_qnode(name=hypertension, id=n00)",
"add_qnode(type=protein, id=n01)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"expand(edge_id=e00)",
"resultify()",
"return(message=true, store=false)",
] } }
elif params.example_number == 5: # test overlay with ngd: hypertension->protein
query = { "previous_message_processing_plan": { "processing_actions": [
"add_qnode(name=hypertension, id=n00)",
"add_qnode(type=protein, id=n01)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"expand(edge_id=e00)",
"overlay(action=compute_ngd)",
"resultify()",
"return(message=true, store=true)",
] } }
elif params.example_number == 6: # test overlay
query = { "previous_message_processing_plan": { "processing_actions": [
"create_message",
"add_qnode(curie=DOID:12384, id=n00)",
"add_qnode(type=phenotypic_feature, is_set=True, id=n01)",
"add_qedge(source_id=n00, target_id=n01, id=e00, type=has_phenotype)",
"expand(edge_id=e00)",
#"overlay(action=overlay_clinical_info, paired_concept_frequency=true)",
#"overlay(action=overlay_clinical_info, chi_square=true, virtual_relation_label=C1, source_qnode_id=n00, target_qnode_id=n01)",
"overlay(action=overlay_clinical_info, paired_concept_frequency=true, virtual_relation_label=C1, source_qnode_id=n00, target_qnode_id=n01)",
#"overlay(action=compute_ngd, default_value=inf)",
#"overlay(action=compute_ngd, virtual_relation_label=NGD1, source_qnode_id=n00, target_qnode_id=n01)",
"filter(maximum_results=2)",
"return(message=true, store=true)",
] } }
elif params.example_number == 7: # stub to test out the compute_jaccard feature
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(curie=DOID:14330, id=n00)", # parkinsons
"add_qnode(type=protein, is_set=True, id=n01)",
"add_qnode(type=chemical_substance, is_set=true, id=n02)",
"add_qedge(source_id=n01, target_id=n00, id=e00)",
"add_qedge(source_id=n01, target_id=n02, id=e01)",
"expand(edge_id=[e00,e01])",
"overlay(action=compute_jaccard, start_node_id=n00, intermediate_node_id=n01, end_node_id=n02, virtual_relation_label=J1)",
"return(message=true, store=true)",
]}}
elif params.example_number == 8: # to test jaccard with known result # FIXME: ERROR: Node DOID:8398 has been returned as an answer for multiple query graph nodes (n00, n02)
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(curie=DOID:8398, id=n00)", # osteoarthritis
"add_qnode(type=phenotypic_feature, is_set=True, id=n01)",
"add_qnode(type=disease, is_set=true, id=n02)",
"add_qedge(source_id=n01, target_id=n00, id=e00)",
"add_qedge(source_id=n01, target_id=n02, id=e01)",
"expand(edge_id=[e00,e01])",
"return(message=true, store=true)",
]}}
elif params.example_number == 9: # to test jaccard with known result. This check's out by comparing with match p=(s:disease{id:"DOID:1588"})-[]-(r:protein)-[]-(:chemical_substance) return p and manually counting
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(curie=DOID:1588, id=n00)",
"add_qnode(type=protein, is_set=True, id=n01)",
"add_qnode(type=chemical_substance, is_set=true, id=n02)",
"add_qedge(source_id=n01, target_id=n00, id=e00)",
"add_qedge(source_id=n01, target_id=n02, id=e01)",
"expand(edge_id=[e00,e01])",
"overlay(action=compute_jaccard, start_node_id=n00, intermediate_node_id=n01, end_node_id=n02, virtual_relation_label=J1)",
"return(message=true, store=true)",
]}}
elif params.example_number == 10: # test case of drug prediction
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(curie=DOID:1588, id=n00)",
"add_qnode(type=chemical_substance, is_set=false, id=n01)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"expand(edge_id=e00)",
"overlay(action=predict_drug_treats_disease)",
"resultify(ignore_edge_direction=True)",
"return(message=true, store=true)",
]}}
elif params.example_number == 11: # test overlay with overlay_clinical_info, paired_concept_frequency via COHD
query = { "previous_message_processing_plan": { "processing_actions": [
"create_message",
"add_qnode(curie=DOID:0060227, id=n00)", # <NAME>
"add_qnode(type=phenotypic_feature, is_set=True, id=n01)",
"add_qedge(source_id=n00, target_id=n01, id=e00, type=has_phenotype)",
"expand(edge_id=e00)",
"overlay(action=overlay_clinical_info, paired_concept_frequency=true)",
#"overlay(action=overlay_clinical_info, paired_concept_frequency=true, virtual_relation_label=COHD1, source_qnode_id=n00, target_qnode_id=n01)",
"filter(maximum_results=2)",
"return(message=true, store=true)",
] } }
elif params.example_number == 12: # dry run of example 2 # FIXME NOTE: this is our planned example 2 (so don't fix, it's just so it's highlighted in my IDE)
query = { "previous_message_processing_plan": { "processing_actions": [
"create_message",
"add_qnode(name=DOID:14330, id=n00)",
"add_qnode(type=protein, is_set=true, id=n01)",
"add_qnode(type=chemical_substance, id=n02)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"add_qedge(source_id=n01, target_id=n02, id=e01, type=physically_interacts_with)",
"expand(edge_id=[e00,e01], kp=ARAX/KG1)",
"overlay(action=compute_jaccard, start_node_id=n00, intermediate_node_id=n01, end_node_id=n02, virtual_relation_label=J1)",
"filter_kg(action=remove_edges_by_attribute, edge_attribute=jaccard_index, direction=below, threshold=.2, remove_connected_nodes=t, qnode_id=n02)",
"filter_kg(action=remove_edges_by_property, edge_property=provided_by, property_value=Pharos)", # can be removed, but shows we can filter by Knowledge provider
"overlay(action=predict_drug_treats_disease, source_qnode_id=n02, target_qnode_id=n00, virtual_relation_label=P1)",
"resultify(ignore_edge_direction=true)",
"filter_results(action=sort_by_edge_attribute, edge_attribute=jaccard_index, direction=descending, max_results=15)",
"return(message=true, store=true)",
] } }
elif params.example_number == 13: # add pubmed id's
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(name=DOID:1227, id=n00)",
"add_qnode(type=chemical_substance, is_set=true, id=n01)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"expand(edge_id=e00)",
"overlay(action=add_node_pmids, max_num=15)",
"return(message=true, store=false)"
]}}
elif params.example_number == 14: # test
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(name=DOID:8712, id=n00)",
"add_qnode(type=phenotypic_feature, is_set=true, id=n01)",
"add_qnode(type=chemical_substance, is_set=true, id=n02)",
"add_qnode(type=protein, is_set=true, id=n03)",
"add_qedge(source_id=n00, target_id=n01, id=e00, type=has_phenotype)", # phenotypes of disease
"add_qedge(source_id=n02, target_id=n01, id=e01, type=indicated_for)", # only look for drugs that are indicated for those phenotypes
"add_qedge(source_id=n02, target_id=n03, id=e02)", # find proteins that interact with those drugs
"expand(edge_id=[e00, e01, e02])",
"overlay(action=compute_jaccard, start_node_id=n00, intermediate_node_id=n01, end_node_id=n02, virtual_relation_label=J1)", # only look at drugs that target lots of phenotypes
#"filter_kg(action=remove_edges_by_attribute, edge_attribute=jaccard_index, direction=below, threshold=.06, remove_connected_nodes=t, qnode_id=n02)", # remove edges and drugs that connect to few phenotypes
#"filter_kg(action=remove_edges_by_type, edge_type=J1, remove_connected_nodes=f)",
##"overlay(action=overlay_clinical_info, paired_concept_frequency=true)", # overlay with COHD information
#"overlay(action=overlay_clinical_info, paired_concept_frequency=true, virtual_relation_label=C1, source_qnode_id=n00, target_qnode_id=n02)", # overlay drug->disease virtual edges with COHD information
#"filter_kg(action=remove_edges_by_attribute, edge_attribute=paired_concept_frequency, direction=below, threshold=0.0000001, remove_connected_nodes=t, qnode_id=n02)", # remove drugs below COHD threshold
#"overlay(action=compute_jaccard, start_node_id=n01, intermediate_node_id=n02, end_node_id=n03, virtual_relation_label=J2)", # look at proteins that share many/any drugs in common with the phenotypes
#"filter_kg(action=remove_edges_by_attribute, edge_attribute=jaccard_index, direction=below, threshold=.001, remove_connected_nodes=t, qnode_id=n03)",
#"filter_kg(action=remove_edges_by_type, edge_type=J2, remove_connected_nodes=f)",
#"filter_kg(action=remove_edges_by_type, edge_type=C1, remove_connected_nodes=f)",
##"overlay(action=compute_ngd)",
"return(message=true, store=false)"
]}}
elif params.example_number == 15: # FIXME NOTE: this is our planned example 3 (so don't fix, it's just so it's highlighted in my IDE)
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(curie=DOID:9406, id=n00)", # hypopituitarism
"add_qnode(type=chemical_substance, is_set=true, id=n01)", # look for all drugs associated with this disease (29 total drugs)
"add_qnode(type=protein, id=n02)", # look for proteins associated with these diseases (240 total proteins)
"add_qedge(source_id=n00, target_id=n01, id=e00)", # get connections
"add_qedge(source_id=n01, target_id=n02, id=e01)", # get connections
"expand(edge_id=[e00,e01])", # expand the query graph
"overlay(action=overlay_clinical_info, observed_expected_ratio=true, virtual_relation_label=C1, source_qnode_id=n00, target_qnode_id=n01)", # Look in COHD to find which drug are being used to treat this disease based on the log ratio of expected frequency of this drug being used to treat a disease, vs. the observed number of times it’s used to treat this disease
"filter_kg(action=remove_edges_by_attribute, edge_attribute=observed_expected_ratio, direction=below, threshold=3, remove_connected_nodes=t, qnode_id=n01)", # concentrate only on those drugs that are more likely to be treating this disease than expected
"filter_kg(action=remove_orphaned_nodes, node_type=protein)", # remove proteins that got disconnected as a result of this filter action
"overlay(action=compute_ngd, virtual_relation_label=N1, source_qnode_id=n01, target_qnode_id=n02)", # use normalized google distance to find how frequently the protein and the drug are mentioned in abstracts
"filter_kg(action=remove_edges_by_attribute, edge_attribute=normalized_google_distance, direction=above, threshold=0.85, remove_connected_nodes=t, qnode_id=n02)", # remove proteins that are not frequently mentioned together in PubMed abstracts
"resultify(ignore_edge_direction=true)",
"return(message=true, store=true)"
]}}
elif params.example_number == 1515: # Exact duplicate of ARAX_Example3.ipynb
query = {"previous_message_processing_plan": {"processing_actions": [
"add_qnode(curie=DOID:9406, id=n00)",
"add_qnode(type=chemical_substance, is_set=true, id=n01)",
"add_qnode(type=protein, id=n02)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"add_qedge(source_id=n01, target_id=n02, id=e01)",
"expand(edge_id=[e00,e01])",
"overlay(action=overlay_clinical_info, observed_expected_ratio=true, virtual_relation_label=C1, source_qnode_id=n00, target_qnode_id=n01)",
"filter_kg(action=remove_edges_by_attribute, edge_attribute=observed_expected_ratio, direction=below, threshold=3, remove_connected_nodes=t, qnode_id=n01)",
"filter_kg(action=remove_orphaned_nodes, node_type=protein)",
"overlay(action=compute_ngd, virtual_relation_label=N1, source_qnode_id=n01, target_qnode_id=n02)",
"filter_kg(action=remove_edges_by_attribute, edge_attribute=normalized_google_distance, direction=above, threshold=0.85, remove_connected_nodes=t, qnode_id=n02)",
"resultify(ignore_edge_direction=true)",
"return(message=true, store=true)"
]}}
elif params.example_number == 16: # To test COHD obs/exp ratio
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(name=DOID:8398, id=n00)",
"add_qnode(type=phenotypic_feature, is_set=true, id=n01)",
"add_qedge(source_id=n00, target_id=n01, type=has_phenotype, id=e00)",
"expand(edge_id=e00)",
"return(message=true, store=true)"
]}}
elif params.example_number == 17: # Test resultify #FIXME: this returns a single result instead of a list (one for each disease/phenotype found)
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(name=DOID:731, id=n00, type=disease, is_set=false)",
"add_qnode(type=phenotypic_feature, is_set=false, id=n01)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"expand(edge_id=e00)",
'resultify(ignore_edge_direction=true)',
"return(message=true, store=false)"
]}}
elif params.example_number == 18: # test removing orphaned nodes
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(name=DOID:9406, id=n00)",
"add_qnode(type=chemical_substance, is_set=true, id=n01)",
"add_qnode(type=protein, is_set=true, id=n02)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"add_qedge(source_id=n01, target_id=n02, id=e01, type=physically_interacts_with)",
"expand(edge_id=[e00, e01])",
"filter_kg(action=remove_edges_by_type, edge_type=physically_interacts_with, remove_connected_nodes=f)",
"filter_kg(action=remove_orphaned_nodes, node_type=protein)",
"return(message=true, store=false)"
]}}
elif params.example_number == 19: # Let's see what happens if you ask for a node in KG2, but not in KG1 and try to expand
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(name=CUI:C1452002, id=n00)",
"add_qnode(type=chemical_substance, is_set=true, id=n01)",
"add_qedge(source_id=n00, target_id=n01, id=e00, type=interacts_with)",
"expand(edge_id=e00)",
"return(message=true, store=false)"
]}} # returns response of "OK" with the info: QueryGraphReasoner found no results for this query graph
elif params.example_number == 20: # Now try with KG2 expander
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(name=CUI:C1452002, id=n00)",
"add_qnode(type=chemical_substance, is_set=true, id=n01)",
"add_qedge(source_id=n00, target_id=n01, id=e00, type=interacts_with)",
"expand(edge_id=e00, kp=ARAX/KG2)",
"return(message=true, store=false)"
]}} # returns response of "OK" with the info: QueryGraphReasoner found no results for this query graph
elif params.example_number == 101: # test of filter results code
query = { "previous_message_processing_plan": { "processing_actions": [
"create_message",
"add_qnode(name=DOID:14330, id=n00)",
"add_qnode(type=protein, is_set=true, id=n01)",
"add_qnode(type=chemical_substance, id=n02)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"add_qedge(source_id=n01, target_id=n02, id=e01, type=physically_interacts_with)",
"expand(edge_id=[e00,e01])",
"overlay(action=compute_jaccard, start_node_id=n00, intermediate_node_id=n01, end_node_id=n02, virtual_relation_label=J1)",
"filter_kg(action=remove_edges_by_attribute, edge_attribute=jaccard_index, direction=below, threshold=.2, remove_connected_nodes=t, qnode_id=n02)",
"filter_kg(action=remove_edges_by_property, edge_property=provided_by, property_value=Pharos)",
"resultify(ignore_edge_direction=true)",
"filter_results(action=sort_by_edge_attribute, edge_attribute=jaccard_index, direction=d, max_results=15)",
#"filter_results(action=sort_by_edge_count, direction=a)",
#"filter_results(action=limit_number_of_results, max_results=5)",
"return(message=true, store=false)",
] } }
elif params.example_number == 102: # add pubmed id's
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(name=DOID:1227, id=n00)",
"add_qnode(type=chemical_substance, id=n01)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"expand(edge_id=e00)",
"overlay(action=add_node_pmids, max_num=15)",
"resultify(ignore_edge_direction=true)",
"filter_results(action=sort_by_node_attribute, node_attribute=pubmed_ids, direction=a, max_results=20)",
"return(message=true, store=false)"
]}}
elif params.example_number == 103: # add pubmed id's
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(name=DOID:1227, id=n00)",
"add_qnode(type=chemical_substance, is_set=true, id=n01)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"expand(edge_id=e00)",
"overlay(action=add_node_pmids, max_num=15)",
"filter_kg(action=remove_nodes_by_property, node_property=uri, property_value=https://www.ebi.ac.uk/chembl/compound/inspect/CHEMBL2111164)",
"return(message=true, store=false)"
]}}
elif params.example_number == 1212: # dry run of example 2 with the machine learning model
query = { "previous_message_processing_plan": { "processing_actions": [
"create_message",
"add_qnode(curie=DOID:14330, id=n00)",
"add_qnode(type=protein, is_set=true, id=n01)",
"add_qnode(type=chemical_substance, id=n02)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"add_qedge(source_id=n01, target_id=n02, id=e01, type=physically_interacts_with)",
"expand(edge_id=[e00,e01], kp=ARAX/KG1)",
"overlay(action=compute_jaccard, start_node_id=n00, intermediate_node_id=n01, end_node_id=n02, virtual_relation_label=J1)",
"filter_kg(action=remove_edges_by_attribute, edge_attribute=jaccard_index, direction=below, threshold=.2, remove_connected_nodes=t, qnode_id=n02)",
"filter_kg(action=remove_edges_by_property, edge_property=provided_by, property_value=Pharos)", # can be removed, but shows we can filter by Knowledge provider
"overlay(action=predict_drug_treats_disease, source_qnode_id=n02, target_qnode_id=n00, virtual_relation_label=P1)", # overlay by probability that the drug treats the disease
"resultify(ignore_edge_direction=true)",
"filter_results(action=sort_by_edge_attribute, edge_attribute=probability_drug_treats, direction=descending, max_results=15)", # filter by the probability that the drug treats the disease. cilnidipine prob=0.8976650309881645 which is the 9th highest (so top 10)
"return(message=true, store=false)",
] } }
elif params.example_number == 201: # KG2 version of demo example 1 (acetaminophen)
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(id=n00, curie=CHEMBL.COMPOUND:CHEMBL112)", # acetaminophen
"add_qnode(id=n01, type=protein, is_set=true)",
"add_qedge(id=e00, source_id=n00, target_id=n01)",
"expand(edge_id=e00, kp=ARAX/KG2)",
"return(message=true, store=false)",
]}}
elif params.example_number == 202: # KG2 version of demo example 2 (Parkinson's)
query = { "previous_message_processing_plan": { "processing_actions": [
"create_message",
"add_qnode(name=DOID:14330, id=n00)",
"add_qnode(type=protein, is_set=true, id=n01)",
"add_qnode(type=chemical_substance, id=n02)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"add_qedge(source_id=n01, target_id=n02, id=e01, type=molecularly_interacts_with)", # for KG2
#"add_qedge(source_id=n01, target_id=n02, id=e01, type=physically_interacts_with)", # for KG1
"expand(edge_id=[e00,e01], kp=ARAX/KG2)", # for KG2
#"expand(edge_id=[e00,e01], kp=ARAX/KG1)", # for KG1
"overlay(action=compute_jaccard, start_node_id=n00, intermediate_node_id=n01, end_node_id=n02, virtual_relation_label=J1)", # seems to work just fine
"filter_kg(action=remove_edges_by_attribute, edge_attribute=jaccard_index, direction=below, threshold=.008, remove_connected_nodes=t, qnode_id=n02)",
"resultify(ignore_edge_direction=true)",
"filter_results(action=sort_by_edge_attribute, edge_attribute=jaccard_index, direction=descending, max_results=15)",
"return(message=true, store=false)",
] } }
elif params.example_number == 203: # KG2 version of demo example 3 (but using idiopathic pulmonary fibrosis)
query = { "previous_message_processing_plan": { "processing_actions": [
"create_message",
#"add_qnode(id=n00, curie=DOID:0050156)", # idiopathic pulmonary fibrosis
"add_qnode(curie=DOID:9406, id=n00)", # hypopituitarism, original demo example
"add_qnode(id=n01, type=chemical_substance, is_set=true)",
"add_qnode(id=n02, type=protein)",
"add_qedge(id=e00, source_id=n00, target_id=n01)",
"add_qedge(id=e01, source_id=n01, target_id=n02)",
"expand(edge_id=[e00,e01], kp=ARAX/KG2)",
"overlay(action=overlay_clinical_info, observed_expected_ratio=true, virtual_relation_label=C1, source_qnode_id=n00, target_qnode_id=n01)",
"overlay(action=compute_ngd, virtual_relation_label=N1, source_qnode_id=n01, target_qnode_id=n02)",
"filter_kg(action=remove_edges_by_attribute, edge_attribute=observed_expected_ratio, direction=below, threshold=2, remove_connected_nodes=t, qnode_id=n01)",
"filter_kg(action=remove_orphaned_nodes, node_type=protein)",
"return(message=true, store=false)",
] } }
elif params.example_number == 2033: # KG2 version of demo example 3 (but using idiopathic pulmonary fibrosis), with all decorations
query = { "previous_message_processing_plan": { "processing_actions": [
"create_message",
"add_qnode(id=n00, curie=DOID:0050156)", # idiopathic pulmonary fibrosis
#"add_qnode(curie=DOID:9406, id=n00)", # hypopituitarism, original demo example
"add_qnode(id=n01, type=chemical_substance, is_set=true)",
"add_qnode(id=n02, type=protein)",
"add_qedge(id=e00, source_id=n00, target_id=n01)",
"add_qedge(id=e01, source_id=n01, target_id=n02)",
"expand(edge_id=[e00,e01], kp=ARAX/KG2)",
"overlay(action=overlay_clinical_info, observed_expected_ratio=true, virtual_relation_label=C1, source_qnode_id=n00, target_qnode_id=n01)",
"overlay(action=compute_ngd, virtual_relation_label=N1, source_qnode_id=n01, target_qnode_id=n02)",
#"filter_kg(action=remove_edges_by_attribute, edge_attribute=observed_expected_ratio, direction=below, threshold=0, remove_connected_nodes=t, qnode_id=n01)",
#"filter_kg(action=remove_orphaned_nodes, node_type=protein)",
"return(message=true, store=false)",
] } }
elif params.example_number == 222: # Simple BTE query
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(id=n00, curie=NCBIGene:1017)", # CDK2
"add_qnode(id=n01, type=chemical_substance, is_set=True)",
"add_qedge(id=e00, source_id=n01, target_id=n00)",
"expand(edge_id=e00, kp=BTE)",
"return(message=true, store=false)",
]}}
elif params.example_number == 233: # KG2 version of demo example 1 (acetaminophen)
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(id=n00, curie=CHEMBL.COMPOUND:CHEMBL112)", # acetaminophen
"add_qnode(id=n01, type=protein, is_set=true)",
"add_qedge(id=e00, source_id=n00, target_id=n01)",
"expand(edge_id=e00, kp=ARAX/KG2)",
"filter_kg(action=remove_edges_by_property, edge_property=provided_by, property_value=https://pharos.nih.gov)",
"return(message=true, store=false)",
]}}
elif params.example_number == 300: # KG2 version of demo example 1 (acetaminophen)
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(name=DOID:14330, id=n00)",
"add_qnode(type=protein, is_set=true, id=n01)",
"add_qnode(type=chemical_substance, id=n02)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"add_qedge(source_id=n01, target_id=n02, id=e01, type=physically_interacts_with)",
"expand(edge_id=[e00,e01], kp=ARAX/KG1)",
"overlay(action=compute_jaccard, start_node_id=n00, intermediate_node_id=n01, end_node_id=n02, virtual_edge_type=J1)",
"filter_kg(action=remove_edges_by_attribute_default, edge_attribute=jaccard_index, type=std, remove_connected_nodes=t, qnode_id=n02)",
#"filter_kg(action=remove_edges_by_property, edge_property=provided_by, property_value=Pharos)", # can be removed, but shows we can filter by Knowledge provider
"resultify(ignore_edge_direction=true)",
"filter_results(action=sort_by_edge_attribute, edge_attribute=jaccard_index, direction=descending, max_results=15)",
"return(message=true, store=false)",
]}}
elif params.example_number == 690: # test issue 690
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(name=DOID:14330, id=n00)",
"add_qnode(type=not_a_real_type, is_set=true, id=n01)",
"add_qnode(type=chemical_substance, id=n02)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"add_qedge(source_id=n01, target_id=n02, id=e01, type=molecularly_interacts_with)",
"expand(edge_id=[e00,e01], continue_if_no_results=true)",
"overlay(action=compute_jaccard, start_node_id=n00, intermediate_node_id=n01, end_node_id=n02, virtual_relation_label=J1)",
"return(message=true, store=false)"
]}}
elif params.example_number == 6231: # chunyu testing #623, all nodes already in the KG and QG
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(id=n00, curie=CHEMBL.COMPOUND:CHEMBL521, type=chemical_substance)",
"add_qnode(id=n01, is_set=true, type=protein)",
"add_qedge(id=e00, source_id=n00, target_id=n01)",
"add_qnode(id=n02, type=biological_process)",
"add_qedge(id=e01, source_id=n01, target_id=n02)",
"expand(edge_id=[e00, e01], kp=ARAX/KG1)",
"overlay(action=fisher_exact_test, source_qnode_id=n01, virtual_relation_label=FET, target_qnode_id=n02, cutoff=0.05)",
"resultify()",
"return(message=true, store=false)"
]}}
elif params.example_number == 6232: # chunyu testing #623, this should return the 10 smallest FET p-values and only add the virtual edge with top 10 FET p-values
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(id=n00, curie=CHEMBL.COMPOUND:CHEMBL521, type=chemical_substance)",
"add_qnode(id=n01, is_set=true, type=protein)",
"add_qedge(id=e00, source_id=n00, target_id=n01)",
"add_qnode(id=n02, type=biological_process)",
"add_qedge(id=e01, source_id=n01, target_id=n02)",
"expand(edge_id=[e00, e01], kp=ARAX/KG1)",
"overlay(action=fisher_exact_test, source_qnode_id=n01, virtual_relation_label=FET, target_qnode_id=n02, top_n=10)",
"resultify()",
"return(message=true, store=false)"
]}}
elif params.example_number == 6233: # chunyu testing #623, this DSL tests the FET module based on (source id - involved_in - target id) and only decorate/add virtual edge with pvalue<0.05
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(id=n00, curie=CHEMBL.COMPOUND:CHEMBL521, type=chemical_substance)",
"add_qnode(id=n01, is_set=true, type=protein)",
"add_qedge(id=e00, source_id=n00, target_id=n01)",
"add_qnode(id=n02, type=biological_process)",
"add_qedge(id=e01, source_id=n01, target_id=n02, type=involved_in)",
"expand(edge_id=[e00, e01], kp=ARAX/KG1)",
"overlay(action=fisher_exact_test, source_qnode_id=n01, virtual_relation_label=FET, target_qnode_id=n02, rel_edge_id=e01, cutoff=0.05)",
"resultify()",
"return(message=true, store=false)"
]}}
elif params.example_number == 6234: # chunyu testing #623, nodes not in the KG and QG. This should throw an error initially. In the future we might want to add these nodes.
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(id=n00, curie=CHEMBL.COMPOUND:CHEMBL521, type=chemical_substance)",
"add_qnode(id=n01, type=protein)",
"add_qedge(id=e00, source_id=n00, target_id=n01)",
"expand(edge_id=[e00], kp=ARAX/KG1)",
"overlay(action=fisher_exact_test, source_qnode_id=n01, virtual_relation_label=FET, target_qnode_id=n02, cutoff=0.05)",
"resultify()",
"return(message=true, store=false)"
]}}
elif params.example_number == 6235: # chunyu testing #623, this is a two-hop sample. First, find all edges between DOID:14330 and proteins and then filter out the proteins with connection having pvalue>0.001 to DOID:14330. Second, find all edges between proteins and chemical_substances and then filter out the chemical_substances with connection having pvalue>0.005 to proteins
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(curie=DOID:14330, id=n00, type=disease)",
"add_qnode(type=protein, is_set=true, id=n01)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"expand(edge_id=e00, kp=ARAX/KG1)",
"overlay(action=fisher_exact_test, source_qnode_id=n00, target_qnode_id=n01, virtual_relation_label=FET1)",
"filter_kg(action=remove_edges_by_attribute, edge_attribute=fisher_exact_test_p-value, direction=above, threshold=0.001, remove_connected_nodes=t, qnode_id=n01)",
"add_qnode(type=chemical_substance, id=n02)",
"add_qedge(source_id=n01, target_id=n02, id=e01, type=physically_interacts_with)",
"expand(edge_id=e01, kp=ARAX/KG1)",
"overlay(action=fisher_exact_test, source_qnode_id=n01, target_qnode_id=n02, virtual_relation_label=FET2)",
"resultify()",
"return(message=true, store=false)"
]}}
elif params.example_number == 6236: # chunyu testing #623, this is a three-hop sample: DOID:14330 - protein - (physically_interacts_with) - chemical_substance - phenotypic_feature
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(curie=DOID:14330, id=n00, type=disease)",
"add_qnode(type=protein, is_set=true, id=n01)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"expand(edge_id=e00, kp=ARAX/KG1)",
"overlay(action=fisher_exact_test, source_qnode_id=n00, target_qnode_id=n01, virtual_relation_label=FET1)",
"filter_kg(action=remove_edges_by_attribute, edge_attribute=fisher_exact_test_p-value, direction=above, threshold=0.001, remove_connected_nodes=t, qnode_id=n01)",
"add_qnode(type=chemical_substance, is_set=true, id=n02)",
"add_qedge(source_id=n01, target_id=n02, id=e01, type=physically_interacts_with)",
"expand(edge_id=e01, kp=ARAX/KG1)",
"overlay(action=fisher_exact_test, source_qnode_id=n01, target_qnode_id=n02, virtual_relation_label=FET2)",
"filter_kg(action=remove_edges_by_attribute, edge_attribute=fisher_exact_test_p-value, direction=above, threshold=0.001, remove_connected_nodes=t, qnode_id=n02)",
"add_qnode(type=phenotypic_feature, id=n03)",
"add_qedge(source_id=n02, target_id=n03, id=e02)",
"expand(edge_id=e02, kp=ARAX/KG1)",
"overlay(action=fisher_exact_test, source_qnode_id=n02, target_qnode_id=n03, virtual_relation_label=FET3)",
"resultify()",
"return(message=true, store=false)"
]}}
elif params.example_number == 6237: # chunyu testing #623, this is a four-hop sample: CHEMBL521 - protein - biological_process - protein - disease
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(id=n00, curie=CHEMBL.COMPOUND:CHEMBL521, type=chemical_substance)",
"add_qnode(id=n01, is_set=true, type=protein)",
"add_qedge(id=e00, source_id=n00, target_id=n01)",
"expand(edge_id=e00, kp=ARAX/KG1)",
"overlay(action=fisher_exact_test, source_qnode_id=n00, target_qnode_id=n01, virtual_relation_label=FET1)",
"filter_kg(action=remove_edges_by_attribute, edge_attribute=fisher_exact_test_p-value, direction=above, threshold=0.01, remove_connected_nodes=t, qnode_id=n01)",
"add_qnode(type=biological_process, is_set=true, id=n02)",
"add_qedge(source_id=n01, target_id=n02, id=e01)",
"expand(edge_id=e01, kp=ARAX/KG1)",
"overlay(action=fisher_exact_test, source_qnode_id=n01, target_qnode_id=n02, virtual_relation_label=FET2)",
"filter_kg(action=remove_edges_by_attribute, edge_attribute=fisher_exact_test_p-value, direction=above, threshold=0.01, remove_connected_nodes=t, qnode_id=n02)",
"add_qnode(type=protein, is_set=true, id=n03)",
"add_qedge(source_id=n02, target_id=n03, id=e02)",
"expand(edge_id=e02, kp=ARAX/KG1)",
"overlay(action=fisher_exact_test, source_qnode_id=n02, target_qnode_id=n03, virtual_relation_label=FET3)",
"filter_kg(action=remove_edges_by_attribute, edge_attribute=fisher_exact_test_p-value, direction=above, threshold=0.01, remove_connected_nodes=t, qnode_id=n03)",
"add_qnode(type=disease, id=n04)",
"add_qedge(source_id=n03, target_id=n04, id=e03)",
"expand(edge_id=e03, kp=ARAX/KG1)",
"overlay(action=fisher_exact_test, source_qnode_id=n03, target_qnode_id=n04, virtual_relation_label=FET4)",
"resultify()",
"return(message=true, store=false)"
]}}
elif params.example_number == 7680: # issue 768 test all but jaccard, uncomment any one you want to test
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(curie=DOID:1588, id=n0)",
"add_qnode(type=chemical_substance, id=n1)",
"add_qedge(source_id=n0, target_id=n1, id=e0)",
"expand(edge_id=e0)",
#"overlay(action=predict_drug_treats_disease)",
#"overlay(action=predict_drug_treats_disease, source_qnode_id=n1, target_qnode_id=n0, virtual_relation_label=P1)",
#"overlay(action=overlay_clinical_info,paired_concept_frequency=true)",
#"overlay(action=overlay_clinical_info,observed_expected_ratio=true)",
#"overlay(action=overlay_clinical_info,chi_square=true)",
#"overlay(action=overlay_clinical_info,paired_concept_frequency=true, source_qnode_id=n0, target_qnode_id=n1, virtual_relation_label=CP1)",
#"overlay(action=overlay_clinical_info,observed_expected_ratio=true, source_qnode_id=n0, target_qnode_id=n1, virtual_relation_label=OE1)",
#"overlay(action=overlay_clinical_info,chi_square=true, source_qnode_id=n0, target_qnode_id=n1, virtual_relation_label=C1)",
"overlay(action=fisher_exact_test, source_qnode_id=n0, target_qnode_id=n1, virtual_relation_label=FET)",
"resultify()",
"filter_results(action=limit_number_of_results, max_results=15)",
"return(message=true, store=true)",
]}}
elif params.example_number == 7681: # issue 768 with jaccard
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(curie=DOID:14330, id=n00)", # parkinsons
"add_qnode(type=protein, is_set=True, id=n01)",
"add_qnode(type=chemical_substance, is_set=False, id=n02)",
"add_qedge(source_id=n01, target_id=n00, id=e00)",
"add_qedge(source_id=n01, target_id=n02, id=e01)",
"expand(edge_id=[e00,e01])",
"overlay(action=compute_jaccard, start_node_id=n00, intermediate_node_id=n01, end_node_id=n02, virtual_relation_label=J1)",
"resultify()",
"filter_results(action=limit_number_of_results, max_results=15)",
"return(message=true, store=true)",
]}}
elif params.example_number == 7200: # issue 720, example 2
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(curie=DOID:14330, id=n00)",
"add_qnode(type=protein, is_set=true, id=n01)",
"add_qnode(type=chemical_substance, id=n02)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"add_qedge(source_id=n01, target_id=n02, id=e01, type=physically_interacts_with)",
"expand(edge_id=[e00,e01], kp=ARAX/KG1)",
"overlay(action=compute_jaccard, start_node_id=n00, intermediate_node_id=n01, end_node_id=n02, virtual_relation_label=J1)",
#"filter_kg(action=remove_edges_by_attribute, edge_attribute=jaccard_index, direction=below, threshold=.2, remove_connected_nodes=t, qnode_id=n02)",
#"filter_kg(action=remove_edges_by_property, edge_property=provided_by, property_value=Pharos)",
#"overlay(action=predict_drug_treats_disease, source_qnode_id=n02, target_qnode_id=n00, virtual_relation_label=P1)",
"resultify(ignore_edge_direction=true, debug=true)",
"return(message=true, store=true)",
]}}
elif params.example_number == 885:  # NOTE(review): undocumented -- presumably issue/test 885 (disease->protein->chemical via KG2, decorated with all three clinical-info overlays); confirm against the issue tracker
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(name=DOID:11830, id=n00)",
"add_qnode(type=protein, is_set=true, id=n01)",
"add_qnode(type=chemical_substance, id=n02)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"add_qedge(source_id=n01, target_id=n02, id=e01, type=molecularly_interacts_with)",
"expand(edge_id=[e00,e01], kp=ARAX/KG2)",
# overlay a bunch of clinical info
"overlay(action=overlay_clinical_info, paired_concept_frequency=true, source_qnode_id=n00, target_qnode_id=n02, virtual_relation_label=C1)",
"overlay(action=overlay_clinical_info, observed_expected_ratio=true, source_qnode_id=n00, target_qnode_id=n02, virtual_relation_label=C2)",
"overlay(action=overlay_clinical_info, chi_square=true, source_qnode_id=n00, target_qnode_id=n02, virtual_relation_label=C3)",
# return results
"resultify(ignore_edge_direction=true)",
"return(message=true, store=true)",
]}}
elif params.example_number == 887:  # same pipeline as example 1515 (demo example 3, hypopituitarism) but identifies n00 via name= rather than curie=
query = {"previous_message_processing_plan": {"processing_actions": [
"add_qnode(name=DOID:9406, id=n00)",
"add_qnode(type=chemical_substance, is_set=true, id=n01)",
"add_qnode(type=protein, id=n02)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"add_qedge(source_id=n01, target_id=n02, id=e01)",
"expand(edge_id=[e00,e01])",
"overlay(action=overlay_clinical_info, observed_expected_ratio=true, virtual_relation_label=C1, source_qnode_id=n00, target_qnode_id=n01)",
"filter_kg(action=remove_edges_by_attribute, edge_attribute=observed_expected_ratio, direction=below, threshold=3, remove_connected_nodes=t, qnode_id=n01)",
"filter_kg(action=remove_orphaned_nodes, node_type=protein)",
"overlay(action=compute_ngd, virtual_relation_label=N1, source_qnode_id=n01, target_qnode_id=n02)",
"filter_kg(action=remove_edges_by_attribute, edge_attribute=normalized_google_distance, direction=above, threshold=0.85, remove_connected_nodes=t, qnode_id=n02)",
"resultify(ignore_edge_direction=true)",
"return(message=true, store=true)"
]}}
elif params.example_number == 892: # drug disease prediction with BTE
query = {"previous_message_processing_plan": {"processing_actions": [
"add_qnode(curie=DOID:11830, type=disease, id=n00)",
"add_qnode(type=gene, curie=[UniProtKB:P39060, UniProtKB:O43829, UniProtKB:P20849], is_set=true, id=n01)",
"add_qnode(type=chemical_substance, id=n02)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"add_qedge(source_id=n01, target_id=n02, id=e01)",
"expand(kp=BTE)",
"overlay(action=predict_drug_treats_disease, source_qnode_id=n02, target_qnode_id=n00, virtual_relation_label=P1)",
"resultify(ignore_edge_direction=true)",
"return(message=true, store=true)"
]}}
elif params.example_number == 8922: # drug disease prediction with BTE and KG2
query = {"previous_message_processing_plan": {"processing_actions": [
"add_qnode(curie=DOID:11830, id=n0, type=disease)",
"add_qnode(type=chemical_substance, id=n1)",
"add_qedge(source_id=n0, target_id=n1, id=e1)",
"expand(edge_id=e1, kp=ARAX/KG2)",
"expand(edge_id=e1, kp=BTE)",
#"overlay(action=overlay_clinical_info, paired_concept_frequency=true)",
#"overlay(action=overlay_clinical_info, observed_expected_ratio=true)",
#"overlay(action=overlay_clinical_info, chi_square=true)",
"overlay(action=predict_drug_treats_disease)",
#"overlay(action=compute_ngd)",
"resultify(ignore_edge_direction=true)",
#"filter_results(action=limit_number_of_results, max_results=50)",
"return(message=true, store=true)"
]}}
elif params.example_number == 8671: # test_one_hop_kitchen_sink_BTE_1
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(curie=DOID:11830, id=n0, type=disease)",
"add_qnode(type=chemical_substance, id=n1)",
"add_qedge(source_id=n0, target_id=n1, id=e1)",
# "expand(edge_id=e00, kp=ARAX/KG2)",
"expand(edge_id=e1, kp=BTE)",
"overlay(action=overlay_clinical_info, paired_concept_frequency=true)",
"overlay(action=overlay_clinical_info, observed_expected_ratio=true)",
"overlay(action=overlay_clinical_info, chi_square=true)",
"overlay(action=predict_drug_treats_disease)",
"overlay(action=compute_ngd)",
"resultify(ignore_edge_direction=true)",
"filter_results(action=limit_number_of_results, max_results=50)",
"return(message=true, store=true)",
]}}
elif params.example_number == 8672: # test_one_hop_based_on_types_1
query = {"previous_message_processing_plan": {"processing_actions": [
"create_message",
"add_qnode(name=DOID:11830, id=n00, type=disease)",
"add_qnode(type=chemical_substance, id=n01)",
"add_qedge(source_id=n00, target_id=n01, id=e00)",
"expand(edge_id=e00, kp=ARAX/KG2)",
"expand(edge_id=e00, kp=BTE)",
"overlay(action=overlay_clinical_info, observed_expected_ratio=true)",
"overlay(action=predict_drug_treats_disease)",
"filter_kg(action=remove_edges_by_attribute, edge_attribute=probability_treats, direction=below, threshold=0.75, remove_connected_nodes=true, qnode_id=n01)",
"overlay(action=compute_ngd)",
"resultify(ignore_edge_direction=true)",
"filter_results(action=limit_number_of_results, max_results=50)",
"return(message=true, store=true)",
]}}
else:
eprint(f"Invalid test number {params.example_number}. Try 1 through 17")
return
# Disabled debug path (`if 0:`): run the query via query_return_message() and
# pretty-print the whole returned message instead of the normal flow below.
if 0:
message = araxq.query_return_message(query)
print(json.dumps(ast.literal_eval(repr(message)),sort_keys=True,indent=2))
return
# Execute the DSL plan and fold its response into this function's response object.
result = araxq.query(query)
response.merge(result)
# On failure, dump the debug-level response log and bail out early.
if result.status != 'OK':
print(response.show(level=Response.DEBUG))
return response
#### Retrieve the Translator Message from the result
message = araxq.message
#### Print out the message that came back
#print(response.show(level=Response.DEBUG))
#print("Returned message:\n")
#print(json.dumps(ast.literal_eval(repr(message)),sort_keys=True,indent=2))
#print(json.dumps(ast.literal_eval(repr(message.id)), sort_keys=True, indent=2))
#print(json.dumps(ast.literal_eval(repr(message.knowledge_graph.edges)), sort_keys=True, indent=2))
#print(json.dumps(ast.literal_eval(repr(message.query_graph)), sort_keys=True, indent=2))
#print(json.dumps(ast.literal_eval(repr(message.knowledge_graph.nodes)), sort_keys=True, indent=2))
print(json.dumps(ast.literal_eval(repr(message.id)), sort_keys=True, indent=2))
#print(response.show(level=Response.DEBUG))
print(response.show(level=Response.DEBUG))
print(f"Number of results: {len(message.results)}")
#print(f"Drugs names in the KG: {[x.name for x in message.knowledge_graph.nodes if 'chemical_substance' in x.type or 'drug' in x.type]}")
#print(f"Essence names in the answers: {[x.essence for x in message.results]}")
print("Results:")
# One line per result: right-aligned confidence (0.0 when absent) and the essence.
for result in message.results:
confidence = result.confidence
if confidence is None:
confidence = 0.0
print(" -" + '{:6.3f}'.format(confidence) + f"\t{result.essence}")
#print(json.dumps(ast.literal_eval(repr(message.results[0])), sort_keys=True, indent=2))
#print(json.dumps(ast.literal_eval(repr(message.results)), sort_keys=True, indent=2))
#print(set.union(*[set(x.qg_id for x in r.edge_bindings if x.qg_id.startswith('J')) for r in message.results]))
# look for qg id's in edge_bindings in results
if False:
try:
print(f"Result qg_id's in results: {set.union(*[set(x.qg_id for x in r.edge_bindings) for r in message.results])}")
except:
pass
# Check edge attributes
if True:
vals = []
num_edges_show = 2
num_edges_shown = 0
#attribute_of_interest = 'jaccard_index'
#attribute_of_interest = 'observed_expected_ratio'
#attribute_of_interest = 'ngd'
#attribute_of_interest = 'normalized_google_distance'
#attribute_of_interest = 'chi_square'
#attribute_of_interest = 'paired_concept_frequency'
#attribute_of_interest = 'probability'
attribute_of_interest = 'probability_treats'
all_attribute_names = set()
for edge in message.knowledge_graph.edges:
if hasattr(edge, 'edge_attributes') and edge.edge_attributes and len(edge.edge_attributes) >= 1:
for edge_attribute in edge.edge_attributes:
all_attribute_names.add(edge_attribute.name)
if edge_attribute.name == attribute_of_interest:
if num_edges_shown < num_edges_show:
print(json.dumps(ast.literal_eval(repr(edge)), sort_keys=True, indent=2))
num_edges_shown += 1
#for attr in edge.edge_attributes:
# vals.append((attr.name, attr.value))
vals.append((edge_attribute.name, float(edge_attribute.value))) # FIXME: some edge_attributes are floats, others are strings, object model weirdness
elif attribute_of_interest == all:
print(json.dumps(ast.literal_eval(repr(edge)), sort_keys=True, indent=2))
print(f"All edge attribute names: {all_attribute_names}")
if vals:
print(f"number of edges with attribute {attribute_of_interest}: {len(vals)}")
print(f"Mean of attribute {attribute_of_interest}: {np.mean([x[1] for x in vals])}")
print(f"Median of attribute {attribute_of_interest}: {np.median([x[1] for x in vals])}")
print(f"Max of attribute {attribute_of_interest}: {np.max([x[1] for x in vals])}")
print(f"Min of attribute {attribute_of_interest}: {np.min([x[1] for x in vals])}")
# show all the values of the edge attributes
#print(sorted(Counter(vals).items(), key=lambda x:float(x[0][1])))
# check for edges from a given drug
if False:
for edge in message.knowledge_graph.edges:
if edge.source_id == "CHEMBL.COMPOUND:CHEMBL452076" or edge.target_id == "CHEMBL.COMPOUND:CHEMBL452076":
print(edge)
#for node in message.knowledge_graph.nodes:
# print(f"{node.name} {node.type[0]}")
# print(node.qnode_id)
# if params.example_number == 101:
# import math
# edge_values = {}
# # iterate over the edges find the attribute values
# for edge in message.knowledge_graph.edges: # iterate over the edges
# edge_values[str(edge.id)] = {'value': None, 'type': edge.type}
# if hasattr(edge, 'edge_attributes'): # check if they have attributes
# if edge.edge_attributes: # if there are any edge attributes
# for attribute in edge.edge_attributes: # for each attribute
# if attribute.name == 'jaccard_index': # check if it's the desired one
# edge_values[str(edge.id)] = {'value': attribute.value, 'type': edge.type}
# if True:
# value_list=[-math.inf]*len(message.results)
# else:
# value_list=[math.inf]*len(message.results)
# i = 0
# type_flag = False
# for result in message.results:
# for binding in result.edge_bindings:
# if edge_values[binding.kg_id]['value'] is not None:
# if not type_flag or (type_flag and params['edge_type'] == edge_values[binding.kg_id]['type']):
# if abs(value_list[i]) == math.inf:
# value_list[i] = edge_values[binding.kg_id]['value']
# else:
# # this will take the sum off all edges with the attribute if we want to change to max edit this line
# value_list[i] += edge_values[binding.kg_id]['value']
# i+=1
# print(value_list)
# print([len(r.edge_bindings) for r in message.results])
# if params.example_number == 102:
# import math
# node_values = {}
# # iterate over the nodes find the attribute values
# for node in message.knowledge_graph.nodes: # iterate over the nodes
# node_values[str(node.id)] = {'value': None, 'type': node.type}
# if hasattr(node, 'node_attributes'): # check if they have attributes
# if node.node_attributes: # if there are any node attributes
# for attribute in node.node_attributes: # for each attribute
# if attribute.name == 'pubmed_ids': # check if it's the desired one
# node_values[str(node.id)] = {'value': attribute.value.count("PMID"), 'type': node.type}
# if True:
# value_list=[-math.inf]*len(message.results)
# else:
# value_list=[math.inf]*len(message.results)
# i = 0
# type_flag = False
# for result in message.results:
# for binding in result.node_bindings:
# if node_values[binding.kg_id]['value'] is not None:
# if not type_flag or (type_flag and params['node_type'] == node_values[binding.kg_id]['type']):
# if abs(value_list[i]) == math.inf:
# value_list[i] = node_values[binding.kg_id]['value']
# else:
# # this will take the sum off all nodes with the attribute if we want to change to max edit this line
# value_list[i] += node_values[binding.kg_id]['value']
# i+=1
# print(value_list)
# #print([len(r.node_bindings) for r in message.results])
#print(len(message.knowledge_graph.nodes))
# check number of TP's for example 3
if False:
proteins = []
for node in message.knowledge_graph.nodes:
if node.type[0] == "protein":
proteins.append(node.id)
#for protein in sorted(proteins):
# print(f"{protein}")
known_proteins = ["UniProtKB:P16473",
"UniProtKB:P05093",
"UniProtKB:P06401",
"UniProtKB:P08235",
"UniProtKB:P18405",
"UniProtKB:P03372",
"UniProtKB:P10275",
"UniProtKB:P11511",
"UniProtKB:P19838",
"UniProtKB:Q13936",
"UniProtKB:Q16665",
"UniProtKB:P22888",
"UniProtKB:Q9HB55",
"UniProtKB:P05108",
"UniProtKB:P08684",
"UniProtKB:Q92731",
"UniProtKB:P80365",
"UniProtKB:P24462",
"UniProtKB:P04278",
"UniProtKB:P31213",
"UniProtKB:P08842",
"UniProtKB:Q15125",
"UniProtKB:P04150",
"UniProtKB:P37058",
"UniProtKB:P54132",
"UniProtKB:P24462",
"UniProtKB:P80365",
"UniProtKB:Q92731",
"UniProtKB:P04278",
"UniProtKB:P31213",
"UniProtKB:Q15125",
"UniProtKB:P08842",
"UniProtKB:P16473",
"UniProtKB:P08235",
"UniProtKB:P05093",
"UniProtKB:P06401",
"UniProtKB:P18405",
"UniProtKB:P54132",
"UniProtKB:P04150",
"UniProtKB:P37058",
"UniProtKB:P08684",
"UniProtKB:P22888",
"UniProtKB:P05108",
"UniProtKB:Q9HB55",
"UniProtKB:Q13936",
"UniProtKB:P19838",
"UniProtKB:P11511",
"UniProtKB:P10275",
"UniProtKB:Q16665",
"UniProtKB:P03372"]
print(f"For example 15 (demo eg. 3), number of TP proteins: {len(set(known_proteins).intersection(set(proteins)))}") # fill these in after finding a good example
try:
print(f"Number of KnowledgeProviders in KG: {Counter([x.provided_by for x in message.knowledge_graph.edges])}")
except:
print(f"Number of KnowledgeProviders in KG: {Counter([y for x in message.knowledge_graph.edges for y in x.provided_by])}")
# print the message id at the bottom for convenience too:
print(f"message id: {json.dumps(ast.literal_eval(repr(message.id)), sort_keys=True, indent=2)}")
if __name__ == "__main__": main()
| [
"ARAX_query_graph_interpreter.ARAXQueryGraphInterpreter",
"ARAX_ranker.ARAXRanker",
"time.sleep",
"RTXFeedback.RTXFeedback",
"sys.exc_info",
"ARAX_resultify.ARAXResultify",
"ARAX_messenger.ARAXMessenger",
"numpy.mean",
"argparse.ArgumentParser",
"subprocess.run",
"json.dumps",
"numpy.max",
"... | [((28448, 28523), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Primary interface to the ARAX system"""'}), "(description='Primary interface to the ARAX system')\n", (28471, 28523), False, 'import argparse\n'), ((28935, 28945), 'response.Response', 'Response', ([], {}), '()\n', (28943, 28945), False, 'from response import Response\n'), ((1959, 2022), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.asynchronous_query', 'args': '(query,)'}), '(target=self.asynchronous_query, args=(query,))\n', (1975, 2022), False, 'import threading\n'), ((4483, 4493), 'response.Response', 'Response', ([], {}), '()\n', (4491, 4493), False, 'from response import Response\n'), ((7242, 7255), 'RTXFeedback.RTXFeedback', 'RTXFeedback', ([], {}), '()\n', (7253, 7255), False, 'from RTXFeedback import RTXFeedback\n'), ((15587, 15682), 'swagger_server.models.previous_message_processing_plan.PreviousMessageProcessingPlan.from_dict', 'PreviousMessageProcessingPlan.from_dict', (["inputEnvelope['previous_message_processing_plan']"], {}), "(inputEnvelope[\n 'previous_message_processing_plan'])\n", (15626, 15682), False, 'from swagger_server.models.previous_message_processing_plan import PreviousMessageProcessingPlan\n'), ((15778, 15791), 'RTXFeedback.RTXFeedback', 'RTXFeedback', ([], {}), '()\n', (15789, 15791), False, 'from RTXFeedback import RTXFeedback\n'), ((15911, 15926), 'ARAX_messenger.ARAXMessenger', 'ARAXMessenger', ([], {}), '()\n', (15924, 15926), False, 'from ARAX_messenger import ARAXMessenger\n'), ((708, 733), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (723, 733), False, 'import os\n'), ((1173, 1198), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1188, 1198), False, 'import os\n'), ((1434, 1459), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1449, 1459), False, 'import os\n'), ((1679, 1704), 'os.path.abspath', 'os.path.abspath', (['__file__'], 
{}), '(__file__)\n', (1694, 1704), False, 'import os\n'), ((3075, 3116), 're.sub', 're.sub', (['"""DONE,"""', '""""""', 'self.response.status'], {}), "('DONE,', '', self.response.status)\n", (3081, 3116), False, 'import re\n'), ((3551, 3561), 'response.Response', 'Response', ([], {}), '()\n', (3559, 3561), False, 'from response import Response\n'), ((3679, 3688), 'swagger_server.models.message.Message', 'Message', ([], {}), '()\n', (3686, 3688), False, 'from swagger_server.models.message import Message\n'), ((4191, 4200), 'swagger_server.models.message.Message', 'Message', ([], {}), '()\n', (4198, 4200), False, 'from swagger_server.models.message import Message\n'), ((8249, 8253), 'Q0Solution.Q0', 'Q0', ([], {}), '()\n', (8251, 8253), False, 'from Q0Solution import Q0\n'), ((9206, 9221), 'ParseQuestion.ParseQuestion', 'ParseQuestion', ([], {}), '()\n', (9219, 9221), False, 'from ParseQuestion import ParseQuestion\n'), ((9448, 9459), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9457, 9459), False, 'import os\n'), ((9621, 9682), 'subprocess.run', 'subprocess.run', (['[command]'], {'stdout': 'subprocess.PIPE', 'shell': '(True)'}), '([command], stdout=subprocess.PIPE, shell=True)\n', (9635, 9682), False, 'import subprocess\n'), ((9699, 9712), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (9707, 9712), False, 'import os\n'), ((20712, 20727), 'actions_parser.ActionsParser', 'ActionsParser', ([], {}), '()\n', (20725, 20727), False, 'from actions_parser import ActionsParser\n'), ((21287, 21301), 'ARAX_expander.ARAXExpander', 'ARAXExpander', ([], {}), '()\n', (21299, 21301), False, 'from ARAX_expander import ARAXExpander\n'), ((21323, 21335), 'ARAX_filter.ARAXFilter', 'ARAXFilter', ([], {}), '()\n', (21333, 21335), False, 'from ARAX_filter import ARAXFilter\n'), ((21358, 21371), 'ARAX_overlay.ARAXOverlay', 'ARAXOverlay', ([], {}), '()\n', (21369, 21371), False, 'from ARAX_overlay import ARAXOverlay\n'), ((21396, 21410), 'ARAX_filter_kg.ARAXFilterKG', 'ARAXFilterKG', 
([], {}), '()\n', (21408, 21410), False, 'from ARAX_filter_kg import ARAXFilterKG\n'), ((21437, 21452), 'ARAX_resultify.ARAXResultify', 'ARAXResultify', ([], {}), '()\n', (21450, 21452), False, 'from ARAX_resultify import ARAXResultify\n'), ((21482, 21501), 'ARAX_filter_results.ARAXFilterResults', 'ARAXFilterResults', ([], {}), '()\n', (21499, 21501), False, 'from ARAX_filter_results import ARAXFilterResults\n'), ((2249, 2264), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2259, 2264), False, 'import time\n'), ((2632, 2647), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (2642, 2647), False, 'import time\n'), ((5400, 5427), 'ARAX_query_graph_interpreter.ARAXQueryGraphInterpreter', 'ARAXQueryGraphInterpreter', ([], {}), '()\n', (5425, 5427), False, 'from ARAX_query_graph_interpreter import ARAXQueryGraphInterpreter\n'), ((10060, 10087), 'json.loads', 'json.loads', (['reformattedText'], {}), '(reformattedText)\n', (10070, 10087), False, 'import json\n'), ((10114, 10137), 'swagger_server.models.message.Message.from_dict', 'Message.from_dict', (['data'], {}), '(data)\n', (10131, 10137), False, 'from swagger_server.models.message import Message\n'), ((16241, 16320), 're.match', 're.match', (['"""http[s]://arax.rtx.ai/.*api/rtx/.+/message/(\\\\d+)"""', 'uri', '(re.M | re.I)'], {}), "('http[s]://arax.rtx.ai/.*api/rtx/.+/message/(\\\\d+)', uri, re.M | re.I)\n", (16249, 16320), False, 'import re\n'), ((7593, 7602), 'swagger_server.models.message.Message', 'Message', ([], {}), '()\n', (7600, 7602), False, 'from swagger_server.models.message import Message\n'), ((88339, 88402), 'collections.Counter', 'Counter', (['[x.provided_by for x in message.knowledge_graph.edges]'], {}), '([x.provided_by for x in message.knowledge_graph.edges])\n', (88346, 88402), False, 'from collections import Counter\n'), ((2854, 2899), 'json.dumps', 'json.dumps', (['self.response.messages[i_message]'], {}), '(self.response.messages[i_message])\n', (2864, 2899), False, 'import 
json\n'), ((5463, 5478), 'ARAX_messenger.ARAXMessenger', 'ARAXMessenger', ([], {}), '()\n', (5476, 5478), False, 'from ARAX_messenger import ARAXMessenger\n'), ((9497, 9522), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (9512, 9522), False, 'import os\n'), ((24393, 24407), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (24405, 24407), False, 'import sys\n'), ((25402, 25414), 'ARAX_ranker.ARAXRanker', 'ARAXRanker', ([], {}), '()\n', (25412, 25414), False, 'from ARAX_ranker import ARAXRanker\n'), ((82449, 82478), 'numpy.mean', 'np.mean', (['[x[1] for x in vals]'], {}), '([x[1] for x in vals])\n', (82456, 82478), True, 'import numpy as np\n'), ((82548, 82579), 'numpy.median', 'np.median', (['[x[1] for x in vals]'], {}), '([x[1] for x in vals])\n', (82557, 82579), True, 'import numpy as np\n'), ((82646, 82674), 'numpy.max', 'np.max', (['[x[1] for x in vals]'], {}), '([x[1] for x in vals])\n', (82652, 82674), True, 'import numpy as np\n'), ((82741, 82769), 'numpy.min', 'np.min', (['[x[1] for x in vals]'], {}), '([x[1] for x in vals])\n', (82747, 82769), True, 'import numpy as np\n'), ((88471, 88545), 'collections.Counter', 'Counter', (['[y for x in message.knowledge_graph.edges for y in x.provided_by]'], {}), '([y for x in message.knowledge_graph.edges for y in x.provided_by])\n', (88478, 88545), False, 'from collections import Counter\n'), ((2529, 2574), 'json.dumps', 'json.dumps', (['self.response.messages[i_message]'], {}), '(self.response.messages[i_message])\n', (2539, 2574), False, 'import json\n'), ((17995, 18010), 'ARAX_messenger.ARAXMessenger', 'ARAXMessenger', ([], {}), '()\n', (18008, 18010), False, 'from ARAX_messenger import ARAXMessenger\n'), ((25617, 25631), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (25629, 25631), False, 'import sys\n'), ((16856, 16871), 'ARAX_messenger.ARAXMessenger', 'ARAXMessenger', ([], {}), '()\n', (16869, 16871), False, 'from ARAX_messenger import ARAXMessenger\n'), ((24488, 24573), 
'traceback.format_exception', 'traceback.format_exception', (['exception_type', 'exception_value', 'exception_traceback'], {}), '(exception_type, exception_value, exception_traceback\n )\n', (24514, 24573), False, 'import traceback\n'), ((25716, 25801), 'traceback.format_exception', 'traceback.format_exception', (['exception_type', 'exception_value', 'exception_traceback'], {}), '(exception_type, exception_value, exception_traceback\n )\n', (25742, 25801), False, 'import traceback\n'), ((23710, 23730), 'QueryGraphReasoner.QueryGraphReasoner', 'QueryGraphReasoner', ([], {}), '()\n', (23728, 23730), False, 'from QueryGraphReasoner import QueryGraphReasoner\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ade:
# Asynchronous Differential Evolution.
#
# Copyright (C) 2018-19 by <NAME>,
# http://edsuom.com/ade
#
# See edsuom.com for API documentation as well as information about
# Ed's background and other projects, software and otherwise.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Unit tests for L{ade.report}.
"""
import pickle
import numpy as np
from twisted.internet import defer
from ade.util import *
from ade import history
from ade.test import testbase as tb
class Test_Analysis(tb.TestCase):
    """
    Tests for L{history.Analysis} using a tiny five-row fixture.

    Column 0 of the fixture is the SSE; columns 1-3 hold the values of
    the three named parameters.
    """
    def setUp(self):
        self.parameterNames = ['foo', 'bar', 'zebra']
        self.X = np.array([
            [110.0, 1, 2, 5],
            [810.0, 2, 3, 4],
            [270.0, 3, 4, 3],
            [580.0, 4, 5, 2],
            [999.0, 5, 6, 1],
        ])
        self.K = [0, 3, 2, 1, 4]
        self.analysis = history.Analysis(self.parameterNames, self.X, self.K)

    def test_name2k_k2name(self):
        """Name->column and column->name lookups round-trip (columns start at 1)."""
        for kk, parameterName in enumerate(self.parameterNames, start=1):
            self.assertEqual(self.analysis.name2k(parameterName), kk)
            self.assertEqual(self.analysis.k2name(kk), parameterName)

    def test_valueVsSSE(self):
        """value_vs_SSE returns one SSE vector and one value vector per name."""
        XY = self.analysis.value_vs_SSE(['bar'])
        self.assertEqual(len(XY), 2)
        self.assertItemsEqual(XY[0], [110.0, 580.0, 270.0, 810.0, 999.0])
        self.assertItemsEqual(XY[1], [2, 5, 4, 3, 6])

    def test_corr(self):
        """Column 2 rises with column 1, column 3 falls with it."""
        self.assertAlmostEqual(self.analysis.corr(1, 2), +1)
        self.assertAlmostEqual(self.analysis.corr(1, 3), -1)

    def test_Kf12(self):
        """Row selection by fractional SSE range [f1, f2)."""
        # Each case: (lower fraction, upper fraction, expected row indices).
        for f1, f2, expectedRows in (
                (0.0, 1.0, [0, 3, 2, 1]),
                (0.0, 0.6, [0, 3, 2]),
                (0.6, 1.01, [1, 4]),
                (0.0, 0.3, [0, 2]),
                (0.3, 1.01, [3, 1, 4])):
            self.assertItemsEqual(self.analysis.Kf12(f1, f2), expectedRows)

    def test_Kp12(self):
        """Row selection by fractional position [p1, p2) in the SSE ordering."""
        for p1, p2, expectedRows in (
                (0.0, 0.5, [0, 3, 2]),
                (0.2, 0.7, [3, 2, 1]),
                (0.5, 1.01, [2, 1, 4])):
            self.assertItemsEqual(self.analysis.Kp12(p1, p2), expectedRows)
class Test_ClosestPairFinder(tb.TestCase):
    """
    Tests for L{history.ClosestPairFinder} with room for 10 rows of
    4 columns each (SSE plus three parameter values).
    """
    def setUp(self):
        self.cpf = history.ClosestPairFinder(10, 4)
    def test_setRow(self):
        """setRow must populate X and invalidate the cached scaling vector S."""
        self.cpf.S = True # Anything not None, to confirm setRow clears it
        for k in range(10):
            Z = [10.0+k] + [k,k+1,k+2]
            self.cpf.setRow(k, Z)
        self.assertEqual(self.cpf.S, None)
        self.assertItemsEqual(self.cpf.X[0,:], [10.0, 0, 1, 2])
    def test_clearRow(self):
        """clearRow must invalidate S and remove the row index from K."""
        self.cpf.setRow(0, [100.0, 2, 3, 4])
        self.cpf.S = True # Anything not None, to confirm clearRow clears it
        self.cpf.clearRow(0)
        self.assertEqual(self.cpf.S, None)
        self.assertEqual(len(self.cpf.K), 0)
    def test_pairs_sampled(self):
        """pairs_sampled(N) returns N index pairs, each ordered and in K's range."""
        self.cpf.K = {3, 1, 4, 5, 9, 2}
        for N in (2, 3, 4):
            KP = self.cpf.pairs_sampled(N)
            self.assertEqual(KP.shape, (N, 2))
            for k1, k2 in KP:
                # Pairs are ordered (k1 < k2) and stay within the row space
                self.assertGreater(k2, k1)
                self.assertGreater(k1, 0)
                self.assertGreater(k2, 0)
                self.assertLess(k1, 10)
                self.assertLess(k2, 10)
    def test_pairs_all(self):
        """pairs_all returns every one of the N-choose-2 ordered index pairs."""
        self.cpf.K = {3, 1, 4, 5, 9, 2}
        N = len(self.cpf.K)
        # N*(N-1)/2 unique unordered pairs; float vs int compares equal below
        Np = N*(N-1)/2
        KP = self.cpf.pairs_all()
        self.assertEqual(KP.shape, (Np, 2))
        for k1, k2 in KP:
            self.assertGreater(k2, k1)
            self.assertGreater(k1, 0)
            self.assertGreater(k2, 0)
            self.assertLess(k1, 10)
            self.assertLess(k2, 10)
    @defer.inlineCallbacks
    def test_diffs(self):
        """
        Calling the finder with explicit pairs K yields the scaled,
        SSE-normalized squared differences for each pair.
        """
        self.cpf.setRow(0, [ 90.0, 0.11, 0.2, 0.3])
        self.cpf.setRow(1, [ 90.0, 0.09, 0.2, 0.3])
        self.cpf.setRow(2, [100.0, 0.09, 0.2, 0.3])
        self.cpf.setRow(3, [110.0, 0.11, 0.2, 0.3])
        self.cpf.setRow(4, [110.0, 0.10, 0.2, 0.3])
        self.assertEqual(self.cpf.S, None)
        K = np.array([[0, 1], [0, 2], [0, 3], [2, 3], [3, 4]])
        D = yield self.cpf(K=K)
        self.assertEqual(self.cpf.S.shape, (4,))
        # S appears to hold inverse variances per column — confirmed for
        # the SSE column (s0) and first parameter column (s1) below
        s0 = 1.0/np.var([90., 90., 100., 110., 110.])
        self.assertAlmostEqual(self.cpf.S[0], s0)
        s1 = 1.0/np.var([0.11, 0.09, 0.09, 0.11, 0.10])
        self.assertAlmostEqual(self.cpf.S[1], s1)
        # Mean SSE per pair:  0-1    0-2    0-3    2-3    3-4
        SSEs = [90.0, 95.0, 100.0, 105.0, 110.0]
        for k, de in enumerate(
                [s1*0.02**2,                # pair 0, 1
                 s0*10.0**2 + s1*0.02**2,   # pair 0, 2
                 s0*20.0**2,                # pair 0, 3
                 s0*10.0**2 + s1*0.02**2,   # pair 2, 3
                 s1*0.01**2                 # pair 3, 4
                ]):
            # Expected difference is the weighted squared delta over the SSE
            self.assertWithinOnePercent(D[k], de/SSEs[k])
    @defer.inlineCallbacks
    def test_diffs_someNeverpops(self):
        """
        Rows flagged never-in-population incur a penalty that shrinks
        their pair differences; unflagged pairs are unpenalized.
        """
        self.cpf.setRow(0, [100.0, 0.1130, 0.10, 0.100], 1)
        self.cpf.setRow(1, [100.0, 0.1010, 0.11, 0.100], 1)
        self.cpf.setRow(2, [100.0, 0.0940, 0.10, 0.100], 0)
        self.cpf.setRow(3, [100.0, 0.0957, 0.10, 0.099], 1)
        self.cpf.setRow(4, [100.0, 0.1100, 0.11, 0.100], 0)
        self.cpf.setRow(5, [100.0, 0.1100, 0.11, 0.110], 1)
        K = np.array([[0, 1], [0, 2], [2, 3]])
        D = yield self.cpf(K=K)
        # With Kn = 4 never-in-pop rows out of N = 6 total, the expected
        # penalty follows this logistic-style expression
        Kn_penalty = 1 + np.exp(12*(4.0/6 - 0.4))
        penalty = [Kn_penalty if x else 1.0 for x in (1, 1, 0)]
        for k, p in enumerate(penalty):
            self.assertWithinTenPercent(D[k], 0.00120/p)
    @defer.inlineCallbacks
    def test_call(self):
        """
        With no explicit pairs, the finder returns the index of the row
        closest to some other row; clearing that row moves on to the
        next-closest candidate.
        """
        self.cpf.setRow(0, [ 90.0, 0.11, 0.2, 0.30])
        self.cpf.setRow(1, [ 90.0, 0.09, 0.2, 0.30])
        self.cpf.setRow(2, [100.0, 0.09, 0.2, 0.30])
        self.cpf.setRow(3, [110.0, 0.11, 0.2, 0.30])
        self.cpf.setRow(4, [110.0, 0.10, 0.2, 0.30])
        self.cpf.setRow(5, [140.0, 0.10, 0.2, 0.30])
        self.cpf.setRow(6, [140.0, 0.10, 0.2, 0.31])
        self.cpf.setRow(7, [140.1, 0.10, 0.2, 0.31])
        kr = yield self.cpf()
        self.assertEqual(kr, 6)
        self.cpf.clearRow(6)
        kr = yield self.cpf()
        self.assertEqual(kr, 1)
        self.cpf.clearRow(1)
        kr = yield self.cpf()
        self.assertEqual(kr, 0)
class Test_History(tb.TestCase):
    """
    Tests for L{history.History} with three parameter names and a cap
    of N_max=10 retained rows.
    """
    def setUp(self):
        self.names = ['foo', 'bar', 'zebra']
        self.h = history.History(self.names, N_max=10)
    def tearDown(self):
        # shutdown may return a Deferred; returning it lets trial wait on it
        return self.h.shutdown()
    def test_kkr(self):
        """kkr(SSE, N) returns the insertion slot k and the row index kr."""
        self.h.X = np.array([[
            # row:  0  1  2  3  4  5  6  7
            # k:    2  0  3  -  1  4  -  5
            3, 1, 4, 0, 2, 5, 0, 9]]).transpose()
        self.h.K = [
            # k:    0  1  2  3  4  5
            1, 4, 0, 2, 5, 7]
        # sorted SSEs via X[K,0]: 1 2 3 4 5 9
        N = 6
        for SSE, k_exp, kr_exp in (
                (7.0, 5, 3),
                (1.0, 0, 3),
                (99, 6, 3),
        ):
            k, kr = self.h.kkr(SSE, N)
            self.assertEqual(k, k_exp)
            self.assertEqual(kr, kr_exp)
    @defer.inlineCallbacks
    def test_add_worsening(self):
        """Individuals added in worsening-SSE order are kept in arrival order."""
        for k in range(5):
            i = tb.MockIndividual(values=[k,k+1,k+2])
            i.SSE = 100.0 + k
            yield self.h.add(i)
            self.assertEqual(len(self.h), k+1)
            self.assertItemsEqual(self.h[k], [i.SSE] + i.values)
        for k, values in enumerate(self.h):
            self.assertItemsEqual(values, [k,k+1,k+2])
    @defer.inlineCallbacks
    def test_add_ignoreInfSSE(self):
        """Individuals with infinite SSE are rejected (add returns None)."""
        for k in range(5):
            i = tb.MockIndividual(values=[k,k+1,k+2])
            i.SSE = 100.0 + k if k < 3 else float('+inf')
            kr = yield self.h.add(i)
            if k < 3:
                self.assertLess(kr, 10)
                self.assertEqual(len(self.h), k+1)
                self.assertItemsEqual(self.h[k], [i.SSE] + i.values)
            else:
                # Infinite SSE: no row assigned, history size unchanged
                self.assertIs(kr, None)
                self.assertEqual(len(self.h), 3)
        for k, values in enumerate(self.h):
            self.assertItemsEqual(values, [k,k+1,k+2])
    @defer.inlineCallbacks
    def test_add_improving(self):
        """Individuals added in improving-SSE order iterate best-first."""
        for k in range(5):
            i = tb.MockIndividual(values=[k,k+1,k+2])
            i.SSE = 100.0 - k
            yield self.h.add(i)
            self.assertEqual(len(self.h), k+1)
        for k, values in enumerate(self.h):
            self.assertItemsEqual(values, [4-k,4-k+1,4-k+2])
    def popitem_predictably(self, x):
        """
        Pop and return the (key, value) pair with the smallest value from
        dict x, making the otherwise arbitrary pop order deterministic.
        """
        value = sorted(x.values())[0]
        for key, this_value in x.items():
            if this_value == value:
                # Safe despite mutating during iteration: we return at once
                x.pop(key)
                return key, value
    @defer.inlineCallbacks
    def test_add_limitSize_worsening(self):
        """With the cap reached, worsening rows still iterate in SSE order."""
        krPopped = set()
        for k in range(15):
            i = tb.MockIndividual(values=[k,k+1,k+2])
            i.SSE = 1000.0 + k
            yield self.h.add(i)
            if len(self.h.kr) == 10:
                # At capacity: deterministically evict one population member
                iHash, kr = self.popitem_predictably(self.h.kr)
                self.h.notInPop(kr)
                krPopped.add(kr)
        self.assertEqual(len(self.h), 10)
        valuesPrev = None
        for values in self.h:
            if valuesPrev is not None:
                # Worsening insert order: each row's values exceed the last's
                for v, vp in zip(values, valuesPrev):
                    self.assertGreater(v, vp)
            valuesPrev = values
    @defer.inlineCallbacks
    def test_add_limitSize_improving(self):
        """With the cap reached, improving rows iterate best-first."""
        krPopped = set()
        for k in range(15):
            i = tb.MockIndividual(values=[k,k+1,k+2])
            i.SSE = 1000.0 - k
            yield self.h.add(i)
            if len(self.h.kr) == 10:
                iHash, kr = self.popitem_predictably(self.h.kr)
                # notInPop may return a Deferred here — yield to serialize
                yield self.h.notInPop(kr)
                krPopped.add(kr)
        self.assertEqual(len(self.h), 10)
        valuesPrev = None
        for values in self.h:
            if valuesPrev is not None:
                for v, vp in zip(values, valuesPrev):
                    self.assertLess(v, vp)
            valuesPrev = values
    @defer.inlineCallbacks
    def test_add_limitSize_improving_neverInPop(self):
        """Rows added with neverInPop=True land in Kn, never in Kp."""
        for k in range(15):
            i = tb.MockIndividual(values=[k,k+1,k+2])
            i.SSE = 1000.0 - k
            yield self.h.add(i, neverInPop=True)
        self.assertEqual(len(self.h), 10)
        self.assertEqual(len(self.h.Kp), 0)
        self.assertEqual(len(self.h.Kn), 10)
        valuesPrev = None
        for values in self.h:
            if valuesPrev is not None:
                for v, vp in zip(values, valuesPrev):
                    self.assertLess(v, vp)
            valuesPrev = values
    @defer.inlineCallbacks
    def test_add_then_purge(self):
        """purgePop empties both the history and its in-population set Kp."""
        for k in range(5):
            i = tb.MockIndividual(values=[k,k+1,k+2])
            i.SSE = 100.0 - k
            yield self.h.add(i)
            self.assertEqual(len(self.h), k+1)
        self.assertEqual(len(self.h), 5)
        self.assertEqual(len(self.h.Kp), 5)
        self.h.purgePop()
        self.assertEqual(len(self.h), 0)
        self.assertEqual(len(self.h.Kp), 0)
    @defer.inlineCallbacks
    def test_value_vs_SSE(self):
        """value_vs_SSE returns SSEs and the named parameter's values."""
        for k in range(10):
            i = tb.MockIndividual(values=[k,k+1,k+2])
            i.SSE = 10.0 + k
            yield self.h.add(i)
        XY = yield self.h.value_vs_SSE(['bar'])
        self.assertEqual(len(XY), 2)
        self.assertItemsEqual(XY[0], np.linspace(10.0, 19.0, 10))
        self.assertItemsEqual(XY[1], np.linspace(1.0, 10.0, 10))
    @defer.inlineCallbacks
    def test_value_vs_SSE_maxRatio(self):
        """maxRatio=1.5 limits rows to SSE <= 1.5x the best SSE."""
        for k in range(10):
            i = tb.MockIndividual(values=[k,k+1,k+2])
            i.SSE = 10.0 + k
            yield self.h.add(i)
        XY = yield self.h.value_vs_SSE(['bar'], maxRatio=1.5)
        self.assertEqual(len(XY), 2)
        self.assertItemsEqual(XY[0], np.linspace(10.0, 15.0, 6))
        self.assertItemsEqual(XY[1], np.linspace(1.0, 6.0, 6))
    @defer.inlineCallbacks
    def test_value_vs_SSE_inPop(self):
        """inPop=True excludes the one row marked not-in-population."""
        for k in range(10):
            i = tb.MockIndividual(values=[k,k+1,k+2])
            i.SSE = 10.0 + k
            kr = yield self.h.add(i)
        # Only the last-added row gets marked out of the population
        self.h.notInPop(kr)
        XY = yield self.h.value_vs_SSE(['bar'], inPop=True)
        self.assertEqual(len(XY), 2)
        self.assertItemsEqual(XY[0], np.linspace(10.0, 18.0, 9))
        self.assertItemsEqual(XY[1], np.linspace(1.0, 9.0, 9))
    @defer.inlineCallbacks
    def test_value_vs_SSE_notInPop(self):
        """notInPop=True returns only the rows marked out of the population."""
        for k in range(10):
            i = tb.MockIndividual(values=[k,k+1,k+2])
            i.SSE = 10.0 + k
            kr = yield self.h.add(i)
            if k > 5: self.h.notInPop(kr)
        XY = yield self.h.value_vs_SSE(['bar'], notInPop=True)
        self.assertEqual(len(XY), 2)
        self.assertItemsEqual(XY[0], np.linspace(16.0, 19.0, 4))
        self.assertItemsEqual(XY[1], np.linspace(7.0, 10.0, 4))
    @defer.inlineCallbacks
    def test_pickle(self):
        """A History survives a pickle round trip with its rows intact."""
        def values(k):
            # Deterministic, distinct per-row values
            return [k, np.exp(-0.1*k), np.exp(-0.5*k)]
        for k in range(10):
            i = tb.MockIndividual(values=values(k))
            i.SSE = 1000.0+k
            yield self.h.add(i)
        s = pickle.dumps(self.h)
        h = pickle.loads(s)
        self.assertEqual(len(h), 10)
        for k, x in enumerate(h):
            # Compare by sum of squared differences to tolerate float noise
            sdiff = np.sum(np.square(x-values(k)))
            self.assertLess(sdiff, 1E-6)
| [
"ade.history.ClosestPairFinder",
"ade.history.History",
"pickle.dumps",
"ade.test.testbase.MockIndividual",
"numpy.exp",
"numpy.array",
"numpy.linspace",
"pickle.loads",
"ade.history.Analysis",
"numpy.var"
] | [((1146, 1251), 'numpy.array', 'np.array', (['[[110.0, 1, 2, 5], [810.0, 2, 3, 4], [270.0, 3, 4, 3], [580.0, 4, 5, 2], [\n 999.0, 5, 6, 1]]'], {}), '([[110.0, 1, 2, 5], [810.0, 2, 3, 4], [270.0, 3, 4, 3], [580.0, 4, \n 5, 2], [999.0, 5, 6, 1]])\n', (1154, 1251), True, 'import numpy as np\n'), ((1383, 1427), 'ade.history.Analysis', 'history.Analysis', (['self.names', 'self.X', 'self.K'], {}), '(self.names, self.X, self.K)\n', (1399, 1427), False, 'from ade import history\n'), ((2846, 2878), 'ade.history.ClosestPairFinder', 'history.ClosestPairFinder', (['(10)', '(4)'], {}), '(10, 4)\n', (2871, 2878), False, 'from ade import history\n'), ((4637, 4687), 'numpy.array', 'np.array', (['[[0, 1], [0, 2], [0, 3], [2, 3], [3, 4]]'], {}), '([[0, 1], [0, 2], [0, 3], [2, 3], [3, 4]])\n', (4645, 4687), True, 'import numpy as np\n'), ((5950, 5984), 'numpy.array', 'np.array', (['[[0, 1], [0, 2], [2, 3]]'], {}), '([[0, 1], [0, 2], [2, 3]])\n', (5958, 5984), True, 'import numpy as np\n'), ((7103, 7140), 'ade.history.History', 'history.History', (['self.names'], {'N_max': '(10)'}), '(self.names, N_max=10)\n', (7118, 7140), False, 'from ade import history\n'), ((13982, 14002), 'pickle.dumps', 'pickle.dumps', (['self.h'], {}), '(self.h)\n', (13994, 14002), False, 'import pickle\n'), ((14015, 14030), 'pickle.loads', 'pickle.loads', (['s'], {}), '(s)\n', (14027, 14030), False, 'import pickle\n'), ((4786, 4827), 'numpy.var', 'np.var', (['[90.0, 90.0, 100.0, 110.0, 110.0]'], {}), '([90.0, 90.0, 100.0, 110.0, 110.0])\n', (4792, 4827), True, 'import numpy as np\n'), ((4890, 4927), 'numpy.var', 'np.var', (['[0.11, 0.09, 0.09, 0.11, 0.1]'], {}), '([0.11, 0.09, 0.09, 0.11, 0.1])\n', (4896, 4927), True, 'import numpy as np\n'), ((6066, 6094), 'numpy.exp', 'np.exp', (['(12 * (4.0 / 6 - 0.4))'], {}), '(12 * (4.0 / 6 - 0.4))\n', (6072, 6094), True, 'import numpy as np\n'), ((7907, 7950), 'ade.test.testbase.MockIndividual', 'tb.MockIndividual', ([], {'values': '[k, k + 1, k + 2]'}), '(values=[k, 
k + 1, k + 2])\n', (7924, 7950), True, 'from ade.test import testbase as tb\n'), ((8326, 8369), 'ade.test.testbase.MockIndividual', 'tb.MockIndividual', ([], {'values': '[k, k + 1, k + 2]'}), '(values=[k, k + 1, k + 2])\n', (8343, 8369), True, 'from ade.test import testbase as tb\n'), ((8964, 9007), 'ade.test.testbase.MockIndividual', 'tb.MockIndividual', ([], {'values': '[k, k + 1, k + 2]'}), '(values=[k, k + 1, k + 2])\n', (8981, 9007), True, 'from ade.test import testbase as tb\n'), ((9585, 9628), 'ade.test.testbase.MockIndividual', 'tb.MockIndividual', ([], {'values': '[k, k + 1, k + 2]'}), '(values=[k, k + 1, k + 2])\n', (9602, 9628), True, 'from ade.test import testbase as tb\n'), ((10266, 10309), 'ade.test.testbase.MockIndividual', 'tb.MockIndividual', ([], {'values': '[k, k + 1, k + 2]'}), '(values=[k, k + 1, k + 2])\n', (10283, 10309), True, 'from ade.test import testbase as tb\n'), ((10936, 10979), 'ade.test.testbase.MockIndividual', 'tb.MockIndividual', ([], {'values': '[k, k + 1, k + 2]'}), '(values=[k, k + 1, k + 2])\n', (10953, 10979), True, 'from ade.test import testbase as tb\n'), ((11515, 11558), 'ade.test.testbase.MockIndividual', 'tb.MockIndividual', ([], {'values': '[k, k + 1, k + 2]'}), '(values=[k, k + 1, k + 2])\n', (11532, 11558), True, 'from ade.test import testbase as tb\n'), ((11975, 12018), 'ade.test.testbase.MockIndividual', 'tb.MockIndividual', ([], {'values': '[k, k + 1, k + 2]'}), '(values=[k, k + 1, k + 2])\n', (11992, 12018), True, 'from ade.test import testbase as tb\n'), ((12196, 12223), 'numpy.linspace', 'np.linspace', (['(10.0)', '(19.0)', '(10)'], {}), '(10.0, 19.0, 10)\n', (12207, 12223), True, 'import numpy as np\n'), ((12262, 12288), 'numpy.linspace', 'np.linspace', (['(1.0)', '(10.0)', '(10)'], {}), '(1.0, 10.0, 10)\n', (12273, 12288), True, 'import numpy as np\n'), ((12404, 12447), 'ade.test.testbase.MockIndividual', 'tb.MockIndividual', ([], {'values': '[k, k + 1, k + 2]'}), '(values=[k, k + 1, k + 2])\n', (12421, 
12447), True, 'from ade.test import testbase as tb\n'), ((12639, 12665), 'numpy.linspace', 'np.linspace', (['(10.0)', '(15.0)', '(6)'], {}), '(10.0, 15.0, 6)\n', (12650, 12665), True, 'import numpy as np\n'), ((12704, 12728), 'numpy.linspace', 'np.linspace', (['(1.0)', '(6.0)', '(6)'], {}), '(1.0, 6.0, 6)\n', (12715, 12728), True, 'import numpy as np\n'), ((12841, 12884), 'ade.test.testbase.MockIndividual', 'tb.MockIndividual', ([], {'values': '[k, k + 1, k + 2]'}), '(values=[k, k + 1, k + 2])\n', (12858, 12884), True, 'from ade.test import testbase as tb\n'), ((13107, 13133), 'numpy.linspace', 'np.linspace', (['(10.0)', '(18.0)', '(9)'], {}), '(10.0, 18.0, 9)\n', (13118, 13133), True, 'import numpy as np\n'), ((13172, 13196), 'numpy.linspace', 'np.linspace', (['(1.0)', '(9.0)', '(9)'], {}), '(1.0, 9.0, 9)\n', (13183, 13196), True, 'import numpy as np\n'), ((13312, 13355), 'ade.test.testbase.MockIndividual', 'tb.MockIndividual', ([], {'values': '[k, k + 1, k + 2]'}), '(values=[k, k + 1, k + 2])\n', (13329, 13355), True, 'from ade.test import testbase as tb\n'), ((13595, 13621), 'numpy.linspace', 'np.linspace', (['(16.0)', '(19.0)', '(4)'], {}), '(16.0, 19.0, 4)\n', (13606, 13621), True, 'import numpy as np\n'), ((13660, 13685), 'numpy.linspace', 'np.linspace', (['(7.0)', '(10.0)', '(4)'], {}), '(7.0, 10.0, 4)\n', (13671, 13685), True, 'import numpy as np\n'), ((7243, 7279), 'numpy.array', 'np.array', (['[[3, 1, 4, 0, 2, 5, 0, 9]]'], {}), '([[3, 1, 4, 0, 2, 5, 0, 9]])\n', (7251, 7279), True, 'import numpy as np\n'), ((13788, 13804), 'numpy.exp', 'np.exp', (['(-0.1 * k)'], {}), '(-0.1 * k)\n', (13794, 13804), True, 'import numpy as np\n'), ((13804, 13820), 'numpy.exp', 'np.exp', (['(-0.5 * k)'], {}), '(-0.5 * k)\n', (13810, 13820), True, 'import numpy as np\n')] |
import torch
# torch.manual_seed(0)
import torch.nn as nn
from modelZoo.resNet import ResNet, Bottleneck, BasicBlock
from modelZoo.DyanOF import creatRealDictionary
from utils import generateGridPoles, gridRing,fista
import numpy as np
def load_preTrained_model(pretrained, newModel):
    """Copy matching weights from a pretrained ResNet-X into a custom model.

    Only parameters whose names also exist in ``newModel`` are transferred
    (the modified ResNet drops the last two layers and only returns feature
    maps), and every parameter of ``newModel`` is frozen afterwards.

    Returns the updated ``newModel``.
    """
    target_state = newModel.state_dict()
    transferable = {
        name: tensor
        for name, tensor in pretrained.state_dict().items()
        if name in target_state
    }
    target_state.update(transferable)
    newModel.load_state_dict(target_state)
    # Freeze everything so the copied weights stay fixed during training.
    for weight in newModel.parameters():
        weight.requires_grad = False
    return newModel
class keyframeProposalNet(nn.Module):
    """Key-frame proposal network.

    A ResNet backbone extracts per-frame feature maps, a small conv stack
    reduces them to 64 channels, and a temporal conv head scores each frame
    with a steep sigmoid so the output approximates a 0/1 key-frame
    indicator. A DYAN dictionary built from the learnable poles
    (``Drr``/``Dtheta``) is returned alongside the features.

    Args:
        numFrame: number of frames in an input clip.
        Drr, Dtheta: initial pole magnitudes/angles for the dictionary.
        gpu_id: CUDA device used when building the dictionary.
        backbone: one of 'Resnet101', 'Resnet50', 'Resnet34', 'Resnet18'.
        config: dataset configuration; 'Penn' uses a stride-2 temporal conv
            and a smaller fc layer.
    """

    def __init__(self, numFrame, Drr, Dtheta, gpu_id, backbone, config):
        super(keyframeProposalNet, self).__init__()
        self.num_frame = numFrame
        self.gpu_id = gpu_id
        self.backbone = backbone
        self.config = config

        # (residual block type, per-stage layer counts) for each backbone.
        backbone_specs = {
            'Resnet101': (Bottleneck, [3, 4, 23, 3]),
            'Resnet50': (Bottleneck, [3, 4, 6, 3]),
            'Resnet34': (BasicBlock, [3, 4, 6, 3]),
            'Resnet18': (BasicBlock, [2, 2, 2, 2]),
        }
        if backbone not in backbone_specs:
            raise ValueError('Unsupported backbone: %s' % backbone)
        block, layers = backbone_specs[backbone]
        self.modifiedResnet = ResNet(block=block, layers=layers, zero_init_residual=False,
                                     groups=1, width_per_group=64, replace_stride_with_dilation=None,
                                     norm_layer=None)
        if backbone in ('Resnet101', 'Resnet50'):
            # Deep backbones emit 2048-channel maps; project them down to 512
            # so the shared layers below see the same channel count that
            # ResNet-18/34 produce natively.
            self.Conv2d = nn.Conv2d(2048, 512, kernel_size=3, stride=1, padding=1, groups=1, bias=False, dilation=1)
            self.bn1 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

        self.relu = nn.LeakyReLU(inplace=True)
        # Downsample the feature map: 512 -> 256 -> 128 -> 64 channels.
        self.layer2 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=0, groups=1, bias=False, dilation=1)
        self.bn_l2 = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.layer3 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=0, groups=1, bias=False, dilation=1)
        self.bn_l3 = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.layer4 = nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1, groups=1, bias=False, dilation=1)
        self.bn_l4 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

        # Learnable dictionary poles.
        self.Drr = nn.Parameter(Drr, requires_grad=True)
        self.Dtheta = nn.Parameter(Dtheta, requires_grad=True)

        # Embed information along the temporal axis.
        if self.config == 'Penn':
            self.fcn1 = nn.Conv2d(self.num_frame, 25, kernel_size=1, stride=2, padding=0, groups=1, bias=False, dilation=1)
            self.fc = nn.Linear(2560, self.num_frame)
        else:
            self.fcn1 = nn.Conv2d(self.num_frame, 25, kernel_size=1, stride=1, padding=0, groups=1, bias=False,
                                  dilation=1)
            self.fc = nn.Linear(5760, self.num_frame)
        self.bn2 = nn.BatchNorm2d(25, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.fcn2 = nn.Conv2d(25, 10, kernel_size=1, stride=1, padding=0, groups=1, bias=False, dilation=1)
        self.bn3 = nn.BatchNorm2d(10, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.sig = nn.Sigmoid()

    def forward(self, x):
        """Extract per-frame features.

        Returns:
            feature: (T, 64, H', W') reduced feature maps.
            Dictionary: the DYAN dictionary for ``num_frame`` frames.
            imageFeature: the raw backbone output (e.g. T x 512 x 7 x 7).
        """
        Dictionary = creatRealDictionary(self.num_frame, self.Drr, self.Dtheta, self.gpu_id)
        imageFeature = self.modifiedResnet(x)  # T X 512 X 7 X 7
        # BUGFIX: the original condition `== 'Resnet34' or 'Resnet18'` was
        # always true ('Resnet18' is truthy), so the 2048->512 projection
        # never ran for ResNet-50/101 backbones.
        if self.backbone in ('Resnet34', 'Resnet18'):
            convx = imageFeature  # already 512 channels
        else:
            convx = self.Conv2d(imageFeature)
            convx = self.bn1(convx)
            convx = self.relu(convx)
        x2 = self.relu(self.bn_l2(self.layer2(convx)))
        x3 = self.relu(self.bn_l3(self.layer3(x2)))
        feature = self.relu(self.bn_l4(self.layer4(x3)))
        return feature, Dictionary, imageFeature

    def forward2(self, feature, alpha):
        """Score each frame of ``feature`` as a key-frame indicator.

        ``alpha`` sharpens the sigmoid so the output approaches a hard 0/1
        selection. Returns a (1, num_frame) score tensor.
        """
        # Move time to the channel axis so fcn1 (in_channels = num_frame)
        # convolves across frames.
        x = feature.permute(1, 0, 2, 3)
        x = self.relu(self.bn2(self.fcn1(x)))
        x = self.relu(self.bn3(self.fcn2(x)))
        x = x.view(1, -1)
        x = self.fc(x)
        out = self.sig(alpha * x)
        return out
class onlineUpdate(nn.Module):
    """Online refinement head built on top of a key-frame proposal network.

    The K-FPN proposes key frames among the first ``FRA`` observed frames;
    this module reconstructs a future frame's feature map from the sparse
    code and classifies the (ground-truth, predicted) feature pair with a
    small CNN producing two logits.
    """

    def __init__(self, FRA, PRE, T, Drr, Dtheta, gpu_id):
        super(onlineUpdate, self).__init__()
        self.gpu_id = gpu_id
        self.Drr = Drr
        self.Dtheta = Dtheta
        self.numFrame = T
        self.FRA = FRA  # number of observed frames
        self.PRE = PRE  # number of frames to predict
        # Key-frame proposal backbone (ResNet-18 variant, jhmdb settings).
        self.K_FPN = keyframeProposalNet(numFrame=self.numFrame, Drr=self.Drr, Dtheta=self.Dtheta, gpu_id=gpu_id,
                                        backbone='Resnet18', config='jhmdb')
        self.relu = nn.LeakyReLU(inplace=True)
        # Classifier stack: (2*512) -> 512 -> 256 -> 128 -> 64 channels.
        self.layer0 = nn.Conv2d(512 * 2, 512, kernel_size=3, stride=1, padding=0, groups=1, bias=False, dilation=1)
        self.bn_l0 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.layer1 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=0, groups=1, bias=False, dilation=1)
        self.bn_l1 = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.layer2 = nn.Conv2d(256, 128, kernel_size=1, stride=1, padding=0, groups=1, bias=False, dilation=1)
        self.bn_l2 = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.layer3 = nn.Conv2d(128, 64, kernel_size=1, stride=1, padding=0, groups=1, bias=False, dilation=1)
        self.bn_l3 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.fc = nn.Linear(1 * 64 * 3 * 3, 2)

    def get_keylist(self, x, alpha):
        """Run the K-FPN on clip ``x`` and extract key-frame bookkeeping.

        Returns (sparse code of the observed key frames, full dictionary,
        key frames still to predict, observed key frames, all key frames
        within FRA+PRE, raw backbone features).
        """
        feature, Dictionary, imgFeature = self.K_FPN.forward(x)
        scores = self.K_FPN.forward2(feature, alpha)[0, :]
        # Frames whose indicator is (almost) saturated count as key frames.
        key_ind = (scores > 0.995).nonzero().squeeze(1)
        all_keys = key_ind.cpu().numpy()
        key_list_FRA = list(all_keys[np.where(all_keys < self.FRA)[0]])  # input key list
        key_list = list(all_keys[np.where(all_keys < self.PRE + self.FRA)[0]])
        keylist_to_pred = list(set(key_list) - set(key_list_FRA))
        Dict_key = Dictionary[key_list_FRA, :]
        feat_key = imgFeature[key_list_FRA, :]
        t, c, w, h = feat_key.shape
        sparseCode_key = fista(Dict_key, feat_key.reshape(1, t, c * w * h), 0.01, 100, self.gpu_id)
        return sparseCode_key, Dictionary, keylist_to_pred, key_list_FRA, key_list, imgFeature

    def forward(self, imgFeature, sparseCode_key, Dictionary, fraNum):
        """Classify the (ground-truth, predicted) feature pair for frame ``fraNum``."""
        gtImgFeature = imgFeature[fraNum]
        c, w, h = gtImgFeature.shape
        # Extend the observed dictionary rows with the row of the frame
        # being predicted, then reconstruct its feature map.
        newDictionary = torch.cat((Dictionary[0:self.FRA], Dictionary[fraNum].unsqueeze(0)))
        reconstructed = torch.matmul(newDictionary, sparseCode_key).reshape(newDictionary.shape[0], c, w, h)
        predImgFeature = reconstructed[-1]
        x = torch.cat((gtImgFeature, predImgFeature)).unsqueeze(0)
        stages = ((self.layer0, self.bn_l0), (self.layer1, self.bn_l1),
                  (self.layer2, self.bn_l2), (self.layer3, self.bn_l3))
        for conv, bn in stages:
            x = self.relu(bn(conv(x)))
        return self.fc(x.view(1, -1))
if __name__ == "__main__":
    # Smoke test: push one random clip through the proposal network on GPU.
    gpu_id = 2
    alpha = 4  # step size for sigmoid
    poles, _ = gridRing(4 * 40)
    Drr = torch.from_numpy(abs(poles)).float()
    Dtheta = torch.from_numpy(np.angle(poles)).float()
    net = keyframeProposalNet(numFrame=40, Drr=Drr, Dtheta=Dtheta, gpu_id=gpu_id,
                              backbone='Resnet34', config='Penn')
    net.cuda(gpu_id)
    X = torch.randn(1, 40, 3, 224, 224).cuda(gpu_id)
    for clip in X:
        feature, dictionary, _ = net.forward(clip)
        out = net.forward2(feature, alpha)
        print('check')
    print('done')
| [
"torch.nn.BatchNorm2d",
"torch.nn.Sigmoid",
"torch.nn.LeakyReLU",
"numpy.where",
"utils.gridRing",
"torch.from_numpy",
"torch.nn.Conv2d",
"numpy.angle",
"utils.fista",
"torch.nn.Parameter",
"torch.matmul",
"modelZoo.resNet.ResNet",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Linear",
"modelZ... | [((8981, 8992), 'utils.gridRing', 'gridRing', (['N'], {}), '(N)\n', (8989, 8992), False, 'from utils import generateGridPoles, gridRing, fista\n'), ((9063, 9074), 'numpy.angle', 'np.angle', (['P'], {}), '(P)\n', (9071, 9074), True, 'import numpy as np\n'), ((2888, 2914), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2900, 2914), True, 'import torch.nn as nn\n'), ((2971, 3065), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(0)', 'groups': '(1)', 'bias': '(False)', 'dilation': '(1)'}), '(512, 256, kernel_size=3, stride=1, padding=0, groups=1, bias=\n False, dilation=1)\n', (2980, 3065), True, 'import torch.nn as nn\n'), ((3082, 3169), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {'eps': '(1e-05)', 'momentum': '(0.1)', 'affine': '(True)', 'track_running_stats': '(True)'}), '(256, eps=1e-05, momentum=0.1, affine=True,\n track_running_stats=True)\n', (3096, 3169), True, 'import torch.nn as nn\n'), ((3188, 3282), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(0)', 'groups': '(1)', 'bias': '(False)', 'dilation': '(1)'}), '(256, 128, kernel_size=3, stride=1, padding=0, groups=1, bias=\n False, dilation=1)\n', (3197, 3282), True, 'import torch.nn as nn\n'), ((3299, 3386), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {'eps': '(1e-05)', 'momentum': '(0.1)', 'affine': '(True)', 'track_running_stats': '(True)'}), '(128, eps=1e-05, momentum=0.1, affine=True,\n track_running_stats=True)\n', (3313, 3386), True, 'import torch.nn as nn\n'), ((3405, 3497), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(64)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'groups': '(1)', 'bias': '(False)', 'dilation': '(1)'}), '(128, 64, kernel_size=3, stride=1, padding=1, groups=1, bias=False,\n dilation=1)\n', (3414, 3497), True, 'import torch.nn as nn\n'), ((3515, 3601), 'torch.nn.BatchNorm2d', 
'nn.BatchNorm2d', (['(64)'], {'eps': '(1e-05)', 'momentum': '(0.1)', 'affine': '(True)', 'track_running_stats': '(True)'}), '(64, eps=1e-05, momentum=0.1, affine=True,\n track_running_stats=True)\n', (3529, 3601), True, 'import torch.nn as nn\n'), ((3619, 3656), 'torch.nn.Parameter', 'nn.Parameter', (['Drr'], {'requires_grad': '(True)'}), '(Drr, requires_grad=True)\n', (3631, 3656), True, 'import torch.nn as nn\n'), ((3679, 3719), 'torch.nn.Parameter', 'nn.Parameter', (['Dtheta'], {'requires_grad': '(True)'}), '(Dtheta, requires_grad=True)\n', (3691, 3719), True, 'import torch.nn as nn\n'), ((4226, 4312), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(25)'], {'eps': '(1e-05)', 'momentum': '(0.1)', 'affine': '(True)', 'track_running_stats': '(True)'}), '(25, eps=1e-05, momentum=0.1, affine=True,\n track_running_stats=True)\n', (4240, 4312), True, 'import torch.nn as nn\n'), ((4329, 4420), 'torch.nn.Conv2d', 'nn.Conv2d', (['(25)', '(10)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'groups': '(1)', 'bias': '(False)', 'dilation': '(1)'}), '(25, 10, kernel_size=1, stride=1, padding=0, groups=1, bias=False,\n dilation=1)\n', (4338, 4420), True, 'import torch.nn as nn\n'), ((4436, 4522), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(10)'], {'eps': '(1e-05)', 'momentum': '(0.1)', 'affine': '(True)', 'track_running_stats': '(True)'}), '(10, eps=1e-05, momentum=0.1, affine=True,\n track_running_stats=True)\n', (4450, 4522), True, 'import torch.nn as nn\n'), ((4544, 4572), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1, 1)'], {}), '((1, 1))\n', (4564, 4572), True, 'import torch.nn as nn\n'), ((4593, 4605), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4603, 4605), True, 'import torch.nn as nn\n'), ((4655, 4726), 'modelZoo.DyanOF.creatRealDictionary', 'creatRealDictionary', (['self.num_frame', 'self.Drr', 'self.Dtheta', 'self.gpu_id'], {}), '(self.num_frame, self.Drr, self.Dtheta, self.gpu_id)\n', (4674, 4726), False, 'from modelZoo.DyanOF 
import creatRealDictionary\n'), ((6164, 6190), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6176, 6190), True, 'import torch.nn as nn\n'), ((6214, 6312), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512 * 2)', '(512)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(0)', 'groups': '(1)', 'bias': '(False)', 'dilation': '(1)'}), '(512 * 2, 512, kernel_size=3, stride=1, padding=0, groups=1, bias=\n False, dilation=1)\n', (6223, 6312), True, 'import torch.nn as nn\n'), ((6327, 6414), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {'eps': '(1e-05)', 'momentum': '(0.1)', 'affine': '(True)', 'track_running_stats': '(True)'}), '(512, eps=1e-05, momentum=0.1, affine=True,\n track_running_stats=True)\n', (6341, 6414), True, 'import torch.nn as nn\n'), ((6434, 6528), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(0)', 'groups': '(1)', 'bias': '(False)', 'dilation': '(1)'}), '(512, 256, kernel_size=3, stride=1, padding=0, groups=1, bias=\n False, dilation=1)\n', (6443, 6528), True, 'import torch.nn as nn\n'), ((6545, 6632), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {'eps': '(1e-05)', 'momentum': '(0.1)', 'affine': '(True)', 'track_running_stats': '(True)'}), '(256, eps=1e-05, momentum=0.1, affine=True,\n track_running_stats=True)\n', (6559, 6632), True, 'import torch.nn as nn\n'), ((6652, 6746), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'groups': '(1)', 'bias': '(False)', 'dilation': '(1)'}), '(256, 128, kernel_size=1, stride=1, padding=0, groups=1, bias=\n False, dilation=1)\n', (6661, 6746), True, 'import torch.nn as nn\n'), ((6763, 6850), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {'eps': '(1e-05)', 'momentum': '(0.1)', 'affine': '(True)', 'track_running_stats': '(True)'}), '(128, eps=1e-05, momentum=0.1, affine=True,\n track_running_stats=True)\n', (6777, 6850), True, 'import 
torch.nn as nn\n'), ((6870, 6962), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(64)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'groups': '(1)', 'bias': '(False)', 'dilation': '(1)'}), '(128, 64, kernel_size=1, stride=1, padding=0, groups=1, bias=False,\n dilation=1)\n', (6879, 6962), True, 'import torch.nn as nn\n'), ((6980, 7066), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {'eps': '(1e-05)', 'momentum': '(0.1)', 'affine': '(True)', 'track_running_stats': '(True)'}), '(64, eps=1e-05, momentum=0.1, affine=True,\n track_running_stats=True)\n', (6994, 7066), True, 'import torch.nn as nn\n'), ((7082, 7110), 'torch.nn.Linear', 'nn.Linear', (['(1 * 64 * 3 * 3)', '(2)'], {}), '(1 * 64 * 3 * 3, 2)\n', (7091, 7110), True, 'import torch.nn as nn\n'), ((7849, 7898), 'utils.fista', 'fista', (['Dict_key', 'feat_key', '(0.01)', '(100)', 'self.gpu_id'], {}), '(Dict_key, feat_key, 0.01, 100, self.gpu_id)\n', (7854, 7898), False, 'from utils import generateGridPoles, gridRing, fista\n'), ((1091, 1253), 'modelZoo.resNet.ResNet', 'ResNet', ([], {'block': 'Bottleneck', 'layers': '[3, 4, 23, 3]', 'zero_init_residual': '(False)', 'groups': '(1)', 'width_per_group': '(64)', 'replace_stride_with_dilation': 'None', 'norm_layer': 'None'}), '(block=Bottleneck, layers=[3, 4, 23, 3], zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None,\n norm_layer=None)\n', (1097, 1253), False, 'from modelZoo.resNet import ResNet, Bottleneck, BasicBlock\n'), ((1368, 1463), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2048)', '(512)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'groups': '(1)', 'bias': '(False)', 'dilation': '(1)'}), '(2048, 512, kernel_size=3, stride=1, padding=1, groups=1, bias=\n False, dilation=1)\n', (1377, 1463), True, 'import torch.nn as nn\n'), ((1482, 1569), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {'eps': '(1e-05)', 'momentum': '(0.1)', 'affine': '(True)', 'track_running_stats': '(True)'}), '(512, 
eps=1e-05, momentum=0.1, affine=True,\n track_running_stats=True)\n', (1496, 1569), True, 'import torch.nn as nn\n'), ((3825, 3928), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.num_frame', '(25)'], {'kernel_size': '(1)', 'stride': '(2)', 'padding': '(0)', 'groups': '(1)', 'bias': '(False)', 'dilation': '(1)'}), '(self.num_frame, 25, kernel_size=1, stride=2, padding=0, groups=1,\n bias=False, dilation=1)\n', (3834, 3928), True, 'import torch.nn as nn\n'), ((3947, 3978), 'torch.nn.Linear', 'nn.Linear', (['(2560)', 'self.num_frame'], {}), '(2560, self.num_frame)\n', (3956, 3978), True, 'import torch.nn as nn\n'), ((4018, 4121), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.num_frame', '(25)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'groups': '(1)', 'bias': '(False)', 'dilation': '(1)'}), '(self.num_frame, 25, kernel_size=1, stride=1, padding=0, groups=1,\n bias=False, dilation=1)\n', (4027, 4121), True, 'import torch.nn as nn\n'), ((4174, 4205), 'torch.nn.Linear', 'nn.Linear', (['(5760)', 'self.num_frame'], {}), '(5760, self.num_frame)\n', (4183, 4205), True, 'import torch.nn as nn\n'), ((9020, 9041), 'torch.from_numpy', 'torch.from_numpy', (['Drr'], {}), '(Drr)\n', (9036, 9041), False, 'import torch\n'), ((9088, 9112), 'torch.from_numpy', 'torch.from_numpy', (['Dtheta'], {}), '(Dtheta)\n', (9104, 9112), False, 'import torch\n'), ((9269, 9300), 'torch.randn', 'torch.randn', (['(1)', '(40)', '(3)', '(224)', '(224)'], {}), '(1, 40, 3, 224, 224)\n', (9280, 9300), False, 'import torch\n'), ((1643, 1804), 'modelZoo.resNet.ResNet', 'ResNet', ([], {'block': 'Bottleneck', 'layers': '[3, 4, 6, 3]', 'zero_init_residual': '(False)', 'groups': '(1)', 'width_per_group': '(64)', 'replace_stride_with_dilation': 'None', 'norm_layer': 'None'}), '(block=Bottleneck, layers=[3, 4, 6, 3], zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None,\n norm_layer=None)\n', (1649, 1804), False, 'from modelZoo.resNet import ResNet, Bottleneck, 
BasicBlock\n'), ((1908, 2003), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2048)', '(512)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'groups': '(1)', 'bias': '(False)', 'dilation': '(1)'}), '(2048, 512, kernel_size=3, stride=1, padding=1, groups=1, bias=\n False, dilation=1)\n', (1917, 2003), True, 'import torch.nn as nn\n'), ((2022, 2109), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {'eps': '(1e-05)', 'momentum': '(0.1)', 'affine': '(True)', 'track_running_stats': '(True)'}), '(512, eps=1e-05, momentum=0.1, affine=True,\n track_running_stats=True)\n', (2036, 2109), True, 'import torch.nn as nn\n'), ((8262, 8305), 'torch.matmul', 'torch.matmul', (['newDictionary', 'sparseCode_key'], {}), '(newDictionary, sparseCode_key)\n', (8274, 8305), False, 'import torch\n'), ((8416, 8457), 'torch.cat', 'torch.cat', (['(gtImgFeature, predImgFeature)'], {}), '((gtImgFeature, predImgFeature))\n', (8425, 8457), False, 'import torch\n'), ((2184, 2345), 'modelZoo.resNet.ResNet', 'ResNet', ([], {'block': 'BasicBlock', 'layers': '[3, 4, 6, 3]', 'zero_init_residual': '(False)', 'groups': '(1)', 'width_per_group': '(64)', 'replace_stride_with_dilation': 'None', 'norm_layer': 'None'}), '(block=BasicBlock, layers=[3, 4, 6, 3], zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None,\n norm_layer=None)\n', (2190, 2345), False, 'from modelZoo.resNet import ResNet, Bottleneck, BasicBlock\n'), ((7428, 7461), 'numpy.where', 'np.where', (['(key_list_tot < self.FRA)'], {}), '(key_list_tot < self.FRA)\n', (7436, 7461), True, 'import numpy as np\n'), ((7522, 7566), 'numpy.where', 'np.where', (['(key_list_tot < self.PRE + self.FRA)'], {}), '(key_list_tot < self.PRE + self.FRA)\n', (7530, 7566), True, 'import numpy as np\n'), ((2618, 2779), 'modelZoo.resNet.ResNet', 'ResNet', ([], {'block': 'BasicBlock', 'layers': '[2, 2, 2, 2]', 'zero_init_residual': '(False)', 'groups': '(1)', 'width_per_group': '(64)', 'replace_stride_with_dilation': 'None', 
'norm_layer': 'None'}), '(block=BasicBlock, layers=[2, 2, 2, 2], zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None,\n norm_layer=None)\n', (2624, 2779), False, 'from modelZoo.resNet import ResNet, Bottleneck, BasicBlock\n')] |
from os import listdir
from os.path import join, basename, isfile
import librosa
import numpy as np
from numpy.linalg import norm
from pymo.parsers import BVHParser
from pymo.preprocessing import JointSelector, MocapParameterizer, Numpyfier, RootTransformer
from sklearn.pipeline import Pipeline
import h5py
from scripts.data_loader.saga import utils
from model.vocab import Vocab
import logging
# Silence numba's verbose DEBUG/INFO logging (pulled in via librosa).
# BUGFIX: this setup was previously duplicated verbatim below the constants.
numba_logger = logging.getLogger('numba')
numba_logger.setLevel(logging.WARNING)

# Audio sample rate (Hz) and motion-capture frame rate (FPS) used throughout.
AUDIO_SR = 16_000
MOTION_FPS = 25
class SagaDatasetProcessor:
    """Preprocesses the SaGA dataset.

    Loads aligned audio/transcript/motion recordings, slices them into
    fixed-length training segments and saves the result as
    ``audio.npy`` / ``text.npy`` / ``motion.npy`` in the dataset directory.
    """

    def __init__(self):
        self.bvh_parser = BVHParser()
        # These variables are initialized in load_or_save_motion_properties
        self.motion_transforms = None
        self.lang_model = None
        self.motion_properties = None

    @staticmethod
    def _list_files(directory, suffix):
        """Return sorted paths of the files in 'directory' ending with 'suffix'."""
        return [join(directory, file) for file in sorted(listdir(directory))
                if file.endswith(suffix)]

    def load_or_save_motion_properties(self, dataset_dir):
        """
        Extract and save the properties of the motion dataset in 'dataset_dir'.
        The following properties are saved:
            n_joints:          the number of joints in the skeletons
            joint_names:       the names of the joints in the skeleton
            bones:             the bones in the skeleton
            mean_coordinates:  the mean 3D pose
            mean_dir_vec:      the mean directional vector for each bone

        NOTE: The end effector joints and the root joint are ignored.
        NOTE: The bones are represented as a list of
              (start_joint_idx, end_joint_idx, bone_length) triplets
        """
        motion_info_file = join(dataset_dir, "..", "motion_info")
        if isfile(motion_info_file + '.npz'):
            # Cached properties exist: load them and rebuild the transforms.
            self.motion_properties = np.load(motion_info_file + '.npz')
            logging.info(f'Loaded motion dataset info from {motion_info_file}!')
            self.init_motion_transforms(self.motion_properties["joint_names"])
            return

        logging.info(f'Calculating motion dataset info...')
        motion_files = self._list_files(join(dataset_dir, "Motion"), ".bvh")
        joint_names, bones, n_joints = self.extract_skeleton_data(motion_files[0])
        self.init_motion_transforms(joint_names)

        # Stack the raw 3D coordinates of every recording, then derive the
        # per-bone directional vectors from them.
        motion_coordinates = np.concatenate([
            self.load_motion(motion_file, convert_to_dir_vectors=False)
            for motion_file in motion_files])
        motion_dir_vectors = utils.coordinates_to_dir_vectors(motion_coordinates, bones)

        self.motion_properties = {
            "joint_names": joint_names,
            "bones": bones,
            "n_joints": n_joints,
            "mean_coordinates": np.mean(motion_coordinates, axis=0),
            "mean_dir_vec": np.mean(motion_dir_vectors, axis=0)
        }
        np.savez(motion_info_file, **self.motion_properties)
        logging.info(f'  Saved motion dataset info to {motion_info_file}!')

    def init_motion_transforms(self, joint_names):
        """Build the BVH -> 3D-position pipeline (hip-centered, end effectors
        and root joint removed, output as a flat numpy array)."""
        self.motion_transforms = Pipeline([('root', RootTransformer('hip_centric')),
                                          ('pos', MocapParameterizer('position')),
                                          # We remove end effector "Nub" joints and the root joint
                                          ('jtsel', JointSelector(joint_names, include_root=False)),
                                          ('np', Numpyfier())])

    def run(self, dataset_dir):
        """Process every recording in 'dataset_dir' and save the segment arrays."""
        # Initialize the language model only when the processor is run
        if self.lang_model is None:
            self.lang_model = Vocab("SAGA")
        assert self.motion_properties is not None
        assert self.motion_transforms is not None

        audio_files = self._list_files(join(dataset_dir, "Audio"), ".mov.wav")
        text_files = self._list_files(join(dataset_dir, "Transcripts"), ".hdf5")
        motion_files = self._list_files(join(dataset_dir, "Motion"), ".bvh")
        assert len(audio_files) == len(text_files) == len(motion_files)

        audio_data = []
        text_data = []
        motion_data = []
        logging.info("Processing files...")
        for audio_file, text_file, motion_file in zip(audio_files, text_files, motion_files):
            logging.info(f"    {basename(audio_file)} {basename(text_file)}, {basename(motion_file)}")
            audio, text, motion = self.process_recording(audio_file, text_file, motion_file)
            audio_data.append(audio)
            text_data.append(text)
            motion_data.append(motion)

        np.save(join(dataset_dir, "audio.npy"), np.asarray(np.concatenate(audio_data)).astype(np.float32))
        # BUGFIX: np.long was removed in NumPy >= 1.24; np.int64 preserves
        # the on-disk dtype the original produced on 64-bit platforms.
        np.save(join(dataset_dir, "text.npy"), np.asarray(np.concatenate(text_data)).astype(np.int64))
        np.save(join(dataset_dir, "motion.npy"), np.asarray(np.concatenate(motion_data)).astype(np.float32))

    def load_motion(self, bvh_file, convert_to_dir_vectors=True):
        """
        If 'convert_to_dir_vectors' is True,
            return an (n_frames, n_joints, 3) array of 3D directional vectors in XYZ axis order
        else
            return an (n_frames, n_joints, 3) array of 3D coordinates in XYZ axis order.
        """
        assert self.motion_transforms is not None
        motion_data = self.bvh_parser.parse(bvh_file)
        coordinates = self.motion_transforms.fit_transform([motion_data])[0]
        n_frames = len(coordinates)
        # (n_frames, n_joints * 3) -> (n_frames, n_joints, 3)
        coordinates = coordinates.reshape(n_frames, -1, 3)

        if not convert_to_dir_vectors:
            return coordinates

        dir_vectors = utils.coordinates_to_dir_vectors(coordinates, self.motion_properties["bones"])
        # Normalize the directional vectors by subtracting the dataset mean.
        dir_vectors -= self.motion_properties["mean_dir_vec"]
        return dir_vectors

    def load_audio(self, audio_file):
        """
        Return a raw waveform sampled at the configured rate (AUDIO_SR).
        """
        audio, _ = librosa.load(audio_file, mono=True, sr=AUDIO_SR, res_type="kaiser_fast")
        return audio

    def load_text(self, text_file):
        """
        Open the given hdf5 file and return (words, word_times), registering
        every word with the language model.
        """
        # BUGFIX: the file was previously opened twice and never closed; a
        # context manager guarantees the handle is released.
        with h5py.File(text_file, mode='r') as h5_file:
            words = h5_file.get("words").asstr()[0]
            # First two columns are the word start/end timestamps (seconds).
            word_times = h5_file.get("text_encodings")[:, :2]
        for word in words:
            self.lang_model.index_word(word)
        return words, word_times

    def slice_into_segments(self, audio, words, word_times, motion):
        """
        Slice a recording into overlapping, time-aligned segments.

        Audio: 16 kHz
        Text: list of strings with length n_words
        text_features: (n_words, 2 + embedding_dim)
        Motion: 25 FPS
        """
        # Segment/stride sizes: 34-frame window and 10-frame stride at 15 FPS,
        # rescaled to the 25 FPS motion data — presumably to match a baseline
        # configuration; TODO confirm against the training setup.
        n_motion_frames_in_segment = int(round(34 * 25 / 15))
        n_audio_frames_in_segment = int(round(n_motion_frames_in_segment * (AUDIO_SR / MOTION_FPS)))
        motion_stride = int(round(10 * 25 / 15))
        audio_stride = int(motion_stride * (AUDIO_SR / MOTION_FPS))
        segment_len_sec = n_motion_frames_in_segment / MOTION_FPS
        segment_stride_sec = motion_stride / MOTION_FPS

        n_motion_segments = (len(motion) - n_motion_frames_in_segment) // motion_stride
        n_audio_segments = (len(audio) - n_audio_frames_in_segment) // audio_stride
        assert n_motion_segments == n_audio_segments

        audio_segments = []
        text_segments = []
        motion_segments = []
        for segment_idx in range(n_motion_segments):
            audio_start = segment_idx * audio_stride
            audio_end = audio_start + n_audio_frames_in_segment
            audio_segment = audio[audio_start : audio_end]

            motion_start = segment_idx * motion_stride
            motion_end = motion_start + n_motion_frames_in_segment
            motion_segment = motion[motion_start : motion_end]

            segment_start_sec = segment_idx * segment_stride_sec
            segment_end_sec = segment_start_sec + segment_len_sec
            text_segment = self._construct_word_idx_vector(words, word_times,
                                                           segment_start_sec, segment_end_sec,
                                                           n_frames=n_motion_frames_in_segment)
            audio_segments.append(audio_segment)
            text_segments.append(text_segment)
            motion_segments.append(motion_segment)

        audio_segments = np.asarray(audio_segments).astype(np.float32)
        text_segments = np.asarray(text_segments).astype(np.float32)
        motion_segments = np.asarray(motion_segments).astype(np.float32)
        return audio_segments, text_segments, motion_segments

    def _construct_word_idx_vector(self, words, word_times, segment_start_sec, segment_end_sec, n_frames):
        """
        Construct a vector that contains the index of the spoken word for each frame in the segment.

        Args:
            words:              A list of every word in the recording
            word_times:         An array of shape (n_words, 2) containing the timestamps (in seconds)
                                of the start and the end of each word
            segment_start_sec:  The timestamp of the start of the segment (in seconds)
            segment_end_sec:    The timestamp of the end of the segment (in seconds)
            n_frames:           The number of (motion) frames in the segment.

        NOTE: The returned word index vector contains each word's index only once,
              at the positions where the words start in the segment.
              The vector contains zeros in all other frames.

              For example,
                    "I" 0.5s |  "said" 0.3s |  "no" 0.5s  | "way" 0.2s
              could be converted to
                  [ 1 0 0 0 0 | 2 0 0 | 3 0 0 0 0 | 4 0]
              or
                  [ 1 0 | 2 | 3 0 | 4]
              depending on the number of frames (57 by default).
        """
        frame_duration = (segment_end_sec - segment_start_sec) / n_frames
        word_starts_sec = word_times[:, 0]
        word_ends_sec = word_times[:, 1]
        word_idx_vector = np.zeros(n_frames)
        for i in range(len(words)):
            # Only consider words that are spoken during the segment
            if word_ends_sec[i] > segment_start_sec and word_starts_sec[i] < segment_end_sec:
                frame_idx = int(np.floor((word_starts_sec[i] - segment_start_sec) / frame_duration))
                # Clamp words that started before the segment to its first frame.
                if frame_idx < 0:
                    frame_idx = 0
                word_idx_vector[frame_idx] = self.lang_model.get_word_index(words[i])
        return word_idx_vector

    def process_recording(self, audio_file, text_file, motion_file):
        """Load one aligned (audio, transcript, motion) recording and
        slice it into segments."""
        audio = self.load_audio(audio_file)
        words, word_times = self.load_text(text_file)
        motion = self.load_motion(motion_file)
        audio_segments, text_segments, motion_segments = self.slice_into_segments(audio, words, word_times, motion)
        return audio_segments, text_segments, motion_segments

    @staticmethod
    def extract_skeleton_data(bvh_file):
        """
        Return the joint names, the bones and the number of joints
        from the given BVH file.

        NOTE: The end effector joints and the root joint are ignored.
        NOTE: The bones are represented as a list of
              (start_joint_idx, end_joint_idx, bone_length) triplets.
        """
        parser = BVHParser()
        bvh_data = parser.parse(bvh_file)
        skeleton = bvh_data.skeleton

        joint_names = [joint_name for joint_name in skeleton.keys()
                       if not joint_name.endswith("_Nub") and joint_name != "Pelvis"]
        joint_to_idx = {name: idx for idx, name in enumerate(joint_names)}
        n_joints = len(joint_names)
        # triplets of (from_joint_idx, to_joint_idx, bone_length)
        bones = []
        logging.info("Found the following bones in the skeleton:")
        for joint_name, joint_idx in joint_to_idx.items():
            # We consider every bone except the one from the root Pelvis to Spine_01,
            # because the Pelvis is always stationary since we center the motion on it
            if skeleton[joint_name]['parent'] not in [None, "Pelvis"]:
                parent_idx = joint_to_idx[skeleton[joint_name]['parent']]
                bone_length = norm(skeleton[joint_name]['offsets'])
                logging.info(f"{joint_names[parent_idx]:>16} -{bone_length:.2f}- {joint_name:<16}")
                bones.append((parent_idx, joint_idx, bone_length))

        return joint_names, bones, n_joints
if __name__ == "__main__":
    # Preprocess the recordings found under ./dataset.
    SagaDatasetProcessor().run("dataset")
| [
"logging.getLogger",
"scripts.data_loader.saga.utils.coordinates_to_dir_vectors",
"pymo.parsers.BVHParser",
"numpy.linalg.norm",
"logging.info",
"librosa.load",
"numpy.mean",
"numpy.savez",
"os.listdir",
"numpy.asarray",
"pymo.preprocessing.RootTransformer",
"numpy.concatenate",
"pymo.prepro... | [((414, 440), 'logging.getLogger', 'logging.getLogger', (['"""numba"""'], {}), "('numba')\n", (431, 440), False, 'import logging\n'), ((538, 564), 'logging.getLogger', 'logging.getLogger', (['"""numba"""'], {}), "('numba')\n", (555, 564), False, 'import logging\n'), ((682, 693), 'pymo.parsers.BVHParser', 'BVHParser', ([], {}), '()\n', (691, 693), False, 'from pymo.parsers import BVHParser\n'), ((1645, 1683), 'os.path.join', 'join', (['dataset_dir', '""".."""', '"""motion_info"""'], {}), "(dataset_dir, '..', 'motion_info')\n", (1649, 1683), False, 'from os.path import join, basename, isfile\n'), ((1696, 1729), 'os.path.isfile', 'isfile', (["(motion_info_file + '.npz')"], {}), "(motion_info_file + '.npz')\n", (1702, 1729), False, 'from os.path import join, basename, isfile\n'), ((1991, 2042), 'logging.info', 'logging.info', (['f"""Calculating motion dataset info..."""'], {}), "(f'Calculating motion dataset info...')\n", (2003, 2042), False, 'import logging\n'), ((2066, 2093), 'os.path.join', 'join', (['dataset_dir', '"""Motion"""'], {}), "(dataset_dir, 'Motion')\n", (2070, 2093), False, 'from os.path import join, basename, isfile\n'), ((2570, 2629), 'scripts.data_loader.saga.utils.coordinates_to_dir_vectors', 'utils.coordinates_to_dir_vectors', (['motion_coordinates', 'bones'], {}), '(motion_coordinates, bones)\n', (2602, 2629), False, 'from scripts.data_loader.saga import utils\n'), ((2666, 2701), 'numpy.mean', 'np.mean', (['motion_coordinates'], {'axis': '(0)'}), '(motion_coordinates, axis=0)\n', (2673, 2701), True, 'import numpy as np\n'), ((2731, 2766), 'numpy.mean', 'np.mean', (['motion_dir_vectors'], {'axis': '(0)'}), '(motion_dir_vectors, axis=0)\n', (2738, 2766), True, 'import numpy as np\n'), ((3029, 3081), 'numpy.savez', 'np.savez', (['motion_info_file'], {}), '(motion_info_file, **self.motion_properties)\n', (3037, 3081), True, 'import numpy as np\n'), ((3090, 3157), 'logging.info', 'logging.info', (['f""" Saved motion dataset info to 
{motion_info_file}!"""'], {}), "(f' Saved motion dataset info to {motion_info_file}!')\n", (3102, 3157), False, 'import logging\n'), ((3963, 3989), 'os.path.join', 'join', (['dataset_dir', '"""Audio"""'], {}), "(dataset_dir, 'Audio')\n", (3967, 3989), False, 'from os.path import join, basename, isfile\n'), ((4158, 4190), 'os.path.join', 'join', (['dataset_dir', '"""Transcripts"""'], {}), "(dataset_dir, 'Transcripts')\n", (4162, 4190), False, 'from os.path import join, basename, isfile\n'), ((4363, 4390), 'os.path.join', 'join', (['dataset_dir', '"""Motion"""'], {}), "(dataset_dir, 'Motion')\n", (4367, 4390), False, 'from os.path import join, basename, isfile\n'), ((4693, 4728), 'logging.info', 'logging.info', (['"""Processing files..."""'], {}), "('Processing files...')\n", (4705, 4728), False, 'import logging\n'), ((6221, 6299), 'scripts.data_loader.saga.utils.coordinates_to_dir_vectors', 'utils.coordinates_to_dir_vectors', (['coordinates', "self.motion_properties['bones']"], {}), "(coordinates, self.motion_properties['bones'])\n", (6253, 6299), False, 'from scripts.data_loader.saga import utils\n'), ((6599, 6671), 'librosa.load', 'librosa.load', (['audio_file'], {'mono': '(True)', 'sr': 'AUDIO_SR', 'res_type': '"""kaiser_fast"""'}), "(audio_file, mono=True, sr=AUDIO_SR, res_type='kaiser_fast')\n", (6611, 6671), False, 'import librosa\n'), ((11121, 11139), 'numpy.zeros', 'np.zeros', (['n_frames'], {}), '(n_frames)\n', (11129, 11139), True, 'import numpy as np\n'), ((12446, 12457), 'pymo.parsers.BVHParser', 'BVHParser', ([], {}), '()\n', (12455, 12457), False, 'from pymo.parsers import BVHParser\n'), ((12906, 12964), 'logging.info', 'logging.info', (['"""Found the following bones in the skeleton:"""'], {}), "('Found the following bones in the skeleton:')\n", (12918, 12964), False, 'import logging\n'), ((1768, 1802), 'numpy.load', 'np.load', (["(motion_info_file + '.npz')"], {}), "(motion_info_file + '.npz')\n", (1775, 1802), True, 'import numpy as np\n'), ((1815, 
1883), 'logging.info', 'logging.info', (['f"""Loaded motion dataset info from {motion_info_file}!"""'], {}), "(f'Loaded motion dataset info from {motion_info_file}!')\n", (1827, 1883), False, 'import logging\n'), ((2118, 2140), 'os.path.join', 'join', (['motion_dir', 'file'], {}), '(motion_dir, file)\n', (2122, 2140), False, 'from os.path import join, basename, isfile\n'), ((3825, 3838), 'model.vocab.Vocab', 'Vocab', (['"""SAGA"""'], {}), "('SAGA')\n", (3830, 3838), False, 'from model.vocab import Vocab\n'), ((4013, 4034), 'os.path.join', 'join', (['audio_dir', 'file'], {}), '(audio_dir, file)\n', (4017, 4034), False, 'from os.path import join, basename, isfile\n'), ((4221, 4241), 'os.path.join', 'join', (['text_dir', 'file'], {}), '(text_dir, file)\n', (4225, 4241), False, 'from os.path import join, basename, isfile\n'), ((4415, 4437), 'os.path.join', 'join', (['motion_dir', 'file'], {}), '(motion_dir, file)\n', (4419, 4437), False, 'from os.path import join, basename, isfile\n'), ((5146, 5176), 'os.path.join', 'join', (['dataset_dir', '"""audio.npy"""'], {}), "(dataset_dir, 'audio.npy')\n", (5150, 5176), False, 'from os.path import join, basename, isfile\n'), ((5254, 5283), 'os.path.join', 'join', (['dataset_dir', '"""text.npy"""'], {}), "(dataset_dir, 'text.npy')\n", (5258, 5283), False, 'from os.path import join, basename, isfile\n'), ((5358, 5389), 'os.path.join', 'join', (['dataset_dir', '"""motion.npy"""'], {}), "(dataset_dir, 'motion.npy')\n", (5362, 5389), False, 'from os.path import join, basename, isfile\n'), ((6939, 6969), 'h5py.File', 'h5py.File', (['text_file'], {'mode': '"""r"""'}), "(text_file, mode='r')\n", (6948, 6969), False, 'import h5py\n'), ((9166, 9192), 'numpy.asarray', 'np.asarray', (['audio_segments'], {}), '(audio_segments)\n', (9176, 9192), True, 'import numpy as np\n'), ((9236, 9261), 'numpy.asarray', 'np.asarray', (['text_segments'], {}), '(text_segments)\n', (9246, 9261), True, 'import numpy as np\n'), ((9307, 9334), 'numpy.asarray', 
'np.asarray', (['motion_segments'], {}), '(motion_segments)\n', (9317, 9334), True, 'import numpy as np\n'), ((13372, 13409), 'numpy.linalg.norm', 'norm', (["skeleton[joint_name]['offsets']"], {}), "(skeleton[joint_name]['offsets'])\n", (13376, 13409), False, 'from numpy.linalg import norm\n'), ((13443, 13531), 'logging.info', 'logging.info', (['f"""{joint_names[parent_idx]:>16} -{bone_length:.2f}- {joint_name:<16}"""'], {}), "(\n f'{joint_names[parent_idx]:>16} -{bone_length:.2f}- {joint_name:<16}')\n", (13455, 13531), False, 'import logging\n'), ((2160, 2179), 'os.listdir', 'listdir', (['motion_dir'], {}), '(motion_dir)\n', (2167, 2179), False, 'from os import listdir\n'), ((3271, 3301), 'pymo.preprocessing.RootTransformer', 'RootTransformer', (['"""hip_centric"""'], {}), "('hip_centric')\n", (3286, 3301), False, 'from pymo.preprocessing import JointSelector, MocapParameterizer, Numpyfier, RootTransformer\n'), ((3355, 3385), 'pymo.preprocessing.MocapParameterizer', 'MocapParameterizer', (['"""position"""'], {}), "('position')\n", (3373, 3385), False, 'from pymo.preprocessing import JointSelector, MocapParameterizer, Numpyfier, RootTransformer\n'), ((3541, 3587), 'pymo.preprocessing.JointSelector', 'JointSelector', (['joint_names'], {'include_root': '(False)'}), '(joint_names, include_root=False)\n', (3554, 3587), False, 'from pymo.preprocessing import JointSelector, MocapParameterizer, Numpyfier, RootTransformer\n'), ((3640, 3651), 'pymo.preprocessing.Numpyfier', 'Numpyfier', ([], {}), '()\n', (3649, 3651), False, 'from pymo.preprocessing import JointSelector, MocapParameterizer, Numpyfier, RootTransformer\n'), ((4054, 4072), 'os.listdir', 'listdir', (['audio_dir'], {}), '(audio_dir)\n', (4061, 4072), False, 'from os import listdir\n'), ((4261, 4278), 'os.listdir', 'listdir', (['text_dir'], {}), '(text_dir)\n', (4268, 4278), False, 'from os import listdir\n'), ((4457, 4476), 'os.listdir', 'listdir', (['motion_dir'], {}), '(motion_dir)\n', (4464, 4476), False, 
'from os import listdir\n'), ((11372, 11439), 'numpy.floor', 'np.floor', (['((word_starts_sec[i] - segment_start_sec) / frame_duration)'], {}), '((word_starts_sec[i] - segment_start_sec) / frame_duration)\n', (11380, 11439), True, 'import numpy as np\n'), ((4853, 4873), 'os.path.basename', 'basename', (['audio_file'], {}), '(audio_file)\n', (4861, 4873), False, 'from os.path import join, basename, isfile\n'), ((4876, 4895), 'os.path.basename', 'basename', (['text_file'], {}), '(text_file)\n', (4884, 4895), False, 'from os.path import join, basename, isfile\n'), ((4899, 4920), 'os.path.basename', 'basename', (['motion_file'], {}), '(motion_file)\n', (4907, 4920), False, 'from os.path import join, basename, isfile\n'), ((5190, 5216), 'numpy.concatenate', 'np.concatenate', (['audio_data'], {}), '(audio_data)\n', (5204, 5216), True, 'import numpy as np\n'), ((5298, 5323), 'numpy.concatenate', 'np.concatenate', (['text_data'], {}), '(text_data)\n', (5312, 5323), True, 'import numpy as np\n'), ((5402, 5429), 'numpy.concatenate', 'np.concatenate', (['motion_data'], {}), '(motion_data)\n', (5416, 5429), True, 'import numpy as np\n'), ((6845, 6875), 'h5py.File', 'h5py.File', (['text_file'], {'mode': '"""r"""'}), "(text_file, mode='r')\n", (6854, 6875), False, 'import h5py\n')] |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pytest
from SphereVoxelization_fft import compute_2d, compute_3d
import freud
# Select a non-interactive backend so the plotting tests can run headless.
matplotlib.use("agg")
class TestSphereVoxelization:
    """Tests for freud.density.SphereVoxelization.

    The 2D and 3D random-point tests share identical verification logic, so
    the common checks live in small private helpers instead of being
    duplicated, and the (loop-invariant) FFT reference voxelization is
    computed once per test instead of once per width representation.
    """

    @staticmethod
    def _assert_access_guarded(vox):
        """Accessing box/voxels before compute() must raise AttributeError."""
        with pytest.raises(AttributeError):
            vox.box
        with pytest.raises(AttributeError):
            vox.voxels

    @staticmethod
    def _assert_matches_fft(voxels, fft_vox):
        """At least 95% of voxels must agree with the FFT reference result.

        The two calculations come from independent methods, so we only
        require agreement up to rounding error on a small fraction of cells.
        """
        num_same = len(
            np.where(np.isclose(voxels - fft_vox, np.zeros(fft_vox.shape)))[0]
        )
        assert num_same / np.prod(fft_vox.shape) > 0.95

    @staticmethod
    def _assert_binary(voxels):
        """The voxel grid must contain only 1's and 0's, with both present."""
        num_zeros = len(
            np.where(np.isclose(voxels, np.zeros(voxels.shape)))[0]
        )
        num_ones = len(
            np.where(np.isclose(voxels, np.ones(voxels.shape)))[0]
        )
        assert num_zeros > 0
        assert num_ones > 0
        assert num_zeros + num_ones == np.prod(voxels.shape)

    def _check_random_points(self, width, is2D, fft_compute):
        """Shared body of test_random_points_2d / test_random_points_3d."""
        r_max = 10.0
        num_points = 10
        box_size = r_max * 10
        ndim = 2 if is2D else 3
        box, points = freud.data.make_random_system(box_size, num_points, is2D=is2D)
        # The FFT reference depends only on the points, not on how the width
        # is spelled, so hoist it out of the loop.
        fft_vox = fft_compute(box_size, width, points, r_max)
        # All equivalent width spellings must behave identically.
        for w in (width, (width,) * ndim, [width] * ndim):
            vox = freud.density.SphereVoxelization(w, r_max)
            self._assert_access_guarded(vox)
            vox.compute(system=(box, points))
            # Access after compute must succeed.
            vox.box
            vox.voxels
            # Verify the output dimensions are correct.
            assert vox.voxels.shape == (width,) * ndim
            assert np.prod(vox.voxels.shape) == np.prod(vox.width)
            self._assert_matches_fft(vox.voxels, fft_vox)
            self._assert_binary(vox.voxels)

    def test_random_points_2d(self):
        self._check_random_points(100, True, compute_2d)

    def test_random_points_3d(self):
        self._check_random_points(100, False, compute_3d)

    def test_change_box_dimension(self):
        width = 100
        r_max = 10.0
        num_points = 100
        box_size = r_max * 3.1
        # Computing a 3D system after a 2D system must fail, and vice versa.
        for first_is2D in (True, False):
            box, points = freud.data.make_random_system(
                box_size, num_points, is2D=first_is2D
            )
            vox = freud.density.SphereVoxelization(width, r_max)
            vox.compute(system=(box, points))
            test_box, test_points = freud.data.make_random_system(
                box_size, num_points, is2D=not first_is2D
            )
            with pytest.raises(ValueError):
                vox.compute((test_box, test_points))

    def test_repr(self):
        # repr must round-trip through eval for both constructor signatures.
        vox = freud.density.SphereVoxelization(100, 10.0)
        assert str(vox) == str(eval(repr(vox)))
        vox3 = freud.density.SphereVoxelization((98, 99, 100), 10.0)
        assert str(vox3) == str(eval(repr(vox3)))

    def test_repr_png(self):
        width = 100
        r_max = 10.0
        num_points = 100
        box_size = r_max * 3.1
        box, points = freud.data.make_random_system(box_size, num_points, is2D=True)
        vox = freud.density.SphereVoxelization(width, r_max)
        # Plotting before compute() must fail, and the png repr must be None.
        with pytest.raises(AttributeError):
            vox.plot()
        assert vox._repr_png_() is None
        vox.compute((box, points))
        vox.plot()
        # For a cubic (3D) box the png repr is expected to be None.
        vox = freud.density.SphereVoxelization(width, r_max)
        test_box = freud.box.Box.cube(box_size)
        vox.compute((test_box, points))
        vox.plot()
        assert vox._repr_png_() is None
        plt.close("all")
| [
"numpy.prod",
"SphereVoxelization_fft.compute_2d",
"numpy.ones",
"freud.data.make_random_system",
"matplotlib.use",
"SphereVoxelization_fft.compute_3d",
"freud.box.Box.cube",
"matplotlib.pyplot.close",
"numpy.zeros",
"pytest.raises",
"freud.density.SphereVoxelization"
] | [((156, 177), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (170, 177), False, 'import matplotlib\n'), ((364, 426), 'freud.data.make_random_system', 'freud.data.make_random_system', (['box_size', 'num_points'], {'is2D': '(True)'}), '(box_size, num_points, is2D=True)\n', (393, 426), False, 'import freud\n'), ((2070, 2133), 'freud.data.make_random_system', 'freud.data.make_random_system', (['box_size', 'num_points'], {'is2D': '(False)'}), '(box_size, num_points, is2D=False)\n', (2099, 2133), False, 'import freud\n'), ((3818, 3880), 'freud.data.make_random_system', 'freud.data.make_random_system', (['box_size', 'num_points'], {'is2D': '(True)'}), '(box_size, num_points, is2D=True)\n', (3847, 3880), False, 'import freud\n'), ((3895, 3941), 'freud.density.SphereVoxelization', 'freud.density.SphereVoxelization', (['width', 'r_max'], {}), '(width, r_max)\n', (3927, 3941), False, 'import freud\n'), ((4017, 4080), 'freud.data.make_random_system', 'freud.data.make_random_system', (['box_size', 'num_points'], {'is2D': '(False)'}), '(box_size, num_points, is2D=False)\n', (4046, 4080), False, 'import freud\n'), ((4295, 4358), 'freud.data.make_random_system', 'freud.data.make_random_system', (['box_size', 'num_points'], {'is2D': '(False)'}), '(box_size, num_points, is2D=False)\n', (4324, 4358), False, 'import freud\n'), ((4373, 4419), 'freud.density.SphereVoxelization', 'freud.density.SphereVoxelization', (['width', 'r_max'], {}), '(width, r_max)\n', (4405, 4419), False, 'import freud\n'), ((4495, 4557), 'freud.data.make_random_system', 'freud.data.make_random_system', (['box_size', 'num_points'], {'is2D': '(True)'}), '(box_size, num_points, is2D=True)\n', (4524, 4557), False, 'import freud\n'), ((4709, 4752), 'freud.density.SphereVoxelization', 'freud.density.SphereVoxelization', (['(100)', '(10.0)'], {}), '(100, 10.0)\n', (4741, 4752), False, 'import freud\n'), ((4847, 4900), 'freud.density.SphereVoxelization', 'freud.density.SphereVoxelization', 
(['(98, 99, 100)', '(10.0)'], {}), '((98, 99, 100), 10.0)\n', (4879, 4900), False, 'import freud\n'), ((5100, 5162), 'freud.data.make_random_system', 'freud.data.make_random_system', (['box_size', 'num_points'], {'is2D': '(True)'}), '(box_size, num_points, is2D=True)\n', (5129, 5162), False, 'import freud\n'), ((5177, 5223), 'freud.density.SphereVoxelization', 'freud.density.SphereVoxelization', (['width', 'r_max'], {}), '(width, r_max)\n', (5209, 5223), False, 'import freud\n'), ((5402, 5448), 'freud.density.SphereVoxelization', 'freud.density.SphereVoxelization', (['width', 'r_max'], {}), '(width, r_max)\n', (5434, 5448), False, 'import freud\n'), ((5468, 5496), 'freud.box.Box.cube', 'freud.box.Box.cube', (['box_size'], {}), '(box_size)\n', (5486, 5496), False, 'import freud\n'), ((5604, 5620), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5613, 5620), True, 'import matplotlib.pyplot as plt\n'), ((503, 545), 'freud.density.SphereVoxelization', 'freud.density.SphereVoxelization', (['w', 'r_max'], {}), '(w, r_max)\n', (535, 545), False, 'import freud\n'), ((1213, 1255), 'SphereVoxelization_fft.compute_2d', 'compute_2d', (['box_size', 'width', 'points', 'r_max'], {}), '(box_size, width, points, r_max)\n', (1223, 1255), False, 'from SphereVoxelization_fft import compute_2d, compute_3d\n'), ((1409, 1431), 'numpy.prod', 'np.prod', (['fft_vox.shape'], {}), '(fft_vox.shape)\n', (1416, 1431), True, 'import numpy as np\n'), ((2224, 2266), 'freud.density.SphereVoxelization', 'freud.density.SphereVoxelization', (['w', 'r_max'], {}), '(w, r_max)\n', (2256, 2266), False, 'import freud\n'), ((2874, 2916), 'SphereVoxelization_fft.compute_3d', 'compute_3d', (['box_size', 'width', 'points', 'r_max'], {}), '(box_size, width, points, r_max)\n', (2884, 2916), False, 'from SphereVoxelization_fft import compute_2d, compute_3d\n'), ((3070, 3092), 'numpy.prod', 'np.prod', (['fft_vox.shape'], {}), '(fft_vox.shape)\n', (3077, 3092), True, 'import numpy as 
np\n'), ((4116, 4141), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4129, 4141), False, 'import pytest\n'), ((4593, 4618), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4606, 4618), False, 'import pytest\n'), ((5238, 5267), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (5251, 5267), False, 'import pytest\n'), ((590, 619), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (603, 619), False, 'import pytest\n'), ((662, 691), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (675, 691), False, 'import pytest\n'), ((966, 991), 'numpy.prod', 'np.prod', (['vox.voxels.shape'], {}), '(vox.voxels.shape)\n', (973, 991), True, 'import numpy as np\n'), ((995, 1013), 'numpy.prod', 'np.prod', (['vox.width'], {}), '(vox.width)\n', (1002, 1013), True, 'import numpy as np\n'), ((1889, 1914), 'numpy.prod', 'np.prod', (['vox.voxels.shape'], {}), '(vox.voxels.shape)\n', (1896, 1914), True, 'import numpy as np\n'), ((2311, 2340), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (2324, 2340), False, 'import pytest\n'), ((2383, 2412), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (2396, 2412), False, 'import pytest\n'), ((3550, 3575), 'numpy.prod', 'np.prod', (['vox.voxels.shape'], {}), '(vox.voxels.shape)\n', (3557, 3575), True, 'import numpy as np\n'), ((1342, 1365), 'numpy.zeros', 'np.zeros', (['fft_vox.shape'], {}), '(fft_vox.shape)\n', (1350, 1365), True, 'import numpy as np\n'), ((1614, 1640), 'numpy.zeros', 'np.zeros', (['vox.voxels.shape'], {}), '(vox.voxels.shape)\n', (1622, 1640), True, 'import numpy as np\n'), ((1736, 1761), 'numpy.ones', 'np.ones', (['vox.voxels.shape'], {}), '(vox.voxels.shape)\n', (1743, 1761), True, 'import numpy as np\n'), ((3003, 3026), 'numpy.zeros', 'np.zeros', (['fft_vox.shape'], {}), '(fft_vox.shape)\n', (3011, 3026), True, 
'import numpy as np\n'), ((3275, 3301), 'numpy.zeros', 'np.zeros', (['vox.voxels.shape'], {}), '(vox.voxels.shape)\n', (3283, 3301), True, 'import numpy as np\n'), ((3397, 3422), 'numpy.ones', 'np.ones', (['vox.voxels.shape'], {}), '(vox.voxels.shape)\n', (3404, 3422), True, 'import numpy as np\n')] |
import numpy as np
import copy
import cv2
import os
import PIL.Image as Image
from PIL import ImageDraw
import lmdb
from tqdm import tqdm
import carla
from collect_pm import CollectPerspectiveImage, InversePerspectiveMapping
# Collection configuration for the CARLA dataset pipeline: simulator/sensor
# setup, environment counts, server endpoints, driving policy, and output
# locations consumed by the __main__ block below.
config = dict(
    env=dict(
        simulator=dict(
            disable_two_wheels=True,
            waypoint_num=32,
            planner=dict(
                type='behavior',
                resolution=1,
            ),
            # Sensor suite: one front RGB camera and one 64-channel lidar.
            obs=(
                dict(
                    name='rgb',
                    type='rgb',
                    size=[640, 360],
                    position=[0.5, 0.0, 2.5],
                    rotation=[0, 0, 0],
                    sensor_tick=1. / 30,
                ),
                dict(
                    name='lidar',
                    type='lidar',
                    channels=64,
                    range=50,
                    points_per_second=100000,
                    rotation_frequency=30,
                    upper_fov=10,
                    lower_fov=-30,
                    position=[0.5, 0.0, 2.5],
                    rotation=[0, 0, 0],
                    sensor_tick=0.05,
                )
            ),
            verbose=True,
        ),
        # visualize=dict(
        #     type='rgb',
        #     outputs=['show']
        # ),
        col_is_failure=True,
        stuck_is_failure=True
    ),
    env_num=5,
    episode_nums=40,
    env_manager=dict(
        auto_reset=False,
        shared_memory=False,
    ),
    env_wrapper=dict(suite='FullTown01-v3', ),
    server=[
        dict(carla_host='localhost', carla_ports=[9000, 9010, 2]),
    ],
    policy=dict(
        target_speed=25,
        noise=False,
    ),
    collector=dict(suite='FullTown01-v3', ),
    dir_path='datasets/cict_datasets_train',  # root directory of episode folders
    npy_prefix='_preloads'  # directory where save_as_npy() writes preload files
)
# World-to-map rasterization: map_px = scale * world_coord + offset
# (offsets here are for Town01; get_nav() overrides them per town).
scale = 12.0
x_offset = 800
y_offset = 1000
# Trajectory limits (meters / speed units) used to build Param below.
MAX_SPEED = 50
TRAJ_LENGTH = 25
MIN_TRAJ_LENGTH = 15
vehicle_width = 2.0
# Sampling densities for the perspective potential-map projection.
longitudinal_sample_number_near = 8
longitudinal_sample_number_far = 0.5
longitudinal_length = 25.0
lateral_sample_number = 20
lateral_step_factor = 1.0
# Smoothing kernel size used by the potential-map renderer.
ksize = 21
# Front RGB camera intrinsics/mounting matching the 'rgb' obs in config.
sensor_config = {'rgb': {'img_height': 360, 'img_width': 640, 'fov': 120, 'location': [0.5, 0, 2.5]}}
class Param(object):
    """Snapshot of the module-level tuning constants, bundled into one object
    for the collect_pm projection classes (CollectPerspectiveImage /
    InversePerspectiveMapping)."""

    def __init__(self):
        # Trajectory and vehicle geometry.
        self.traj_length = float(TRAJ_LENGTH)
        self.target_speed = float(MAX_SPEED)
        self.vehicle_width = float(vehicle_width)
        # Projection sampling densities.
        self.longitudinal_sample_number_near = longitudinal_sample_number_near
        self.longitudinal_sample_number_far = longitudinal_sample_number_far
        self.longitudinal_length = longitudinal_length
        self.lateral_sample_number = lateral_sample_number
        self.lateral_step_factor = lateral_step_factor
        # Smoothing kernel and camera description.
        self.ksize = ksize
        self.sensor_config = sensor_config
# Shared parameter bundle used by all map/projection helpers below.
params = Param()
def get_map():
    """Return a blank 6000x6000 black RGB canvas as a PIL image."""
    canvas = np.zeros((6000, 6000, 3), dtype="uint8")
    return Image.fromarray(canvas)
def draw_point(waypoint_list, origin_map):
    """Plot each waypoint as a single white pixel on the map image.

    Waypoints are world coordinates; they are rasterized with the
    module-level scale/offset before drawing. Returns the mutated image.
    """
    pixels = []
    for wp in waypoint_list:
        pixels.extend((scale * wp[0] + x_offset, scale * wp[1] + y_offset))
    ImageDraw.Draw(origin_map).point(pixels, fill=(255, 255, 255))
    return origin_map
def draw_route(waypoint_list, origin_map):
    """Draw the route as a thick red polyline through the scaled waypoints.

    Returns the mutated image.
    """
    flat_points = []
    for wp in waypoint_list:
        flat_points.append(scale * wp[0] + x_offset)
        flat_points.append(scale * wp[1] + y_offset)
    drawer = ImageDraw.Draw(origin_map)
    drawer.line(flat_points, 'red', width=30)
    return origin_map
def find_dest_with_fix_length(start, waypoint_list, traj_length=None):
    """Walk along the waypoint list until the accumulated arc length reaches
    the target trajectory length.

    Args:
        start: distance already travelled before the first waypoint.
        waypoint_list: sequence of waypoints; each supports ``[:2]`` slicing
            to its xy coordinates (e.g. rows of an Nx2+ numpy array).
        traj_length: target length in the same units as the coordinates.
            Defaults to ``params.traj_length`` (backward compatible).

    Returns:
        ``(xy, index)`` — the xy of the first waypoint at/after the target
        length and its index, or ``(last waypoint, -1)`` when the list is
        too short to cover the target length.
    """
    if traj_length is None:
        traj_length = params.traj_length
    length = start
    for i in range(len(waypoint_list) - 1):
        length += np.linalg.norm(waypoint_list[i + 1][:2] - waypoint_list[i][:2])
        if length >= traj_length:
            return waypoint_list[i + 1][:2], i + 1
    return waypoint_list[-1][:2], -1
def draw_destination(location, waypoint_list, origin_map):
    """Mark the fixed-length destination as a filled red circle on the map.

    The destination is the waypoint reached after params.traj_length of
    travel, counting the distance from ``location`` to the first waypoint.
    Returns the mutated image.
    """
    travelled = np.linalg.norm(waypoint_list[0][:2] - location[:2])
    dest, _ = find_dest_with_fix_length(travelled, waypoint_list)
    cx = scale * dest[0] + x_offset
    cy = scale * dest[1] + y_offset
    ImageDraw.Draw(origin_map).ellipse(
        (cx - 15, cy - 15, cx + 15, cy + 15), fill='red', outline='red', width=30
    )
    return origin_map
def get_nav(location, rotation, plan_map, town=1):
    """Crop a forward-facing 640x360 navigation image around the vehicle.

    Args:
        location: world location ``[x, y, z]``.
        rotation: world rotation; ``rotation[1]`` (yaw, per the convention
            used elsewhere in this file) orients the crop.
        plan_map: PIL image of the full plan map.
        town: CARLA town id selecting the map offsets (1 or 2).

    Returns:
        The crop as a numpy array with channels swapped by cv2.cvtColor
        (PIL's RGB ordering -> OpenCV's BGR ordering).

    Raises:
        ValueError: if ``town`` is not 1 or 2 (previously this path crashed
        with an UnboundLocalError on the offsets).
    """
    if town == 1:
        x_offset = 800
        y_offset = 1000
    elif town == 2:
        x_offset = 1500
        y_offset = 0
    else:
        raise ValueError('unsupported town id: %s' % town)
    x = int(scale * location[0] + x_offset)
    y = int(scale * location[1] + y_offset)
    # Take a generous square crop, rotate it so the vehicle faces up, then
    # crop the forward-facing window out of the rotated image.
    _nav = plan_map.crop((x - 400, y - 400, x + 400, y + 400))
    im_rotate = _nav.rotate(rotation[1] + 90)
    nav = im_rotate.crop((_nav.size[0] // 2 - 320, _nav.size[1] // 2 - 360, _nav.size[0] // 2 + 320, _nav.size[1] // 2))
    nav = cv2.cvtColor(np.asarray(nav), cv2.COLOR_BGR2RGB)
    return nav
'''
def get_bezier(location, waypoint_list):
total_length = [np.linalg.norm(location[:2] - waypoint_list[0][:2])]
for i in range(len(waypoint_list)-1):
total_length.append(np.linalg.norm(waypoint_list[i][:2] - waypoint_list[i+1][:2]) + total_length[-1])
t = np.array(total_length[:-1]).reshape(-1, 1) / total_length[-1]
b0 = location[:2].reshape(1, 2)
b4 = waypoint_list[-1][:2].reshape(1, 2)
B0 = (1 - t) ** 4
B4 = t ** 4
p = waypoint_list[:-1, :2] - np.dot(np.concatenate([B0, B4], axis=1), np.concatenate([b0, b4], axis=0))
B1 = 4 * t * ((1 - t) ** 3)
B2 = 6 * (t ** 2) * ((1 - t) ** 2)
B3 = 4 * (1 - t) * (t ** 3)
Bm = np.concatenate([B1, B2, B3], axis=1)
bm = np.dot(np.linalg.inv(np.dot(Bm.T, Bm)), Bm.T)
bm = np.dot(bm, p)
b = np.concatenate([b0, bm, b4], axis=0)
t = np.linspace(0, 1, 100)
t = t.reshape(100, 1)
B0 = (1 - t) ** 4
B4 = t ** 4
B1 = 4 * t * ((1 - t) ** 3)
B2 = 6 * (t ** 2) * ((1 - t) ** 2)
B3 = 4 * (1 - t) * (t ** 3)
B = np.concatenate([B0, B1, B2, B3, B4], axis=1)
bezier_list = np.dot(B, b)
#print(b)
return bezier_list, b
'''
def destination(save_dir, episode_path):
    """Render a 'dest_*.png' navigation crop for every waypoint file of an
    episode, using the pose stored in the episode's measurements lmdb."""
    episode_dir = os.path.join(save_dir, episode_path)
    txn = lmdb.open(os.path.join(episode_dir, 'measurements.lmdb')).begin()
    names = sorted(
        f for f in os.listdir(episode_dir) if f.startswith('way') and f.endswith('npy')
    )
    for name in tqdm(names):
        frame = int(name.split('_')[1].split('.')[0])
        record = np.frombuffer(txn.get(('measurements_%05d' % frame).encode()), np.float32)
        # Indices 7-9 hold the location, 18-20 the rotation (see save_as_npy).
        loc = np.array([record[7], record[8], record[9]]).astype(np.float32)
        rot = np.array([record[18], record[19], record[20]]).astype(np.float32)
        waypoints = np.load(os.path.join(episode_dir, name))
        plan = draw_destination(loc, waypoints, copy.deepcopy(get_map()))
        crop = get_nav(loc, rot, plan, town=1)
        cv2.imwrite(os.path.join(episode_dir, 'dest_%05d.png' % frame), crop)
class Sensor(object):
    """Lightweight stand-in for a CARLA RGB camera actor, exposing just the
    type id, mounting transform, and string attributes that the collect_pm
    projection classes read."""

    def __init__(self, config):
        self.type_id = 'sensor.camera.rgb'
        loc = config['location']
        self.transform = carla.Transform(
            carla.Location(x=loc[0], y=loc[1], z=loc[2])
        )
        self.attributes = {
            'role_name': 'front',
            'image_size_x': str(config['img_width']),
            'image_size_y': str(config['img_height']),
            'fov': str(config['fov']),
        }

    def get_transform(self):
        """Return the sensor's mounting transform."""
        return self.transform
def find_traj_with_fix_length(start_index, pose_list, traj_length=None):
    """Find the pose index that closes a trajectory of at least
    ``traj_length`` meters starting at ``start_index``.

    Args:
        start_index: index of the first pose of the trajectory.
        pose_list: sequence of pose objects whose ``.location`` supports
            ``.distance(other_location)`` (e.g. carla.Transform).
        traj_length: target length; defaults to ``params.traj_length``
            (backward compatible).

    Returns:
        The index one past the pose where the cumulative length reaches the
        target, or -1 when the remaining poses are too short.
    """
    if traj_length is None:
        traj_length = params.traj_length
    length = 0.0
    for i in range(start_index, len(pose_list) - 1):
        length += pose_list[i].location.distance(pose_list[i + 1].location)
        if length >= traj_length:
            return i + 1
    return -1
def destination2(save_dir, episode_path):
    """Render a 'dest2_*.png' per frame: the fixed-length destination point
    projected into the front camera view via CollectPerspectiveImage.

    Reads each frame's pose from the episode's measurements lmdb and the
    corresponding 'way*' waypoint file, finds the destination at
    params.traj_length of travel, and draws it in image space.
    """
    lmdb_file = lmdb.open(os.path.join(save_dir, episode_path, 'measurements.lmdb')).begin()
    waypoint_file = [
        x for x in os.listdir(os.path.join(save_dir, episode_path)) if (x.endswith('npy') and x.startswith('way'))
    ]
    waypoint_file.sort()
    #print(waypoint_file)
    sensor = Sensor(params.sensor_config['rgb'])
    collect_perspective = CollectPerspectiveImage(params, sensor)
    commands = []  # NOTE: only populated by the commented-out logic below
    for k in tqdm(waypoint_file):
        index = k.split('_')[1].split('.')[0]
        # Measurement layout: indices 7-9 are location, 18-20 are rotation
        # (same layout decoded in save_as_npy below).
        measurements = np.frombuffer(lmdb_file.get(('measurements_%05d' % int(index)).encode()), np.float32)
        location = np.array([measurements[7], measurements[8], measurements[9]]).astype(np.float32)
        rotation = np.array([measurements[18], measurements[19], measurements[20]]).astype(np.float32)
        waypoint_list = np.load(os.path.join(save_dir, episode_path, k))
        start = np.linalg.norm(waypoint_list[0][:2] - location[:2])
        #print(location, waypoint_list[0], start)
        dest, _ = find_dest_with_fix_length(start, waypoint_list)
        #if location[0] - dest[0] > 0.5:
        #    commands.append(1)
        #elif location[0] - dest[0] < -0.5:
        #    commands.append(2)
        #else:
        #    commands.append(0)
        # Pack the xy destination into a 3x1 column vector (z stays 0) as
        # expected by drawDestInImage.
        zero = np.zeros((3, 1))
        zero[:2, 0] = dest
        dest_map = collect_perspective.drawDestInImage(zero, location, rotation)
        cv2.imwrite(os.path.join(save_dir, episode_path, 'dest2_%05d.png' % int(index)), dest_map)
    #np.save(os.path.join(save_dir, episode_path, 'commands.npy'), np.array(commands))
def get_potential_map(save_dir, episode_path, measurements, img_file):
    """Render a perspective potential map ('pm/<frame>.png') for each frame.

    For every frame, the future poses covering params.traj_length of travel
    are projected into the front camera view by CollectPerspectiveImage.
    Frames whose remaining trajectory is too short are skipped (the loop
    breaks, so trailing frames get no pm image).

    Args:
        save_dir: dataset root directory.
        episode_path: episode folder name under save_dir.
        measurements: list of per-frame dicts with 'location' and 'rotation'.
        img_file: unused here; kept for signature compatibility with the
            caller (save_as_npy).

    Returns:
        The (unmodified) measurements list, as the caller reassigns it.
    """
    pm_dir = os.path.join(save_dir, episode_path, 'pm')
    print(pm_dir)
    if not os.path.exists(pm_dir):
        os.mkdir(pm_dir)
    # Rebuild carla.Transform poses from the stored location/rotation values.
    pose_list = []
    loc_list = []
    for measurement in measurements:
        transform = carla.Transform()
        transform.location.x = float(measurement['location'][0])
        transform.location.y = float(measurement['location'][1])
        transform.location.z = float(measurement['location'][2])
        transform.rotation.pitch = float(measurement['rotation'][0])
        transform.rotation.yaw = float(measurement['rotation'][1])
        transform.rotation.roll = float(measurement['rotation'][2])
        pose_list.append(transform)
        loc_list.append(measurement['location'])
    sensor = Sensor(params.sensor_config['rgb'])
    collect_perspective = CollectPerspectiveImage(params, sensor)
    for index in tqdm(range(len(pose_list))):
        end_index = find_traj_with_fix_length(index, pose_list)
        if end_index < 0:
            # Not enough remaining poses to reach traj_length; later frames
            # can only be shorter, so stop here.
            print('no enough traj: ', index, index / len(pose_list))
            break
        vehicle_transform = pose_list[index]  # in world coordinate
        traj_pose_list = []
        traj_list = []
        for i in range(index, end_index):
            traj_pose_list.append((i, pose_list[i]))
            traj_list.append(loc_list[i])
        #t1 = time.time()
        '''
        bezier_list, bezier_coff = get_bezier(traj_list[0], np.stack(traj_list[1:], axis=0))
        measurements[index]['bezier_coff'] = bezier_coff
        origin_map = get_map()
        plan_map = draw_route(bezier_list, copy.deepcopy(origin_map))
        plan_map = draw_point(traj_list, plan_map)
        nav = get_nav(measurements[index]['location'], measurements[index]['rotation'], plan_map, town=1)
        cv2.imwrite(os.path.join(save_dir, episode_path, 'nav_%05d.png' % int(index)), nav)
        '''
        empty_image = collect_perspective.getPM(traj_pose_list, vehicle_transform)
        #t2 = time.time()
        #cv2.imshow('empty_image', empty_image)
        #cv2.waitKey(3)
        cv2.imwrite(os.path.join(pm_dir, '%05d.png' % index), empty_image)
    return measurements
def get_inverse_potential_map(save_dir, episode_path, pm_file, lidar_file):
    """Turn every perspective potential map into a top-down cost map and
    write it to 'ipm/<index>.png' inside the episode folder.

    ``pm_file`` and ``lidar_file`` are parallel lists of paths relative to
    ``save_dir``; output files are numbered by list position.
    """
    out_dir = os.path.join(save_dir, episode_path, 'ipm')
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    ipm_model = InversePerspectiveMapping(params, Sensor(params.sensor_config['rgb']))
    for idx in tqdm(range(len(pm_file))):
        pm_img = cv2.imread(os.path.join(save_dir, pm_file[idx]))
        cloud = np.load(os.path.join(save_dir, lidar_file[idx]))
        top_down = ipm_model.getIPM(pm_img)
        cost_map = ipm_model.get_cost_map(top_down, cloud)
        cv2.imwrite(os.path.join(out_dir, '%05d.png' % idx), cost_map)
def get_option(option_name, end_ind):
    """Derive the driving option from a saved per-frame direction array.

    Loads the direction codes from ``option_name`` (a .npy file) and returns
    the first code different from 4 within the trajectory span, shifted down
    by one; falls back to the first entry (also shifted) when every code in
    range equals 4 (presumably the lane-follow code — confirm against the
    planner's encoding).

    Args:
        option_name: path to the 'direction_*.npy' file.
        end_ind: index of the destination waypoint, or -1 to scan the whole
            array (trajectory extends past the waypoint list).

    Returns:
        The selected option code minus one.
    """
    x = np.load(option_name, allow_pickle=True)
    option = x[0] - 1
    # Bug fix: the scan limit must come from the loaded array, not from the
    # length of the file-name string (the original used len(option_name)).
    end_ind = len(x) if end_ind == -1 else end_ind + 1
    for o in x[1:end_ind]:
        if o != 4:
            option = o - 1
            break
    return option
def save_as_npy(save_dir, episode_path):
    """Build the preload index for one episode and run the pm/ipm renderers.

    Decodes every frame's measurement record from the episode lmdb into a
    dict, generates the perspective potential maps (get_potential_map) and
    the top-down cost maps (get_inverse_potential_map), then saves
    '<npy_prefix>/<episode>.npy' holding the relative file lists plus the
    measurement dicts.
    """
    lmdb_file = lmdb.open(os.path.join(save_dir, episode_path, 'measurements.lmdb')).begin()
    # Collect the already-rendered destination crops and raw camera frames.
    dest_file = [
        x for x in os.listdir(os.path.join(save_dir, episode_path)) if (x.endswith('png') and x.startswith('dest_'))
    ]
    dest_file.sort()
    dest_file2 = [
        x for x in os.listdir(os.path.join(save_dir, episode_path)) if (x.endswith('png') and x.startswith('dest2_'))
    ]
    dest_file2.sort()
    img_file = [
        x for x in os.listdir(os.path.join(save_dir, episode_path)) if (x.endswith('png') and x.startswith('rgb'))
    ]
    img_file.sort()
    #print(waypoint_file)
    measurements_list = []
    for k in tqdm(img_file):
        index = k.split('_')[1].split('.')[0]
        # Decode the fixed-layout float32 measurement record for this frame.
        measurements = np.frombuffer(lmdb_file.get(('measurements_%05d' % int(index)).encode()), np.float32)
        data = {}
        data['time'] = float(measurements[1])
        data['acceleration'] = np.array([measurements[4], measurements[5], measurements[6]], dtype=np.float32)
        data['location'] = np.array([measurements[7], measurements[8], measurements[9]], dtype=np.float32)
        data['direction'] = float(measurements[11]) - 1.
        data['velocity'] = np.array([measurements[12], measurements[13], measurements[14]], dtype=np.float32)
        data['angular_velocity'] = np.array([measurements[15], measurements[16], measurements[17]], dtype=np.float32)
        data['rotation'] = np.array([measurements[18], measurements[19], measurements[20]]).astype(np.float32)
        data['steer'] = float(measurements[21])
        data['throttle'] = float(measurements[22])
        data['brake'] = float(measurements[23])
        data['real_steer'] = float(measurements[24])
        data['real_throttle'] = float(measurements[25])
        data['real_brake'] = float(measurements[26])
        data['tl_state'] = float(measurements[27])
        data['tl_distance'] = float(measurements[28])
        # Resolve the high-level option: when the shifted direction equals 3,
        # consult the per-frame direction file up to the destination index.
        waypoint_list = np.load(os.path.join(save_dir, episode_path, 'waypoints_%05d.npy' % int(index)))
        start = np.linalg.norm(data['location'][:2] - waypoint_list[0][:2])
        _, end_ind = find_dest_with_fix_length(start, waypoint_list)
        data['option'] = get_option(os.path.join(save_dir, episode_path, 'direction_%05d.npy' %
                                                 int(index)), end_ind) if data['direction'] == 3 else data['direction']
        #print(episode_path, int(index), data['option'], data['command'])
        measurements_list.append(data)
    # Store paths relative to save_dir so the preload file is relocatable.
    dest_file = [os.path.join(episode_path, x) for x in dest_file]
    dest_file2 = [os.path.join(episode_path, x) for x in dest_file2]
    img_file = [os.path.join(episode_path, x) for x in img_file]
    measurements_list = get_potential_map(save_dir, episode_path, measurements_list, img_file)
    pm_file = [
        x for x in os.listdir(os.path.join(save_dir, episode_path, 'pm'))
        if x.endswith('png') and (not x.startswith('fake'))
    ]
    pm_file.sort()
    pm_file = [os.path.join(episode_path, 'pm', x) for x in pm_file]
    lidar_file = [
        x for x in os.listdir(os.path.join(save_dir, episode_path)) if (x.endswith('npy') and x.startswith('lidar'))
    ]
    lidar_file.sort()
    lidar_file = [os.path.join(episode_path, x) for x in lidar_file]
    get_inverse_potential_map(save_dir, episode_path, pm_file, lidar_file)
    ipm_file = [
        x for x in os.listdir(os.path.join(save_dir, episode_path, 'ipm'))
        if x.endswith('png') and (not x.startswith('pred'))
    ]
    ipm_file.sort()
    ipm_file = [os.path.join(episode_path, 'ipm', x) for x in ipm_file]
    if not os.path.exists(config['npy_prefix']):
        os.mkdir(config['npy_prefix'])
    np.save(
        '%s/%s.npy' % (config['npy_prefix'], episode_path),
        [img_file, dest_file, dest_file2, pm_file, ipm_file, measurements_list]
    )
if __name__ == '__main__':
    # Root directory that holds one sub-folder per recorded episode.
    save_dir = config['dir_path']
    # Pick up every folder whose name marks it as an episode recording.
    episodes = [name for name in os.listdir(save_dir) if name.startswith('epi')]
    for episode_path in tqdm(episodes):
        # Per-episode pipeline: render both destination overlays, then
        # bundle measurements/images/maps into a single .npy file.
        destination(save_dir, episode_path)
        destination2(save_dir, episode_path)
        save_as_npy(save_dir, episode_path)
| [
"os.path.exists",
"PIL.Image.fromarray",
"os.listdir",
"copy.deepcopy",
"carla.Transform",
"tqdm.tqdm",
"collect_pm.CollectPerspectiveImage",
"os.path.join",
"numpy.asarray",
"carla.Location",
"numpy.array",
"numpy.zeros",
"PIL.ImageDraw.Draw",
"os.mkdir",
"numpy.linalg.norm",
"collect... | [((2870, 2910), 'numpy.zeros', 'np.zeros', (['(6000, 6000, 3)'], {'dtype': '"""uint8"""'}), "((6000, 6000, 3), dtype='uint8')\n", (2878, 2910), True, 'import numpy as np\n'), ((2954, 2981), 'PIL.Image.fromarray', 'Image.fromarray', (['origin_map'], {}), '(origin_map)\n', (2969, 2981), True, 'import PIL.Image as Image\n'), ((3260, 3286), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['origin_map'], {}), '(origin_map)\n', (3274, 3286), False, 'from PIL import ImageDraw\n'), ((3664, 3690), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['origin_map'], {}), '(origin_map)\n', (3678, 3690), False, 'from PIL import ImageDraw\n'), ((4207, 4258), 'numpy.linalg.norm', 'np.linalg.norm', (['(waypoint_list[0][:2] - location[:2])'], {}), '(waypoint_list[0][:2] - location[:2])\n', (4221, 4258), True, 'import numpy as np\n'), ((4473, 4499), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['origin_map'], {}), '(origin_map)\n', (4487, 4499), False, 'from PIL import ImageDraw\n'), ((6743, 6762), 'tqdm.tqdm', 'tqdm', (['waypoint_file'], {}), '(waypoint_file)\n', (6747, 6762), False, 'from tqdm import tqdm\n'), ((8740, 8779), 'collect_pm.CollectPerspectiveImage', 'CollectPerspectiveImage', (['params', 'sensor'], {}), '(params, sensor)\n', (8763, 8779), False, 'from collect_pm import CollectPerspectiveImage, InversePerspectiveMapping\n'), ((8811, 8830), 'tqdm.tqdm', 'tqdm', (['waypoint_file'], {}), '(waypoint_file)\n', (8815, 8830), False, 'from tqdm import tqdm\n'), ((10056, 10098), 'os.path.join', 'os.path.join', (['save_dir', 'episode_path', '"""pm"""'], {}), "(save_dir, episode_path, 'pm')\n", (10068, 10098), False, 'import os\n'), ((10851, 10890), 'collect_pm.CollectPerspectiveImage', 'CollectPerspectiveImage', (['params', 'sensor'], {}), '(params, sensor)\n', (10874, 10890), False, 'from collect_pm import CollectPerspectiveImage, InversePerspectiveMapping\n'), ((12298, 12341), 'os.path.join', 'os.path.join', (['save_dir', 'episode_path', '"""ipm"""'], {}), "(save_dir, episode_path, 
'ipm')\n", (12310, 12341), False, 'import os\n'), ((12488, 12529), 'collect_pm.InversePerspectiveMapping', 'InversePerspectiveMapping', (['params', 'sensor'], {}), '(params, sensor)\n', (12513, 12529), False, 'from collect_pm import CollectPerspectiveImage, InversePerspectiveMapping\n'), ((12927, 12966), 'numpy.load', 'np.load', (['option_name'], {'allow_pickle': '(True)'}), '(option_name, allow_pickle=True)\n', (12934, 12966), True, 'import numpy as np\n'), ((13852, 13866), 'tqdm.tqdm', 'tqdm', (['img_file'], {}), '(img_file)\n', (13856, 13866), False, 'from tqdm import tqdm\n'), ((16892, 17028), 'numpy.save', 'np.save', (["('%s/%s.npy' % (config['npy_prefix'], episode_path))", '[img_file, dest_file, dest_file2, pm_file, ipm_file, measurements_list]'], {}), "('%s/%s.npy' % (config['npy_prefix'], episode_path), [img_file,\n dest_file, dest_file2, pm_file, ipm_file, measurements_list])\n", (16899, 17028), True, 'import numpy as np\n'), ((17297, 17313), 'tqdm.tqdm', 'tqdm', (['epi_folder'], {}), '(epi_folder)\n', (17301, 17313), False, 'from tqdm import tqdm\n'), ((3941, 4004), 'numpy.linalg.norm', 'np.linalg.norm', (['(waypoint_list[i + 1][:2] - waypoint_list[i][:2])'], {}), '(waypoint_list[i + 1][:2] - waypoint_list[i][:2])\n', (3955, 4004), True, 'import numpy as np\n'), ((5178, 5193), 'numpy.asarray', 'np.asarray', (['nav'], {}), '(nav)\n', (5188, 5193), True, 'import numpy as np\n'), ((9279, 9330), 'numpy.linalg.norm', 'np.linalg.norm', (['(waypoint_list[0][:2] - location[:2])'], {}), '(waypoint_list[0][:2] - location[:2])\n', (9293, 9330), True, 'import numpy as np\n'), ((9658, 9674), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (9666, 9674), True, 'import numpy as np\n'), ((10128, 10150), 'os.path.exists', 'os.path.exists', (['pm_dir'], {}), '(pm_dir)\n', (10142, 10150), False, 'import os\n'), ((10160, 10176), 'os.mkdir', 'os.mkdir', (['pm_dir'], {}), '(pm_dir)\n', (10168, 10176), False, 'import os\n'), ((10273, 10290), 'carla.Transform', 
'carla.Transform', ([], {}), '()\n', (10288, 10290), False, 'import carla\n'), ((12353, 12376), 'os.path.exists', 'os.path.exists', (['ipm_dir'], {}), '(ipm_dir)\n', (12367, 12376), False, 'import os\n'), ((12386, 12403), 'os.mkdir', 'os.mkdir', (['ipm_dir'], {}), '(ipm_dir)\n', (12394, 12403), False, 'import os\n'), ((14118, 14197), 'numpy.array', 'np.array', (['[measurements[4], measurements[5], measurements[6]]'], {'dtype': 'np.float32'}), '([measurements[4], measurements[5], measurements[6]], dtype=np.float32)\n', (14126, 14197), True, 'import numpy as np\n'), ((14225, 14304), 'numpy.array', 'np.array', (['[measurements[7], measurements[8], measurements[9]]'], {'dtype': 'np.float32'}), '([measurements[7], measurements[8], measurements[9]], dtype=np.float32)\n', (14233, 14304), True, 'import numpy as np\n'), ((14389, 14476), 'numpy.array', 'np.array', (['[measurements[12], measurements[13], measurements[14]]'], {'dtype': 'np.float32'}), '([measurements[12], measurements[13], measurements[14]], dtype=np.\n float32)\n', (14397, 14476), True, 'import numpy as np\n'), ((14507, 14594), 'numpy.array', 'np.array', (['[measurements[15], measurements[16], measurements[17]]'], {'dtype': 'np.float32'}), '([measurements[15], measurements[16], measurements[17]], dtype=np.\n float32)\n', (14515, 14594), True, 'import numpy as np\n'), ((15238, 15297), 'numpy.linalg.norm', 'np.linalg.norm', (["(data['location'][:2] - waypoint_list[0][:2])"], {}), "(data['location'][:2] - waypoint_list[0][:2])\n", (15252, 15297), True, 'import numpy as np\n'), ((15715, 15744), 'os.path.join', 'os.path.join', (['episode_path', 'x'], {}), '(episode_path, x)\n', (15727, 15744), False, 'import os\n'), ((15783, 15812), 'os.path.join', 'os.path.join', (['episode_path', 'x'], {}), '(episode_path, x)\n', (15795, 15812), False, 'import os\n'), ((15850, 15879), 'os.path.join', 'os.path.join', (['episode_path', 'x'], {}), '(episode_path, x)\n', (15862, 15879), False, 'import os\n'), ((16186, 16221), 
'os.path.join', 'os.path.join', (['episode_path', '"""pm"""', 'x'], {}), "(episode_path, 'pm', x)\n", (16198, 16221), False, 'import os\n'), ((16423, 16452), 'os.path.join', 'os.path.join', (['episode_path', 'x'], {}), '(episode_path, x)\n', (16435, 16452), False, 'import os\n'), ((16743, 16779), 'os.path.join', 'os.path.join', (['episode_path', '"""ipm"""', 'x'], {}), "(episode_path, 'ipm', x)\n", (16755, 16779), False, 'import os\n'), ((16811, 16847), 'os.path.exists', 'os.path.exists', (["config['npy_prefix']"], {}), "(config['npy_prefix'])\n", (16825, 16847), False, 'import os\n'), ((16857, 16887), 'os.mkdir', 'os.mkdir', (["config['npy_prefix']"], {}), "(config['npy_prefix'])\n", (16865, 16887), False, 'import os\n'), ((7154, 7193), 'os.path.join', 'os.path.join', (['save_dir', 'episode_path', 'k'], {}), '(save_dir, episode_path, k)\n', (7166, 7193), False, 'import os\n'), ((7287, 7312), 'copy.deepcopy', 'copy.deepcopy', (['origin_map'], {}), '(origin_map)\n', (7300, 7312), False, 'import copy\n'), ((7623, 7717), 'carla.Location', 'carla.Location', ([], {'x': "config['location'][0]", 'y': "config['location'][1]", 'z': "config['location'][2]"}), "(x=config['location'][0], y=config['location'][1], z=config[\n 'location'][2])\n", (7637, 7717), False, 'import carla\n'), ((9222, 9261), 'os.path.join', 'os.path.join', (['save_dir', 'episode_path', 'k'], {}), '(save_dir, episode_path, k)\n', (9234, 9261), False, 'import os\n'), ((12126, 12166), 'os.path.join', 'os.path.join', (['pm_dir', "('%05d.png' % index)"], {}), "(pm_dir, '%05d.png' % index)\n", (12138, 12166), False, 'import os\n'), ((12595, 12629), 'os.path.join', 'os.path.join', (['save_dir', 'pm_file[i]'], {}), '(save_dir, pm_file[i])\n', (12607, 12629), False, 'import os\n'), ((12655, 12692), 'os.path.join', 'os.path.join', (['save_dir', 'lidar_file[i]'], {}), '(save_dir, lidar_file[i])\n', (12667, 12692), False, 'import os\n'), ((12835, 12872), 'os.path.join', 'os.path.join', (['ipm_dir', "('%05d.png' % 
i)"], {}), "(ipm_dir, '%05d.png' % i)\n", (12847, 12872), False, 'import os\n'), ((17139, 17159), 'os.listdir', 'os.listdir', (['save_dir'], {}), '(save_dir)\n', (17149, 17159), False, 'import os\n'), ((6468, 6525), 'os.path.join', 'os.path.join', (['save_dir', 'episode_path', '"""measurements.lmdb"""'], {}), "(save_dir, episode_path, 'measurements.lmdb')\n", (6480, 6525), False, 'import os\n'), ((6587, 6623), 'os.path.join', 'os.path.join', (['save_dir', 'episode_path'], {}), '(save_dir, episode_path)\n', (6599, 6623), False, 'import os\n'), ((6938, 6999), 'numpy.array', 'np.array', (['[measurements[7], measurements[8], measurements[9]]'], {}), '([measurements[7], measurements[8], measurements[9]])\n', (6946, 6999), True, 'import numpy as np\n'), ((7038, 7102), 'numpy.array', 'np.array', (['[measurements[18], measurements[19], measurements[20]]'], {}), '([measurements[18], measurements[19], measurements[20]])\n', (7046, 7102), True, 'import numpy as np\n'), ((8403, 8460), 'os.path.join', 'os.path.join', (['save_dir', 'episode_path', '"""measurements.lmdb"""'], {}), "(save_dir, episode_path, 'measurements.lmdb')\n", (8415, 8460), False, 'import os\n'), ((8522, 8558), 'os.path.join', 'os.path.join', (['save_dir', 'episode_path'], {}), '(save_dir, episode_path)\n', (8534, 8558), False, 'import os\n'), ((9006, 9067), 'numpy.array', 'np.array', (['[measurements[7], measurements[8], measurements[9]]'], {}), '([measurements[7], measurements[8], measurements[9]])\n', (9014, 9067), True, 'import numpy as np\n'), ((9106, 9170), 'numpy.array', 'np.array', (['[measurements[18], measurements[19], measurements[20]]'], {}), '([measurements[18], measurements[19], measurements[20]])\n', (9114, 9170), True, 'import numpy as np\n'), ((13233, 13290), 'os.path.join', 'os.path.join', (['save_dir', 'episode_path', '"""measurements.lmdb"""'], {}), "(save_dir, episode_path, 'measurements.lmdb')\n", (13245, 13290), False, 'import os\n'), ((13348, 13384), 'os.path.join', 'os.path.join', 
(['save_dir', 'episode_path'], {}), '(save_dir, episode_path)\n', (13360, 13384), False, 'import os\n'), ((13511, 13547), 'os.path.join', 'os.path.join', (['save_dir', 'episode_path'], {}), '(save_dir, episode_path)\n', (13523, 13547), False, 'import os\n'), ((13674, 13710), 'os.path.join', 'os.path.join', (['save_dir', 'episode_path'], {}), '(save_dir, episode_path)\n', (13686, 13710), False, 'import os\n'), ((14617, 14681), 'numpy.array', 'np.array', (['[measurements[18], measurements[19], measurements[20]]'], {}), '([measurements[18], measurements[19], measurements[20]])\n', (14625, 14681), True, 'import numpy as np\n'), ((16042, 16084), 'os.path.join', 'os.path.join', (['save_dir', 'episode_path', '"""pm"""'], {}), "(save_dir, episode_path, 'pm')\n", (16054, 16084), False, 'import os\n'), ((16290, 16326), 'os.path.join', 'os.path.join', (['save_dir', 'episode_path'], {}), '(save_dir, episode_path)\n', (16302, 16326), False, 'import os\n'), ((16596, 16639), 'os.path.join', 'os.path.join', (['save_dir', 'episode_path', '"""ipm"""'], {}), "(save_dir, episode_path, 'ipm')\n", (16608, 16639), False, 'import os\n')] |
import cv2
import imutils
import numpy as np
from PIL import Image as PilImage
from PIL.ExifTags import TAGS
class Image:
    """
    Encapsulation of routines to an image. Internally, the
    image is stored as a numpy.ndarray.

    Arguments
    ---------
    filename: str, optional
        if given, the image referenced by the filename is loaded from disc
    """

    def __init__(self, filename=None):
        self._raw = None
        if filename:
            self.load(filename)
        self.contours = []

    def __getitem__(self, index):
        """Expose raw pixel access, e.g. ``img[y, x]``."""
        return self._raw[index]

    @property
    def height(self):
        """Height of the image in pixels."""
        if self._raw is None:
            raise ValueError("Cannot read height from undefined image")
        return self._raw.shape[0]

    @property
    def width(self):
        """Width of the image in pixels."""
        if self._raw is None:
            raise ValueError("Cannot read width from undefined image")
        return self._raw.shape[1]

    def crop_by_contour(self, index=0, inplace=False):
        """
        Crop the image about the bounding box of a contour.

        The contours need to be created first with the ``find_contours`` routine.

        Arguments
        ---------
        index: int, optional
            the index of the contour to use
        inplace: bool, optional
            if True, the image is cropped and returned, otherwise a new image is returned

        Returns
        -------
        Image:
            the cropped image
        """
        if index < 0 or index >= len(self.contours):
            raise IndexError(f"The given contour index ({index}) is out of range")
        x, y, w, h = cv2.boundingRect(self.contours[index])
        cropped = self._raw[y : y + h, x : x + w]  # noqa
        if inplace:
            self._raw = cropped
            return self
        new_image = Image()
        new_image._raw = cropped
        return new_image

    def draw_contours(self, color, inplace=False):
        """
        Draw around each contour of the image

        The contours need to be created first with the ``find_contours`` routine.

        Arguments
        ---------
        color: tuple
            the RGB color of the boundary
        inplace: bool, optional
            if True, the image is modified and returned, otherwise a new image is returned

        Returns
        -------
        Image:
            the modified image
        """
        raw = self._raw.copy()
        for contour in self.contours:
            cv2.drawContours(raw, [contour], -1, color, 2)
        if inplace:
            self._raw = raw
            return self
        new_image = Image()
        new_image._raw = raw
        return new_image

    def find_contours(self, threshold):
        """
        Find all contours of the image by first converting it
        to gray-scale and then applying a color threshold.

        The contours are stored on ``self.contours``, sorted by
        descending area.

        Arguments
        ---------
        threshold: int
            any pixel above the threshold is counted as foreground, otherwise background
        """
        gray_img = cv2.cvtColor(self._raw, cv2.COLOR_BGR2GRAY)
        # Blur first so single-pixel noise does not create spurious contours.
        blurred_img = cv2.GaussianBlur(gray_img, (5, 5), 0)
        thresh_img = cv2.threshold(blurred_img, threshold, 255, cv2.THRESH_BINARY)[1]
        cnts = cv2.findContours(
            thresh_img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
        )
        self.contours = sorted(
            imutils.grab_contours(cnts), key=cv2.contourArea, reverse=True
        )

    def load(self, filename, auto_orient=True):
        """
        Load an image from file

        Arguments
        ---------
        filename: str
            the path to the file
        auto_orient: bool, optional
            if True, orient the image according to the meta data (if available)
        """
        pil_image = PilImage.open(filename)
        if auto_orient:
            # BUGFIX: PIL's rotate() returns a *new* image, so the result must
            # be re-bound here -- the previous code discarded the rotation.
            pil_image = self._orient_pil_image(pil_image)
        self._raw = np.array(pil_image)
        if len(self._raw.shape) == 3 and self._raw.shape[2] >= 3:
            self._raw = cv2.cvtColor(self._raw, cv2.COLOR_BGR2RGB)

    def save(self, filename, jpg_quality=None):
        """
        Save image to disc

        Arguments
        ---------
        filename: str
            the path to the file
        jpg_quality: int, optional
            an value between 1 and 100 indicating the JPG quality
        """
        params = []
        # Explicit None-check so a (low but valid) quality of 0 is not ignored.
        if jpg_quality is not None:
            params = [int(cv2.IMWRITE_JPEG_QUALITY), jpg_quality]
        cv2.imwrite(filename, self._raw, params)

    @staticmethod
    def _find_exif_value(pil_image, field):
        # Returns 1 (the EXIF "normal" orientation) when no EXIF data exists
        # or the requested field is absent.
        exif = pil_image._getexif()
        if not exif:
            return 1
        for key, value in exif.items():
            if TAGS.get(key) == field:
                return value
        return 1

    @staticmethod
    def _orient_pil_image(pil_image):
        """Rotate *pil_image* according to its EXIF Orientation tag.

        Returns the (possibly new) PIL image; the caller must use the
        return value because PIL's ``rotate`` does not work in place.
        """
        orientation = Image._find_exif_value(pil_image, "Orientation")
        rotation_angle = {3: 180, 6: 270, 8: 90}.get(orientation, 0)
        if rotation_angle:
            # expand=True so 90/270 rotations swap width/height instead of
            # cropping the image to its original bounding box.
            pil_image = pil_image.rotate(rotation_angle, expand=True)
        return pil_image
| [
"cv2.imwrite",
"PIL.Image.open",
"cv2.drawContours",
"cv2.threshold",
"PIL.ExifTags.TAGS.get",
"numpy.array",
"imutils.grab_contours",
"cv2.cvtColor",
"cv2.GaussianBlur",
"cv2.boundingRect"
] | [((1618, 1656), 'cv2.boundingRect', 'cv2.boundingRect', (['self.contours[index]'], {}), '(self.contours[index])\n', (1634, 1656), False, 'import cv2\n'), ((3024, 3067), 'cv2.cvtColor', 'cv2.cvtColor', (['self._raw', 'cv2.COLOR_BGR2GRAY'], {}), '(self._raw, cv2.COLOR_BGR2GRAY)\n', (3036, 3067), False, 'import cv2\n'), ((3090, 3127), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray_img', '(5, 5)', '(0)'], {}), '(gray_img, (5, 5), 0)\n', (3106, 3127), False, 'import cv2\n'), ((3782, 3805), 'PIL.Image.open', 'PilImage.open', (['filename'], {}), '(filename)\n', (3795, 3805), True, 'from PIL import Image as PilImage\n'), ((3896, 3915), 'numpy.array', 'np.array', (['pil_image'], {}), '(pil_image)\n', (3904, 3915), True, 'import numpy as np\n'), ((4460, 4500), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'self._raw', 'params'], {}), '(filename, self._raw, params)\n', (4471, 4500), False, 'import cv2\n'), ((2467, 2513), 'cv2.drawContours', 'cv2.drawContours', (['raw', '[contour]', '(-1)', 'color', '(2)'], {}), '(raw, [contour], -1, color, 2)\n', (2483, 2513), False, 'import cv2\n'), ((3149, 3210), 'cv2.threshold', 'cv2.threshold', (['blurred_img', 'threshold', '(255)', 'cv2.THRESH_BINARY'], {}), '(blurred_img, threshold, 255, cv2.THRESH_BINARY)\n', (3162, 3210), False, 'import cv2\n'), ((3376, 3403), 'imutils.grab_contours', 'imutils.grab_contours', (['cnts'], {}), '(cnts)\n', (3397, 3403), False, 'import imutils\n'), ((4006, 4048), 'cv2.cvtColor', 'cv2.cvtColor', (['self._raw', 'cv2.COLOR_BGR2RGB'], {}), '(self._raw, cv2.COLOR_BGR2RGB)\n', (4018, 4048), False, 'import cv2\n'), ((4698, 4711), 'PIL.ExifTags.TAGS.get', 'TAGS.get', (['key'], {}), '(key)\n', (4706, 4711), False, 'from PIL.ExifTags import TAGS\n')] |
import numpy as np
import scipy.interpolate as si
def euclidean_distance(a, b):
    """Return the Euclidean (L2) distance between points ``a`` and ``b``."""
    offset = a - b
    return np.sqrt(np.dot(offset, offset))
# source: https://stackoverflow.com/questions/34803197/fast-b-spline-algorithm-with-numpy-scipy
def bspline(cv, n=100, degree=3, periodic=False):
    """Sample ``n`` points along a B-spline over the given control vertices.

    cv : array of control vertices
    n : number of samples to return
    degree : curve degree
    periodic : True means the curve is closed
    """
    cv = np.asarray(cv)
    num_ctrl = cv.shape[0]

    if periodic:
        # Closed curve: uniform knot vector plus wrapped control points.
        knots = np.arange(-degree, num_ctrl + degree + 1)
        whole, part = divmod(num_ctrl + degree + 1, num_ctrl)
        cv = np.roll(np.concatenate((cv,) * whole + (cv[:part],)), -1, axis=0)
        degree = np.clip(degree, 1, degree)
    else:
        # Open curve: clamped knot vector so the spline hits both end points.
        degree = np.clip(degree, 1, num_ctrl - 1)
        knots = np.clip(np.arange(num_ctrl + degree + 1) - degree, 0, num_ctrl - degree)

    # Evaluate the spline over its full parameter range.
    max_param = num_ctrl - (degree * (1 - periodic))
    spline = si.BSpline(knots, cv, degree)
    return spline(np.linspace(0, max_param, n))
# from https://en.wikipedia.org/wiki/Centripetal_Catmull%E2%80%93Rom_spline
def CatmullRomSpline(P0, P1, P2, P3, nPoints=100):
    """
    P0, P1, P2, and P3 should be (x,y) point pairs that define one
    centripetal Catmull-Rom segment; ``nPoints`` samples of the curve
    between P1 and P2 are returned.
    """
    # Work on numpy copies so the blending below is vectorised.
    P0, P1, P2, P3 = map(np.array, [P0, P1, P2, P3])

    alpha = 0.5  # 0.5 selects the centripetal parameterisation

    def next_knot(t_prev, Pa, Pb):
        # Advance the knot by the chord length raised to alpha.
        ax, ay = Pa
        bx, by = Pb
        return (((bx - ax) ** 2 + (by - ay) ** 2) ** 0.5) ** alpha + t_prev

    t0 = 0
    t1 = next_knot(t0, P0, P1)
    t2 = next_knot(t1, P1, P2)
    t3 = next_knot(t2, P2, P3)

    # Sample parameters between P1 and P2 only; shape (nPoints, 1) so each
    # parameter value broadcasts against the 2-component points.
    t = np.linspace(t1, t2, nPoints).reshape(-1, 1)

    # Pyramid of linear interpolations (Barry & Goldman recursion).
    A1 = (t1 - t) / (t1 - t0) * P0 + (t - t0) / (t1 - t0) * P1
    A2 = (t2 - t) / (t2 - t1) * P1 + (t - t1) / (t2 - t1) * P2
    A3 = (t3 - t) / (t3 - t2) * P2 + (t - t2) / (t3 - t2) * P3
    B1 = (t2 - t) / (t2 - t0) * A1 + (t - t0) / (t2 - t0) * A2
    B2 = (t3 - t) / (t3 - t1) * A2 + (t - t1) / (t3 - t1) * A3
    return (t2 - t) / (t2 - t1) * B1 + (t - t1) / (t2 - t1) * B2
def CatmullRomChain(P, n_points=100):
    """
    Evaluate Catmull-Rom segments over a chain of points and return the
    concatenated curve as a flat list of (x, y) samples.
    """
    curve = []
    # Each segment needs a window of four consecutive control points.
    for start in range(len(P) - 3):
        window = P[start:start + 4]
        curve.extend(CatmullRomSpline(*window, nPoints=n_points))
    return curve
def catmull_rom_spline(coords, n_points=10):
    """
    Interpolate ``coords`` with a Catmull-Rom spline, adding one phantom
    control point at each end so the curve spans the whole input range.
    """
    pts = [np.array(p) for p in coords]
    # Mirror the first/last segments to create the phantom end points.
    head = pts[0] - (pts[1] - pts[0])
    tail = pts[-1] + (pts[-1] - pts[-2])
    return np.array(CatmullRomChain([head] + pts + [tail], n_points=n_points))
| [
"numpy.clip",
"numpy.asarray",
"numpy.array",
"numpy.dot",
"numpy.linspace",
"numpy.concatenate",
"scipy.interpolate.BSpline",
"numpy.arange"
] | [((490, 504), 'numpy.asarray', 'np.asarray', (['cv'], {}), '(cv)\n', (500, 504), True, 'import numpy as np\n'), ((1046, 1072), 'scipy.interpolate.BSpline', 'si.BSpline', (['kv', 'cv', 'degree'], {}), '(kv, cv, degree)\n', (1056, 1072), True, 'import scipy.interpolate as si\n'), ((1864, 1892), 'numpy.linspace', 'np.linspace', (['t1', 't2', 'nPoints'], {}), '(t1, t2, nPoints)\n', (1875, 1892), True, 'import numpy as np\n'), ((3049, 3060), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (3057, 3060), True, 'import numpy as np\n'), ((118, 136), 'numpy.dot', 'np.dot', (['diff', 'diff'], {}), '(diff, diff)\n', (124, 136), True, 'import numpy as np\n'), ((579, 617), 'numpy.arange', 'np.arange', (['(-degree)', '(count + degree + 1)'], {}), '(-degree, count + degree + 1)\n', (588, 617), True, 'import numpy as np\n'), ((780, 806), 'numpy.clip', 'np.clip', (['degree', '(1)', 'degree'], {}), '(degree, 1, degree)\n', (787, 806), True, 'import numpy as np\n'), ((854, 883), 'numpy.clip', 'np.clip', (['degree', '(1)', '(count - 1)'], {}), '(degree, 1, count - 1)\n', (861, 883), True, 'import numpy as np\n'), ((1088, 1116), 'numpy.linspace', 'np.linspace', (['(0)', 'max_param', 'n'], {}), '(0, max_param, n)\n', (1099, 1116), True, 'import numpy as np\n'), ((700, 749), 'numpy.concatenate', 'np.concatenate', (['((cv,) * factor + (cv[:fraction],))'], {}), '((cv,) * factor + (cv[:fraction],))\n', (714, 749), True, 'import numpy as np\n'), ((905, 934), 'numpy.arange', 'np.arange', (['(count + degree + 1)'], {}), '(count + degree + 1)\n', (914, 934), True, 'import numpy as np\n')] |
import numpy as np
import torch
from torch import nn
from torch import optim
from torchvision import datasets, transforms, models
from PIL import ImageFile
import argparse
import os
import json
# DONE: Import dependencies for Debugging andd Profiling
# ====================================#
# 1. Import SMDebug framework class. #
# ====================================#
import smdebug.pytorch as smd
def train(model, trainloader, criterion, optimizer, hook, epoch):
    '''
    Run one training epoch of *model* over *trainloader*.

    Assumes *criterion* averages over the batch (PyTorch's default
    ``reduction='mean'``), so the per-sample bookkeeping below scales the
    batch loss back up by the batch size before normalising by the
    dataset size.

    Returns
    -------
    tuple
        a 1-tuple ``(epoch_avg_loss,)`` -- kept as a tuple for backward
        compatibility with existing callers
    '''
    model.train()
    # =================================================#
    # 2a. Set the SMDebug hook for the training phase. #
    # =================================================#
    if hook:
        hook.set_mode(smd.modes.TRAIN)
    device = get_device()
    running_loss_sum = 0.0
    print_every = 10
    for batch_idx, (inputs, labels) in enumerate(trainloader):
        # Move input and label tensors to the default device
        inputs, labels = inputs.to(device), labels.to(device)
        logps = model.forward(inputs)
        loss = criterion(logps, labels)
        if hook:
            hook.record_tensor_value(tensor_name="NLLLoss", tensor_value=loss)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # BUGFIX: criterion already averages over the batch, so loss.item()
        # IS the per-sample average (the old code divided by batch size again).
        batch_avg_loss = loss.item()
        # Weight by batch size so the epoch figure is a true per-sample mean
        # even when the last batch is smaller.
        running_loss_sum += loss.item() * inputs.size(0)
        if batch_idx % print_every == 0:
            print(f" Epoch {epoch + 1}.. "
                  f"Epoch progress: {100 * batch_idx / len(trainloader):.1f}%.. "
                  f"Batch avg train loss: {batch_avg_loss:.3f}.. ")
    running_avg_loss = running_loss_sum / len(trainloader.dataset)
    return (running_avg_loss,)
def test(model, testloader, criterion, hook=None, epoch=0):
    '''
    Evaluate *model* on *testloader* without updating weights.

    Assumes *criterion* averages over the batch (``reduction='mean'``);
    per-batch losses are therefore scaled back up by the batch size so
    the returned loss is a true per-sample average. Accuracy is computed
    by counting correct predictions (not averaging per-batch means), so a
    smaller final batch is weighted correctly.

    Returns
    -------
    tuple
        ``(epoch_avg_loss, epoch_avg_accuracy)``
    '''
    model.eval()
    # ===================================================#
    # 2b. Set the SMDebug hook for the validation phase. #
    # ===================================================#
    if hook:
        hook.set_mode(smd.modes.EVAL)
    device = get_device()
    correct = 0.0
    epoch_loss_sum = 0.0
    with torch.no_grad():
        for inputs, labels in testloader:
            inputs, labels = inputs.to(device), labels.to(device)
            logps = model.forward(inputs)
            batch_loss = criterion(logps, labels)
            if hook:
                hook.record_tensor_value(tensor_name="NLLLoss", tensor_value=batch_loss)
            # Scale mean batch loss up to a per-sample sum (see docstring).
            epoch_loss_sum += batch_loss.item() * inputs.size(0)
            # Count correct top-1 predictions.
            ps = torch.exp(logps)
            top_p, top_class = ps.topk(1, dim=1)
            equals = top_class == labels.view(*top_class.shape)
            correct += equals.type(torch.FloatTensor).sum().item()
    n_samples = len(testloader.dataset)
    epoch_avg_loss = epoch_loss_sum / n_samples
    epoch_avg_accuracy = correct / n_samples
    # Restore training mode for the caller's next training epoch.
    model.train()
    return epoch_avg_loss, epoch_avg_accuracy
def get_device():
    """Return the torch device to run on: CUDA when available, else CPU."""
    if torch.cuda.is_available():
        return torch.device("cuda")
    return torch.device("cpu")
def net(num_classes=133):
    '''
    Build a transfer-learning model: a frozen, pretrained DenseNet-121
    backbone with a freshly initialised classifier head.

    Arguments
    ---------
    num_classes: int, optional
        number of output classes (default 133, preserving the original
        hard-coded head size for existing callers)

    Returns
    -------
    the model, already moved to the available device
    '''
    model = models.densenet121(pretrained=True)
    # Freeze the backbone so only the new classifier head is trained.
    for param in model.parameters():
        param.requires_grad = False

    from collections import OrderedDict
    classifier = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(1024, 512)),  # 1024 = densenet121 feature size
        ('relu', nn.ReLU()),
        ('dropout', nn.Dropout(0.2)),
        ('fc2', nn.Linear(512, num_classes)),
        ('output', nn.LogSoftmax(dim=1))  # pairs with nn.NLLLoss
    ]))
    model.classifier = classifier

    # Resolve the device once instead of calling get_device() twice.
    device = get_device()
    print('Device: ', device)
    model.to(device)
    return model
def create_data_loaders(train_dir, batch_size, test_dir, test_batch_size):
    """Build ImageFolder datasets and the matching train/test DataLoaders.

    Training data gets augmentation (rotation, random crop, flip); test
    data only deterministic resize/centre-crop. Both are normalised with
    the standard ImageNet statistics.
    """
    # Normalize is stateless, so one instance can serve both pipelines.
    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
    train_transforms = transforms.Compose([
        transforms.RandomRotation(30),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    test_transforms = transforms.Compose([
        transforms.Resize(255),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])

    train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
    test_data = datasets.ImageFolder(test_dir, transform=test_transforms)
    trainloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)
    testloader = torch.utils.data.DataLoader(test_data, batch_size=test_batch_size)
    return trainloader, testloader
def main(args):
    """Entry point: build, train, evaluate and persist the classifier."""
    # Record the job's configuration up front so every run is reproducible.
    print(f'HYPERPARAMS: batch_size={args.batch_size}; test_batchsize={args.test_batch_size}; epochs={args.epochs}; lr={args.lr}')
    print('ALL ARGS: ', args)

    model = net()
    model.to(get_device())

    # ======================================================#
    # 3a. Register the SMDebug hook to save output tensors. #
    # ======================================================#
    # Hook.create_from_json_file / register_module did not work with the
    # DenseNet model, but get_hook does.
    hook = smd.get_hook(create_if_not_exists=True)

    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr=args.lr)

    trainloader, testloader = create_data_loaders(
        args.train_dir, args.batch_size, args.test_dir, args.test_batch_size)
    assert len(trainloader) > 0
    assert len(testloader) > 0

    # Some dataset images are truncated; let PIL load them anyway.
    ImageFile.LOAD_TRUNCATED_IMAGES = True

    n_epochs = args.epochs
    print('Start training...')
    for epoch in range(n_epochs):
        # ===========================================================#
        # 3b. Pass the SMDebug hook to the train and test functions. #
        # ===========================================================#
        (train_loss,) = train(model, trainloader, criterion, optimizer, hook, epoch)
        (test_loss, test_accuracy) = test(model, testloader, criterion, hook, epoch)
        print(f"Epoch {epoch + 1}.. "
              f"Progress: {100 * epoch / n_epochs:.1f}% "
              f"Train loss: {train_loss:.3f} "
              f"Test loss: {test_loss:.3f} "
              f"Test accuracy: {test_accuracy:.3f}")

    # Final held-out evaluation after all epochs.
    (test_loss, test_accuracy) = test(model, testloader, criterion, hook)
    print(f"Final test accuracy: {test_accuracy:.3f}")

    # Persist on CPU so the artifact loads on machines without a GPU.
    torch.save(model.cpu(), args.model_dir + '/model.pth')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # Training hyperparameters (tunable per job).
    parser.add_argument(
        "--batch-size",
        type=int,
        default=64,
        metavar="N",
        help="input batch size for training (default: 64)",
    )
    parser.add_argument(
        "--test-batch-size",
        type=int,
        default=256,
        metavar="N",
        help="input batch size for testing (default: 256)",
    )
    parser.add_argument(
        "--epochs",
        type=int,
        default=1,
        metavar="N",
        help="number of epochs to train (default: 1)",
    )
    # BUGFIX: the help text previously claimed "(default: 0.03)" although
    # the actual default is 0.003.
    parser.add_argument(
        "--lr", type=float, default=0.003, metavar="LR", help="learning rate (default: 0.003)"
    )

    # SageMaker container environment (values injected via SM_* env vars).
    parser.add_argument("--hosts", type=list, default=json.loads(os.environ["SM_HOSTS"]))
    parser.add_argument("--current-host", type=str, default=os.environ["SM_CURRENT_HOST"])
    parser.add_argument("--model-dir", type=str, default=os.environ["SM_MODEL_DIR"])
    parser.add_argument("--train-dir", type=str, default=os.environ["SM_CHANNEL_TRAIN"])
    parser.add_argument("--test-dir", type=str, default=os.environ["SM_CHANNEL_TEST"])
    parser.add_argument("--num-gpus", type=int, default=os.environ["SM_NUM_GPUS"])

    args = parser.parse_args()
    main(args)
    print('done')
| [
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.exp",
"torch.cuda.is_available",
"numpy.mean",
"argparse.ArgumentParser",
"torchvision.datasets.ImageFolder",
"torchvision.transforms.ToTensor",
"torchvision.transforms.RandomResizedCrop",
"json.loads",
"torchvision.transforms.RandomHorizontalFlip",
... | [((3275, 3294), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (3282, 3294), True, 'import numpy as np\n'), ((3651, 3686), 'torchvision.models.densenet121', 'models.densenet121', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (3669, 3686), False, 'from torchvision import datasets, transforms, models\n'), ((5239, 5298), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['train_dir'], {'transform': 'train_transforms'}), '(train_dir, transform=train_transforms)\n', (5259, 5298), False, 'from torchvision import datasets, transforms, models\n'), ((5315, 5372), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['test_dir'], {'transform': 'test_transforms'}), '(test_dir, transform=test_transforms)\n', (5335, 5372), False, 'from torchvision import datasets, transforms, models\n'), ((5392, 5468), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(train_data, batch_size=batch_size, shuffle=True)\n', (5419, 5468), False, 'import torch\n'), ((5486, 5552), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_data'], {'batch_size': 'test_batch_size'}), '(test_data, batch_size=test_batch_size)\n', (5513, 5552), False, 'import torch\n'), ((6359, 6398), 'smdebug.pytorch.get_hook', 'smd.get_hook', ([], {'create_if_not_exists': '(True)'}), '(create_if_not_exists=True)\n', (6371, 6398), True, 'import smdebug.pytorch as smd\n'), ((6482, 6494), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (6492, 6494), False, 'from torch import nn\n'), ((8113, 8138), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8136, 8138), False, 'import argparse\n'), ((2501, 2516), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2514, 2516), False, 'import torch\n'), ((2992, 3008), 'torch.exp', 'torch.exp', (['logps'], {}), '(logps)\n', (3001, 3008), False, 'import torch\n'), ((3448, 3473), 'torch.cuda.is_available', 
'torch.cuda.is_available', ([], {}), '()\n', (3471, 3473), False, 'import torch\n'), ((4331, 4360), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(30)'], {}), '(30)\n', (4356, 4360), False, 'from torchvision import datasets, transforms, models\n'), ((4405, 4438), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (4433, 4438), False, 'from torchvision import datasets, transforms, models\n'), ((4483, 4516), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (4514, 4516), False, 'from torchvision import datasets, transforms, models\n'), ((4561, 4582), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4580, 4582), False, 'from torchvision import datasets, transforms, models\n'), ((4627, 4693), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (4647, 4693), False, 'from torchvision import datasets, transforms, models\n'), ((4803, 4825), 'torchvision.transforms.Resize', 'transforms.Resize', (['(255)'], {}), '(255)\n', (4820, 4825), False, 'from torchvision import datasets, transforms, models\n'), ((4869, 4895), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (4890, 4895), False, 'from torchvision import datasets, transforms, models\n'), ((4939, 4960), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4958, 4960), False, 'from torchvision import datasets, transforms, models\n'), ((5004, 5070), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (5024, 5070), False, 'from torchvision import datasets, transforms, models\n'), ((8937, 8971), 'json.loads', 'json.loads', (["os.environ['SM_HOSTS']"], {}), 
"(os.environ['SM_HOSTS'])\n", (8947, 8971), False, 'import json\n'), ((3920, 3940), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(512)'], {}), '(1024, 512)\n', (3929, 3940), False, 'from torch import nn\n'), ((3960, 3969), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3967, 3969), False, 'from torch import nn\n'), ((3992, 4007), 'torch.nn.Dropout', 'nn.Dropout', (['(0.2)'], {}), '(0.2)\n', (4002, 4007), False, 'from torch import nn\n'), ((4026, 4045), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(133)'], {}), '(512, 133)\n', (4035, 4045), False, 'from torch import nn\n'), ((4067, 4087), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (4080, 4087), False, 'from torch import nn\n')] |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
from pymor.analyticalproblems.domaindescriptions import KNOWN_BOUNDARY_TYPES
from pymor.core.base import abstractmethod
from pymor.core.cache import CacheableObject, cached
from pymor.discretizers.builtin.inverse import inv_transposed_two_by_two
from pymor.discretizers.builtin.relations import inverse_relation
class ReferenceElement(CacheableObject):
    """Defines a reference element.
    All reference elements have the property that all subentities of a given codimension are of the
    same type. I.e. a three-dimensional reference element cannot have triangles and rectangles as
    faces at the same time.
    Attributes
    ----------
    dim
        The dimension of the reference element
    volume
        The volume of the reference element
    """
    dim = None
    volume = None
    cache_region = 'memory'
    @abstractmethod
    def size(self, codim):
        """Number of subentities of codimension `codim`."""
    @abstractmethod
    def subentities(self, codim, subentity_codim):
        """`subentities(c,sc)[i,j]` is, with respect to the indexing inside the
        reference element, the index of the `j`-th codim-`subentity_codim`
        subentity of the `i`-th codim-`codim` subentity of the reference element.
        """
        pass
    @abstractmethod
    def subentity_embedding(self, subentity_codim):
        """Returns a tuple `(A, B)` which defines the embedding of the codim-`subentity_codim`
        subentities into the reference element.
        For `subentity_codim > 1`, the embedding is by default given recursively via
        `subentity_embedding(subentity_codim - 1)` and
        `sub_reference_element(subentity_codim - 1).subentity_embedding(1)` choosing always
        the superentity with smallest index.
        """
        return self._subentity_embedding(subentity_codim)
    @cached
    def _subentity_embedding(self, subentity_codim):
        if subentity_codim > 1:
            A = []
            B = []
            for i in range(self.size(subentity_codim)):
                # find the codim-(subentity_codim - 1) parent with smallest index containing
                # subentity i, together with i's local index inside that parent
                P = np.where(self.subentities(subentity_codim - 1, subentity_codim) == i)
                parent_index, local_index = P[0][0], P[1][0]
                A0, B0 = self.subentity_embedding(subentity_codim - 1)
                A0 = A0[parent_index]
                B0 = B0[parent_index]
                A1, B1 = self.sub_reference_element(subentity_codim - 1).subentity_embedding(1)
                A1 = A1[local_index]
                B1 = B1[local_index]
                # compose the two affine maps: x -> A0 @ (A1 @ x + B1) + B0
                A.append(np.dot(A0, A1))
                B.append(np.dot(A0, B1) + B0)
            return np.array(A), np.array(B)
        else:
            # the direct (codim-1) embedding must be supplied by the concrete reference element
            raise NotImplementedError
    @abstractmethod
    def sub_reference_element(self, codim):
        """Returns the reference element of the codim-`codim` subentities."""
        return self._sub_reference_element(codim)
    @cached
    def _sub_reference_element(self, codim):
        if codim > 1:
            # recurse: the codim-c subelement is the codim-(c-1) subelement of the codim-1 one
            return self.sub_reference_element(1).sub_reference_element(codim - 1)
        else:
            # the codim-1 sub-reference element must be supplied by the concrete reference element
            raise NotImplementedError
    def __call__(self, codim):
        """Returns the reference element of the codim-`codim` subentities."""
        return self.sub_reference_element(codim)
    @abstractmethod
    def unit_outer_normals(self):
        """`retval[e]` is the unit outer-normal vector to the codim-1 subentity
        with index `e`.
        """
        pass
    @abstractmethod
    def center(self):
        """Coordinates of the barycenter."""
        pass
    @abstractmethod
    def mapped_diameter(self, A):
        """The diameter of the reference element after transforming it with the
        matrix `A` (vectorized).
        """
        pass
    @abstractmethod
    def quadrature(self, order=None, npoints=None, quadrature_type='default'):
        """Returns tuple `(P, W)` where `P` is an array of quadrature points with
        corresponding weights `W`.
        The quadrature is of order `order` or has `npoints` integration points.
        """
        pass
    @abstractmethod
    def quadrature_info(self):
        """Returns a tuple of dicts `(O, N)` where `O[quadrature_type]` is a list
        of orders which are implemented for `quadrature_type` and `N[quadrature_type]`
        is a list of the corresponding numbers of integration points.
        """
        pass
    def quadrature_types(self):
        """Returns the set of implemented quadrature type names."""
        o, _ = self.quadrature_info()
        return frozenset(o.keys())
class Grid(CacheableObject):
    """Topological grid with geometry where each codim-0 entity is affinely mapped to the same
    |ReferenceElement|.
    The grid is completely determined via the subentity relation given by :meth:`~Grid.subentities`
    and the embeddings given by :meth:`~Grid.embeddings`. In addition, only :meth:`~Grid.size` and
    :meth:`~Grid.reference_element` have to be implemented.
    """
    # all derived grid quantities computed below are memoized per instance
    cache_region = 'memory'
    @abstractmethod
    def size(self, codim):
        """The number of entities of codimension `codim`."""
        pass
    @abstractmethod
    def subentities(self, codim, subentity_codim):
        """`retval[e,s]` is the global index of the `s`-th codim-`subentity_codim` subentity of the
        codim-`codim` entity with global index `e`.
        The ordering of `subentities(0, subentity_codim)[e]` has to correspond, w.r.t. the embedding
        of `e`, to the local ordering inside the reference element.
        For `codim > 0`, we provide a default implementation by calculating the subentities of `e`
        as follows:
        1. Find the `codim-1` parent entity `e_0` of `e` with minimal global index
        2. Lookup the local indices of the subentities of `e` inside `e_0` using the reference
           element.
        3. Map these local indices to global indices using
           `subentities(codim - 1, subentity_codim)`.
        This procedure assures that `subentities(codim, subentity_codim)[e]` has the right ordering
        w.r.t. the embedding determined by `e_0`, which agrees with what is returned by
        `embeddings(codim)`
        """
        return self._subentities(codim, subentity_codim)
    @cached
    def _subentities(self, codim, subentity_codim):
        assert 0 <= codim <= self.dim, 'Invalid codimension'
        assert 0 < codim, 'Not implemented'
        P = self.superentities(codim, codim - 1)[:, 0]  # we assume here that superentities() is sorted by global index
        I = self.superentity_indices(codim, codim - 1)[:, 0]
        # SE[i]: global codim-`subentity_codim` subentities of the chosen parent of entity i
        SE = self.subentities(codim - 1, subentity_codim)[P]
        # RSE[i]: local indices (inside the parent's reference element) of i's subentities
        RSE = self.reference_element(codim - 1).subentities(1, subentity_codim - (codim - 1))[I]
        # translate local indices to global ones via each parent's subentity list
        SSE = np.empty_like(RSE)
        for i in range(RSE.shape[0]):
            SSE[i, :] = SE[i, RSE[i]]
        return SSE
    def superentities(self, codim, superentity_codim):
        """`retval[e,s]` is the global index of the `s`-th codim-`superentity_codim` superentity of
        the codim-`codim` entity with global index `e`.
        `retval[e]` is sorted by global index.
        The default implementation is to compute the result from
        `subentities(superentity_codim, codim)`.
        """
        return self._superentities(codim, superentity_codim)
    @cached
    def _superentities(self, codim, superentity_codim):
        return self._superentities_with_indices(codim, superentity_codim)[0]
    def superentity_indices(self, codim, superentity_codim):
        """`retval[e,s]` is the local index of the codim-`codim` entity `e` in the
        codim-`superentity_codim` superentity `superentities(codim, superentity_codim)[e,s].`
        """
        return self._superentity_indices(codim, superentity_codim)
    @cached
    def _superentity_indices(self, codim, superentity_codim):
        return self._superentities_with_indices(codim, superentity_codim)[1]
    @cached
    def _superentities_with_indices(self, codim, superentity_codim):
        assert 0 <= codim <= self.dim, f'Invalid codimension (was {codim})'
        assert 0 <= superentity_codim <= codim, f'Invalid codimension (was {superentity_codim})'
        # invert the subentity relation to obtain superentities plus local indices in one pass
        SE = self.subentities(superentity_codim, codim)
        return inverse_relation(SE, size_rhs=self.size(codim), with_indices=True)
    def neighbours(self, codim, neighbour_codim, intersection_codim=None):
        """`retval[e,n]` is the global index of the `n`-th codim-`neighbour_codim` entity of the
        codim-`codim` entity `e` that shares with `e` a subentity of codimension
        `intersection_codim`.
        If `intersection_codim == None`, it is set to `codim + 1` if `codim == neighbour_codim` and
        to `min(codim, neighbour_codim)` otherwise.
        The default implementation is to compute the result from
        `subentities(codim, intersection_codim)` and
        `superentities(intersection_codim, neighbour_codim)`.
        """
        return self._neighbours(codim, neighbour_codim, intersection_codim)
    @cached
    def _neighbours(self, codim, neighbour_codim, intersection_codim):
        assert 0 <= codim <= self.dim, 'Invalid codimension'
        assert 0 <= neighbour_codim <= self.dim, 'Invalid codimension'
        if intersection_codim is None:
            if codim == neighbour_codim:
                intersection_codim = codim + 1
            else:
                intersection_codim = min(codim, neighbour_codim)
        assert max(codim, neighbour_codim) <= intersection_codim <= self.dim, 'Invalid codimension'
        if intersection_codim == min(codim, neighbour_codim):
            # the intersection is one of the entities itself, so the neighbour relation
            # degenerates to the plain sub-/superentity relation
            if codim < neighbour_codim:
                return self.subentities(codim, neighbour_codim)
            elif codim > neighbour_codim:
                return self.superentities(codim, neighbour_codim)
            else:
                return np.zeros((self.size(codim), 0), dtype=np.int32)
        else:
            EI = self.subentities(codim, intersection_codim)
            ISE = self.superentities(intersection_codim, neighbour_codim)
            # worst-case sized result; -1 marks unused slots and -1 subentity indices
            NB = np.empty((EI.shape[0], EI.shape[1] * ISE.shape[1]), dtype=np.int32)
            NB.fill(-1)
            NB_COUNTS = np.zeros(EI.shape[0], dtype=np.int32)  # neighbours found per entity
            if codim == neighbour_codim:
                for ii, i in np.ndenumerate(EI):
                    if i >= 0:
                        for _, n in np.ndenumerate(ISE[i]):
                            # skip the entity itself and duplicates
                            if n != ii[0] and n not in NB[ii[0]]:
                                NB[ii[0], NB_COUNTS[ii[0]]] = n
                                NB_COUNTS[ii[0]] += 1
            else:
                for ii, i in np.ndenumerate(EI):
                    if i >= 0:
                        for _, n in np.ndenumerate(ISE[i]):
                            # skip duplicates
                            if n not in NB[ii[0]]:
                                NB[ii[0], NB_COUNTS[ii[0]]] = n
                                NB_COUNTS[ii[0]] += 1
            # trim never-used trailing columns
            NB = NB[:NB.shape[0], :NB_COUNTS.max()]
            return NB
    def boundary_mask(self, codim):
        """`retval[e]` is true iff the codim-`codim` entity with global index `e` is a boundary
        entity.
        By definition, a codim-1 entity is a boundary entity if it has only one codim-0 superentity.
        For `codim != 1`, a codim-`codim` entity is a boundary entity if it has a codim-1
        sub/super-entity.
        """
        return self._boundary_mask(codim)
    @cached
    def _boundary_mask(self, codim):
        M = np.zeros(self.size(codim), dtype='bool')
        B = self.boundaries(codim)
        if B.size > 0:
            M[self.boundaries(codim)] = True
        return M
    def boundaries(self, codim):
        """Returns the global indices of all codim-`codim` boundary entities.
        By definition, a codim-1 entity is a boundary entity if it has only one codim-0 superentity.
        For `codim != 1`, a codim-`codim` entity is a boundary entity if it has a codim-1
        sub/super-entity.
        """
        return self._boundaries(codim)
    @cached
    def _boundaries(self, codim):
        assert 0 <= codim <= self.dim, 'Invalid codimension'
        if codim == 1:
            SE = self.superentities(1, 0)
            # a codim-1 entity can have at most 2 superentities, and it is a boundary
            # if it has only one superentity
            if SE.shape[1] > 1:
                return np.where(np.any(SE == -1, axis=1))[0].astype('int32')
            else:
                return np.arange(SE.shape[0], dtype='int32')
        elif codim == 0:
            B1 = self.boundaries(1)
            if B1.size > 0:
                # codim-0 boundary entities are the superentities of the boundary facets
                B0 = np.unique(self.superentities(1, 0)[B1])
                return B0[1:] if B0[0] == -1 else B0
            else:
                return np.array([], dtype=np.int32)
        else:
            B1 = self.boundaries(1)
            if B1.size > 0:
                # higher-codim boundary entities are the subentities of the boundary facets
                BC = np.unique(self.subentities(1, codim)[B1])
                return BC[1:] if BC[0] == -1 else BC
            else:
                return np.array([], dtype=np.int32)
    @abstractmethod
    def reference_element(self, codim):
        """The |ReferenceElement| of the codim-`codim` entities."""
        pass
    @abstractmethod
    def embeddings(self, codim):
        """Returns tuple `(A, B)` where `A[e]` and `B[e]` are the linear part and the translation
        part of the map from the reference element of `e` to `e`.
        For `codim > 0`, we provide a default implementation by taking the embedding of the codim-1
        parent entity `e_0` of `e` with lowest global index and composing it with the
        subentity_embedding of `e` into `e_0` determined by the reference element.
        """
        return self._embeddings(codim)
    @cached
    def _embeddings(self, codim):
        assert codim > 0, NotImplemented
        E = self.superentities(codim, codim - 1)[:, 0]
        I = self.superentity_indices(codim, codim - 1)[:, 0]
        A0, B0 = self.embeddings(codim - 1)
        A0 = A0[E]
        B0 = B0[E]
        A1, B1 = self.reference_element(codim - 1).subentity_embedding(1)
        # compose the parent embedding (A0, B0) with the reference subentity embedding (A1, B1)
        A = np.zeros((E.shape[0], A0.shape[1], A1.shape[2]))
        B = np.zeros((E.shape[0], A0.shape[1]))
        for i in range(A1.shape[0]):
            INDS = np.where(I == i)[0]
            A[INDS] = np.dot(A0[INDS], A1[i])
            B[INDS] = np.dot(A0[INDS], B1[i]) + B0[INDS]
        return A, B
    def jacobian_inverse_transposed(self, codim):
        """`retval[e]` is the transposed (pseudo-)inverse of the Jacobian of `embeddings(codim)[e]`.
        """
        return self._jacobian_inverse_transposed(codim)
    @cached
    def _jacobian_inverse_transposed(self, codim):
        assert 0 <= codim < self.dim,\
            f'Invalid Codimension (must be between 0 and {self.dim} but was {codim})'
        J = self.embeddings(codim)[0]
        if J.shape[-1] == J.shape[-2] == 2:
            # fast vectorized closed-form path for 2x2 Jacobians
            JIT = inv_transposed_two_by_two(J)
        else:
            # general (and non-square) case via pseudo-inverse, entity by entity
            pinv = np.linalg.pinv
            JIT = np.array([pinv(j) for j in J]).swapaxes(1, 2)
        return JIT
    def integration_elements(self, codim):
        """`retval[e]` is given as `sqrt(det(A^T*A))`, where `A = embeddings(codim)[0][e]`."""
        return self._integration_elements(codim)
    @cached
    def _integration_elements(self, codim):
        assert 0 <= codim <= self.dim,\
            f'Invalid Codimension (must be between 0 and {self.dim} but was {codim})'
        if codim == self.dim:
            # point entities have unit integration element
            return np.ones(self.size(codim))
        J = self.embeddings(codim)[0]
        # Gram matrices J^T J of all embeddings at once
        JTJ = np.einsum('eji,ejk->eik', J, J)
        if JTJ.shape[1] == 1:
            D = JTJ.ravel()
        elif JTJ.shape[1] == 2:
            # closed-form 2x2 determinant
            D = (JTJ[:, 0, 0] * JTJ[:, 1, 1] - JTJ[:, 1, 0] * JTJ[:, 0, 1]).ravel()
        else:
            def f(A):
                return np.linalg.det(A)
            # NOTE(review): this branch takes det over J, not JTJ; combined with the
            # final sqrt this looks inconsistent with the docstring for dims > 2 and
            # fails for non-square J -- verify before relying on it
            D = np.array([f(j) for j in J])
        return np.sqrt(D)
    def volumes(self, codim):
        """`retval[e]` is the (dim-`codim`)-dimensional volume of the codim-`codim` entity with
        global index `e`."""
        return self._volumes(codim)
    @cached
    def _volumes(self, codim):
        assert 0 <= codim <= self.dim,\
            f'Invalid Codimension (must be between 0 and {self.dim} but was {codim})'
        if codim == self.dim:
            return np.ones(self.size(self.dim))
        # reference element volume scaled by each affine embedding's integration element
        return self.reference_element(codim).volume * self.integration_elements(codim)
    def volumes_inverse(self, codim):
        """`retval[e] = 1 / volumes(codim)[e]`."""
        return self._volumes_inverse(codim)
    @cached
    def _volumes_inverse(self, codim):
        return np.reciprocal(self.volumes(codim))
    def unit_outer_normals(self):
        """`retval[e,i]` is the unit outer normal to the i-th codim-1 subentity of the codim-0
        entity with global index `e`."""
        return self._unit_outer_normals()
    @cached
    def _unit_outer_normals(self):
        JIT = self.jacobian_inverse_transposed(0)
        # transform the reference element normals to each entity, then renormalize
        N = np.dot(JIT, self.reference_element(0).unit_outer_normals().T).swapaxes(1, 2)
        return N / np.apply_along_axis(np.linalg.norm, 2, N)[:, :, np.newaxis]
    def centers(self, codim):
        """`retval[e]` is the barycenter of the codim-`codim` entity with global index `e`."""
        return self._centers(codim)
    @cached
    def _centers(self, codim):
        assert 0 <= codim <= self.dim,\
            f'Invalid Codimension (must be between 0 and {self.dim} but was {codim})'
        A, B = self.embeddings(codim)
        C = self.reference_element(codim).center()
        # map the reference element center through every affine embedding
        return np.dot(A, C) + B
    def diameters(self, codim):
        """`retval[e]` is the diameter of the codim-`codim` entity with global index `e`."""
        return self._diameters(codim)
    @cached
    def _diameters(self, codim):
        assert 0 <= codim <= self.dim,\
            f'Invalid Codimension (must be between 0 and {self.dim} but was {codim})'
        return np.reshape(self.reference_element(codim).mapped_diameter(self.embeddings(codim)[0]), (-1,))
    def quadrature_points(self, codim, order=None, npoints=None, quadrature_type='default'):
        """`retval[e]` is an array of quadrature points in global coordinates for the codim-`codim`
        entity with global index `e`.
        The quadrature is of order `order` or has `npoints` integration points. To integrate a
        function `f` over `e` one has to form ::
            np.dot(f(quadrature_points(codim, order)[e]),
                   reference_element(codim).quadrature(order)[1]) *
            integration_elements(codim)[e].  # NOQA
        """
        return self._quadrature_points(codim, order, npoints, quadrature_type)
    @cached
    def _quadrature_points(self, codim, order, npoints, quadrature_type):
        P, _ = self.reference_element(codim).quadrature(order, npoints, quadrature_type)
        A, B = self.embeddings(codim)
        # map the reference quadrature points through every affine embedding
        return np.einsum('eij,kj->eki', A, P) + B[:, np.newaxis, :]
    def bounding_box(self):
        """returns a `(2, dim)`-shaped array containing lower/upper bounding box coordinates."""
        return self._bounding_box()
    @cached
    def _bounding_box(self):
        bbox = np.empty((2, self.dim))
        centers = self.centers(self.dim)  # centers of the codim-`dim` entities (the vertices)
        for dim in range(self.dim):
            bbox[0, dim] = np.min(centers[:, dim])
            bbox[1, dim] = np.max(centers[:, dim])
        return bbox
class GridWithOrthogonalCenters(Grid):
    """|Grid| with an additional `orthogonal_centers` method."""
    # no default implementation exists; concrete grids must supply these points
    @abstractmethod
    def orthogonal_centers(self):
        """`retval[e]` is a point inside the codim-0 entity with global index `e` such that the line
        segment from `retval[e]` to `retval[e2]` is always orthogonal to the codim-1 entity shared
        by the codim-0 entities with global index `e` and `e2`.
        (This is mainly useful for gradient approximation in finite volume schemes.)
        """
        pass
class BoundaryInfo(CacheableObject):
    """Provides boundary types for the boundaries of a given |Grid|.
    For every boundary type and codimension a mask is provided, marking grid entities of the
    respective type and codimension by their global index.
    Attributes
    ----------
    boundary_types
        set of all boundary types the grid has.
    """
    boundary_types = frozenset()
    cache_region = 'memory'
    def mask(self, boundary_type, codim):
        """retval[i] is `True` if the codim-`codim` entity of global index `i` is associated to the
        boundary type `boundary_type`."""
        # the base class knows no boundary types; subclasses override this method
        raise ValueError(f'Has no boundary_type "{boundary_type}"')
    def unique_boundary_type_mask(self, codim):
        """retval[i] is `True` if the codim-`codim` entity of global index `i` is associated to one
        and only one boundary type."""
        # use the builtin `int` instead of the `np.int` alias, which was deprecated in
        # NumPy 1.20 and removed in NumPy 1.24 (behavior is identical)
        return np.less_equal(sum(self.mask(bt, codim=codim).astype(int) for bt in self.boundary_types), 1)
    def no_boundary_type_mask(self, codim):
        """retval[i] is `True` if the codim-`codim` entity of global index `i` is associated to no
        boundary type."""
        return np.equal(sum(self.mask(bt, codim=codim).astype(int) for bt in self.boundary_types), 0)
    def check_boundary_types(self, assert_unique_type=(1,), assert_some_type=()):
        """Check the boundary types of this |BoundaryInfo|.
        Warns about boundary types not listed in `KNOWN_BOUNDARY_TYPES` and asserts, for the
        given codimensions, that each entity has a unique type (`assert_unique_type`) resp.
        at least one type (`assert_some_type`).
        """
        for bt in self.boundary_types:
            if bt not in KNOWN_BOUNDARY_TYPES:
                self.logger.warning(f'Unknown boundary type: {bt}')
        if assert_unique_type:
            for codim in assert_unique_type:
                assert np.all(self.unique_boundary_type_mask(codim))
        if assert_some_type:
            for codim in assert_some_type:
                assert not np.any(self.no_boundary_type_mask(codim))
    @property
    def has_dirichlet(self):
        return 'dirichlet' in self.boundary_types
    @property
    def has_neumann(self):
        return 'neumann' in self.boundary_types
    @property
    def has_robin(self):
        return 'robin' in self.boundary_types
    def dirichlet_mask(self, codim):
        return self.mask('dirichlet', codim)
    def neumann_mask(self, codim):
        return self.mask('neumann', codim)
    def robin_mask(self, codim):
        return self.mask('robin', codim)
    @cached
    def _boundaries(self, boundary_type, codim):
        # global indices of all codim-`codim` entities carrying `boundary_type`
        return np.where(self.mask(boundary_type, codim))[0].astype('int32')
    def boundaries(self, boundary_type, codim):
        """Returns the global indices of all codim-`codim` entities of type `boundary_type`."""
        return self._boundaries(boundary_type, codim)
    def dirichlet_boundaries(self, codim):
        return self._boundaries('dirichlet', codim)
    def neumann_boundaries(self, codim):
        return self._boundaries('neumann', codim)
    def robin_boundaries(self, codim):
        return self._boundaries('robin', codim)
| [
"numpy.any",
"numpy.sqrt",
"numpy.where",
"numpy.ndenumerate",
"pymor.discretizers.builtin.inverse.inv_transposed_two_by_two",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.einsum",
"numpy.empty_like",
"numpy.empty",
"numpy.min",
"numpy.apply_along_axis",
"numpy.arange",... | [((6935, 6953), 'numpy.empty_like', 'np.empty_like', (['RSE'], {}), '(RSE)\n', (6948, 6953), True, 'import numpy as np\n'), ((14299, 14347), 'numpy.zeros', 'np.zeros', (['(E.shape[0], A0.shape[1], A1.shape[2])'], {}), '((E.shape[0], A0.shape[1], A1.shape[2]))\n', (14307, 14347), True, 'import numpy as np\n'), ((14360, 14395), 'numpy.zeros', 'np.zeros', (['(E.shape[0], A0.shape[1])'], {}), '((E.shape[0], A0.shape[1]))\n', (14368, 14395), True, 'import numpy as np\n'), ((15764, 15795), 'numpy.einsum', 'np.einsum', (['"""eji,ejk->eik"""', 'J', 'J'], {}), "('eji,ejk->eik', J, J)\n", (15773, 15795), True, 'import numpy as np\n'), ((16107, 16117), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (16114, 16117), True, 'import numpy as np\n'), ((19402, 19425), 'numpy.empty', 'np.empty', (['(2, self.dim)'], {}), '((2, self.dim))\n', (19410, 19425), True, 'import numpy as np\n'), ((10270, 10337), 'numpy.empty', 'np.empty', (['(EI.shape[0], EI.shape[1] * ISE.shape[1])'], {'dtype': 'np.int32'}), '((EI.shape[0], EI.shape[1] * ISE.shape[1]), dtype=np.int32)\n', (10278, 10337), True, 'import numpy as np\n'), ((10386, 10423), 'numpy.zeros', 'np.zeros', (['EI.shape[0]'], {'dtype': 'np.int32'}), '(EI.shape[0], dtype=np.int32)\n', (10394, 10423), True, 'import numpy as np\n'), ((14494, 14517), 'numpy.dot', 'np.dot', (['A0[INDS]', 'A1[i]'], {}), '(A0[INDS], A1[i])\n', (14500, 14517), True, 'import numpy as np\n'), ((15104, 15132), 'pymor.discretizers.builtin.inverse.inv_transposed_two_by_two', 'inv_transposed_two_by_two', (['J'], {}), '(J)\n', (15129, 15132), False, 'from pymor.discretizers.builtin.inverse import inv_transposed_two_by_two\n'), ((17797, 17809), 'numpy.dot', 'np.dot', (['A', 'C'], {}), '(A, C)\n', (17803, 17809), True, 'import numpy as np\n'), ((19130, 19160), 'numpy.einsum', 'np.einsum', (['"""eij,kj->eki"""', 'A', 'P'], {}), "('eij,kj->eki', A, P)\n", (19139, 19160), True, 'import numpy as np\n'), ((19530, 19553), 'numpy.min', 'np.min', 
(['centers[:, dim]'], {}), '(centers[:, dim])\n', (19536, 19553), True, 'import numpy as np\n'), ((19581, 19604), 'numpy.max', 'np.max', (['centers[:, dim]'], {}), '(centers[:, dim])\n', (19587, 19604), True, 'import numpy as np\n'), ((2843, 2854), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (2851, 2854), True, 'import numpy as np\n'), ((2856, 2867), 'numpy.array', 'np.array', (['B'], {}), '(B)\n', (2864, 2867), True, 'import numpy as np\n'), ((10495, 10513), 'numpy.ndenumerate', 'np.ndenumerate', (['EI'], {}), '(EI)\n', (10509, 10513), True, 'import numpy as np\n'), ((10837, 10855), 'numpy.ndenumerate', 'np.ndenumerate', (['EI'], {}), '(EI)\n', (10851, 10855), True, 'import numpy as np\n'), ((12671, 12708), 'numpy.arange', 'np.arange', (['SE.shape[0]'], {'dtype': '"""int32"""'}), "(SE.shape[0], dtype='int32')\n", (12680, 12708), True, 'import numpy as np\n'), ((14452, 14468), 'numpy.where', 'np.where', (['(I == i)'], {}), '(I == i)\n', (14460, 14468), True, 'import numpy as np\n'), ((14540, 14563), 'numpy.dot', 'np.dot', (['A0[INDS]', 'B1[i]'], {}), '(A0[INDS], B1[i])\n', (14546, 14563), True, 'import numpy as np\n'), ((17301, 17342), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.linalg.norm', '(2)', 'N'], {}), '(np.linalg.norm, 2, N)\n', (17320, 17342), True, 'import numpy as np\n'), ((2762, 2776), 'numpy.dot', 'np.dot', (['A0', 'A1'], {}), '(A0, A1)\n', (2768, 2776), True, 'import numpy as np\n'), ((12953, 12981), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (12961, 12981), True, 'import numpy as np\n'), ((13217, 13245), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (13225, 13245), True, 'import numpy as np\n'), ((16030, 16046), 'numpy.linalg.det', 'np.linalg.det', (['A'], {}), '(A)\n', (16043, 16046), True, 'import numpy as np\n'), ((2803, 2817), 'numpy.dot', 'np.dot', (['A0', 'B1'], {}), '(A0, B1)\n', (2809, 2817), True, 'import numpy as np\n'), ((10582, 10604), 
'numpy.ndenumerate', 'np.ndenumerate', (['ISE[i]'], {}), '(ISE[i])\n', (10596, 10604), True, 'import numpy as np\n'), ((10924, 10946), 'numpy.ndenumerate', 'np.ndenumerate', (['ISE[i]'], {}), '(ISE[i])\n', (10938, 10946), True, 'import numpy as np\n'), ((12585, 12609), 'numpy.any', 'np.any', (['(SE == -1)'], {'axis': '(1)'}), '(SE == -1, axis=1)\n', (12591, 12609), True, 'import numpy as np\n')] |
import numpy
import pytest
from nereid.src.tmnt_performance import tmnt
from nereid.src.tmnt_performance import tasks
@pytest.fixture
def eff_conc_mapping(tmnt_params):
    """Map (facility, pollutant) keys to effluent-concentration functions."""
    mapping = tmnt.build_effluent_function_map(tmnt_params, "bmp", "pollutant")
    return mapping
@pytest.fixture
def pollutant_facilities_map(KTRL_curves):
    """For each pollutant, list the facility-type columns of its KTRL curve table."""
    facilities = {}
    for pollutant, curve in KTRL_curves.items():
        facilities[pollutant] = [
            col for col in curve.columns if col not in ["xhat", "param"]
        ]
    return facilities
@pytest.fixture
def pollutant_units_map(tmnt_params):
    """Map each pollutant name to its concentration unit string."""
    indexed = tmnt_params.set_index("pollutant")
    return indexed["unit"].to_dict()
@pytest.mark.parametrize(
    "poc",
    [
        "Dissolved Copper",
        "Dissolved Zinc",
        "Fecal Coliform",
        "Total Copper",
        "Total Lead",
        "Total Nitrogen",
        "Total Phosphorus",
        "Total Suspended Solids",
        "Total Zinc",
    ],
)
def test_eff_concs(
    KTRL_curves, eff_conc_mapping, pollutant_facilities_map, pollutant_units_map, poc
):
    """Effluent functions reproduce the reference KTRL curve for every facility type."""
    curve_table = KTRL_curves[poc]
    influent_concs = curve_table["xhat"]
    unit = pollutant_units_map[poc]
    for fac_type in pollutant_facilities_map[poc]:
        expected = curve_table[fac_type]
        effluent_fxn = eff_conc_mapping[(fac_type, poc)]
        computed = [effluent_fxn(conc, unit) for conc in influent_concs]
        numpy.testing.assert_allclose(computed, expected)
@pytest.mark.parametrize(
    "inf_conc, inf_unit, kwargs, exp",
    [
        (0, "mg/l", {}, 1e-17),  # nearly zero influent
        (5, "mg/l", {"A": 2}, 2),  # A pins the effluent to a constant
        (5, "mg/l", {"B": 2}, 5),  # effluent can't exceed influent, so out == in
        (5, "mg/l", {"B": 0.5}, 2.5),  # effluent is half of influent
        (5, "mg/l", {"B": 0.5, "unit": "lbs/cubic_feet"}, 2.5),  # units are handled
    ],
)
def test_eff_conc_varied_input(eff_conc_mapping, inf_conc, inf_unit, kwargs, exp):
    """Spot-check effluent_conc against hand-computed expectations."""
    result = tmnt.effluent_conc(inf_conc, inf_unit, **kwargs)
    error = abs(exp - result)
    assert error < 1e-3
@pytest.mark.parametrize("cxt_key", ["default"])
def test_tmnt_task(
    contexts, KTRL_curves, pollutant_facilities_map, pollutant_units_map, cxt_key
):
    """The task-level effluent function map matches the reference KTRL curves."""
    context = contexts[cxt_key]
    fxn_map = tasks.effluent_function_map(context=context)
    for (fac_type, poc), eff_fxn in fxn_map.items():
        curve_table = KTRL_curves[poc]
        influent_concs = curve_table["xhat"]
        expected = curve_table[fac_type]
        unit = pollutant_units_map[poc]
        computed = [eff_fxn(conc, unit) for conc in influent_concs]
        numpy.testing.assert_allclose(computed, expected)
| [
"numpy.testing.assert_allclose",
"nereid.src.tmnt_performance.tasks.effluent_function_map",
"pytest.mark.parametrize",
"nereid.src.tmnt_performance.tmnt.effluent_conc",
"nereid.src.tmnt_performance.tmnt.build_effluent_function_map"
] | [((571, 775), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""poc"""', "['Dissolved Copper', 'Dissolved Zinc', 'Fecal Coliform', 'Total Copper',\n 'Total Lead', 'Total Nitrogen', 'Total Phosphorus',\n 'Total Suspended Solids', 'Total Zinc']"], {}), "('poc', ['Dissolved Copper', 'Dissolved Zinc',\n 'Fecal Coliform', 'Total Copper', 'Total Lead', 'Total Nitrogen',\n 'Total Phosphorus', 'Total Suspended Solids', 'Total Zinc'])\n", (594, 775), False, 'import pytest\n'), ((1320, 1550), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inf_conc, inf_unit, kwargs, exp"""', "[(0, 'mg/l', {}, 1e-17), (5, 'mg/l', {'A': 2}, 2), (5, 'mg/l', {'B': 2}, 5),\n (5, 'mg/l', {'B': 0.5}, 2.5), (5, 'mg/l', {'B': 0.5, 'unit':\n 'lbs/cubic_feet'}, 2.5)]"], {}), "('inf_conc, inf_unit, kwargs, exp', [(0, 'mg/l', {},\n 1e-17), (5, 'mg/l', {'A': 2}, 2), (5, 'mg/l', {'B': 2}, 5), (5, 'mg/l',\n {'B': 0.5}, 2.5), (5, 'mg/l', {'B': 0.5, 'unit': 'lbs/cubic_feet'}, 2.5)])\n", (1343, 1550), False, 'import pytest\n'), ((1902, 1949), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cxt_key"""', "['default']"], {}), "('cxt_key', ['default'])\n", (1925, 1949), False, 'import pytest\n'), ((183, 248), 'nereid.src.tmnt_performance.tmnt.build_effluent_function_map', 'tmnt.build_effluent_function_map', (['tmnt_params', '"""bmp"""', '"""pollutant"""'], {}), "(tmnt_params, 'bmp', 'pollutant')\n", (215, 248), False, 'from nereid.src.tmnt_performance import tmnt\n'), ((1817, 1865), 'nereid.src.tmnt_performance.tmnt.effluent_conc', 'tmnt.effluent_conc', (['inf_conc', 'inf_unit'], {}), '(inf_conc, inf_unit, **kwargs)\n', (1835, 1865), False, 'from nereid.src.tmnt_performance import tmnt\n'), ((2110, 2154), 'nereid.src.tmnt_performance.tasks.effluent_function_map', 'tasks.effluent_function_map', ([], {'context': 'context'}), '(context=context)\n', (2137, 2154), False, 'from nereid.src.tmnt_performance import tasks\n'), ((1265, 1316), 'numpy.testing.assert_allclose', 
'numpy.testing.assert_allclose', (['results', 'check_curve'], {}), '(results, check_curve)\n', (1294, 1316), False, 'import numpy\n'), ((2418, 2469), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['results', 'check_curve'], {}), '(results, check_curve)\n', (2447, 2469), False, 'import numpy\n')] |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
  Module for the PassthroughRunner class, which skips evaluation. It is currently
  used in particular for restarting Samplers from existing data.
"""
import numpy as np
from .Runner import Runner
class PassthroughRunner(Runner):
  """
    A runner for realizations whose results are already known; it performs no
    evaluation but still goes through the usual runner mechanics.
  """
  def __init__(self, data, func, **kwargs):
    """
      Construct
      @ In, data, dict, fully-evaluated realization
      @ In, func, None, placeholder for consistency with other runners
      @ In, kwargs, dict, additional arguments to pass to base
      @ Out, None
    """
    super().__init__(**kwargs)
    self._data = data    # completed realization data
    self.returnCode = 0  # a passthrough always "succeeds"
  def isDone(self):
    """
      Method to check if the calculation associated with this Runner is finished
      @ In, None
      @ Out, isDone, bool, is it finished?
    """
    # nothing ever runs, so the job is finished from the start
    return True
  def getReturnCode(self):
    """
      Returns the return code from "running the code."
      @ In, None
      @ Out, returnCode, int, the return code of this evaluation
    """
    return self.returnCode
  def getEvaluation(self):
    """
      Return solution.
      @ In, None
      @ Out, result, dict, results
    """
    result = {}
    # flatten the three realization sections into one dict of 1-d arrays
    for section in ('inputs', 'outputs', 'metadata'):
      for key, value in self._data[section].items():
        result[key] = np.atleast_1d(value)
    return result
  def start(self):
    """
      Method to start the job associated to this Runner
      @ In, None
      @ Out, None
    """
    # nothing to start; the "job" is already complete
    pass
  def kill(self):
    """
      Method to kill the job associated to this Runner
      @ In, None
      @ Out, None
    """
    # nothing to kill; there is no running job
    pass
| [
"numpy.atleast_1d"
] | [((1936, 1956), 'numpy.atleast_1d', 'np.atleast_1d', (['value'], {}), '(value)\n', (1949, 1956), True, 'import numpy as np\n'), ((2036, 2056), 'numpy.atleast_1d', 'np.atleast_1d', (['value'], {}), '(value)\n', (2049, 2056), True, 'import numpy as np\n'), ((2137, 2157), 'numpy.atleast_1d', 'np.atleast_1d', (['value'], {}), '(value)\n', (2150, 2157), True, 'import numpy as np\n')] |
#!usr/bin/env python
"""Main function calls: simulate linear-regression data and run model
selection on it with a local Kullback-Leibler divergence criterion."""
import numpy as np
from scipy import stats
import imp
import summary_stats as sum_stat
import simulate_data as sim
import importance_sampler as isam
import estimators as estim
import selection as select
import assess_sv_estimation as a
__author__ = "yasc"
__date_created__ = "05 July 2016"
# Design matrix: 30 observations of 2 standard-normal regressors.
# NOTE(review): no RNG seed is set, so each run produces different data.
obs_x = np.matrix(np.random.randn(30, 2))
# Simulated responses from a linear model; presumably the arguments are
# (coefficients, noise scale, design matrix, number of data sets) --
# TODO confirm against sim.sim_lin_reg's signature.
obs_y = sim.sim_lin_reg(np.matrix([1, 1, 1]), 0.5, obs_x, 1)
# Alternative selection criteria (kept for reference; enable one as needed):
# Integrated expected Bayesian loss (EBL)
#expected_Bayes_loss = select.select_lin_reg(obs_y, obs_x, 1000, 100, 1000,
#'abs_dev_knn')
# Local expected Bayes loss (EBL)
#expected_Bayes_loss = select.select_lin_reg(obs_y, obs_x, 1000, 100, 1000,
#'local_abs_dev')
# Integrated Kullback-Leibler divergence (KLD)
#expected_Bayes_loss = select.select_lin_reg(obs_y, obs_x, 1000, 100, 1000,
#'integrated_kl')
# Local Kullback-Leibler divergence (KLD)
# NOTE(review): the three 3000s are presumably sample-size settings for the
# sampler/estimator -- verify their meaning in select.select_lin_reg.
expected_Bayes_loss = select.select_lin_reg(obs_y, obs_x, 3000, 3000, 3000,
                                            'local_kl')
| [
"selection.select_lin_reg",
"numpy.matrix",
"numpy.random.randn"
] | [((1048, 1113), 'selection.select_lin_reg', 'select.select_lin_reg', (['obs_y', 'obs_x', '(3000)', '(3000)', '(3000)', '"""local_kl"""'], {}), "(obs_y, obs_x, 3000, 3000, 3000, 'local_kl')\n", (1069, 1113), True, 'import selection as select\n'), ((360, 382), 'numpy.random.randn', 'np.random.randn', (['(30)', '(2)'], {}), '(30, 2)\n', (375, 382), True, 'import numpy as np\n'), ((408, 428), 'numpy.matrix', 'np.matrix', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (417, 428), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.