#! /usr/bin/env python
# -*- coding: iso-8859-15 -*-
# Updates:
# 2019-04-10: CLD incorporated instrument sensitivity assessment
# to convert transmission to units of % of expected stellar flux
import mircx_pipeline as mrx;
import argparse, glob, os;
import numpy as np;
from astropy.io import fits as pyfits;
import matplotlib.pyplot as plt;
from mircx_pipeline import log, setup, plot, files, signal, headers, qc;
from mircx_pipeline.headers import HM, HMQ, HMP;
#
# Implement options
#
# Describe the script
description = \
"""
description:
Plot a report of data reduced by the pipeline.
Should be run in a directory where the OIFITS
are stored or use the option --oifits-dir
"""
epilog = \
"""
examples:
cd /my/reduced/data/oifits/
mircx_report.py
"""
TrueFalse = ['TRUE','FALSE'];
parser = argparse.ArgumentParser (description=description, epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=True);
parser.add_argument ("--oifits-dir", dest="oifits_dir",default='./',type=str,
help="directory of products [%(default)s]");
parser.add_argument ("--snr-threshold", dest="snr_threshold", type=float,
default=5.0, help="SNR threshold for plotting value [%(default)s]");
parser.add_argument ("--vis2-threshold", dest="vis2_threshold", type=float,
default=0.1, help="Vis2 threshold for plotting TF value [%(default)s]");
parser.add_argument ("--only-reference", dest="only_reference",default='FALSE',
choices=TrueFalse,
help="Use only REFERENCE stars [%(default)s]");
#
# Initialisation
#
# Matplotlib
import matplotlib as mpl;
mpl.rcParams['lines.markersize'] = 2;
# Ignore divide/invalid warnings (used below to mask bad points)
np.seterr (divide='ignore',invalid='ignore');
# Parse argument
argopt = parser.parse_args ();
# Verbose
elog = log.trace ('mircx_report');
# List of the 15 baseline names
bname = setup.base_name ();
# Load all the headers
hdrs = mrx.headers.loaddir (argopt.oifits_dir);
# Sort the headers by time
ids = np.argsort ([h['MJD-OBS'] for h in hdrs]);
hdrs = [hdrs[i] for i in ids];
# Keep only reference stars
if argopt.only_reference == 'TRUE':
hdrs = [h for h in hdrs if 'OBJECT_TYPE' in h and h['OBJECT_TYPE'] == 'REFERENCE'];
#
# Query CDS to build a catalog of object information
# This is protected since it may fail
#
# Load astroquery
try:
from astroquery.vizier import Vizier;
log.info ('Load astroquery.vizier');
except:
log.warning ('Cannot load astroquery.vizier, try:');
log.warning ('sudo conda install -c astropy astroquery');
# List of unique object names, with dummy entries removed
objlist = list(set([h['OBJECT'] for h in hdrs]));
exclude = ['NOSTAR', '', 'STS'];
objlist[:] = [x for x in objlist if x not in exclude];
objcat = dict();
for obj in objlist:
try:
cat = Vizier.query_object (obj, catalog='JSDC')[0];
log.info ('Find JSDC for '+obj);
log.info ("diam = %.3f mas"%cat['UDDH'][0]);
log.info ("Hmag = %.3f mas"%cat['Hmag'][0]);
objcat[obj] = cat;
except:
log.info ('Cannot find JSDC for '+obj);
exclude.append(obj)
for h in hdrs:
if h['OBJECT'] not in exclude:
# If we have the info about this star
try:
diam = objcat[h['OBJECT']]['UDDH'][0]
# Loop on baseline
for b in bname:
vis2 = h[HMQ+'VISS'+b+' MEAN'];
spf = h[HMQ+'BASELENGTH'+b] / h['EFF_WAVE'];
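                # UDDH is in mas; 1 mas = 4.84813681109536e-9 rad, so the
                # argument of the Airy function is dimensionless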
vis2m = signal.airy (diam * spf * 4.84813681109536e-09)**2;
h[HMQ+'TF'+b+' MEAN'] = vis2/vis2m;
h[HMQ+'VISSM'+b+' MEAN'] = vis2m;
        # If we don't have the info about this star (no JSDC entry, or
        # a missing QC keyword in the header)
        except (NameError, KeyError):
            for b in bname:
                h[HMQ+'TF'+b+' MEAN'] = -1.0;
else:
log.info('Excluding '+h['OBJECT']+' from report summary plots')
for b in bname:
h[HMQ+'TF'+b+' MEAN'] = -1.0;
#
# Plots
#
# Plot coherence
fig,axes = plt.subplots (5,3,sharex=True);
fig.suptitle ('Decoherence Half Time [ms]');
plot.base_name (axes);
plot.compact (axes);
for b in range (15):
data = headers.getval (hdrs, HMQ+'DECOHER'+bname[b]+'_HALF');
snr = headers.getval (hdrs, HMQ+'SNRB'+bname[b]+' MEAN');
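    # dividing by the boolean SNR mask sends rejected points to inf/nan
    # (warnings silenced by np.seterr above), so they are simply not drawn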
data /= (snr>argopt.snr_threshold);
axes.flatten()[b].plot (data, 'o');
axes.flatten()[b].set_ylim (0);
files.write (fig,argopt.oifits_dir+'/report_decoher.png');
# Plot SNR
fig,axes = plt.subplots (5,3,sharex=True);
fig.suptitle ('SNR');
plot.base_name (axes);
plot.compact (axes);
for b in range (15):
data = headers.getval (hdrs, HMQ+'SNR'+bname[b]+' MEAN');
data /= (data>0);
axes.flatten()[b].plot (data, 'o');
axes.flatten()[b].set_yscale ('log');
files.write (fig,argopt.oifits_dir+'/report_snr.png');
# Plot TF
fig,axes = plt.subplots (5,3,sharex=True);
fig.suptitle ('Transfer Function');
plot.base_name (axes);
plot.compact (axes);
for b in range (15):
data = headers.getval (hdrs, HMQ+'TF'+bname[b]+' MEAN');
vis2 = headers.getval (hdrs, HMQ+'VISS'+bname[b]+' MEAN');
snr = headers.getval (hdrs, HMQ+'SNRB'+bname[b]+' MEAN');
data /= (snr>argopt.snr_threshold);
data /= (vis2>argopt.vis2_threshold);
data /= (data>0);
axes.flatten()[b].plot (data, 'o');
axes.flatten()[b].set_ylim (0,1.2);
files.write (fig,argopt.oifits_dir+'/report_tf2.png');
# Plot vis2
fig,axes = plt.subplots (5,3,sharex=True);
fig.suptitle ('Vis2');
plot.base_name (axes);
plot.compact (axes);
for b in range (15):
data = headers.getval (hdrs, HMQ+'VISS'+bname[b]+' MEAN');
snr = headers.getval (hdrs, HMQ+'SNRB'+bname[b]+' MEAN');
data /= (snr>argopt.snr_threshold);
data /= (data>0);
axes.flatten()[b].plot (data, 'o');
# data2 = headers.getval (hdrs, HMQ+'VISSM'+bname[b]+' MEAN');
# axes.flatten()[b].plot (data2, 'o', alpha=0.1);
files.write (fig,argopt.oifits_dir+'/report_vis2.png');
#! /usr/bin/env python3
import os
import math
import h5py as h5
import numpy as np
import sharpy.utils.algebra as algebra
import sharpy.utils.generate_cases as gc
def generate(x_dict={}, case_name=None):
"""
"""
if case_name is None:
case_name = 'base'
route = os.path.dirname(os.path.realpath(__file__)) + '/'
case_notes = str(x_dict)
# EXECUTION
flow = ['BeamLoader',
'AerogridLoader',
# 'StaticTrim',
'StaticCoupled',
'BeamLoads',
'DynamicCoupled',
'PickleData'
]
    # FLIGHT CONDITIONS
    # The simulation is set up so that the aircraft is accelerated up to
    # the release velocity u_inf while the surrounding air is nearly calm
    # (u_background below).
try:
u_inf = x_dict['release_velocity']
except KeyError:
print('Using default value of 10 for u_inf')
u_inf = 10
u_inf_cruise = 10
original_u_inf = u_inf
u_background = 0.5
rho = 1.225
# trim sigma = 1.5
try:
alpha_cato_delta = x_dict['dAoA']*np.pi/180
except KeyError:
print('Using default value of 0 for dAoA')
alpha_cato_delta = 0*np.pi/180
try:
ramp_angle = x_dict['ramp_angle']*np.pi/180
except KeyError:
print('Using default value of 0 for ramp_angle')
ramp_angle = 0.0
alpha = 4.0782*np.pi/180 + alpha_cato_delta
beta = 0
roll = 0
gravity = 'on'
cs_deflection = -1.2703*np.pi/180
rudder_static_deflection = 0.0
# rudder_step = 0.0*np.pi/180
thrust = 3.8682
sigma = 1.5
lambda_dihedral = 20*np.pi/180
# trajectory
t_start = 1.5
try:
acceleration = x_dict['acceleration']
except KeyError:
print('Using default value of 3 for acceleration')
acceleration = 3.
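    # time needed to reach the release velocity at constant acceleration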
t_ramp = u_inf/acceleration
t_finish = t_start + t_ramp
t_free = 8
controller_ramp = -1
# numerics
n_step = 1
relaxation_factor = 0.4
tolerance = 1e-6
fsi_tolerance = 1e-5
structural_substeps = 1
num_cores = 4
# MODEL GEOMETRY
# beam
span_main = 16.0
lambda_main = 0.25
ea_main = 0.3
ea = 1e7
ga = 1e5
gj = 1e4
eiy = 2e4
eiz = 4e5
m_bar_main = 0.75
j_bar_main = 0.075
length_fuselage = 10
offset_fuselage = 0
sigma_fuselage = 10
m_bar_fuselage = 0.2
j_bar_fuselage = 0.08
span_tail = 2.5
ea_tail = 0.5
fin_height = 2.5
ea_fin = 0.5
sigma_tail = 10
m_bar_tail = 0.3
j_bar_tail = 0.08
# lumped masses
n_lumped_mass = 1
lumped_mass_nodes = np.zeros((n_lumped_mass, ), dtype=int)
lumped_mass = np.zeros((n_lumped_mass, ))
lumped_mass[0] = 50
lumped_mass_inertia = np.zeros((n_lumped_mass, 3, 3))
lumped_mass_position = np.zeros((n_lumped_mass, 3))
# aero
chord_main = 1.0
chord_tail = 0.5
chord_fin = 0.5
# DISCRETISATION
# spatial discretisation
    # chordwise panels
m = 4
# spanwise elements
n_elem_multiplier = 2
n_elem_main = int(4*n_elem_multiplier)
n_elem_tail = int(2*n_elem_multiplier)
n_elem_fin = int(2*n_elem_multiplier)
n_elem_fuselage = int(2*n_elem_multiplier)
n_surfaces = 5
# temporal discretisation
physical_time = t_finish + t_free
tstep_factor = 1.
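    # convention: the flow travels one chordwise panel length per step at
    # cruise speed (chord_main = 1, so dt = chord/(m*u_inf_cruise))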
dt = 1.0/m/u_inf_cruise*tstep_factor
n_tstep = int(round(physical_time/dt))
# END OF INPUT-----------------------------------------------------------------
end_of_fuselage_node = 0
flat_end_node = np.zeros((2, ), dtype=int) - 1
# beam processing
n_node_elem = 3
span_main1 = (1.0 - lambda_main)*span_main
span_main2 = lambda_main*span_main
n_elem_main1 = round(n_elem_main*(1 - lambda_main))
n_elem_main2 = n_elem_main - n_elem_main1
# total number of elements
n_elem = 0
n_elem += n_elem_main1 + n_elem_main1
n_elem += n_elem_main2 + n_elem_main2
n_elem += n_elem_fuselage
n_elem += n_elem_fin
n_elem += n_elem_tail + n_elem_tail
# number of nodes per part
n_node_main1 = n_elem_main1*(n_node_elem - 1) + 1
n_node_main2 = n_elem_main2*(n_node_elem - 1) + 1
n_node_main = n_node_main1 + n_node_main2 - 1
n_node_fuselage = n_elem_fuselage*(n_node_elem - 1) + 1
n_node_fin = n_elem_fin*(n_node_elem - 1) + 1
n_node_tail = n_elem_tail*(n_node_elem - 1) + 1
# total number of nodes
n_node = 0
n_node += n_node_main1 + n_node_main1 - 1
n_node += n_node_main2 - 1 + n_node_main2 - 1
n_node += n_node_fuselage - 1
n_node += n_node_fin - 1
n_node += n_node_tail - 1
n_node += n_node_tail - 1
# stiffness and mass matrices
n_stiffness = 3
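    # diagonal entries: EA, GA_y, GA_z, GJ, EI_y, EI_z, scaled by sigma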
base_stiffness_main = sigma*np.diag([ea, ga, ga, gj, eiy, eiz])
base_stiffness_fuselage = base_stiffness_main.copy()*sigma_fuselage
base_stiffness_fuselage[4, 4] = base_stiffness_fuselage[5, 5]
base_stiffness_tail = base_stiffness_main.copy()*sigma_tail
base_stiffness_tail[4, 4] = base_stiffness_tail[5, 5]
n_mass = 3
base_mass_main = np.diag([m_bar_main, m_bar_main, m_bar_main, j_bar_main, 0.5*j_bar_main, 0.5*j_bar_main])
base_mass_fuselage = np.diag([m_bar_fuselage,
m_bar_fuselage,
m_bar_fuselage,
j_bar_fuselage,
j_bar_fuselage*0.5,
j_bar_fuselage*0.5])
base_mass_tail = np.diag([m_bar_tail,
m_bar_tail,
m_bar_tail,
j_bar_tail,
j_bar_tail*0.5,
j_bar_tail*0.5])
# PLACEHOLDERS
# beam
x = np.zeros((n_node, ))
y = np.zeros((n_node, ))
z = np.zeros((n_node, ))
beam_number = np.zeros((n_elem, ), dtype=int)
frame_of_reference_delta = np.zeros((n_elem, n_node_elem, 3))
structural_twist = np.zeros((n_elem, n_node_elem))
conn = np.zeros((n_elem, n_node_elem), dtype=int)
stiffness = np.zeros((n_stiffness, 6, 6))
elem_stiffness = np.zeros((n_elem, ), dtype=int)
mass = np.zeros((n_mass, 6, 6))
elem_mass = np.zeros((n_elem, ), dtype=int)
boundary_conditions = np.zeros((n_node, ), dtype=int)
app_forces = np.zeros((n_node, 6))
trajectory_file = route + '/' + case_name + '.traj.csv'
LC = None
first_node_centre = np.zeros((2, ), dtype=int)
# aero
airfoil_distribution = np.zeros((n_elem, n_node_elem), dtype=int)
surface_distribution = np.zeros((n_elem,), dtype=int) - 1
surface_m = np.zeros((n_surfaces, ), dtype=int)
m_distribution = 'uniform'
aero_node = np.zeros((n_node,), dtype=bool)
twist = np.zeros((n_elem, n_node_elem))
sweep = np.zeros((n_elem, n_node_elem))
chord = np.zeros((n_elem, n_node_elem,))
elastic_axis = np.zeros((n_elem, n_node_elem,))
# FUNCTIONS-------------------------------------------------------------
def clean_test_files():
fem_file_name = route + '/' + case_name + '.fem.h5'
if os.path.isfile(fem_file_name):
os.remove(fem_file_name)
dyn_file_name = route + '/' + case_name + '.dyn.h5'
if os.path.isfile(dyn_file_name):
os.remove(dyn_file_name)
aero_file_name = route + '/' + case_name + '.aero.h5'
if os.path.isfile(aero_file_name):
os.remove(aero_file_name)
solver_file_name = route + '/' + case_name + '.sharpy'
if os.path.isfile(solver_file_name):
os.remove(solver_file_name)
traj_file_name = route + '/' + case_name + '.traj.csv'
if os.path.isfile(traj_file_name):
os.remove(traj_file_name)
flightcon_file_name = route + '/' + case_name + '.flightcon.txt'
if os.path.isfile(flightcon_file_name):
os.remove(flightcon_file_name)
def generate_trajectory_file():
it_start = math.ceil(t_start/dt)
it_ramp = math.ceil(t_ramp/dt)
it_total = it_start + it_ramp
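        # constant-acceleration run along the inclined ramp: hold for
        # t_start, then x(t) = -a*cos(ramp_angle)*t^2/2 and
        # z(t) = a*sin(ramp_angle)*t^2/2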
out_t = np.linspace(0, it_total*dt, it_total)
out_x = np.zeros((it_total, ))
out_y = np.zeros((it_total, ))
out_z = np.zeros((it_total, ))
t_hist_ramp = np.linspace(0, dt*it_ramp, it_ramp)
x_ramp = -0.5*np.cos(ramp_angle)*acceleration*t_hist_ramp**2
z_ramp = 0.5*np.sin(ramp_angle)*acceleration*t_hist_ramp**2
out_x[:it_start] = 0.0
out_x[it_start:] = x_ramp
out_z[it_start:] = z_ramp
out = np.zeros((it_total, 4))
out[:, 0] = out_t
out[:, 1] = out_x
out[:, 2] = out_y
out[:, 3] = out_z
np.savetxt(trajectory_file, out, delimiter=',')
def generate_dyn_file():
        # dt, n_tstep, route, case_name, etc. are read straight from the
        # enclosing scope of generate(); declaring them 'global' here would
        # be wrong (they are not module-level names).
dynamic_forces_time = None
with_dynamic_forces = False
with_forced_vel = False
if with_dynamic_forces:
f1 = 100
            dynamic_forces = np.zeros((n_node, 6))
            app_node = [int(n_node_main - 1), int(n_node_main)]
dynamic_forces[app_node, 2] = f1
force_time = np.zeros((n_tstep, ))
limit = round(0.05/dt)
force_time[50:61] = 1
            dynamic_forces_time = np.zeros((n_tstep, n_node, 6))
for it in range(n_tstep):
dynamic_forces_time[it, :, :] = force_time[it]*dynamic_forces
forced_for_vel = None
if with_forced_vel:
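            # NOTE: 'amplitude' and 'period' are not defined anywhere in
            # this script; they must be set before enabling this branch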
forced_for_vel = np.zeros((n_tstep, 6))
forced_for_acc = np.zeros((n_tstep, 6))
for it in range(n_tstep):
# if dt*it < period:
# forced_for_vel[it, 2] = 2*np.pi/period*amplitude*np.sin(2*np.pi*dt*it/period)
# forced_for_acc[it, 2] = (2*np.pi/period)**2*amplitude*np.cos(2*np.pi*dt*it/period)
forced_for_vel[it, 3] = 2*np.pi/period*amplitude*np.sin(2*np.pi*dt*it/period)
forced_for_acc[it, 3] = (2*np.pi/period)**2*amplitude*np.cos(2*np.pi*dt*it/period)
if with_dynamic_forces or with_forced_vel:
with h5.File(route + '/' + case_name + '.dyn.h5', 'a') as h5file:
if with_dynamic_forces:
h5file.create_dataset(
'dynamic_forces', data=dynamic_forces_time)
if with_forced_vel:
h5file.create_dataset(
'for_vel', data=forced_for_vel)
h5file.create_dataset(
'for_acc', data=forced_for_acc)
h5file.create_dataset(
'num_steps', data=n_tstep)
def generate_fem():
stiffness[0, ...] = base_stiffness_main
stiffness[1, ...] = base_stiffness_fuselage
stiffness[2, ...] = base_stiffness_tail
mass[0, ...] = base_mass_main
mass[1, ...] = base_mass_fuselage
mass[2, ...] = base_mass_tail
we = 0
wn = 0
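        # each 3-noded beam element lists its nodes as [first, last,
        # middle], hence the [0, 2, 1] offsets in the connectivities below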
# inner right wing
beam_number[we:we + n_elem_main1] = 0
y[wn:wn + n_node_main1] = np.linspace(0.0, span_main1, n_node_main1)
for ielem in range(n_elem_main1):
conn[we + ielem, :] = ((np.ones((3, ))*(we + ielem)*(n_node_elem - 1)) +
[0, 2, 1])
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [-1.0, 0.0, 0.0]
elem_stiffness[we:we + n_elem_main1] = 0
elem_mass[we:we + n_elem_main1] = 0
flat_end_node[0] = n_node_main1 - 1
first_node_centre[0] = wn + 1 + 1
boundary_conditions[0] = 1
# remember this is in B FoR
app_forces[0] = [0, thrust, 0, 0, 0, 0]
we += n_elem_main1
wn += n_node_main1
# outer right wing
        beam_number[we:we + n_elem_main2] = 0
y[wn:wn + n_node_main2 - 1] = y[wn - 1] + np.linspace(0.0, np.cos(lambda_dihedral)*span_main2, n_node_main2)[1:]
z[wn:wn + n_node_main2 - 1] = z[wn - 1] + np.linspace(0.0, np.sin(lambda_dihedral)*span_main2, n_node_main2)[1:]
for ielem in range(n_elem_main2):
conn[we + ielem, :] = ((np.ones((3, ))*(we + ielem)*(n_node_elem - 1)) +
[0, 2, 1])
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [-1.0, 0.0, 0.0]
elem_stiffness[we:we + n_elem_main2] = 0
elem_mass[we:we + n_elem_main2] = 0
boundary_conditions[wn + n_node_main2 - 2] = -1
we += n_elem_main2
wn += n_node_main2 - 1
# inner left wing
        beam_number[we:we + n_elem_main1] = 1
y[wn:wn + n_node_main1 - 1] = np.linspace(0.0, -span_main1, n_node_main1)[1:]
for ielem in range(n_elem_main1):
conn[we + ielem, :] = ((np.ones((3, ))*(we+ielem)*(n_node_elem - 1)) +
[0, 2, 1])
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [1.0, 0.0, 0.0]
conn[we, 0] = 0
elem_stiffness[we:we + n_elem_main1] = 0
elem_mass[we:we + n_elem_main1] = 0
flat_end_node[1] = wn + n_node_main1 - 1 - 1
first_node_centre[1] = wn + 1
we += n_elem_main1
wn += n_node_main1 - 1
# outer left wing
beam_number[we:we + n_elem_main2] = 1
y[wn:wn + n_node_main2 - 1] = y[wn - 1] + np.linspace(0.0, -np.cos(lambda_dihedral)*span_main2, n_node_main2)[1:]
z[wn:wn + n_node_main2 - 1] = z[wn - 1] + np.linspace(0.0, np.sin(lambda_dihedral)*span_main2, n_node_main2)[1:]
for ielem in range(n_elem_main2):
conn[we + ielem, :] = ((np.ones((3, ))*(we+ielem)*(n_node_elem - 1)) +
[0, 2, 1])
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [1.0, 0.0, 0.0]
elem_stiffness[we:we + n_elem_main2] = 0
elem_mass[we:we + n_elem_main2] = 0
boundary_conditions[wn + n_node_main2 - 2] = -1
we += n_elem_main2
wn += n_node_main2 - 1
# fuselage
beam_number[we:we + n_elem_fuselage] = 2
x[wn:wn + n_node_fuselage - 1] = np.linspace(0.0, length_fuselage, n_node_fuselage)[1:]
z[wn:wn + n_node_fuselage - 1] = np.linspace(0.0, offset_fuselage, n_node_fuselage)[1:]
for ielem in range(n_elem_fuselage):
conn[we + ielem, :] = ((np.ones((3,))*(we + ielem)*(n_node_elem - 1)) +
[0, 2, 1])
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [0.0, 1.0, 0.0]
conn[we, 0] = 0
elem_stiffness[we:we + n_elem_fuselage] = 1
elem_mass[we:we + n_elem_fuselage] = 1
we += n_elem_fuselage
wn += n_node_fuselage - 1
global end_of_fuselage_node
end_of_fuselage_node = wn - 1
# fin
beam_number[we:we + n_elem_fin] = 3
x[wn:wn + n_node_fin - 1] = x[end_of_fuselage_node]
z[wn:wn + n_node_fin - 1] = z[end_of_fuselage_node] + np.linspace(0.0, fin_height, n_node_fin)[1:]
for ielem in range(n_elem_fin):
conn[we + ielem, :] = ((np.ones((3,))*(we + ielem)*(n_node_elem - 1)) +
[0, 2, 1])
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [-1.0, 0.0, 0.0]
conn[we, 0] = end_of_fuselage_node
elem_stiffness[we:we + n_elem_fin] = 2
elem_mass[we:we + n_elem_fin] = 2
we += n_elem_fin
wn += n_node_fin - 1
end_of_fin_node = wn - 1
# right tail
beam_number[we:we + n_elem_tail] = 4
x[wn:wn + n_node_tail - 1] = x[end_of_fin_node]
y[wn:wn + n_node_tail - 1] = np.linspace(0.0, span_tail, n_node_tail)[1:]
z[wn:wn + n_node_tail - 1] = z[end_of_fin_node]
for ielem in range(n_elem_tail):
conn[we + ielem, :] = ((np.ones((3, ))*(we + ielem)*(n_node_elem - 1)) +
[0, 2, 1])
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [-1.0, 0.0, 0.0]
conn[we, 0] = end_of_fin_node
elem_stiffness[we:we + n_elem_tail] = 2
elem_mass[we:we + n_elem_tail] = 2
boundary_conditions[wn + n_node_tail - 2] = -1
we += n_elem_tail
wn += n_node_tail - 1
# left tail
beam_number[we:we + n_elem_tail] = 5
x[wn:wn + n_node_tail - 1] = x[end_of_fin_node]
y[wn:wn + n_node_tail - 1] = np.linspace(0.0, -span_tail, n_node_tail)[1:]
z[wn:wn + n_node_tail - 1] = z[end_of_fin_node]
for ielem in range(n_elem_tail):
conn[we + ielem, :] = ((np.ones((3, ))*(we + ielem)*(n_node_elem - 1)) +
[0, 2, 1])
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [1.0, 0.0, 0.0]
conn[we, 0] = end_of_fin_node
elem_stiffness[we:we + n_elem_tail] = 2
elem_mass[we:we + n_elem_tail] = 2
boundary_conditions[wn + n_node_tail - 2] = -1
we += n_elem_tail
wn += n_node_tail - 1
with h5.File(route + '/' + case_name + '.fem.h5', 'a') as h5file:
coordinates = h5file.create_dataset('coordinates', data=np.column_stack((x, y, z)))
            connectivities = h5file.create_dataset('connectivities', data=conn)
num_nodes_elem_handle = h5file.create_dataset(
'num_node_elem', data=n_node_elem)
num_nodes_handle = h5file.create_dataset(
'num_node', data=n_node)
num_elem_handle = h5file.create_dataset(
'num_elem', data=n_elem)
stiffness_db_handle = h5file.create_dataset(
'stiffness_db', data=stiffness)
stiffness_handle = h5file.create_dataset(
'elem_stiffness', data=elem_stiffness)
mass_db_handle = h5file.create_dataset(
'mass_db', data=mass)
mass_handle = h5file.create_dataset(
'elem_mass', data=elem_mass)
frame_of_reference_delta_handle = h5file.create_dataset(
'frame_of_reference_delta', data=frame_of_reference_delta)
structural_twist_handle = h5file.create_dataset(
'structural_twist', data=structural_twist)
bocos_handle = h5file.create_dataset(
'boundary_conditions', data=boundary_conditions)
beam_handle = h5file.create_dataset(
'beam_number', data=beam_number)
app_forces_handle = h5file.create_dataset(
'app_forces', data=app_forces)
lumped_mass_nodes_handle = h5file.create_dataset(
'lumped_mass_nodes', data=lumped_mass_nodes)
lumped_mass_handle = h5file.create_dataset(
'lumped_mass', data=lumped_mass)
lumped_mass_inertia_handle = h5file.create_dataset(
'lumped_mass_inertia', data=lumped_mass_inertia)
lumped_mass_position_handle = h5file.create_dataset(
'lumped_mass_position', data=lumped_mass_position)
def generate_aero_file():
global x, y, z
# control surfaces
n_control_surfaces = 2
control_surface = np.zeros((n_elem, n_node_elem), dtype=int) - 1
control_surface_type = np.zeros((n_control_surfaces, ), dtype=int)
control_surface_deflection = np.zeros((n_control_surfaces, ))
control_surface_chord = np.zeros((n_control_surfaces, ), dtype=int)
control_surface_hinge_coord = np.zeros((n_control_surfaces, ), dtype=float)
# control surface type 0 = static
# control surface type 1 = dynamic
control_surface_type[0] = 0
control_surface_deflection[0] = cs_deflection
control_surface_chord[0] = m
control_surface_hinge_coord[0] = -0.25 # nondimensional wrt elastic axis (+ towards the trailing edge)
control_surface_type[1] = 0
control_surface_deflection[1] = rudder_static_deflection
control_surface_chord[1] = 1
control_surface_hinge_coord[1] = -0. # nondimensional wrt elastic axis (+ towards the trailing edge)
we = 0
wn = 0
# right wing (surface 0, beam 0)
i_surf = 0
airfoil_distribution[we:we + n_elem_main, :] = 0
surface_distribution[we:we + n_elem_main] = i_surf
surface_m[i_surf] = m
aero_node[wn:wn + n_node_main] = True
temp_chord = np.linspace(chord_main, chord_main, n_node_main)
temp_sweep = np.linspace(0.0, 0*np.pi/180, n_node_main)
node_counter = 0
for i_elem in range(we, we + n_elem_main):
for i_local_node in range(n_node_elem):
if not i_local_node == 0:
node_counter += 1
chord[i_elem, i_local_node] = temp_chord[node_counter]
elastic_axis[i_elem, i_local_node] = ea_main
sweep[i_elem, i_local_node] = temp_sweep[node_counter]
we += n_elem_main
wn += n_node_main
# left wing (surface 1, beam 1)
i_surf = 1
airfoil_distribution[we:we + n_elem_main, :] = 0
# airfoil_distribution[wn:wn + n_node_main - 1] = 0
surface_distribution[we:we + n_elem_main] = i_surf
surface_m[i_surf] = m
aero_node[wn:wn + n_node_main - 1] = True
# chord[wn:wn + num_node_main - 1] = np.linspace(main_chord, main_tip_chord, num_node_main)[1:]
# chord[wn:wn + num_node_main - 1] = main_chord
# elastic_axis[wn:wn + num_node_main - 1] = main_ea
temp_chord = np.linspace(chord_main, chord_main, n_node_main)
node_counter = 0
for i_elem in range(we, we + n_elem_main):
for i_local_node in range(n_node_elem):
if not i_local_node == 0:
node_counter += 1
chord[i_elem, i_local_node] = temp_chord[node_counter]
elastic_axis[i_elem, i_local_node] = ea_main
sweep[i_elem, i_local_node] = -temp_sweep[node_counter]
we += n_elem_main
wn += n_node_main - 1
we += n_elem_fuselage
wn += n_node_fuselage - 1 - 1
# fin (surface 2, beam 3)
i_surf = 2
airfoil_distribution[we:we + n_elem_fin, :] = 1
# airfoil_distribution[wn:wn + n_node_fin] = 0
surface_distribution[we:we + n_elem_fin] = i_surf
surface_m[i_surf] = m
aero_node[wn:wn + n_node_fin] = True
# chord[wn:wn + num_node_fin] = fin_chord
for i_elem in range(we, we + n_elem_fin):
for i_local_node in range(n_node_elem):
chord[i_elem, i_local_node] = chord_fin
elastic_axis[i_elem, i_local_node] = ea_fin
control_surface[i_elem, i_local_node] = 1
# twist[end_of_fuselage_node] = 0
# twist[wn:] = 0
# elastic_axis[wn:wn + num_node_main] = fin_ea
we += n_elem_fin
wn += n_node_fin - 1
#
# # # right tail (surface 3, beam 4)
i_surf = 3
airfoil_distribution[we:we + n_elem_tail, :] = 2
# airfoil_distribution[wn:wn + n_node_tail] = 0
surface_distribution[we:we + n_elem_tail] = i_surf
surface_m[i_surf] = m
# XXX not very elegant
aero_node[wn:] = True
# chord[wn:wn + num_node_tail] = tail_chord
# elastic_axis[wn:wn + num_node_main] = tail_ea
for i_elem in range(we, we + n_elem_tail):
for i_local_node in range(n_node_elem):
twist[i_elem, i_local_node] = -0
for i_elem in range(we, we + n_elem_tail):
for i_local_node in range(n_node_elem):
chord[i_elem, i_local_node] = chord_tail
elastic_axis[i_elem, i_local_node] = ea_tail
control_surface[i_elem, i_local_node] = 0
we += n_elem_tail
wn += n_node_tail
#
# # left tail (surface 4, beam 5)
i_surf = 4
airfoil_distribution[we:we + n_elem_tail, :] = 2
# airfoil_distribution[wn:wn + n_node_tail - 1] = 0
surface_distribution[we:we + n_elem_tail] = i_surf
surface_m[i_surf] = m
aero_node[wn:wn + n_node_tail - 1] = True
# chord[wn:wn + num_node_tail] = tail_chord
# elastic_axis[wn:wn + num_node_main] = tail_ea
# twist[we:we + num_elem_tail] = -tail_twist
for i_elem in range(we, we + n_elem_tail):
for i_local_node in range(n_node_elem):
twist[i_elem, i_local_node] = -0
for i_elem in range(we, we + n_elem_tail):
for i_local_node in range(n_node_elem):
chord[i_elem, i_local_node] = chord_tail
elastic_axis[i_elem, i_local_node] = ea_tail
control_surface[i_elem, i_local_node] = 0
we += n_elem_tail
wn += n_node_tail
with h5.File(route + '/' + case_name + '.aero.h5', 'a') as h5file:
airfoils_group = h5file.create_group('airfoils')
# add one airfoil
naca_airfoil_main = airfoils_group.create_dataset('0', data=np.column_stack(
generate_naca_camber(P=0, M=0)))
naca_airfoil_tail = airfoils_group.create_dataset('1', data=np.column_stack(
generate_naca_camber(P=0, M=0)))
naca_airfoil_fin = airfoils_group.create_dataset('2', data=np.column_stack(
generate_naca_camber(P=0, M=0)))
# chord
            chord_input = h5file.create_dataset('chord', data=chord)
            chord_input.attrs['units'] = 'm'
            # twist
            twist_input = h5file.create_dataset('twist', data=twist)
            twist_input.attrs['units'] = 'rad'
            # sweep
            sweep_input = h5file.create_dataset('sweep', data=sweep)
            sweep_input.attrs['units'] = 'rad'
# airfoil distribution
airfoil_distribution_input = h5file.create_dataset('airfoil_distribution', data=airfoil_distribution)
surface_distribution_input = h5file.create_dataset('surface_distribution', data=surface_distribution)
surface_m_input = h5file.create_dataset('surface_m', data=surface_m)
m_distribution_input = h5file.create_dataset('m_distribution', data=m_distribution.encode('ascii', 'ignore'))
aero_node_input = h5file.create_dataset('aero_node', data=aero_node)
elastic_axis_input = h5file.create_dataset('elastic_axis', data=elastic_axis)
control_surface_input = h5file.create_dataset('control_surface', data=control_surface)
control_surface_deflection_input = h5file.create_dataset('control_surface_deflection', data=control_surface_deflection)
control_surface_chord_input = h5file.create_dataset('control_surface_chord', data=control_surface_chord)
control_surface_hinge_coord_input = h5file.create_dataset('control_surface_hinge_coord', data=control_surface_hinge_coord)
control_surface_types_input = h5file.create_dataset('control_surface_type', data=control_surface_type)
    def generate_naca_camber(M=0, P=0):
        # standard NACA 4-digit camber line: max camber M% of the chord,
        # located at P*10% of the chord
        mm = M*1e-2
        p = P*1e-1

        def naca(x, mm, p):
            if x < 1e-6:
                return 0.0
            elif x < p:
                return mm/(p*p)*(2*p*x - x*x)
            else:
                return mm/((1-p)*(1-p))*(1 - 2*p + 2*p*x - x*x)
x_vec = np.linspace(0, 1, 1000)
y_vec = np.array([naca(x, mm, p) for x in x_vec])
return x_vec, y_vec
def generate_multibody_file():
global end_of_fuselage_node
global LC
# LCR = gc.LagrangeConstraint()
# LCR.behaviour = 'lin_vel_node_wrtG'
# LCR.velocity = np.zeros((3,))
# LCR.body_number = 0
# LCR.node_number = flat_end_node[0]
# LCL = gc.LagrangeConstraint()
# LCL.behaviour = 'lin_vel_node_wrtG'
# LCL.velocity = np.zeros((3,))
# LCL.body_number = 0
# LCL.node_number = flat_end_node[1]
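        # three linear-velocity constraints in the G frame: the end of the
        # fuselage plus one node near the root of each inner wing; each is
        # driven by a TakeOffTrajectoryController (constraint_00/01/02 in
        # the solver settings)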
LCF = gc.LagrangeConstraint()
LCF.behaviour = 'lin_vel_node_wrtG'
LCF.velocity = np.zeros((3,))
LCF.body_number = 0
LCF.node_number = end_of_fuselage_node
LCCR = gc.LagrangeConstraint()
LCCR.behaviour = 'lin_vel_node_wrtG'
LCCR.velocity = np.zeros((3,))
LCCR.body_number = 0
LCCR.node_number = first_node_centre[0]
LCCL = gc.LagrangeConstraint()
LCCL.behaviour = 'lin_vel_node_wrtG'
LCCL.velocity = np.zeros((3,))
LCCL.body_number = 0
LCCL.node_number = first_node_centre[1]
# LC = [LCR, LCL, LCF, LCCR, LCCL]
LC = [LCF, LCCR, LCCL]
MB1 = gc.BodyInformation()
MB1.body_number = 0
MB1.FoR_position = np.zeros((6,))
MB1.FoR_velocity = np.zeros((6,))
MB1.FoR_acceleration = np.zeros((6,))
MB1.FoR_movement = 'free'
MB1.quat = np.array([1.0, 0.0, 0.0, 0.0])
MB = [MB1]
gc.generate_multibody_file(LC, MB, route, case_name)
def generate_solver_file():
file_name = route + '/' + case_name + '.sharpy'
settings = dict()
settings['SHARPy'] = {'case': case_name,
'route': route,
'flow': flow,
'write_screen': 'off',
'write_log': 'on',
'log_folder': route + '/output/',
'log_file': case_name + '.log'}
settings['BeamLoader'] = {'unsteady': 'on',
'orientation':
algebra.euler2quat(np.array([roll,
alpha,
beta]))}
settings['AerogridLoader'] = {'unsteady': 'on',
'aligned_grid': 'on',
# 'mstar': int(160/tstep_factor),
'mstar': int(100/tstep_factor),
'freestream_dir': ['1', '0', '0'],
'control_surface_deflection': ['', ''],
'control_surface_deflection_generator':
{'0': {},
'1': {}}}
settings['NonLinearStatic'] = {'print_info': 'off',
'max_iterations': 150,
'num_load_steps': 1,
'delta_curved': 1e-1,
'min_delta': tolerance,
'gravity_on': gravity,
'gravity': 0*9.81}
settings['StaticUvlm'] = {'print_info': 'off',
'horseshoe': 'off',
'num_cores': num_cores,
'n_rollup': 0,
'rollup_dt': dt,
'rollup_aic_refresh': 1,
'rollup_tolerance': 1e-4,
'velocity_field_generator': 'SteadyVelocityField',
'velocity_field_input': {'u_inf': u_background,
'u_inf_direction': [1., 0, 0]},
'rho': 0*rho}
settings['StaticCoupled'] = {'print_info': 'off',
'structural_solver': 'NonLinearStatic',
'structural_solver_settings':
settings['NonLinearStatic'],
'aero_solver': 'StaticUvlm',
'aero_solver_settings': settings['StaticUvlm'],
'max_iter': 100,
'n_load_steps': n_step,
'tolerance': fsi_tolerance,
'relaxation_factor': relaxation_factor}
settings['StaticTrim'] = {'solver': 'StaticCoupled',
'solver_settings': settings['StaticCoupled'],
'initial_alpha': alpha,
'initial_deflection': cs_deflection,
'initial_thrust': thrust}
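        # Note: 'StaticTrim' is commented out of 'flow' above, so alpha,
        # cs_deflection and thrust are presumably the results of a previous
        # trim run, hard-coded at the top of this function.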
settings['NonLinearDynamicCoupledStep'] = {'print_info': 'off',
'max_iterations': 950,
'delta_curved': 1e-1,
'min_delta': tolerance,
'newmark_damp': 1e-2,
'gravity_on': gravity,
'gravity': 9.81,
'num_steps': n_tstep,
'dt': dt,
'initial_velocity': 0}
settings['NonLinearDynamicMultibody'] = {'print_info': 'off',
'max_iterations': 950,
'delta_curved': 1e-1,
'min_delta': tolerance,
'newmark_damp': 1e-2,
'gravity_on': gravity,
'gravity': 9.81,
'num_steps': n_tstep,
'dt': dt}
relative_motion = 'off'
settings['StepUvlm'] = {'print_info': 'off',
'horseshoe': 'off',
'num_cores': num_cores,
'n_rollup': 0,
'convection_scheme': 2,
'rollup_dt': dt,
'rollup_aic_refresh': 1,
'rollup_tolerance': 1e-4,
'gamma_dot_filtering': 6,
'velocity_field_generator': 'SteadyVelocityField',
'velocity_field_input': {'u_inf': u_background,
'u_inf_direction': [1., 0, 0]},
'rho': rho,
'n_time_steps': n_tstep,
'dt': dt}
solver = 'NonLinearDynamicMultibody'
settings['PickleData'] = {'folder': route + '/'}
settings['DynamicCoupled'] = {'structural_solver': solver,
'structural_solver_settings': settings[solver],
'aero_solver': 'StepUvlm',
'aero_solver_settings': settings['StepUvlm'],
'fsi_substeps': 200,
'fsi_tolerance': fsi_tolerance,
'relaxation_factor': relaxation_factor,
'minimum_steps': 2,
'relaxation_steps': 150,
'dynamic_relaxation': 'off',
'final_relaxation_factor': 0.5,
'n_time_steps': n_tstep,
'dt': dt,
'structural_substeps': structural_substeps,
'include_unsteady_force_contribution': 'on',
'steps_without_unsteady_force': 9,
'controller_id': {#'controller_right': 'TakeOffTrajectoryController',
# 'controller_left': 'TakeOffTrajectoryController',
'controller_tail': 'TakeOffTrajectoryController',
'controller_cright': 'TakeOffTrajectoryController',
'controller_cleft': 'TakeOffTrajectoryController',
},
'controller_settings': {
# 'controller_right':
# {
# 'trajectory_input_file': trajectory_file,
# 'dt': dt,
# 'trajectory_method': 'lagrange',
# 'controlled_constraint': 'constraint_00',
# 'initial_ramp_length_structural_substeps': controller_ramp,
# 'write_controller_log': 'off',
# },
# 'controller_left':
# {
# 'trajectory_input_file': trajectory_file,
# 'dt': dt,
# 'trajectory_method': 'lagrange',
# 'controlled_constraint': 'constraint_01',
# 'initial_ramp_length_structural_substeps': controller_ramp,
# 'write_controller_log': 'off',
# },
'controller_tail':
{
'trajectory_input_file': trajectory_file,
'dt': dt,
'trajectory_method': 'lagrange',
'controlled_constraint': 'constraint_00',
'initial_ramp_length_structural_substeps': controller_ramp,
'write_controller_log': 'off',
}, 'controller_cright':
{
'trajectory_input_file': trajectory_file,
'dt': dt,
'trajectory_method': 'lagrange',
'controlled_constraint': 'constraint_01',
'initial_ramp_length_structural_substeps': controller_ramp,
'write_controller_log': 'off',
}, 'controller_cleft':
{
'trajectory_input_file': trajectory_file,
'dt': dt,
'trajectory_method': 'lagrange',
'controlled_constraint': 'constraint_02',
'initial_ramp_length_structural_substeps': controller_ramp,
'write_controller_log': 'off',
}},
'postprocessors': ['BeamLoads'],
'postprocessors_settings': {'BeamLoads': {'folder': route + '/output/',
'csv_output': 'off'},
'BeamPlot': {'folder': route + '/output/',
'include_rbm': 'on',
'include_applied_forces': 'on'},
'AerogridPlot': {
'folder': route + '/output/',
'include_rbm': 'on',
'include_applied_forces': 'on',
'minus_m_star': 0},
}
}
settings['BeamLoads'] = {'folder': route + '/output/',
'csv_output': 'off'}
settings['BeamPlot'] = {'folder': route + '/output/',
'include_rbm': 'on',
'include_applied_forces': 'on',
'include_forward_motion': 'on'}
settings['AerogridPlot'] = {'folder': route + '/output/',
'include_rbm': 'on',
'include_forward_motion': 'off',
'include_applied_forces': 'on',
'minus_m_star': 0,
'u_inf': 0,
'dt': dt}
settings['Notes'] = {'note': case_notes}
import configobj
config = configobj.ConfigObj()
config.filename = file_name
for k, v in settings.items():
config[k] = v
config.write()
gc.clean_test_files(route, case_name)
generate_fem()
generate_multibody_file()
generate_aero_file()
generate_solver_file()
generate_dyn_file()
if 'StaticTrim' not in flow:
generate_trajectory_file()
return {'sharpy': route + '/' + case_name + '.sharpy'}
if __name__ == "__main__":
generate()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 16:37:20 2019
@author: wei
"""
import os
import sys
sys.path.append(os.getcwd()+'/models')
sys.path.append(os.getcwd()+'/datasets')
import cv2
import time
import torch
import random
import pprint
import datetime
import argparse
import numpy as np
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from PIL import Image
from config import config
from models.fcos import FCOS
from torch.utils.data import DataLoader
from datasets.dataset import ImageFolder
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=1, help='size of the batches')
parser.add_argument('--test_path', type=str, default=r'./images/demo_images', help='path to the directory of test images')
opt = parser.parse_args()
cfg = config
pprint.pprint(opt)
pprint.pprint(cfg)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = FCOS(cfg).to(device)
ckpt = torch.load('./checkpoints/checkpoint.pth')['weights']
model.load_state_dict(ckpt)
model.eval()
print('loading weights successfully...')
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485,0.456,0.406), (0.229,0.224,0.225))
])
dataset = ImageFolder(opt.test_path, cfg['img_size'], transform)
dataloader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=False)
imgs_path = []
imgs_detection = []
prev_time = time.time()
print ('\nPerforming object detection: %d samples...'%len(dataset))
for b, (image_path, input_img) in enumerate(dataloader):
#import pdb
#pdb.set_trace()
input_img = input_img.to(device)
with torch.no_grad():
detections = model(input_img)
# Log progress
current_time = time.time()
inference_time = datetime.timedelta(seconds=current_time - prev_time)
prev_time = current_time
print ('\t+ Batch %d, Inference Time: %s' % (b, inference_time))
for idx, boxList in enumerate(detections):
if len(boxList.bbox):
imgs_path.append(image_path[idx])
imgs_detection.append(boxList)
#
# Bounding-box colors
cmap = plt.get_cmap('tab20b')
colors = [cmap(i) for i in np.linspace(0, 1, 20)]
print ('\nSaving images:')
for img_i, (path, boxList) in enumerate(zip(imgs_path, imgs_detection)):
#
print ("(%d) Image: '%s'" % (img_i, path))
# Create plot
img = np.array(Image.open(path))[...,:3]
img = img.copy()
#The amount of padding that was added
pad_x = max(img.shape[0] - img.shape[1], 0) * (cfg['img_size'] / max(img.shape))
pad_y = max(img.shape[1] - img.shape[0], 0) * (cfg['img_size'] / max(img.shape))
# Image height and width after padding is removed
unpad_h = cfg['img_size'] - pad_y
unpad_w = cfg['img_size'] - pad_x
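    # Worked example of the un-letterboxing below (assuming cfg['img_size'] is
    # 416 and a 640x480 source): pad_x = 0, pad_y = (640-480)*(416/640) = 104,
    # so unpad_h = 312; box coordinates are rescaled from that 312-px region
    # back to the original 480-px image height.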
if len(boxList.bbox):
boxes = boxList.bbox.cpu()
labels = boxList.get_field('labels').cpu()
scores = boxList.get_field('scores').cpu()
unique_labels = labels.unique()
bbox_colors = random.sample(colors, len(unique_labels))
for idx in range(len(boxes)):
box = boxes[idx]
label = labels[idx].item()
score = scores[idx].item()
print('\t+ Label: %s, Conf: %.5f' % (cfg['classes'][int(label)], score))
x1,y1,x2,y2 = box
box_h = int((((y2 - y1) / unpad_h) * img.shape[0]).item())
box_w = int((((x2 - x1) / unpad_w) * img.shape[1]).item())
y1 = int((((y1 - pad_y // 2) / unpad_h) * img.shape[0]).item())
x1 = int((((x1 - pad_x // 2) / unpad_w) * img.shape[1]).item())
x2 = x1 + box_w
y2 = y1 + box_h
color = bbox_colors[int(np.where(unique_labels == int(label))[0])]
color = list(map(lambda a: a*255, color))
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.rectangle(img, (x1,y1), (x2,y2), color, 2)
#cv2.rectangle(img, (x1, y1), (x1+30, y1+30),(255, 0, 0), thickness=-1)
#cv2.putText(img, cfg['classes'][int(label)], (int(x1), int(y1+13)), font, 0.6,(255, 255, 255), 1)
#cv2.putText(img,str(score), (int(x1), int(y1)), font, 0.6,(255, 255, 255), 1)
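    # PIL decodes images as RGB while cv2.imwrite expects BGR; convert so the
    # saved results keep the intended colours
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)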
cv2.imwrite(r'./images/results/%d.png' % (img_i), img)
| [
"datasets.dataset.ImageFolder",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.get_cmap",
"os.getcwd",
"cv2.imwrite",
"torch.load",
"torchvision.transforms.Normalize",
"time.time",
"PIL.Image.open",
"cv2.rectangle",
"datetime.timedelta",
"pprint.pprint",
"torc... | [((566, 591), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (589, 591), False, 'import argparse\n'), ((831, 849), 'pprint.pprint', 'pprint.pprint', (['opt'], {}), '(opt)\n', (844, 849), False, 'import pprint\n'), ((850, 868), 'pprint.pprint', 'pprint.pprint', (['cfg'], {}), '(cfg)\n', (863, 868), False, 'import pprint\n'), ((1255, 1309), 'datasets.dataset.ImageFolder', 'ImageFolder', (['opt.test_path', "cfg['img_size']", 'transform'], {}), "(opt.test_path, cfg['img_size'], transform)\n", (1266, 1309), False, 'from datasets.dataset import ImageFolder\n'), ((1323, 1384), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'opt.batch_size', 'shuffle': '(False)'}), '(dataset, batch_size=opt.batch_size, shuffle=False)\n', (1333, 1384), False, 'from torch.utils.data import DataLoader\n'), ((1433, 1444), 'time.time', 'time.time', ([], {}), '()\n', (1442, 1444), False, 'import time\n'), ((2150, 2172), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab20b"""'], {}), "('tab20b')\n", (2162, 2172), True, 'import matplotlib.pyplot as plt\n'), ((977, 1019), 'torch.load', 'torch.load', (['"""./checkpoints/checkpoint.pth"""'], {}), "('./checkpoints/checkpoint.pth')\n", (987, 1019), False, 'import torch\n'), ((1751, 1762), 'time.time', 'time.time', ([], {}), '()\n', (1760, 1762), False, 'import time\n'), ((1784, 1836), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(current_time - prev_time)'}), '(seconds=current_time - prev_time)\n', (1802, 1836), False, 'import datetime\n'), ((142, 153), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (151, 153), False, 'import os\n'), ((181, 192), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (190, 192), False, 'import os\n'), ((902, 927), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (925, 927), False, 'import torch\n'), ((949, 958), 'models.fcos.FCOS', 'FCOS', (['cfg'], {}), '(cfg)\n', (953, 958), False, 'from models.fcos import FCOS\n'), ((1151, 1172), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1170, 1172), True, 'import torchvision.transforms as transforms\n'), ((1178, 1244), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (1198, 1244), True, 'import torchvision.transforms as transforms\n'), ((1653, 1668), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1666, 1668), False, 'import torch\n'), ((2200, 2221), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (2211, 2221), True, 'import numpy as np\n'), ((4218, 4269), 'cv2.imwrite', 'cv2.imwrite', (["('./images/results/%d.png' % img_i)", 'img'], {}), "('./images/results/%d.png' % img_i, img)\n", (4229, 4269), False, 'import cv2\n'), ((2410, 2426), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (2420, 2426), False, 'from PIL import Image\n'), ((3877, 3925), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y1)', '(x2, y2)', 'color', '(2)'], {}), '(img, (x1, y1), (x2, y2), color, 2)\n', (3890, 3925), False, 'import cv2\n')] |
import glob
import sys
from textwrap import wrap
import matplotlib.pyplot as plt
import numpy as np
import statistics
plt.figure(figsize=(8.5, 6.0))
plt.rcdefaults()
def main(data_filename, output_dir, hexcolour):
generation_values = list(range(1, 11))
x = np.arange(500)
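    # NOTE: plt.plot below requires len(generation_data) == len(x), so each data
    # file is assumed to hold exactly 500 non-empty lines (one per generation)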
generation_data = []
with open(data_filename) as f:
for line in f.readlines():
if len(line.strip()) != 0:
data = line.strip().split(',')
generation_data.append(statistics.mean(map(float, data)))
line_colour = '#{}'.format(hexcolour)
plt.plot(x, generation_data, linewidth=1.0, color=line_colour)
output_filename = data_filename[:len(data_filename) - 4].split('/')
output_filename = output_filename[len(output_filename) - 1]
data_metadata = output_filename.split('|')
lambda_value = float(data_metadata[4])
num_random_parents = int(data_metadata[7])
data_type = data_metadata[len(data_metadata) - 1] \
.split('-')[1] \
.capitalize()
if data_type == 'Fitness':
data_type = 'Cost'
plot_title = ('Mean {} of Models, Optimized Via Genetic '.format(data_type)
+ 'Algorithm and With λ = {} '.format(lambda_value)
+ 'and {} Random Parents, '.format(num_random_parents)
+ 'As Number of Generations Increase')
plot_title = '\n'.join(wrap(plot_title, 60))
if data_type == 'Accuracy':
y_label = 'Accuracy (in Percentage)'
else:
y_label = data_type
plt.title(plot_title)
plt.xlabel('Number of Generations')
plt.ylabel(y_label)
plt.savefig('{}/{}.png'.format(output_dir, output_filename))
if __name__ == '__main__':
argument_list = sys.argv[1:]
if len(argument_list) != 3:
        print('Usage: plot_ga_accuracy.py <filename> <output dir> '
              + '<colour in hex (no prefix)>')
sys.exit(-1)
main(argument_list[0], argument_list[1], argument_list[2])
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"textwrap.wrap",
"matplotlib.pyplot.rcdefaults",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"sys.exit"
] | [((121, 151), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8.5, 6.0)'}), '(figsize=(8.5, 6.0))\n', (131, 151), True, 'import matplotlib.pyplot as plt\n'), ((152, 168), 'matplotlib.pyplot.rcdefaults', 'plt.rcdefaults', ([], {}), '()\n', (166, 168), True, 'import matplotlib.pyplot as plt\n'), ((270, 284), 'numpy.arange', 'np.arange', (['(500)'], {}), '(500)\n', (279, 284), True, 'import numpy as np\n'), ((587, 649), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'generation_data'], {'linewidth': '(1.0)', 'color': 'line_colour'}), '(x, generation_data, linewidth=1.0, color=line_colour)\n', (595, 649), True, 'import matplotlib.pyplot as plt\n'), ((1579, 1600), 'matplotlib.pyplot.title', 'plt.title', (['plot_title'], {}), '(plot_title)\n', (1588, 1600), True, 'import matplotlib.pyplot as plt\n'), ((1605, 1640), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Generations"""'], {}), "('Number of Generations')\n", (1615, 1640), True, 'import matplotlib.pyplot as plt\n'), ((1645, 1664), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (1655, 1664), True, 'import matplotlib.pyplot as plt\n'), ((1436, 1456), 'textwrap.wrap', 'wrap', (['plot_title', '(60)'], {}), '(plot_title, 60)\n', (1440, 1456), False, 'from textwrap import wrap\n'), ((1947, 1959), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1955, 1959), False, 'import sys\n')] |
import csv
from abc import ABC
import numpy as np
from scipy import sparse
from typing import Tuple
def load_data(path, headers=True):
max_item = 0
max_user = 0
data = []
pairs = []
with open(path, 'rt') as file:
reader = csv.reader(file, delimiter=',', quotechar='"')
if headers:
next(reader, None)
for row in reader:
item_id = int(row[0])
user_id = int(row[1])
rating = float(row[2])
max_item = max(item_id, max_item)
max_user = max(user_id, max_user)
data.append((item_id, user_id, rating,))
pairs.append((item_id, user_id,))
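    # lil_matrix supports cheap incremental writes; item/user ids are assumed
    # to be 1-based in the file, hence the "- 1" when filling the matrix below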
ratings = sparse.lil_matrix((max_item, max_user))
for d in data:
ratings[d[0] - 1, d[1] - 1] = d[2]
return ratings
class ALS(ABC):
def __init__(self, ratings: sparse.lil_matrix, rank: int):
self._ratings = ratings
self._rank = rank
DEFAULT_ALPHA = 0.0001
DEFAULT_BETA = 0.01
class BatchALS(ALS):
def __init__(self, ratings: sparse.lil_matrix,
rank: int,
alpha: float = DEFAULT_ALPHA,
beta: float = DEFAULT_BETA) -> None:
super().__init__(ratings, rank)
self._alpha = alpha
self._beta = beta
def run(self,
item_factors: np.ndarray,
user_factors: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
user_factors = user_factors.T
indices = self._ratings.nonzero()
        # nonzero() returns aligned row/column index arrays, so iterate only
        # over the observed (item, user) rating pairs
        for i, j in zip(indices[0], indices[1]):
            rating = self._ratings[i, j]
            error: float = rating - \
                self.predict(item_factors, user_factors, i, j)
            for k in range(self._rank):
                item_factors[i][k] = item_factors[i][k] + self._alpha * \
                    (2 * error * user_factors[k][j] - self._beta * item_factors[i][k])
                user_factors[k][j] = user_factors[k][j] + self._alpha * \
                    (2 * error * item_factors[i][k] - self._beta * user_factors[k][j])
e = 0.0
        for i, j in zip(indices[0], indices[1]):
            rating = self._ratings[i, j]
            e = e + pow(rating -
                        np.dot(item_factors[i, :], user_factors[:, j]), 2)
            for k in range(self._rank):
                e = e + (self._beta / 2.0) * \
                    (item_factors[i][k] ** 2.0 + user_factors[k][j] ** 2.0)
return item_factors, user_factors.T
@staticmethod
def predict(item_factors: np.ndarray,
user_factors: np.ndarray,
i: int,
j: int) -> np.ndarray:
return np.dot(item_factors[i, :], user_factors[:, j])
@staticmethod
def random_factors(nUsers: int,
nRatings: int,
rank: int) -> Tuple[np.ndarray, np.ndarray]:
item_factors: np.ndarray = np.random.rand(nRatings, rank)
user_factors: np.ndarray = np.random.rand(nUsers, rank)
return item_factors, user_factors
| [
"numpy.random.rand",
"numpy.dot",
"scipy.sparse.lil_matrix",
"csv.reader"
] | [((686, 725), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(max_item, max_user)'], {}), '((max_item, max_user))\n', (703, 725), False, 'from scipy import sparse\n'), ((252, 298), 'csv.reader', 'csv.reader', (['file'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(file, delimiter=\',\', quotechar=\'"\')\n', (262, 298), False, 'import csv\n'), ((2779, 2825), 'numpy.dot', 'np.dot', (['item_factors[i, :]', 'user_factors[:, j]'], {}), '(item_factors[i, :], user_factors[:, j])\n', (2785, 2825), True, 'import numpy as np\n'), ((3022, 3052), 'numpy.random.rand', 'np.random.rand', (['nRatings', 'rank'], {}), '(nRatings, rank)\n', (3036, 3052), True, 'import numpy as np\n'), ((3088, 3116), 'numpy.random.rand', 'np.random.rand', (['nUsers', 'rank'], {}), '(nUsers, rank)\n', (3102, 3116), True, 'import numpy as np\n'), ((2328, 2374), 'numpy.dot', 'np.dot', (['item_factors[i, :]', 'user_factors[:, j]'], {}), '(item_factors[i, :], user_factors[:, j])\n', (2334, 2374), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import math
import random
import director.vtkAll as vtk
import numpy as np
from director.debugVis import DebugData
class RaySensor(object):
"""Ray sensor."""
def __init__(self, num_rays=20, radius=10, min_angle=-90, max_angle=90, z_distance=2.5, bottles=None):
"""Constructs a RaySensor.
Args:
num_rays: Number of rays.
radius: Max distance of the rays.
min_angle: Minimum angle of the rays in degrees.
            max_angle: Maximum angle of the rays in degrees.
            z_distance: Height (z coordinate) at which the rays are cast.
            bottles: Optional list of bottle (x, y) positions to detect.
        """
self._num_rays = num_rays
self._radius = radius
self._min_angle = math.radians(min_angle)
self._max_angle = math.radians(max_angle)
self.z_distance = z_distance
self._locator = None
self._bottles = bottles
self._state = [0., 0., 0.] # x, y, theta
self._hit = np.zeros(self._num_rays)
self._distances = np.zeros(self._num_rays)
self._intersections = [[0, 0, 0] for i in range(self._num_rays)]
self._update_rays(self._state[2])
@property
def distances(self):
"""Array of distances measured by each ray."""
normalized_distances = [
self._distances[i] / self._radius if self._hit[i] else 1.0
for i in range(self._num_rays)
]
return normalized_distances
def has_collided(self, max_distance=0.05):
"""Returns whether a collision has occured or not.
Args:
max_distance: Threshold for collision distance.
"""
for hit, distance in zip(self._hit, self._distances):
if hit and distance <= max_distance:
return True
return False
def set_locator(self, locator):
"""Sets the vtk cell locator.
Args:
locator: Cell locator.
"""
self._locator = locator
def update(self, x, y, theta):
"""Updates the sensor's readings.
Args:
x: X coordinate.
y: Y coordinate.
theta: Yaw.
"""
self._update_rays(theta)
origin = np.array([x, y, self.z_distance])
self._state = [x, y, theta]
if self._locator is None:
return
for i in range(self._num_rays):
hit, dist, inter = self._cast_ray(origin, origin + self._rays[i])
self._hit[i] = hit
self._distances[i] = dist
self._intersections[i] = inter
def _update_rays(self, theta):
"""Updates the rays' readings.
Args:
theta: Yaw.
"""
r = self._radius
angle_step = (self._max_angle - self._min_angle) / (self._num_rays - 1)
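        # rays fan out evenly across [min_angle, max_angle], rotated by the
        # current yaw (theta)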
self._rays = [
np.array([
r * math.cos(theta + self._min_angle + i * angle_step),
r * math.sin(theta + self._min_angle + i * angle_step),
0
])
for i in range(self._num_rays)
]
def _cast_ray(self, start, end):
"""Casts a ray and determines intersections and distances.
Args:
start: Origin of the ray.
end: End point of the ray.
Returns:
Tuple of (whether it intersected, distance, intersection).
"""
tolerance = 0.0 # intersection tolerance
pt = [0.0, 0.0, 0.0] # coordinate of intersection
distance = vtk.mutable(0.0) # distance of intersection
pcoords = [0.0, 0.0, 0.0] # location within intersected cell
subID = vtk.mutable(0) # subID of intersected cell
hit = self._locator.IntersectWithLine(start, end, tolerance,
distance, pt, pcoords, subID)
return hit, distance, pt
def to_polydata(self, bottle_detected=False):
"""Converts the sensor to polydata."""
d = DebugData()
origin = np.array([self._state[0], self._state[1], self.z_distance])
for hit, intersection, ray in zip(self._hit,
self._intersections,
self._rays):
if hit:
color = [1., 0.45882353, 0.51372549]
endpoint = intersection
else:
color = [0., 0.6, 0.58823529]
endpoint = origin + ray
d.addLine(origin, endpoint, color=color, radius=0.05)
# add vision sensor
center = [self._state[0], self._state[1], 0]
axis = [0, 0, 1]
if bottle_detected:
color = [0., 0.8, 0.]
else:
color = [0., 0.6, 0.58823529]
d.addCircle(center, axis, 5, color=color)
return d.getPolyData()
def check_bottles(self):
range_x = range(int(self._state[0]) - int(np.ceil(self.z_distance)), int(self._state[0]) + int(np.ceil(self.z_distance)))
range_y = range(int(self._state[1]) - int(np.ceil(self.z_distance)), int(self._state[1]) + int(np.ceil(self.z_distance)))
for index, bottle in enumerate(self._bottles):
if int(bottle[0]) in range_x and int(bottle[1]) in range_y:
return self.precision(), index
return False, -1
@staticmethod
def precision(precision=60):
w = [precision, 100-precision]
i = [True, False]
r = random.uniform(0, 100)
# loop through a list of inputs and max cutoff values, returning
# the first value for which the random num r is less than the cutoff value
        for n, v in zip(i, [sum(w[:x + 1]) for x in range(len(w))]):
if r < v:
return n
| [
"director.debugVis.DebugData",
"numpy.ceil",
"random.uniform",
"math.radians",
"director.vtkAll.mutable",
"numpy.zeros",
"math.sin",
"numpy.array",
"math.cos"
] | [((657, 680), 'math.radians', 'math.radians', (['min_angle'], {}), '(min_angle)\n', (669, 680), False, 'import math\n'), ((707, 730), 'math.radians', 'math.radians', (['max_angle'], {}), '(max_angle)\n', (719, 730), False, 'import math\n'), ((902, 926), 'numpy.zeros', 'np.zeros', (['self._num_rays'], {}), '(self._num_rays)\n', (910, 926), True, 'import numpy as np\n'), ((953, 977), 'numpy.zeros', 'np.zeros', (['self._num_rays'], {}), '(self._num_rays)\n', (961, 977), True, 'import numpy as np\n'), ((2144, 2177), 'numpy.array', 'np.array', (['[x, y, self.z_distance]'], {}), '([x, y, self.z_distance])\n', (2152, 2177), True, 'import numpy as np\n'), ((3457, 3473), 'director.vtkAll.mutable', 'vtk.mutable', (['(0.0)'], {}), '(0.0)\n', (3468, 3473), True, 'import director.vtkAll as vtk\n'), ((3596, 3610), 'director.vtkAll.mutable', 'vtk.mutable', (['(0)'], {}), '(0)\n', (3607, 3610), True, 'import director.vtkAll as vtk\n'), ((3938, 3949), 'director.debugVis.DebugData', 'DebugData', ([], {}), '()\n', (3947, 3949), False, 'from director.debugVis import DebugData\n'), ((3967, 4026), 'numpy.array', 'np.array', (['[self._state[0], self._state[1], self.z_distance]'], {}), '([self._state[0], self._state[1], self.z_distance])\n', (3975, 4026), True, 'import numpy as np\n'), ((5408, 5430), 'random.uniform', 'random.uniform', (['(0)', '(100)'], {}), '(0, 100)\n', (5422, 5430), False, 'import random\n'), ((4869, 4893), 'numpy.ceil', 'np.ceil', (['self.z_distance'], {}), '(self.z_distance)\n', (4876, 4893), True, 'import numpy as np\n'), ((4922, 4946), 'numpy.ceil', 'np.ceil', (['self.z_distance'], {}), '(self.z_distance)\n', (4929, 4946), True, 'import numpy as np\n'), ((4999, 5023), 'numpy.ceil', 'np.ceil', (['self.z_distance'], {}), '(self.z_distance)\n', (5006, 5023), True, 'import numpy as np\n'), ((5052, 5076), 'numpy.ceil', 'np.ceil', (['self.z_distance'], {}), '(self.z_distance)\n', (5059, 5076), True, 'import numpy as np\n'), ((2796, 2846), 'math.cos', 'math.cos', (['(theta + self._min_angle + i * angle_step)'], {}), '(theta + self._min_angle + i * angle_step)\n', (2804, 2846), False, 'import math\n'), ((2868, 2918), 'math.sin', 'math.sin', (['(theta + self._min_angle + i * angle_step)'], {}), '(theta + self._min_angle + i * angle_step)\n', (2876, 2918), False, 'import math\n')] |
from itertools import chain
import glob
import torch
from PIL import Image
from os import path
from torch.utils.data import Dataset
import numpy as np
import math
class SegmentationDataset(Dataset):
_EXTENSIONS = ["*.jpg", "*.jpeg", "*.png"] # , "*.tif"
def __init__(self, in_dir, crop_h, crop_w, transform):
"""
Find all images in in_dir and prepare them as crop list
:param in_dir: directory of the input data
:param crop_size: size of the crop, single int value, square crop
:param transform: data normalization
"""
super(SegmentationDataset, self).__init__()
self.im_dir = in_dir + 'grey/'
self.annot_dir = in_dir + 'labels/'
self.transform = transform
self.crop_h = crop_h
self.crop_w = crop_w
self.images = []
# Generate crop list
for img_path in chain(*(glob.iglob(path.join(self.im_dir, ext)) for ext in SegmentationDataset._EXTENSIONS)):
_, name_with_ext = path.split(img_path)
idx, _ = path.splitext(name_with_ext)
self.images.append({
"idx": idx,
"path": img_path,
"annot_path": self.annot_dir + idx + '.png'
})
def __len__(self):
return len(self.images)
def __getitem__(self, item):
# Load image and annotation
img = Image.open(self.images[item]["path"])
annot = Image.open(self.images[item]["annot_path"])
# scale the image
# deepscene images are much smaller than mapillary
scale = 1024 / float(max(img.size[0], img.size[1]))
if scale > 1.:
out_size = tuple(int(dim * scale) for dim in img.size)
img = img.resize(out_size, resample=Image.BILINEAR)
annot = annot.resize(out_size, resample=Image.NEAREST)
# random crop
w, h = img.size
        if w < self.crop_w or h < self.crop_h:
            # upscale so both dimensions can fit the crop, and keep the
            # annotation aligned with the image
            scale = max(self.crop_w, self.crop_h) / float(min(w, h))
            out_size = tuple(int(dim * scale) for dim in img.size)
            img = img.resize(out_size, resample=Image.BILINEAR)
            annot = annot.resize(out_size, resample=Image.NEAREST)
            w, h = img.size
        top = np.random.randint(0, h - self.crop_h + 1)
        left = np.random.randint(0, w - self.crop_w + 1)
img = img.crop((left, top, left+self.crop_w, top+self.crop_h))
img = self.transform(img)
annot = annot.crop((left, top, left+self.crop_w, top+self.crop_h))
annot = self.transform(annot, normalize=False).long()
return {"img": img, "annot": annot, "meta": {"idx": self.images[item]["idx"]}}
def segmentation_collate(items):
imgs = torch.stack([item["img"] for item in items])
annots = torch.stack([item["annot"] for item in items])
metas = [item["meta"] for item in items]
return {"img": imgs, "annot": annots, "meta": metas} | [
"torch.stack",
"PIL.Image.open",
"numpy.random.randint",
"os.path.splitext",
"os.path.split",
"os.path.join"
] | [((2644, 2688), 'torch.stack', 'torch.stack', (["[item['img'] for item in items]"], {}), "([item['img'] for item in items])\n", (2655, 2688), False, 'import torch\n'), ((2702, 2748), 'torch.stack', 'torch.stack', (["[item['annot'] for item in items]"], {}), "([item['annot'] for item in items])\n", (2713, 2748), False, 'import torch\n'), ((1396, 1433), 'PIL.Image.open', 'Image.open', (["self.images[item]['path']"], {}), "(self.images[item]['path'])\n", (1406, 1433), False, 'from PIL import Image\n'), ((1450, 1493), 'PIL.Image.open', 'Image.open', (["self.images[item]['annot_path']"], {}), "(self.images[item]['annot_path'])\n", (1460, 1493), False, 'from PIL import Image\n'), ((2175, 2212), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h - self.crop_h)'], {}), '(0, h - self.crop_h)\n', (2192, 2212), True, 'import numpy as np\n'), ((2228, 2265), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w - self.crop_w)'], {}), '(0, w - self.crop_w)\n', (2245, 2265), True, 'import numpy as np\n'), ((1014, 1034), 'os.path.split', 'path.split', (['img_path'], {}), '(img_path)\n', (1024, 1034), False, 'from os import path\n'), ((1056, 1084), 'os.path.splitext', 'path.splitext', (['name_with_ext'], {}), '(name_with_ext)\n', (1069, 1084), False, 'from os import path\n'), ((908, 935), 'os.path.join', 'path.join', (['self.im_dir', 'ext'], {}), '(self.im_dir, ext)\n', (917, 935), False, 'from os import path\n')] |
# coding:utf-8
import numpy as np
import category_encoders as ce
class XgboostDataPrepare(object):
def __init__(self, *, train_feature, train_label, test_feature):
self.__train_feature = train_feature.copy()
self.__train_label = train_label.copy()
self.__test_feature = test_feature.copy()
self.__categorical_index = None
self.__numeric_index = None
self.__encoder = None
def data_prepare(self):
""" 离散变量 缺失值填充 missing 后均值编码连续化
:return: 训练集特征 测试集特征
"""
self.__categorical_index = np.where(self.__train_feature.dtypes == "object")[0]
self.__numeric_index = np.where(self.__train_feature.dtypes != "object")[0]
self.__train_feature.iloc[:, self.__categorical_index] = (
self.__train_feature.iloc[:, self.__categorical_index].fillna("missing")
)
self.__test_feature.iloc[:, self.__categorical_index] = (
self.__test_feature.iloc[:, self.__categorical_index].fillna("missing")
)
self.__encoder = ce.TargetEncoder()
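        # TargetEncoder replaces each category with a (smoothed) mean of the
        # training label observed for that category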
self.__encoder.fit(
self.__train_feature.iloc[:, self.__categorical_index],
self.__train_label
)
self.__train_feature.iloc[:, self.__categorical_index] = self.__encoder.transform(
self.__train_feature.iloc[:, self.__categorical_index]
)
self.__test_feature.iloc[:, self.__categorical_index] = self.__encoder.transform(
self.__test_feature.iloc[:, self.__categorical_index]
)
return self.__train_feature, self.__test_feature | [
"category_encoders.TargetEncoder",
"numpy.where"
] | [((1058, 1076), 'category_encoders.TargetEncoder', 'ce.TargetEncoder', ([], {}), '()\n', (1074, 1076), True, 'import category_encoders as ce\n'), ((573, 622), 'numpy.where', 'np.where', (["(self.__train_feature.dtypes == 'object')"], {}), "(self.__train_feature.dtypes == 'object')\n", (581, 622), True, 'import numpy as np\n'), ((657, 706), 'numpy.where', 'np.where', (["(self.__train_feature.dtypes != 'object')"], {}), "(self.__train_feature.dtypes != 'object')\n", (665, 706), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from recurrent_controller import RecurrentController
from dnc.dnc import DNC
import tensorflow as tf
import numpy as np
import pickle
import sys
import os
def llprint(message):
"""
Flushes message to stdout
:param message: A string to print.
:return: None.
"""
sys.stdout.write(message)
sys.stdout.flush()
def load(path):
"""
Unpickle the file located at path.
:param path: The path to the pickled file.
:return: Returns the object hierarchy stored in the file.
"""
return pickle.load(open(path, 'rb'))
def onehot(index, size):
"""
Create a numpy vector that has all zeros except at index. index has the value 1.
:param index: The index where the vector should be one.
:param size: The length of the vector.
:return: A one-hot vector encoding for the given index.
"""
vec = np.zeros(size, dtype=np.float32)
vec[index] = 1.0
return vec
def prepare_sample(sample, answers, target_code, word_space_size):
"""
Transform a sequence of letters and the correct response into input and output vectors.
:param sample: list of letters forming word.
:param answers: response that the DNC should give.
:param target_code: code indicating end of sample and beginning of answer (also used in input as
a replacement for each letter in the answer.
:param word_space_size: how many total letters exist.
:return: tuple including input vector, output vector, length of sequence, and associated weights.
"""
input_vec = np.array(sample[0], dtype=np.float32)
output_vec = np.array(sample[0], dtype=np.float32)
seq_len = input_vec.shape[0]
weights_vec = np.zeros(seq_len, dtype=np.float32)
output_vec = np.append(output_vec, np.array(answers, dtype=np.float32))
input_vec = np.append(input_vec, np.array([target_code] * len(answers), dtype=np.float32))
input_vec = np.array([onehot(int(code), word_space_size) for code in input_vec])
output_vec = np.array([onehot(int(code), word_space_size) for code in output_vec])
seq_len = input_vec.shape[0]
return (
np.reshape(input_vec, (1, -1, word_space_size)),
np.reshape(output_vec, (1, -1, word_space_size)),
seq_len,
np.reshape(weights_vec, (1, -1, 1))
)
def get_solution(story):
"""
Find the answer to the question: "What are the instances of vowels (excluding y) contained in story? Repeat them
in order."
:param story: the list of letters to find vowels in
    :return: the input letters followed by '#' and then the vowels they contain, in order
"""
story.append('#')
my_outputs = story + [letter for letter in story if letter in ['a', 'e', 'i', 'o', 'u']]
return my_outputs
def main():
"""
Runs an interactive shell where the user can submit input with their chosen deliminator and see the output of the
DNC's latest checkpoint.
:return: None
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
ckpts_dir = os.path.join(dir_path, 'checkpoints')
lexicon_dictionary = load(os.path.join(dir_path, 'data', 'encoded', 'lexicon-dict.pkl'))
target_code = lexicon_dictionary["#"]
graph = tf.Graph()
with graph.as_default():
with tf.compat.v1.Session(graph=graph) as session:
ncomputer = DNC(
RecurrentController,
input_size=len(lexicon_dictionary),
output_size=len(lexicon_dictionary),
max_sequence_length=100,
memory_words_num=256,
memory_word_size=64,
memory_read_heads=4,
)
ncomputer.restore(session, ckpts_dir, 'step-100001')
outputs, _ = ncomputer.get_outputs()
softmaxed = tf.nn.softmax(outputs)
print("This is an interactive shell script. Here a user may test a trained neural network by passing it "
"custom inputs and seeing if they elicid the desired output. \n Please note that a user may only "
"test inputs that consists of words in the neural network's lexicon. If the user would like to quit"
" the program, they can type ':q!' when prompted for an input. \n If the user would like to see the"
" network's lexicon, they can type ':dict' when prompted for an input. Otherwise, the user may "
"simply type the sequence of inputs that they would like to use and then hit the enter key. \n "
"They will then be asked to specify the deliminator that distinguishes one word from another word."
" The input will then be split using that deliminator. \n If all resulting inputs are in the "
"network's lexicon, the network will then be fed these inputs and its output will be printed for "
"the user along with its expected output.")
my_input = input("Input:")
while my_input != ":q!":
if my_input == ":dict":
print("The neural network has been trained to recognize the following words:")
print(lexicon_dictionary)
my_input = input("Input:")
continue
deliminator = input("Deliminator:")
story = my_input.split(deliminator)
if not set(story).issubset(lexicon_dictionary):
print("You may only test key in the lexicon dictionary.")
my_input = input("Input:")
continue
desired_answers = get_solution(story)
encoded_story = []
encoded_answers = []
for an_input in story:
encoded_story.append(lexicon_dictionary[an_input])
for an_output in desired_answers:
encoded_answers.append(lexicon_dictionary[an_output])
input_vec, _, seq_len, _ = prepare_sample([encoded_story], encoded_answers, target_code,
len(lexicon_dictionary))
softmax_output = session.run(softmaxed, feed_dict={
ncomputer.input_data: input_vec,
ncomputer.sequence_length: seq_len
})
softmax_output = np.squeeze(softmax_output, axis=0)
given_answers = np.argmax(softmax_output[:len(desired_answers)], axis=1)
print("Output: ", [list(lexicon_dictionary.keys())[list(lexicon_dictionary.values()).index(an_answer)]
for an_answer in given_answers])
is_correct = True
if len(given_answers) != len(encoded_answers):
is_correct = False
else:
for i in range(len(given_answers)):
if given_answers[i] != encoded_answers[i]:
is_correct = False
if is_correct:
print("Correct!")
else:
print("Expected: ", desired_answers)
my_input = input("Input:")
if __name__ == "__main__":
main()
| [
"sys.stdout.write",
"tensorflow.nn.softmax",
"os.path.realpath",
"numpy.zeros",
"tensorflow.compat.v1.Session",
"sys.stdout.flush",
"numpy.array",
"tensorflow.Graph",
"numpy.reshape",
"numpy.squeeze",
"os.path.join"
] | [((312, 337), 'sys.stdout.write', 'sys.stdout.write', (['message'], {}), '(message)\n', (328, 337), False, 'import sys\n'), ((342, 360), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (358, 360), False, 'import sys\n'), ((885, 917), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (893, 917), True, 'import numpy as np\n'), ((1581, 1618), 'numpy.array', 'np.array', (['sample[0]'], {'dtype': 'np.float32'}), '(sample[0], dtype=np.float32)\n', (1589, 1618), True, 'import numpy as np\n'), ((1636, 1673), 'numpy.array', 'np.array', (['sample[0]'], {'dtype': 'np.float32'}), '(sample[0], dtype=np.float32)\n', (1644, 1673), True, 'import numpy as np\n'), ((1725, 1760), 'numpy.zeros', 'np.zeros', (['seq_len'], {'dtype': 'np.float32'}), '(seq_len, dtype=np.float32)\n', (1733, 1760), True, 'import numpy as np\n'), ((3003, 3040), 'os.path.join', 'os.path.join', (['dir_path', '"""checkpoints"""'], {}), "(dir_path, 'checkpoints')\n", (3015, 3040), False, 'import os\n'), ((3189, 3199), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3197, 3199), True, 'import tensorflow as tf\n'), ((1801, 1836), 'numpy.array', 'np.array', (['answers'], {'dtype': 'np.float32'}), '(answers, dtype=np.float32)\n', (1809, 1836), True, 'import numpy as np\n'), ((2160, 2207), 'numpy.reshape', 'np.reshape', (['input_vec', '(1, -1, word_space_size)'], {}), '(input_vec, (1, -1, word_space_size))\n', (2170, 2207), True, 'import numpy as np\n'), ((2217, 2265), 'numpy.reshape', 'np.reshape', (['output_vec', '(1, -1, word_space_size)'], {}), '(output_vec, (1, -1, word_space_size))\n', (2227, 2265), True, 'import numpy as np\n'), ((2292, 2327), 'numpy.reshape', 'np.reshape', (['weights_vec', '(1, -1, 1)'], {}), '(weights_vec, (1, -1, 1))\n', (2302, 2327), True, 'import numpy as np\n'), ((2959, 2985), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2975, 2985), False, 'import os\n'), ((3071, 3132), 'os.path.join', 'os.path.join', (['dir_path', '"""data"""', '"""encoded"""', '"""lexicon-dict.pkl"""'], {}), "(dir_path, 'data', 'encoded', 'lexicon-dict.pkl')\n", (3083, 3132), False, 'import os\n'), ((3242, 3275), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (3262, 3275), True, 'import tensorflow as tf\n'), ((3767, 3789), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['outputs'], {}), '(outputs)\n', (3780, 3789), True, 'import tensorflow as tf\n'), ((6343, 6377), 'numpy.squeeze', 'np.squeeze', (['softmax_output'], {'axis': '(0)'}), '(softmax_output, axis=0)\n', (6353, 6377), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
def plot_the_graph(filename, data_for_plot):
# data_for_plot = data.T
deltaloc = 0.32
locations = []
for i in range(0,8,2):
locations.extend([i + 1 - deltaloc, i + 1 + deltaloc])
locations = range(8)
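    # NOTE: the staggered offsets computed above are overwritten here, so both
    # the swarms and the boxes are drawn at the integer positions 0-7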
fig,ax = plt.subplots(1, figsize=(5,5))
# swp = sns.swarmplot(data=data_for_plot, size=2.5, color='green', zorder=10)
xs = []
ys = []
for i,d in enumerate(data_for_plot):
xs.extend([locations[i]]*len(d))
ys.extend(d)
df = pd.DataFrame(dict(x=xs,
y=ys))
flatui = ['black', 'black', 'orange', 'orange', 'magenta', 'magenta', 'blue', 'blue']
# sns.set_palette(flatui)
# sns.palplot(sns.color_palette())
sns.swarmplot(x="x", y="y", data=df,size=2.5, zorder=10, order=locations, ax=ax, palette=flatui)
# ax2.artists[2].set_edgecolor('red')
# for i, x in enumerate(ax2.artists):
# if i == 0 or i == 1:
# x.set_facecolor('black')
# elif i == 2 or i == 3:
# x.set_facecolor('orange')
# elif i == 4 or i == 5:
# x.set_facecolor('magenta')
# elif i == 6 or i == 7:
# x.set_facecolor('grey')
# if i % 2 == 0:
# mybox.set_facecolor('red')
# mybox.set_edgecolor('black')
# mybox.set_linewidth(3)
bp = ax.boxplot(data_for_plot,
0, 'rs', 1, whis='range', patch_artist=True,
widths=0.6, positions=locations) #list(range(0,8,1))
# for i, x in enumerate(data_for_plot):
# data = x
# plt.plot(np.random.uniform(low=i+1-0.2, high=i+1+0.2, size=len(data)), data,
# '.', color='green',
# alpha=1, markersize=5, zorder=10)
thealpha = 0.4
for i,x in enumerate(bp['boxes']):
x.set_facecolor('white')
x.set_edgecolor('grey')
x.set_alpha(0.3)
for i,x in enumerate(bp['fliers']):
x.set_color('grey')
x.set_alpha(thealpha)
for i,x in enumerate(bp['caps']):
x.set_color('grey')
x.set_alpha(thealpha)
for i,x in enumerate(bp['whiskers']):
x.set_color('grey')
x.set_alpha(thealpha)
# for i,x in enumerate(bp['boxes']):
# if i == 0 or i == 1:
# x.set_facecolor('black')
# elif i == 2 or i == 3:
# x.set_color('orange')
# elif i == 4 or i == 5:
# x.set_facecolor('magenta')
# elif i == 6 or i == 7:
# x.set_facecolor('grey')
# if i % 2 == 0:
# x.set_alpha(1)
# else:
# x.set_alpha(1)
for median in bp['medians']:
median.set(color='grey', linewidth=3, linestyle='--', dashes=[8, 2], alpha=0.5)
plt.ylabel('Galectin3 puncta / cell')
ax.set_xticklabels(['before', 'after']*4)
plt.xlabel('')
ax.set_ylim([-3, 55])
# plt.tight_layout()
fig.savefig('data/galectin_data/{0}.png'.format(filename), dpi=600)
fig.savefig('data/galectin_data/{0}.eps'.format(filename))
# plt.show()
# plt.close(fig)
# del fig
# del ax
# del bp
data = np.genfromtxt("data/galectin_data/MCF7-mAG-gal13.txt", delimiter="\t", skip_header=4)
conditions = ['Control', '100:0', '80:20', 'Siramesine']
data_for_plot = []
for d in data.T:
data_for_plot.append((d[np.logical_not(np.isnan(d))]))
plot_the_graph('b1', data_for_plot)
data = np.genfromtxt("data/galectin_data/U2OS-mchery-gal3.txt", delimiter="\t", skip_header=4)
conditions = ['Control', '100:0', '80:20', 'Siramesine']
data_for_plot = []
for d in data.T:
data_for_plot.append((d[np.logical_not(np.isnan(d))]))
data_for_plot.append([np.nan])
plot_the_graph('b2', data_for_plot) | [
"numpy.genfromtxt",
"seaborn.swarmplot",
"numpy.isnan",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots"
] | [((3223, 3312), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data/galectin_data/MCF7-mAG-gal13.txt"""'], {'delimiter': '"""\t"""', 'skip_header': '(4)'}), "('data/galectin_data/MCF7-mAG-gal13.txt', delimiter='\\t',\n skip_header=4)\n", (3236, 3312), True, 'import numpy as np\n'), ((3505, 3596), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data/galectin_data/U2OS-mchery-gal3.txt"""'], {'delimiter': '"""\t"""', 'skip_header': '(4)'}), "('data/galectin_data/U2OS-mchery-gal3.txt', delimiter='\\t',\n skip_header=4)\n", (3518, 3596), True, 'import numpy as np\n'), ((335, 366), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(5, 5)'}), '(1, figsize=(5, 5))\n', (347, 366), True, 'import matplotlib.pyplot as plt\n'), ((806, 907), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""x"""', 'y': '"""y"""', 'data': 'df', 'size': '(2.5)', 'zorder': '(10)', 'order': 'locations', 'ax': 'ax', 'palette': 'flatui'}), "(x='x', y='y', data=df, size=2.5, zorder=10, order=locations,\n ax=ax, palette=flatui)\n", (819, 907), True, 'import seaborn as sns\n'), ((2848, 2885), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Galectin3 puncta / cell"""'], {}), "('Galectin3 puncta / cell')\n", (2858, 2885), True, 'import matplotlib.pyplot as plt\n'), ((2936, 2950), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""""""'], {}), "('')\n", (2946, 2950), True, 'import matplotlib.pyplot as plt\n'), ((3445, 3456), 'numpy.isnan', 'np.isnan', (['d'], {}), '(d)\n', (3453, 3456), True, 'import numpy as np\n'), ((3729, 3740), 'numpy.isnan', 'np.isnan', (['d'], {}), '(d)\n', (3737, 3740), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from Discovery import logger
class SentimentTable(object):
"""List of sentiment words"""
def __init__(self, data=None):
self.positive = []
self.negative = []
self.original_data = []
self.y = []
if data is not None:
for item in data:
self.add_word(item[0], float(item[1]))
self.original_data = data
def words(self):
return list(self.get_frame().Word)
def to_dic(self, normalize=False):
frame = self.get_frame()
if normalize:
max = frame.Sentiment.abs().max()
frame.Sentiment /= max
return frame.set_index('Word')['Sentiment'].to_dict()
def add_word(self, word, sentiment):
word = word.lower()
self.y.append(sentiment)
if sentiment > 0:
self.positive.append(word)
if sentiment < 0:
self.negative.append(word)
self.original_data.append((word, sentiment))
def balance(self):
total_positive = len(self.positive)
total_negative = len(self.negative)
take = total_negative
if total_positive < total_negative:
take = total_positive
return self.take_top(take)
def take_top(self, top):
logger.info("Taking top {}".format(top))
positive = [x for x in self.original_data if float(x[1]) > 0]
negative = [x for x in self.original_data if float(x[1]) < 0]
positive = sorted(positive, key=lambda x: float(x[1]), reverse=True)[:top]
negative = sorted(negative, key=lambda x: float(x[1]), reverse=False)[:top]
return SentimentTable(positive + negative)
def get_frame(self):
data = pd.DataFrame([[item[0].lower(), float(item[1])] for item in self.original_data],
columns=['Word', 'Sentiment'])
if len(data.Word) == 0:
return data
return data.groupby(['Word'], as_index=False)['Sentiment'].mean()
def save_csv(self, filename, baseline=None):
logger.info("Save CSV <{}>...".format(filename))
frame = self.get_frame()
if baseline is not None:
frame.join(baseline.get_frame(), rsuffix='_baseline')
frame.to_csv(filename, index=False, header=False)
@staticmethod
def construct_frame(data):
subset = data[['Word', 'Sentiment']]
items = [tuple(x) for x in subset.values]
return SentimentTable(items)
@staticmethod
def construct_from_dict(data):
list_key_value = [[k, v] for k, v in data.items()]
return SentimentTable(list_key_value)
@staticmethod
def construct_bootstrapper(data):
items = []
for i in range(len(data)):
items.append((data[i][0], data[i][1].value))
return SentimentTable(items)
@staticmethod
def construct_positive_negative(positive, negative):
items = []
for item in positive:
items.append((item, 1))
for item in negative:
items.append((item, -1))
return SentimentTable(items)
class SentimentHelper(object):
"""Helper class to perform various functions"""
@staticmethod
def calculate_sentiment_value(positive, negative):
coefficient = 2
if positive == 0 and negative == 0:
return 0
min_value = 0.1
positive += min_value
negative += min_value
rating = np.log2(positive / negative)
if rating < -coefficient:
rating = -coefficient
elif rating > coefficient:
rating = coefficient
rating /= coefficient
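        # e.g. positive=10, negative=1 -> log2(10.1/1.1) ~= 3.20, clipped to 2,
        # then normalised to 1.0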
return np.round(rating, 2)
class SentimentValue(object):
"""Sentiment value holder"""
def __init__(self, positive, negative):
self.positive = positive
self.negative = negative
if positive < 0 and negative < 0:
self.value = 0
return
if positive < 0:
# increase negative
negative -= positive
positive = 0
elif negative < 0:
# increase positive
positive -= negative
negative = 0
self.value = SentimentHelper.calculate_sentiment_value(positive, negative)
| [
"numpy.log2",
"numpy.round"
] | [((3482, 3510), 'numpy.log2', 'np.log2', (['(positive / negative)'], {}), '(positive / negative)\n', (3489, 3510), True, 'import numpy as np\n'), ((3694, 3713), 'numpy.round', 'np.round', (['rating', '(2)'], {}), '(rating, 2)\n', (3702, 3713), True, 'import numpy as np\n')] |
###############################################################################
# Loader, Resizer, PixelExtractor, DnnFeaturizer
import numpy as np
import pandas
from nimbusml import Pipeline
from nimbusml.datasets.image import get_RevolutionAnalyticslogo, get_Microsoftlogo
from nimbusml.feature_extraction.image import Loader
from nimbusml.feature_extraction.image import PixelExtractor
from nimbusml.feature_extraction.image import Resizer
from nimbusml.linear_model import FastLinearBinaryClassifier
data = pandas.DataFrame(data=dict(
Path=[get_RevolutionAnalyticslogo(), get_Microsoftlogo()],
Label=[True, False]))
X = data[['Path']]
y = data[['Label']]
# transforms and learners
transform_1 = Loader() << 'Path'
transform_2 = Resizer(image_width=227, image_height=227)
transform_3 = PixelExtractor()
algo = FastLinearBinaryClassifier() << 'Path'
# pipeline of transforms and trainer
pipeline = Pipeline([transform_1, transform_2, transform_3, algo])
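# the pipeline chains: load each image file -> resize to 227x227 -> flatten
# the pixels into a feature vector -> fit a linear classifier on those features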
# fit the model
pipeline.fit(X, y)
# scoring
nimbusml_pred = pipeline.predict(X)
print("Predicted Labels : {0}".format(nimbusml_pred.PredictedLabel.values))
print(
"Accuracy : {0}".format(
np.mean(
y.Label.values == nimbusml_pred.PredictedLabel.values)))
| [
"nimbusml.datasets.image.get_Microsoftlogo",
"nimbusml.feature_extraction.image.Loader",
"nimbusml.linear_model.FastLinearBinaryClassifier",
"nimbusml.Pipeline",
"nimbusml.feature_extraction.image.Resizer",
"nimbusml.datasets.image.get_RevolutionAnalyticslogo",
"nimbusml.feature_extraction.image.PixelEx... | [((743, 785), 'nimbusml.feature_extraction.image.Resizer', 'Resizer', ([], {'image_width': '(227)', 'image_height': '(227)'}), '(image_width=227, image_height=227)\n', (750, 785), False, 'from nimbusml.feature_extraction.image import Resizer\n'), ((800, 816), 'nimbusml.feature_extraction.image.PixelExtractor', 'PixelExtractor', ([], {}), '()\n', (814, 816), False, 'from nimbusml.feature_extraction.image import PixelExtractor\n'), ((912, 967), 'nimbusml.Pipeline', 'Pipeline', (['[transform_1, transform_2, transform_3, algo]'], {}), '([transform_1, transform_2, transform_3, algo])\n', (920, 967), False, 'from nimbusml import Pipeline\n'), ((710, 718), 'nimbusml.feature_extraction.image.Loader', 'Loader', ([], {}), '()\n', (716, 718), False, 'from nimbusml.feature_extraction.image import Loader\n'), ((824, 852), 'nimbusml.linear_model.FastLinearBinaryClassifier', 'FastLinearBinaryClassifier', ([], {}), '()\n', (850, 852), False, 'from nimbusml.linear_model import FastLinearBinaryClassifier\n'), ((1171, 1233), 'numpy.mean', 'np.mean', (['(y.Label.values == nimbusml_pred.PredictedLabel.values)'], {}), '(y.Label.values == nimbusml_pred.PredictedLabel.values)\n', (1178, 1233), True, 'import numpy as np\n'), ((550, 579), 'nimbusml.datasets.image.get_RevolutionAnalyticslogo', 'get_RevolutionAnalyticslogo', ([], {}), '()\n', (577, 579), False, 'from nimbusml.datasets.image import get_RevolutionAnalyticslogo, get_Microsoftlogo\n'), ((581, 600), 'nimbusml.datasets.image.get_Microsoftlogo', 'get_Microsoftlogo', ([], {}), '()\n', (598, 600), False, 'from nimbusml.datasets.image import get_RevolutionAnalyticslogo, get_Microsoftlogo\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 9 07:00:53 2021
@author: Bianca
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from scipy import signal
fs = 1000 # sampling frequency
t = np.arange(0, 2, 1/fs)
# signal constants
a = 1
b = 0.8
c = 0.75
delta1 = 0.25
delta2 = 0.5
# emitted signal x(t), with s(t) -> white noise
st = np.random.normal(loc = 0, scale = 1, size = len(t))
nx = np.random.normal(loc = 0, scale = 0.1, size = len(t)) # Gaussian noise
xt = a*st + nx
# signal at the receiver, y(t) = y1 + y2
ny = np.random.normal(loc = 0, scale = 0.5, size = len(t)) # Gaussian noise
yt = b*np.roll(st, int(-delta1*fs)) + c*np.roll(st, int(-delta2*fs)) + ny
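# y(t) is built from two attenuated, time-shifted copies of s(t) plus noise,
# so the cross-correlation Rxy computed below should show peaks at the lags
# corresponding to the 0.25 s and 0.5 s delays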
#%% plot of the signals x(t) and y(t)
plt.figure(figsize = (10, 6))
plt.subplot(2,1,1)
plt.plot(t, xt, linewidth = 1, color='b', alpha = 0.7)
plt.grid(linestyle = '--', which='both')
plt.title('Sinal emitido e contaminado por ruído')
plt.ylabel(r'$x(t)$')
plt.xlabel('Tempo [s]')
plt.xlim((0, t[-1]))
plt.ylim((-5, 5))
plt.subplot(2,1,2)
plt.plot(t, yt, linewidth = 1, color='m', alpha = 0.7)
plt.grid(linestyle = '--', which='both')
plt.title('Sinal gravado e contaminado por ruído')
plt.ylabel(r'$y(t)$')
plt.xlabel('Tempo [s]')
plt.xlim((0, t[-1]))
plt.ylim((-5, 5))
plt.tight_layout()
#%% Computation of the auto-correlations and the cross-correlation
# Auto-correlation of x(t)
Rxx = np.correlate(xt, xt, mode = 'same')
Ryy = np.correlate(yt, yt, mode = 'same')
# Cross-correlation
Rxy = np.correlate(xt, yt, mode = 'same')
tau = np.linspace(-0.5*len(Rxy)/fs, 0.5*len(Rxy)/fs, len(Rxy))
#%% Plot of the auto-correlations of x(t) and y(t)
plt.figure(figsize = (10, 6))
plt.subplot(2,1,1)
plt.plot(tau, Rxx/len(Rxx), linewidth = 1, color='b', alpha = 0.7)
plt.grid(linestyle = '--', which='both')
plt.title('Auto-correlação do sinal $x(t)$')
plt.ylabel(r'$R_{xx}(\tau)$')
plt.xlabel(r'$\tau$ [s]')
plt.subplot(2,1,2)
plt.plot(tau, Ryy/len(Ryy), linewidth = 1, color='m', alpha = 0.7)
plt.grid(linestyle = '--', which='both')
plt.title('Auto-correlação do sinal $y(t)$')
plt.ylabel(r'$R_{yy}(\tau)$')
plt.xlabel(r'$\tau$ [s]')
plt.tight_layout()
#%% Plot of the cross-correlation Rxy(tau)
plt.figure(figsize = (10, 3))
plt.plot(tau, Rxy/len(Ryy), linewidth = 1, color='g')
plt.grid(linestyle = '--', which='both')
plt.title(r'Correlação cruzada de $x(t)$ e $y(t)$')
plt.ylabel(r'$R_{xy}(\tau)$')
plt.xlabel(r'$\tau$ [s]')
plt.tight_layout()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.correlate",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplo... | [((233, 256), 'numpy.arange', 'np.arange', (['(0)', '(2)', '(1 / fs)'], {}), '(0, 2, 1 / fs)\n', (242, 256), True, 'import numpy as np\n'), ((774, 801), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (784, 801), True, 'import matplotlib.pyplot as plt\n'), ((804, 824), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (815, 824), True, 'import matplotlib.pyplot as plt\n'), ((823, 873), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'xt'], {'linewidth': '(1)', 'color': '"""b"""', 'alpha': '(0.7)'}), "(t, xt, linewidth=1, color='b', alpha=0.7)\n", (831, 873), True, 'import matplotlib.pyplot as plt\n'), ((878, 916), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'linestyle': '"""--"""', 'which': '"""both"""'}), "(linestyle='--', which='both')\n", (886, 916), True, 'import matplotlib.pyplot as plt\n'), ((919, 969), 'matplotlib.pyplot.title', 'plt.title', (['"""Sinal emitido e contaminado por ruído"""'], {}), "('Sinal emitido e contaminado por ruído')\n", (928, 969), True, 'import matplotlib.pyplot as plt\n'), ((970, 990), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$x(t)$"""'], {}), "('$x(t)$')\n", (980, 990), True, 'import matplotlib.pyplot as plt\n'), ((992, 1015), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tempo [s]"""'], {}), "('Tempo [s]')\n", (1002, 1015), True, 'import matplotlib.pyplot as plt\n'), ((1016, 1036), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, t[-1])'], {}), '((0, t[-1]))\n', (1024, 1036), True, 'import matplotlib.pyplot as plt\n'), ((1037, 1054), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-5, 5)'], {}), '((-5, 5))\n', (1045, 1054), True, 'import matplotlib.pyplot as plt\n'), ((1056, 1076), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (1067, 1076), True, 'import matplotlib.pyplot as plt\n'), ((1075, 1125), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'yt'], {'linewidth': '(1)', 'color': '"""m"""', 'alpha': '(0.7)'}), "(t, yt, linewidth=1, color='m', alpha=0.7)\n", (1083, 1125), True, 'import matplotlib.pyplot as plt\n'), ((1130, 1168), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'linestyle': '"""--"""', 'which': '"""both"""'}), "(linestyle='--', which='both')\n", (1138, 1168), True, 'import matplotlib.pyplot as plt\n'), ((1171, 1221), 'matplotlib.pyplot.title', 'plt.title', (['"""Sinal gravado e contaminado por ruído"""'], {}), "('Sinal gravado e contaminado por ruído')\n", (1180, 1221), True, 'import matplotlib.pyplot as plt\n'), ((1222, 1242), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$y(t)$"""'], {}), "('$y(t)$')\n", (1232, 1242), True, 'import matplotlib.pyplot as plt\n'), ((1244, 1267), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tempo [s]"""'], {}), "('Tempo [s]')\n", (1254, 1267), True, 'import matplotlib.pyplot as plt\n'), ((1268, 1288), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, t[-1])'], {}), '((0, t[-1]))\n', (1276, 1288), True, 'import matplotlib.pyplot as plt\n'), ((1289, 1306), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-5, 5)'], {}), '((-5, 5))\n', (1297, 1306), True, 'import matplotlib.pyplot as plt\n'), ((1307, 1325), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1323, 1325), True, 'import matplotlib.pyplot as plt\n'), ((1415, 1448), 'numpy.correlate', 'np.correlate', (['xt', 'xt'], {'mode': '"""same"""'}), "(xt, xt, mode='same')\n", (1427, 1448), True, 'import numpy as np\n'), ((1457, 1490), 'numpy.correlate', 'np.correlate', (['yt', 'yt'], {'mode': '"""same"""'}), "(yt, yt, 
mode='same')\n", (1469, 1490), True, 'import numpy as np\n'), ((1520, 1553), 'numpy.correlate', 'np.correlate', (['xt', 'yt'], {'mode': '"""same"""'}), "(xt, yt, mode='same')\n", (1532, 1553), True, 'import numpy as np\n'), ((1666, 1693), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (1676, 1693), True, 'import matplotlib.pyplot as plt\n'), ((1696, 1716), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (1707, 1716), True, 'import matplotlib.pyplot as plt\n'), ((1782, 1820), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'linestyle': '"""--"""', 'which': '"""both"""'}), "(linestyle='--', which='both')\n", (1790, 1820), True, 'import matplotlib.pyplot as plt\n'), ((1823, 1867), 'matplotlib.pyplot.title', 'plt.title', (['"""Auto-correlação do sinal $x(t)$"""'], {}), "('Auto-correlação do sinal $x(t)$')\n", (1832, 1867), True, 'import matplotlib.pyplot as plt\n'), ((1868, 1897), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$R_{xx}(\\\\tau)$"""'], {}), "('$R_{xx}(\\\\tau)$')\n", (1878, 1897), True, 'import matplotlib.pyplot as plt\n'), ((1898, 1923), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\tau$ [s]"""'], {}), "('$\\\\tau$ [s]')\n", (1908, 1923), True, 'import matplotlib.pyplot as plt\n'), ((1925, 1945), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (1936, 1945), True, 'import matplotlib.pyplot as plt\n'), ((2011, 2049), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'linestyle': '"""--"""', 'which': '"""both"""'}), "(linestyle='--', which='both')\n", (2019, 2049), True, 'import matplotlib.pyplot as plt\n'), ((2052, 2096), 'matplotlib.pyplot.title', 'plt.title', (['"""Auto-correlação do sinal $y(t)$"""'], {}), "('Auto-correlação do sinal $y(t)$')\n", (2061, 2096), True, 'import matplotlib.pyplot as plt\n'), ((2097, 2126), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$R_{yy}(\\\\tau)$"""'], {}), "('$R_{yy}(\\\\tau)$')\n", (2107, 2126), True, 'import matplotlib.pyplot as plt\n'), ((2127, 2152), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\tau$ [s]"""'], {}), "('$\\\\tau$ [s]')\n", (2137, 2152), True, 'import matplotlib.pyplot as plt\n'), ((2154, 2172), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2170, 2172), True, 'import matplotlib.pyplot as plt\n'), ((2215, 2242), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (2225, 2242), True, 'import matplotlib.pyplot as plt\n'), ((2299, 2337), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'linestyle': '"""--"""', 'which': '"""both"""'}), "(linestyle='--', which='both')\n", (2307, 2337), True, 'import matplotlib.pyplot as plt\n'), ((2340, 2390), 'matplotlib.pyplot.title', 'plt.title', (['"""Correlação cruzada de $x(t)$ e $y(t)$"""'], {}), "('Correlação cruzada de $x(t)$ e $y(t)$')\n", (2349, 2390), True, 'import matplotlib.pyplot as plt\n'), ((2392, 2421), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$R_{xy}(\\\\tau)$"""'], {}), "('$R_{xy}(\\\\tau)$')\n", (2402, 2421), True, 'import matplotlib.pyplot as plt\n'), ((2422, 2447), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\tau$ [s]"""'], {}), "('$\\\\tau$ [s]')\n", (2432, 2447), True, 'import matplotlib.pyplot as plt\n'), ((2448, 2466), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2464, 2466), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
import yaml
import numpy as np
import cv2
import rospy
from std_msgs.msg import Float32MultiArray
def callback(data):
filepath="/home/soumil/catkin_ws/src/camera_pos_vec/src/scripts"
filename="untitled"
	with open(f"{filepath}/{filename}.yaml") as stream:  # context manager closes the file
		camera_parameters=yaml.load(stream,Loader=yaml.FullLoader)
camera_matrix=camera_parameters['camera_matrix']
camera_matrix=np.array(camera_matrix['data'])
camera_matrix=np.reshape(camera_matrix,(3,3))
distortion_coefficients=camera_parameters['distortion_coefficients']
distortion_coefficients=np.array(distortion_coefficients['data'])
	object_points= np.array([(-150.0,100.0,0.0),(150.0,100.0,0.0),(150.0,-100.0,0.0),(-150.0,-100.0,0.0)])  # 3D corners of the target in the object frame
	# the matching 2D corners are built below from the ROS message (a flat x1,y1,x2,y2 box)
image_points=np.array(data.data)
image_points=np.array((
(image_points[0],image_points[1]),
(image_points[2],image_points[1]),
(image_points[2],image_points[3]),
(image_points[0],image_points[3]),
)
)
	print(type(image_points[0][0]))  # debug: solvePnP expects floating-point image points
success,rotation_vector,translation_vector=cv2.solvePnP(
object_points,
image_points,
camera_matrix,
distortion_coefficients,
flags=cv2.SOLVEPNP_IPPE
)
print ("translation_vector = ",translation_vector)
print ("rotation_vector = ",rotation_vector)
def listener():
rospy.init_node('pos_vector_calc', anonymous=True)
rospy.Subscriber('yolo_xyxy', Float32MultiArray, callback)
rospy.spin()
if __name__ == '__main__':
listener() | [
"yaml.load",
"rospy.Subscriber",
"cv2.solvePnP",
"numpy.array",
"numpy.reshape",
"rospy.init_node",
"rospy.spin"
] | [((318, 370), 'yaml.load', 'yaml.load', (['camera_parameters'], {'Loader': 'yaml.FullLoader'}), '(camera_parameters, Loader=yaml.FullLoader)\n', (327, 370), False, 'import yaml\n'), ((441, 472), 'numpy.array', 'np.array', (["camera_matrix['data']"], {}), "(camera_matrix['data'])\n", (449, 472), True, 'import numpy as np\n'), ((491, 524), 'numpy.reshape', 'np.reshape', (['camera_matrix', '(3, 3)'], {}), '(camera_matrix, (3, 3))\n', (501, 524), True, 'import numpy as np\n'), ((625, 666), 'numpy.array', 'np.array', (["distortion_coefficients['data']"], {}), "(distortion_coefficients['data'])\n", (633, 666), True, 'import numpy as np\n'), ((686, 786), 'numpy.array', 'np.array', (['[(-150.0, 100.0, 0.0), (150.0, 100.0, 0.0), (150.0, -100.0, 0.0), (-150.0, \n -100, 0.0)]'], {}), '([(-150.0, 100.0, 0.0), (150.0, 100.0, 0.0), (150.0, -100.0, 0.0),\n (-150.0, -100, 0.0)])\n', (694, 786), True, 'import numpy as np\n'), ((832, 851), 'numpy.array', 'np.array', (['data.data'], {}), '(data.data)\n', (840, 851), True, 'import numpy as np\n'), ((869, 1031), 'numpy.array', 'np.array', (['((image_points[0], image_points[1]), (image_points[2], image_points[1]), (\n image_points[2], image_points[3]), (image_points[0], image_points[3]))'], {}), '(((image_points[0], image_points[1]), (image_points[2],\n image_points[1]), (image_points[2], image_points[3]), (image_points[0],\n image_points[3])))\n', (877, 1031), True, 'import numpy as np\n'), ((1151, 1261), 'cv2.solvePnP', 'cv2.solvePnP', (['object_points', 'image_points', 'camera_matrix', 'distortion_coefficients'], {'flags': 'cv2.SOLVEPNP_IPPE'}), '(object_points, image_points, camera_matrix,\n distortion_coefficients, flags=cv2.SOLVEPNP_IPPE)\n', (1163, 1261), False, 'import cv2\n'), ((1415, 1465), 'rospy.init_node', 'rospy.init_node', (['"""pos_vector_calc"""'], {'anonymous': '(True)'}), "('pos_vector_calc', anonymous=True)\n", (1430, 1465), False, 'import rospy\n'), ((1468, 1526), 'rospy.Subscriber', 'rospy.Subscriber', (['"""yolo_xyxy"""', 'Float32MultiArray', 'callback'], {}), "('yolo_xyxy', Float32MultiArray, callback)\n", (1484, 1526), False, 'import rospy\n'), ((1529, 1541), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1539, 1541), False, 'import rospy\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue May 9 16:44:16 2017
@author: fleischer
"""
import pandas as pd
import numpy as np
import seaborn as sns
from astral import *
def firstAndLastLight(data, threshold_list, resamp=False):
    ''' firstAndLastLight(data, threshold_list, resamp=False) applies every threshold in the list to each unique person-day in the data, finding the first and last times at which light intensity exceeds each non-zero threshold, along with the total time spent above it. A threshold of 0 is a request to calculate the amount of time spent at 5 lux and under. The data are resampled in time if resamp has the form [function, 'period'], e.g. [np.mean, '5T'] or [np.max, '15T'].'''
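    # A hypothetical call, assuming `data` follows the column layout used below
    # ('5T' = 5-minute bins; the threshold values are illustrative):
    #   summary = firstAndLastLight(watch_df, [0, 10, 100, 1000], resamp=[np.mean, '5T'])
    #   summary.groupby('Threshold')['Minutes above threshold'].mean()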
ids = data.UID.unique()
firstlight = []
lastlight = []
min2fl = []
min2ll = []
whoswatch = []
watchperiod = []
thresholds = []
datelist = []
grouplist = []
totalact=[]
tabvlight=[]
tabvlightAM=[]
tluxmin = []
tluxminAM = []
for uid in ids:
print(uid)
these_rows = (data.UID == uid) & (data['Interval Status'].isin(['ACTIVE','REST'])) & np.logical_not(data['Off-Wrist Status'])
assert (these_rows.sum() > 0),"OOOPS!!#!#! "+uid+" has no ACTIVE rows"
daysofdata = set( data[ these_rows ].index.date )
if 'Group' in data.columns:
group = data[data.UID == uid].iloc[0,:].Group
elif 'Season' in data.columns:
group = data[data.UID == uid].iloc[0,:].Season
else:
print("OOOPS!!#! No group variable??")
raise ValueError
for a_day in daysofdata:
nextday = a_day + pd.tseries.offsets.Day()
nextday = nextday.date().isoformat()
thisday = a_day.isoformat()
daylight = data[these_rows][thisday + ' 04:00:00' : nextday + ' 03:59:00']['White Light']
if resamp: # resample if the function argument is set
daylight = daylight.resample(resamp[1]).apply(resamp[0])
            # watch update period for today's data
dperiod = daylight.index.to_series().diff().min()
dpmult = dperiod/pd.Timedelta('1 min') # multiplier to get lux-minutes later
lxmin = dpmult * daylight.sum()
lxminAM = dpmult * daylight[:thisday + ' 12:00'].sum()
for a_thresh in threshold_list:
thresholds.append(a_thresh)
if a_thresh==0:
                    abovethresh = daylight.index[ daylight < 5] # a 0 threshold is a request to calculate time under 5 lux
abovethreshAM = daylight[:thisday + ' 12:00'].index[ daylight[:thisday + ' 12:00'] < 5]
else:
abovethresh = daylight.index[ daylight > a_thresh]
abovethreshAM = daylight[:thisday + ' 12:00'].index[ daylight[:thisday + ' 12:00'] > a_thresh]
tabvlight.append( dperiod * len(abovethresh))
tabvlightAM.append( dperiod * len(abovethreshAM))
tluxmin.append( lxmin )
tluxminAM.append( lxminAM )
watchperiod.append(dperiod)
datelist.append(a_day)
grouplist.append(group)
try:
timelight = abovethresh[-1] # last time is above threshold
mins4am = (timelight.time().hour - 4) * 60 + timelight.time().minute
if mins4am<0: # if after midnight, then value above is negative
mins4am += 24*60 # fix by adding 24 hours to it
except IndexError: # there is no above threshold level all day long
timelight = np.nan
mins4am = np.nan
lastlight.append(timelight)
min2ll.append(mins4am)
try:
timelight = abovethresh[0] # first time is above threshold
mins4am = (timelight.time().hour - 4) * 60 + timelight.time().minute
if mins4am<0: # if after midnight, then value above is negative
mins4am += 24*60 # fix by adding 24 hours to it
except IndexError: # there is no above threshold level all day long
timelight = np.nan
mins4am = np.nan
firstlight.append(timelight)
min2fl.append(mins4am)
whoswatch.append(uid)
#print("{} {} {} of 0-lux with period {}\n".format(uid,a_day,len(daylight[daylight==0])*dperiod,dperiod))
#print( len( whoswatch), len(lastlight), len(firstlight), len(min2ll), len(min2fl))
return pd.DataFrame( {'UID': whoswatch, 'Date': datelist, 'Threshold': thresholds,
'Last Light': lastlight, 'Mins to LL from 4AM': min2ll,
'First Light': firstlight, 'Mins to FL from 4AM': min2fl,
'Time above threshold': tabvlight, 'Time above threshold AM': tabvlightAM,
'Minutes above threshold': [ el.total_seconds()/60.0 for el in tabvlight],
'Minutes above threshold AM': [ el.total_seconds()/60.0 for el in tabvlightAM],
'Lux minutes': tluxmin, 'Lux minutes AM': tluxminAM,
'Group': grouplist,
'Watch period': watchperiod
} )
| [
"pandas.tseries.offsets.Day",
"numpy.logical_not",
"pandas.Timedelta"
] | [((1141, 1181), 'numpy.logical_not', 'np.logical_not', (["data['Off-Wrist Status']"], {}), "(data['Off-Wrist Status'])\n", (1155, 1181), True, 'import numpy as np\n'), ((1790, 1814), 'pandas.tseries.offsets.Day', 'pd.tseries.offsets.Day', ([], {}), '()\n', (1812, 1814), True, 'import pandas as pd\n'), ((2337, 2358), 'pandas.Timedelta', 'pd.Timedelta', (['"""1 min"""'], {}), "('1 min')\n", (2349, 2358), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import *
from numba.testing import test_support
import numpy
import unittest
# NOTE: See also numba.tests.ops.test_binary_ops
def maxstar1d(a, b):
M = a.shape[0]
res = numpy.empty(M)
for i in range(M):
        # max*(x, y) = max(x, y) + log1p(exp(-|x - y|)); wrap the pair in a list
        # so numpy.max reduces over it instead of treating b[i] as an axis
        res[i] = numpy.max([a[i], b[i]]) + numpy.log1p(
            numpy.exp(-numpy.abs(a[i] - b[i])))
return res
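# max*(a, b) smoothly approximates max(a, b); e.g. for the pair (0.0, 1.0):
# max(0, 1) + log1p(exp(-1)) = 1 + 0.3133... ≈ 1.3133.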
class TestIssue56(unittest.TestCase):
def test_maxstar1d(self):
test_fn = jit('f8[:](f8[:],f8[:])')(maxstar1d)
test_a = numpy.random.random(10)
test_b = numpy.random.random(10)
self.assertTrue(numpy.allclose(test_fn(test_a, test_b),
maxstar1d(test_a, test_b)))
if __name__ == "__main__":
# TestIssue56("test_maxstar1d").debug()
test_support.main()
| [
"numpy.abs",
"numpy.empty",
"numpy.max",
"numpy.random.random",
"numba.testing.test_support.main"
] | [((281, 295), 'numpy.empty', 'numpy.empty', (['M'], {}), '(M)\n', (292, 295), False, 'import numpy\n'), ((848, 867), 'numba.testing.test_support.main', 'test_support.main', ([], {}), '()\n', (865, 867), False, 'from numba.testing import test_support\n'), ((577, 600), 'numpy.random.random', 'numpy.random.random', (['(10)'], {}), '(10)\n', (596, 600), False, 'import numpy\n'), ((618, 641), 'numpy.random.random', 'numpy.random.random', (['(10)'], {}), '(10)\n', (637, 641), False, 'import numpy\n'), ((336, 357), 'numpy.max', 'numpy.max', (['a[i]', 'b[i]'], {}), '(a[i], b[i])\n', (345, 357), False, 'import numpy\n'), ((396, 418), 'numpy.abs', 'numpy.abs', (['(a[i] - b[i])'], {}), '(a[i] - b[i])\n', (405, 418), False, 'import numpy\n')] |
import gensim
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from matplotlib.mlab import PCA
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
# Load Doc2Vec model
model= Doc2Vec.load("d2v.model")
# Names for tags
names = ['Admiral_Ackbar', 'Ahsoka_Tano', 'Aragorn', 'Arwen', 'Asajj_Ventress', 'BB-8', 'Bail_Organa', 'Barliman_Butterbur', 'Beechbone', 'Beregond_and_Bergil', 'Bergil', 'Bilbo_Baggins', 'Boba_Fett', 'Boromir', 'Bregalad', 'C-3PO', 'Cad_Bane', 'Captain_Phasma', 'Celeborn', 'Chewbacca', 'Clone_trooper', 'Count_Dooku', 'Círdan', 'Darth_Maul', 'Darth_Vader', 'Denethor', 'Droid_(Star_Wars)', 'Déagol', 'Dúnhere', 'Elfhelm', 'Elladan_and_Elrohir', 'Elrond', 'Erestor', 'Erkenbrand', 'Faramir', 'Farmer_Maggot', 'Finn_(Star_Wars)', 'Forlong_the_Fat', 'Fredegar_Bolger', 'Frodo_Baggins', 'Galadriel', 'Gamling', 'Gandalf', 'General_Hux', 'Ghân-buri-Ghân', 'Gildor_Inglorion', 'Gimli_(Middle-earth)', 'Glorfindel', 'Goldberry', 'Gollum', 'Gothmog_(Third_Age)', 'Grand_Admiral_Thrawn', 'Grand_Moff_Tarkin', 'Grimbold', 'Gríma_Wormtongue', 'HK-47',
'Haldir_of_Lórien', 'Hamfast_Gamgee', 'Han_Solo', 'Háma_(Middle-earth)', 'Húrin_the_Tall', 'Isildur', 'Jango_Fett', 'K-2SO', 'Kylo_Ren', 'L3-37', 'Lando_Calrissian', 'Legolas', 'Lieutenant_Connix', 'List_of_Star_Wars_Legends_characters', 'Luke_Skywalker', 'Mace_Windu', 'Mandalorian', 'Mara_Jade', 'Max_Rebo_Band', 'Meriadoc_Brandybuck', 'Mouth_of_Sauron', 'Nazgûl', 'Nien_Nunb', 'Old_Man_Willow', 'Orophin', 'Orson_Krennic', 'Padmé_Amidala', 'Peregrin_Took', 'Princess_Leia', 'Qui-Gon_Jinn', 'R2-D2', 'Radagast', 'Rey_(Star_Wars)', 'Rogue_Squadron', 'Rose_Tico', 'Samwise_Gamgee', 'Saruman', 'Sauron', 'Saw_Gerrera', 'Shelob', 'Skywalker_family', 'Solo_family', 'Starkiller', 'Stormtrooper_(Star_Wars)', 'Tag_and_Bink', 'Théoden', 'Théodred', 'Tom_Bombadil', 'Treebeard', 'Tusken_Raiders', 'Wedge_Antilles', 'Witch-king_of_Angmar', 'Éomer', 'Éomund', 'Éothain', 'Éowyn']
# Subset needed for tags
tagnames = ['Éomer', 'Elfhelm', 'Théoden', 'General_Hux', 'Lieutenant_Connix', 'Starkiller', 'HK-47', 'Samwise_Gamgee', 'Legolas', 'Aragorn', 'Darth_Vader', 'Gandalf', 'C-3PO', 'Darth_Maul', 'Beechbone', 'Count_Dooku', 'Bilbo_Baggins' , 'Cad_Bane']
# Take vector embeddings and do PCA analysis
data = [model.docvecs[i] for i in range(len(model.docvecs))]
dataMatrix = np.array(data)
myPCA = PCA(dataMatrix)
'''
# This part plots all tags
for i in range(len(names)):
x = myPCA.Y[i,0]
y = myPCA.Y[i,1]
plt.plot(x, y, 'bo')
plt.text(x * (1 + 0.01), y * (1 + 0.01) , names[i], fontsize=8)
'''
#This part only plots a subset of tags.
for i in range(len(names)):
x = myPCA.Y[i,0]
y = myPCA.Y[i,1]
plt.plot(x, y, 'bo')
for name in tagnames:
i = names.index(name)
x = myPCA.Y[i,0]
y = myPCA.Y[i,1]
plt.text(x * (1 + 0.01), y * (1 + 0.01) , names[i], fontsize=10)
j = names.index('R2-D2')  # label R2-D2 separately, nudged downward to avoid overlap
x = myPCA.Y[j,0]
y = myPCA.Y[j,1]
plt.text(x * (1 + 0.01), y * (1 - 0.2) , names[j], fontsize=10)
# Give plot title and axes
plt.title('Doc2Vec document embeddings plotted in 1. and 2. principal components')
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
# Show the plot
plt.show()
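# Note: matplotlib.mlab.PCA has been removed from modern Matplotlib; a similar
# two-component projection with scikit-learn (assuming it is installed, and
# noting that mlab.PCA also standardizes columns by default) would be:
#   from sklearn.decomposition import PCA as SkPCA
#   proj = SkPCA(n_components=2).fit_transform(dataMatrix)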
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"gensim.models.doc2vec.Doc2Vec.load",
"matplotlib.pyplot.text",
"matplotlib.mlab.PCA",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((224, 249), 'gensim.models.doc2vec.Doc2Vec.load', 'Doc2Vec.load', (['"""d2v.model"""'], {}), "('d2v.model')\n", (236, 249), False, 'from gensim.models.doc2vec import Doc2Vec, TaggedDocument\n'), ((2378, 2392), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2386, 2392), True, 'import numpy as np\n'), ((2402, 2417), 'matplotlib.mlab.PCA', 'PCA', (['dataMatrix'], {}), '(dataMatrix)\n', (2405, 2417), False, 'from matplotlib.mlab import PCA\n'), ((2974, 3036), 'matplotlib.pyplot.text', 'plt.text', (['(x * (1 + 0.01))', '(y * (1 - 0.2))', 'names[j]'], {'fontsize': '(10)'}), '(x * (1 + 0.01), y * (1 - 0.2), names[j], fontsize=10)\n', (2982, 3036), True, 'import matplotlib.pyplot as plt\n'), ((3067, 3154), 'matplotlib.pyplot.title', 'plt.title', (['"""Doc2Vec document embeddings plotted in 1. and 2. principal components"""'], {}), "(\n 'Doc2Vec document embeddings plotted in 1. and 2. principal components')\n", (3076, 3154), True, 'import matplotlib.pyplot as plt\n'), ((3150, 3189), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""First principal component"""'], {}), "('First principal component')\n", (3160, 3189), True, 'import matplotlib.pyplot as plt\n'), ((3190, 3230), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Second principal component"""'], {}), "('Second principal component')\n", (3200, 3230), True, 'import matplotlib.pyplot as plt\n'), ((3243, 3253), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3251, 3253), True, 'import matplotlib.pyplot as plt\n'), ((2733, 2753), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""bo"""'], {}), "(x, y, 'bo')\n", (2741, 2753), True, 'import matplotlib.pyplot as plt\n'), ((2849, 2912), 'matplotlib.pyplot.text', 'plt.text', (['(x * (1 + 0.01))', '(y * (1 + 0.01))', 'names[i]'], {'fontsize': '(10)'}), '(x * (1 + 0.01), y * (1 + 0.01), names[i], fontsize=10)\n', (2857, 2912), True, 'import matplotlib.pyplot as plt\n')] |
# mixmodels.py - Parametric model mixer
# ---------------------------------------------------------------
# This file is a part of DeerLab. License is MIT (see LICENSE.md).
# Copyright(c) 2019-2021: <NAME>, <NAME> and other contributors.
import numpy as np
import types
def mixmodels(*models):
r"""
Combine DeerLab parametric models into a mixed multi-component model.
Parameters
----------
models : callables
Parametric DeerLab models to be combined.
Returns
--------
newmodel : callable
Mixed model.
Examples
--------
If one mixes a single Gaussian model (2 parameters) with a worm-like chain (WLC) model (2 parameters) into a single model::
newmodel = mixmodels(dd_gauss,dd_wormchain)
the resulting model newmodel will contain 6 parameters in the following order: the 2 single-Gaussian parameters,
1 amplitude parameter for the Gaussian model, the 2 WLC parameters, and 1 amplitude parameter for the WLC model.
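
    Evaluating the mixture then takes all six parameters at once, in that
    order (the parameter values here are purely illustrative)::

        P = newmodel(r, [3.5, 0.3, 0.5, 3.7, 10.0, 0.5])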
"""
if len(models)==0:
raise KeyError('At least one model must be provided.')
if np.any([type(model) is not types.FunctionType for model in models]):
raise TypeError('Input arguments must all be function handles.')
    # Determine the number of models to be mixed
nModels = len(models)
# Combine info structures from all models
idx = 0
Info = dict(Parameters=[],Units=[],Start=[],Lower=[],Upper=[])
pidx = []
pidx_amp = []
for i,model in enumerate(models):
nparam = len(model.start)
pidx.append(idx + np.arange(0,nparam))
idx = idx + nparam
for j in range(nparam):
Info['Parameters'].append(f'Model {i+1}: {model.parameters[j]}')
Info['Units'].append(model.units[j])
Info['Lower'].append(model.lower[j])
Info['Upper'].append(model.upper[j])
Info['Start'].append(model.start[j])
# Add amplitudes for each model
Info['Parameters'].append(f'Model {i+1}: Amplitude')
Info['Units'].append('')
Info['Lower'].append(0)
Info['Upper'].append(1)
Info['Start'].append(1/nModels)
pidx_amp.append(len(Info['Start'])-1)
idx = idx + 1
# Convert the numerical fields to numpy arrays
Info['Lower'] = np.asarray(Info['Lower'])
Info['Upper'] = np.asarray(Info['Upper'])
Info['Start'] = np.asarray(Info['Start'])
# =================================================================
def setmetadata(parameters,units,start,lower,upper):
"""
Decorator: Set model metadata as function attributes
"""
def _setmetadata(func):
func.parameters = parameters
func.units = units
func.start = start
func.lower = lower
func.upper = upper
return func
return _setmetadata
# =================================================================
# =================================================================
@setmetadata(
parameters = Info['Parameters'],
units = Info['Units'],
start = Info['Start'],
lower = Info['Lower'],
upper = Info['Upper'])
def mixedFunction(ax, params):
"""
Mixed model function handle
---------------------------
Function to allow request of information structure or model values
"""
params = np.atleast_1d(params)
evaled = 0
for k in range(nModels):
evaled = evaled + params[pidx_amp[k]]*models[k](ax,params[pidx[k]])
#Normalize the distribution if it is a distance distribution model
isddmodel = any(['dd' in model.__name__ for model in models])
if isddmodel and not np.all(evaled==0):
evaled = evaled/np.trapz(evaled,ax)
return evaled
# =======================================================================
return mixedFunction | [
"numpy.trapz",
"numpy.asarray",
"numpy.arange",
"numpy.atleast_1d",
"numpy.all"
] | [((2373, 2398), 'numpy.asarray', 'np.asarray', (["Info['Lower']"], {}), "(Info['Lower'])\n", (2383, 2398), True, 'import numpy as np\n'), ((2420, 2445), 'numpy.asarray', 'np.asarray', (["Info['Upper']"], {}), "(Info['Upper'])\n", (2430, 2445), True, 'import numpy as np\n'), ((2467, 2492), 'numpy.asarray', 'np.asarray', (["Info['Start']"], {}), "(Info['Start'])\n", (2477, 2492), True, 'import numpy as np\n'), ((3525, 3546), 'numpy.atleast_1d', 'np.atleast_1d', (['params'], {}), '(params)\n', (3538, 3546), True, 'import numpy as np\n'), ((1622, 1642), 'numpy.arange', 'np.arange', (['(0)', 'nparam'], {}), '(0, nparam)\n', (1631, 1642), True, 'import numpy as np\n'), ((3869, 3888), 'numpy.all', 'np.all', (['(evaled == 0)'], {}), '(evaled == 0)\n', (3875, 3888), True, 'import numpy as np\n'), ((3917, 3937), 'numpy.trapz', 'np.trapz', (['evaled', 'ax'], {}), '(evaled, ax)\n', (3925, 3937), True, 'import numpy as np\n')] |
# /benchmark.py
#
# Script to benchmark agent performance.
#
# See /LICENCE.md for Copyright information
"""Script to benchmark agent performance."""
import argparse
import sys
import os
import re
import subprocess
import matplotlib
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from glob import glob
from operator import itemgetter
from tqdm import tqdm
def run(argv, *args, **kwargs):
"""Wrap subprocess.run and log what commands it runs."""
return subprocess.run(argv, *args, **kwargs)
def get_repo_dir():
return os.path.dirname(os.path.abspath(__file__))
def get_bin_dir():
return os.path.join(get_repo_dir(), "bin")
def get_source_files():
return glob(
os.path.join(get_repo_dir(), "src", "**", "*.java"),
recursive=True
)
def compile():
run(["javac", "-d", get_bin_dir()] + get_source_files(), check=True)
def simulate_game(number_players):
result = run(
["java", "-cp", get_bin_dir(), "hanabAI.Hanabi", str(number_players)],
encoding="utf8",
stdout=subprocess.PIPE,
# stderr=subprocess.PIPE,
check=True
)
match = re.search(r"The final score is (?P<score>\d+).", result.stdout)
score = -1 if match is None else int(match.group("score"))
return result, score
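# e.g. an engine line "The final score is 17." yields score == 17, while a
# missing summary line is reported as score == -1.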
def plt_histogram(values):
    num_bins = 26  # one bin per possible score (0-25)
xs = np.array(values)
fig, ax = plt.subplots()
n, bins, patches = ax.hist(xs, range=(0, 26), bins=num_bins, density=True)
ax.set_xlabel("Scores")
ax.set_ylabel("Probability Density")
ax.set_title("Performance")
plt.savefig("results.svg", format="svg")
plt.show()
def plt_histogram_grid(titles, sets_of_values):
    num_bins = 26  # one bin per possible score (0-25)
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(20, 15))
plt.subplots_adjust(hspace=0.3, wspace=0.3)
for title, ax, values in zip(titles, axes.flatten(), sets_of_values):
xs = np.array(values)
n, bins, patches = ax.hist(xs, range=(0, 26), bins=num_bins, density=True)
ax.set_xlabel("Scores")
ax.set_ylabel("Probability Density")
ax.set_title(title + r' $\mu={},\ \sigma={}$'.format(xs.mean(), xs.std()))
plt.savefig("results.svg", format="svg")
plt.show()
def write_debug_log(game_no, game_proc, log_dir):
os.makedirs(log_dir, exist_ok=True)
with open(os.path.join(log_dir, "game-{}.txt".format(game_no)), "w+") as f:
f.write(game_proc.stderr or "")
f.write(game_proc.stdout or "")
def main(argv=None):
"""Entry points for the benchmarker."""
argv = argv or sys.argv[1:]
    parser = argparse.ArgumentParser(description="""Agent benchmarker""")
# parser.add_argument("--players",
# metavar="NUMBER_PLAYERS",
# type=int,
# default=5,
# nargs="?",
# help="""Number of players in the game.""",
# dest="number_players")
parser.add_argument("--iterations",
metavar="ITERATIONS",
type=int,
default=50,
nargs="?",
help="""Number games to play.""",
dest="iterations")
parser.add_argument("--debug-score",
metavar="DEBUG_SCORE",
type=int,
default=0,
help="""Score below which to dump game logs.""",
dest="debug_score")
parser.add_argument("--debug-dump",
metavar="DEBUG_SCORE",
type=str,
default=os.path.join(get_repo_dir(), "logs"),
help="""Score below which to dump game logs.""",
dest="log_dir")
result = parser.parse_args(argv)
compile()
titles = []
sets_of_scores = []
for n_players in tqdm(range(2, 6)):
title = "{} Players ({} Iterations)".format(n_players, result.iterations)
scores = []
for i in tqdm(range(result.iterations)):
proc, score = simulate_game(n_players)
if score <= result.debug_score:
write_debug_log(i, proc, result.log_dir)
print("Wrote debug log for game {}".format(i), file=sys.stderr)
if score == -1:
continue
scores.append(score)
titles.append(title)
sets_of_scores.append(scores)
plt_histogram_grid(titles, sets_of_scores)
if __name__ == "__main__":
main()
| [
"subprocess.run",
"os.path.abspath",
"matplotlib.pyplot.show",
"os.makedirs",
"argparse.ArgumentParser",
"matplotlib.pyplot.subplots",
"numpy.array",
"matplotlib.pyplot.subplots_adjust",
"re.search",
"matplotlib.pyplot.savefig"
] | [((500, 537), 'subprocess.run', 'subprocess.run', (['argv', '*args'], {}), '(argv, *args, **kwargs)\n', (514, 537), False, 'import subprocess\n'), ((1162, 1225), 're.search', 're.search', (['"""The final score is (?P<score>\\\\d+)."""', 'result.stdout'], {}), "('The final score is (?P<score>\\\\d+).', result.stdout)\n", (1171, 1225), False, 'import re\n'), ((1401, 1417), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (1409, 1417), True, 'import numpy as np\n'), ((1432, 1446), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1444, 1446), True, 'import matplotlib.pyplot as plt\n'), ((1631, 1671), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results.svg"""'], {'format': '"""svg"""'}), "('results.svg', format='svg')\n", (1642, 1671), True, 'import matplotlib.pyplot as plt\n'), ((1676, 1686), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1684, 1686), True, 'import matplotlib.pyplot as plt\n'), ((1802, 1850), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(20, 15)'}), '(nrows=2, ncols=2, figsize=(20, 15))\n', (1814, 1850), True, 'import matplotlib.pyplot as plt\n'), ((1855, 1898), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.3)', 'wspace': '(0.3)'}), '(hspace=0.3, wspace=0.3)\n', (1874, 1898), True, 'import matplotlib.pyplot as plt\n'), ((2250, 2290), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results.svg"""'], {'format': '"""svg"""'}), "('results.svg', format='svg')\n", (2261, 2290), True, 'import matplotlib.pyplot as plt\n'), ((2295, 2305), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2303, 2305), True, 'import matplotlib.pyplot as plt\n'), ((2362, 2397), 'os.makedirs', 'os.makedirs', (['log_dir'], {'exist_ok': '(True)'}), '(log_dir, exist_ok=True)\n', (2373, 2397), False, 'import os\n'), ((2670, 2714), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Agent benchmarker"""'], {}), "('Agent benchmarker')\n", (2693, 2714), False, 'import argparse\n'), ((586, 611), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (601, 611), False, 'import os\n'), ((1986, 2002), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (1994, 2002), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from scipy.sparse import random as sp_random
import dislib as ds
from dislib.regression import LinearRegression
from dislib.data import random_array
import dislib.data.util.model as utilmodel
class LinearRegressionTest(unittest.TestCase):
def test_univariate(self):
"""Tests fit() and predict(), univariate."""
x_data = np.array([1, 2, 3, 4, 5])
y_data = np.array([2, 1, 1, 2, 4.5])
bn, bm = 2, 1
x = ds.array(x=x_data, block_size=(bn, bm))
y = ds.array(x=y_data, block_size=(bn, bm))
reg = LinearRegression()
reg.fit(x, y)
self.assertTrue(np.allclose(reg.coef_.collect(), 0.6))
self.assertTrue(np.allclose(reg.intercept_.collect(), 0.3))
# Predict one sample
x_test = np.array([3])
test_data = ds.array(x=x_test, block_size=(1, 1))
pred = reg.predict(test_data).collect()
self.assertTrue(np.allclose(pred, 2.1))
# Predict multiple samples
x_test = np.array([3, 5, 6])
test_data = ds.array(x=x_test, block_size=(bn, bm))
pred = reg.predict(test_data).collect()
self.assertTrue(np.allclose(pred, [2.1, 3.3, 3.9]))
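    # Sanity check of the expected values above: with means x̄ = 3 and ȳ = 2.1,
    # slope = Σ(x-x̄)(y-ȳ)/Σ(x-x̄)² = 6.0/10 = 0.6 and intercept = 2.1 - 0.6·3 = 0.3.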
def test_univariate_no_intercept(self):
"""Tests fit() and predict(), univariate, fit_intercept=False."""
x_data = np.array([1, 2, 3, 4, 5])
y_data = np.array([2, 1, 1, 2, 4.5])
bn, bm = 2, 1
x = ds.array(x=x_data, block_size=(bn, bm))
y = ds.array(x=y_data, block_size=(bn, bm))
reg = LinearRegression(fit_intercept=False)
reg.fit(x, y)
self.assertTrue(np.allclose(reg.coef_.collect(), 0.68181818))
self.assertTrue(np.allclose(reg.intercept_.collect(), 0))
# Predict one sample
x_test = np.array([3])
test_data = ds.array(x=x_test, block_size=(1, 1))
pred = reg.predict(test_data).collect()
self.assertTrue(np.allclose(pred, 2.04545455))
# Predict multiple samples
x_test = np.array([3, 5, 6])
test_data = ds.array(x=x_test, block_size=(bn, bm))
pred = reg.predict(test_data).collect()
self.assertTrue(np.allclose(pred, [2.04545455, 3.4090909, 4.0909091]))
def test_multivariate(self):
"""Tests fit() and predict(), multivariate."""
x_data = np.array([[1, 2], [2, 0], [3, 1], [4, 4], [5, 3]])
y_data = np.array([2, 1, 1, 2, 4.5])
bn, bm = 2, 2
x = ds.array(x=x_data, block_size=(bn, bm))
y = ds.array(x=y_data, block_size=(bn, 1))
reg = LinearRegression()
reg.fit(x, y)
self.assertTrue(np.allclose(reg.coef_.collect(), [0.421875, 0.296875]))
self.assertTrue(np.allclose(reg.intercept_.collect(), 0.240625))
# Predict one sample
x_test = np.array([3, 2])
test_data = ds.array(x=x_test, block_size=(1, bm))
pred = reg.predict(test_data).collect()
self.assertTrue(np.allclose(pred, 2.1))
# Predict multiple samples
x_test = np.array([[3, 2], [4, 4], [1, 3]])
test_data = ds.array(x=x_test, block_size=(bn, bm))
pred = reg.predict(test_data).collect()
self.assertTrue(np.allclose(pred, [2.1, 3.115625, 1.553125]))
def test_multivariate_no_intercept(self):
"""Tests fit() and predict(), multivariate, fit_intercept=False."""
x_data = np.array([[1, 2], [2, 0], [3, 1], [4, 4], [5, 3]])
y_data = np.array([2, 1, 1, 2, 4.5])
bn, bm = 2, 2
x = ds.array(x=x_data, block_size=(bn, bm))
y = ds.array(x=y_data, block_size=(bn, 1))
reg = LinearRegression(fit_intercept=False)
reg.fit(x, y)
self.assertTrue(np.allclose(reg.coef_.collect(),
[0.48305085, 0.30367232]))
self.assertTrue(np.allclose(reg.intercept_.collect(), 0))
# Predict one sample
x_test = np.array([3, 2])
test_data = ds.array(x=x_test, block_size=(1, bm))
pred = reg.predict(test_data).collect()
self.assertTrue(np.allclose(pred, [2.05649718]))
# Predict multiple samples
x_test = np.array([[3, 2], [4, 4], [1, 3]])
test_data = ds.array(x=x_test, block_size=(bn, bm))
pred = reg.predict(test_data).collect()
self.assertTrue(np.allclose(pred, [2.05649718, 3.14689266, 1.3940678]))
def test_multivariate_multiobjective(self):
"""Tests fit() and predict(), multivariate, multiobjective."""
x_data = np.array([[1, 2, 3], [2, 0, 4], [3, 1, 8],
[4, 4, 2], [5, 3, 1], [2, 7, 1]])
y_data = np.array([[2, 0, 3], [1, 5, 2], [1, 3, 4],
[2, 7, 9], [4.5, -1, 4], [0, 0, 0]])
bn, bm = 2, 2
x = ds.array(x=x_data, block_size=(bn, bm))
y = ds.array(x=y_data, block_size=(bn, bm))
reg = LinearRegression()
reg.fit(x, y)
# Predict one sample
x_test = np.array([3, 2, 1])
test_data = ds.array(x=x_test, block_size=(1, bm))
pred = reg.predict(test_data).collect()
self.assertTrue(np.allclose(pred, [3.0318415, 1.97164872, 3.85410906]))
# Predict multiple samples
x_test = np.array([[3, 2, 1], [4, 3, 3], [1, 1, 1]])
test_data = ds.array(x=x_test, block_size=(bn, bm))
pred = reg.predict(test_data).collect()
self.assertTrue(np.allclose(pred, [[3.0318415, 1.97164872, 3.85410906],
[2.5033157, 2.65809327, 5.05310495],
[2.145797, 1.4840121, 1.5739791]]))
# Check attributes values
self.assertTrue(np.allclose(reg.coef_.collect(),
[[0.65034768, 0.34673933, 1.22176283],
[-0.41465084, -0.20584208, -0.16339571],
[-0.38211131, 0.27277365, 0.07031439]]))
self.assertTrue(np.allclose(reg.intercept_.collect(),
[2.29221145, 1.07034124, 0.44529761]))
def test_sparse(self):
"""Tests LR raises NotImplementedError for sparse data."""
np.random.seed(0)
coo_matrix = sp_random(10, 1, density=0.5)
sparse_arr = ds.array(x=coo_matrix, block_size=(5, 1))
reg = LinearRegression()
with self.assertRaises(NotImplementedError):
reg.fit(sparse_arr, sparse_arr)
dense_arr = random_array((10, 1), (5, 1))
reg.fit(dense_arr, dense_arr)
with self.assertRaises(NotImplementedError):
reg.predict(sparse_arr)
def test_load_save(self):
""" Tests LR's methods of save and load for all supported formats
and if a ValueError exception raises for non-supported formats."""
x_data = np.array([[1, 2], [2, 0], [3, 1], [4, 4], [5, 3]])
y_data = np.array([2, 1, 1, 2, 4.5])
bn, bm = 2, 2
x = ds.array(x=x_data, block_size=(bn, bm))
y = ds.array(x=y_data, block_size=(bn, 1))
reg = LinearRegression()
reg.fit(x, y)
reg.save_model("./model_LR")
reg2 = LinearRegression()
reg2.load_model("./model_LR")
x_test = np.array([3, 2])
test_data = ds.array(x=x_test, block_size=(1, bm))
x_test_m = np.array([[3, 2], [4, 4], [1, 3]])
test_data_m = ds.array(x=x_test_m, block_size=(bn, bm))
pred = reg2.predict(test_data).collect()
pred_m = reg2.predict(test_data_m).collect()
self.assertTrue(np.allclose(reg2.coef_.collect(),
[0.421875, 0.296875]))
self.assertTrue(np.allclose(reg2.intercept_.collect(), 0.240625))
self.assertTrue(np.allclose(pred, 2.1))
self.assertTrue(np.allclose(pred_m, [2.1, 3.115625, 1.553125]))
reg.save_model("./model_LR", save_format="cbor")
reg2 = LinearRegression()
reg2.load_model("./model_LR", load_format="cbor")
pred = reg2.predict(test_data).collect()
pred_m = reg2.predict(test_data_m).collect()
self.assertTrue(np.allclose(reg2.coef_.collect(),
[0.421875, 0.296875]))
self.assertTrue(np.allclose(reg2.intercept_.collect(), 0.240625))
self.assertTrue(np.allclose(pred, 2.1))
self.assertTrue(np.allclose(pred_m, [2.1, 3.115625, 1.553125]))
reg.save_model("./model_LR", save_format="pickle")
reg2 = LinearRegression()
reg2.load_model("./model_LR", load_format="pickle")
pred = reg2.predict(test_data).collect()
pred_m = reg2.predict(test_data_m).collect()
self.assertTrue(np.allclose(reg2.coef_.collect(),
[0.421875, 0.296875]))
self.assertTrue(np.allclose(reg2.intercept_.collect(), 0.240625))
self.assertTrue(np.allclose(pred, 2.1))
self.assertTrue(np.allclose(pred_m, [2.1, 3.115625, 1.553125]))
with self.assertRaises(ValueError):
reg.save_model("./model_LR", save_format="txt")
with self.assertRaises(ValueError):
reg2 = LinearRegression()
reg2.load_model("./model_LR", load_format="txt")
x_data = np.array([[1, 2], [2, 0], [3, 1], [4, 4], [5, 3]])
y_data = np.array([4.5, 2, 1, 1, 2])
bn, bm = 2, 2
x = ds.array(x=x_data, block_size=(bn, bm))
y = ds.array(x=y_data, block_size=(bn, 1))
reg = LinearRegression()
reg.fit(x, y)
reg.save_model("./model_LR", overwrite=False)
reg2 = LinearRegression()
reg2.load_model("./model_LR", load_format="pickle")
pred = reg2.predict(test_data).collect()
pred_m = reg2.predict(test_data_m).collect()
self.assertTrue(np.allclose(reg2.coef_.collect(),
[0.421875, 0.296875]))
self.assertTrue(np.allclose(reg2.intercept_.collect(), 0.240625))
self.assertTrue(np.allclose(pred, 2.1))
self.assertTrue(np.allclose(pred_m, [2.1, 3.115625, 1.553125]))
cbor2_module = utilmodel.cbor2
utilmodel.cbor2 = None
with self.assertRaises(ModuleNotFoundError):
reg.save_model("./model_LR", save_format="cbor")
with self.assertRaises(ModuleNotFoundError):
reg2.load_model("./model_LR", load_format="cbor")
utilmodel.cbor2 = cbor2_module
def main():
unittest.main()
if __name__ == '__main__':
main()
| [
"unittest.main",
"dislib.regression.LinearRegression",
"numpy.random.seed",
"scipy.sparse.random",
"dislib.data.random_array",
"numpy.allclose",
"dislib.array",
"numpy.array"
] | [((10464, 10479), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10477, 10479), False, 'import unittest\n'), ((380, 405), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (388, 405), True, 'import numpy as np\n'), ((423, 450), 'numpy.array', 'np.array', (['[2, 1, 1, 2, 4.5]'], {}), '([2, 1, 1, 2, 4.5])\n', (431, 450), True, 'import numpy as np\n'), ((487, 526), 'dislib.array', 'ds.array', ([], {'x': 'x_data', 'block_size': '(bn, bm)'}), '(x=x_data, block_size=(bn, bm))\n', (495, 526), True, 'import dislib as ds\n'), ((539, 578), 'dislib.array', 'ds.array', ([], {'x': 'y_data', 'block_size': '(bn, bm)'}), '(x=y_data, block_size=(bn, bm))\n', (547, 578), True, 'import dislib as ds\n'), ((594, 612), 'dislib.regression.LinearRegression', 'LinearRegression', ([], {}), '()\n', (610, 612), False, 'from dislib.regression import LinearRegression\n'), ((813, 826), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (821, 826), True, 'import numpy as np\n'), ((847, 884), 'dislib.array', 'ds.array', ([], {'x': 'x_test', 'block_size': '(1, 1)'}), '(x=x_test, block_size=(1, 1))\n', (855, 884), True, 'import dislib as ds\n'), ((1034, 1053), 'numpy.array', 'np.array', (['[3, 5, 6]'], {}), '([3, 5, 6])\n', (1042, 1053), True, 'import numpy as np\n'), ((1074, 1113), 'dislib.array', 'ds.array', ([], {'x': 'x_test', 'block_size': '(bn, bm)'}), '(x=x_test, block_size=(bn, bm))\n', (1082, 1113), True, 'import dislib as ds\n'), ((1358, 1383), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (1366, 1383), True, 'import numpy as np\n'), ((1401, 1428), 'numpy.array', 'np.array', (['[2, 1, 1, 2, 4.5]'], {}), '([2, 1, 1, 2, 4.5])\n', (1409, 1428), True, 'import numpy as np\n'), ((1465, 1504), 'dislib.array', 'ds.array', ([], {'x': 'x_data', 'block_size': '(bn, bm)'}), '(x=x_data, block_size=(bn, bm))\n', (1473, 1504), True, 'import dislib as ds\n'), ((1517, 1556), 'dislib.array', 'ds.array', ([], {'x': 'y_data', 'block_size': '(bn, bm)'}), '(x=y_data, block_size=(bn, bm))\n', (1525, 1556), True, 'import dislib as ds\n'), ((1572, 1609), 'dislib.regression.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (1588, 1609), False, 'from dislib.regression import LinearRegression\n'), ((1815, 1828), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (1823, 1828), True, 'import numpy as np\n'), ((1849, 1886), 'dislib.array', 'ds.array', ([], {'x': 'x_test', 'block_size': '(1, 1)'}), '(x=x_test, block_size=(1, 1))\n', (1857, 1886), True, 'import dislib as ds\n'), ((2043, 2062), 'numpy.array', 'np.array', (['[3, 5, 6]'], {}), '([3, 5, 6])\n', (2051, 2062), True, 'import numpy as np\n'), ((2083, 2122), 'dislib.array', 'ds.array', ([], {'x': 'x_test', 'block_size': '(bn, bm)'}), '(x=x_test, block_size=(bn, bm))\n', (2091, 2122), True, 'import dislib as ds\n'), ((2356, 2406), 'numpy.array', 'np.array', (['[[1, 2], [2, 0], [3, 1], [4, 4], [5, 3]]'], {}), '([[1, 2], [2, 0], [3, 1], [4, 4], [5, 3]])\n', (2364, 2406), True, 'import numpy as np\n'), ((2424, 2451), 'numpy.array', 'np.array', (['[2, 1, 1, 2, 4.5]'], {}), '([2, 1, 1, 2, 4.5])\n', (2432, 2451), True, 'import numpy as np\n'), ((2488, 2527), 'dislib.array', 'ds.array', ([], {'x': 'x_data', 'block_size': '(bn, bm)'}), '(x=x_data, block_size=(bn, bm))\n', (2496, 2527), True, 'import dislib as ds\n'), ((2540, 2578), 'dislib.array', 'ds.array', ([], {'x': 'y_data', 'block_size': '(bn, 1)'}), '(x=y_data, block_size=(bn, 1))\n', (2548, 2578), True, 'import dislib as ds\n'), ((2594, 2612), 'dislib.regression.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2610, 2612), False, 'from dislib.regression import LinearRegression\n'), ((2835, 2851), 'numpy.array', 'np.array', (['[3, 2]'], {}), '([3, 2])\n', (2843, 2851), True, 'import numpy as np\n'), ((2872, 2910), 'dislib.array', 'ds.array', ([], {'x': 'x_test', 'block_size': '(1, bm)'}), '(x=x_test, block_size=(1, bm))\n', (2880, 2910), True, 'import dislib as ds\n'), ((3060, 3094), 'numpy.array', 'np.array', (['[[3, 2], [4, 4], [1, 3]]'], {}), '([[3, 2], [4, 4], [1, 3]])\n', (3068, 3094), True, 'import numpy as np\n'), ((3115, 3154), 'dislib.array', 'ds.array', ([], {'x': 'x_test', 'block_size': '(bn, bm)'}), '(x=x_test, block_size=(bn, bm))\n', (3123, 3154), True, 'import dislib as ds\n'), ((3413, 3463), 'numpy.array', 'np.array', (['[[1, 2], [2, 0], [3, 1], [4, 4], [5, 3]]'], {}), '([[1, 2], [2, 0], [3, 1], [4, 4], [5, 3]])\n', (3421, 3463), True, 'import numpy as np\n'), ((3481, 3508), 'numpy.array', 'np.array', (['[2, 1, 1, 2, 4.5]'], {}), '([2, 1, 1, 2, 4.5])\n', (3489, 3508), True, 'import numpy as np\n'), ((3545, 3584), 'dislib.array', 'ds.array', ([], {'x': 'x_data', 'block_size': '(bn, bm)'}), '(x=x_data, block_size=(bn, bm))\n', (3553, 3584), True, 'import dislib as ds\n'), ((3597, 3635), 'dislib.array', 'ds.array', ([], {'x': 'y_data', 'block_size': '(bn, 1)'}), '(x=y_data, block_size=(bn, 1))\n', (3605, 3635), True, 'import dislib as ds\n'), ((3651, 3688), 'dislib.regression.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (3667, 3688), False, 'from dislib.regression import LinearRegression\n'), ((3944, 3960), 'numpy.array', 'np.array', (['[3, 2]'], {}), '([3, 2])\n', (3952, 3960), True, 'import numpy as np\n'), ((3981, 4019), 'dislib.array', 'ds.array', ([], {'x': 'x_test', 'block_size': '(1, bm)'}), '(x=x_test, block_size=(1, bm))\n', (3989, 4019), True, 'import dislib as ds\n'), ((4178, 4212), 'numpy.array', 'np.array', (['[[3, 2], [4, 4], [1, 3]]'], {}), '([[3, 2], [4, 4], [1, 3]])\n', (4186, 4212), True, 'import numpy as np\n'), ((4233, 4272), 'dislib.array', 'ds.array', ([], {'x': 'x_test', 'block_size': '(bn, bm)'}), '(x=x_test, block_size=(bn, bm))\n', (4241, 4272), True, 'import dislib as ds\n'), ((4538, 4614), 'numpy.array', 'np.array', (['[[1, 2, 3], [2, 0, 4], [3, 1, 8], [4, 4, 2], [5, 3, 1], [2, 7, 1]]'], {}), '([[1, 2, 3], [2, 0, 4], [3, 1, 8], [4, 4, 2], [5, 3, 1], [2, 7, 1]])\n', (4546, 4614), True, 'import numpy as np\n'), ((4659, 4738), 'numpy.array', 'np.array', (['[[2, 0, 3], [1, 5, 2], [1, 3, 4], [2, 7, 9], [4.5, -1, 4], [0, 0, 0]]'], {}), '([[2, 0, 3], [1, 5, 2], [1, 3, 4], [2, 7, 9], [4.5, -1, 4], [0, 0, 0]])\n', (4667, 4738), True, 'import numpy as np\n'), ((4802, 4841), 'dislib.array', 'ds.array', ([], {'x': 'x_data', 'block_size': '(bn, bm)'}), '(x=x_data, block_size=(bn, bm))\n', (4810, 4841), True, 'import dislib as ds\n'), ((4854, 4893), 'dislib.array', 'ds.array', ([], {'x': 'y_data', 'block_size': '(bn, bm)'}), '(x=y_data, block_size=(bn, bm))\n', (4862, 4893), True, 'import dislib as ds\n'), ((4909, 4927), 'dislib.regression.LinearRegression', 'LinearRegression', ([], {}), '()\n', (4925, 4927), False, 'from dislib.regression import LinearRegression\n'), ((4997, 5016), 'numpy.array', 'np.array', (['[3, 2, 1]'], {}), '([3, 2, 1])\n', (5005, 5016), True, 'import numpy as np\n'), ((5037, 5075), 'dislib.array', 'ds.array', ([], {'x': 'x_test', 'block_size': '(1, bm)'}), '(x=x_test, block_size=(1, bm))\n', (5045, 5075), True, 'import dislib as ds\n'), ((5257, 5300), 'numpy.array', 'np.array', (['[[3, 2, 1], [4, 3, 3], [1, 1, 1]]'], {}), '([[3, 2, 1], [4, 3, 3], [1, 1, 1]])\n', (5265, 5300), True, 'import numpy as np\n'), ((5321, 5360), 'dislib.array', 'ds.array', ([], {'x': 'x_test', 'block_size': '(bn, bm)'}), '(x=x_test, block_size=(bn, bm))\n', (5329, 5360), True, 'import dislib as ds\n'), ((6211, 6228), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (6225, 6228), True, 'import numpy as np\n'), ((6250, 6279), 'scipy.sparse.random', 'sp_random', (['(10)', '(1)'], {'density': '(0.5)'}), '(10, 1, density=0.5)\n', (6259, 6279), True, 'from scipy.sparse import random as sp_random\n'), ((6301, 6342), 'dislib.array', 'ds.array', ([], {'x': 'coo_matrix', 'block_size': '(5, 1)'}), '(x=coo_matrix, block_size=(5, 1))\n', (6309, 6342), True, 'import dislib as ds\n'), ((6357, 6375), 'dislib.regression.LinearRegression', 'LinearRegression', ([], {}), '()\n', (6373, 6375), False, 'from dislib.regression import LinearRegression\n'), ((6493, 6522), 'dislib.data.random_array', 'random_array', (['(10, 1)', '(5, 1)'], {}), '((10, 1), (5, 1))\n', (6505, 6522), False, 'from dislib.data import random_array\n'), ((6847, 6897), 'numpy.array', 'np.array', (['[[1, 2], [2, 0], [3, 1], [4, 4], [5, 3]]'], {}), '([[1, 2], [2, 0], [3, 1], [4, 4], [5, 3]])\n', (6855, 6897), True, 'import numpy as np\n'), ((6915, 6942), 'numpy.array', 'np.array', (['[2, 1, 1, 2, 4.5]'], {}), '([2, 1, 1, 2, 4.5])\n', (6923, 6942), True, 'import numpy as np\n'), ((6979, 7018), 'dislib.array', 'ds.array', ([], {'x': 'x_data', 'block_size': '(bn, bm)'}), '(x=x_data, block_size=(bn, bm))\n', (6987, 7018), True, 'import dislib as ds\n'), ((7031, 7069), 'dislib.array', 'ds.array', ([], {'x': 'y_data', 'block_size': '(bn, 1)'}), '(x=y_data, block_size=(bn, 1))\n', (7039, 7069), True, 'import dislib as ds\n'), ((7085, 7103), 'dislib.regression.LinearRegression', 'LinearRegression', ([], {}), '()\n', (7101, 7103), False, 'from dislib.regression import LinearRegression\n'), ((7180, 7198), 'dislib.regression.LinearRegression', 'LinearRegression', ([], {}), '()\n', (7196, 7198), False, 'from dislib.regression import LinearRegression\n'), ((7255, 7271), 'numpy.array', 'np.array', (['[3, 2]'], {}), '([3, 2])\n', (7263, 7271), True, 'import numpy as np\n'), ((7292, 7330), 'dislib.array', 'ds.array', ([], {'x': 'x_test', 'block_size': '(1, bm)'}), '(x=x_test, block_size=(1, bm))\n', (7300, 7330), True, 'import dislib as ds\n'), ((7351, 7385), 'numpy.array', 'np.array', (['[[3, 2], [4, 4], [1, 3]]'], {}), '([[3, 2], [4, 4], [1, 3]])\n', (7359, 7385), True, 'import numpy as np\n'), ((7408, 7449), 'dislib.array', 'ds.array', ([], {'x': 'x_test_m', 'block_size': '(bn, bm)'}), '(x=x_test_m, block_size=(bn, bm))\n', (7416, 7449), True, 'import dislib as ds\n'), ((7938, 7956), 'dislib.regression.LinearRegression', 'LinearRegression', ([], {}), '()\n', (7954, 7956), False, 'from dislib.regression import LinearRegression\n'), ((8505, 8523), 'dislib.regression.LinearRegression', 'LinearRegression', ([], {}), '()\n', (8521, 8523), False, 'from dislib.regression import LinearRegression\n'), ((9265, 9315), 'numpy.array', 'np.array', (['[[1, 2], [2, 0], [3, 1], [4, 4], [5, 3]]'], {}), '([[1, 2], [2, 0], [3, 1], [4, 4], [5, 3]])\n', (9273, 9315), True, 'import numpy as np\n'), ((9333, 9360), 'numpy.array', 'np.array', (['[4.5, 2, 1, 1, 2]'], {}), '([4.5, 2, 1, 1, 2])\n', (9341, 9360), True, 'import numpy as np\n'), ((9397, 9436), 'dislib.array', 'ds.array', ([], {'x': 'x_data', 'block_size': '(bn, bm)'}), '(x=x_data, block_size=(bn, bm))\n', (9405, 9436), True, 'import dislib as ds\n'), ((9449, 9487), 'dislib.array', 'ds.array', ([], {'x': 'y_data', 'block_size': '(bn, 1)'}), '(x=y_data, block_size=(bn, 1))\n', (9457, 9487), True, 'import dislib as ds\n'), ((9503, 9521), 'dislib.regression.LinearRegression', 'LinearRegression', ([], {}), '()\n', (9519, 9521), False, 'from dislib.regression import LinearRegression\n'), ((9614, 9632), 'dislib.regression.LinearRegression', 'LinearRegression', ([], {}), '()\n', (9630, 9632), False, 'from dislib.regression import LinearRegression\n'), ((957, 979), 'numpy.allclose', 'np.allclose', (['pred', '(2.1)'], {}), '(pred, 2.1)\n', (968, 979), True, 'import numpy as np\n'), ((1186, 1220), 'numpy.allclose', 'np.allclose', (['pred', '[2.1, 3.3, 3.9]'], {}), '(pred, [2.1, 3.3, 3.9])\n', (1197, 1220), True, 'import numpy as np\n'), ((1959, 1988), 'numpy.allclose', 'np.allclose', (['pred', '(2.04545455)'], {}), '(pred, 2.04545455)\n', (1970, 1988), True, 'import numpy as np\n'), ((2195, 2248), 'numpy.allclose', 'np.allclose', (['pred', '[2.04545455, 3.4090909, 4.0909091]'], {}), '(pred, [2.04545455, 3.4090909, 4.0909091])\n', (2206, 2248), True, 'import numpy as np\n'), ((2983, 3005), 'numpy.allclose', 'np.allclose', (['pred', '(2.1)'], {}), '(pred, 2.1)\n', (2994, 3005), True, 'import numpy as np\n'), ((3227, 3271), 'numpy.allclose', 'np.allclose', (['pred', '[2.1, 3.115625, 1.553125]'], {}), '(pred, [2.1, 3.115625, 1.553125])\n', (3238, 3271), True, 'import numpy as np\n'), ((4092, 4123), 'numpy.allclose', 'np.allclose', (['pred', '[2.05649718]'], {}), '(pred, [2.05649718])\n', (4103, 4123), True, 'import numpy as np\n'), ((4345, 4399), 'numpy.allclose', 'np.allclose', (['pred', '[2.05649718, 3.14689266, 1.3940678]'], {}), '(pred, [2.05649718, 3.14689266, 1.3940678])\n', (4356, 4399), True, 'import numpy as np\n'), ((5148, 5202), 'numpy.allclose', 'np.allclose', (['pred', '[3.0318415, 1.97164872, 3.85410906]'], {}), '(pred, [3.0318415, 1.97164872, 3.85410906])\n', (5159, 5202), True, 'import numpy as np\n'), ((5433, 5565), 'numpy.allclose', 'np.allclose', (['pred', '[[3.0318415, 1.97164872, 3.85410906], [2.5033157, 2.65809327, 5.05310495],\n [2.145797, 1.4840121, 1.5739791]]'], {}), '(pred, [[3.0318415, 1.97164872, 3.85410906], [2.5033157, \n 2.65809327, 5.05310495], [2.145797, 1.4840121, 1.5739791]])\n', (5444, 5565), True, 'import numpy as np\n'), ((7768, 7790), 'numpy.allclose', 'np.allclose', (['pred', '(2.1)'], {}), '(pred, 2.1)\n', (7779, 7790), True, 'import numpy as np\n'), ((7816, 7862), 'numpy.allclose', 'np.allclose', (['pred_m', '[2.1, 3.115625, 1.553125]'], {}), '(pred_m, [2.1, 3.115625, 1.553125])\n', (7827, 7862), True, 'import numpy as np\n'), ((8333, 8355), 'numpy.allclose', 'np.allclose', (['pred', '(2.1)'], {}), '(pred, 2.1)\n', (8344, 8355), True, 'import numpy as np\n'), ((8381, 8427), 'numpy.allclose', 'np.allclose', (['pred_m', '[2.1, 3.115625, 1.553125]'], {}), '(pred_m, [2.1, 3.115625, 1.553125])\n', (8392, 8427), True, 'import numpy as np\n'), ((8902, 8924), 'numpy.allclose', 'np.allclose', (['pred', '(2.1)'], {}), '(pred, 2.1)\n', (8913, 8924), True, 'import numpy as np\n'), ((8950, 8996), 'numpy.allclose', 'np.allclose', (['pred_m', '[2.1, 3.115625, 1.553125]'], {}), '(pred_m, [2.1, 3.115625, 1.553125])\n', (8961, 8996), True, 'import numpy as np\n'), ((9167, 9185), 'dislib.regression.LinearRegression', 'LinearRegression', ([], {}), '()\n', (9183, 9185), False, 'from dislib.regression import LinearRegression\n'), ((10011, 10033), 'numpy.allclose', 'np.allclose', (['pred', '(2.1)'], {}), '(pred, 2.1)\n', (10022, 10033), True, 'import numpy as np\n'), ((10059, 10105), 'numpy.allclose', 'np.allclose', (['pred_m', '[2.1, 3.115625, 1.553125]'], {}), '(pred_m, [2.1, 3.115625, 1.553125])\n', (10070, 10105), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Image input-output functions.
Created on Thu Apr 19 22:00:00 2018
Author: <NAME> | CVPRU-ISICAL (http://www.isical.ac.in/~cvpr)
GitHub: https://github.com/prasunroy/cvutils
"""
# imports
import cv2
import numpy
import os
import requests
from .validation import imvalidate
# reads an image file
def imread(path, flag=-1):
"""Reads an image file.
Supported image file formats:
Bitmap : *.bmp, *.dib
JPEG : *.jpe, *.jpeg, *.jpg
JPEG 2000 : *.jp2
PNG : *.png
Portable : *.pbm, *.pgm, *.ppm
Raster : *.ras, *.sr
TIFF : *.tif, *.tiff
WebP : *.webp
Args:
path : Path to an image file or an image url.
flag : Read flag. Defaults to -1.
>0 -- read as color image (ignores alpha channel)
=0 -- read as grayscale image
<0 -- read as original image (keeps alpha channel)
Returns:
An image as a numpy array if read is successful None otherwise.
The order of channels is BGR(A) for color image.
"""
image = None
try:
if os.path.exists(path):
image = cv2.imread(path, flag)
else:
image = bytearray(requests.get(path).content)
image = numpy.asarray(image, dtype='uint8')
image = cv2.imdecode(image, flag)
except:
pass
return image
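# e.g. both source types are accepted (the URL here is only a placeholder):
#   img = imread('photo.jpg', 0)                   # local file, as grayscale
#   img = imread('https://example.com/photo.png')  # remote file, unchanged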
# writes an image file
def imwrite(path, image):
"""Writes an image file.
Supported image file formats:
Bitmap : *.bmp, *.dib
JPEG : *.jpe, *.jpeg, *.jpg
JPEG 2000 : *.jp2
PNG : *.png
Portable : *.pbm, *.pgm, *.ppm
Raster : *.ras, *.sr
TIFF : *.tif, *.tiff
WebP : *.webp
Args:
path : Path to the image file to be written. If the file already
exists it will be overwritten.
image : A numpy array. The order of channels is BGR(A) for color image.
Returns:
True if write is successful False otherwise.
"""
flag = False
try:
image = imvalidate(image)
        if image is not None:
cv2.imwrite(path, image)
flag = True
except:
pass
return flag
# shows an image in a window
def imshow(image, title=''):
"""Shows an image in a window.
Args:
image : Image source. This can be either a numpy array, a path to an
image file or an image url.
title : Window title. Defaults to an empty string.
Returns:
None
"""
try:
array = imvalidate(image)
if array is None:
array = imread(image, -1)
array = imvalidate(array)
        if array is not None:
cv2.imshow(str(title), array)
cv2.waitKey(0)
cv2.destroyAllWindows()
except:
pass
return
| [
"cv2.waitKey",
"cv2.imwrite",
"numpy.asarray",
"cv2.imdecode",
"os.path.exists",
"cv2.imread",
"requests.get",
"cv2.destroyAllWindows"
] | [((1164, 1184), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1178, 1184), False, 'import os\n'), ((1206, 1228), 'cv2.imread', 'cv2.imread', (['path', 'flag'], {}), '(path, flag)\n', (1216, 1228), False, 'import cv2\n'), ((1321, 1356), 'numpy.asarray', 'numpy.asarray', (['image'], {'dtype': '"""uint8"""'}), "(image, dtype='uint8')\n", (1334, 1356), False, 'import numpy\n'), ((1377, 1402), 'cv2.imdecode', 'cv2.imdecode', (['image', 'flag'], {}), '(image, flag)\n', (1389, 1402), False, 'import cv2\n'), ((2230, 2254), 'cv2.imwrite', 'cv2.imwrite', (['path', 'image'], {}), '(path, image)\n', (2241, 2254), False, 'import cv2\n'), ((2888, 2902), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2899, 2902), False, 'import cv2\n'), ((2915, 2938), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2936, 2938), False, 'import cv2\n'), ((1273, 1291), 'requests.get', 'requests.get', (['path'], {}), '(path)\n', (1285, 1291), False, 'import requests\n')] |
import os
import random
import unittest
from itertools import product
from src.models.layers import conv2d_complex
import tensorflow as tf
import numpy as np
from numpy.random import seed
from scipy.ndimage import rotate
from scipy.signal import convolve2d
from src.models.layers import ECHConv2D, CHConv2DCompleteRadial
os.environ['PYTHONHASHSEED'] = '0'
random.seed(12345)
seed(1)
tf.random.set_seed(2)
class TestComplexConv(unittest.TestCase):
def _conv2d(self, image, filters):
"""
rough implementation of convolution
"""
(filter_height, filter_width, input_channels, streams,
n_harmonics) = filters.shape
batch_size, in_height, in_width, _ = image.shape
output_channels = streams * n_harmonics
filters_reshaped = np.reshape(filters, (
filter_height,
filter_width,
input_channels,
output_channels,
))
output = np.zeros(
(
batch_size,
in_height - filter_height + 1,
in_width - filter_width + 1,
input_channels,
output_channels,
),
dtype=np.csingle,
)
for batch, c_in, c_out in product(range(batch_size),
range(input_channels),
range(output_channels)):
output[batch, :, :, c_in,
c_out] = convolve2d(image[batch, :, :, c_in],
filters_reshaped[::-1, ::-1, c_in,
c_out],
mode="valid")
output = np.sum(output, axis=3)
return np.reshape(
output, (output.shape[0], output.shape[1], streams, n_harmonics))
def test_conv(self):
x = np.random.uniform(size=(200, 9, 9, 1))
layer = CHConv2DCompleteRadial(
1,
4,
9,
initializer=tf.keras.initializers.Constant(1.0),
)
y = np.squeeze(layer(x).numpy())
filters = layer.filters.numpy()
y_np = np.squeeze(self._conv2d(x, filters))
np.testing.assert_allclose(np.real(y), np.real(y_np), rtol=1e-3)
np.testing.assert_allclose(np.imag(y), np.imag(y_np), rtol=1e-3)
if __name__ == '__main__':
unittest.main()
| [
"tensorflow.random.set_seed",
"unittest.main",
"numpy.random.uniform",
"numpy.random.seed",
"numpy.sum",
"scipy.signal.convolve2d",
"numpy.zeros",
"numpy.imag",
"random.seed",
"numpy.reshape",
"numpy.real",
"tensorflow.keras.initializers.Constant"
] | [((360, 378), 'random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (371, 378), False, 'import random\n'), ((379, 386), 'numpy.random.seed', 'seed', (['(1)'], {}), '(1)\n', (383, 386), False, 'from numpy.random import seed\n'), ((387, 408), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(2)'], {}), '(2)\n', (405, 408), True, 'import tensorflow as tf\n'), ((2396, 2411), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2409, 2411), False, 'import unittest\n'), ((794, 881), 'numpy.reshape', 'np.reshape', (['filters', '(filter_height, filter_width, input_channels, output_channels)'], {}), '(filters, (filter_height, filter_width, input_channels,\n output_channels))\n', (804, 881), True, 'import numpy as np\n'), ((955, 1092), 'numpy.zeros', 'np.zeros', (['(batch_size, in_height - filter_height + 1, in_width - filter_width + 1,\n input_channels, output_channels)'], {'dtype': 'np.csingle'}), '((batch_size, in_height - filter_height + 1, in_width -\n filter_width + 1, input_channels, output_channels), dtype=np.csingle)\n', (963, 1092), True, 'import numpy as np\n'), ((1723, 1745), 'numpy.sum', 'np.sum', (['output'], {'axis': '(3)'}), '(output, axis=3)\n', (1729, 1745), True, 'import numpy as np\n'), ((1761, 1837), 'numpy.reshape', 'np.reshape', (['output', '(output.shape[0], output.shape[1], streams, n_harmonics)'], {}), '(output, (output.shape[0], output.shape[1], streams, n_harmonics))\n', (1771, 1837), True, 'import numpy as np\n'), ((1889, 1927), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(200, 9, 9, 1)'}), '(size=(200, 9, 9, 1))\n', (1906, 1927), True, 'import numpy as np\n'), ((1478, 1575), 'scipy.signal.convolve2d', 'convolve2d', (['image[batch, :, :, c_in]', 'filters_reshaped[::-1, ::-1, c_in, c_out]'], {'mode': '"""valid"""'}), "(image[batch, :, :, c_in], filters_reshaped[::-1, ::-1, c_in,\n c_out], mode='valid')\n", (1488, 1575), False, 'from scipy.signal import convolve2d\n'), ((2252, 2262), 'numpy.real', 'np.real', (['y'], {}), '(y)\n', (2259, 2262), True, 'import numpy as np\n'), ((2264, 2277), 'numpy.real', 'np.real', (['y_np'], {}), '(y_np)\n', (2271, 2277), True, 'import numpy as np\n'), ((2325, 2335), 'numpy.imag', 'np.imag', (['y'], {}), '(y)\n', (2332, 2335), True, 'import numpy as np\n'), ((2337, 2350), 'numpy.imag', 'np.imag', (['y_np'], {}), '(y_np)\n', (2344, 2350), True, 'import numpy as np\n'), ((2037, 2072), 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(1.0)'], {}), '(1.0)\n', (2067, 2072), True, 'import tensorflow as tf\n')] |
# Copyright 2022 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for multitask training prompts."""
from unittest import mock
from absl.testing import absltest
import jax.numpy as jnp
import numpy as np
from prompt_tuning.extended import multitask_prompts
from prompt_tuning.extended.train import multitask_prompts as train_multitask_prompts
from prompt_tuning.train import prompts as train_prompts
class PromptsTest(absltest.TestCase):
def test_multitask_prompt_does_concatenation(self):
embed_size = 20
prompt_length = 5
batch_size = 2
seq_len = 20
mock_prompt = mock.create_autospec(
multitask_prompts.MultiTaskPrompt, spec_set=True, instance=True)
prompt = jnp.zeros((batch_size, prompt_length, embed_size))
mock_prompt.return_value = prompt
mock_combine = mock.create_autospec(
train_prompts.prefix_prompt, spec_set=True)
prompt_module = train_multitask_prompts.MultiTaskPrompt(
prompt=mock_prompt, combine=mock_combine)
input_tokens = jnp.ones((batch_size, seq_len))
embed = jnp.ones((batch_size, seq_len, embed_size))
prompt_module.apply({"params": {}}, input_tokens, embed)
self.assertEqual(mock_prompt.call_args_list[0],
mock.call(input_tokens, embed))
np.testing.assert_allclose(mock_combine.call_args_list[0][0][0], prompt)
np.testing.assert_allclose(mock_combine.call_args_list[0][0][1], embed[:,
1:])
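    # Added note (hedged): the combine function is checked against `embed[:, 1:]`,
    # the input embedding with its first position dropped, presumably so the
    # prepended prompt replaces an initial placeholder token and the overall
    # sequence length is preserved.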
if __name__ == "__main__":
absltest.main()
| [
"absl.testing.absltest.main",
"unittest.mock.create_autospec",
"numpy.testing.assert_allclose",
"jax.numpy.ones",
"jax.numpy.zeros",
"unittest.mock.call",
"prompt_tuning.extended.train.multitask_prompts.MultiTaskPrompt"
] | [((2047, 2062), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (2060, 2062), False, 'from absl.testing import absltest\n'), ((1107, 1196), 'unittest.mock.create_autospec', 'mock.create_autospec', (['multitask_prompts.MultiTaskPrompt'], {'spec_set': '(True)', 'instance': '(True)'}), '(multitask_prompts.MultiTaskPrompt, spec_set=True,\n instance=True)\n', (1127, 1196), False, 'from unittest import mock\n'), ((1215, 1265), 'jax.numpy.zeros', 'jnp.zeros', (['(batch_size, prompt_length, embed_size)'], {}), '((batch_size, prompt_length, embed_size))\n', (1224, 1265), True, 'import jax.numpy as jnp\n'), ((1323, 1387), 'unittest.mock.create_autospec', 'mock.create_autospec', (['train_prompts.prefix_prompt'], {'spec_set': '(True)'}), '(train_prompts.prefix_prompt, spec_set=True)\n', (1343, 1387), False, 'from unittest import mock\n'), ((1417, 1503), 'prompt_tuning.extended.train.multitask_prompts.MultiTaskPrompt', 'train_multitask_prompts.MultiTaskPrompt', ([], {'prompt': 'mock_prompt', 'combine': 'mock_combine'}), '(prompt=mock_prompt, combine=\n mock_combine)\n', (1456, 1503), True, 'from prompt_tuning.extended.train import multitask_prompts as train_multitask_prompts\n'), ((1527, 1558), 'jax.numpy.ones', 'jnp.ones', (['(batch_size, seq_len)'], {}), '((batch_size, seq_len))\n', (1535, 1558), True, 'import jax.numpy as jnp\n'), ((1571, 1614), 'jax.numpy.ones', 'jnp.ones', (['(batch_size, seq_len, embed_size)'], {}), '((batch_size, seq_len, embed_size))\n', (1579, 1614), True, 'import jax.numpy as jnp\n'), ((1785, 1857), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mock_combine.call_args_list[0][0][0]', 'prompt'], {}), '(mock_combine.call_args_list[0][0][0], prompt)\n', (1811, 1857), True, 'import numpy as np\n'), ((1862, 1940), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mock_combine.call_args_list[0][0][1]', 'embed[:, 1:]'], {}), '(mock_combine.call_args_list[0][0][1], embed[:, 1:])\n', (1888, 1940), True, 'import numpy as np\n'), ((1749, 1779), 'unittest.mock.call', 'mock.call', (['input_tokens', 'embed'], {}), '(input_tokens, embed)\n', (1758, 1779), False, 'from unittest import mock\n')] |
# -*- coding: utf-8 -*-
"""losses.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/10XqSVHkvHpf-XabH9bEXqDZsx-UxagV-
"""
import torch
import numpy as np
import torch.nn as nn
import pdb
from lifelines.utils import concordance_index
from surv_ci_info.utilities import lindisc, mmd2_lin, auc
np.random.seed(1234)
torch.manual_seed(seed=1234)
def imb_loss(model,x,w):
''' Returns the IPM (Integral Probability Metric) term '''
imb = 0.0
fi = model.get_repr(x)
if model.imb_func=='lin_disc':
imb = lindisc(fi, w, model.p_ipm)
elif model.imb_func=='mmd2_lin':
imb = mmd2_lin(fi, w, model.p_ipm)
return imb
######################################################################################
def mse_loss(model,x,y,e,w):
''' Returns MSE Loss'''
#shape_co,scale_co, logits_co , shape_tr,scale_tr, logits_tr = model.forward(x,w)
shape_co, scale_co, logits_co, shape_tr, scale_tr, logits_tr, shape_co_c, scale_co_c, logits_co_c, shape_tr_c, scale_tr_c, logits_tr_c = model.forward(x, w)
t_max = y[e==1] #Time-to-event/Uncensored Samples
c_max = y[e==0] #Time-to-censoring/Censored Samples
w_t = w[e==1] #Treatment indicator for Uncensored Samples
w_c = w[e==0] #Treatment indicator for Censored Samples
#pdb.set_trace()
t_pred_co = auc(model,t_max[w_t==0],shape_co,scale_co,logits_co) #Predicted time to event for control samples
t_pred_tr = auc(model,t_max[w_t==1], shape_tr, scale_tr,logits_tr) #Predicted time to event for treated samples
#true_t = torch.cat((t[w==0],t[w==1]),0)
pred_t = torch.cat((t_pred_co,t_pred_tr),0) #Predicted T
c_pred_co = auc(model, c_max[w_c == 0], shape_co_c, scale_co_c, logits_co_c) #Predicted time to censoring for control samples
c_pred_tr = auc(model, c_max[w_c == 1], shape_tr_c, scale_tr_c, logits_tr_c) #Predicted time to censoring for treated samples
# true_t = torch.cat((t[w==0],t[w==1]),0)
pred_c = torch.cat((c_pred_co, c_pred_tr), 0) #Predicted C
pred_y = pred_t*e + pred_c*(1-e) #Predicted Y
    p_w = w.sum() / len(w) # Probability of Treatment=1
w_tr = w/(2*p_w)
w_co = (1-w)/(2*(1-p_w))
wts = w_tr + w_co
return torch.mean(torch.multiply(wts,torch.square((y-pred_y))))
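# Added worked check (not in the original notebook): with p_w = 0.25, a treated
# sample gets weight 1/(2*0.25) = 2.0 and a control sample 1/(2*0.75) ~= 0.667,
# so the weights average to one over the population (0.25*2.0 + 0.75*0.667 = 1.0)
# while giving the treated and control groups equal total mass.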
###############################################################################
def l2_loss(model):
layers = model.get_layers()
#n_layers = model.get_layers_dim()
#pdb.set_trace()
#reg_loss = sum([torch.square(layers[i].weight).sum()/2 for i in range(len(layers))])
reg_loss = sum([torch.square(layers[i].weight).sum()/2 for i in range(0,len(layers)) if i%2==0])
return reg_loss
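# Added note (hedged): the `i % 2 == 0` filter assumes `get_layers()` returns an
# alternating [Linear, activation, Linear, ...] sequence, so only weight matrices
# are penalized. A similar (though not identical) effect could be obtained with
# the optimizer's built-in penalty, e.g. torch.optim.Adam(params, weight_decay=lam).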
# def mse_loss(model,x,y,e,w):
# '''Returns MSE Loss '''
# shape_co,scale_co, logits_co , shape_tr,scale_tr, logits_tr,shape_co_c,scale_co_c, logits_co_c , shape_tr_c,scale_tr_c, logits_tr_c = model.forward(x,w)
# t_pred_co = auc(model,y[w==0],shape_co,scale_co,logits_co)
# t_pred_tr = auc(model,y[w==1], shape_tr, scale_tr,logits_tr)
# c_pred_co = auc(model,y[w==0],shape_co_c,scale_co_c,logits_co_c)
# c_pred_tr = auc(model,y[w==1], shape_tr_c, scale_tr_c,logits_tr_c)
# #pdb.set_trace()
# true_t = torch.cat((t[w==0],t[w==1]),0)
# pred_t = torch.cat((t_pred_co,t_pred_tr),0)
# # true_c = torch.cat((c[w==0],c[w==1]),0)
# # pred_c = torch.cat((c_pred_co,c_pred_tr),0)
# # true_y = min(true_t,true_c)
# # pred_y = min(pred_t,pred_c)
# # p_w = w.sum()/len(w) #Probability of Treatement=1
# # w_tr = w/(2*p_w)
# # w_co = (1-w)/(2*(1-p_w))
# # wts = w_tr + w_co
# #return torch.mean(torch.multiply(wts,torch.square((true_t-pred_t))))
# return torch.mean(torch.square((true_t-pred_t)))
# def mse_loss(model,x,t,e,w):
# '''Returns MSE Loss '''
# shape_co,scale_co, logits_co , shape_tr,scale_tr, logits_tr,shape_co_c,scale_co_c, logits_co_c , shape_tr_c,scale_tr_c, logits_tr_c = model.forward(x,w)
# t_pred_co = auc(model,t[w==0],shape_co,scale_co,logits_co)
# t_pred_tr = auc(model,t[w==1], shape_tr, scale_tr,logits_tr)
# true_t = torch.cat((t[w==0],t[w==1]),0)
# pred_t = torch.cat((t_pred_co,t_pred_tr),0)
# true_y = min(true_t,true_c)
# pred_y = min(pred_t,pred_c)
# p_w = w.sum()/len(w) #Probability of Treatement=1
# w_tr = w/(2*p_w)
# w_co = (1-w)/(2*(1-p_w))
# wts = w_tr + w_co
# return torch.mean(torch.multiply(wts,torch.square((true_t-pred_t))))
# # return torch.mean(torch.square((true_t-pred_t)))
# #return torch.mean(torch.square((true_t[e==1]-pred_t[e==1])))
# def _lognormal_loss(t, c,e, shape, scale,shape_c,scale_c):
# '''Returns Unconditional Log Normal Loss (Does not depends on features x)'''
# eta_ = shape.expand(t.shape[0], -1)
# beta_ = scale.expand(t.shape[0], -1)
# eta_c = shape_c.expand(c.shape[0], -1)
# beta_c = scale_c.expand(c.shape[0], -1)
# ll = 0.
# mu = eta_
# sigma = beta_
# mu_c = eta_c
# sigma_c = beta_c
# f = - torch.log(t)- sigma - 0.5*np.log(2*np.pi)
# f = f - torch.div((torch.log(t) - mu)**2, 2.*torch.exp(2*sigma))
# f_c = - torch.log(c)- sigma_c - 0.5*np.log(2*np.pi)
# f_c = f_c - torch.div((torch.log(c) - mu_c)**2, 2.*torch.exp(2*sigma_c))
# s = torch.div(torch.log(t) - mu, torch.exp(sigma)*np.sqrt(2))
# s = 0.5 - 0.5*torch.erf(s)
# s = torch.log(s)
# s_c = torch.div(torch.log(c) - mu_c, torch.exp(sigma_c)*np.sqrt(2))
# s_c = 0.5 - 0.5*torch.erf(s_c)
# s_c = torch.log(s_c)
# uncens = np.where(e.cpu().data.numpy() == 1)[0]
# cens = np.where(e.cpu().data.numpy() != 1)[0]
# ll += f[uncens].sum() + s[cens].sum() + s_c[uncens].sum() + f_c[cens].sum()
# return -ll.mean()
# def _weibull_loss(t, e, shape, scale):
# '''Returns Unconditional Weibull Loss (Does not depends on features x)'''
# eta_ = shape.expand(t.shape[0], -1)
# beta_ = scale.expand(t.shape[0], -1)
# ll = 0.
# s = - (torch.pow(torch.exp(beta_)*t, torch.exp(eta_)))
# f = eta_ + beta_ + ((torch.exp(eta_)-1)*(beta_+torch.log(t)))
# f = f + s
# uncens = np.where(e.cpu().data.numpy() == 1)[0]
# cens = np.where(e.cpu().data.numpy() != 1)[0]
# ll += f[uncens].sum() + s[cens].sum()
# return -ll.mean()
# def unconditional_mse(model,t,e,w):
# shape_co,scale_co,shape_tr,scale_tr = model.get_shape_scale(w)
# t_pred_co = auc_unconditional(model,t[w==0],shape_co,scale_co)
# t_pred_tr = auc_unconditional(model,t[w==1], shape_tr, scale_tr)
# true_t = torch.cat((t[w==0],t[w==1]),0)
# pred_t = torch.cat((t_pred_co,t_pred_tr),0)
# return torch.mean(torch.square((true_t-pred_t)))
def _lognormal_loss(y,e, shape, scale,shape_c,scale_c):
    '''Returns Unconditional Log Normal Loss (Does not depend on features x)'''
eta_ = shape.expand(y.shape[0], -1)
beta_ = scale.expand(y.shape[0], -1)
eta_c = shape_c.expand(y.shape[0], -1)
beta_c = scale_c.expand(y.shape[0], -1)
ll = 0
mu = eta_
sigma = beta_
mu_c = eta_c
sigma_c = beta_c
f = - torch.log(y)- sigma - 0.5*np.log(2*np.pi)
f = f - torch.div((torch.log(y) - mu)**2, 2.*torch.exp(2*sigma))
f_c = - torch.log(y)- sigma_c - 0.5*np.log(2*np.pi)
f_c = f_c - torch.div((torch.log(y) - mu_c)**2, 2.*torch.exp(2*sigma_c))
s_c = torch.div(torch.log(y) - mu, torch.exp(sigma)*np.sqrt(2))
s_c = 0.5 - 0.5*torch.erf(s_c)
s_c = torch.log(s_c)
s = torch.div(torch.log(y) - mu_c, torch.exp(sigma_c)*np.sqrt(2))
s = 0.5 - 0.5*torch.erf(s)
s = torch.log(s)
uncens = np.where(e.cpu().data.numpy() == 1)[0]
cens = np.where(e.cpu().data.numpy() != 1)[0]
#ll += f[uncens].sum() + s[cens].sum() + s_c[uncens].sum() + f_c[cens].sum()
ll += f[uncens].sum() + s[uncens].sum() + f_c[cens].sum() + s_c[cens].sum()
return -ll.mean()
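# Added note: the four retained terms implement the standard independent-censoring
# likelihood. An uncensored point (e == 1) contributes log f_T(y) + log S_C(y),
# i.e. `f` plus `s` (note `s` is built from the censoring parameters mu_c/sigma_c);
# a censored point (e == 0) contributes log f_C(y) + log S_T(y), i.e. `f_c` plus
# `s_c` (built from the event parameters). The `s`/`s_c` names are therefore
# swapped relative to what their suffixes suggest.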
def _weibull_loss(t, e, shape, scale):
    '''Returns Unconditional Weibull Loss (Does not depend on features x)'''
eta_ = shape.expand(t.shape[0], -1)
beta_ = scale.expand(t.shape[0], -1)
ll = 0.0
s = - (torch.pow(torch.exp(beta_)*t, torch.exp(eta_)))
f = eta_ + beta_ + ((torch.exp(eta_)-1)*(beta_+torch.log(t)))
f = f + s
uncens = np.where(e.cpu().data.numpy() == 1)[0]
cens = np.where(e.cpu().data.numpy() != 1)[0]
ll += f[uncens].sum() + s[cens].sum()
return -ll.mean()
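# Added sanity-check helper (hedged sketch, not in the original notebook): the
# closed forms used by `_weibull_loss` for a single scalar time `t`, with the
# same parameterization as above (shape exp(eta), and exp(beta) multiplying t):
def example_weibull_terms(t, eta, beta):
    '''Return (log-pdf, log-survival) matching the per-sample terms above.'''
    log_s = -(np.exp(beta) * t) ** np.exp(eta)
    log_f = eta + beta + (np.exp(eta) - 1) * (beta + np.log(t)) + log_s
    return log_f, log_s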
# def unconditional_loss(model, t, c, e, w):
# '''Returns Unconditional Loss (Does not depends on features x)'''
# shape_co,scale_co,shape_tr,scale_tr = model.get_shape_scale(w)
# shape_co_c,scale_co_c,shape_tr_c, scale_tr_c = model.get_shape_scale_c(w)
# tot_unc_loss = 0.0
# if model.dist == 'Weibull':
# unco_loss_co = _weibull_loss(t[w==0], e[w==0], shape_co,scale_co)
# unco_loss_tr = _weibull_loss(t[w==1], e[w==1], shape_tr,scale_tr)
# unco_loss_co_c = _weibull_loss(c[w==0], e[w==0], shape_co_c,scale_co_c)
# elif model.dist == 'LogNormal':
# unco_loss_co = _lognormal_loss(t[w==0], c[w==0], e[w==0], shape_co,scale_co,shape_co_c,scale_co_c)
# unco_loss_tr = _lognormal_loss(t[w==1], c[w==1], e[w==1], shape_tr,scale_tr, shape_tr_c,scale_tr_c)
# else:
# raise NotImplementedError('Distribution: '+model.dist+
# ' not implemented yet.')
# tot_unc_loss = unco_loss_co + unco_loss_tr
# return tot_unc_loss
def unconditional_loss(model, y, e, w):
    '''Returns Unconditional Loss (Does not depend on features x)'''
shape_co,scale_co,shape_tr,scale_tr = model.get_shape_scale(w)
shape_co_c,scale_co_c,shape_tr_c, scale_tr_c = model.get_shape_scale_c(w)
tot_unc_loss = 0.0
if model.dist == 'Weibull':
        unco_loss_co = _weibull_loss(y[w==0], e[w==0], shape_co,scale_co)
        unco_loss_tr = _weibull_loss(y[w==1], e[w==1], shape_tr,scale_tr)
elif model.dist == 'LogNormal':
unco_loss_co = _lognormal_loss(y[w==0], e[w==0], shape_co,scale_co,shape_co_c,scale_co_c)
unco_loss_tr = _lognormal_loss(y[w==1], e[w==1], shape_tr,scale_tr, shape_tr_c,scale_tr_c)
else:
raise NotImplementedError('Distribution: '+model.dist+
' not implemented yet.')
tot_unc_loss = unco_loss_co + unco_loss_tr
return tot_unc_loss
######################################################################################
def _conditional_lognormal_loss(y, e, shape,scale,logits,shape_c,scale_c,logits_c,alpha, elbo=True):
'''Returns Conditional Loss (Depends on features x)'''
lossf = []
losss = []
lossf_c = []
losss_c = []
k = shape.shape[1] #Number of Primitive Distributions
eta_ = shape
beta_ = scale
eta_c = shape_c
beta_c = scale_c
for g in range(k):
mu = eta_[:, g]
sigma = beta_[:, g]
mu_c = eta_c[:, g]
sigma_c = beta_c[:, g]
f = - torch.log(y) - sigma - 0.5*np.log(2*np.pi)
f = f - torch.div((torch.log(y) - mu)**2, 2.*torch.exp(2*sigma))
f_c = - torch.log(y) - sigma_c - 0.5*np.log(2*np.pi)
f_c = f_c - torch.div((torch.log(y) - mu_c)**2, 2.*torch.exp(2*sigma_c))
s_c = torch.div(torch.log(y) - mu, torch.exp(sigma)*np.sqrt(2))
s_c = 0.5 - 0.5*torch.erf(s_c)
s_c = torch.log(s_c)
s = torch.div(torch.log(y) - mu_c, torch.exp(sigma_c)*np.sqrt(2))
s = 0.5 - 0.5*torch.erf(s)
s = torch.log(s)
lossf.append(f)
losss.append(s)
lossf_c.append(f_c)
losss_c.append(s_c)
losss = torch.stack(losss, dim=1)
lossf = torch.stack(lossf, dim=1)
losss_c = torch.stack(losss_c, dim=1)
lossf_c = torch.stack(lossf_c, dim=1)
if elbo:
lossg = nn.Softmax(dim=1)(logits)
lossg_c = nn.Softmax(dim=1)(logits_c)
#losss = lossg*losss
losss = lossg_c*losss
lossf = lossg*lossf
#losss_c = lossg_c*losss_c
losss_c = lossg*losss_c
lossf_c = lossg_c*lossf_c
losss = losss.sum(dim=1)
lossf = lossf.sum(dim=1)
losss_c = losss_c.sum(dim=1)
lossf_c = lossf_c.sum(dim=1)
else:
lossg_c = nn.LogSoftmax(dim=1)(logits_c)
lossg = nn.LogSoftmax(dim=1)(logits)
#losss = lossg + losss
losss = lossg_c + losss
lossf = lossg + lossf
#losss_c = lossg_c + losss_c
losss_c = lossg + losss_c
lossf_c = lossg_c + lossf_c
losss = torch.logsumexp(losss, dim=1)
lossf = torch.logsumexp(lossf, dim=1)
losss_c = torch.logsumexp(losss_c, dim=1)
lossf_c = torch.logsumexp(lossf_c, dim=1)
uncens = np.where(e.cpu().data.numpy() == 1)[0]
cens = np.where(e.cpu().data.numpy() != 1)[0]
#ll = lossf[uncens].sum() + losss_c[uncens].sum() + alpha*(losss[cens].sum() + lossf_c[cens].sum())
ll = lossf[uncens].sum() + losss[uncens].sum() + alpha*(losss_c[cens].sum() + lossf_c[cens].sum())
return -ll/float(len(uncens)+len(cens))
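# Added note: with `elbo=False`, adding the log-softmax weights and applying
# `logsumexp` computes log sum_g pi_g * f_g(y), the exact mixture log-density.
# With `elbo=True`, the softmax weights multiply the per-component log terms
# directly, which by Jensen's inequality gives a lower bound (an ELBO) on the
# same log-likelihood and is typically smoother to optimize.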
def _conditional_weibull_loss(t, e, shape, scale, logits, alpha, elbo=True):
'''Returns Conditional Loss (Depends on features x)'''
k = shape.shape[1] #Number of Primitive Distributions
eta_ = shape
beta_ = scale
lossf = []
losss = []
for g in range(k):
shape_ = eta_[:, g]
scale_ = beta_[:, g]
s = - (torch.pow(torch.exp(scale_)*t, torch.exp(shape_)))
f = shape_ + scale_ + ((torch.exp(scale_)-1)*(scale_+torch.log(t)))
f = f + s
lossf.append(f)
losss.append(s)
losss = torch.stack(losss, dim=1)
lossf = torch.stack(lossf, dim=1)
if elbo:
lossg = nn.Softmax(dim=1)(logits)
losss = lossg*losss
lossf = lossg*lossf
losss = losss.sum(dim=1)
lossf = lossf.sum(dim=1)
else:
lossg = nn.LogSoftmax(dim=1)(logits)
losss = lossg + losss
lossf = lossg + lossf
losss = torch.logsumexp(losss, dim=1)
lossf = torch.logsumexp(lossf, dim=1)
uncens = np.where(e.cpu().data.numpy() == 1)[0]
cens = np.where(e.cpu().data.numpy() != 1)[0]
ll = lossf[uncens].sum() + alpha*losss[cens].sum()
return -ll/float(len(uncens)+len(cens))
def conditional_loss(model, x, y, e, w, elbo=True):
alpha = model.discount
shape_co, scale_co, logits_co, shape_tr, scale_tr, logits_tr,shape_co_c, scale_co_c, logits_co_c, shape_tr_c, scale_tr_c, logits_tr_c = model.forward(x, w)
if model.dist == 'Weibull':
        cond_loss_co = _conditional_weibull_loss(y[w==0], e[w==0], shape_co, scale_co, logits_co,alpha,elbo)
        cond_loss_tr = _conditional_weibull_loss(y[w==1], e[w==1],shape_tr,scale_tr,logits_tr, alpha,elbo)
elif model.dist == 'LogNormal':
cond_loss_co = _conditional_lognormal_loss(y[w==0],e[w==0],shape_co,scale_co,logits_co,shape_co_c, scale_co_c, logits_co_c,alpha, elbo)
cond_loss_tr = _conditional_lognormal_loss(y[w==1], e[w==1],shape_tr,scale_tr,logits_tr, shape_tr_c, scale_tr_c, logits_tr_c,alpha,elbo)
else:
raise NotImplementedError('Distribution: '+model.dist+
' not implemented yet.')
tot_cond_loss = cond_loss_co + cond_loss_tr
return tot_cond_loss
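# Added sketch (hedged, not in the original notebook): how the pieces defined in
# this file would typically be combined into a single training objective. The
# coefficient names `alpha_ipm` and `lam_l2` are hypothetical.
def example_training_objective(model, x, y, e, w, alpha_ipm=1.0, lam_l2=1e-4):
    '''Illustrative only: conditional survival NLL + IPM penalty + L2 penalty.'''
    return (conditional_loss(model, x, y, e, w)
            + alpha_ipm * imb_loss(model, x, w)
            + lam_l2 * l2_loss(model))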
######################################################################################
# def calculate_ci(model,x,t,e,w):
# treated_idx = torch.where(w>0)[0]
# control_idx = torch.where(w<1)[0]
# shape_co,scale_co,logits_co, shape_tr,scale_tr, logits_tr = model.forward(x,w) #Predicted Factual Parameters
# shape_co ,scale_co = softmax_out(shape_co, scale_co, logits_co)
# shape_tr, scale_tr = softmax_out(shape_tr,scale_tr,logits_tr)
# shape_co = shape_co.detach().numpy()
# scale_co = scale_co.detach().numpy()
# shape_tr = shape_tr.detach().numpy()
# scale_tr = scale_tr.detach().numpy()
# if model.dist == 'LogNormal':
# t_co_samples = sample_lognormal(mu=shape_co, sigma=np.exp(scale_co))
# t_tr_samples = sample_lognormal(mu=shape_tr, sigma=np.exp(scale_tr))
# elif model.dist == 'Weibull':
# t_co_samples = sample_weibull(shape=shape_co, scale=np.exp(scale_co))
# t_tr_samples = sample_weibull(shape=shape_tr, scale=np.exp(scale_tr))
# else:
# print('Sampling Distribution function not defined')
# t_pred_co = np.median(t_co_samples,axis=1)
# t_pred_tr = np.median(t_tr_samples,axis=1)
# c_index_co = concordance_index(event_times=t[control_idx],predicted_scores=t_pred_co,event_observed=e[control_idx])
# c_index_tr = concordance_index(event_times=t[treated_idx],predicted_scores=t_pred_tr,event_observed=e[treated_idx])
# return ((c_index_co + c_index_tr) * 0.5) | [
"torch.logsumexp",
"numpy.random.seed",
"torch.stack",
"numpy.log",
"torch.erf",
"torch.manual_seed",
"torch.nn.LogSoftmax",
"torch.cat",
"surv_ci_info.utilities.mmd2_lin",
"torch.exp",
"torch.nn.Softmax",
"surv_ci_info.utilities.lindisc",
"surv_ci_info.utilities.auc",
"torch.log",
"torc... | [((365, 385), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (379, 385), True, 'import numpy as np\n'), ((386, 414), 'torch.manual_seed', 'torch.manual_seed', ([], {'seed': '(1234)'}), '(seed=1234)\n', (403, 414), False, 'import torch\n'), ((1341, 1399), 'surv_ci_info.utilities.auc', 'auc', (['model', 't_max[w_t == 0]', 'shape_co', 'scale_co', 'logits_co'], {}), '(model, t_max[w_t == 0], shape_co, scale_co, logits_co)\n', (1344, 1399), False, 'from surv_ci_info.utilities import lindisc, mmd2_lin, auc\n'), ((1453, 1511), 'surv_ci_info.utilities.auc', 'auc', (['model', 't_max[w_t == 1]', 'shape_tr', 'scale_tr', 'logits_tr'], {}), '(model, t_max[w_t == 1], shape_tr, scale_tr, logits_tr)\n', (1456, 1511), False, 'from surv_ci_info.utilities import lindisc, mmd2_lin, auc\n'), ((1607, 1643), 'torch.cat', 'torch.cat', (['(t_pred_co, t_pred_tr)', '(0)'], {}), '((t_pred_co, t_pred_tr), 0)\n', (1616, 1643), False, 'import torch\n'), ((1669, 1733), 'surv_ci_info.utilities.auc', 'auc', (['model', 'c_max[w_c == 0]', 'shape_co_c', 'scale_co_c', 'logits_co_c'], {}), '(model, c_max[w_c == 0], shape_co_c, scale_co_c, logits_co_c)\n', (1672, 1733), False, 'from surv_ci_info.utilities import lindisc, mmd2_lin, auc\n'), ((1797, 1861), 'surv_ci_info.utilities.auc', 'auc', (['model', 'c_max[w_c == 1]', 'shape_tr_c', 'scale_tr_c', 'logits_tr_c'], {}), '(model, c_max[w_c == 1], shape_tr_c, scale_tr_c, logits_tr_c)\n', (1800, 1861), False, 'from surv_ci_info.utilities import lindisc, mmd2_lin, auc\n'), ((1966, 2002), 'torch.cat', 'torch.cat', (['(c_pred_co, c_pred_tr)', '(0)'], {}), '((c_pred_co, c_pred_tr), 0)\n', (1975, 2002), False, 'import torch\n'), ((7251, 7265), 'torch.log', 'torch.log', (['s_c'], {}), '(s_c)\n', (7260, 7265), False, 'import torch\n'), ((7370, 7382), 'torch.log', 'torch.log', (['s'], {}), '(s)\n', (7379, 7382), False, 'import torch\n'), ((11245, 11270), 'torch.stack', 'torch.stack', (['losss'], {'dim': '(1)'}), '(losss, dim=1)\n', (11256, 11270), False, 'import torch\n'), ((11281, 11306), 'torch.stack', 'torch.stack', (['lossf'], {'dim': '(1)'}), '(lossf, dim=1)\n', (11292, 11306), False, 'import torch\n'), ((11319, 11346), 'torch.stack', 'torch.stack', (['losss_c'], {'dim': '(1)'}), '(losss_c, dim=1)\n', (11330, 11346), False, 'import torch\n'), ((11359, 11386), 'torch.stack', 'torch.stack', (['lossf_c'], {'dim': '(1)'}), '(lossf_c, dim=1)\n', (11370, 11386), False, 'import torch\n'), ((13097, 13122), 'torch.stack', 'torch.stack', (['losss'], {'dim': '(1)'}), '(losss, dim=1)\n', (13108, 13122), False, 'import torch\n'), ((13133, 13158), 'torch.stack', 'torch.stack', (['lossf'], {'dim': '(1)'}), '(lossf, dim=1)\n', (13144, 13158), False, 'import torch\n'), ((581, 608), 'surv_ci_info.utilities.lindisc', 'lindisc', (['fi', 'w', 'model.p_ipm'], {}), '(fi, w, model.p_ipm)\n', (588, 608), False, 'from surv_ci_info.utilities import lindisc, mmd2_lin, auc\n'), ((11008, 11022), 'torch.log', 'torch.log', (['s_c'], {}), '(s_c)\n', (11017, 11022), False, 'import torch\n'), ((11132, 11144), 'torch.log', 'torch.log', (['s'], {}), '(s)\n', (11141, 11144), False, 'import torch\n'), ((12063, 12092), 'torch.logsumexp', 'torch.logsumexp', (['losss'], {'dim': '(1)'}), '(losss, dim=1)\n', (12078, 12092), False, 'import torch\n'), ((12105, 12134), 'torch.logsumexp', 'torch.logsumexp', (['lossf'], {'dim': '(1)'}), '(lossf, dim=1)\n', (12120, 12134), False, 'import torch\n'), ((12149, 12180), 'torch.logsumexp', 'torch.logsumexp', (['losss_c'], {'dim': '(1)'}), '(losss_c, 
dim=1)\n', (12164, 12180), False, 'import torch\n'), ((12195, 12226), 'torch.logsumexp', 'torch.logsumexp', (['lossf_c'], {'dim': '(1)'}), '(lossf_c, dim=1)\n', (12210, 12226), False, 'import torch\n'), ((13431, 13460), 'torch.logsumexp', 'torch.logsumexp', (['losss'], {'dim': '(1)'}), '(losss, dim=1)\n', (13446, 13460), False, 'import torch\n'), ((13473, 13502), 'torch.logsumexp', 'torch.logsumexp', (['lossf'], {'dim': '(1)'}), '(lossf, dim=1)\n', (13488, 13502), False, 'import torch\n'), ((655, 683), 'surv_ci_info.utilities.mmd2_lin', 'mmd2_lin', (['fi', 'w', 'model.p_ipm'], {}), '(fi, w, model.p_ipm)\n', (663, 683), False, 'from surv_ci_info.utilities import lindisc, mmd2_lin, auc\n'), ((2225, 2249), 'torch.square', 'torch.square', (['(y - pred_y)'], {}), '(y - pred_y)\n', (2237, 2249), False, 'import torch\n'), ((6930, 6947), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (6936, 6947), True, 'import numpy as np\n'), ((7052, 7069), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (7058, 7069), True, 'import numpy as np\n'), ((7162, 7174), 'torch.log', 'torch.log', (['y'], {}), '(y)\n', (7171, 7174), False, 'import torch\n'), ((7181, 7197), 'torch.exp', 'torch.exp', (['sigma'], {}), '(sigma)\n', (7190, 7197), False, 'import torch\n'), ((7198, 7208), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7205, 7208), True, 'import numpy as np\n'), ((7228, 7242), 'torch.erf', 'torch.erf', (['s_c'], {}), '(s_c)\n', (7237, 7242), False, 'import torch\n'), ((7283, 7295), 'torch.log', 'torch.log', (['y'], {}), '(y)\n', (7292, 7295), False, 'import torch\n'), ((7304, 7322), 'torch.exp', 'torch.exp', (['sigma_c'], {}), '(sigma_c)\n', (7313, 7322), False, 'import torch\n'), ((7323, 7333), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7330, 7333), True, 'import numpy as np\n'), ((7351, 7363), 'torch.erf', 'torch.erf', (['s'], {}), '(s)\n', (7360, 7363), False, 'import torch\n'), ((7908, 7923), 'torch.exp', 'torch.exp', (['eta_'], {}), '(eta_)\n', (7917, 7923), False, 'import torch\n'), ((11412, 11429), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (11422, 11429), True, 'import torch.nn as nn\n'), ((11452, 11469), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (11462, 11469), True, 'import torch.nn as nn\n'), ((11797, 11817), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (11810, 11817), True, 'import torch.nn as nn\n'), ((11840, 11860), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (11853, 11860), True, 'import torch.nn as nn\n'), ((13184, 13201), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (13194, 13201), True, 'import torch.nn as nn\n'), ((13338, 13358), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (13351, 13358), True, 'import torch.nn as nn\n'), ((6904, 6916), 'torch.log', 'torch.log', (['y'], {}), '(y)\n', (6913, 6916), False, 'import torch\n'), ((6993, 7013), 'torch.exp', 'torch.exp', (['(2 * sigma)'], {}), '(2 * sigma)\n', (7002, 7013), False, 'import torch\n'), ((7024, 7036), 'torch.log', 'torch.log', (['y'], {}), '(y)\n', (7033, 7036), False, 'import torch\n'), ((7121, 7143), 'torch.exp', 'torch.exp', (['(2 * sigma_c)'], {}), '(2 * sigma_c)\n', (7130, 7143), False, 'import torch\n'), ((7888, 7904), 'torch.exp', 'torch.exp', (['beta_'], {}), '(beta_)\n', (7897, 7904), False, 'import torch\n'), ((7949, 7964), 'torch.exp', 'torch.exp', (['eta_'], {}), '(eta_)\n', (7958, 7964), False, 'import torch\n'), 
((7975, 7987), 'torch.log', 'torch.log', (['t'], {}), '(t)\n', (7984, 7987), False, 'import torch\n'), ((10676, 10693), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (10682, 10693), True, 'import numpy as np\n'), ((10802, 10819), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (10808, 10819), True, 'import numpy as np\n'), ((10915, 10927), 'torch.log', 'torch.log', (['y'], {}), '(y)\n', (10924, 10927), False, 'import torch\n'), ((10934, 10950), 'torch.exp', 'torch.exp', (['sigma'], {}), '(sigma)\n', (10943, 10950), False, 'import torch\n'), ((10951, 10961), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (10958, 10961), True, 'import numpy as np\n'), ((10983, 10997), 'torch.erf', 'torch.erf', (['s_c'], {}), '(s_c)\n', (10992, 10997), False, 'import torch\n'), ((11041, 11053), 'torch.log', 'torch.log', (['y'], {}), '(y)\n', (11050, 11053), False, 'import torch\n'), ((11062, 11080), 'torch.exp', 'torch.exp', (['sigma_c'], {}), '(sigma_c)\n', (11071, 11080), False, 'import torch\n'), ((11081, 11091), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (11088, 11091), True, 'import numpy as np\n'), ((11111, 11123), 'torch.erf', 'torch.erf', (['s'], {}), '(s)\n', (11120, 11123), False, 'import torch\n'), ((12939, 12956), 'torch.exp', 'torch.exp', (['shape_'], {}), '(shape_)\n', (12948, 12956), False, 'import torch\n'), ((6967, 6979), 'torch.log', 'torch.log', (['y'], {}), '(y)\n', (6976, 6979), False, 'import torch\n'), ((7093, 7105), 'torch.log', 'torch.log', (['y'], {}), '(y)\n', (7102, 7105), False, 'import torch\n'), ((10649, 10661), 'torch.log', 'torch.log', (['y'], {}), '(y)\n', (10658, 10661), False, 'import torch\n'), ((10741, 10761), 'torch.exp', 'torch.exp', (['(2 * sigma)'], {}), '(2 * sigma)\n', (10750, 10761), False, 'import torch\n'), ((10773, 10785), 'torch.log', 'torch.log', (['y'], {}), '(y)\n', (10782, 10785), False, 'import torch\n'), ((10873, 10895), 'torch.exp', 'torch.exp', (['(2 * sigma_c)'], {}), '(2 * sigma_c)\n', (10882, 10895), False, 'import torch\n'), ((12918, 12935), 'torch.exp', 'torch.exp', (['scale_'], {}), '(scale_)\n', (12927, 12935), False, 'import torch\n'), ((12987, 13004), 'torch.exp', 'torch.exp', (['scale_'], {}), '(scale_)\n', (12996, 13004), False, 'import torch\n'), ((13016, 13028), 'torch.log', 'torch.log', (['t'], {}), '(t)\n', (13025, 13028), False, 'import torch\n'), ((2545, 2575), 'torch.square', 'torch.square', (['layers[i].weight'], {}), '(layers[i].weight)\n', (2557, 2575), False, 'import torch\n'), ((10715, 10727), 'torch.log', 'torch.log', (['y'], {}), '(y)\n', (10724, 10727), False, 'import torch\n'), ((10845, 10857), 'torch.log', 'torch.log', (['y'], {}), '(y)\n', (10854, 10857), False, 'import torch\n')] |
from suspect.fitting import singlet
from suspect import basis, MRSData
import numpy
import pytest
import random
numpy.random.seed(1024)
@pytest.fixture
def fixed_fid():
time_axis = numpy.arange(0, 0.512, 5e-4)
fid = basis.gaussian(time_axis, 0, 0, 50.0) + 0.00001 * (numpy.random.rand(1024) - 0.5)
return fid
@pytest.fixture
def fixed_fid_sum():
time_axis = numpy.arange(0, 0.512, 5e-4)
fid = basis.gaussian(time_axis, 0, 0, 50.0) + 0.00001 * (numpy.random.rand(1024) - 0.5)
fid2 = basis.gaussian(time_axis, 200, 0, 50.0)
return fid + fid2
def test_gaussian(fixed_fid):
data = MRSData(fixed_fid, 5e-4, 123)
# Original test with all parameters passed in; correct data types; integer values
model = {
"phase0": 0,
"phase1": 0,
"pcr": {
"amplitude": 1,
"fwhm": {
"value": 45,
"min": 42.0,
"max": 55
},
"phase": "0",
"frequency": 0.0
}
}
fitting_result = singlet.fit(data, model)
numpy.testing.assert_allclose(fitting_result["model"]["pcr"]["fwhm"], 50.0, rtol=1e-2)
numpy.testing.assert_allclose(fitting_result["model"]["pcr"]["amplitude"], 1.0, rtol=2e-2)
numpy.testing.assert_allclose(fitting_result["model"]["pcr"]["frequency"], 0.0, atol=1e-1)
numpy.testing.assert_allclose(fitting_result["fit"], fixed_fid, atol=0.001)
assert(isinstance(fitting_result["fit"], MRSData))
def test_bad_param(fixed_fid):
data = MRSData(fixed_fid, 5e-4, 123)
# invalid key added to width dict, to test whether KeyError is raised
model = {
"phase0": 0.0,
"phase1": 0.0,
"pcr": {
"amplitude": 1.0,
"fwhm": {
"value": 45.0,
"min": 42.0,
"max": 55.0,
"avg": 47 # this is the bad key
},
"phase": "0",
"frequency": 0.0
}
}
with pytest.raises(KeyError):
fitting_result = singlet.fit(data, model)
def test_missing_param(fixed_fid):
data = MRSData(fixed_fid, 5e-4, 123)
# No width value passed in, to test whether KeyError is raised
model = {
"phase0": 0,
"phase1": 0,
"pcr": {
"amplitude": 1,
"fwhm": {
# "value": 45,
"min": 42,
"max": 55,
},
"phase": "0",
"frequency": 0
}
}
with pytest.raises(KeyError):
fitting_result = singlet.fit(data, model)
def test_missing_peak_phase(fixed_fid):
data = MRSData(fixed_fid, 5e-4, 123)
# No phase value passed in, to test whether phase is fixed to 0 by default
model = {
"phase0": 0,
"phase1": 0,
"pcr": {
"amplitude": 1,
"fwhm": {
"value": 45,
"min": 42,
"max": 55,
},
# "phase": "0",
"frequency": 0
}
}
fitting_result = singlet.fit(data, model)
numpy.testing.assert_allclose(fitting_result["model"]["pcr"]["fwhm"], 50.0, rtol=5e-2)
numpy.testing.assert_allclose(fitting_result["model"]["pcr"]["amplitude"], 1.0, rtol=5e-2)
numpy.testing.assert_allclose(fitting_result["model"]["pcr"]["frequency"], 0.0, atol=1e-1)
numpy.testing.assert_allclose(fitting_result["fit"], fixed_fid, atol=0.001)
def test_missing_global_phase(fixed_fid):
data = MRSData(fixed_fid, 5e-4, 123)
# None value supplied for phase0 and phase1, to test whether TypeError is raised
model = {
"phase0": None,
"phase1": None,
"pcr": {
"amplitude": 1.0,
"fwhm": {
"value": 45.0,
"min": 42.0,
"max": 55.0,
},
"phase": "0",
"frequency": 0.0
}
}
with pytest.raises(TypeError):
fitting_result = singlet.fit(data, model)
def test_bad_param_value(fixed_fid):
data = MRSData(fixed_fid, 5e-4, 123)
# None value supplied for amplitude, to test whether TypeError is raised
model = {
"phase0": 0.0,
"phase1": 0.0,
"pcr": {
"amplitude": None,
"fwhm": {
"value": 45.0,
"min": 42.0,
"max": 55.0,
},
"phase": "0",
"frequency": 0.0
}
}
with pytest.raises(TypeError):
fitting_result = singlet.fit(data, model)
def test_circular_dependencies(fixed_fid):
data = MRSData(fixed_fid, 5e-4, 123)
model = {
"phase0": 0.0,
"phase1": 0.0,
"pcr": {
"amplitude": 1.0,
"fwhm": {
"value": 45.0,
"min": 42.0,
"max": 55.0,
},
"phase": "0",
"frequency": "pcr2_frequency+200"
},
"pcr2": {
"amplitude": 1.0,
"fwhm": {
"value": 45.0,
"min": 42.0,
"max": 55.0,
},
"phase": "0",
"frequency": "pcr_frequency-200"
}
}
with pytest.raises(ReferenceError):
fitting_result = singlet.fit(data, model)
def test_dependencies(fixed_fid_sum):
data = MRSData(fixed_fid_sum, 5e-4, 123)
model = {
"phase0": 0.0,
"phase1": 0.0,
"pcr": {
"amplitude": 1.0,
"fwhm": {
"value": 45.0,
"min": 42.0,
"max": 55.0,
},
"phase": "0",
"frequency": 0
},
"pcr2": {
"amplitude": 1.0,
"fwhm": {
"value": 45.0,
"min": 42.0,
"max": 55.0,
},
"phase": "0",
"frequency": "pcr_frequency+200"
}
}
fitting_result = singlet.fit(data, model)
numpy.testing.assert_allclose(fitting_result["model"]["pcr"]["fwhm"], 50.0, rtol=1e-2)
numpy.testing.assert_allclose(fitting_result["model"]["pcr"]["amplitude"], 1.0, rtol=2e-2)
numpy.testing.assert_allclose(fitting_result["model"]["pcr"]["frequency"], 0.0, atol=1e-1)
numpy.testing.assert_allclose(fitting_result["fit"], fixed_fid_sum, atol=0.001)
def test_reordered_dependencies(fixed_fid_sum):
data = MRSData(fixed_fid_sum, 5e-4, 123)
model = {
"phase0": 0.0,
"phase1": 0.0,
"pcr": {
"amplitude": 1.0,
"fwhm": {
"value": 45.0,
"min": 42.0,
"max": 55.0,
},
"phase": "0",
"frequency": "pcr2_frequency+200"
},
"pcr2": {
"amplitude": 1.0,
"fwhm": {
"value": 45.0,
"min": 42.0,
"max": 55.0,
},
"phase": "0",
"frequency": 0
}
}
fitting_result = singlet.fit(data, model)
numpy.testing.assert_allclose(fitting_result["model"]["pcr"]["fwhm"], 50.0, rtol=1e-2)
numpy.testing.assert_allclose(fitting_result["model"]["pcr"]["amplitude"], 1.0, rtol=2e-2)
numpy.testing.assert_allclose(fitting_result["model"]["pcr"]["frequency"], 200.0, atol=1e-1)
numpy.testing.assert_allclose(fitting_result["fit"], fixed_fid_sum, atol=0.001)
def test_missing_dependencies(fixed_fid_sum):
data = MRSData(fixed_fid_sum, 5e-4, 123)
model = {
"phase0": 0.0,
"phase1": 0.0,
"pcr2": {
"amplitude": 1.0,
"frequency": "pcr3_frequency+200",
"fwhm": {
"value": 45.0,
"min": 42.0,
"max": 55.0,
},
"phase": "0",
},
"pcr": {
"amplitude": 1.0,
"fwhm": {
"value": 45.0,
"min": 42.0,
"max": 55.0,
},
"phase": "0",
"frequency": 0
}
}
with pytest.raises(NameError):
fitting_result = singlet.fit(data, model)
| [
"suspect.fitting.singlet.fit",
"numpy.random.seed",
"suspect.basis.gaussian",
"pytest.raises",
"numpy.arange",
"numpy.random.rand",
"numpy.testing.assert_allclose",
"suspect.MRSData"
] | [((114, 137), 'numpy.random.seed', 'numpy.random.seed', (['(1024)'], {}), '(1024)\n', (131, 137), False, 'import numpy\n'), ((189, 219), 'numpy.arange', 'numpy.arange', (['(0)', '(0.512)', '(0.0005)'], {}), '(0, 0.512, 0.0005)\n', (201, 219), False, 'import numpy\n'), ((381, 411), 'numpy.arange', 'numpy.arange', (['(0)', '(0.512)', '(0.0005)'], {}), '(0, 0.512, 0.0005)\n', (393, 411), False, 'import numpy\n'), ((513, 552), 'suspect.basis.gaussian', 'basis.gaussian', (['time_axis', '(200)', '(0)', '(50.0)'], {}), '(time_axis, 200, 0, 50.0)\n', (527, 552), False, 'from suspect import basis, MRSData\n'), ((619, 650), 'suspect.MRSData', 'MRSData', (['fixed_fid', '(0.0005)', '(123)'], {}), '(fixed_fid, 0.0005, 123)\n', (626, 650), False, 'from suspect import basis, MRSData\n'), ((1050, 1074), 'suspect.fitting.singlet.fit', 'singlet.fit', (['data', 'model'], {}), '(data, model)\n', (1061, 1074), False, 'from suspect.fitting import singlet\n'), ((1080, 1170), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (["fitting_result['model']['pcr']['fwhm']", '(50.0)'], {'rtol': '(0.01)'}), "(fitting_result['model']['pcr']['fwhm'], 50.0,\n rtol=0.01)\n", (1109, 1170), False, 'import numpy\n'), ((1171, 1266), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (["fitting_result['model']['pcr']['amplitude']", '(1.0)'], {'rtol': '(0.02)'}), "(fitting_result['model']['pcr']['amplitude'], \n 1.0, rtol=0.02)\n", (1200, 1266), False, 'import numpy\n'), ((1266, 1360), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (["fitting_result['model']['pcr']['frequency']", '(0.0)'], {'atol': '(0.1)'}), "(fitting_result['model']['pcr']['frequency'], \n 0.0, atol=0.1)\n", (1295, 1360), False, 'import numpy\n'), ((1362, 1437), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (["fitting_result['fit']", 'fixed_fid'], {'atol': '(0.001)'}), "(fitting_result['fit'], fixed_fid, atol=0.001)\n", (1391, 1437), False, 'import numpy\n'), ((1539, 1570), 'suspect.MRSData', 'MRSData', (['fixed_fid', '(0.0005)', '(123)'], {}), '(fixed_fid, 0.0005, 123)\n', (1546, 1570), False, 'from suspect import basis, MRSData\n'), ((2130, 2161), 'suspect.MRSData', 'MRSData', (['fixed_fid', '(0.0005)', '(123)'], {}), '(fixed_fid, 0.0005, 123)\n', (2137, 2161), False, 'from suspect import basis, MRSData\n'), ((2658, 2689), 'suspect.MRSData', 'MRSData', (['fixed_fid', '(0.0005)', '(123)'], {}), '(fixed_fid, 0.0005, 123)\n', (2665, 2689), False, 'from suspect import basis, MRSData\n'), ((3082, 3106), 'suspect.fitting.singlet.fit', 'singlet.fit', (['data', 'model'], {}), '(data, model)\n', (3093, 3106), False, 'from suspect.fitting import singlet\n'), ((3112, 3202), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (["fitting_result['model']['pcr']['fwhm']", '(50.0)'], {'rtol': '(0.05)'}), "(fitting_result['model']['pcr']['fwhm'], 50.0,\n rtol=0.05)\n", (3141, 3202), False, 'import numpy\n'), ((3203, 3298), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (["fitting_result['model']['pcr']['amplitude']", '(1.0)'], {'rtol': '(0.05)'}), "(fitting_result['model']['pcr']['amplitude'], \n 1.0, rtol=0.05)\n", (3232, 3298), False, 'import numpy\n'), ((3298, 3392), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (["fitting_result['model']['pcr']['frequency']", '(0.0)'], {'atol': '(0.1)'}), "(fitting_result['model']['pcr']['frequency'], \n 0.0, atol=0.1)\n", (3327, 3392), False, 'import numpy\n'), ((3394, 3469), 'numpy.testing.assert_allclose', 
'numpy.testing.assert_allclose', (["fitting_result['fit']", 'fixed_fid'], {'atol': '(0.001)'}), "(fitting_result['fit'], fixed_fid, atol=0.001)\n", (3423, 3469), False, 'import numpy\n'), ((3526, 3557), 'suspect.MRSData', 'MRSData', (['fixed_fid', '(0.0005)', '(123)'], {}), '(fixed_fid, 0.0005, 123)\n', (3533, 3557), False, 'from suspect import basis, MRSData\n'), ((4084, 4115), 'suspect.MRSData', 'MRSData', (['fixed_fid', '(0.0005)', '(123)'], {}), '(fixed_fid, 0.0005, 123)\n', (4091, 4115), False, 'from suspect import basis, MRSData\n'), ((4640, 4671), 'suspect.MRSData', 'MRSData', (['fixed_fid', '(0.0005)', '(123)'], {}), '(fixed_fid, 0.0005, 123)\n', (4647, 4671), False, 'from suspect import basis, MRSData\n'), ((5391, 5426), 'suspect.MRSData', 'MRSData', (['fixed_fid_sum', '(0.0005)', '(123)'], {}), '(fixed_fid_sum, 0.0005, 123)\n', (5398, 5426), False, 'from suspect import basis, MRSData\n'), ((6006, 6030), 'suspect.fitting.singlet.fit', 'singlet.fit', (['data', 'model'], {}), '(data, model)\n', (6017, 6030), False, 'from suspect.fitting import singlet\n'), ((6036, 6126), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (["fitting_result['model']['pcr']['fwhm']", '(50.0)'], {'rtol': '(0.01)'}), "(fitting_result['model']['pcr']['fwhm'], 50.0,\n rtol=0.01)\n", (6065, 6126), False, 'import numpy\n'), ((6127, 6222), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (["fitting_result['model']['pcr']['amplitude']", '(1.0)'], {'rtol': '(0.02)'}), "(fitting_result['model']['pcr']['amplitude'], \n 1.0, rtol=0.02)\n", (6156, 6222), False, 'import numpy\n'), ((6222, 6316), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (["fitting_result['model']['pcr']['frequency']", '(0.0)'], {'atol': '(0.1)'}), "(fitting_result['model']['pcr']['frequency'], \n 0.0, atol=0.1)\n", (6251, 6316), False, 'import numpy\n'), ((6318, 6397), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (["fitting_result['fit']", 'fixed_fid_sum'], {'atol': '(0.001)'}), "(fitting_result['fit'], fixed_fid_sum, atol=0.001)\n", (6347, 6397), False, 'import numpy\n'), ((6460, 6495), 'suspect.MRSData', 'MRSData', (['fixed_fid_sum', '(0.0005)', '(123)'], {}), '(fixed_fid_sum, 0.0005, 123)\n', (6467, 6495), False, 'from suspect import basis, MRSData\n'), ((7076, 7100), 'suspect.fitting.singlet.fit', 'singlet.fit', (['data', 'model'], {}), '(data, model)\n', (7087, 7100), False, 'from suspect.fitting import singlet\n'), ((7106, 7196), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (["fitting_result['model']['pcr']['fwhm']", '(50.0)'], {'rtol': '(0.01)'}), "(fitting_result['model']['pcr']['fwhm'], 50.0,\n rtol=0.01)\n", (7135, 7196), False, 'import numpy\n'), ((7197, 7292), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (["fitting_result['model']['pcr']['amplitude']", '(1.0)'], {'rtol': '(0.02)'}), "(fitting_result['model']['pcr']['amplitude'], \n 1.0, rtol=0.02)\n", (7226, 7292), False, 'import numpy\n'), ((7292, 7388), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (["fitting_result['model']['pcr']['frequency']", '(200.0)'], {'atol': '(0.1)'}), "(fitting_result['model']['pcr']['frequency'], \n 200.0, atol=0.1)\n", (7321, 7388), False, 'import numpy\n'), ((7390, 7469), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (["fitting_result['fit']", 'fixed_fid_sum'], {'atol': '(0.001)'}), "(fitting_result['fit'], fixed_fid_sum, atol=0.001)\n", (7419, 7469), False, 'import numpy\n'), ((7530, 7565), 
'suspect.MRSData', 'MRSData', (['fixed_fid_sum', '(0.0005)', '(123)'], {}), '(fixed_fid_sum, 0.0005, 123)\n', (7537, 7565), False, 'from suspect import basis, MRSData\n'), ((228, 265), 'suspect.basis.gaussian', 'basis.gaussian', (['time_axis', '(0)', '(0)', '(50.0)'], {}), '(time_axis, 0, 0, 50.0)\n', (242, 265), False, 'from suspect import basis, MRSData\n'), ((420, 457), 'suspect.basis.gaussian', 'basis.gaussian', (['time_axis', '(0)', '(0)', '(50.0)'], {}), '(time_axis, 0, 0, 50.0)\n', (434, 457), False, 'from suspect import basis, MRSData\n'), ((2006, 2029), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (2019, 2029), False, 'import pytest\n'), ((2056, 2080), 'suspect.fitting.singlet.fit', 'singlet.fit', (['data', 'model'], {}), '(data, model)\n', (2067, 2080), False, 'from suspect.fitting import singlet\n'), ((2529, 2552), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (2542, 2552), False, 'import pytest\n'), ((2579, 2603), 'suspect.fitting.singlet.fit', 'singlet.fit', (['data', 'model'], {}), '(data, model)\n', (2590, 2603), False, 'from suspect.fitting import singlet\n'), ((3957, 3981), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3970, 3981), False, 'import pytest\n'), ((4008, 4032), 'suspect.fitting.singlet.fit', 'singlet.fit', (['data', 'model'], {}), '(data, model)\n', (4019, 4032), False, 'from suspect.fitting import singlet\n'), ((4507, 4531), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4520, 4531), False, 'import pytest\n'), ((4558, 4582), 'suspect.fitting.singlet.fit', 'singlet.fit', (['data', 'model'], {}), '(data, model)\n', (4569, 4582), False, 'from suspect.fitting import singlet\n'), ((5258, 5287), 'pytest.raises', 'pytest.raises', (['ReferenceError'], {}), '(ReferenceError)\n', (5271, 5287), False, 'import pytest\n'), ((5314, 5338), 'suspect.fitting.singlet.fit', 'singlet.fit', (['data', 'model'], {}), '(data, model)\n', (5325, 5338), False, 'from suspect.fitting import singlet\n'), ((8136, 8160), 'pytest.raises', 'pytest.raises', (['NameError'], {}), '(NameError)\n', (8149, 8160), False, 'import pytest\n'), ((8187, 8211), 'suspect.fitting.singlet.fit', 'singlet.fit', (['data', 'model'], {}), '(data, model)\n', (8198, 8211), False, 'from suspect.fitting import singlet\n'), ((279, 302), 'numpy.random.rand', 'numpy.random.rand', (['(1024)'], {}), '(1024)\n', (296, 302), False, 'import numpy\n'), ((471, 494), 'numpy.random.rand', 'numpy.random.rand', (['(1024)'], {}), '(1024)\n', (488, 494), False, 'import numpy\n')] |
from __future__ import division, print_function
import os
import numpy as np
import pandas as pd
from pdb import set_trace
from metrics import rank_diff
from DataUtil import get_all_projects
from Model import train_prediction_model, train_transfer_model
def main(n_reps=30):
data_path = os.path.realpath("./data")
projects = get_all_projects(data_path)
results = dict()
for project in projects:
files = project.files()
results_0 = dict()
        for source_name, source_conf in files.items():
            results_0.update({source_name: {}})
            for target_name, target_conf in files.items():
                if source_name != target_name:
                    r_diff = []
                    for _ in range(n_reps):
"Construct a prediction model using source"
predict_model = train_prediction_model(
source_conf, T=5)
"""Sample 15 from train and test datasets
to train a transfer model
"""
"Find common configs between source and target"
common = pd.merge(
source_conf, target_conf, how="inner")
"Pick random 15 samples"
some = common.sample(n=10)
"Get the dependent variables to construct a LR model"
p_src = some[source_conf.columns[-1]]
p_tgt = some[target_conf.columns[-1]]
"Train a transfer model"
transfer_model = train_transfer_model(
p_src=p_src, p_tgt=p_tgt)
"Remove elements used to train transfer model from target"
target_conf = target_conf.drop(
some.index, errors="ignore")
"Perform tansfer"
target_indep = target_conf[target_conf.columns[:-1]]
target_actual = target_conf[target_conf.columns[-1]]
predicted_raw = predict_model.predict(
target_indep).reshape(-1, 1)
target_predicted = transfer_model.predict(
predicted_raw).reshape(1, -1)[0]
"Get rank difference"
r_diff.append(rank_diff(actual=target_actual,
predicted=target_predicted))
results_0[source_name].update(
{target_name: int(np.median(r_diff))})
results.update({project.name: pd.DataFrame(results_0)})
# -------------------- DEBUG -------------------- #
set_trace()
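# Added sketch (hedged): `train_transfer_model` is imported from `Model` above; a
# minimal stand-in consistent with how it is called here (two aligned 1-D
# performance vectors in, a regressor with `.predict` out) might look like:
#
#   from sklearn.linear_model import LinearRegression
#   def train_transfer_model(p_src, p_tgt):
#       reg = LinearRegression()
#       reg.fit(np.asarray(p_src).reshape(-1, 1), np.asarray(p_tgt))
#       return reg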
if __name__ == "__main__":
main(n_reps=31)
| [
"DataUtil.get_all_projects",
"pandas.DataFrame",
"numpy.median",
"Model.train_prediction_model",
"os.path.realpath",
"pandas.merge",
"pdb.set_trace",
"metrics.rank_diff",
"Model.train_transfer_model"
] | [((294, 320), 'os.path.realpath', 'os.path.realpath', (['"""./data"""'], {}), "('./data')\n", (310, 320), False, 'import os\n'), ((336, 363), 'DataUtil.get_all_projects', 'get_all_projects', (['data_path'], {}), '(data_path)\n', (352, 363), False, 'from DataUtil import get_all_projects\n'), ((2820, 2831), 'pdb.set_trace', 'set_trace', ([], {}), '()\n', (2829, 2831), False, 'from pdb import set_trace\n'), ((2734, 2757), 'pandas.DataFrame', 'pd.DataFrame', (['results_0'], {}), '(results_0)\n', (2746, 2757), True, 'import pandas as pd\n'), ((879, 919), 'Model.train_prediction_model', 'train_prediction_model', (['source_conf'], {'T': '(5)'}), '(source_conf, T=5)\n', (901, 919), False, 'from Model import train_prediction_model, train_transfer_model\n'), ((1200, 1247), 'pandas.merge', 'pd.merge', (['source_conf', 'target_conf'], {'how': '"""inner"""'}), "(source_conf, target_conf, how='inner')\n", (1208, 1247), True, 'import pandas as pd\n'), ((1672, 1718), 'Model.train_transfer_model', 'train_transfer_model', ([], {'p_src': 'p_src', 'p_tgt': 'p_tgt'}), '(p_src=p_src, p_tgt=p_tgt)\n', (1692, 1718), False, 'from Model import train_prediction_model, train_transfer_model\n'), ((2475, 2534), 'metrics.rank_diff', 'rank_diff', ([], {'actual': 'target_actual', 'predicted': 'target_predicted'}), '(actual=target_actual, predicted=target_predicted)\n', (2484, 2534), False, 'from metrics import rank_diff\n'), ((2674, 2691), 'numpy.median', 'np.median', (['r_diff'], {}), '(r_diff)\n', (2683, 2691), True, 'import numpy as np\n')] |
import numpy as np
from polimorfo.utils import maskutils
import pytest
def test_mask_to_polygons():
mask = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.uint8)
polygons = maskutils.mask_to_polygon(mask)
assert len(polygons) == 1
def test_bbox():
polygons = [[1, 2, 2, 3, 5, 5, 10, 25]]
bbox = maskutils.bbox(polygons, 64, 64)
area = maskutils.area(bbox)
assert area == 4
def test_bbox_zero_area():
polygons = [[1, 1, 2, 2, 3, 3, 4, 4]]
bbox = maskutils.bbox(polygons, 64, 64)
area = maskutils.area(bbox)
assert area == 0
def test_bbox_invalid_polygons():
with pytest.raises(Exception) as ex:
polygons = [[]]
maskutils.bbox(polygons, 64, 64)
    assert str(ex.value) == 'input type is not supported.'
with pytest.raises(Exception) as ex:
polygons = [[1, 1, 2, 2]]
maskutils.bbox(polygons, 64, 64)
assert ex.value == "Argument 'bb' has incorrect type (expected numpy.ndarray, got list)" | [
"polimorfo.utils.maskutils.mask_to_polygon",
"polimorfo.utils.maskutils.area",
"pytest.raises",
"numpy.array",
"polimorfo.utils.maskutils.bbox"
] | [((113, 444), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, \n 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1,\n 1, 1, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 0,\n 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]'], {'dtype': 'np.uint8'}), '([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [\n 0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0], [0, 0, 0,\n 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1,\n 1, 1, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0]], dtype=np.uint8)\n', (121, 444), True, 'import numpy as np\n'), ((497, 528), 'polimorfo.utils.maskutils.mask_to_polygon', 'maskutils.mask_to_polygon', (['mask'], {}), '(mask)\n', (522, 528), False, 'from polimorfo.utils import maskutils\n'), ((633, 665), 'polimorfo.utils.maskutils.bbox', 'maskutils.bbox', (['polygons', '(64)', '(64)'], {}), '(polygons, 64, 64)\n', (647, 665), False, 'from polimorfo.utils import maskutils\n'), ((677, 697), 'polimorfo.utils.maskutils.area', 'maskutils.area', (['bbox'], {}), '(bbox)\n', (691, 697), False, 'from polimorfo.utils import maskutils\n'), ((801, 833), 'polimorfo.utils.maskutils.bbox', 'maskutils.bbox', (['polygons', '(64)', '(64)'], {}), '(polygons, 64, 64)\n', (815, 833), False, 'from polimorfo.utils import maskutils\n'), ((845, 865), 'polimorfo.utils.maskutils.area', 'maskutils.area', (['bbox'], {}), '(bbox)\n', (859, 865), False, 'from polimorfo.utils import maskutils\n'), ((932, 956), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (945, 956), False, 'import pytest\n'), ((996, 1028), 'polimorfo.utils.maskutils.bbox', 'maskutils.bbox', (['polygons', '(64)', '(64)'], {}), '(polygons, 64, 64)\n', (1010, 1028), False, 'from polimorfo.utils import maskutils\n'), ((1097, 1121), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1110, 1121), False, 'import pytest\n'), ((1171, 1203), 'polimorfo.utils.maskutils.bbox', 'maskutils.bbox', (['polygons', '(64)', '(64)'], {}), '(polygons, 64, 64)\n', (1185, 1203), False, 'from polimorfo.utils import maskutils\n')] |
import sys
import numpy as np
from mygrad.nnet.activations import sigmoid
from tests.wrappers.uber import backprop_test_factory, fwdprop_test_factory
@fwdprop_test_factory(
mygrad_func=sigmoid,
true_func=lambda x: 1 / (1 + np.exp(-x)),
num_arrays=1,
index_to_bnds={0: (-np.log(sys.float_info.max), None)},
)
def test_sigmoid_fwd():
pass
@backprop_test_factory(
mygrad_func=sigmoid,
true_func=lambda x: 1 / (1 + np.exp(-x)),
num_arrays=1,
index_to_bnds={0: (-np.log(sys.float_info.max), None)},
)
def test_sigmoid_bkwd():
pass
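# Added note (hedged): the lower bound -log(sys.float_info.max) keeps exp(-x)
# from overflowing in the reference expression 1 / (1 + np.exp(-x)), so the test
# factories only draw inputs where the true function evaluates to a finite value.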
| [
"numpy.log",
"numpy.exp"
] | [((235, 245), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (241, 245), True, 'import numpy as np\n'), ((290, 316), 'numpy.log', 'np.log', (['sys.float_info.max'], {}), '(sys.float_info.max)\n', (296, 316), True, 'import numpy as np\n'), ((445, 455), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (451, 455), True, 'import numpy as np\n'), ((500, 526), 'numpy.log', 'np.log', (['sys.float_info.max'], {}), '(sys.float_info.max)\n', (506, 526), True, 'import numpy as np\n')] |
import numpy
# ========================== Some observation engine specifications
oracle_engine_specification = [
("turn_index", "all"),
("task_state", "all"),
("user_state", "all"),
("assistant_state", "all"),
("user_action", "all"),
("assistant_action", "all"),
]
blind_engine_specification = [
("turn_index", "all"),
("task_state", None),
("user_state", None),
("assistant_state", None),
("user_action", "all"),
("assistant_action", "all"),
]
base_task_engine_specification = [
("turn_index", "all"),
("task_state", "all"),
("user_state", None),
("assistant_state", None),
("user_action", "all"),
("assistant_action", "all"),
]
base_user_engine_specification = [
("turn_index", "all"),
("task_state", "all"),
("user_state", "all"),
("assistant_state", None),
("user_action", "all"),
("assistant_action", "all"),
]
base_assistant_engine_specification = [
("turn_index", "all"),
("task_state", "all"),
("user_state", None),
("assistant_state", "all"),
("user_action", "all"),
("assistant_action", "all"),
]
custom_example_specification = [
("turn_index", "all"),
("task_state", "substate1", "all"),
("user_state", "substate1", slice(0, 2, 1)),
("assistant_state", "None"),
("user_action", "all"),
("assistant_action", "all"),
]
# ===================== Passing extra rules ==============
# each rule is a dictionnary {key:value} with
# key = (state, substate)
# value = (function, args)
# Be careful: args has to be a tuple, so for a single argument arg, do (arg,)
# An exemple
# obs_matrix = {('task_state', 'x'): (coopihc.observation.f_obs_matrix, (C,))}
# extradeterministicrules = {}
# extradeterministicrules.update(obs_matrix)
# ==================== Deterministic functions
# A linear combination of observation components
def observation_linear_combination(_obs, game_state, C):
return C @ _obs[0]
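# Added usage sketch (hedged): wiring the deterministic rule above into the
# extra-rules dictionary described in the comments, with a hypothetical 1x2
# mixing matrix C applied to substate 'x' of the task state:
#
#   C = numpy.array([[1.0, 0.5]])
#   extradeterministicrules = {
#       ("task_state", "x"): (observation_linear_combination, (C,))
#   }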
# ==================== Noise functions
# Additive Gaussian Noise where D shapes the Noise
def additive_gaussian_noise(_obs, gamestate, D, *args):
try:
mu, sigma = args
except ValueError:
mu, sigma = numpy.zeros(_obs.shape), numpy.eye(max(_obs.shape))
    noise = D @ numpy.random.multivariate_normal(mu, sigma, size=1).reshape(-1, 1)
    return _obs + noise, noise
| [
"numpy.zeros",
"numpy.random.multivariate_normal"
] | [((2203, 2226), 'numpy.zeros', 'numpy.zeros', (['_obs.shape'], {}), '(_obs.shape)\n', (2214, 2226), False, 'import numpy\n'), ((2363, 2414), 'numpy.random.multivariate_normal', 'numpy.random.multivariate_normal', (['mu', 'sigma'], {'size': '(1)'}), '(mu, sigma, size=1)\n', (2395, 2414), False, 'import numpy\n'), ((2277, 2328), 'numpy.random.multivariate_normal', 'numpy.random.multivariate_normal', (['mu', 'sigma'], {'size': '(1)'}), '(mu, sigma, size=1)\n', (2309, 2328), False, 'import numpy\n')] |
import numpy as np
from scipy.spatial.distance import cdist
from pymoo.model.indicator import Indicator
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
class RMetric(Indicator):
def __init__(self, curr_pop, whole_pop, ref_points, problem, w=None):
"""
Parameters
----------
curr_pop : numpy.array
Population from algorithm being evaluated
whole_pop : numpy.array
Whole population of all algorithms
ref_points : numpy.array
list of reference points
problem : class
problem instance
w : numpy.array
weights for each objective
"""
Indicator.__init__(self)
self.curr_pop = curr_pop
self.whole_pop = whole_pop
self.ref_points = ref_points
self.problem = problem
        w_ = np.ones(self.ref_points.shape[1]) if w is None else w
self.w_points = self.ref_points + 2 * w_
def _filter(self):
def check_dominance(a, b, n_obj):
flag1 = False
flag2 = False
for i in range(n_obj):
if a[i] < b[i]:
flag1 = True
else:
if a[i] > b[i]:
flag2 = True
if flag1 and not flag2:
return 1
elif not flag1 and flag2:
return -1
else:
return 0
num_objs = np.size(self.curr_pop, axis=1)
index_array = np.zeros(np.size(self.curr_pop, axis=0))
for i in range(np.size(self.curr_pop, 0)):
for j in range(np.size(self.whole_pop, 0)):
flag = check_dominance(self.curr_pop[i, :], self.whole_pop[j, :], num_objs)
if flag == -1:
index_array[i] = 1
break
final_index = np.logical_not(index_array)
filtered_pop = self.curr_pop[final_index, :]
return filtered_pop
def _filter_fast(self):
filtered_pop = NonDominatedSorting.get_non_dominated(self.whole_pop, self.curr_pop)
return filtered_pop
def _preprocess(self, data, ref_point, w_point):
datasize = np.size(data, 0)
# Identify representative point
ref_matrix = np.tile(ref_point, (datasize, 1))
w_matrix = np.tile(w_point, (datasize, 1))
# ratio of distance to the ref point over the distance between the w_point and the ref_point
diff_matrix = (data - ref_matrix) / (w_matrix - ref_matrix)
agg_value = np.amax(diff_matrix, axis=1)
idx = np.argmin(agg_value)
zp = [data[idx, :]]
return zp,
def _translate(self, zp, trimmed_data, ref_point, w_point):
# Solution translation - Matlab reproduction
# find k
temp = ((zp[0] - ref_point) / (w_point - ref_point))
kIdx = np.argmax(temp)
# find zl
temp = (zp[0][kIdx] - ref_point[kIdx]) / (w_point[kIdx] - ref_point[kIdx])
zl = ref_point + temp * (w_point - ref_point)
temp = zl - zp
shift_direction = np.tile(temp, (trimmed_data.shape[0], 1))
# new_size = self.curr_pop.shape[0]
return trimmed_data + shift_direction
def _trim(self, pop, centeroid, range=0.2):
"""
Box trimming
:param pop:
:param centeroid:
:param range:
:return:
"""
popsize, objDim = pop.shape
diff_matrix = pop - np.tile(centeroid, (popsize, 1))[0]
flags = np.sum(abs(diff_matrix) < range / 2, axis=1)
filtered_matrix = pop[np.where(flags == objDim)]
return filtered_matrix
def _trim_fast(self, pop, centeroid, range=0.2):
"""
Euclidean trimming
:param pop:
:param centeroid:
:param range:
:return:
"""
centeroid_matrix = cdist(pop, centeroid, metric='euclidean')
filtered_matrix = pop[np.where(centeroid_matrix < range / 2), :][0]
return filtered_matrix
def calc(self, hyper_volume=True, delta=0.2, pf=None):
"""
        This method calculates the R-IGD and R-HV based on the population that was provided.
:return: R-IGD and R-HV
"""
translated = []
final_PF = []
# 1. Prescreen Procedure - NDS Filtering
pop = self._filter()
if pf is not None:
solution = pf
else:
solution = self.problem.pareto_front()
# solution = calc_PF(1, 10000, 2)
labels = np.argmin(cdist(pop, self.ref_points), axis=1)
for i in range(len(self.ref_points)):
cluster = pop[np.where(labels == i)]
if len(cluster) != 0:
# 2. Representative Point Identification
zp = self._preprocess(cluster, self.ref_points[i], w_point=self.w_points[i])[0]
# 3. Filtering Procedure - Filter points
trimmed_data = self._trim(cluster, zp, range=delta)
# 4. Solution Translation
pop_t = self._translate(zp, trimmed_data, self.ref_points[i], w_point=self.w_points[i])
translated.extend(pop_t)
# 5. R-Metric Computation
target = self._preprocess(data=solution, ref_point=self.ref_points[i], w_point=self.w_points[i])
PF = self._trim(solution, target)
final_PF.extend(PF)
translated = np.array(translated)
if np.size(translated) == 0:
igd = -1
volume = -1
else:
# IGD Computation
from pymoo.performance_indicator.igd import IGD
IGD_ = IGD(final_PF)
igd = IGD_.calc(translated)
# HV Computation
nadir_point = np.amax(self.w_points, axis=0)
front = translated
dim = self.ref_points[0].shape[0]
if hyper_volume:
if dim < 3:
try:
# Python
from pymoo.performance_indicator.hv import HyperVolume
hv = HyperVolume(nadir_point)
volume = hv.compute(front)
except TypeError:
volume = -1
else:
# cpp
from pymoo.cpp.hypervolume.build import hypervolume
volume = hypervolume.calculate(dim, len(front), front, nadir_point)
else:
volume = np.nan
return igd, volume | [
"scipy.spatial.distance.cdist",
"numpy.size",
"pymoo.performance_indicator.hv.HyperVolume",
"numpy.argmax",
"numpy.logical_not",
"numpy.ones",
"numpy.argmin",
"numpy.amax",
"pymoo.util.nds.non_dominated_sorting.NonDominatedSorting.get_non_dominated",
"numpy.where",
"numpy.array",
"numpy.tile",... | [((704, 728), 'pymoo.model.indicator.Indicator.__init__', 'Indicator.__init__', (['self'], {}), '(self)\n', (722, 728), False, 'from pymoo.model.indicator import Indicator\n'), ((1479, 1509), 'numpy.size', 'np.size', (['self.curr_pop'], {'axis': '(1)'}), '(self.curr_pop, axis=1)\n', (1486, 1509), True, 'import numpy as np\n'), ((1890, 1917), 'numpy.logical_not', 'np.logical_not', (['index_array'], {}), '(index_array)\n', (1904, 1917), True, 'import numpy as np\n'), ((2052, 2120), 'pymoo.util.nds.non_dominated_sorting.NonDominatedSorting.get_non_dominated', 'NonDominatedSorting.get_non_dominated', (['self.whole_pop', 'self.curr_pop'], {}), '(self.whole_pop, self.curr_pop)\n', (2089, 2120), False, 'from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting\n'), ((2223, 2239), 'numpy.size', 'np.size', (['data', '(0)'], {}), '(data, 0)\n', (2230, 2239), True, 'import numpy as np\n'), ((2302, 2335), 'numpy.tile', 'np.tile', (['ref_point', '(datasize, 1)'], {}), '(ref_point, (datasize, 1))\n', (2309, 2335), True, 'import numpy as np\n'), ((2355, 2386), 'numpy.tile', 'np.tile', (['w_point', '(datasize, 1)'], {}), '(w_point, (datasize, 1))\n', (2362, 2386), True, 'import numpy as np\n'), ((2576, 2604), 'numpy.amax', 'np.amax', (['diff_matrix'], {'axis': '(1)'}), '(diff_matrix, axis=1)\n', (2583, 2604), True, 'import numpy as np\n'), ((2619, 2639), 'numpy.argmin', 'np.argmin', (['agg_value'], {}), '(agg_value)\n', (2628, 2639), True, 'import numpy as np\n'), ((2899, 2914), 'numpy.argmax', 'np.argmax', (['temp'], {}), '(temp)\n', (2908, 2914), True, 'import numpy as np\n'), ((3121, 3162), 'numpy.tile', 'np.tile', (['temp', '(trimmed_data.shape[0], 1)'], {}), '(temp, (trimmed_data.shape[0], 1))\n', (3128, 3162), True, 'import numpy as np\n'), ((3898, 3939), 'scipy.spatial.distance.cdist', 'cdist', (['pop', 'centeroid'], {'metric': '"""euclidean"""'}), "(pop, centeroid, metric='euclidean')\n", (3903, 3939), False, 'from scipy.spatial.distance import cdist\n'), ((5455, 5475), 'numpy.array', 'np.array', (['translated'], {}), '(translated)\n', (5463, 5475), True, 'import numpy as np\n'), ((878, 911), 'numpy.ones', 'np.ones', (['self.ref_points.shape[1]'], {}), '(self.ref_points.shape[1])\n', (885, 911), True, 'import numpy as np\n'), ((1541, 1571), 'numpy.size', 'np.size', (['self.curr_pop'], {'axis': '(0)'}), '(self.curr_pop, axis=0)\n', (1548, 1571), True, 'import numpy as np\n'), ((1596, 1621), 'numpy.size', 'np.size', (['self.curr_pop', '(0)'], {}), '(self.curr_pop, 0)\n', (1603, 1621), True, 'import numpy as np\n'), ((3623, 3648), 'numpy.where', 'np.where', (['(flags == objDim)'], {}), '(flags == objDim)\n', (3631, 3648), True, 'import numpy as np\n'), ((4575, 4602), 'scipy.spatial.distance.cdist', 'cdist', (['pop', 'self.ref_points'], {}), '(pop, self.ref_points)\n', (4580, 4602), False, 'from scipy.spatial.distance import cdist\n'), ((5488, 5507), 'numpy.size', 'np.size', (['translated'], {}), '(translated)\n', (5495, 5507), True, 'import numpy as np\n'), ((5682, 5695), 'pymoo.performance_indicator.igd.IGD', 'IGD', (['final_PF'], {}), '(final_PF)\n', (5685, 5695), False, 'from pymoo.performance_indicator.igd import IGD\n'), ((5792, 5822), 'numpy.amax', 'np.amax', (['self.w_points'], {'axis': '(0)'}), '(self.w_points, axis=0)\n', (5799, 5822), True, 'import numpy as np\n'), ((1651, 1677), 'numpy.size', 'np.size', (['self.whole_pop', '(0)'], {}), '(self.whole_pop, 0)\n', (1658, 1677), True, 'import numpy as np\n'), ((3496, 3528), 'numpy.tile', 'np.tile', (['centeroid', 
'(popsize, 1)'], {}), '(centeroid, (popsize, 1))\n', (3503, 3528), True, 'import numpy as np\n'), ((4685, 4706), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (4693, 4706), True, 'import numpy as np\n'), ((3970, 4008), 'numpy.where', 'np.where', (['(centeroid_matrix < range / 2)'], {}), '(centeroid_matrix < range / 2)\n', (3978, 4008), True, 'import numpy as np\n'), ((6123, 6147), 'pymoo.performance_indicator.hv.HyperVolume', 'HyperVolume', (['nadir_point'], {}), '(nadir_point)\n', (6134, 6147), False, 'from pymoo.performance_indicator.hv import HyperVolume\n')] |
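For illustration only (not part of the source): driving the R-metric with synthetic two-objective fronts. The shapes, the single reference point, and passing pf= to bypass the problem instance are assumptions; hyper_volume=False sidesteps the HV back ends, so r_hv comes back as nan.

import numpy as np
np.random.seed(0)
curr_pop = np.random.rand(20, 2)                      # objectives of the evaluated population
whole_pop = np.vstack([curr_pop, np.random.rand(50, 2)])
ref_points = np.array([[0.3, 0.3]])
pf = np.random.rand(200, 2)                            # stand-in Pareto front
metric = RMetric(curr_pop, whole_pop, ref_points, problem=None)
r_igd, r_hv = metric.calc(hyper_volume=False, delta=0.2, pf=pf)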
import gym
import matplotlib.pyplot as plt
import numpy as np
import copy
class PushBall:
def __init__(self, args, rank):
self.args = args
self.rank = rank
self.initialization(args)
def random_start(self):
#return np.array([self.size // 2, self.size // 2])
#return np.array(np.random.randint(self.size // 4, self.size // 4 * 3, [self.n_dim]))
#return np.array(np.random.randint(1, self.size - 1, [self.n_dim]))
return np.array(np.random.randint(self.random_l, self.random_r, [self.n_dim]))
def random_start_n(self):
ball_list = []
for i in range(self.n_ball):
while True:
new_ball = self.random_start()
flag = True
for j in range(i):
if (new_ball == ball_list[j]).all():
flag = False
for j, t_state in enumerate(self.state_n):
if (new_ball == t_state).all():
flag = False
if flag:
break
ball_list.append(new_ball)
return ball_list
def initialization(self, args):
self.is_print = self.rank == 0
self.args = args
self.size = args.size
self.map = np.zeros([self.size, self.size])
self.dec_int = args.gamma_dec != 0
self.penalty = args.penalty
if (self.is_print):
print(args.save_path)
print('>>>>>>>>>>>>>dec_int', self.dec_int)
self.n_agent = 2
self.n_action = 5
self.n_dim = 2
self.random_l = args.pushball_random_l
self.random_r = args.pushball_random_r
self.reward_wall = [1000, 1000, 1000, 1000]
self.state_n = [np.array([1, 1]) for _ in range(self.n_agent)]
self.n_ball = 1
self.ball_n = self.random_start_n()
self.eye = np.eye(self.size)
self.flag = np.eye(2)
# Used by OpenAI baselines
self.action_space = gym.spaces.Discrete(self.n_action)
self.observation_space = gym.spaces.Box(low=-1, high=1, shape=[args.size * 2 * self.n_agent +
args.size * 2 * self.n_ball])
self.num_envs = args.num_env
self.metadata = {'render.modes': []}
self.reward_range = (-100., 20000.)
self.spec = 2
self.t_step = 0
def step(self, action_n):
self.t_step += 1
        # per-ball count of how many agents push it with each of the 5 actions
        ball_count = [np.zeros(5) for i in range(self.n_ball)]
for i, action in enumerate(action_n):
new_row = -1
new_column = -1
if action == 0:
new_row = max(self.state_n[i][0] - 1, 1)
new_column = self.state_n[i][1]
elif action == 1:
new_row = self.state_n[i][0]
new_column = min(self.state_n[i][1] + 1, self.size - 2)
elif action == 2:
new_row = min(self.state_n[i][0] + 1, self.size - 2)
new_column = self.state_n[i][1]
elif action == 3:
new_row = self.state_n[i][0]
new_column = max(self.state_n[i][1] - 1, 1)
elif action == 4:
new_row = self.state_n[i][0]
new_column = self.state_n[i][1]
for j, ball in enumerate(self.ball_n):
if (self.state_n[i] != ball).any() and new_row == ball[0] and new_column == ball[1]:
ball_count[j][action] += 1
assert (action < 5)
new_ball_n = []
# Move Ball
for i, ball in enumerate(self.ball_n):
move_x = 0
if ball_count[i][0] - ball_count[i][2] >= 2:
move_x = -1
if ball_count[i][2] - ball_count[i][0] >= 2:
move_x = 1
move_y = 0
if ball_count[i][3] - ball_count[i][1] >= 2:
move_y = -1
if ball_count[i][1] - ball_count[i][3] >= 2:
move_y = 1
new_ball = np.array([ball[0] + move_x, ball[1] + move_y])
flag = True
for j, t_ball in enumerate(self.ball_n):
if (new_ball == t_ball).all():
flag = False
for j, t_state in enumerate(self.state_n):
if (new_ball == t_state).all():
flag = False
if flag:
new_ball_n.append(new_ball)
else:
new_ball_n.append(ball)
self.ball_n = new_ball_n
for i, action in enumerate(action_n):
new_row = -1
new_column = -1
if action == 0:
new_row = max(self.state_n[i][0] - 1, 1)
new_column = self.state_n[i][1]
elif action == 1:
new_row = self.state_n[i][0]
new_column = min(self.state_n[i][1] + 1, self.size - 2)
elif action == 2:
new_row = min(self.state_n[i][0] + 1, self.size - 2)
new_column = self.state_n[i][1]
elif action == 3:
new_row = self.state_n[i][0]
new_column = max(self.state_n[i][1] - 1, 1)
elif action == 4:
new_row = self.state_n[i][0]
new_column = self.state_n[i][1]
flag = False
for j, ball in enumerate(self.ball_n):
if (self.state_n[i] != ball).any() and new_row == ball[0] and new_column == ball[1]:
assert (action < 5)
flag = True
if flag:
new_row = self.state_n[i][0]
new_column = self.state_n[i][1]
self.state_n[i] = np.array([new_row, new_column])
ball_info = np.concatenate([[ball[0], ball[1]] for ball in self.ball_n], axis=None)
info_state_n = []
for i, state in enumerate(self.state_n):
full_state = np.concatenate([state, ball_info], axis=0)
info_state_n.append(full_state)
info = {'ball': self.ball_n, 'state': copy.deepcopy(info_state_n)}
return_obs = self.obs_n()
return_rew, info_r = self.reward()
return_done = self.done()
info['rew'] = info_r
return return_obs, return_rew, return_done, info
def reset(self):
self.t_step = 0
self.state_n = [np.array([1, 1]) for _ in range(self.n_agent)]
self.ball_n = self.random_start_n()
return self.obs_n()
def obs_n(self):
return [self.obs(i) for i in range(self.n_agent)]
def obs(self, i):
ball = np.concatenate([np.concatenate([self.eye[ball[0]], self.eye[ball[1]]], axis=0)
for ball in self.ball_n], axis=0)
return np.concatenate([self.eye[self.state_n[0][0]], self.eye[self.state_n[0][1]],
self.eye[self.state_n[1][0]], self.eye[self.state_n[1][1]],
ball]).copy()
def reward(self):
reward = 0
win_count = np.zeros(4)
for i, ball in enumerate(self.ball_n):
pre_reward = reward
if ball[0] == 0:
reward += self.reward_wall[0]
win_count[0] += 1
if ball[1] == 0:
reward += self.reward_wall[1]
win_count[1] += 1
if ball[0] == self.size - 1:
reward += self.reward_wall[2]
win_count[2] += 1
if ball[1] == self.size - 1:
reward += self.reward_wall[3]
win_count[3] += 1
if reward > pre_reward:
while True:
new_ball = self.random_start()
flag = True
for j, t_ball in enumerate(self.ball_n):
if (new_ball == t_ball).all():
flag = False
for j, t_state in enumerate(self.state_n):
if (new_ball == t_state).all():
flag = False
if flag:
break
self.ball_n[i] = new_ball
return [reward, reward], win_count
def done(self):
if self.t_step >= self.args.episode_length:
self.reset()
return 1
return 0
def close(self):
self.reset() | [
"copy.deepcopy",
"numpy.zeros",
"gym.spaces.Discrete",
"numpy.random.randint",
"numpy.array",
"gym.spaces.Box",
"numpy.eye",
"numpy.concatenate"
] | [((1036, 1068), 'numpy.zeros', 'np.zeros', (['[self.size, self.size]'], {}), '([self.size, self.size])\n', (1044, 1068), True, 'import numpy as np\n'), ((1555, 1572), 'numpy.eye', 'np.eye', (['self.size'], {}), '(self.size)\n', (1561, 1572), True, 'import numpy as np\n'), ((1587, 1596), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (1593, 1596), True, 'import numpy as np\n'), ((1650, 1684), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['self.n_action'], {}), '(self.n_action)\n', (1669, 1684), False, 'import gym\n'), ((1712, 1815), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(-1)', 'high': '(1)', 'shape': '[args.size * 2 * self.n_agent + args.size * 2 * self.n_ball]'}), '(low=-1, high=1, shape=[args.size * 2 * self.n_agent + args.\n size * 2 * self.n_ball])\n', (1726, 1815), False, 'import gym\n'), ((4596, 4667), 'numpy.concatenate', 'np.concatenate', (['[[ball[0], ball[1]] for ball in self.ball_n]'], {'axis': 'None'}), '([[ball[0], ball[1]] for ball in self.ball_n], axis=None)\n', (4610, 4667), True, 'import numpy as np\n'), ((5706, 5717), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (5714, 5717), True, 'import numpy as np\n'), ((447, 508), 'numpy.random.randint', 'np.random.randint', (['self.random_l', 'self.random_r', '[self.n_dim]'], {}), '(self.random_l, self.random_r, [self.n_dim])\n', (464, 508), True, 'import numpy as np\n'), ((1437, 1453), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (1445, 1453), True, 'import numpy as np\n'), ((2084, 2095), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (2092, 2095), True, 'import numpy as np\n'), ((3287, 3333), 'numpy.array', 'np.array', (['[ball[0] + move_x, ball[1] + move_y]'], {}), '([ball[0] + move_x, ball[1] + move_y])\n', (3295, 3333), True, 'import numpy as np\n'), ((4549, 4580), 'numpy.array', 'np.array', (['[new_row, new_column]'], {}), '([new_row, new_column])\n', (4557, 4580), True, 'import numpy as np\n'), ((4747, 4789), 'numpy.concatenate', 'np.concatenate', (['[state, ball_info]'], {'axis': '(0)'}), '([state, ball_info], axis=0)\n', (4761, 4789), True, 'import numpy as np\n'), ((4866, 4893), 'copy.deepcopy', 'copy.deepcopy', (['info_state_n'], {}), '(info_state_n)\n', (4879, 4893), False, 'import copy\n'), ((5120, 5136), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (5128, 5136), True, 'import numpy as np\n'), ((5343, 5405), 'numpy.concatenate', 'np.concatenate', (['[self.eye[ball[0]], self.eye[ball[1]]]'], {'axis': '(0)'}), '([self.eye[ball[0]], self.eye[ball[1]]], axis=0)\n', (5357, 5405), True, 'import numpy as np\n'), ((5459, 5605), 'numpy.concatenate', 'np.concatenate', (['[self.eye[self.state_n[0][0]], self.eye[self.state_n[0][1]], self.eye[self.\n state_n[1][0]], self.eye[self.state_n[1][1]], ball]'], {}), '([self.eye[self.state_n[0][0]], self.eye[self.state_n[0][1]],\n self.eye[self.state_n[1][0]], self.eye[self.state_n[1][1]], ball])\n', (5473, 5605), True, 'import numpy as np\n')] |
'''
###################################
Modified from Mike's predict_acc.py
###################################
'''
import os
import sys
import random
import pickle
import numpy as np
import pandas as pd
from keras.utils import to_categorical
from keras.models import load_model
base_path = '/home/tyt/how2ml/mfcc4'
base_data_path = os.path.join(base_path, 'data')
num_fold = 10
def getTrainData():
X = []
y = []
fname = []
for i in range(num_fold):
fileX = os.path.join(base_data_path, 'X/X' + str(i+1) + '.npy')
fileY = os.path.join(base_data_path, 'y/y' + str(i+1) + '.npy')
filefname = os.path.join(base_data_path, 'fname/fname' + str(i+1) + '.npy')
X.append(np.load(fileX))
y.append(np.load(fileY))
fname.append(np.load(filefname))
X = np.array(X)
y = np.array(y)
fname = np.array(fname)
return X, y, fname
def split_data(X, y, fname, idx):
X_train = []
y_train = []
fname_train = []
for i in range(num_fold):
if i == idx:
X_val = X[i]
y_val = y[i]
fname_val = fname[i]
continue
        if len(X_train) == 0:
X_train = X[i]
y_train = y[i]
fname_train = fname[i]
else:
X_train = np.concatenate((X_train, X[i]))
y_train = np.concatenate((y_train, y[i]))
fname_train = np.concatenate((fname_train, fname[i]))
return X_train, y_train, fname_train, X_val, y_val, fname_val
def getUnData():
fileX = os.path.join(base_data_path, 'X_unverified.npy')
fileY = os.path.join(base_data_path, 'y_unverified.npy')
filefname = os.path.join(base_data_path, 'fname_unverified.npy')
X_un = np.load(fileX)
y_un = np.load(fileY)
fname_un = np.load(filefname)
return X_un, y_un, fname_un
def getTestData():
fileX = os.path.join(base_data_path, 'X_test.npy')
filefname = os.path.join('./', 'fname_test.npy')
X_test = np.load(fileX)
fname_test = np.load(filefname)
return X_test, fname_test
if __name__ == '__main__':
X_train_all, y_train_all, fname_train_all = getTrainData()
X_test, fname_test = getTestData()
result_all = []
for i in range(10):
_, _, _, X_all, _, fname_all = split_data(X_train_all, y_train_all, fname_train_all, i)
base_model_path = os.path.join(base_path, 'cnn_model_152')
model_name = 'model{}'.format(i)
filename = os.path.join(base_model_path, model_name)
npy_predict = os.path.join(base_path, 'final_npy_predict_phase3_val_152')
if not os.path.exists(npy_predict):
os.makedirs(npy_predict)
csv_predict = os.path.join(base_path, 'final_csv_predict_phase3_val_152')
if not os.path.exists(csv_predict):
os.makedirs(csv_predict)
model = load_model(filename)
print('Predicting X_all...')
result = model.predict(X_all)
np.save(os.path.join(npy_predict, 'mow_mfcc4_resnet152_phase3_val_{}.npy'.format(i+1)), result)
        if len(result_all) == 0:
result_all = result
else:
result_all = np.concatenate((result_all, result))
df = pd.DataFrame(result)
df.insert(0, 'fname', fname_all)
df.to_csv(os.path.join(csv_predict, 'mow_mfcc4_resnet152_phase3_val_{}.csv'.format(i+1)), index=False, header=True)
print(result_all.shape)
print(fname_train_all.shape)
fname_train_all = fname_train_all.reshape((-1, 1))
df = pd.DataFrame(result_all)
df.insert(0, 'fname', fname_train_all)
df.to_csv(os.path.join(csv_predict, 'mow_mfcc4_resnet152_phase3_val_all.csv'), index=False, header=True)
| [
"keras.models.load_model",
"pandas.DataFrame",
"numpy.load",
"os.makedirs",
"os.path.exists",
"numpy.array",
"os.path.join",
"numpy.concatenate"
] | [((336, 367), 'os.path.join', 'os.path.join', (['base_path', '"""data"""'], {}), "(base_path, 'data')\n", (348, 367), False, 'import os\n'), ((816, 827), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (824, 827), True, 'import numpy as np\n'), ((836, 847), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (844, 847), True, 'import numpy as np\n'), ((860, 875), 'numpy.array', 'np.array', (['fname'], {}), '(fname)\n', (868, 875), True, 'import numpy as np\n'), ((1546, 1594), 'os.path.join', 'os.path.join', (['base_data_path', '"""X_unverified.npy"""'], {}), "(base_data_path, 'X_unverified.npy')\n", (1558, 1594), False, 'import os\n'), ((1607, 1655), 'os.path.join', 'os.path.join', (['base_data_path', '"""y_unverified.npy"""'], {}), "(base_data_path, 'y_unverified.npy')\n", (1619, 1655), False, 'import os\n'), ((1672, 1724), 'os.path.join', 'os.path.join', (['base_data_path', '"""fname_unverified.npy"""'], {}), "(base_data_path, 'fname_unverified.npy')\n", (1684, 1724), False, 'import os\n'), ((1737, 1751), 'numpy.load', 'np.load', (['fileX'], {}), '(fileX)\n', (1744, 1751), True, 'import numpy as np\n'), ((1763, 1777), 'numpy.load', 'np.load', (['fileY'], {}), '(fileY)\n', (1770, 1777), True, 'import numpy as np\n'), ((1793, 1811), 'numpy.load', 'np.load', (['filefname'], {}), '(filefname)\n', (1800, 1811), True, 'import numpy as np\n'), ((1877, 1919), 'os.path.join', 'os.path.join', (['base_data_path', '"""X_test.npy"""'], {}), "(base_data_path, 'X_test.npy')\n", (1889, 1919), False, 'import os\n'), ((1936, 1972), 'os.path.join', 'os.path.join', (['"""./"""', '"""fname_test.npy"""'], {}), "('./', 'fname_test.npy')\n", (1948, 1972), False, 'import os\n'), ((1987, 2001), 'numpy.load', 'np.load', (['fileX'], {}), '(fileX)\n', (1994, 2001), True, 'import numpy as np\n'), ((2019, 2037), 'numpy.load', 'np.load', (['filefname'], {}), '(filefname)\n', (2026, 2037), True, 'import numpy as np\n'), ((3528, 3552), 'pandas.DataFrame', 'pd.DataFrame', (['result_all'], {}), '(result_all)\n', (3540, 3552), True, 'import pandas as pd\n'), ((2372, 2412), 'os.path.join', 'os.path.join', (['base_path', '"""cnn_model_152"""'], {}), "(base_path, 'cnn_model_152')\n", (2384, 2412), False, 'import os\n'), ((2473, 2514), 'os.path.join', 'os.path.join', (['base_model_path', 'model_name'], {}), '(base_model_path, model_name)\n', (2485, 2514), False, 'import os\n'), ((2538, 2597), 'os.path.join', 'os.path.join', (['base_path', '"""final_npy_predict_phase3_val_152"""'], {}), "(base_path, 'final_npy_predict_phase3_val_152')\n", (2550, 2597), False, 'import os\n'), ((2702, 2761), 'os.path.join', 'os.path.join', (['base_path', '"""final_csv_predict_phase3_val_152"""'], {}), "(base_path, 'final_csv_predict_phase3_val_152')\n", (2714, 2761), False, 'import os\n'), ((2860, 2880), 'keras.models.load_model', 'load_model', (['filename'], {}), '(filename)\n', (2870, 2880), False, 'from keras.models import load_model\n'), ((3214, 3234), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {}), '(result)\n', (3226, 3234), True, 'import pandas as pd\n'), ((3610, 3677), 'os.path.join', 'os.path.join', (['csv_predict', '"""mow_mfcc4_resnet152_phase3_val_all.csv"""'], {}), "(csv_predict, 'mow_mfcc4_resnet152_phase3_val_all.csv')\n", (3622, 3677), False, 'import os\n'), ((717, 731), 'numpy.load', 'np.load', (['fileX'], {}), '(fileX)\n', (724, 731), True, 'import numpy as np\n'), ((750, 764), 'numpy.load', 'np.load', (['fileY'], {}), '(fileY)\n', (757, 764), True, 'import numpy as np\n'), ((787, 805), 'numpy.load', 'np.load', 
(['filefname'], {}), '(filefname)\n', (794, 805), True, 'import numpy as np\n'), ((1297, 1328), 'numpy.concatenate', 'np.concatenate', (['(X_train, X[i])'], {}), '((X_train, X[i]))\n', (1311, 1328), True, 'import numpy as np\n'), ((1351, 1382), 'numpy.concatenate', 'np.concatenate', (['(y_train, y[i])'], {}), '((y_train, y[i]))\n', (1365, 1382), True, 'import numpy as np\n'), ((1409, 1448), 'numpy.concatenate', 'np.concatenate', (['(fname_train, fname[i])'], {}), '((fname_train, fname[i]))\n', (1423, 1448), True, 'import numpy as np\n'), ((2613, 2640), 'os.path.exists', 'os.path.exists', (['npy_predict'], {}), '(npy_predict)\n', (2627, 2640), False, 'import os\n'), ((2654, 2678), 'os.makedirs', 'os.makedirs', (['npy_predict'], {}), '(npy_predict)\n', (2665, 2678), False, 'import os\n'), ((2777, 2804), 'os.path.exists', 'os.path.exists', (['csv_predict'], {}), '(csv_predict)\n', (2791, 2804), False, 'import os\n'), ((2818, 2842), 'os.makedirs', 'os.makedirs', (['csv_predict'], {}), '(csv_predict)\n', (2829, 2842), False, 'import os\n'), ((3162, 3198), 'numpy.concatenate', 'np.concatenate', (['(result_all, result)'], {}), '((result_all, result))\n', (3176, 3198), True, 'import numpy as np\n')] |
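To make the fold logic above concrete, an illustrative call (the shapes are arbitrary): split_data performs leave-one-fold-out selection over the ten preloaded folds.

X = [np.zeros((5, 4)) for _ in range(num_fold)]
y = [np.zeros(5) for _ in range(num_fold)]
f = [np.zeros(5) for _ in range(num_fold)]
X_tr, y_tr, f_tr, X_val, y_val, f_val = split_data(X, y, f, idx=3)
# X_tr stacks folds 0-2 and 4-9; fold 3 becomes the validation split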
from abc import ABCMeta
import numpy as np
from torch.utils.data import ConcatDataset, Dataset, WeightedRandomSampler
from mmpose.datasets.builder import DATASETS
from .mesh_base_dataset import MeshBaseDataset
@DATASETS.register_module()
class MeshMixDataset(Dataset, metaclass=ABCMeta):
"""Mix Dataset for 3D human mesh estimation.
    The dataset combines data from multiple datasets (MeshBaseDataset) and
    samples the data from the different datasets with the provided proportions.
    The dataset loads raw features and applies the specified transforms
    to return a dict containing the image tensors and other information.
    Args:
        configs (list): List of configs for multiple datasets.
        partition (list): Sample proportions of the multiple datasets. The length
            of partition should be the same as that of configs. The elements
            should be non-negative and need not sum to one.
Example:
>>> from mmpose.datasets import MeshMixDataset
>>> data_cfg = dict(
>>> image_size=[256, 256],
>>> iuv_size=[64, 64],
>>> num_joints=24,
>>> use_IUV=True,
>>> uv_type='BF')
>>>
>>> mix_dataset = MeshMixDataset(
>>> configs=[
>>> dict(
>>> ann_file='tests/data/h36m/test_h36m.npz',
>>> img_prefix='tests/data/h36m',
>>> data_cfg=data_cfg,
>>> pipeline=[]),
>>> dict(
>>> ann_file='tests/data/h36m/test_h36m.npz',
>>> img_prefix='tests/data/h36m',
>>> data_cfg=data_cfg,
>>> pipeline=[]),
>>> ],
>>> partition=[0.6, 0.4])
"""
def __init__(self, configs, partition):
"""Load data from multiple datasets."""
assert min(partition) >= 0
datasets = [MeshBaseDataset(**cfg) for cfg in configs]
self.dataset = ConcatDataset(datasets)
self.length = max(len(ds) for ds in datasets)
weights = [
np.ones(len(ds)) * p / len(ds)
for (p, ds) in zip(partition, datasets)
]
weights = np.concatenate(weights, axis=0)
self.sampler = WeightedRandomSampler(weights, 1)
def __len__(self):
"""Get the size of the dataset."""
return self.length
def __getitem__(self, idx):
"""Given index, sample the data from multiple datasets with the given
proportion."""
idx_new = list(self.sampler)[0]
return self.dataset[idx_new]
| [
"torch.utils.data.WeightedRandomSampler",
"torch.utils.data.ConcatDataset",
"mmpose.datasets.builder.DATASETS.register_module",
"numpy.concatenate"
] | [((215, 241), 'mmpose.datasets.builder.DATASETS.register_module', 'DATASETS.register_module', ([], {}), '()\n', (239, 241), False, 'from mmpose.datasets.builder import DATASETS\n'), ((2026, 2049), 'torch.utils.data.ConcatDataset', 'ConcatDataset', (['datasets'], {}), '(datasets)\n', (2039, 2049), False, 'from torch.utils.data import ConcatDataset, Dataset, WeightedRandomSampler\n'), ((2247, 2278), 'numpy.concatenate', 'np.concatenate', (['weights'], {'axis': '(0)'}), '(weights, axis=0)\n', (2261, 2278), True, 'import numpy as np\n'), ((2302, 2335), 'torch.utils.data.WeightedRandomSampler', 'WeightedRandomSampler', (['weights', '(1)'], {}), '(weights, 1)\n', (2323, 2335), False, 'from torch.utils.data import ConcatDataset, Dataset, WeightedRandomSampler\n')] |
'''
quick_sigma_detect using numpy
Given a window size and an array, quick sigma can give you the densest (most 1s)
area of that array (the first such maximally dense window).
Given an array length (number of observations) and the desired sigma, quick sigma
can give you the lower and upper bounds, assuming a prior of 50% (a fair coin).
'''
import math
import typing as t
import numpy as np
from scipy import signal
def densest(observations: np.ndarray, window_size: int) -> t.Tuple[int, np.ndarray]:
density = signal.convolve(observations, np.ones([window_size]), mode='valid')
index = np.argmax(density)
densest_sub_array = observations[index:index + window_size]
return index, densest_sub_array
def get_normal_sigma(observation_count: int, desired_sigma: float = 2.0) -> t.Tuple[float, float]:
sigma = math.sqrt(observation_count*(.5)*(.5)) * desired_sigma
upper_bound = (observation_count/2)+(sigma/2)
lower_bound = (observation_count/2)-(sigma/2)
return lower_bound, upper_bound
| [
"numpy.ones",
"math.sqrt",
"numpy.argmax"
] | [((582, 600), 'numpy.argmax', 'np.argmax', (['density'], {}), '(density)\n', (591, 600), True, 'import numpy as np\n'), ((532, 554), 'numpy.ones', 'np.ones', (['[window_size]'], {}), '([window_size])\n', (539, 554), True, 'import numpy as np\n'), ((814, 854), 'math.sqrt', 'math.sqrt', (['(observation_count * 0.5 * 0.5)'], {}), '(observation_count * 0.5 * 0.5)\n', (823, 854), False, 'import math\n')] |
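A short usage sketch (illustrative values):

import numpy as np
obs = np.array([0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0])
idx, window = densest(obs, window_size=4)             # idx == 6, window == [1, 1, 1, 1]
lower, upper = get_normal_sigma(len(obs), desired_sigma=2.0)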
import argparse
import numpy as np
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--images_dir", type=str)
parser.add_argument("--mats_dir", type=str)
parser.add_argument("--lands_dir", type=str)
parser.add_argument("--transform", action="store_true")
parser.add_argument("--resume", action="store_true")
parser.add_argument("--start_epoch", type=int, default=0)
parser.add_argument("--test_img", type=str)
parser.add_argument("--checkpoint", type=str, help="path to saved model")
return parser.parse_args()
def gaussian_distribution(center_x, center_y, size=400):
    # Build a size x size heatmap with an (unnormalised) Gaussian bump in a
    # 20 x 20 window around (center_x, center_y); the y-axis is flipped.
    img = np.zeros((size, size))
    for i in range(center_x - 10, center_x + 10):
        for j in range(center_y - 10, center_y + 10):
            exp_fact = ((i - center_x) ** 2) / 2 + ((j - center_y) ** 2) / 2
            img[i, size - 1 - j] = 1 / np.sqrt(2 * np.pi) * np.exp(-exp_fact)
    # Normalise so the map sums to one (a probability heatmap)
    img /= np.sum(img)
    return img
def to_cuda(data, use_cuda):
input_ = data.float()
if use_cuda:
input_ = input_.cuda()
return input_
def num_flat_features(x):
    # number of features per sample when flattening a batched torch tensor
    return np.prod(x.size()[1:])
| [
"numpy.sum",
"argparse.ArgumentParser",
"numpy.zeros",
"numpy.exp",
"numpy.sqrt"
] | [((66, 91), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (89, 91), False, 'import argparse\n'), ((649, 671), 'numpy.zeros', 'np.zeros', (['(size, size)'], {}), '((size, size))\n', (657, 671), True, 'import numpy as np\n'), ((943, 954), 'numpy.sum', 'np.sum', (['img'], {}), '(img)\n', (949, 954), True, 'import numpy as np\n'), ((913, 930), 'numpy.exp', 'np.exp', (['(-exp_fact)'], {}), '(-exp_fact)\n', (919, 930), True, 'import numpy as np\n'), ((892, 910), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (899, 910), True, 'import numpy as np\n')] |
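Illustrative usage of the heatmap helper (the center coordinates are arbitrary, kept at least 10 pixels from the border so the loop indices stay in range):

heat = gaussian_distribution(center_x=200, center_y=120, size=400)
assert abs(heat.sum() - 1.0) < 1e-9   # normalised to a probability map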
from __future__ import print_function
import logging
from copy import copy
import numpy as np
from ..IO.image_stack import ImageStack
from ..elements.b_splines import BSplineSurface
from ..elements.q4 import Q4
def make_grid_Q4(c1x, c1y, c2x, c2y, nx, ny, elm):
    # type: (float, float, float, float, int, int, object) -> object
    """
    Makes a regular grid for the given corner coordinates, number of elements along each axis and finite element definitions.
:rtype: np.array,np.array,np.array
:param c1x: X-position of upper left corner
:param c1y: Y-position of upper left corner
:param c2x: X-position of lower right corner
:param c2y: Y-position of lower right corner
:param nx: Number of elements along X-axis
:param ny: Number of elements along Y-axis
:param elm: Finite element instance
:return: Connectivity matrix, X-coordinates of nodes, Y-Coordinates of nodes
"""
n_decimals = 2
elmwidth = float(c2x - c1x) / float(nx)
elmheigt = float(c2y - c1y) / float(ny)
xnodes = elm.nodal_xpos * elmwidth
ynodes = elm.nodal_ypos * elmheigt
elements = []
nodes = set()
for i in range(ny):
for j in range(nx):
elements.append(zip(np.around(ynodes[:] + elmheigt * i,n_decimals), np.around(xnodes[:] + elmwidth * j,n_decimals)))
nodes.update(zip(np.around(ynodes[:] + elmheigt * i,n_decimals), np.around(xnodes[:] + elmwidth * j,n_decimals)))
nodes = sorted(list(nodes))
con_matrix = []
for e in range(nx * ny):
con_matrix.append(list(map(nodes.index, list(elements[e]))))
ynod, xnod = zip(*nodes)
ynode = np.array(ynod) + c1y
xnode = np.array(xnod) + c1x
return np.array(con_matrix).transpose(), xnode, ynode
def make_grid(c1x, c1y, c2x, c2y, ny, nx, elm):
"""
Makes regular grid for the given corner coordinates, number of elements along each axis and finite element
definitions.
Parameters
----------
c1x : float
X-position of upper left corner
c1y : float
Y-position of upper left corner
c2x : float
X-position of lower right corner
c2y : float
Y-position of lower right corner
nx : int
Number of elements along X-axis
ny : int
Number of elements along Y-axis
elm : Element object
The element definitions
Returns
-------
    Connectivity matrix, X-coordinates of nodes, Y-coordinates of nodes
"""
elm.set_n_nodes((nx, ny))
elm_width = float(c2x - c1x)
elm_heigt = float(c2y - c1y)
# Scale one element to element width and height
nodes_x = elm.ctrl_pos_e * elm_width
nodes_y = elm.ctrl_pos_n * elm_heigt
# Generate elements
elements = list(zip((nodes_y), (nodes_x)))
# Unpack nodes
ynod, xnod = zip(*elements)
# Shift nodes to global frame
node_x = np.array(xnod) + c1x
node_y = np.array(ynod) + c1y
con_matrix = np.zeros((nx * ny,1),dtype=int)
con_matrix[:,0] = np.arange(nx*ny,dtype=int)
return con_matrix, node_x, node_y
class Mesher(object):
def __init__(self, deg_e=1, deg_n=1, type="q4"):
"""
Mesher utility
        The Mesher is used to generate a Mesh object and provides a lightweight GUI.
Parameters
----------
deg_e : int
The polynomial degree in the e-direction
deg_n : int
The polynomial degree in the n-direction
Returns
-------
        Mesher : Mesher object
"""
self.deg_e = deg_e
self.deg_n = deg_n
self.type = type
def __gui__(self):
from matplotlib.widgets import Button, RectangleSelector
import matplotlib.pyplot as plt
plt.rcParams['font.size'] = 8
def render_mesh():
try:
data.set_xdata(
self._mesh_.xnodes.transpose())
data.set_ydata(
self._mesh_.ynodes.transpose())
fig.canvas.draw()
except:
print('Could not render mesh')
pass
def line_select_callback(eclick, erelease):
'eclick and erelease are the press and release events'
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
self._mesh_.Xc1 = min([x1, x2])
self._mesh_.Xc2 = max([x1, x2])
self._mesh_.Yc1 = min([y1, y2])
self._mesh_.Yc2 = max([y1, y2])
self._mesh_.gen_node_positions()
render_mesh()
def toggle_selector(event):
if event.key in ['W', 'w']:
self._mesh_.n_ely += 1
if event.key in ['X', 'x']:
self._mesh_.n_ely -= 1
if event.key in ['A', 'a']:
self._mesh_.n_elx += 1
if event.key in ['D', 'd']:
self._mesh_.n_elx -= 1
if event.key in ['up']:
self._mesh_.Yc1 -= 1
self._mesh_.Yc2 -= 1
if event.key in ['down']:
self._mesh_.Yc1 += 1
self._mesh_.Yc2 += 1
if event.key in ['left']:
self._mesh_.Xc1 -= 1
self._mesh_.Xc2 -= 1
if event.key in ['right']:
self._mesh_.Xc1 += 1
self._mesh_.Xc2 += 1
try:
self._mesh_.gen_node_positions()
render_mesh()
pass
except:
pass
def print_instructions():
print(
                'Use arrow keys to move the mesh, W and X to change refinement in the Y-direction, A and D to change refinement in the X-direction')
def ok(event):
plt.close()
plt.ioff()
fig = plt.figure()
# Doing some layout with subplots:
fig.subplots_adjust(0.05, 0.05, 0.98, 0.98, 0.1)
overview = plt.subplot2grid((12, 4), (0, 0), rowspan=11, colspan=4)
n, m = self.image.shape
overview.imshow(self.image, cmap=plt.cm.gray, origin="lower", extent=(0, m, 0, n))
data, = overview.plot([], [], 'ro')
overview.autoscale(1, 'both', 1)
but_ax1 = plt.subplot2grid((12, 4), (11, 2), colspan=1)
ok_button = Button(but_ax1, 'OK')
ok_button.on_clicked(ok)
but_ax2 = plt.subplot2grid((12, 4), (11, 3), colspan=1)
reset_button = Button(but_ax2, 'Reset')
rectangle = RectangleSelector(overview, line_select_callback,
drawtype='box', useblit=True,
button=[1, 3], # don't use middle button
minspanx=5, minspany=5,
spancoords='pixels')
fig.canvas.mpl_connect('key_press_event', toggle_selector)
_widgets = [rectangle, reset_button, ok_button]
print_instructions()
plt.show(block=True)
def mesh(self, images, Xc1=0.0, Xc2=100.0, Yc1=0.0, Yc2=100., n_elx=4, n_ely=4, GUI=True, **kwargs):
if isinstance(images, (ImageStack)):
self.image = images[0]
else:
raise TypeError("Images should be in an ImageReader instance")
        if not (type(Xc1) == float and type(Xc2) == float and type(Yc1) == float and type(Yc2) == float):
            raise TypeError("Coordinates should be given as floats")
        if not (type(n_elx) == int and type(n_ely) == int):
            raise TypeError("Number of elements should be given as integers")
if self.type == "spline":
element = BSplineSurface(self.deg_e, self.deg_n, **kwargs)
else:
element = Q4()
self._mesh_ = Mesh(element, Xc1, Xc2, Yc1, Yc2, n_elx, n_ely)
if GUI:
self.__gui__()
return copy(self._mesh_)
class Mesh(object):
def __init__(self, element, corner1_x, corner2_x, corner1_y, corner2_y, n_elx, n_ely):
"""
Mesh class
Generates a grid based on the provided Finite Element definitions and geometrical measures.
The class contains methods for generating the grid and for moving and resizing the grid.
Parameters
----------
element : object
Instance of FiniteElement containing element definitions
        corner1_x : float
            X-Coordinate of upper left corner
        corner1_y : float
            Y-Coordinate of upper left corner
        corner2_x : float
            X-Coordinate of lower right corner
        corner2_y : float
            Y-Coordinate of lower right corner
n_elx : int
Number of elements in the x-direction
n_ely : int
Number of elements in the y-direction
Returns
-------
Mesh : Mesh object
"""
self.element_def = element
self.Xc1 = corner1_x
self.Xc2 = corner2_x
self.Yc1 = corner1_y
self.Yc2 = corner2_y
self.n_elx = n_elx
self.n_ely = n_ely
# Fields that are set after gen_mesh is called
self.xnodes = None
self.ynodes = None
self.n_nodes = None
self.n_elms = None
self.ele = None
self.gen_node_positions()
def gen_node_positions(self):
logger = logging.getLogger(__name__)
try:
if isinstance(self.element_def, Q4):
logger.info("Using Q4 elements")
self.ele, self.xnodes, self.ynodes = make_grid_Q4(self.Xc1, self.Yc1, self.Xc2, self.Yc2,
self.n_elx,
self.n_ely, self.element_def)
logger.info('Element contains %.1f X %.1f pixels and is divided in %i X %i ' % (
(self.Xc2 - self.Xc1) / self.n_elx, (self.Yc2 - self.Yc1) / self.n_ely, self.n_elx, self.n_ely))
self.n_nodes = len(self.xnodes)
self.n_elms = self.n_elx * self.n_ely
elif isinstance(self.element_def, BSplineSurface):
logger.info("Using B-Spline elements")
self.ele, self.xnodes, self.ynodes = make_grid(self.Xc1, self.Yc1, self.Xc2, self.Yc2,
self.n_elx,
self.n_ely, self.element_def)
logger.info('Element contains %.1f X %.1f pixels and is divided in %i X %i ' % (
(self.Xc2 - self.Xc1) / self.n_elx, (self.Yc2 - self.Yc1) / self.n_ely, self.n_elx, self.n_ely))
self.n_nodes = len(self.xnodes)
self.n_elms = 1
else:
raise ValueError("Unknown element type")
except Exception as e:
logger.exception("Mesh generation failed")
def scale_mesh_y(self, factor):
"""
Scale mesh in the y direction by a factor
Parameters
----------
factor : float
The factor which the mesh is scaled by in the y direction
"""
center = (self.Yc2 + self.Yc1) / 2.
height = self.Yc2 - self.Yc1
        self.Yc1 = center - (height / 2.) * factor
        self.Yc2 = center + (height / 2.) * factor
def scale_mesh_x(self, factor):
"""
Scale mesh in the x direction by a factor
Parameters
----------
factor : float
The factor which the mesh is scaled by in the x direction
"""
center = (self.Xc2 + self.Xc1) / 2.
        width = self.Xc2 - self.Xc1
        self.Xc1 = center - (width / 2.) * factor
        self.Xc2 = center + (width / 2.) * factor
def center_mesh_at(self, center_point_x, center_point_y):
"""
Center the mesh at coordinates
Parameters
----------
center_pointx : float
The center point of the mesh in the x-direction
center_pointy : float
The center point of the mesh in the y-direction
"""
width = self.Xc2 - self.Xc1
height = self.Yc2 - self.Yc1
self.Xc1 = center_point_x - (width / 2.)
self.Xc2 = center_point_x + (width / 2.)
self.Yc1 = center_point_y - (height / 2.)
self.Yc2 = center_point_y + (height / 2.)
def single_element_mesh(self):
"""
Convert mesh to a single element mesh
"""
self.n_elx = self.element_def.degree_e + 1
self.n_ely = self.element_def.degree_n + 1
self.n_elms = 1
self.gen_node_positions()
| [
"matplotlib.widgets.RectangleSelector",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplot2grid",
"numpy.zeros",
"copy.copy",
"matplotlib.widgets.Button",
"numpy.around",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.arange",
"loggin... | [((2947, 2980), 'numpy.zeros', 'np.zeros', (['(nx * ny, 1)'], {'dtype': 'int'}), '((nx * ny, 1), dtype=int)\n', (2955, 2980), True, 'import numpy as np\n'), ((3001, 3030), 'numpy.arange', 'np.arange', (['(nx * ny)'], {'dtype': 'int'}), '(nx * ny, dtype=int)\n', (3010, 3030), True, 'import numpy as np\n'), ((1655, 1669), 'numpy.array', 'np.array', (['ynod'], {}), '(ynod)\n', (1663, 1669), True, 'import numpy as np\n'), ((1688, 1702), 'numpy.array', 'np.array', (['xnod'], {}), '(xnod)\n', (1696, 1702), True, 'import numpy as np\n'), ((2874, 2888), 'numpy.array', 'np.array', (['xnod'], {}), '(xnod)\n', (2882, 2888), True, 'import numpy as np\n'), ((2908, 2922), 'numpy.array', 'np.array', (['ynod'], {}), '(ynod)\n', (2916, 2922), True, 'import numpy as np\n'), ((5803, 5813), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (5811, 5813), True, 'import matplotlib.pyplot as plt\n'), ((5828, 5840), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5838, 5840), True, 'import matplotlib.pyplot as plt\n'), ((5961, 6017), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(12, 4)', '(0, 0)'], {'rowspan': '(11)', 'colspan': '(4)'}), '((12, 4), (0, 0), rowspan=11, colspan=4)\n', (5977, 6017), True, 'import matplotlib.pyplot as plt\n'), ((6247, 6292), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(12, 4)', '(11, 2)'], {'colspan': '(1)'}), '((12, 4), (11, 2), colspan=1)\n', (6263, 6292), True, 'import matplotlib.pyplot as plt\n'), ((6313, 6334), 'matplotlib.widgets.Button', 'Button', (['but_ax1', '"""OK"""'], {}), "(but_ax1, 'OK')\n", (6319, 6334), False, 'from matplotlib.widgets import Button, RectangleSelector\n'), ((6387, 6432), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(12, 4)', '(11, 3)'], {'colspan': '(1)'}), '((12, 4), (11, 3), colspan=1)\n', (6403, 6432), True, 'import matplotlib.pyplot as plt\n'), ((6456, 6480), 'matplotlib.widgets.Button', 'Button', (['but_ax2', '"""Reset"""'], {}), "(but_ax2, 'Reset')\n", (6462, 6480), False, 'from matplotlib.widgets import Button, RectangleSelector\n'), ((6502, 6646), 'matplotlib.widgets.RectangleSelector', 'RectangleSelector', (['overview', 'line_select_callback'], {'drawtype': '"""box"""', 'useblit': '(True)', 'button': '[1, 3]', 'minspanx': '(5)', 'minspany': '(5)', 'spancoords': '"""pixels"""'}), "(overview, line_select_callback, drawtype='box', useblit=\n True, button=[1, 3], minspanx=5, minspany=5, spancoords='pixels')\n", (6519, 6646), False, 'from matplotlib.widgets import Button, RectangleSelector\n'), ((6984, 7004), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (6992, 7004), True, 'import matplotlib.pyplot as plt\n'), ((7862, 7879), 'copy.copy', 'copy', (['self._mesh_'], {}), '(self._mesh_)\n', (7866, 7879), False, 'from copy import copy\n'), ((9321, 9348), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (9338, 9348), False, 'import logging\n'), ((5782, 5793), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5791, 5793), True, 'import matplotlib.pyplot as plt\n'), ((1721, 1741), 'numpy.array', 'np.array', (['con_matrix'], {}), '(con_matrix)\n', (1729, 1741), True, 'import numpy as np\n'), ((1237, 1284), 'numpy.around', 'np.around', (['(ynodes[:] + elmheigt * i)', 'n_decimals'], {}), '(ynodes[:] + elmheigt * i, n_decimals)\n', (1246, 1284), True, 'import numpy as np\n'), ((1285, 1332), 'numpy.around', 'np.around', (['(xnodes[:] + elmwidth * j)', 'n_decimals'], {}), '(xnodes[:] + elmwidth * j, n_decimals)\n', 
(1294, 1332), True, 'import numpy as np\n'), ((1363, 1410), 'numpy.around', 'np.around', (['(ynodes[:] + elmheigt * i)', 'n_decimals'], {}), '(ynodes[:] + elmheigt * i, n_decimals)\n', (1372, 1410), True, 'import numpy as np\n'), ((1411, 1458), 'numpy.around', 'np.around', (['(xnodes[:] + elmwidth * j)', 'n_decimals'], {}), '(xnodes[:] + elmwidth * j, n_decimals)\n', (1420, 1458), True, 'import numpy as np\n')] |
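A minimal usage sketch for the Q4 grid generator above (the corner coordinates and refinement are arbitrary; Q4() taking no arguments matches how Mesher.mesh constructs it):

elm = Q4()
con_matrix, xnodes, ynodes = make_grid_Q4(0.0, 0.0, 40.0, 20.0, nx=4, ny=2, elm=elm)
# con_matrix holds one column of node indices per element;
# xnodes/ynodes are the global node coordinates
print(con_matrix.shape, xnodes.shape, ynodes.shape)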
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 19:49:00 2019
@author: cdebezenac
"""
import numpy as np
import math
import pandas as pd
import geopandas as gdp
import matplotlib.pyplot as plt
from scipy.integrate import quad
import statsmodels.api as sm
import scipy.stats
import sys
class Demographics():
'''Compute the general population statistics.
Attributes
==========
- population_matrix: numpy array of integers
ordered population per group per geographic unit.
- sum_pop_local: numpy array of integers
total population in each unit.
- sum_pop_group: numpy array of integers
        total population in each group.
    - total_pop: int
        total population in the total area.
    - global_statistics: numpy array of float numbers
population share for each group.
- relative_density: numpy array of float numbers
population share in each geographic unit.
Example
=======
'''
def transform_population(geodata,groups):
        '''Transform population counts for *groups* from dataframe *geodata* into a numpy array.'''
pop_matrix = np.zeros((len(geodata),len(groups)))
for j in range (len(groups)):
pop_matrix[:,j] = list(map(float,geodata[groups[j]]))
return(pop_matrix)
def __init__(self,geodata,groups):
#transform population count in geodata
self.population_matrix=Demographics.transform_population(geodata,groups)
#total population in unit
self.sum_pop_local = np.sum(self.population_matrix, axis=1)
#total population of each group
self.sum_pop_group = np.sum(self.population_matrix, axis=0)
#total population
self.total_pop=np.sum(self.population_matrix)
#global proportions of each group
self.global_statistics=self.sum_pop_group/self.total_pop
#weight of each unit in the overall city
self.relative_density=self.sum_pop_local/self.total_pop
class LorenzCurve():
'''Plots the Lorenz Curve for one population group distribution and returns the related Gini index.
This index informs on the relative spatial concentration of social groups on a given territory.
It has been used as an aspatial segregation index in literature throughout the years.
Attributes
==========
- group: string
name of population group in data.
- lorenz_array: numpy array of float
cumulated proportions of ordered group count in all units.
- gini: float
gini index associated with the lorenz_curve.
Example
=======
'''
def lorenz_list(dataset,variable):
'''
Create the list of cumulated population on the ordered units from smallest proportions to largest.
Parameters
----------
- dataset: Pandas or Geopandas dataframe
dataframe with quantitative variable for concentration measures.
- variable : string
quantitative variable for analysis.
        Returns
-------
numpy array of shape (1,len(dataset)) of discretised cumulated proportions.
'''
#access data and order it
sorted_list=list(dataset[variable].copy())
sorted_list.sort()
#calculate the cumulated proportions
lorenz=np.cumsum(np.array(sorted_list))
total_population=lorenz[-1]
lorenz=lorenz/total_population
return(lorenz)
def __init__(self,data,group):
#initialise class attributes
self.group=group
self.lorenz_array=LorenzCurve.lorenz_list(data,group)
#calculate gini index as the area between the lorenz curve and the first bisector.
self.gini=(0.5-np.sum(self.lorenz_array)/len(data))/0.5
def plot(self,title='Lorenz Curve',figure_size=(12,10),legend='Lorenz curve',line=1, line_color="black",show_axis=False,save=True,path='lorenz_curve.png'):
'''
        Parameters :
        - title : plot title.
        - figure_size : size of the figure.
- legend : the legend of the plot.
- line : the curve width.
- line_color : the curve color.
- show_axis : show the x and y axis and ticks.
- save : save the plot.
- path : location of image.
Returns :
plot of the Lorenz_curve and Gini index.
'''
plt.figure(figsize=figure_size)
#initialise the figure based on parameters
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
if show_axis==False:
ax.set_xticks([], [])
ax.set_yticks([], [])
else:
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
#plot the curve
plt.plot(list(self.lorenz_array), lw=line, color=line_color, alpha=0.3)
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
plt.text(0, -0.18,legend, fontsize=12, family='serif')
#save the figure if wanted
if save==True :
plt.savefig(path)
| [
"matplotlib.pyplot.subplot",
"numpy.sum",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.savefig"
] | [((1720, 1758), 'numpy.sum', 'np.sum', (['self.population_matrix'], {'axis': '(1)'}), '(self.population_matrix, axis=1)\n', (1726, 1758), True, 'import numpy as np\n'), ((1828, 1866), 'numpy.sum', 'np.sum', (['self.population_matrix'], {'axis': '(0)'}), '(self.population_matrix, axis=0)\n', (1834, 1866), True, 'import numpy as np\n'), ((1916, 1946), 'numpy.sum', 'np.sum', (['self.population_matrix'], {}), '(self.population_matrix)\n', (1922, 1946), True, 'import numpy as np\n'), ((4734, 4765), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figure_size'}), '(figsize=figure_size)\n', (4744, 4765), True, 'import matplotlib.pyplot as plt\n'), ((4834, 4850), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (4845, 4850), True, 'import matplotlib.pyplot as plt\n'), ((5382, 5512), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'bottom': '"""off"""', 'top': '"""off"""', 'labelbottom': '"""on"""', 'left': '"""off"""', 'right': '"""off"""', 'labelleft': '"""on"""'}), "(axis='both', which='both', bottom='off', top='off',\n labelbottom='on', left='off', right='off', labelleft='on')\n", (5397, 5512), True, 'import matplotlib.pyplot as plt\n'), ((5546, 5601), 'matplotlib.pyplot.text', 'plt.text', (['(0)', '(-0.18)', 'legend'], {'fontsize': '(12)', 'family': '"""serif"""'}), "(0, -0.18, legend, fontsize=12, family='serif')\n", (5554, 5601), True, 'import matplotlib.pyplot as plt\n'), ((3612, 3633), 'numpy.array', 'np.array', (['sorted_list'], {}), '(sorted_list)\n', (3620, 3633), True, 'import numpy as np\n'), ((5681, 5698), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (5692, 5698), True, 'import matplotlib.pyplot as plt\n'), ((4013, 4038), 'numpy.sum', 'np.sum', (['self.lorenz_array'], {}), '(self.lorenz_array)\n', (4019, 4038), True, 'import numpy as np\n')] |
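Illustrative only (the column names and counts are made up): computing group shares and the discretised Gini from a small frame.

import pandas as pd
df = pd.DataFrame({'groupA': [10, 0, 30, 60], 'groupB': [25, 25, 25, 25]})
demo = Demographics(df, ['groupA', 'groupB'])
print(demo.global_statistics)        # population share of each group
curve = LorenzCurve(df, 'groupA')
print(curve.gini)                    # 0.25 for this distribution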
"""
This module includes the two methods for baseline computation: stochastic
gradient descent and alternating least squares.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from six.moves import range
def baseline_als(self):
"""Optimize biases using ALS.
Args:
self: The algorithm that needs to compute baselines.
Returns:
A tuple ``(bu, bi)``, which are users and items baselines.
"""
# This piece of code is largely inspired by that of MyMediaLite:
# https://github.com/zenogantner/MyMediaLite/blob/master/src/MyMediaLite/RatingPrediction/UserItemBaseline.cs
# see also https://www.youtube.com/watch?v=gCaOa3W9kM0&t=32m55s
# (<NAME> on RS, ML Class 10-701)
bu = np.zeros(self.trainset.n_users)
bi = np.zeros(self.trainset.n_items)
global_mean = self.trainset.global_mean
n_epochs = self.bsl_options.get('n_epochs', 10)
reg_u = self.bsl_options.get('reg_u', 15)
reg_i = self.bsl_options.get('reg_i', 10)
for dummy in range(n_epochs):
for i in self.trainset.all_items():
dev_i = 0
for (u, r) in self.trainset.ir[i]:
dev_i += r - global_mean - bu[u]
bi[i] = dev_i / (reg_i + len(self.trainset.ir[i]))
for u in self.trainset.all_users():
dev_u = 0
for (i, r) in self.trainset.ur[u]:
dev_u += r - global_mean - bi[i]
bu[u] = dev_u / (reg_u + len(self.trainset.ur[u]))
return bu, bi
def baseline_sgd(self):
"""Optimize biases using SGD.
Args:
self: The algorithm that needs to compute baselines.
Returns:
A tuple ``(bu, bi)``, which are users and items baselines.
"""
bu = np.zeros(self.trainset.n_users)
bi = np.zeros(self.trainset.n_items)
global_mean = self.trainset.global_mean
n_epochs = self.bsl_options.get('n_epochs', 20)
reg = self.bsl_options.get('reg', 0.02)
lr = self.bsl_options.get('learning_rate', 0.005)
for dummy in range(n_epochs):
for u, i, r in self.trainset.all_ratings():
err = (r - (global_mean + bu[u] + bi[i]))
bu[u] += lr * (err - reg * bu[u])
bi[i] += lr * (err - reg * bi[i])
return bu, bi
| [
"numpy.zeros",
"six.moves.range"
] | [((808, 839), 'numpy.zeros', 'np.zeros', (['self.trainset.n_users'], {}), '(self.trainset.n_users)\n', (816, 839), True, 'import numpy as np\n'), ((849, 880), 'numpy.zeros', 'np.zeros', (['self.trainset.n_items'], {}), '(self.trainset.n_items)\n', (857, 880), True, 'import numpy as np\n'), ((1089, 1104), 'six.moves.range', 'range', (['n_epochs'], {}), '(n_epochs)\n', (1094, 1104), False, 'from six.moves import range\n'), ((1808, 1839), 'numpy.zeros', 'np.zeros', (['self.trainset.n_users'], {}), '(self.trainset.n_users)\n', (1816, 1839), True, 'import numpy as np\n'), ((1849, 1880), 'numpy.zeros', 'np.zeros', (['self.trainset.n_items'], {}), '(self.trainset.n_items)\n', (1857, 1880), True, 'import numpy as np\n'), ((2095, 2110), 'six.moves.range', 'range', (['n_epochs'], {}), '(n_epochs)\n', (2100, 2110), False, 'from six.moves import range\n')] |
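These two optimizers appear to come from scikit-surprise's optimize_baselines module (the docstrings and the MyMediaLite reference suggest as much); if so, they are selected through an algorithm's bsl_options rather than called directly. A hedged sketch under that assumption:

from surprise import BaselineOnly, Dataset, Reader
import pandas as pd
ratings = pd.DataFrame({'user': [1, 1, 2, 2], 'item': [10, 20, 10, 20],
                      'rating': [3.0, 4.0, 5.0, 2.0]})
data = Dataset.load_from_df(ratings[['user', 'item', 'rating']],
                            Reader(rating_scale=(1, 5)))
algo = BaselineOnly(bsl_options={'method': 'sgd', 'learning_rate': 0.005,
                          'reg': 0.02, 'n_epochs': 20})
algo.fit(data.build_full_trainset())   # fit() ends up in baseline_sgd above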
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from onnx import numpy_helper
from onnx.helper import make_attribute
from dragon.vm.onnx.nodes.common import CommonONNXExporter
from dragon.vm.onnx.helper import fetch_argument
def _normalize_tuple(value, rank):
if len(value) > rank:
return [value[i] for i in range(rank)]
else:
return [value[i] for i in range(len(value))] + \
[value[-1] for i in range(len(value), rank)]
def ReshapeONNXExporter(op_def, shape_dict, ws):
output_shape = list(shape_dict[op_def.output[0]])
node_proto, const_tensors = CommonONNXExporter(op_def, shape_dict)
for arg in op_def.arg:
if arg.name == 'dims':
for axis, s in enumerate(arg.ints):
if s == -1 or s == 0:
output_shape[axis] = s
else:
if s != output_shape[axis]:
                        raise ValueError('Expected shape[{}] to be {}, but got {}.\n'
'Please follow the static data shape on exporting.'.format(
axis, s, output_shape[axis]))
elif arg.name == 'dims_desc':
for axis, s in enumerate(arg.strings):
s = fetch_argument(op_def, s, ws)
if s == -1 or s == 0:
output_shape[axis] = s
else:
if s != output_shape[axis]:
                        raise ValueError('Expected shape[{}] to be {}, but got {}.\n'
'Please follow the static data shape on exporting.'.format(
axis, s, output_shape[axis]))
shape = numpy_helper.from_array(
np.array(output_shape, dtype=np.int64),
name=op_def.input[0] + '/onnx/shape')
node_proto.input.extend([shape.name])
return node_proto, [shape]
def ConcatONNXExporter(op_def, shape_dict, ws):
node_proto, const_tensors = CommonONNXExporter(op_def, shape_dict)
for arg in op_def.arg:
if arg.name == 'axis':
node_proto.attribute.extend([
make_attribute('axis', arg.i)])
return node_proto, None
def FlattenONNXExporter(op_def, shape_dict, ws):
node_proto, const_tensors = CommonONNXExporter(op_def, shape_dict)
for arg in op_def.arg:
if arg.name == 'axis':
node_proto.attribute.extend([
make_attribute('axis', arg.i)])
elif arg.name == 'num_axes':
if arg.i != -1:
                raise ValueError('Expected num_axes == -1, but got {}.'.format(arg.i))
elif arg.name == 'keep_axes':
raise ValueError('keep_axes should not be set. (Theano Style).')
return node_proto, None
def TransposeONNXExporter(op_def, shape_dict, ws):
node_proto, const_tensors = CommonONNXExporter(op_def, shape_dict)
for arg in op_def.arg:
if arg.name == 'perm':
node_proto.attribute.extend([
make_attribute('perm', arg.ints)])
elif arg.name == 'perm_desc':
node_proto.attribute.extend([
make_attribute('perm', [
fetch_argument(op_def, desc, ws)
for desc in arg.strings])])
return node_proto, None
def ArgReduceONNXExporter(op_def, shape_dict, ws):
node_proto, const_tensors = CommonONNXExporter(op_def, shape_dict)
# ONNX requires indices only, remove the values
indices = node_proto.output[0]
node_proto.ClearField('output')
node_proto.output.extend([indices])
for arg in op_def.arg:
if arg.name == 'axis':
node_proto.attribute.extend([
make_attribute('axis', arg.i)])
elif arg.name == 'keep_dims':
node_proto.attribute.extend([
make_attribute('keepdims', arg.i)])
elif arg.name == 'top_k':
if arg.i != 1:
raise ValueError('ONNX requires top_k == 1.')
elif arg.name == 'operation':
if arg.s == b'ARGMAX':
node_proto.op_type = 'ArgMax'
elif arg.s == b'ARGMIN':
node_proto.op_type = 'ArgMin'
return node_proto, None
def CropONNXExporter(op_def, shape_dict, ws):
node_proto, const_tensors = CommonONNXExporter(op_def, shape_dict)
node_proto.op_type = 'ATen' # Template
node_proto.attribute.extend([make_attribute('op_type', 'Crop')])
for arg in op_def.arg:
if arg.name == 'starts':
if len(arg.ints) > 0:
node_proto.attribute.extend([
make_attribute('starts', arg.ints)])
elif arg.name == 'starts_desc':
if len(arg.strings) > 0:
node_proto.attribute.extend([
make_attribute('starts', [
fetch_argument(op_def, desc, ws)
for desc in arg.strings])])
elif arg.name == 'sizes':
if len(arg.ints) > 0:
node_proto.attribute.extend([
make_attribute('sizes', arg.ints)])
elif arg.name == 'sizes_desc':
if len(arg.strings) > 0:
node_proto.attribute.extend([
make_attribute('sizes', [
fetch_argument(op_def, desc, ws)
for desc in arg.strings])])
elif arg.name == 'start_axis':
node_proto.attribute.extend([
make_attribute('start_axis', arg.i)])
elif arg.name == 'offsets':
node_proto.attribute.extend([
make_attribute('offsets', arg.ints)])
elif arg.name == 'shape_like':
node_proto.attribute.extend([
make_attribute('shape_like', arg.s)])
    return node_proto, None
| [
"onnx.helper.make_attribute",
"dragon.vm.onnx.helper.fetch_argument",
"numpy.array",
"dragon.vm.onnx.nodes.common.CommonONNXExporter"
] | [((1060, 1098), 'dragon.vm.onnx.nodes.common.CommonONNXExporter', 'CommonONNXExporter', (['op_def', 'shape_dict'], {}), '(op_def, shape_dict)\n', (1078, 1098), False, 'from dragon.vm.onnx.nodes.common import CommonONNXExporter\n'), ((2409, 2447), 'dragon.vm.onnx.nodes.common.CommonONNXExporter', 'CommonONNXExporter', (['op_def', 'shape_dict'], {}), '(op_def, shape_dict)\n', (2427, 2447), False, 'from dragon.vm.onnx.nodes.common import CommonONNXExporter\n'), ((2709, 2747), 'dragon.vm.onnx.nodes.common.CommonONNXExporter', 'CommonONNXExporter', (['op_def', 'shape_dict'], {}), '(op_def, shape_dict)\n', (2727, 2747), False, 'from dragon.vm.onnx.nodes.common import CommonONNXExporter\n'), ((3278, 3316), 'dragon.vm.onnx.nodes.common.CommonONNXExporter', 'CommonONNXExporter', (['op_def', 'shape_dict'], {}), '(op_def, shape_dict)\n', (3296, 3316), False, 'from dragon.vm.onnx.nodes.common import CommonONNXExporter\n'), ((3809, 3847), 'dragon.vm.onnx.nodes.common.CommonONNXExporter', 'CommonONNXExporter', (['op_def', 'shape_dict'], {}), '(op_def, shape_dict)\n', (3827, 3847), False, 'from dragon.vm.onnx.nodes.common import CommonONNXExporter\n'), ((4727, 4765), 'dragon.vm.onnx.nodes.common.CommonONNXExporter', 'CommonONNXExporter', (['op_def', 'shape_dict'], {}), '(op_def, shape_dict)\n', (4745, 4765), False, 'from dragon.vm.onnx.nodes.common import CommonONNXExporter\n'), ((2163, 2201), 'numpy.array', 'np.array', (['output_shape'], {'dtype': 'np.int64'}), '(output_shape, dtype=np.int64)\n', (2171, 2201), True, 'import numpy as np\n'), ((4842, 4875), 'onnx.helper.make_attribute', 'make_attribute', (['"""op_type"""', '"""Crop"""'], {}), "('op_type', 'Crop')\n", (4856, 4875), False, 'from onnx.helper import make_attribute\n'), ((1701, 1730), 'dragon.vm.onnx.helper.fetch_argument', 'fetch_argument', (['op_def', 's', 'ws'], {}), '(op_def, s, ws)\n', (1715, 1730), False, 'from dragon.vm.onnx.helper import fetch_argument\n'), ((2565, 2594), 'onnx.helper.make_attribute', 'make_attribute', (['"""axis"""', 'arg.i'], {}), "('axis', arg.i)\n", (2579, 2594), False, 'from onnx.helper import make_attribute\n'), ((2865, 2894), 'onnx.helper.make_attribute', 'make_attribute', (['"""axis"""', 'arg.i'], {}), "('axis', arg.i)\n", (2879, 2894), False, 'from onnx.helper import make_attribute\n'), ((3434, 3466), 'onnx.helper.make_attribute', 'make_attribute', (['"""perm"""', 'arg.ints'], {}), "('perm', arg.ints)\n", (3448, 3466), False, 'from onnx.helper import make_attribute\n'), ((4129, 4158), 'onnx.helper.make_attribute', 'make_attribute', (['"""axis"""', 'arg.i'], {}), "('axis', arg.i)\n", (4143, 4158), False, 'from onnx.helper import make_attribute\n'), ((4257, 4290), 'onnx.helper.make_attribute', 'make_attribute', (['"""keepdims"""', 'arg.i'], {}), "('keepdims', arg.i)\n", (4271, 4290), False, 'from onnx.helper import make_attribute\n'), ((5039, 5073), 'onnx.helper.make_attribute', 'make_attribute', (['"""starts"""', 'arg.ints'], {}), "('starts', arg.ints)\n", (5053, 5073), False, 'from onnx.helper import make_attribute\n'), ((3610, 3642), 'dragon.vm.onnx.helper.fetch_argument', 'fetch_argument', (['op_def', 'desc', 'ws'], {}), '(op_def, desc, ws)\n', (3624, 3642), False, 'from dragon.vm.onnx.helper import fetch_argument\n'), ((5493, 5526), 'onnx.helper.make_attribute', 'make_attribute', (['"""sizes"""', 'arg.ints'], {}), "('sizes', arg.ints)\n", (5507, 5526), False, 'from onnx.helper import make_attribute\n'), ((5270, 5302), 'dragon.vm.onnx.helper.fetch_argument', 'fetch_argument', (['op_def', 'desc', 'ws'], {}), '(op_def, desc, ws)\n', (5284, 5302), False, 'from dragon.vm.onnx.helper import fetch_argument\n'), ((5907, 5942), 'onnx.helper.make_attribute', 'make_attribute', (['"""start_axis"""', 'arg.i'], {}), "('start_axis', arg.i)\n", (5921, 5942), False, 'from onnx.helper import make_attribute\n'), ((6039, 6074), 'onnx.helper.make_attribute', 'make_attribute', (['"""offsets"""', 'arg.ints'], {}), "('offsets', arg.ints)\n", (6053, 6074), False, 'from onnx.helper import make_attribute\n'), ((5721, 5753), 'dragon.vm.onnx.helper.fetch_argument', 'fetch_argument', (['op_def', 'desc', 'ws'], {}), '(op_def, desc, ws)\n', (5735, 5753), False, 'from dragon.vm.onnx.helper import fetch_argument\n'), ((6174, 6209), 'onnx.helper.make_attribute', 'make_attribute', (['"""shape_like"""', 'arg.s'], {}), "('shape_like', arg.s)\n", (6188, 6209), False, 'from onnx.helper import make_attribute\n')] |
# copan:DISCOUNT model integration and analysis script as used in:
#
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# and <NAME>, Taxonomies for structuring models for World-Earth system
# analysis of the Anthropocene: subsystems, their interactions and
# social-ecological feedback loops, Earth System Dynamics, in press (2021),
# Discussion paper: DOI: 10.5194/esd-2018-27.
from pylab import *
import numpy
from scipy.optimize import fsolve
# possible damage functions:
def f_lin(S): return gamma*S
def f_quad(S):
    if S>2*sigma: raise ValueError("f_quad is only defined for S <= 2*sigma")
else: return 2.*gamma*(S-S**2/2/sigma)/sigma
def f_exp(S): return gamma*S*exp(1-S/sigma)
def f_norm(S): return zeta + gamma*exp(-(S-mu)**2/(2*sigma**2)) # USED IN PAPER
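# e.g. with the case-3 parameters used below (zeta=0, gamma=1.1, sigma=1, mu=2),
# f_norm peaks at S=mu: f_norm(2) = 0 + 1.1*exp(0) = 1.1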
# other derived quantities, see paper for definitions:
def z(A): # this is called phi in the paper
if coop: return A**2*N*al/(1-al)+(1-A)*be/(1-be)
else: return A*al/(1-al)+(1-A)*be/(1-be)
def D(A,S):
b=unit(S)
if coop: return ((al-be)*(sn-b*(E-b*eff*z(A))-1)-((A*N)**2*al**2/(1-al)-be**2/(1-be))*b**2*eff/2/N)
else: return ((al-be)*(sn-b*(E-b*eff*z(A))-1)-(al**2/(1-al)-be**2/(1-be))*b**2*eff/2/N)
def P(D): return 1./(1.+(1./p0-1.)*exp(-D*q/p0/(1.-p0)))
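# sanity check: a zero payoff difference leaves the switching probability at
# its baseline, P(0) = 1/(1 + (1/p0 - 1)) = p0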
def A(z): return (z - be/(1-be))/(al/(1-al)-be/(1-be))
# derivatives:
def dA(A,S): theD=D(A,S); return l*dt*A*(1-A)*(P(theD)-P(-theD))
def dS(A,S): b=unit(S); return dt*(s*(max(0,E-b*eff*z(A))) - r*S)
def z_dA0(S): b=unit(S); return max(be/(1-be),min(al/(1-al),((al-be)*(1+b*E-sn)+(al**2/(1-al)-be**2/(1-be))*b**2*eff/2/N)/((al-be)*b**2*eff)))
def z_dS0(S): b=unit(S); return max(be/(1-be),min(al/(1-al),(s*E-r*S)/(b*eff*s)))
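# z_dS0 solves dS = 0: s*(E - b*eff*z) = r*S  =>  z = (s*E - r*S)/(b*eff*s),
# clipped to the feasible range [be/(1-be), al/(1-al)]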
# for cooperative version (not used in paper):
def coopks_dA0(S):
b=unit(S)
C1 = (al-be)*b**2*eff*al/(1-al)/N - al**2/(1-al)*b**2*eff/2/N
C2 = -(al-be)*b**2*eff*be/(1-be)/N
C3 = (al-be)*(sn-b*E+b**2*eff*be/(1-be)-1) + be**2/(1-be)*b**2*eff/2/N
print(C1,C2,C3)
return maximum(-inf,minimum(inf,(-C2+array([1.,-1.])*sqrt(C2**2-4*C1*C3))/C1/2))
def coopk_dS0(S):
b=unit(S)
def target(klocal): return z(1.*klocal/N) - (s*E-r*S)/(b*eff*s)
return max(0,min(N,fsolve(target,N*1.)[0]))
# META-PARAMETERS:
case = 3
stoch = True
plot_nullclines = True  # or False
# DETAILED PARAMETER SETS:
if case == 1:
tit = 'exponential marginal damages'
unit = f_exp
gamma = 1.
sigma = 1.
f_prefix = 'exp_'+str(gamma)+'_'+str(sigma)
al = 0.5
be = 0.1
eff = 1.
E = 1.5
sn = 2.
N = 20
p0 = 0.5
q = 3.
r = 1.25
s = 1.
S1 = 1.85
k1 = 5
Smax = 5. # 2sigma if squared
if case == 2:
tit = 'linear marginal damages'
unit = f_lin
gamma = 2.
f_prefix = 'lin_'+str(gamma)
al,be = 0.5,0.1
eff = 1.
E = 1.5
sn = 2.
N = 20
p0,q = 0.5,3.
r = 0.4 # .3 -- .5
s = 1.
S1,k1 = 0.,6 # 3.2,10 # 2.6,6
Amin,Amax = 0.,1. #.2,.6 # 0.,1.
Smin,Smax = 0.,6. # 2.,3. # 0.,6.
if case == 3: # USED IN PAPER!
# interesting cases:
# norm_0.0_2.0_1.0_2.0_0.5_0.1_1.0_1.5_2.0_20_0.5_3.0_0.3_1.0_3.2,10: complex
# norm_0.0_1.0_1.0_2.0_0.5_0.1_1.0_1.5_2.0_20_0.5_3.0_0.45_1.0_2.6_6: stable focus
# norm_0.0_2.0_1.0_2.0_0.5_0.1_1.0_1.5_2.0_20_0.5_3.0_0.45_1.0_0.0_6
# norm_0.0_1.0_1.0_2.0_0.5_0.1_1.0_1.5_2.0_20_0.5_3.0_0.35_1.0_0.0_6
# todo: norm_0.0_2.0_1.0_2.0_0.5_0.1_1.0_1.5_2.0_20_0.5_3.0_0.5_1.0_1.85_5
# norm_0.0_1.5_1.0_2.0_0.5_0.3_1.0_1.5_2.0_20_0.5_3.0_0.3_1.0_3.0_10.0 coop: unstable focus, stable cycle for fast learning, globally attractive desirable state for slow learning!
tit = 'normal marginal damages'
unit = f_norm
eff = 1.0
coop, zeta, gamma,sigma,mu, al, be, eff, E, sn, N, p0, q, r, s, S1, k1 =\
0, 0., 1.1, 1., 2., 0.5, 0.1, 1.*eff**2, \
1.6/eff, 2., 50, 0.5, 3., 0.45, 1., 1., 25
# choose from:
# eff = 1.0 or 1.1
# 0, 0., 2., 1., 2., 0.5, 0.1, 1.*eff**2, \
# 1.5/eff, 2., 20, 0.5, 3., 0.3, 1., 3.2, 10
# stable/unstable focus
# eff = 1.0 or 0.9
# 0, 0., 1., 1., 2., 0.5, 0.1, 1.*eff**2, \
# 1.5/eff, 2., 20, 0.5, 3., 0.45, 1., 0., 6
# eff = 1.0 or 0.8
# 0, 0., 2., 1., 2., 0.5, 0.1, 1.*eff**2, \
# 1.5/eff, 2., 20, 0.5, 3., 0.45, 1., 0., 6
# 0, 0., 1., 1., 2., 0.5, 0.1, 1., 1.5, 2., 20, 0.5, 3., 0.35, 1., 0., 6
# 1, 0., 1.5, 1., 2., 0.5, 0.3, 1., 1.5, 2., 20, 0.5, 3., 0.3, 1., 3., .5*20
f_prefix = 'norm_'+str(zeta)+'_'+str(gamma)+'_'+str(sigma)+'_'+str(mu)
Amin,Amax = 0.,1.
Smin,Smax = 0.5,3.5
# find intersection of z_dA0 and z_dS0 around S=2.6:
def target(S): return z_dA0(S)-z_dS0(S)
Sfocus = fsolve(target,2.6)[0]
Afocus = A((z_dA0(Sfocus)+z_dS0(Sfocus))/2)
print(Sfocus,Afocus)
fileprefix = f_prefix+'_'+str(al)+'_'+str(be)+'_'+str(eff)+'_'+str(E)+'_'+str(sn)+'_'+str(N)+'_'+str(p0)+'_'+str(q)+'_'+str(r)+'_'+str(s)+'_'+str(S1)+'_'+str(k1)
print(fileprefix)
# timestep:
dt = 0.001
# grid:
nSs = 301
nAs = 301
As = linspace(Amin,Amax,nAs)
Ss = linspace(Smin,Smax,nSs)
# quantities on grid:
A_dA0s = array([A(z_dA0(Ss[x])) for x in range(nSs)])
A_dS0s = array([A(z_dS0(Ss[x])) for x in range(nSs)])
coopA_dA0s = array([coopks_dA0(Ss[x])/N for x in range(nSs)])
coopA_dS0s = array([coopk_dS0(Ss[x])/N for x in range(nSs)])
# plot of nullclines:
figure()
title(tit)
plot(Ss,A_dA0s,'r--',lw=2)
plot(Ss,A_dS0s,'r:',lw=2)
plot(Ss,coopA_dA0s,'y--',lw=2)
plot(Ss,coopA_dS0s,'y:',lw=2)
gca().invert_xaxis()
savefig(fileprefix+'_regions.pdf')
# plot of dynamics (figure in paper):
iterations = 50000
repetitions = 5
fracs = []
ls = [.2,1.3]
spl = 0
figure(figsize=(4,10))
ax = subplot2grid((5,1),(2, 0))
ax.plot(Ss,cumsum([unit(S) for S in Ss]),"r")
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_ylabel('Climate damages')
# generate one panel per l parameter value:
for l in ls:
ax = subplot2grid((5,1),(spl, 0),rowspan=2)
#title(tit)
dAs = array([[dA(As[y],Ss[x]) for x in range(nSs)] for y in range(nAs)])
dSs = array([[dS(As[y],Ss[x]) for x in range(nSs)] for y in range(nAs)])
dAs2 = dAs**2
dSs2 = dSs**2
speed = sqrt(dAs2/dAs2.mean()+dSs2/dSs2.mean())
# average dynamics from ODEs:
print('streamplot...')
ax.streamplot(Ss,As,dSs,dAs,linewidth=3.*speed/speed.max(),density=1.)
if plot_nullclines:
if coop:
ax.plot(Ss,coopA_dA0s,'y--',lw=2)
ax.plot(Ss,coopA_dS0s,'y:',lw=2)
else:
ax.plot(Ss,1.01*A_dA0s-.005,'r',lw=1)
ax.plot(Ss,1.01*A_dS0s-.005,'r',lw=2)
ax.set_xlim(Smin,Smax)
ax.set_ylim(Amin,Amax)
if spl == 3:
ax.set_xlabel('Excess atmospheric carbon stock $C$ [fictitious units]')
else:
ax.set_xticklabels([])
ax.set_ylabel('Fraction $F$ of patient countries')
pmax=0.
pmin=1.
ngood = 0
# individual stochastic trajectories:
if repetitions>0: plot(S1,k1*1./N,'g.',ms=10)
for rep in range(repetitions):
Ss2 = [S1]
ks2 = [k1]
usal = []
usbe = []
for it in range(1,iterations):
theA = 1.*ks2[it-1]/N
theD = D(theA,Ss2[it-1])
pal = P(theD)
pbe = P(-theD)
if pal>pmax: pmax=pal
if pbe>pmax: pmax=pbe
if pal<pmin: pmin=pal
if pbe<pmin: pmin=pbe
Ss2.append(max(0,Ss2[it-1] + dS(theA,Ss2[it-1])))
if Ss2[it]>100.:
ks2.append(0)
else:
if stoch:
ks2.append(ks2[it-1] + numpy.random.binomial(N-ks2[it-1],l*dt*pal*theA) - numpy.random.binomial(ks2[it-1],l*dt*pbe*(1.-theA)))
else:
ks2.append(ks2[it-1] + ((N-ks2[it-1])*l*dt*pal*theA - ks2[it-1]*l*dt*pbe*(1.-theA)))
b = unit(Ss2[it])
thez = z(ks2[it]*1./N)
usal.append(1+al*(sn-b*E+b**2*eff*thez-1)-al**2*b**2*eff/2/N/(1-al))
usbe.append(1+be*(sn-b*E+b**2*eff*thez-1)-be**2*b**2*eff/2/N/(1-be))
if ks2[it] in [0,N]:
if ks2[it] == N: ngood += 1
break
if it%1000 == 0: print(rep,it,Ss2[it],ks2[it])
if rep<20: ax.plot(Ss2,array(ks2)*1./N,'g-',alpha=0.5,color=(.5*rand(),.75,.5*rand()))
if repetitions>0:
fracs.append(ngood*1./repetitions)
print(l,pmin,pmax,ngood*1./repetitions)
spl = spl+3
savefig(fileprefix+'.pdf')
print("saved ",fileprefix+'.pdf')
show()
| [
"scipy.optimize.fsolve",
"numpy.random.binomial"
] | [((4922, 4941), 'scipy.optimize.fsolve', 'fsolve', (['target', '(2.6)'], {}), '(target, 2.6)\n', (4928, 4941), False, 'from scipy.optimize import fsolve\n'), ((2121, 2144), 'scipy.optimize.fsolve', 'fsolve', (['target', '(N * 1.0)'], {}), '(target, N * 1.0)\n', (2127, 2144), False, 'from scipy.optimize import fsolve\n'), ((7853, 7916), 'numpy.random.binomial', 'numpy.random.binomial', (['ks2[it - 1]', '(l * dt * pbe * (1.0 - theA))'], {}), '(ks2[it - 1], l * dt * pbe * (1.0 - theA))\n', (7874, 7916), False, 'import numpy\n'), ((7802, 7861), 'numpy.random.binomial', 'numpy.random.binomial', (['(N - ks2[it - 1])', '(l * dt * pal * theA)'], {}), '(N - ks2[it - 1], l * dt * pal * theA)\n', (7823, 7861), False, 'import numpy\n')] |
import numpy as np
from ase.atoms import Atoms
from TB2J.utils import symbol_number
from collections import defaultdict
from scipy.linalg import eigh
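def Lowdin(S):
    """Assumed helper (referenced in solve_all below but missing from this
    snippet): Löwdin orthogonalization factor S^{-1/2} of the Hermitian,
    positive-definite overlap matrix S, built from its eigendecomposition."""
    evals, evecs = eigh(S)
    return evecs @ np.diag(1.0 / np.sqrt(evals)) @ evecs.conj().T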
class SislWrapper():
def __init__(self, sisl_hamiltonian, spin=None):
self.ham = sisl_hamiltonian
        # R2kfactor : H(k) = \sum_R H(R) * e^(R2kfactor * k.R)
        self.R2kfactor = -2.0j*np.pi
if spin=='up':
spin=0
elif spin=='down':
spin=1
        if spin not in [None, 0, 1, 'merge']:
            raise ValueError("spin should be None/0/1/'merge', but is %s"%spin)
self.spin=spin
self.orbs=[]
self.orb_dict=defaultdict(lambda:[])
g=self.ham._geometry
_atoms=self.ham._geometry._atoms
atomic_numbers=[]
self.positions=g.xyz
self.cell=np.array(g.sc.cell)
for ia, a in enumerate(_atoms):
atomic_numbers.append(a.Z)
self.atoms=Atoms(numbers=atomic_numbers, cell=self.cell, positions=self.positions)
sdict=list(symbol_number(self.atoms).keys())
if self.ham.spin.is_colinear and (self.spin in [0,1]):
for ia, a in enumerate(_atoms):
symnum=sdict[ia]
try:
orb_names=[f"{symnum}|{x.name()}|up" for x in a.orbital]
except:
orb_names=[f"{symnum}|{x.name()}|up" for x in a.orbitals]
self.orbs+=orb_names
self.orb_dict[ia]+=orb_names
self.norb = len(self.orbs)
self.nbasis=self.norb
elif self.ham.spin.is_spinorbit or self.spin=='merge':
for spin in {'up', 'down'}:
for ia, a in enumerate(_atoms):
symnum=sdict[ia]
orb_names=[]
try:
for x in a.orbital: # sisl < v0.10
orb_names.append(f"{symnum}|{x.name()}|{spin}")
except:
for x in a.orbitals: # sisl >= v0.10
orb_names.append(f"{symnum}|{x.name()}|{spin}")
self.orbs+=orb_names
self.orb_dict[ia]+=orb_names
#print(self.orb_dict)
self.norb=len(self.orbs)//2
#print(f"Norb: {self.norb}")
self.nbasis= len(self.orbs)
else:
raise ValueError("The hamiltonian should be either spin-orbit or colinear")
def view_info(self):
print(self.orb_dict)
print(self.atoms)
def solve(self, k, convention=2):
if convention==1:
gauge='r'
elif convention==2:
gauge='R'
if self.spin in [0,1]:
evals, evecs = self.ham.eigh(k=k, spin=self.spin, eigvals_only=False, gauge=gauge)
elif self.spin is None:
evals, evecs = self.ham.eigh(k=k, eigvals_only=False, gauge=gauge)
            # reorder the basis dimension from interleaved (1up, 1down, 2up, 2down, ...) to blocked (all up, then all down)
evecs=np.vstack([evecs[::2, :], evecs[1::2,:]])
elif self.spin=='merge':
evals0, evecs0 = self.ham.eigh(k=k, spin=0, eigvals_only=False, gauge=gauge)
evals1, evecs1 = self.ham.eigh(k=k, spin=1, eigvals_only=False, gauge=gauge)
evals=np.zeros(self.nbasis, dtype=float)
evecs=np.zeros((self.nbasis, self.nbasis), dtype=complex)
evals[:self.norb]=evals0
evals[self.norb:]=evals1
evecs[:self.norb, :self.norb]=evecs0
evecs[self.norb:, self.norb:]=evecs1
return evals, evecs
def Hk(self, k, convention=2):
if convention==1:
gauge='r'
elif convention==2:
gauge='R'
if self.spin is None:
H=self.ham.Hk(k, gauge=gauge,format='dense')
H=np.vstack([H[::2,:], H[1::2,:]])
H=np.hstack([H[:,::2], H[:,1::2]])
elif self.spin in [0,1]:
H=self.ham.Hk(k, spin=self.spin, gauge=gauge, format='dense')
elif self.spin == 'merge':
H=np.zeros((self.nbasis, self.nbasis), dtype='complex')
H[:self.norb, :self.norb]=self.ham.Hk(k, spin=0, gauge=gauge, format='dense')
H[self.norb:, self.norb:]=self.ham.Hk(k, spin=1, gauge=gauge, format='dense')
return H
def eigen(self, k, convention=2):
return self.solve(k)
def gen_ham(self, k, convention=2):
return self.Hk(k, convention=convention)
def Sk(self, k, convention=2):
if convention==1:
gauge='r'
elif convention==2:
gauge='R'
        S0=self.ham.Sk(k, gauge=gauge, format='dense')
#print(f"shape:{S0.shape}")
#print(f"{self.nbasis}")
if self.spin is None:
S=np.vstack([S0[::2,:], S0[1::2,:]])
S=np.hstack([S[:,::2], S[:,1::2]])
#S=np.zeros((self.nbasis, self.nbasis), dtype='complex')
#S[:self.norb,:self.norb]=S0
#S[self.norb:, self.norb:]=S0
#S=np.zeros((self.nbasis, self.nbasis), dtype='complex')
#S[:self.nbasis//2,:self.norb//2]=S0
#S[self.norb//2:, self.norb//2:]=S0
elif self.spin in [0,1]:
S=S0
elif self.spin=='merge':
S=np.zeros((self.nbasis, self.nbasis), dtype='complex')
S[:self.norb,:self.norb]=S0
S[self.norb:, self.norb:]=S0
return S
def solve_all(self, kpts, orth=True):
evals = []
evecs = []
for ik, k in enumerate(kpts):
if orth:
S=self.Sk(k)
Smh=Lowdin(S)
H=self.gen_ham(k)
Horth=Smh.T.conj() @ H @ Smh
evalue, evec = eigh(Horth)
else:
evalue, evec = self.solve(k)
evals.append(evalue)
evecs.append(evec)
return np.array(evals, dtype=float), np.array(evecs, dtype=complex, order='C')
def HS_and_eigen(self, kpts, convention=2):
evals = []
evecs = []
H=[]
S=[]
for ik, k in enumerate(kpts):
Hk = self.Hk(k, convention=convention)
Sk = self.Sk(k, convention=convention)
H.append(self.Hk(k, convention=convention))
S.append(self.Sk(k, convention=convention))
evalue, evec = self.solve(k, convention=convention)
evals.append(evalue)
evecs.append(evec)
return np.array(H), np.array(S), np.array(evals, dtype=float), np.array(evecs, dtype=complex, order='C')
def get_fermi_level(self):
return 0.0
#def test():
# fdf = sisl.get_sile('/home/hexu/projects/learn_siesta/SMO_Wannier/siesta.fdf')
# H = fdf.read_hamiltonian(order='nc',dim=2)
# print(H._spin._is_polarized)
# print(H.__dict__.keys())
# print(H._geometry.__dict__.keys())
# print(H._geometry.xyz)
# print(H._geometry.sc)
# H._geometry.sc.cell
# orb=H._geometry._atoms[0].orbital[0]
# print(orb.name())
# s=SislWrapper(H)
# s.view_info()
#
#import sisl
#fdf = sisl.get_sile('/home/hexu/projects/learn_siesta/SMO_Wannier/siesta.fdf')
#H = fdf.read_hamiltonian(order='nc',dim=2)
#if __name__=='__main__':
# test()
| [
"TB2J.utils.symbol_number",
"numpy.zeros",
"numpy.hstack",
"collections.defaultdict",
"numpy.array",
"ase.atoms.Atoms",
"scipy.linalg.eigh",
"numpy.vstack"
] | [((639, 663), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (650, 663), False, 'from collections import defaultdict\n'), ((805, 824), 'numpy.array', 'np.array', (['g.sc.cell'], {}), '(g.sc.cell)\n', (813, 824), True, 'import numpy as np\n'), ((923, 994), 'ase.atoms.Atoms', 'Atoms', ([], {'numbers': 'atomic_numbers', 'cell': 'self.cell', 'positions': 'self.positions'}), '(numbers=atomic_numbers, cell=self.cell, positions=self.positions)\n', (928, 994), False, 'from ase.atoms import Atoms\n'), ((3812, 3846), 'numpy.vstack', 'np.vstack', (['[H[::2, :], H[1::2, :]]'], {}), '([H[::2, :], H[1::2, :]])\n', (3821, 3846), True, 'import numpy as np\n'), ((3859, 3893), 'numpy.hstack', 'np.hstack', (['[H[:, ::2], H[:, 1::2]]'], {}), '([H[:, ::2], H[:, 1::2]])\n', (3868, 3893), True, 'import numpy as np\n'), ((4757, 4793), 'numpy.vstack', 'np.vstack', (['[S0[::2, :], S0[1::2, :]]'], {}), '([S0[::2, :], S0[1::2, :]])\n', (4766, 4793), True, 'import numpy as np\n'), ((4806, 4840), 'numpy.hstack', 'np.hstack', (['[S[:, ::2], S[:, 1::2]]'], {}), '([S[:, ::2], S[:, 1::2]])\n', (4815, 4840), True, 'import numpy as np\n'), ((5869, 5897), 'numpy.array', 'np.array', (['evals'], {'dtype': 'float'}), '(evals, dtype=float)\n', (5877, 5897), True, 'import numpy as np\n'), ((5899, 5940), 'numpy.array', 'np.array', (['evecs'], {'dtype': 'complex', 'order': '"""C"""'}), "(evecs, dtype=complex, order='C')\n", (5907, 5940), True, 'import numpy as np\n'), ((6449, 6460), 'numpy.array', 'np.array', (['H'], {}), '(H)\n', (6457, 6460), True, 'import numpy as np\n'), ((6462, 6473), 'numpy.array', 'np.array', (['S'], {}), '(S)\n', (6470, 6473), True, 'import numpy as np\n'), ((6475, 6503), 'numpy.array', 'np.array', (['evals'], {'dtype': 'float'}), '(evals, dtype=float)\n', (6483, 6503), True, 'import numpy as np\n'), ((6505, 6546), 'numpy.array', 'np.array', (['evecs'], {'dtype': 'complex', 'order': '"""C"""'}), "(evecs, dtype=complex, order='C')\n", (6513, 6546), True, 'import numpy as np\n'), ((3000, 3042), 'numpy.vstack', 'np.vstack', (['[evecs[::2, :], evecs[1::2, :]]'], {}), '([evecs[::2, :], evecs[1::2, :]])\n', (3009, 3042), True, 'import numpy as np\n'), ((5715, 5726), 'scipy.linalg.eigh', 'eigh', (['Horth'], {}), '(Horth)\n', (5719, 5726), False, 'from scipy.linalg import eigh\n'), ((1014, 1039), 'TB2J.utils.symbol_number', 'symbol_number', (['self.atoms'], {}), '(self.atoms)\n', (1027, 1039), False, 'from TB2J.utils import symbol_number\n'), ((3271, 3305), 'numpy.zeros', 'np.zeros', (['self.nbasis'], {'dtype': 'float'}), '(self.nbasis, dtype=float)\n', (3279, 3305), True, 'import numpy as np\n'), ((3324, 3375), 'numpy.zeros', 'np.zeros', (['(self.nbasis, self.nbasis)'], {'dtype': 'complex'}), '((self.nbasis, self.nbasis), dtype=complex)\n', (3332, 3375), True, 'import numpy as np\n'), ((4048, 4101), 'numpy.zeros', 'np.zeros', (['(self.nbasis, self.nbasis)'], {'dtype': '"""complex"""'}), "((self.nbasis, self.nbasis), dtype='complex')\n", (4056, 4101), True, 'import numpy as np\n'), ((5254, 5307), 'numpy.zeros', 'np.zeros', (['(self.nbasis, self.nbasis)'], {'dtype': '"""complex"""'}), "((self.nbasis, self.nbasis), dtype='complex')\n", (5262, 5307), True, 'import numpy as np\n')] |
import os
import pytest
import numpy as np
import sys
#sys.path.append('/home/mts87985/ml-tomo/')
# Random seed to ensure that tests are repeatable
RANDOM_SEED = 23
np.random.seed(RANDOM_SEED)
def test_norm_disc():
from super_tomo_py.data_handeling.tools import normalise_discritise_data
img = np.random.randint(0, 255, size=(100, 100, 1))
mask = np.zeros((100, 100, 1))
sq_lower = np.random.randint(0, 50)
sq_upper = sq_lower + np.random.randint(0, 50)
mask[sq_lower:sq_upper, sq_lower:sq_upper, 0] = 12
img_norm, mask_norm = normalise_discritise_data(img, mask)
assert img_norm.shape == img.shape
assert mask_norm.shape == mask.shape
assert np.max(img_norm) <= 1.
assert np.min(img_norm) >= 0.
assert np.max(mask_norm) <= 1.
assert np.min(mask_norm) >= 0.
img = np.random.randint(0, 255, size=(100, 100, 1))
mask = np.zeros((100, 100, 1))
sq_lower = np.random.randint(0, 50)
sq_upper = sq_lower + np.random.randint(0, 50)
mask[sq_lower:sq_upper, sq_lower:sq_upper, 0] = 12
sq_lower = np.random.randint(0, 50)
sq_upper = sq_lower + np.random.randint(0, 50)
mask[sq_lower:sq_upper, sq_lower:sq_upper, 0] = 26
img_norm, mask_norm = normalise_discritise_data(img, mask, flag_multi_class=True)
assert img_norm.shape == img.shape
assert mask_norm.shape == mask.shape
assert np.max(img_norm) <= 1.
assert np.min(img_norm) >= 0.
assert np.max(mask_norm) <= len(np.unique(mask))
assert np.min(mask_norm) >= 0.
| [
"numpy.random.seed",
"numpy.zeros",
"numpy.max",
"numpy.random.randint",
"numpy.min",
"super_tomo_py.data_handeling.tools.normalise_discritise_data",
"numpy.unique"
] | [((167, 194), 'numpy.random.seed', 'np.random.seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (181, 194), True, 'import numpy as np\n'), ((306, 351), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '(100, 100, 1)'}), '(0, 255, size=(100, 100, 1))\n', (323, 351), True, 'import numpy as np\n'), ((363, 386), 'numpy.zeros', 'np.zeros', (['(100, 100, 1)'], {}), '((100, 100, 1))\n', (371, 386), True, 'import numpy as np\n'), ((402, 426), 'numpy.random.randint', 'np.random.randint', (['(0)', '(50)'], {}), '(0, 50)\n', (419, 426), True, 'import numpy as np\n'), ((560, 596), 'super_tomo_py.data_handeling.tools.normalise_discritise_data', 'normalise_discritise_data', (['img', 'mask'], {}), '(img, mask)\n', (585, 596), False, 'from super_tomo_py.data_handeling.tools import normalise_discritise_data\n'), ((827, 872), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '(100, 100, 1)'}), '(0, 255, size=(100, 100, 1))\n', (844, 872), True, 'import numpy as np\n'), ((884, 907), 'numpy.zeros', 'np.zeros', (['(100, 100, 1)'], {}), '((100, 100, 1))\n', (892, 907), True, 'import numpy as np\n'), ((923, 947), 'numpy.random.randint', 'np.random.randint', (['(0)', '(50)'], {}), '(0, 50)\n', (940, 947), True, 'import numpy as np\n'), ((1069, 1093), 'numpy.random.randint', 'np.random.randint', (['(0)', '(50)'], {}), '(0, 50)\n', (1086, 1093), True, 'import numpy as np\n'), ((1227, 1286), 'super_tomo_py.data_handeling.tools.normalise_discritise_data', 'normalise_discritise_data', (['img', 'mask'], {'flag_multi_class': '(True)'}), '(img, mask, flag_multi_class=True)\n', (1252, 1286), False, 'from super_tomo_py.data_handeling.tools import normalise_discritise_data\n'), ((453, 477), 'numpy.random.randint', 'np.random.randint', (['(0)', '(50)'], {}), '(0, 50)\n', (470, 477), True, 'import numpy as np\n'), ((689, 705), 'numpy.max', 'np.max', (['img_norm'], {}), '(img_norm)\n', (695, 705), True, 'import numpy as np\n'), ((723, 739), 'numpy.min', 'np.min', (['img_norm'], {}), '(img_norm)\n', (729, 739), True, 'import numpy as np\n'), ((757, 774), 'numpy.max', 'np.max', (['mask_norm'], {}), '(mask_norm)\n', (763, 774), True, 'import numpy as np\n'), ((792, 809), 'numpy.min', 'np.min', (['mask_norm'], {}), '(mask_norm)\n', (798, 809), True, 'import numpy as np\n'), ((974, 998), 'numpy.random.randint', 'np.random.randint', (['(0)', '(50)'], {}), '(0, 50)\n', (991, 998), True, 'import numpy as np\n'), ((1120, 1144), 'numpy.random.randint', 'np.random.randint', (['(0)', '(50)'], {}), '(0, 50)\n', (1137, 1144), True, 'import numpy as np\n'), ((1379, 1395), 'numpy.max', 'np.max', (['img_norm'], {}), '(img_norm)\n', (1385, 1395), True, 'import numpy as np\n'), ((1413, 1429), 'numpy.min', 'np.min', (['img_norm'], {}), '(img_norm)\n', (1419, 1429), True, 'import numpy as np\n'), ((1447, 1464), 'numpy.max', 'np.max', (['mask_norm'], {}), '(mask_norm)\n', (1453, 1464), True, 'import numpy as np\n'), ((1500, 1517), 'numpy.min', 'np.min', (['mask_norm'], {}), '(mask_norm)\n', (1506, 1517), True, 'import numpy as np\n'), ((1472, 1487), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (1481, 1487), True, 'import numpy as np\n')] |
"""
Copyright (c) 2016, Granular, Inc.
All rights reserved.
License: BSD 3-Clause ("BSD New" or "BSD Simplified")
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the nor the names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import numpy as np
from pyspatial.raster import rasterize, read_catalog
from pyspatial.raster import RasterBand
from pyspatial.vector import read_layer
cwd = os.getcwd()
base = os.path.abspath(os.path.dirname(__file__))
def get_path(x):
return os.path.join(base, "data", x)
class TestRasterDatasetTiled:
@classmethod
def setup_class(cls):
os.chdir(base)
# Let's add some CLUs for Washington County, IL
cls.vl, _ = read_layer(get_path("vector/clu/clu_public_a_il189.shp"),
index="uniqueid")
# Figure out which raster files we'll need.
cls.dataset = read_catalog(get_path("../catalog/cdl_2014.json"))
# Create a RasterBand for the raster. Raster data is stored
# and read from there.
cls.rb = RasterBand(get_path("raster/95000_45000.tif"))
# Convert CLUs to pixels.
cls.px_shps = cls.dataset.to_pixels(cls.vl)
@classmethod
def teardown_class(cls):
os.chdir(cwd)
def test_single_shape_should_have_equivalent_counts(self):
# Read in raster projection (applies to all raster tiles).
rast_proj = self.dataset.proj
# Compute counts for a single shape using single-tile computation for reference.
shp = self.px_shps[0]
mask = rasterize(shp, ext_outline=0, int_outline=1).T
minx, miny, maxx, maxy = shp.bounds
pts = (np.argwhere(mask>0) + np.array([minx, miny])).astype(int)
counts_reference = np.bincount(self.rb[pts[:,1],pts[:, 0]], minlength=256)
# Now, compute counts for a single shape using RasterDataset, and compare against single-tile computation.
values = np.array(self.dataset.get_values_for_pixels(pts)).astype(np.uint8)
counts = np.bincount(values, minlength=256)
assert(np.array_equal(counts, counts_reference))
# Compare all shapes between single-tile computation and RasterDataset computation.
def test_all_shapes_should_have_equivalent_counts(self):
# Read in raster projection (applies to all raster tiles).
rast_proj = self.dataset.proj
# All shapes in the layer should already be in pixel coords. Check that we have the right number of them.
assert (len(self.px_shps) == 23403)
# Compute counts for a single shape using single-tile computation for reference.
for i, shp in enumerate(self.px_shps):
# Rasterize the shape, and find list of all points. This is the same for either case.
mask = rasterize(shp, ext_outline=0, int_outline=1).T
minx, miny, maxx, maxy = shp.bounds
pts = (np.argwhere(mask>0) + np.array([minx, miny])).astype(int)
# Tiled data (in RasterDataset): Compute counts.
values_td = np.array(self.dataset.get_values_for_pixels(pts)).astype(np.uint8)
counts_td = np.bincount(values_td, minlength=256)
# Single-tile Dataset: Compute counts for reference, and compare for equality.
counts_ref = np.bincount(self.rb[pts[:,1],pts[:, 0]], minlength=256)
assert(np.array_equal(counts_td, counts_ref))
| [
"os.getcwd",
"os.path.dirname",
"numpy.argwhere",
"numpy.array",
"numpy.array_equal",
"numpy.bincount",
"pyspatial.raster.rasterize",
"os.path.join",
"os.chdir"
] | [((1691, 1702), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1700, 1702), False, 'import os\n'), ((1726, 1751), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1741, 1751), False, 'import os\n'), ((1783, 1812), 'os.path.join', 'os.path.join', (['base', '"""data"""', 'x'], {}), "(base, 'data', x)\n", (1795, 1812), False, 'import os\n'), ((1896, 1910), 'os.chdir', 'os.chdir', (['base'], {}), '(base)\n', (1904, 1910), False, 'import os\n'), ((2526, 2539), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (2534, 2539), False, 'import os\n'), ((3036, 3093), 'numpy.bincount', 'np.bincount', (['self.rb[pts[:, 1], pts[:, 0]]'], {'minlength': '(256)'}), '(self.rb[pts[:, 1], pts[:, 0]], minlength=256)\n', (3047, 3093), True, 'import numpy as np\n'), ((3309, 3343), 'numpy.bincount', 'np.bincount', (['values'], {'minlength': '(256)'}), '(values, minlength=256)\n', (3320, 3343), True, 'import numpy as np\n'), ((3359, 3399), 'numpy.array_equal', 'np.array_equal', (['counts', 'counts_reference'], {}), '(counts, counts_reference)\n', (3373, 3399), True, 'import numpy as np\n'), ((2845, 2889), 'pyspatial.raster.rasterize', 'rasterize', (['shp'], {'ext_outline': '(0)', 'int_outline': '(1)'}), '(shp, ext_outline=0, int_outline=1)\n', (2854, 2889), False, 'from pyspatial.raster import rasterize, read_catalog\n'), ((4419, 4456), 'numpy.bincount', 'np.bincount', (['values_td'], {'minlength': '(256)'}), '(values_td, minlength=256)\n', (4430, 4456), True, 'import numpy as np\n'), ((4574, 4631), 'numpy.bincount', 'np.bincount', (['self.rb[pts[:, 1], pts[:, 0]]'], {'minlength': '(256)'}), '(self.rb[pts[:, 1], pts[:, 0]], minlength=256)\n', (4585, 4631), True, 'import numpy as np\n'), ((4649, 4686), 'numpy.array_equal', 'np.array_equal', (['counts_td', 'counts_ref'], {}), '(counts_td, counts_ref)\n', (4663, 4686), True, 'import numpy as np\n'), ((4070, 4114), 'pyspatial.raster.rasterize', 'rasterize', (['shp'], {'ext_outline': '(0)', 'int_outline': '(1)'}), '(shp, ext_outline=0, int_outline=1)\n', (4079, 4114), False, 'from pyspatial.raster import rasterize, read_catalog\n'), ((2951, 2972), 'numpy.argwhere', 'np.argwhere', (['(mask > 0)'], {}), '(mask > 0)\n', (2962, 2972), True, 'import numpy as np\n'), ((2973, 2995), 'numpy.array', 'np.array', (['[minx, miny]'], {}), '([minx, miny])\n', (2981, 2995), True, 'import numpy as np\n'), ((4184, 4205), 'numpy.argwhere', 'np.argwhere', (['(mask > 0)'], {}), '(mask > 0)\n', (4195, 4205), True, 'import numpy as np\n'), ((4206, 4228), 'numpy.array', 'np.array', (['[minx, miny]'], {}), '([minx, miny])\n', (4214, 4228), True, 'import numpy as np\n')] |
#emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
#ex: set sts=4 ts=4 sw=4 noet:
__author__ = '<NAME>'
__copyright__ = 'Copyright (c) 2013 <NAME>'
__license__ = 'MIT'
import os
import shutil
from glob import glob
from os.path import exists, join as pjoin, dirname, basename
from nose.tools import ok_, eq_
from numpy.testing import assert_array_equal
#import logging
#log = logging.getLogger('vb')
#log.setLevel('DEBUG')
def test_benchmarkrunner():
from vbench.api import BenchmarkRunner
from suite import *
# Just to make sure there are no left-overs
    shutil.rmtree(TMP_DIR, ignore_errors=True)
if exists(DB_PATH):
os.unlink(DB_PATH)
ok_(not exists(DB_PATH))
runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL,
BUILD, DB_PATH, TMP_DIR, PREPARE,
clean_cmd=CLEAN,
run_option='all', run_order='normal',
start_date=START_DATE,
module_dependencies=DEPENDENCIES)
revisions_to_run = runner._get_revisions_to_run()
eq_(len(revisions_to_run), 4) # we had 4 so far
revisions_ran = runner.run()
# print "D1: ", revisions_ran
assert_array_equal([x[0] for x in revisions_ran],
revisions_to_run)
# First revision
eq_(revisions_ran[0][1], (False, 3)) # no functions were available at that point
eq_(revisions_ran[1][1], (True, 3)) # all 3 tests were available in the first rev
ok_(exists(TMP_DIR))
ok_(exists(DB_PATH))
eq_(len(runner.blacklist), 0)
# Run 2nd time and verify that all are still listed BUT none new succeeds
revisions_ran = runner.run()
#print "D2: ", revisions_ran
for rev, v in revisions_ran:
eq_(v, (False, 0))
# What if we expand list of benchmarks and run 3rd time
runner.benchmarks = collect_benchmarks(['vb_sins', 'vb_sins2'])
revisions_ran = runner.run()
# for that single added benchmark there still were no function
eq_(revisions_ran[0][1], (False, 1))
# all others should have "succeeded" on that single one
for rev, v in revisions_ran[1:]:
eq_(v, (True, 1))
# and on 4th run -- nothing new
revisions_ran = runner.run()
for rev, v in revisions_ran:
eq_(v, (False, 0))
# Now let's smoke test generation of the .rst files
from vbench.reports import generate_rst_files
rstdir = pjoin(TMP_DIR, 'sources')
generate_rst_files(runner.benchmarks, DB_PATH, rstdir, """VERY LONG DESCRIPTION""")
# Verify that it all looks close to the desired
image_files = [basename(x) for x in glob(pjoin(rstdir, 'vbench/figures/*.png'))]
target_image_files = [b.name + '.png' for b in runner.benchmarks]
eq_(set(image_files), set(target_image_files))
rst_files = [basename(x) for x in glob(pjoin(rstdir, 'vbench/*.rst'))]
target_rst_files = [b.name + '.rst' for b in runner.benchmarks]
eq_(set(rst_files), set(target_rst_files))
module_files = [basename(x) for x in glob(pjoin(rstdir, '*.rst'))]
target_module_files = list(set(['vb_' + b.module_name + '.rst' for b in runner.benchmarks]))
eq_(set(module_files), set(target_module_files + ['index.rst']))
#print TMP_DIR
shutil.rmtree(TMP_DIR)
shutil.rmtree(dirname(DB_PATH))
| [
"vbench.api.BenchmarkRunner",
"os.unlink",
"os.path.basename",
"numpy.testing.assert_array_equal",
"vbench.reports.generate_rst_files",
"os.path.dirname",
"os.path.exists",
"nose.tools.eq_",
"shutil.rmtree",
"os.path.join"
] | [((619, 641), 'shutil.rmtree', 'shutil.rmtree', (['TMP_DIR'], {}), '(TMP_DIR)\n', (632, 641), False, 'import shutil\n'), ((649, 664), 'os.path.exists', 'exists', (['DB_PATH'], {}), '(DB_PATH)\n', (655, 664), False, 'from os.path import exists, join as pjoin, dirname, basename\n'), ((736, 938), 'vbench.api.BenchmarkRunner', 'BenchmarkRunner', (['benchmarks', 'REPO_PATH', 'REPO_URL', 'BUILD', 'DB_PATH', 'TMP_DIR', 'PREPARE'], {'clean_cmd': 'CLEAN', 'run_option': '"""all"""', 'run_order': '"""normal"""', 'start_date': 'START_DATE', 'module_dependencies': 'DEPENDENCIES'}), "(benchmarks, REPO_PATH, REPO_URL, BUILD, DB_PATH, TMP_DIR,\n PREPARE, clean_cmd=CLEAN, run_option='all', run_order='normal',\n start_date=START_DATE, module_dependencies=DEPENDENCIES)\n", (751, 938), False, 'from vbench.api import BenchmarkRunner\n'), ((1269, 1336), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[x[0] for x in revisions_ran]', 'revisions_to_run'], {}), '([x[0] for x in revisions_ran], revisions_to_run)\n', (1287, 1336), False, 'from numpy.testing import assert_array_equal\n'), ((1385, 1421), 'nose.tools.eq_', 'eq_', (['revisions_ran[0][1]', '(False, 3)'], {}), '(revisions_ran[0][1], (False, 3))\n', (1388, 1421), False, 'from nose.tools import ok_, eq_\n'), ((1473, 1508), 'nose.tools.eq_', 'eq_', (['revisions_ran[1][1]', '(True, 3)'], {}), '(revisions_ran[1][1], (True, 3))\n', (1476, 1508), False, 'from nose.tools import ok_, eq_\n'), ((2083, 2119), 'nose.tools.eq_', 'eq_', (['revisions_ran[0][1]', '(False, 1)'], {}), '(revisions_ran[0][1], (False, 1))\n', (2086, 2119), False, 'from nose.tools import ok_, eq_\n'), ((2493, 2518), 'os.path.join', 'pjoin', (['TMP_DIR', '"""sources"""'], {}), "(TMP_DIR, 'sources')\n", (2498, 2518), True, 'from os.path import exists, join as pjoin, dirname, basename\n'), ((2523, 2602), 'vbench.reports.generate_rst_files', 'generate_rst_files', (['runner.benchmarks', 'DB_PATH', 'rstdir', '"""VERY LONG DESCRIPTION"""'], {}), "(runner.benchmarks, DB_PATH, rstdir, 'VERY LONG DESCRIPTION')\n", (2541, 2602), False, 'from vbench.reports import generate_rst_files\n'), ((3319, 3341), 'shutil.rmtree', 'shutil.rmtree', (['TMP_DIR'], {}), '(TMP_DIR)\n', (3332, 3341), False, 'import shutil\n'), ((674, 692), 'os.unlink', 'os.unlink', (['DB_PATH'], {}), '(DB_PATH)\n', (683, 692), False, 'import os\n'), ((1568, 1583), 'os.path.exists', 'exists', (['TMP_DIR'], {}), '(TMP_DIR)\n', (1574, 1583), False, 'from os.path import exists, join as pjoin, dirname, basename\n'), ((1593, 1608), 'os.path.exists', 'exists', (['DB_PATH'], {}), '(DB_PATH)\n', (1599, 1608), False, 'from os.path import exists, join as pjoin, dirname, basename\n'), ((1831, 1849), 'nose.tools.eq_', 'eq_', (['v', '(False, 0)'], {}), '(v, (False, 0))\n', (1834, 1849), False, 'from nose.tools import ok_, eq_\n'), ((2225, 2242), 'nose.tools.eq_', 'eq_', (['v', '(True, 1)'], {}), '(v, (True, 1))\n', (2228, 2242), False, 'from nose.tools import ok_, eq_\n'), ((2354, 2372), 'nose.tools.eq_', 'eq_', (['v', '(False, 0)'], {}), '(v, (False, 0))\n', (2357, 2372), False, 'from nose.tools import ok_, eq_\n'), ((2679, 2690), 'os.path.basename', 'basename', (['x'], {}), '(x)\n', (2687, 2690), False, 'from os.path import exists, join as pjoin, dirname, basename\n'), ((2884, 2895), 'os.path.basename', 'basename', (['x'], {}), '(x)\n', (2892, 2895), False, 'from os.path import exists, join as pjoin, dirname, basename\n'), ((3078, 3089), 'os.path.basename', 'basename', (['x'], {}), '(x)\n', (3086, 3089), False, 'from os.path import exists, join as pjoin, dirname, basename\n'), ((3360, 3376), 'os.path.dirname', 'dirname', (['DB_PATH'], {}), '(DB_PATH)\n', (3367, 3376), False, 'from os.path import exists, join as pjoin, dirname, basename\n'), ((705, 720), 'os.path.exists', 'exists', (['DB_PATH'], {}), '(DB_PATH)\n', (711, 720), False, 'from os.path import exists, join as pjoin, dirname, basename\n'), ((2705, 2742), 'os.path.join', 'pjoin', (['rstdir', '"""vbench/figures/*.png"""'], {}), "(rstdir, 'vbench/figures/*.png')\n", (2710, 2742), True, 'from os.path import exists, join as pjoin, dirname, basename\n'), ((2910, 2939), 'os.path.join', 'pjoin', (['rstdir', '"""vbench/*.rst"""'], {}), "(rstdir, 'vbench/*.rst')\n", (2915, 2939), True, 'from os.path import exists, join as pjoin, dirname, basename\n'), ((3104, 3126), 'os.path.join', 'pjoin', (['rstdir', '"""*.rst"""'], {}), "(rstdir, '*.rst')\n", (3109, 3126), True, 'from os.path import exists, join as pjoin, dirname, basename\n')] |
"""This module contains code for the bias monitor Bokeh plots.
Author
------
- <NAME>
- <NAME>
Use
---
This module can be used within a Python session as such:
::
from jwql.website.apps.jwql import monitor_pages
monitor_template = monitor_pages.BiasMonitor()
monitor_template.input_parameters = ('NIRCam', 'NRCA1_FULL')
"""
from datetime import datetime, timedelta
import os
from astropy.stats import sigma_clip
import numpy as np
from jwql.bokeh_templating import BokehTemplate
from jwql.database.database_interface import session, NIRCamBiasStats, NIRISSBiasStats, NIRSpecBiasStats
from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
class BiasMonitor(BokehTemplate):
# Combine the input parameters into a single property because we
# do not want to invoke the setter unless all are updated
@property
def input_parameters(self):
return (self._instrument, self._aperture)
@input_parameters.setter
def input_parameters(self, info):
self._instrument, self._aperture = info
self.pre_init()
self.post_init()
def identify_tables(self):
"""Determine which database tables to use for the given instrument"""
mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self._instrument.lower()]
self.stats_table = eval('{}BiasStats'.format(mixed_case_name))
def load_data(self):
"""Query the database tables to get all of the relevant bias data"""
# Determine which database tables are needed based on instrument
self.identify_tables()
# Query database for all data in bias stats with a matching aperture,
# and sort the data by exposure start time.
self.query_results = session.query(self.stats_table) \
.filter(self.stats_table.aperture == self._aperture) \
.order_by(self.stats_table.expstart) \
.all()
session.close()
def pre_init(self):
# Start with default values for instrument and aperture because
# BokehTemplate's __init__ method does not allow input arguments
try:
dummy_instrument = self._instrument
dummy_aperture = self._aperture
except AttributeError:
self._instrument = 'NIRCam'
self._aperture = ''
self._embed = True
self.format_string = None
self.interface_file = os.path.join(SCRIPT_DIR, 'yaml', 'monitor_bias_interface.yaml')
def post_init(self):
# Load the bias data
self.load_data()
# Update the mean bias over time figures
self.update_mean_bias_figures()
# Update the calibrated 0th group image
self.update_calibrated_image()
# Update the histogram of the calibrated 0th group image
if self._instrument == 'NIRISS':
self.update_calibrated_histogram()
# Update the calibrated collapsed values figures
if self._instrument != 'NIRISS':
self.update_collapsed_vals_figures()
def update_calibrated_histogram(self):
"""Updates the calibrated 0th group histogram"""
if len(self.query_results) != 0:
# Get the most recent data; the entries were sorted by time when
# loading the database, so the last entry will always be the most recent.
counts = np.array(self.query_results[-1].counts)
bin_centers = np.array(self.query_results[-1].bin_centers)
# Update the calibrated image histogram
self.refs['cal_hist_source'].data = {'counts': counts,
'bin_centers': bin_centers}
self.refs['cal_hist_xr'].start = bin_centers.min()
self.refs['cal_hist_xr'].end = bin_centers.max()
self.refs['cal_hist_yr'].start = counts.min()
self.refs['cal_hist_yr'].end = counts.max() + counts.max() * 0.05
def update_calibrated_image(self):
"""Updates the calibrated 0th group image"""
if len(self.query_results) != 0:
# Get the most recent data; the entries were sorted by time when
# loading the database, so the last entry will always be the most recent.
cal_image_png = self.query_results[-1].cal_image
cal_image_png = os.path.join('/static', '/'.join(cal_image_png.split('/')[-6:]))
# Update the image source for the figure
self.refs['cal_image'].image_url(url=[cal_image_png], x=0, y=0, w=2048, h=2048, anchor="bottom_left")
# Update the calibrated image style
self.refs['cal_image'].xaxis.visible = False
self.refs['cal_image'].yaxis.visible = False
self.refs['cal_image'].xgrid.grid_line_color = None
self.refs['cal_image'].ygrid.grid_line_color = None
self.refs['cal_image'].title.text_font_size = '22px'
self.refs['cal_image'].title.align = 'center'
def update_collapsed_vals_figures(self):
"""Updates the calibrated median-collapsed row and column figures"""
if len(self.query_results) != 0:
for direction in ['rows', 'columns']:
# Get most recent data; the entries were sorted by time when
# loading the database, so the last entry will always be the most recent.
vals = np.array(self.query_results[-1].__dict__['collapsed_{}'.format(direction)])
pixels = np.arange(len(vals))
self.refs['collapsed_{}_source'.format(direction)].data = {'pixel': pixels,
'signal': vals}
# Update the pixel and signal limits
self.refs['collapsed_{}_pixel_range'.format(direction)].start = pixels.min() - 10
self.refs['collapsed_{}_pixel_range'.format(direction)].end = pixels.max() + 10
self.refs['collapsed_{}_signal_range'.format(direction)].start = vals[4:2044].min() - 10 # excluding refpix
self.refs['collapsed_{}_signal_range'.format(direction)].end = vals[4:2044].max() + 10
def update_mean_bias_figures(self):
"""Updates the mean bias over time bokeh plots"""
# Get the dark exposures and their starts times
filenames = [os.path.basename(result.uncal_filename).replace('_uncal.fits', '') for result in self.query_results]
expstarts_iso = np.array([result.expstart for result in self.query_results])
expstarts = np.array([datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%f') for date in expstarts_iso])
# Update the mean bias figures for all amps and odd/even columns
for amp in ['1', '2', '3', '4']:
for kind in ['odd', 'even']:
bias_vals = np.array([getattr(result, 'amp{}_{}_med'.format(amp, kind)) for result in self.query_results])
self.refs['mean_bias_source_amp{}_{}'.format(amp, kind)].data = {'time': expstarts,
'time_iso': expstarts_iso,
'mean_bias': bias_vals,
'filename': filenames}
self.refs['mean_bias_figure_amp{}_{}'.format(amp, kind)].title.text = 'Amp {} {}'.format(amp, kind.capitalize())
self.refs['mean_bias_figure_amp{}_{}'.format(amp, kind)].hover.tooltips = [('file', '@filename'),
('time', '@time_iso'),
('bias level', '@mean_bias')]
# Update plot limits if data exists
if len(bias_vals) != 0:
self.refs['mean_bias_xr_amp{}_{}'.format(amp, kind)].start = expstarts.min() - timedelta(days=3)
self.refs['mean_bias_xr_amp{}_{}'.format(amp, kind)].end = expstarts.max() + timedelta(days=3)
self.refs['mean_bias_yr_amp{}_{}'.format(amp, kind)].start = min(x for x in bias_vals if x is not None) - 20
self.refs['mean_bias_yr_amp{}_{}'.format(amp, kind)].end = max(x for x in bias_vals if x is not None) + 20
| [
"jwql.database.database_interface.session.close",
"jwql.database.database_interface.session.query",
"os.path.abspath",
"os.path.basename",
"datetime.datetime.strptime",
"numpy.array",
"datetime.timedelta",
"os.path.join"
] | [((727, 752), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (742, 752), False, 'import os\n'), ((1997, 2012), 'jwql.database.database_interface.session.close', 'session.close', ([], {}), '()\n', (2010, 2012), False, 'from jwql.database.database_interface import session, NIRCamBiasStats, NIRISSBiasStats, NIRSpecBiasStats\n'), ((2484, 2547), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', '"""yaml"""', '"""monitor_bias_interface.yaml"""'], {}), "(SCRIPT_DIR, 'yaml', 'monitor_bias_interface.yaml')\n", (2496, 2547), False, 'import os\n'), ((6498, 6558), 'numpy.array', 'np.array', (['[result.expstart for result in self.query_results]'], {}), '([result.expstart for result in self.query_results])\n', (6506, 6558), True, 'import numpy as np\n'), ((3436, 3475), 'numpy.array', 'np.array', (['self.query_results[-1].counts'], {}), '(self.query_results[-1].counts)\n', (3444, 3475), True, 'import numpy as np\n'), ((3502, 3546), 'numpy.array', 'np.array', (['self.query_results[-1].bin_centers'], {}), '(self.query_results[-1].bin_centers)\n', (3510, 3546), True, 'import numpy as np\n'), ((6589, 6636), 'datetime.datetime.strptime', 'datetime.strptime', (['date', '"""%Y-%m-%dT%H:%M:%S.%f"""'], {}), "(date, '%Y-%m-%dT%H:%M:%S.%f')\n", (6606, 6636), False, 'from datetime import datetime, timedelta\n'), ((6373, 6412), 'os.path.basename', 'os.path.basename', (['result.uncal_filename'], {}), '(result.uncal_filename)\n', (6389, 6412), False, 'import os\n'), ((8031, 8048), 'datetime.timedelta', 'timedelta', ([], {'days': '(3)'}), '(days=3)\n', (8040, 8048), False, 'from datetime import datetime, timedelta\n'), ((8146, 8163), 'datetime.timedelta', 'timedelta', ([], {'days': '(3)'}), '(days=3)\n', (8155, 8163), False, 'from datetime import datetime, timedelta\n'), ((1817, 1848), 'jwql.database.database_interface.session.query', 'session.query', (['self.stats_table'], {}), '(self.stats_table)\n', (1830, 1848), False, 'from jwql.database.database_interface import session, NIRCamBiasStats, NIRISSBiasStats, NIRSpecBiasStats\n')] |
"""Defines utilities intended for internal use only, most notably
:class:`hyperparameter_hunter.space.space_core.Space`. These tools are used behind the scenes by
:class:`hyperparameter_hunter.optimization.protocol_core.BaseOptPro` to combine instances of
dimensions defined in :mod:`hyperparameter_hunter.space.dimensions` into a usable hyperparameter
search Space
Related
-------
:mod:`hyperparameter_hunter.space.dimensions`
Defines concrete descendants of :class:`hyperparameter_hunter.space.dimensions.Dimension`, which
are intended for direct use. :class:`hyperparameter_hunter.space.space_core.Space` is used
to combine these Dimension instances
Notes
-----
Many of the tools defined herein (although substantially modified) are based on those provided by
the excellent [Scikit-Optimize](https://github.com/scikit-optimize/scikit-optimize) library. See
:mod:`hyperparameter_hunter.optimization.backends.skopt` for a copy of SKOpt's license"""
##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter.space.dimensions import Dimension, Real, Integer, Categorical
from hyperparameter_hunter.utils.general_utils import short_repr
##################################################
# Import Miscellaneous Assets
##################################################
from functools import reduce
import numbers
import numpy as np
from sys import maxsize
##################################################
# Import Learning Assets
##################################################
from sklearn.utils import check_random_state
from sklearn.utils.fixes import sp_version
NONE = object()
##################################################
# Utilities
##################################################
def check_dimension(dimension, transform=None):
"""Turn a provided dimension description into a dimension object. Checks that the provided
dimension falls into one of the supported types, listed below in the description of `dimension`
Parameters
----------
dimension: Tuple, list, or Dimension
Search space `Dimension`. May be any of the following:
* `(lower_bound, upper_bound)` tuple (`Real` or `Integer`)
* `(lower_bound, upper_bound, prior)` tuple (`Real`)
* List of categories (`Categorical`)
* `Dimension` instance (`Real`, `Integer` or `Categorical`)
transform: {"identity", "normalize", "onehot"} (optional)
* `Categorical` dimensions support "onehot" or "identity". See `Categorical` documentation
for more information
* `Real` and `Integer` dimensions support "identity" or "normalize". See `Real` or `Integer`
documentation for more information
Returns
-------
dimension: Dimension
Dimension instance created from the provided `dimension` description. If `dimension` is
already an instance of `Dimension`, it is returned unchanged"""
if isinstance(dimension, Dimension):
return dimension
if not isinstance(dimension, (list, tuple, np.ndarray)):
raise ValueError("Dimension has to be a list or tuple")
# `Dimension` subclasses define actual `transform` defaults - Only pass `transform` if not None
kwargs = dict(transform=transform) if transform else {}
if len(dimension) == 1:
return Categorical(dimension, **kwargs)
if len(dimension) == 2:
if any([isinstance(d, (str, bool)) or isinstance(d, np.bool_) for d in dimension]):
return Categorical(dimension, **kwargs)
elif all([isinstance(dim, numbers.Integral) for dim in dimension]):
return Integer(*dimension, **kwargs)
elif any([isinstance(dim, numbers.Real) for dim in dimension]):
return Real(*dimension, **kwargs)
if len(dimension) == 3:
        # TODO: Below `any` should probably be `all`
if any([isinstance(dim, (float, int)) for dim in dimension[:2]]) and dimension[2] in [
"uniform",
"log-uniform",
]:
return Real(*dimension, **kwargs)
else:
return Categorical(dimension, **kwargs)
if len(dimension) > 3:
return Categorical(dimension, **kwargs)
raise ValueError(f"Invalid `dimension` {dimension}. See documentation for supported types")
##################################################
# Space
##################################################
class Space:
def __init__(self, dimensions):
"""Initialize a search space from given specifications
Parameters
----------
dimensions: List
List of search space `Dimension` instances or representatives. Each search dimension
may be any of the following:
* `(lower_bound, upper_bound)` tuple (`Real` or `Integer`)
* `(lower_bound, upper_bound, prior)` tuple (`Real`)
* List of categories (`Categorical`)
* `Dimension` instance (`Real`, `Integer` or `Categorical`)
Notes
-----
The upper and lower bounds are inclusive for `Integer` dimensions"""
self.dimensions = [check_dimension(dim) for dim in dimensions]
def __eq__(self, other):
return all([a == b for a, b in zip(self.dimensions, other.dimensions)])
def __repr__(self):
dims = short_repr(self.dimensions, affix_size=15)
return "Space([{}])".format(",\n ".join(map(str, dims)))
def __iter__(self):
return iter(self.dimensions)
def __len__(self):
"""Determine the number of possible search points in :attr:`dimensions`
Returns
-------
search_space_size: Integer, or `sys.maxsize`
The number of different hyperparameter search points. If the hyperparameter search space
is infinitely large, `sys.maxsize` is returned to represent `np.inf`, which cannot
itself be returned because `__len__` is required to produce an int >= 0"""
if any(isinstance(_, Real) for _ in self.dimensions):
search_space_size = maxsize
else:
search_space_size = reduce(
lambda x, y: x * y,
[
(_.high - _.low + 1) if isinstance(_, Integer) else len(_.bounds)
for _ in self.dimensions
],
1,
)
return search_space_size
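    # Worked example (added for illustration; not in the original source):
    # `Space([(0, 1), ["a", "b"]])` has (1 - 0 + 1) * 2 = 4 search points, so
    # `len(space)` returns 4, whereas any `Real` dimension makes the space
    # uncountable and `len(space)` returns `sys.maxsize` instead.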
def __contains__(self, point):
"""Determine whether `point` fits within the bounds of the space
Parameters
----------
point: List
Search space point, expected to be of the same length as :attr:`dimensions`
Returns
-------
Boolean
True if `point` fits within :attr:`dimensions`. Else, False"""
for component, dim in zip(point, self.dimensions):
if component not in dim:
return False
return True
##################################################
# Core Methods
##################################################
def rvs(self, n_samples=1, random_state=None):
"""Draw random samples. Samples are in the original (untransformed) space. They must be
transformed before being passed to a model or minimizer via :meth:`transform`
Parameters
----------
n_samples: Int, default=1
Number of samples to be drawn from the space
random_state: Int, RandomState, or None, default=None
Set random state to something other than None for reproducible results
Returns
-------
List
Randomly drawn samples from the original space. Will be a list of lists, of shape
(`n_samples`, :attr:`n_dims`)"""
rng = check_random_state(random_state)
#################### Draw ####################
columns = []
for dim in self.dimensions:
new_val = None
try:
if sp_version < (0, 16):
new_val = dim.rvs(n_samples=n_samples)
else:
new_val = dim.rvs(n_samples=n_samples, random_state=rng)
except TypeError: # `'<' not supported between instances of 'Version' and 'str'`
new_val = dim.rvs(n_samples=n_samples, random_state=rng)
finally:
columns.append(new_val)
#################### Transpose ####################
rows = []
# TODO: Use `np.transpose`? Might that screw up the dimension types (mostly `Categorical`)
for i in range(n_samples):
r = []
for j in range(self.n_dims):
r.append(columns[j][i])
rows.append(r)
return rows
def transform(self, data):
"""Transform samples from the original space into a warped space
Parameters
----------
data: List
Samples to transform. Should be of shape (<# samples>, :attr:`n_dims`)
Returns
-------
data_t: List
Samples transformed into a warped space. Will be of shape
(<# samples>, :attr:`transformed_n_dims`)
Notes
-----
Expected to be used to project samples into a suitable space for numerical optimization"""
#################### Pack by Dimension ####################
columns = [[] for _ in self.dimensions]
for i in range(len(data)):
for j in range(self.n_dims):
columns[j].append(data[i][j])
#################### Transform ####################
for j in range(self.n_dims):
columns[j] = self.dimensions[j].transform(columns[j])
#################### Repack as Array ####################
data_t = np.hstack([np.asarray(c).reshape((len(data), -1)) for c in columns])
return data_t
def inverse_transform(self, data_t):
"""Inverse transform samples from the warped space back to the original space
Parameters
----------
data_t: List
Samples to inverse transform. Should be of shape
(<# samples>, :attr:`transformed_n_dims`)
Returns
-------
List
Samples transformed back to the original space. Will be of shape
(<# samples>, :attr:`n_dims`)"""
#################### Inverse Transform ####################
columns = []
start = 0
for j in range(self.n_dims):
dim = self.dimensions[j]
offset = dim.transformed_size
if offset == 1:
columns.append(dim.inverse_transform(data_t[:, start]))
else:
columns.append(dim.inverse_transform(data_t[:, start : start + offset]))
start += offset
#################### Transpose ####################
rows = []
# TODO: Use `np.transpose`? Might that screw up the dimension types (mostly `Categorical`)
for i in range(len(data_t)):
r = []
for j in range(self.n_dims):
r.append(columns[j][i])
rows.append(r)
return rows
##################################################
# Descriptive Properties
##################################################
@property
def n_dims(self) -> int:
"""Dimensionality of the original space
Returns
-------
Int
Length of :attr:`dimensions`"""
return len(self.dimensions)
@property
def transformed_n_dims(self) -> int:
"""Dimensionality of the warped space
Returns
-------
Int
Sum of the `transformed_size` of all dimensions in :attr:`dimensions`"""
return sum([dim.transformed_size for dim in self.dimensions])
@property
def bounds(self):
"""The dimension bounds, in the original space
Returns
-------
List
Collection of the `bounds` of each dimension in :attr:`dimensions`"""
b = []
for dim in self.dimensions:
if dim.size == 1:
b.append(dim.bounds)
else:
b.extend(dim.bounds)
return b
@property
def transformed_bounds(self):
"""The dimension bounds, in the warped space
Returns
-------
List
Collection of the `transformed_bounds` of each dimension in :attr:`dimensions`"""
b = []
for dim in self.dimensions:
if dim.transformed_size == 1:
b.append(dim.transformed_bounds)
else:
b.extend(dim.transformed_bounds)
return b
@property
def is_real(self):
"""Whether :attr:`dimensions` contains exclusively `Real` dimensions
Returns
-------
Boolean
True if all dimensions in :attr:`dimensions` are `Real`. Else, False"""
return all([isinstance(dim, Real) for dim in self.dimensions])
@property
def is_categorical(self) -> bool:
"""Whether :attr:`dimensions` contains exclusively `Categorical` dimensions
Returns
-------
Boolean
True if all dimensions in :attr:`dimensions` are `Categorical`. Else, False"""
return all([isinstance(dim, Categorical) for dim in self.dimensions])
##################################################
# Helper Methods
##################################################
def names(self, use_location=True):
"""Retrieve the names, or locations of all dimensions in the hyperparameter search space
Parameters
----------
use_location: Boolean, default=True
If True and a dimension has a non-null attribute called 'location', its value will be
used instead of 'name'
Returns
-------
names: List
A list of strings or tuples, in which each value is the name or location of the
dimension at that index"""
names = []
for dimension in self.dimensions:
if use_location and hasattr(dimension, "location") and dimension.location:
names.append(dimension.location)
else:
names.append(dimension.name)
return names
def get_by_name(self, name, use_location=True, default=NONE):
"""Retrieve a single dimension by its name
Parameters
----------
name: Tuple, or str
Name of the dimension in :attr:`dimensions` to return
use_location: Boolean, default=True
If True and a dimension has a non-null attribute called "location", its value will be
used instead of that dimension's "name"
default: Any (optional)
If given and `name` is not found, `default` will be returned. Otherwise, `KeyError` will
be raised when `name` is not found
Returns
-------
Dimension
Dimension subclass in :attr:`dimensions`, whose "name" attribute is equal to `name`"""
for dimension in self.dimensions:
if use_location and getattr(dimension, "location", None) == name:
return dimension
elif dimension.name == name:
return dimension
if default != NONE:
return default
raise KeyError(f"{name} not found in dimensions")
def distance(self, point_a, point_b):
"""Compute distance between two points in this space. Both `point_a` and `point_b` are
expected to be of the same length as :attr:`dimensions`, with values corresponding to the
`Dimension` bounds of :attr:`dimensions`
Parameters
----------
point_a: List
First point
point_b: List
Second point
Returns
-------
Number
Distance between `point_a` and `point_b`"""
distance = 0.0
for a, b, dim in zip(point_a, point_b, self.dimensions):
distance += dim.distance(a, b)
return distance
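# Round-trip sketch (an illustrative assumption, not code from the original
# file): samples drawn with `rvs` live in the original space and must pass
# through `transform` before optimization, then `inverse_transform` to return:
#   space = Space([(0.0, 1.0), (1, 10), ["a", "b"]])
#   samples = space.rvs(n_samples=5, random_state=0)
#   warped = space.transform(samples)            # shape (5, space.transformed_n_dims)
#   restored = space.inverse_transform(warped)   # back to the original values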
def normalize_dimensions(dimensions):
"""Create a `Space` where all dimensions are instructed to be normalized to unit range. Note
that this doesn't *really* return normalized `dimensions`. It just returns the given
`dimensions`, with each one's `transform` set to the appropriate value, so that when each
dimension's :meth:`transform` is called, the dimensions are actually normalized
Parameters
----------
dimensions: List
List of search space dimensions. Each search dimension can be defined as any of the
following: 1) a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer` dimensions).
2) A `(lower_bound, upper_bound, "prior")` tuple (for `Real` dimensions).
3) A list of categories (for `Categorical` dimensions).
4) An instance of a `Dimension` object (`Real`, `Integer`, or `Categorical`)
Returns
-------
:class:`hyperparameter_hunter.space.Space`
Hyperparameter space class instance, in which dimensions have been instructed to be
normalized to unit range upon invocation of the `transform` method
Raises
------
RuntimeError
If a processed element of `dimensions` is not one of: `Real`, `Integer`, `Categorical`
Notes
-----
The upper and lower bounds are inclusive for `Integer` dimensions"""
space = Space(dimensions)
transformed_dimensions = []
if space.is_categorical:
for dim in space:
            # `skopt.utils.normalize_dimensions` comments on explicitly setting
# `transform="identity"`, so apparently there's a good reason for it...
# Using original `transform` fixes all-`Categorical`/`BayesianOptPro` bug and proper
# saved experiment result matching, but optimizer could be secretly misbehaving...
transformed_dimensions.append(
Categorical(dim.categories, dim.prior, transform=dim.transform_, name=dim.name)
# Categorical(dim.categories, dim.prior, transform="identity", name=dim.name)
)
else:
for dim in space.dimensions:
if isinstance(dim, Categorical):
transformed_dimensions.append(dim)
elif isinstance(dim, Real):
transformed_dimensions.append(
Real(dim.low, dim.high, dim.prior, transform="normalize", name=dim.name)
)
elif isinstance(dim, Integer):
transformed_dimensions.append(
Integer(dim.low, dim.high, transform="normalize", name=dim.name)
)
else:
raise RuntimeError(f"Unknown dimension type: {type(dim)}")
#################### Replace Lost Attributes ####################
if hasattr(dim, "location"):
transformed_dimensions[-1].location = dim.location
return Space(transformed_dimensions)
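# Usage sketch (an assumption for illustration only): the returned Space maps
# numeric dimensions onto the unit cube once `transform` is invoked:
#   norm_space = normalize_dimensions([(1, 100), (0.001, 1.0, "log-uniform")])
#   norm_space.transform([[1, 0.001]])   # both components land at 0.0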
| [
"sklearn.utils.check_random_state",
"hyperparameter_hunter.utils.general_utils.short_repr",
"numpy.asarray",
"hyperparameter_hunter.space.dimensions.Real",
"hyperparameter_hunter.space.dimensions.Categorical",
"hyperparameter_hunter.space.dimensions.Integer"
] | [((3380, 3412), 'hyperparameter_hunter.space.dimensions.Categorical', 'Categorical', (['dimension'], {}), '(dimension, **kwargs)\n', (3391, 3412), False, 'from hyperparameter_hunter.space.dimensions import Dimension, Real, Integer, Categorical\n'), ((4220, 4252), 'hyperparameter_hunter.space.dimensions.Categorical', 'Categorical', (['dimension'], {}), '(dimension, **kwargs)\n', (4231, 4252), False, 'from hyperparameter_hunter.space.dimensions import Dimension, Real, Integer, Categorical\n'), ((5360, 5402), 'hyperparameter_hunter.utils.general_utils.short_repr', 'short_repr', (['self.dimensions'], {'affix_size': '(15)'}), '(self.dimensions, affix_size=15)\n', (5370, 5402), False, 'from hyperparameter_hunter.utils.general_utils import short_repr\n'), ((7798, 7830), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (7816, 7830), False, 'from sklearn.utils import check_random_state\n'), ((3553, 3585), 'hyperparameter_hunter.space.dimensions.Categorical', 'Categorical', (['dimension'], {}), '(dimension, **kwargs)\n', (3564, 3585), False, 'from hyperparameter_hunter.space.dimensions import Dimension, Real, Integer, Categorical\n'), ((4084, 4110), 'hyperparameter_hunter.space.dimensions.Real', 'Real', (['*dimension'], {}), '(*dimension, **kwargs)\n', (4088, 4110), False, 'from hyperparameter_hunter.space.dimensions import Dimension, Real, Integer, Categorical\n'), ((4144, 4176), 'hyperparameter_hunter.space.dimensions.Categorical', 'Categorical', (['dimension'], {}), '(dimension, **kwargs)\n', (4155, 4176), False, 'from hyperparameter_hunter.space.dimensions import Dimension, Real, Integer, Categorical\n'), ((3681, 3710), 'hyperparameter_hunter.space.dimensions.Integer', 'Integer', (['*dimension'], {}), '(*dimension, **kwargs)\n', (3688, 3710), False, 'from hyperparameter_hunter.space.dimensions import Dimension, Real, Integer, Categorical\n'), ((18048, 18127), 'hyperparameter_hunter.space.dimensions.Categorical', 'Categorical', (['dim.categories', 'dim.prior'], {'transform': 'dim.transform_', 'name': 'dim.name'}), '(dim.categories, dim.prior, transform=dim.transform_, name=dim.name)\n', (18059, 18127), False, 'from hyperparameter_hunter.space.dimensions import Dimension, Real, Integer, Categorical\n'), ((3802, 3828), 'hyperparameter_hunter.space.dimensions.Real', 'Real', (['*dimension'], {}), '(*dimension, **kwargs)\n', (3806, 3828), False, 'from hyperparameter_hunter.space.dimensions import Dimension, Real, Integer, Categorical\n'), ((9828, 9841), 'numpy.asarray', 'np.asarray', (['c'], {}), '(c)\n', (9838, 9841), True, 'import numpy as np\n'), ((18486, 18558), 'hyperparameter_hunter.space.dimensions.Real', 'Real', (['dim.low', 'dim.high', 'dim.prior'], {'transform': '"""normalize"""', 'name': 'dim.name'}), "(dim.low, dim.high, dim.prior, transform='normalize', name=dim.name)\n", (18490, 18558), False, 'from hyperparameter_hunter.space.dimensions import Dimension, Real, Integer, Categorical\n'), ((18687, 18751), 'hyperparameter_hunter.space.dimensions.Integer', 'Integer', (['dim.low', 'dim.high'], {'transform': '"""normalize"""', 'name': 'dim.name'}), "(dim.low, dim.high, transform='normalize', name=dim.name)\n", (18694, 18751), False, 'from hyperparameter_hunter.space.dimensions import Dimension, Real, Integer, Categorical\n')] |
import numpy as np
from PIL import Image
import pdb
import os
data_path = '/home/datasets/prml/computervision/re-id/sysu-mm01/ori_data'
rgb_cameras = ['cam1','cam2','cam4','cam5']
ir_cameras = ['cam3','cam6']
# load id info
file_path_train = os.path.join(data_path,'exp/train_id.txt')
file_path_val = os.path.join(data_path,'exp/val_id.txt')
with open(file_path_train, 'r') as file:
ids = file.read().splitlines()
ids = [int(y) for y in ids[0].split(',')]
id_train = ["%04d" % x for x in ids]
with open(file_path_val, 'r') as file:
ids = file.read().splitlines()
ids = [int(y) for y in ids[0].split(',')]
id_val = ["%04d" % x for x in ids]
# combine train and val split
id_train.extend(id_val)
files_rgb = []
files_ir = []
for id in sorted(id_train):
for cam in rgb_cameras:
img_dir = os.path.join(data_path,cam,id)
if os.path.isdir(img_dir):
new_files = sorted([img_dir+'/'+i for i in os.listdir(img_dir)])
files_rgb.extend(new_files)
for cam in ir_cameras:
img_dir = os.path.join(data_path,cam,id)
if os.path.isdir(img_dir):
new_files = sorted([img_dir+'/'+i for i in os.listdir(img_dir)])
files_ir.extend(new_files)
# relabel
pid_container = set()
for img_path in files_ir:
pid = int(img_path[-13:-9])
pid_container.add(pid)
pid2label = {pid:label for label, pid in enumerate(pid_container)}
fix_image_width = 144
fix_image_height = 288
def read_imgs(train_image):
train_img = []
train_label = []
for img_path in train_image:
# img
img = Image.open(img_path)
        img = img.resize((fix_image_width, fix_image_height), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10; LANCZOS is the equivalent filter
pix_array = np.array(img)
train_img.append(pix_array)
# label
pid = int(img_path[-13:-9])
pid = pid2label[pid]
train_label.append(pid)
return np.array(train_img), np.array(train_label)
# rgb imges
train_img, train_label = read_imgs(files_rgb)
np.save(data_path + 'train_rgb_resized_img.npy', train_img)
np.save(data_path + 'train_rgb_resized_label.npy', train_label)
# ir imges
train_img, train_label = read_imgs(files_ir)
np.save(data_path + 'train_ir_resized_img.npy', train_img)
np.save(data_path + 'train_ir_resized_label.npy', train_label)
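# Read-back sketch (an assumption mirroring the np.save calls above; note that
# `data_path` has no trailing '/', so the .npy files land beside the ori_data
# directory rather than inside it):
# train_img = np.load(data_path + 'train_rgb_resized_img.npy')
# train_label = np.load(data_path + 'train_rgb_resized_label.npy')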
| [
"numpy.save",
"os.path.isdir",
"PIL.Image.open",
"numpy.array",
"os.path.join",
"os.listdir"
] | [((245, 288), 'os.path.join', 'os.path.join', (['data_path', '"""exp/train_id.txt"""'], {}), "(data_path, 'exp/train_id.txt')\n", (257, 288), False, 'import os\n'), ((306, 347), 'os.path.join', 'os.path.join', (['data_path', '"""exp/val_id.txt"""'], {}), "(data_path, 'exp/val_id.txt')\n", (318, 347), False, 'import os\n'), ((2037, 2096), 'numpy.save', 'np.save', (["(data_path + 'train_rgb_resized_img.npy')", 'train_img'], {}), "(data_path + 'train_rgb_resized_img.npy', train_img)\n", (2044, 2096), True, 'import numpy as np\n'), ((2097, 2160), 'numpy.save', 'np.save', (["(data_path + 'train_rgb_resized_label.npy')", 'train_label'], {}), "(data_path + 'train_rgb_resized_label.npy', train_label)\n", (2104, 2160), True, 'import numpy as np\n'), ((2218, 2276), 'numpy.save', 'np.save', (["(data_path + 'train_ir_resized_img.npy')", 'train_img'], {}), "(data_path + 'train_ir_resized_img.npy', train_img)\n", (2225, 2276), True, 'import numpy as np\n'), ((2277, 2339), 'numpy.save', 'np.save', (["(data_path + 'train_ir_resized_label.npy')", 'train_label'], {}), "(data_path + 'train_ir_resized_label.npy', train_label)\n", (2284, 2339), True, 'import numpy as np\n'), ((841, 873), 'os.path.join', 'os.path.join', (['data_path', 'cam', 'id'], {}), '(data_path, cam, id)\n', (853, 873), False, 'import os\n'), ((883, 905), 'os.path.isdir', 'os.path.isdir', (['img_dir'], {}), '(img_dir)\n', (896, 905), False, 'import os\n'), ((1082, 1114), 'os.path.join', 'os.path.join', (['data_path', 'cam', 'id'], {}), '(data_path, cam, id)\n', (1094, 1114), False, 'import os\n'), ((1124, 1146), 'os.path.isdir', 'os.path.isdir', (['img_dir'], {}), '(img_dir)\n', (1137, 1146), False, 'import os\n'), ((1623, 1643), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1633, 1643), False, 'from PIL import Image\n'), ((1743, 1756), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1751, 1756), True, 'import numpy as np\n'), ((1928, 1947), 'numpy.array', 'np.array', (['train_img'], {}), '(train_img)\n', (1936, 1947), True, 'import numpy as np\n'), ((1949, 1970), 'numpy.array', 'np.array', (['train_label'], {}), '(train_label)\n', (1957, 1970), True, 'import numpy as np\n'), ((962, 981), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (972, 981), False, 'import os\n'), ((1203, 1222), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (1213, 1222), False, 'import os\n')] |
import netCDF4 as nc
import numpy as np
dataset = nc.Dataset('ocean_dataset.nc', 'w')
dataset.createDimension('xu', size=3600)
dataset.createDimension('yu', size=2700)
dataset.createDimension('xt', size=3600)
dataset.createDimension('yt', size=2700)
dataset.createDimension('z', size=75)
dataset.createDimension('time', size=1)
T = dataset.createVariable('T', "f8", ('time', 'z', 'yt', 'xt'))
u = dataset.createVariable('u', "f8", ('time', 'z', 'yu', 'xu'))
v = dataset.createVariable('v', "f8", ('time', 'z', 'yu', 'xu'))
u[0, :, :, :] = np.random.rand(75, 2700, 3600)
v[0, :, :, :] = np.random.rand(75, 2700, 3600)
T[0, :, :, :] = np.random.rand(75, 2700, 3600)
dataset.close()
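# Read-back sketch (illustrative assumption, not part of the original script):
# ds = nc.Dataset('ocean_dataset.nc', 'r')
# T = ds.variables['T'][0, :, :, :]   # temperature field, shape (75, 2700, 3600)
# ds.close()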
| [
"netCDF4.Dataset",
"numpy.random.rand"
] | [((51, 86), 'netCDF4.Dataset', 'nc.Dataset', (['"""ocean_dataset.nc"""', '"""w"""'], {}), "('ocean_dataset.nc', 'w')\n", (61, 86), True, 'import netCDF4 as nc\n'), ((543, 573), 'numpy.random.rand', 'np.random.rand', (['(75)', '(2700)', '(3600)'], {}), '(75, 2700, 3600)\n', (557, 573), True, 'import numpy as np\n'), ((590, 620), 'numpy.random.rand', 'np.random.rand', (['(75)', '(2700)', '(3600)'], {}), '(75, 2700, 3600)\n', (604, 620), True, 'import numpy as np\n'), ((637, 667), 'numpy.random.rand', 'np.random.rand', (['(75)', '(2700)', '(3600)'], {}), '(75, 2700, 3600)\n', (651, 667), True, 'import numpy as np\n')] |
""" This is the package responsible for realizing the data handling process
for features and labels previously generated. """
import pandas as pd
import numpy as np
import pyCBPE.constants as consts
def load():
""" Load dataset from filepat and return it as a pandas dataframe. """
df_split_1 = pd.read_csv(consts.FEATURES_AND_LABELS_DF_SPLIT_1_PATH, index_col=False, header=0)
df_split_2 = pd.read_csv(consts.FEATURES_AND_LABELS_DF_SPLIT_2_PATH, index_col=False, header=0)
df_split_3 = pd.read_csv(consts.FEATURES_AND_LABELS_DF_SPLIT_3_PATH, index_col=False, header=0)
df_split_4 = pd.read_csv(consts.FEATURES_AND_LABELS_DF_SPLIT_4_PATH, index_col=False, header=0)
dataframe = pd.concat([df_split_1, df_split_2, df_split_3, df_split_4])
return dataframe.reset_index(drop=True)
def handle(dataframe):
""" Handle dataset values by dropping values that are not going to be
used. """
handled_df = dataframe.copy()
handled_df = handled_df.replace(-1, np.nan) # TO DO: Change -1 to a constant in constants.py
handled_df = handled_df.replace([np.inf , -np.inf], np.nan)
handled_df = handled_df.dropna()
handled_df[consts.HEART_RATE] = handled_df[consts.HEART_RATE].astype(int)
handled_df[consts.LABELS_COLUMNS] = handled_df[consts.LABELS_COLUMNS].astype(int)
return handled_df.reset_index(drop=True)
def remove_outliers(dataframe):
""" Remove outliers based on labels values. """
temp_df = dataframe.copy()
labels_df = temp_df[consts.LABELS_COLUMNS]
summary_df = pd.DataFrame(labels_df.describe())
sbp_min = summary_df.loc["min", consts.SBP]
sbp_max = summary_df.loc["max", consts.SBP]
dbp_min = summary_df.loc["min", consts.DBP]
dbp_max = summary_df.loc["max", consts.DBP]
map_min = summary_df.loc["min", consts.MAP]
map_max = summary_df.loc["max", consts.MAP]
sbp_condition = (temp_df.loc[:, consts.SBP] > sbp_min) & (temp_df.loc[:, consts.SBP] < sbp_max)
dbp_condition = (temp_df.loc[:, consts.DBP] > dbp_min) & (temp_df.loc[:, consts.DBP] < dbp_max)
map_condition = (temp_df.loc[:, consts.MAP] > map_min) & (temp_df.loc[:, consts.MAP] < map_max)
outlier_free_df = temp_df.loc[sbp_condition & dbp_condition & map_condition, :]
return outlier_free_df.reset_index(drop=True)
def get_features_as_array(dataframe):
""" Get features columns from dataframe as a numpy array."""
features_dataframe = dataframe.drop(consts.LABELS_COLUMNS, axis=1)
features_array = np.array(features_dataframe)
return features_array
def get_labels_as_array(dataframe):
""" Get labels columns from dataframe as a numpy array."""
labels_dataframe = dataframe[consts.LABELS_COLUMNS]
labels_array = np.array(labels_dataframe)
return labels_array
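# End-to-end sketch of the intended pipeline (an assumption inferred from the
# functions above, not code shipped with the package):
# df = remove_outliers(handle(load()))
# X = get_features_as_array(df)
# y = get_labels_as_array(df)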
| [
"pandas.read_csv",
"numpy.array",
"pandas.concat"
] | [((305, 391), 'pandas.read_csv', 'pd.read_csv', (['consts.FEATURES_AND_LABELS_DF_SPLIT_1_PATH'], {'index_col': '(False)', 'header': '(0)'}), '(consts.FEATURES_AND_LABELS_DF_SPLIT_1_PATH, index_col=False,\n header=0)\n', (316, 391), True, 'import pandas as pd\n'), ((405, 491), 'pandas.read_csv', 'pd.read_csv', (['consts.FEATURES_AND_LABELS_DF_SPLIT_2_PATH'], {'index_col': '(False)', 'header': '(0)'}), '(consts.FEATURES_AND_LABELS_DF_SPLIT_2_PATH, index_col=False,\n header=0)\n', (416, 491), True, 'import pandas as pd\n'), ((505, 591), 'pandas.read_csv', 'pd.read_csv', (['consts.FEATURES_AND_LABELS_DF_SPLIT_3_PATH'], {'index_col': '(False)', 'header': '(0)'}), '(consts.FEATURES_AND_LABELS_DF_SPLIT_3_PATH, index_col=False,\n header=0)\n', (516, 591), True, 'import pandas as pd\n'), ((605, 691), 'pandas.read_csv', 'pd.read_csv', (['consts.FEATURES_AND_LABELS_DF_SPLIT_4_PATH'], {'index_col': '(False)', 'header': '(0)'}), '(consts.FEATURES_AND_LABELS_DF_SPLIT_4_PATH, index_col=False,\n header=0)\n', (616, 691), True, 'import pandas as pd\n'), ((705, 764), 'pandas.concat', 'pd.concat', (['[df_split_1, df_split_2, df_split_3, df_split_4]'], {}), '([df_split_1, df_split_2, df_split_3, df_split_4])\n', (714, 764), True, 'import pandas as pd\n'), ((2508, 2536), 'numpy.array', 'np.array', (['features_dataframe'], {}), '(features_dataframe)\n', (2516, 2536), True, 'import numpy as np\n'), ((2740, 2766), 'numpy.array', 'np.array', (['labels_dataframe'], {}), '(labels_dataframe)\n', (2748, 2766), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# coding: utf-8
import SimpleITK as sitk
import numpy as np
import copy
import glob
import pandas
import datetime
path="/home/alicja/"
def obtainDCEimages(pat_no="04",timept="1",path=path):
folder="PET_LAB_PROCESSED/WES_0"+pat_no+"/IMAGES/"
baseline_image=sitk.ReadImage(path+folder+"WES_0"+pat_no+"_TIMEPOINT_"+timept+"_MRI_T1W_DCE_ACQ_0.nii.gz")
baseline_image=sitk.Cast(baseline_image,sitk.sitkFloat64)
DCE_images=glob.glob(path+folder+"WES_0"+pat_no+"_TIMEPOINT_"+timept+"_MRI_T1W_DCE_ACQ_*.nii.gz")
DCE_images=[image for image in DCE_images if "ACQ_0.nii.gz" not in image]
return(baseline_image,DCE_images)
def returnNormImages(baseline_image,DCE_images):
norm_images=[]
for image in DCE_images:
image=sitk.Cast(sitk.ReadImage(image),sitk.sitkFloat64)
img_array=sitk.GetArrayFromImage(image)
baseline_array=sitk.GetArrayFromImage(baseline_image)
#new_img_arr=100*(img_array-baseline_array)/baseline_array
a=img_array-baseline_array
b=baseline_array
new_img_arr = 100*np.divide(a, b, out=np.zeros_like(a), where=b!=0) #to avoid divide by zero
new_img=sitk.GetImageFromArray(new_img_arr)
new_img.CopyInformation(image)
norm_images.append(new_img)
print(f"image added to array")
return(norm_images)
def generateMPE(norm_images, pat_no, timept,path):
stacked_arr = np.stack([sitk.GetArrayFromImage(i) for i in norm_images])
max_arr_values = np.max(stacked_arr, axis=0)
MPE_img = sitk.GetImageFromArray(max_arr_values)
MPE_img.CopyInformation(norm_images[0])
folder="PET_LAB_PROCESSED/WES_0"+pat_no+"/IMAGES/"
sitk.WriteImage(MPE_img, path+folder+"WES_0"+pat_no+"_TIMEPOINT_"+timept+"_MRI_T1W_DCE_MPE.nii.gz")
return(MPE_img)
def obtainImageTimes(pat_no,timept):
patient_data=pandas.read_csv(path+"PET_LAB_PROCESSED/PATIENT_DATA.csv")
DCE_data=patient_data[patient_data["IMAGE_TYPE"].str.contains("MRI_T1W_DCE_ACQ")]
DCE_data=(DCE_data[DCE_data["TIMEPOINT"]==timept])
DCE_data=DCE_data[DCE_data["PATIENT_ID"]==int(pat_no)]
time_dict={}
baseline_info =DCE_data[DCE_data["IMAGE_TYPE"]=="MRI_T1W_DCE_ACQ_0"]
baseline_time = baseline_info["TIME_HHMMSS"].values[0]
time_dict.update({0:baseline_time})
for item in range(1,6):
image=DCE_data[DCE_data["IMAGE_TYPE"]=="MRI_T1W_DCE_ACQ_"+str(item)]
val = image["TIME_HHMMSS"].values[0]
key=item
time_dict.update({key: val})
baseline_time=str(time_dict.get(0))
if len(baseline_time)==5:
baseline_time="0"+baseline_time
format = '%H%M%S'
time_diff={}
for timepoint in range(1,6):
time = str(time_dict.get(timepoint))
if len(time)==5:
time="0"+time
startDateTime = datetime.datetime.strptime(baseline_time, format)
endDateTime = datetime.datetime.strptime(time, format)
diff = endDateTime - startDateTime
time_diff.update({timepoint: int(diff.total_seconds())})
return(time_diff)
def generateTTP(norm_images,pat_no,timept,path):
stacked_arr = np.stack([sitk.GetArrayFromImage(i) for i in norm_images])
max_arr = np.argmax(stacked_arr, axis=0)+1
np.unique(max_arr, return_counts=True)
argmax_img=sitk.GetImageFromArray(max_arr)
argmax_img.CopyInformation(norm_images[0])
argmax_img = sitk.Cast(argmax_img, sitk.sitkInt16)
TTP_arr=sitk.GetArrayFromImage(argmax_img)
new_TTP_arr=copy.deepcopy(TTP_arr)
TTP_vals=obtainImageTimes(pat_no,timept)
for array_idx in range(1,np.max(TTP_arr)+1):
new_TTP_arr[TTP_arr==array_idx]=TTP_vals.get(array_idx)
TTP_img=sitk.GetImageFromArray(new_TTP_arr)
TTP_img.CopyInformation(norm_images[0])
TTP_img=sitk.Cast(TTP_img, sitk.sitkInt16)
folder="PET_LAB_PROCESSED/WES_0"+pat_no+"/IMAGES/"
sitk.WriteImage(TTP_img, path+folder+"WES_0"+pat_no+"_TIMEPOINT_"+timept+"_MRI_T1W_DCE_TTP.nii.gz")
return(TTP_img)
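# Single-study sketch (an illustrative assumption; `runDCEgeneration` below
# performs the same loop over every patient and timepoint):
# baseline, dce_files = obtainDCEimages(pat_no="06", timept="1", path=path)
# norm = returnNormImages(baseline, dce_files)
# mpe = generateMPE(norm, "06", "1", path)   # writes and returns the MPE image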
patient_list=["06","07","08","09","10","12","13","14","15","16","18","19","21","23"] #"04","05",
def runDCEgeneration(path="/home/alicja/",patient_list=patient_list):
MPE_images_generated=0
#TTP_images_generated=0
timepoints=["1","2","3"]
for pat_no in patient_list:
for timept in timepoints:
baseline_image,DCE_images = obtainDCEimages(pat_no=pat_no,timept=timept,path=path)
norm_images=returnNormImages(baseline_image,DCE_images)
_=generateMPE(norm_images,pat_no,timept,path)
MPE_images_generated+=1
print("Number of MPE images: ", MPE_images_generated)
#_=generateTTP(norm_images,pat_no,timept,path)
#TTP_images_generated+=1
#print("Number of TTP images: ", TTP_images_generated)
return(MPE_images_generated)#,TTP_images_generated)
MPE_images_generated=runDCEgeneration(path=path) #,TTP_images_generated
print("Total number MPE images:",MPE_images_generated)#,"Total number TTP images:", TTP_images_generated) | [
"copy.deepcopy",
"numpy.zeros_like",
"numpy.argmax",
"pandas.read_csv",
"SimpleITK.ReadImage",
"SimpleITK.GetArrayFromImage",
"numpy.max",
"datetime.datetime.strptime",
"SimpleITK.GetImageFromArray",
"glob.glob",
"SimpleITK.WriteImage",
"SimpleITK.Cast",
"numpy.unique"
] | [((290, 397), 'SimpleITK.ReadImage', 'sitk.ReadImage', (["(path + folder + 'WES_0' + pat_no + '_TIMEPOINT_' + timept +\n '_MRI_T1W_DCE_ACQ_0.nii.gz')"], {}), "(path + folder + 'WES_0' + pat_no + '_TIMEPOINT_' + timept +\n '_MRI_T1W_DCE_ACQ_0.nii.gz')\n", (304, 397), True, 'import SimpleITK as sitk\n'), ((401, 444), 'SimpleITK.Cast', 'sitk.Cast', (['baseline_image', 'sitk.sitkFloat64'], {}), '(baseline_image, sitk.sitkFloat64)\n', (410, 444), True, 'import SimpleITK as sitk\n'), ((459, 561), 'glob.glob', 'glob.glob', (["(path + folder + 'WES_0' + pat_no + '_TIMEPOINT_' + timept +\n '_MRI_T1W_DCE_ACQ_*.nii.gz')"], {}), "(path + folder + 'WES_0' + pat_no + '_TIMEPOINT_' + timept +\n '_MRI_T1W_DCE_ACQ_*.nii.gz')\n", (468, 561), False, 'import glob\n'), ((1502, 1529), 'numpy.max', 'np.max', (['stacked_arr'], {'axis': '(0)'}), '(stacked_arr, axis=0)\n', (1508, 1529), True, 'import numpy as np\n'), ((1544, 1582), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['max_arr_values'], {}), '(max_arr_values)\n', (1566, 1582), True, 'import SimpleITK as sitk\n'), ((1686, 1801), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['MPE_img', "(path + folder + 'WES_0' + pat_no + '_TIMEPOINT_' + timept +\n '_MRI_T1W_DCE_MPE.nii.gz')"], {}), "(MPE_img, path + folder + 'WES_0' + pat_no + '_TIMEPOINT_' +\n timept + '_MRI_T1W_DCE_MPE.nii.gz')\n", (1701, 1801), True, 'import SimpleITK as sitk\n'), ((1861, 1921), 'pandas.read_csv', 'pandas.read_csv', (["(path + 'PET_LAB_PROCESSED/PATIENT_DATA.csv')"], {}), "(path + 'PET_LAB_PROCESSED/PATIENT_DATA.csv')\n", (1876, 1921), False, 'import pandas\n'), ((3247, 3285), 'numpy.unique', 'np.unique', (['max_arr'], {'return_counts': '(True)'}), '(max_arr, return_counts=True)\n', (3256, 3285), True, 'import numpy as np\n'), ((3301, 3332), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['max_arr'], {}), '(max_arr)\n', (3323, 3332), True, 'import SimpleITK as sitk\n'), ((3397, 3434), 'SimpleITK.Cast', 'sitk.Cast', (['argmax_img', 'sitk.sitkInt16'], {}), '(argmax_img, sitk.sitkInt16)\n', (3406, 3434), True, 'import SimpleITK as sitk\n'), ((3452, 3486), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['argmax_img'], {}), '(argmax_img)\n', (3474, 3486), True, 'import SimpleITK as sitk\n'), ((3503, 3525), 'copy.deepcopy', 'copy.deepcopy', (['TTP_arr'], {}), '(TTP_arr)\n', (3516, 3525), False, 'import copy\n'), ((3701, 3736), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['new_TTP_arr'], {}), '(new_TTP_arr)\n', (3723, 3736), True, 'import SimpleITK as sitk\n'), ((3793, 3827), 'SimpleITK.Cast', 'sitk.Cast', (['TTP_img', 'sitk.sitkInt16'], {}), '(TTP_img, sitk.sitkInt16)\n', (3802, 3827), True, 'import SimpleITK as sitk\n'), ((3888, 4003), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['TTP_img', "(path + folder + 'WES_0' + pat_no + '_TIMEPOINT_' + timept +\n '_MRI_T1W_DCE_TTP.nii.gz')"], {}), "(TTP_img, path + folder + 'WES_0' + pat_no + '_TIMEPOINT_' +\n timept + '_MRI_T1W_DCE_TTP.nii.gz')\n", (3903, 4003), True, 'import SimpleITK as sitk\n'), ((842, 871), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['image'], {}), '(image)\n', (864, 871), True, 'import SimpleITK as sitk\n'), ((895, 933), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['baseline_image'], {}), '(baseline_image)\n', (917, 933), True, 'import SimpleITK as sitk\n'), ((1178, 1213), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['new_img_arr'], {}), '(new_img_arr)\n', (1200, 1213), True, 'import SimpleITK as sitk\n'), ((2821, 2870), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['baseline_time', 'format'], {}), '(baseline_time, format)\n', (2847, 2870), False, 'import datetime\n'), ((2893, 2933), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['time', 'format'], {}), '(time, format)\n', (2919, 2933), False, 'import datetime\n'), ((3210, 3240), 'numpy.argmax', 'np.argmax', (['stacked_arr'], {'axis': '(0)'}), '(stacked_arr, axis=0)\n', (3219, 3240), True, 'import numpy as np\n'), ((784, 805), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['image'], {}), '(image)\n', (798, 805), True, 'import SimpleITK as sitk\n'), ((1432, 1457), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['i'], {}), '(i)\n', (1454, 1457), True, 'import SimpleITK as sitk\n'), ((3147, 3172), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['i'], {}), '(i)\n', (3169, 3172), True, 'import SimpleITK as sitk\n'), ((3604, 3619), 'numpy.max', 'np.max', (['TTP_arr'], {}), '(TTP_arr)\n', (3610, 3619), True, 'import numpy as np\n'), ((1107, 1123), 'numpy.zeros_like', 'np.zeros_like', (['a'], {}), '(a)\n', (1120, 1123), True, 'import numpy as np\n')] |
import os
import time
import numpy as np
# for shot in available_shots.iterkeys():
print("## perform experiments on aloi ##")
num_of_classes = 1000
leaf_example_multiplier = 4 # 8
shots = 100
lr = 0.001
bits = 29
alpha = 0.1 # 0.3
passes = 3 # 3 #5
use_oas = False
dream_at_update = 0
learn_at_leaf = True # turn on leaf at leaf actually works better
num_queries = 5 # int(np.log(passes*num_of_classes*shots))
loss = "squared"
dream_repeats = 3
online = False
tree_node = int(
2
* passes
* (
num_of_classes
* shots
/ (np.log(num_of_classes * shots) / np.log(2) * leaf_example_multiplier)
)
)
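# Worked example of the sizing above (added for illustration): with
# num_of_classes * shots = 100,000 examples, log2(100000) is about 16.6, so
# tree_node = int(2 * 3 * 100000 / (16.6 * 4)), roughly 9030 internal nodes.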
train_data = "aloi_train.vw"
test_data = "aloi_test.vw"
if os.path.exists(train_data) is not True:
os.system("wget http://kalman.ml.cmu.edu/wen_datasets/{}".format(train_data))
if os.path.exists(test_data) is not True:
os.system("wget http://kalman.ml.cmu.edu/wen_datasets/{}".format(test_data))
saved_model = "{}.vw".format(train_data)
print("## Training...")
start = time.time()
command_train = f"../../build/vowpalwabbit/vw -d {train_data} --memory_tree {tree_node} {'--learn_at_leaf' if learn_at_leaf else ''} --max_number_of_labels {num_of_classes} --dream_at_update {dream_at_update} --dream_repeats {dream_repeats} {'--oas' if use_oas else ''} {'--online' if online else ''} --leaf_example_multiplier {leaf_example_multiplier} --alpha {alpha} -l {lr} -b {bits} -c --passes {passes} --loss_function {loss} --holdout_off -f {saved_model}"
print(command_train)
os.system(command_train)
train_time = time.time() - start
# test:
print("## Testing...")
start = time.time()
os.system("../../build/vowpalwabbit/vw {} -i {}".format(test_data, saved_model))
test_time = time.time() - start
print("## train time {}, and test time {}".format(train_time, test_time))
| [
"numpy.log",
"os.path.exists",
"os.system",
"time.time"
] | [((1021, 1032), 'time.time', 'time.time', ([], {}), '()\n', (1030, 1032), False, 'import time\n'), ((1517, 1541), 'os.system', 'os.system', (['command_train'], {}), '(command_train)\n', (1526, 1541), False, 'import os\n'), ((1615, 1626), 'time.time', 'time.time', ([], {}), '()\n', (1624, 1626), False, 'import time\n'), ((700, 726), 'os.path.exists', 'os.path.exists', (['train_data'], {}), '(train_data)\n', (714, 726), False, 'import os\n'), ((825, 850), 'os.path.exists', 'os.path.exists', (['test_data'], {}), '(test_data)\n', (839, 850), False, 'import os\n'), ((1555, 1566), 'time.time', 'time.time', ([], {}), '()\n', (1564, 1566), False, 'import time\n'), ((1721, 1732), 'time.time', 'time.time', ([], {}), '()\n', (1730, 1732), False, 'import time\n'), ((562, 592), 'numpy.log', 'np.log', (['(num_of_classes * shots)'], {}), '(num_of_classes * shots)\n', (568, 592), True, 'import numpy as np\n'), ((595, 604), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (601, 604), True, 'import numpy as np\n')] |
# modified from pyAudioAnalysis' audioSegmentation.py
import numpy as np
import scipy
import paa.audioFeatureExtraction as aF
import paa.audioTrainTest as aT
import paa.audioSegmentation as aS
import paa.audioBasicIO as audioBasicIO
from snippet import Snippet
def extractFeatures(x, Fs, shortTermSize, shortTermStep):
x = audioBasicIO.stereo2mono(x)
featureVectors = aF.stFeatureExtraction(x, Fs, Fs*shortTermSize, Fs*shortTermStep)
[featureVectors2, _, _] = aT.normalizeFeatures([featureVectors.T])
featureVectors2 = featureVectors2[0].T
return featureVectors2.T
def musicThumbnailing(x, Fs, shortTermSize=1.0, shortTermStep=0.5, thumbnailSize=10.0, Limit1 = 0, Limit2 = 1):
# self-similarity matrix
x = audioBasicIO.stereo2mono(x)
stFeatures = aF.stFeatureExtraction(x, Fs, Fs*shortTermSize, Fs*shortTermStep)
S = aS.selfSimilarityMatrix(stFeatures)
# moving filter:
M = int(round(thumbnailSize / shortTermStep))
B = np.eye(M,M)
S = scipy.signal.convolve2d(S, B, 'valid')
MIN = np.min(S)
# post-processing (remove main diagonal elements)
for i in range(S.shape[0]):
for j in range(S.shape[1]):
if abs(i-j) < 5.0 / shortTermStep or i > j:
S[i,j] = MIN
# find max position:
S[0:int(Limit1*S.shape[0]), :] = MIN
S[:, 0:int(Limit1*S.shape[0])] = MIN
S[int(Limit2*S.shape[0])::, :] = MIN
S[:, int(Limit2*S.shape[0])::] = MIN
matches = []
maxMax = maxVal = np.max(S)
i1 = i2 = j1 = j2 = 0
Sbak = np.copy(S)
while maxVal > maxMax/3*2 > MIN: # currently arbitrary cutoff
[I, J] = np.unravel_index(S.argmax(), S.shape)
# expand:
i1 = I; i2 = I
j1 = J; j2 = J
while i2-i1<M:
if i1 <=0 or j1<=0 or i2>=S.shape[0]-2 or j2>=S.shape[1]-2:
break
if S[i1-1, j1-1] > S[i2+1,j2+1]:
i1 -= 1
j1 -= 1
else:
i2 += 1
j2 += 1
S[i1, j1] = S[i2, j2] = MIN
# only add to potential matches if we have enough overlap or new record
if i2-i1 >= M:
matches.append(Snippet(
maxVal,
shortTermStep*i1, shortTermStep*i2,
shortTermStep*j1, shortTermStep*j2
))
S[I, J] = MIN
maxVal = np.max(S)
return (matches, Sbak)
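# Usage sketch (assumption: the local `paa` fork keeps pyAudioAnalysis'
# `audioBasicIO.readAudioFile` signature; adjust if it differs):
# [Fs, x] = audioBasicIO.readAudioFile("song.wav")
# matches, S = musicThumbnailing(x, Fs, thumbnailSize=10.0)
# for snip in matches:
#     print(snip)   # candidate repeated segments as Snippet objects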
| [
"paa.audioFeatureExtraction.stFeatureExtraction",
"scipy.signal.convolve2d",
"numpy.copy",
"snippet.Snippet",
"paa.audioBasicIO.stereo2mono",
"numpy.min",
"numpy.max",
"paa.audioTrainTest.normalizeFeatures",
"numpy.eye",
"paa.audioSegmentation.selfSimilarityMatrix"
] | [((330, 357), 'paa.audioBasicIO.stereo2mono', 'audioBasicIO.stereo2mono', (['x'], {}), '(x)\n', (354, 357), True, 'import paa.audioBasicIO as audioBasicIO\n'), ((379, 448), 'paa.audioFeatureExtraction.stFeatureExtraction', 'aF.stFeatureExtraction', (['x', 'Fs', '(Fs * shortTermSize)', '(Fs * shortTermStep)'], {}), '(x, Fs, Fs * shortTermSize, Fs * shortTermStep)\n', (401, 448), True, 'import paa.audioFeatureExtraction as aF\n'), ((475, 515), 'paa.audioTrainTest.normalizeFeatures', 'aT.normalizeFeatures', (['[featureVectors.T]'], {}), '([featureVectors.T])\n', (495, 515), True, 'import paa.audioTrainTest as aT\n'), ((738, 765), 'paa.audioBasicIO.stereo2mono', 'audioBasicIO.stereo2mono', (['x'], {}), '(x)\n', (762, 765), True, 'import paa.audioBasicIO as audioBasicIO\n'), ((783, 852), 'paa.audioFeatureExtraction.stFeatureExtraction', 'aF.stFeatureExtraction', (['x', 'Fs', '(Fs * shortTermSize)', '(Fs * shortTermStep)'], {}), '(x, Fs, Fs * shortTermSize, Fs * shortTermStep)\n', (805, 852), True, 'import paa.audioFeatureExtraction as aF\n'), ((857, 892), 'paa.audioSegmentation.selfSimilarityMatrix', 'aS.selfSimilarityMatrix', (['stFeatures'], {}), '(stFeatures)\n', (880, 892), True, 'import paa.audioSegmentation as aS\n'), ((973, 985), 'numpy.eye', 'np.eye', (['M', 'M'], {}), '(M, M)\n', (979, 985), True, 'import numpy as np\n'), ((993, 1031), 'scipy.signal.convolve2d', 'scipy.signal.convolve2d', (['S', 'B', '"""valid"""'], {}), "(S, B, 'valid')\n", (1016, 1031), False, 'import scipy\n'), ((1043, 1052), 'numpy.min', 'np.min', (['S'], {}), '(S)\n', (1049, 1052), True, 'import numpy as np\n'), ((1490, 1499), 'numpy.max', 'np.max', (['S'], {}), '(S)\n', (1496, 1499), True, 'import numpy as np\n'), ((1537, 1547), 'numpy.copy', 'np.copy', (['S'], {}), '(S)\n', (1544, 1547), True, 'import numpy as np\n'), ((2374, 2383), 'numpy.max', 'np.max', (['S'], {}), '(S)\n', (2380, 2383), True, 'import numpy as np\n'), ((2183, 2282), 'snippet.Snippet', 'Snippet', (['maxVal', '(shortTermStep * i1)', '(shortTermStep * i2)', '(shortTermStep * j1)', '(shortTermStep * j2)'], {}), '(maxVal, shortTermStep * i1, shortTermStep * i2, shortTermStep * j1,\n shortTermStep * j2)\n', (2190, 2282), False, 'from snippet import Snippet\n')] |
import os
import numpy as np
import pandas as pd
from pypfopt import expected_returns
from pypfopt import risk_models
from pypfopt.efficient_frontier import (
EfficientFrontier,
EfficientSemivariance,
EfficientCVaR,
)
from pypfopt.cla import CLA
from pypfopt.expected_returns import returns_from_prices
def resource(name):
return os.path.join(os.path.dirname(__file__), "resources", name)
def get_data():
return pd.read_csv(resource("stock_prices.csv"), parse_dates=True, index_col="date")
def get_benchmark_data():
return pd.read_csv(resource("spy_prices.csv"), parse_dates=True, index_col="date")
def get_market_caps():
mcaps = {
"GOOG": 927e9,
"AAPL": 1.19e12,
"FB": 574e9,
"BABA": 533e9,
"AMZN": 867e9,
"GE": 96e9,
"AMD": 43e9,
"WMT": 339e9,
"BAC": 301e9,
"GM": 51e9,
"T": 61e9,
"UAA": 78e9,
"SHLD": 0,
"XOM": 295e9,
"RRC": 1e9,
"BBY": 22e9,
"MA": 288e9,
"PFE": 212e9,
"JPM": 422e9,
"SBUX": 102e9,
}
return mcaps
def setup_efficient_frontier(
data_only=False, solver=None, verbose=False, solver_options=None
):
df = get_data()
mean_return = expected_returns.mean_historical_return(df)
sample_cov_matrix = risk_models.sample_cov(df)
if data_only:
return mean_return, sample_cov_matrix
return EfficientFrontier(
mean_return,
sample_cov_matrix,
solver=solver,
verbose=verbose,
solver_options=solver_options,
)
def setup_efficient_semivariance(data_only=False, solver=None, verbose=False):
df = get_data().dropna(axis=0, how="any")
mean_return = expected_returns.mean_historical_return(df, compounding=False)
historic_returns = returns_from_prices(df)
if data_only:
return mean_return, historic_returns
return EfficientSemivariance(
mean_return, historic_returns, solver=solver, verbose=verbose
)
def setup_efficient_cvar(
data_only=False, solver=None, verbose=False, solver_options=None
):
df = get_data().dropna(axis=0, how="any")
mean_return = expected_returns.mean_historical_return(df)
historic_returns = returns_from_prices(df)
if data_only:
return mean_return, historic_returns
return EfficientCVaR(
mean_return,
historic_returns,
verbose=verbose,
solver=solver,
solver_options=solver_options,
)
def setup_cla(data_only=False):
df = get_data()
mean_return = expected_returns.mean_historical_return(df)
sample_cov_matrix = risk_models.sample_cov(df)
if data_only:
return mean_return, sample_cov_matrix
return CLA(mean_return, sample_cov_matrix)
def simple_ef_weights(expected_returns, cov_matrix, target_return, weights_sum):
"""
Calculate weights to achieve target_return on the efficient frontier.
The only constraint is the sum of the weights.
Note: This is just a simple test utility, it does not support the generalised
constraints that EfficientFrontier does and is used to check the results
of EfficientFrontier in simple cases. In particular it is not capable of
preventing negative weights (shorting).
:param expected_returns: expected returns for each asset.
:type expected_returns: np.ndarray
:param cov_matrix: covariance of returns for each asset.
:type cov_matrix: np.ndarray
:param target_return: the target return for the portfolio to achieve.
:type target_return: float
:param weights_sum: the sum of the returned weights, optimization constraint.
:type weights_sum: float
:return: weight for each asset, which sum to 1.0
:rtype: np.ndarray
"""
# Solve using Lagrangian and matrix inversion.
r = expected_returns.reshape((-1, 1))
m = np.block(
[
[cov_matrix, r, np.ones(r.shape)],
[r.transpose(), 0, 0],
[np.ones(r.shape).transpose(), 0, 0],
]
)
y = np.block([[np.zeros(r.shape)], [target_return], [weights_sum]])
x = np.linalg.inv(m) @ y
# Weights are all but the last 2 elements, which are the lambdas.
w = x.flatten()[:-2]
return w
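# Cross-check sketch (an assumption about how the tests use this helper, not
# code from this file): the closed-form weights should sum to `weights_sum`.
# mu, S = setup_efficient_frontier(data_only=True)
# w = simple_ef_weights(mu.values, S.values, target_return=0.2, weights_sum=1.0)
# assert abs(w.sum() - 1.0) < 1e-9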
| [
"pypfopt.risk_models.sample_cov",
"pypfopt.efficient_frontier.EfficientFrontier",
"os.path.dirname",
"numpy.zeros",
"numpy.ones",
"pypfopt.efficient_frontier.EfficientSemivariance",
"pypfopt.expected_returns.returns_from_prices",
"pypfopt.efficient_frontier.EfficientCVaR",
"numpy.linalg.inv",
"pyp... | [((1264, 1307), 'pypfopt.expected_returns.mean_historical_return', 'expected_returns.mean_historical_return', (['df'], {}), '(df)\n', (1303, 1307), False, 'from pypfopt import expected_returns\n'), ((1332, 1358), 'pypfopt.risk_models.sample_cov', 'risk_models.sample_cov', (['df'], {}), '(df)\n', (1354, 1358), False, 'from pypfopt import risk_models\n'), ((1434, 1551), 'pypfopt.efficient_frontier.EfficientFrontier', 'EfficientFrontier', (['mean_return', 'sample_cov_matrix'], {'solver': 'solver', 'verbose': 'verbose', 'solver_options': 'solver_options'}), '(mean_return, sample_cov_matrix, solver=solver, verbose=\n verbose, solver_options=solver_options)\n', (1451, 1551), False, 'from pypfopt.efficient_frontier import EfficientFrontier, EfficientSemivariance, EfficientCVaR\n'), ((1739, 1801), 'pypfopt.expected_returns.mean_historical_return', 'expected_returns.mean_historical_return', (['df'], {'compounding': '(False)'}), '(df, compounding=False)\n', (1778, 1801), False, 'from pypfopt import expected_returns\n'), ((1825, 1848), 'pypfopt.expected_returns.returns_from_prices', 'returns_from_prices', (['df'], {}), '(df)\n', (1844, 1848), False, 'from pypfopt.expected_returns import returns_from_prices\n'), ((1923, 2012), 'pypfopt.efficient_frontier.EfficientSemivariance', 'EfficientSemivariance', (['mean_return', 'historic_returns'], {'solver': 'solver', 'verbose': 'verbose'}), '(mean_return, historic_returns, solver=solver, verbose\n =verbose)\n', (1944, 2012), False, 'from pypfopt.efficient_frontier import EfficientFrontier, EfficientSemivariance, EfficientCVaR\n'), ((2186, 2229), 'pypfopt.expected_returns.mean_historical_return', 'expected_returns.mean_historical_return', (['df'], {}), '(df)\n', (2225, 2229), False, 'from pypfopt import expected_returns\n'), ((2253, 2276), 'pypfopt.expected_returns.returns_from_prices', 'returns_from_prices', (['df'], {}), '(df)\n', (2272, 2276), False, 'from pypfopt.expected_returns import returns_from_prices\n'), ((2351, 2462), 'pypfopt.efficient_frontier.EfficientCVaR', 'EfficientCVaR', (['mean_return', 'historic_returns'], {'verbose': 'verbose', 'solver': 'solver', 'solver_options': 'solver_options'}), '(mean_return, historic_returns, verbose=verbose, solver=solver,\n solver_options=solver_options)\n', (2364, 2462), False, 'from pypfopt.efficient_frontier import EfficientFrontier, EfficientSemivariance, EfficientCVaR\n'), ((2578, 2621), 'pypfopt.expected_returns.mean_historical_return', 'expected_returns.mean_historical_return', (['df'], {}), '(df)\n', (2617, 2621), False, 'from pypfopt import expected_returns\n'), ((2646, 2672), 'pypfopt.risk_models.sample_cov', 'risk_models.sample_cov', (['df'], {}), '(df)\n', (2668, 2672), False, 'from pypfopt import risk_models\n'), ((2748, 2783), 'pypfopt.cla.CLA', 'CLA', (['mean_return', 'sample_cov_matrix'], {}), '(mean_return, sample_cov_matrix)\n', (2751, 2783), False, 'from pypfopt.cla import CLA\n'), ((3835, 3868), 'pypfopt.expected_returns.reshape', 'expected_returns.reshape', (['(-1, 1)'], {}), '((-1, 1))\n', (3859, 3868), False, 'from pypfopt import expected_returns\n'), ((362, 387), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (377, 387), False, 'import os\n'), ((4125, 4141), 'numpy.linalg.inv', 'np.linalg.inv', (['m'], {}), '(m)\n', (4138, 4141), True, 'import numpy as np\n'), ((3925, 3941), 'numpy.ones', 'np.ones', (['r.shape'], {}), '(r.shape)\n', (3932, 3941), True, 'import numpy as np\n'), ((4064, 4081), 'numpy.zeros', 'np.zeros', (['r.shape'], {}), '(r.shape)\n', 
(4072, 4081), True, 'import numpy as np\n'), ((3992, 4008), 'numpy.ones', 'np.ones', (['r.shape'], {}), '(r.shape)\n', (3999, 4008), True, 'import numpy as np\n')] |
from functools import partial, reduce
from itertools import zip_longest
import subprocess
import pytest
import os.path as op
import os
import sys
import re
import logging
import tempfile
from urllib.parse import quote
import shutil
import numpy as np
from numpy.testing import assert_array_equal
from pbcore.io import PacBioBamIndex, IndexedBamReader
from pbcore.io import openIndexedAlignmentFile
from pbcore.io.dataset.utils import consolidateXml
from pbcore.io import (DataSet, SubreadSet, ReferenceSet, AlignmentSet,
openDataSet, ConsensusReadSet, ConsensusAlignmentSet)
from pbcore.io.dataset.DataSetMetaTypes import InvalidDataSetIOError
from pbcore.io.dataset.DataSetMembers import (ExternalResource, Filters,
ContinuousDistribution,
DiscreteDistribution,
SubreadSetMetadata)
from pbcore.io.dataset.DataSetIO import _pathChanger
from pbcore.io.dataset.DataSetValidator import validateFile
from pbcore.io.dataset.DataSetUtils import loadMockCollectionMetadata
import pbcore.data.datasets as data
import pbcore.data as upstreamdata
log = logging.getLogger(__name__)
def twodots(fn):
"""For a unit-test.
.. doctest::
>>> twodots('foo.subreadset.xml')
'.subreadset.xml'
"""
bn = os.path.basename(fn)
dot0 = bn.rfind('.')
dot1 = bn.rfind('.', 0, dot0)
return bn[dot1:]
class TestDataSet:
"""Unit and integrationt tests for the DataSet class and \
associated module functions"""
def test_build(self):
# Progs like pbalign provide a .bam file:
# e.g. d = DataSet("aligned.bam")
# Something like the test files we have:
inBam = data.getBam()
assert inBam.endswith('.bam')
d = DataSet(inBam)
# A UniqueId is generated, despite being a BAM input
assert d.uuid != ''
dOldUuid = d.uuid
# They can write this BAM to an XML:
# e.g. d.write("alignmentset.xml")
outdir = tempfile.mkdtemp(suffix="dataset-unittest")
outXml = os.path.join(outdir, 'tempfile.xml')
log.debug(outXml)
# don't validate, type DataSet doesn't validate well
d.write(outXml, validate=False)
# And then recover the same XML (or a different one):
# e.g. d = DataSet("alignmentset.xml")
d = DataSet(outXml)
# The UniqueId will be the same
assert d.uuid == dOldUuid
# Inputs can be many and varied
ds1 = DataSet(data.getXml(10), data.getBam())
assert ds1.numExternalResources == 2
ds1 = DataSet(data.getFofn())
assert ds1.numExternalResources == 2
assert type(SubreadSet(data.getSubreadSet(),
skipMissing=True)).__name__ == 'SubreadSet'
# Even with untyped inputs
assert str(SubreadSet(data.getBam())).startswith('<SubreadSet')
assert type(SubreadSet(data.getBam())).__name__ == 'SubreadSet'
assert type(DataSet(data.getBam())).__name__ == 'DataSet'
# You can also cast up and down, but casting between siblings
# is limited (abuse at your own risk)
assert type(DataSet(data.getBam()).copy(
asType='SubreadSet')).__name__ == 'SubreadSet'
assert type(SubreadSet(data.getBam()).copy(
asType='DataSet')).__name__ == 'DataSet'
# Add external Resources:
ds = DataSet()
ds.externalResources.addResources(["IdontExist.bam"])
assert ds.externalResources[-1].resourceId == "IdontExist.bam"
# Add an index file
ds.externalResources[-1].addIndices(["IdontExist.bam.pbi"])
assert ds.externalResources[-1].indices[0].resourceId == "IdontExist.bam.pbi"
def test_merge_uuid(self):
ds1 = AlignmentSet(data.getBam(0))
u1 = ds1.uuid
ds2 = AlignmentSet(data.getBam(1))
u2 = ds2.uuid
assert not u1 == u2
merged = ds1 + ds2
u3 = merged.uuid
assert not u1 == u3
assert not u2 == u3
assert u1 == ds1.uuid
assert u2 == ds2.uuid
ds1 = AlignmentSet(data.getXml(7))
u1 = ds1.uuid
ds2 = AlignmentSet(data.getXml(10))
u2 = ds2.uuid
assert not u1 == u2
merged = AlignmentSet(data.getXml(7), data.getXml(10))
u3 = merged.uuid
assert not u1 == u3
assert not u2 == u3
assert u1 == ds1.uuid
assert u2 == ds2.uuid
def test_merged_CreatedAt(self):
ds1 = AlignmentSet(data.getXml(7))
u1 = ds1.createdAt
assert u1 == '2015-08-05T10:25:18'
ds2 = AlignmentSet(data.getXml(10))
u2 = ds2.createdAt
assert u2 == '2015-08-05T10:43:42'
assert not u1 == u2
merged = AlignmentSet(data.getXml(7), data.getXml(10))
u3 = merged.createdAt
assert not u1 == u3
assert not u2 == u3
assert u1 == ds1.createdAt
assert u2 == ds2.createdAt
ds1 = AlignmentSet(data.getXml(7))
u1 = ds1.createdAt
assert u1 == '2015-08-05T10:25:18'
ds2 = AlignmentSet(data.getXml(10))
u2 = ds2.createdAt
assert u2 == '2015-08-05T10:43:42'
assert not u1 == u2
merged = ds1 + ds2
u3 = merged.createdAt
assert not u1 == u3
assert not u2 == u3
assert u1 == ds1.createdAt
assert u2 == ds2.createdAt
def test_merged_Name(self):
# First has a name
ds1 = AlignmentSet(data.getXml(7))
ds1.name = 'Foo'
ds2 = AlignmentSet(data.getXml(10))
ds2.name = ''
fn1 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
fn2 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
ds1.write(fn1.name)
ds2.write(fn2.name)
merged = AlignmentSet(fn1.name, fn2.name)
assert merged.name == 'Foo'
ds1 = AlignmentSet(data.getXml(7))
ds1.name = 'Foo'
ds2 = AlignmentSet(data.getXml(10))
ds2.name = ''
merged = ds1 + ds2
assert merged.name == 'Foo'
fn1.close()
fn2.close()
# Second has a name
ds1 = AlignmentSet(data.getXml(7))
ds1.name = ''
ds2 = AlignmentSet(data.getXml(10))
ds2.name = 'Foo'
fn1 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
fn2 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
ds1.write(fn1.name)
ds2.write(fn2.name)
merged = AlignmentSet(fn1.name, fn2.name)
assert merged.name == 'Foo'
ds1 = AlignmentSet(data.getXml(7))
ds1.name = ''
ds2 = AlignmentSet(data.getXml(10))
ds2.name = 'Foo'
merged = ds1 + ds2
assert merged.name == 'Foo'
fn1.close()
fn2.close()
# Neither has a name
ds1 = AlignmentSet(data.getXml(7))
ds1.name = ''
ds2 = AlignmentSet(data.getXml(10))
ds2.name = ''
fn1 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
fn2 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
ds1.write(fn1.name)
ds2.write(fn2.name)
merged = AlignmentSet(fn1.name, fn2.name)
assert merged.name == ''
ds1 = AlignmentSet(data.getXml(7))
ds1.name = ''
ds2 = AlignmentSet(data.getXml(10))
ds2.name = ''
merged = ds1 + ds2
assert merged.name == ''
fn1.close()
fn2.close()
# both have a names
ds1 = AlignmentSet(data.getXml(7))
ds1.name = 'Foo'
ds2 = AlignmentSet(data.getXml(10))
ds2.name = 'Bar'
fn1 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
fn2 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
ds1.write(fn1.name)
ds2.write(fn2.name)
# Just take a peek:
ds1 = AlignmentSet(fn1.name)
assert ds1.name == 'Foo'
merged = AlignmentSet(fn1.name, fn2.name)
assert merged.name == 'Foo AND Bar'
ds1 = AlignmentSet(data.getXml(7))
ds1.name = 'Foo'
ds2 = AlignmentSet(data.getXml(10))
ds2.name = 'Bar'
merged = ds1 + ds2
assert merged.name == 'Foo AND Bar'
# if the names are the same don't append:
ds1 = AlignmentSet(data.getXml(7))
ds1.name = 'Foo'
ds2 = AlignmentSet(data.getXml(10))
ds2.name = 'Foo'
merged = ds1 + ds2
assert merged.name == 'Foo'
fn1.close()
fn2.close()
def test_merged_Tags(self):
# First has tags
ds1 = AlignmentSet(data.getXml(7))
ds1.tags = 'Foo Bar'
ds2 = AlignmentSet(data.getXml(10))
ds2.tags = ''
fn1 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
fn2 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
ds1.write(fn1.name)
ds2.write(fn2.name)
merged = AlignmentSet(fn1.name, fn2.name)
assert merged.tags == 'Foo Bar'
ds1 = AlignmentSet(data.getXml(7))
ds1.tags = 'Foo Bar'
ds2 = AlignmentSet(data.getXml(10))
ds2.tags = ''
merged = ds1 + ds2
assert merged.tags == 'Foo Bar'
fn1.close()
fn2.close()
# Second has tags
ds1 = AlignmentSet(data.getXml(7))
ds1.tags = ''
ds2 = AlignmentSet(data.getXml(10))
ds2.tags = 'Foo Bar'
fn1 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
fn2 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
ds1.write(fn1.name)
ds2.write(fn2.name)
merged = AlignmentSet(fn1.name, fn2.name)
assert merged.tags == 'Foo Bar'
ds1 = AlignmentSet(data.getXml(7))
ds1.tags = ''
ds2 = AlignmentSet(data.getXml(10))
ds2.tags = 'Foo'
merged = ds1 + ds2
assert merged.tags == 'Foo'
fn1.close()
fn2.close()
# Neither has tags
ds1 = AlignmentSet(data.getXml(7))
ds1.tags = ''
ds2 = AlignmentSet(data.getXml(10))
ds2.tags = ''
fn1 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
fn2 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
ds1.write(fn1.name)
ds2.write(fn2.name)
merged = AlignmentSet(fn1.name, fn2.name)
assert merged.tags == ''
ds1 = AlignmentSet(data.getXml(7))
ds1.tags = ''
ds2 = AlignmentSet(data.getXml(10))
ds2.tags = ''
merged = ds1 + ds2
assert merged.tags == ''
fn1.close()
fn2.close()
# both have tags
ds1 = AlignmentSet(data.getXml(7))
ds1.tags = 'Foo Bar'
ds2 = AlignmentSet(data.getXml(10))
ds2.tags = 'Baz'
fn1 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
fn2 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
ds1.write(fn1.name)
ds2.write(fn2.name)
# Just take a peek:
ds1 = AlignmentSet(fn1.name)
assert ds1.tags == 'Foo Bar'
merged = AlignmentSet(fn1.name, fn2.name)
assert merged.tags == 'Foo Bar Baz'
ds1 = AlignmentSet(data.getXml(7))
ds1.tags = 'Foo Bar'
ds2 = AlignmentSet(data.getXml(10))
ds2.tags = 'Baz'
merged = ds1 + ds2
assert merged.tags == 'Foo Bar Baz'
fn1.close()
fn2.close()
# both have same tags
ds1 = AlignmentSet(data.getXml(7))
ds1.tags = 'Foo Bar'
ds2 = AlignmentSet(data.getXml(10))
ds2.tags = 'Foo Bar'
fn1 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
fn2 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
ds1.write(fn1.name)
ds2.write(fn2.name)
# Just take a peek:
ds1 = AlignmentSet(fn1.name)
assert ds1.tags == 'Foo Bar'
merged = AlignmentSet(fn1.name, fn2.name)
assert merged.tags == 'Foo Bar'
ds1 = AlignmentSet(data.getXml(7))
ds1.tags = 'Foo Bar'
ds2 = AlignmentSet(data.getXml(10))
ds2.tags = 'Foo Bar'
merged = ds1 + ds2
assert merged.tags == 'Foo Bar'
fn1.close()
fn2.close()
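# Merging records each input as a subdataset of the result (whether the merge
# is done with '+' or the multi-file constructor), and inputs with mismatched
# filters should refuse to merge with a TypeError.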
def test_merge_subdatasets(self):
# from data file
ds1 = AlignmentSet(data.getBam(0))
assert len(ds1.subdatasets) == 0
ds2 = AlignmentSet(data.getBam(1))
assert len(ds2.subdatasets) == 0
merged = ds1 + ds2
assert len(merged.subdatasets) == 2
assert (merged.subdatasets[0].toExternalFiles() ==
        AlignmentSet(data.getBam(0)).toExternalFiles())
assert len(merged.subdatasets[0].toExternalFiles()) == 1
assert (merged.subdatasets[1].toExternalFiles() ==
        AlignmentSet(data.getBam(1)).toExternalFiles())
assert len(merged.subdatasets[1].toExternalFiles()) == 1
# from data set
ds1 = AlignmentSet(data.getXml(7))
assert len(ds1.subdatasets) == 0
ds2 = AlignmentSet(data.getXml(10))
assert len(ds2.subdatasets) == 0
merged = ds1 + ds2
assert len(merged.subdatasets) == 2
assert (merged.subdatasets[0].toExternalFiles() ==
        AlignmentSet(data.getXml(7)).toExternalFiles())
assert len(merged.subdatasets[0].toExternalFiles()) == 1
assert (merged.subdatasets[1].toExternalFiles() ==
        AlignmentSet(data.getXml(10)).toExternalFiles())
assert len(merged.subdatasets[1].toExternalFiles()) == 1
# combined data set
merged = AlignmentSet(data.getXml(7), data.getXml(10))
assert len(merged.subdatasets) == 2
assert len(merged.subdatasets[0].toExternalFiles()) == 1
assert (merged.subdatasets[0].toExternalFiles() ==
        AlignmentSet(data.getXml(7)).toExternalFiles())
assert len(merged.subdatasets[1].toExternalFiles()) == 1
assert (merged.subdatasets[1].toExternalFiles() ==
        AlignmentSet(data.getXml(10)).toExternalFiles())
# No filters, 3 files:
ds1 = AlignmentSet(data.getXml(7))
assert len(ds1.subdatasets) == 0
ds2 = AlignmentSet(data.getXml(10))
assert len(ds2.subdatasets) == 0
ds3 = AlignmentSet(data.getXml(10))
assert len(ds3.subdatasets) == 0
ds3.externalResources[0].resourceId = "/blah.bam"
ds4 = ds1 + ds2 + ds3
assert len(ds4.externalResources) == 3
assert len(ds4.subdatasets) == 3
# Filters, 3 files:
ds1 = AlignmentSet(data.getXml(7))
assert len(ds1.subdatasets) == 0
ds1.filters.addRequirement(rq=[('>', 0.8)])
ds2 = AlignmentSet(data.getXml(10))
assert len(ds2.subdatasets) == 0
ds2.filters.addRequirement(rq=[('>', 0.8)])
ds3 = AlignmentSet(data.getXml(10))
assert len(ds3.subdatasets) == 0
ds3.externalResources[0].resourceId = "/blah.bam"
ds3.filters.addRequirement(rq=[('>', 0.8)])
ds4 = ds1 + ds2 + ds3
assert len(ds4.externalResources) == 3
assert len(ds4.subdatasets) == 3
assert str(ds4.filters) == '( rq > 0.8 )'
for sss in ds4.subdatasets:
assert str(sss.filters) == '( rq > 0.8 )'
with pytest.raises(TypeError):
# mismatched Filters, 3 files:
ds1 = AlignmentSet(data.getXml(7))
assert len(ds1.subdatasets) == 0
ds1.filters.addRequirement(rq=[('>', 0.8)])
ds2 = AlignmentSet(data.getXml(10))
assert len(ds2.subdatasets) == 0
ds2.filters.addRequirement(rq=[('>', 0.7)])
ds3 = AlignmentSet(data.getXml(10))
assert len(ds3.subdatasets) == 0
ds3.externalResources[0].resourceId = "/blah.bam"
ds3.filters.addRequirement(rq=[('>', 0.8)])
ds4 = ds1 + ds2 + ds3
def test_empty_metatype(self):
inBam = data.getBam()
d = DataSet(inBam)
for extRes in d.externalResources:
assert extRes.metaType == ""
def test_nonempty_metatype(self):
inBam = data.getBam()
d = AlignmentSet(inBam)
for extRes in d.externalResources:
assert extRes.metaType == "PacBio.AlignmentFile.AlignmentBamFile"
@pytest.mark.constools
def test_empty_file_counts(self):
# empty with pbi:
dset = SubreadSet(upstreamdata.getEmptyBam())
assert dset.numRecords == 0
assert dset.totalLength == 0
assert len(list(dset)) == 0
# Don't care what they are, just don't want them to fail:
dset.updateCounts()
dset.index
assert len(dset.resourceReaders()) == 1
assert len(list(dset.split(zmws=True, maxChunks=12))) == 1
# empty and full:
full_bam = SubreadSet(data.getXml(9)).toExternalFiles()[0]
dset = SubreadSet(upstreamdata.getEmptyBam(), full_bam)
assert dset.numRecords == 92, dset.numRecords
assert dset.totalLength == 124093
assert len(list(dset)) == 92
dset.updateCounts()
assert not list(dset.index) == []
assert len(dset.resourceReaders()) == 2
# there are 9 ZMWs in this set, < the minimum chunk size
assert len(list(dset.split(zmws=True, maxChunks=12))) == 2
dset = AlignmentSet(upstreamdata.getEmptyAlignedBam())
assert dset.numRecords == 0
assert dset.totalLength == 0
assert len(list(dset)) == 0
dset.updateCounts()
dset.index
assert len(dset.resourceReaders()) == 1
# there is a minimum chunk size here:
assert len(
list(dset.split(contigs=True, maxChunks=12, breakContigs=True))) == 1
# empty and full:
dset = AlignmentSet(upstreamdata.getEmptyAlignedBam(), data.getBam())
assert dset.numRecords == 92
assert dset.totalLength == 123588
assert len(list(dset)) == 92
dset.updateCounts()
assert not list(dset.index) == []
assert len(dset.resourceReaders()) == 2
# there are 9 ZMWs in this set, < the minimum chunk size
assert len(list(dset.split(zmws=True, maxChunks=12))) == 2
dset = ConsensusReadSet(upstreamdata.getEmptyBam())
assert dset.numRecords == 0
assert dset.totalLength == 0
assert len(list(dset)) == 0
dset.updateCounts()
dset.index
assert len(dset.resourceReaders()) == 1
dset = ConsensusAlignmentSet(upstreamdata.getEmptyAlignedBam())
assert dset.numRecords == 0
assert dset.totalLength == 0
assert len(list(dset)) == 0
dset.updateCounts()
dset.index
assert len(dset.resourceReaders()) == 1
# empty without pbi:
outdir = tempfile.mkdtemp(suffix="dataset-unittest")
outfile = os.path.split(upstreamdata.getEmptyBam())[1]
outpath = os.path.join(outdir, outfile)
shutil.copy(upstreamdata.getEmptyBam(), outpath)
alnoutfile = os.path.split(upstreamdata.getEmptyAlignedBam())[1]
alnoutpath = os.path.join(outdir, alnoutfile)
shutil.copy(upstreamdata.getEmptyAlignedBam(), alnoutpath)
dset = SubreadSet(outpath)
assert dset.numRecords == 0
assert dset.totalLength == 0
assert len(list(dset)) == 0
dset.updateCounts()
assert len(dset.resourceReaders()) == 1
assert len(list(dset.split(zmws=True, maxChunks=12))) == 1
# empty and full:
full_bam = SubreadSet(data.getXml(9)).toExternalFiles()[0]
dset = SubreadSet(outpath, full_bam)
assert len(dset.resourceReaders()) == 2
dset.updateCounts()
# without a pbi, updating counts is broken
assert dset.numRecords == 0
assert dset.totalLength == 0
assert len(list(dset)) == 92
with pytest.raises(IOError):
assert not list(dset.index) == []
assert len(list(dset.split(zmws=True, maxChunks=12))) == 1
dset = AlignmentSet(alnoutpath)
assert dset.numRecords == 0
assert dset.totalLength == 0
assert len(list(dset)) == 0
dset.updateCounts()
assert len(dset.resourceReaders()) == 1
assert len(
list(dset.split(contigs=True, maxChunks=12, breakContigs=True))) == 1
# empty and full:
dset = AlignmentSet(alnoutpath, data.getBam())
# without a pbi, updating counts is broken
assert dset.numRecords == 0
assert dset.totalLength == 0
assert len(list(dset)) == 92
dset.updateCounts()
with pytest.raises(IOError):
assert not list(dset.index) == []
assert len(dset.resourceReaders()) == 2
assert len(list(dset.split(zmws=True, maxChunks=12))) == 1
dset = ConsensusReadSet(outpath)
assert dset.numRecords == 0
assert dset.totalLength == 0
assert len(list(dset)) == 0
dset.updateCounts()
assert len(dset.resourceReaders()) == 1
dset = ConsensusAlignmentSet(alnoutpath)
assert dset.numRecords == 0
assert dset.totalLength == 0
assert len(list(dset)) == 0
dset.updateCounts()
assert len(dset.resourceReaders()) == 1
dset.induceIndices()
dset = ConsensusAlignmentSet(alnoutpath)
assert dset.numRecords == 0
assert dset.totalLength == 0
assert len(list(dset)) == 0
dset.updateCounts()
assert len(dset.resourceReaders()) == 1
def test_empty_bam_index_dtype(self):
# Make sure the BAM and DataSet APIs are consistent
empty_bam = upstreamdata.getEmptyBam()
sset = SubreadSet(empty_bam)
empty = np.array([], dtype=np.int32)
# The BAM API
assert np.array_equal(
sset.resourceReaders()[0].index.qId,
empty)
# The DataSet API
assert np.array_equal(
    sset.index.qId,
    empty)
# Check to make sure we can stack them:
full_bam = upstreamdata.getUnalignedBam()
sset = SubreadSet(empty_bam, full_bam)
# The BAM API
assert len(sset.resourceReaders()[1].index.qId) != 0
# The DataSet API
assert len(sset.index.qId) != 0
def test_space_in_filename(self):
outdir = tempfile.mkdtemp(suffix="dataset unittest")
ofn = os.path.join(outdir, 'spaced.subreadset.xml')
ss = SubreadSet(data.getXml(9), strict=True)
ss.copyTo(ofn)
ss = SubreadSet(ofn, strict=True)
for fn in ss.toExternalFiles():
assert ' ' in fn
ss._modResources(partial(_pathChanger,
lambda x: ('file://' + quote(x)),
lambda x: x))
# have to dig deep to not get a processed version:
for er in ss.externalResources:
assert '%20' in er.attrib['ResourceId']
# this should have been cleaned for actual use:
for fn in ss.toExternalFiles():
assert ' ' in fn
ss.write(ofn)
ss = SubreadSet(ofn, strict=True)
shutil.rmtree(outdir)
def test_empty_aligned_bam_index_dtype(self):
# Make sure the BAM and DataSet APIs are consistent
empty_bam = data.getEmptyAlignedBam()
alnFile = AlignmentSet(empty_bam)
empty = np.array([], dtype=np.int32)
# The BAM API
assert np.array_equal(
alnFile.resourceReaders()[0].tId,
empty)
assert np.array_equal(
alnFile.resourceReaders()[0].index.tId,
empty)
# The DataSet API
assert np.array_equal(alnFile.tId, empty)
assert np.array_equal(alnFile.index.tId, empty)
# Check to make sure we can stack them:
full_bam = upstreamdata.getAlignedBam()
aset = AlignmentSet(empty_bam, full_bam)
# The BAM API
assert len(aset.resourceReaders()[1].index.qId) != 0
# The DataSet API
assert len(aset.index.qId) != 0
def test_read_ranges(self):
# This models the old and new ways by which Genomic Consensus generates
# lists of paired tStarts and tEnds.
full_bam = upstreamdata.getAlignedBam()
empty_bam = data.getEmptyAlignedBam()
file_lists = [[empty_bam],
[full_bam, empty_bam],
[empty_bam, full_bam]]
refId_list = ['lambda_NEB3011', 0]
minMapQV = 30
for file_list in file_lists:
for refId in refId_list:
alnFile = AlignmentSet(*file_list)
# new GC (just testing that it doesn't raise exceptions):
rows = alnFile.index[
((alnFile.tId == alnFile.referenceInfo(refId).ID) &
(alnFile.mapQV >= minMapQV))]
# FIXME these arrays will be empty in the first iteration of
# the nested loop, which leads to a MemoryError when lexsort
# is called below. Converting to Python lists avoids the
# error, but this seems seriously broken...
unsorted_tStart = rows.tStart
unsorted_tEnd = rows.tEnd
# Sort (expected by CoveredIntervals)
sort_order = np.lexsort(
(list(unsorted_tEnd), list(unsorted_tStart)))
tStart = unsorted_tStart[sort_order].tolist()
tEnd = unsorted_tEnd[sort_order].tolist()
def test_loading_reference(self):
log.info('Opening Reference')
r = ReferenceSet(data.getRef()).toExternalFiles()[0]
log.info('Done Opening Reference')
log.info('Opening AlignmentSet')
d = AlignmentSet(data.getBam(), referenceFastaFname=r)
log.info('Done Opening AlignmentSet')
bfile = openIndexedAlignmentFile(data.getBam(),
referenceFastaFname=r)
assert bfile.isReferenceLoaded
for res in d.resourceReaders():
assert res.isReferenceLoaded
aln = AlignmentSet(data.getBam())
aln.addReference(r)
for res in aln.resourceReaders():
assert res.isReferenceLoaded
def test_factory_function(self):
aln = data.getXml(7)
ref = data.getXml(8)
sub = data.getXml(9)
inTypes = [aln, ref, sub]
expTypes = [AlignmentSet, ReferenceSet, SubreadSet]
for infn, exp in zip(inTypes, expTypes):
# TODO enable this for all when simulated subread files can be
# pbi'd
if exp in [ReferenceSet, AlignmentSet]:
ds = openDataSet(infn, strict=True)
else:
ds = openDataSet(infn, strict=False)
assert type(ds) == exp
def test_factory_function_on_symlink(self):
# same as test_factory_function(), but symlinked
aln = data.getXml(7)
ref = data.getXml(8)
sub = data.getXml(9)
inTypes = [aln, ref, sub]
expTypes = [AlignmentSet, ReferenceSet, SubreadSet]
for infn, exp in zip(inTypes, expTypes):
linfn = 'foo' + twodots(infn)
if os.path.lexists(linfn):
os.remove(linfn)
os.symlink(infn, linfn)
assert os.path.islink(linfn)
del infn
if exp in [ReferenceSet, AlignmentSet]:
ds = openDataSet(linfn, strict=True)
else:
ds = openDataSet(linfn, strict=False)
assert type(ds) == exp
os.remove(linfn)
def test_type_checking(self):
bam = data.getBam()
fasta = ReferenceSet(data.getXml(8)).toExternalFiles()[0]
DataSet(bam, strict=False)
DataSet(fasta, strict=False)
with pytest.raises(Exception):
DataSet(bam, strict=True)
with pytest.raises(Exception):
DataSet(fasta, strict=True)
AlignmentSet(bam, strict=True)
with pytest.raises(Exception):
AlignmentSet(fasta, strict=True)
ReferenceSet(fasta, strict=True)
with pytest.raises(Exception):
ReferenceSet(bam, strict=True)
def test_updateCounts_without_pbi(self):
log.info("Testing updateCounts without pbi")
data_fname = data.getBam(0)
outdir = tempfile.mkdtemp(suffix="dataset-unittest")
tempout = os.path.join(outdir, os.path.basename(data_fname))
subprocess.check_call(["cp", data_fname, tempout])
aln = AlignmentSet(tempout, strict=False)
assert aln.totalLength == 0
assert aln.numRecords == 0
@pytest.mark.internal_data
def test_barcode_accession(self):
testFile = ("/pbi/dept/secondary/siv/testdata/pblaa-unittest/"
"P6-C4/HLA_ClassI/m150724_012016_sherri_c1008203"
"52550000001823172911031521_s1_p0.class_I.haploid.bam")
# Test the pbi file:
bam = IndexedBamReader(testFile)
pbi = PacBioBamIndex(testFile + '.pbi')
for brec, prec in zip(bam, pbi):
brec_bc = list(brec.peer.opt("bc"))
prec_bc = [prec.bcForward, prec.bcReverse]
assert brec_bc == prec_bc
# Test split by barcode:
ss = SubreadSet(testFile)
sss = list(ss.split(chunks=2, barcodes=True))
assert len(sss) == 2
for sset in sss:
assert len(sset.barcodes) >= 1
def test_attributes(self):
aln = AlignmentSet(data.getBam(0))
assert aln.sequencingChemistry == ['unknown']
assert aln.isSorted == True
assert aln.isEmpty == False
assert aln.readType == 'standard'
assert len(aln.tStart) == aln.metadata.numRecords
assert len(aln.tEnd) == aln.metadata.numRecords
def test_updateCounts(self):
log.info("Testing updateCounts without filters")
aln = AlignmentSet(data.getBam(0))
readers = aln.resourceReaders()
expLen = 0
for reader in readers:
for record in reader:
expLen += record.readLength
assert record.aStart == record.bam.pbi[record.rowNumber]['aStart']
assert record.aEnd == record.bam.pbi[record.rowNumber]['aEnd']
expNum = 0
for reader in readers:
expNum += len(reader)
accLen = aln.metadata.totalLength
accNum = aln.metadata.numRecords
assert expLen == accLen
assert expNum == accNum
log.info("Testing whether filters are respected")
aln.filters.addRequirement(rname=[('=', 'E.faecalis.1')])
aln.updateCounts()
accLen = aln.metadata.totalLength
accNum = aln.metadata.numRecords
def count(gen):
count = 0
for _ in gen:
count += 1
return count
expLen = 0
for reader in readers:
for record in reader:
expLen += record.readLength
bfile = openIndexedAlignmentFile(data.getBam(0))
rWin = (bfile.referenceInfo('E.faecalis.1').ID,
0,
bfile.referenceInfo('E.faecalis.1').Length)
reads = bfile.readsInRange(*rWin)
expNum = count(reads)
expLen = 0
reads = bfile.readsInRange(*rWin)
for read in reads:
expLen += read.readLength
assert expLen == accLen
assert expNum == accNum
@pytest.mark.internal_data
def test_scraps_detection(self):
path = ('/pbi/dept/secondary/siv/testdata/SA3-Sequel/'
'lambda/3150128/r54008_20160308_001811/'
'2_B01/m54008_160308_053311.')
subreads = path + 'subreads.bam'
control = path + 'control.subreads.bam'
controlscraps = path + 'control.scraps.bam'
scraps = path + 'scraps.bam'
subreadspbi = subreads + '.pbi'
scrapspbi = scraps + '.pbi'
filesets = [[subreads],
[subreads, scraps],
[subreads, subreadspbi],
[subreads, scrapspbi]]
for files in filesets:
sset = SubreadSet(*files, strict=True)
assert len(sset.externalResources) == 1
assert sset.externalResources[0].resourceId == subreads
assert sset.externalResources[0].scraps == scraps
assert sset.externalResources[0].control == control
assert sset.externalResources[0].externalResources[0].resourceId == scraps
assert sset.externalResources[0].externalResources[1].resourceId == control
assert sset.externalResources[0].externalResources[1].externalResources[0].resourceId == controlscraps
@pytest.mark.internal_data
def test_referenceInfoTableMerging(self):
log.info("Testing refIds, etc. after merging")
bam1 = ("/pbi/dept/secondary/siv/testdata/SA3-RS/ecoli/"
"2590953/0001/Alignment_Results/"
"m140913_005018_42139_c100713652400000001823152"
"404301534_s1_p0.1.aligned.bam")
bam2 = ("/pbi/dept/secondary/siv/testdata/SA3-RS/ecoli/"
"2590953/0001/Alignment_Results/"
"m140913_005018_42139_c100713652400000001823152"
"404301534_s1_p0.3.aligned.bam")
aln = AlignmentSet(bam1, bam2)
readers = aln.resourceReaders()
ids = sorted([i for _, i in aln.refInfo('ID')])
assert list(range(len(ids))) == ids
accNames = aln.refNames
expNames = reduce(np.append,
[reader.referenceInfoTable['Name']
for reader in readers])
expNames = np.unique(expNames)
assert sorted(expNames) == sorted(accNames)
accNames = aln.fullRefNames
expNames = reduce(np.append,
[reader.referenceInfoTable['FullName']
for reader in readers])
expNames = np.unique(expNames)
assert sorted(expNames) == sorted(accNames)
def test_merge(self):
# xmls with different resourceIds: success
ds1 = DataSet(data.getXml(no=8))
ds2 = DataSet(data.getXml(no=11))
ds3 = ds1 + ds2
expected = ds1.numExternalResources + ds2.numExternalResources
assert ds3.numExternalResources == expected
# xmls with different resourceIds but conflicting filters:
# failure to merge
ds2 = DataSet(data.getXml(no=11))
ds2.filters.addRequirement(rname=[('=', 'E.faecalis.1')])
ds3 = ds1 + ds2
assert ds3 is None
# xmls with same resourceIds: ignores new inputs
ds1 = DataSet(data.getXml(no=8))
ds2 = DataSet(data.getXml(no=8))
ds3 = ds1 + ds2
expected = ds1.numExternalResources
assert ds3.numExternalResources == expected
def test_newUuid(self):
ds = DataSet()
old = ds.uuid
_ = ds.newUuid()
assert old != ds.uuid
def test_newUuid_repeat(self):
ds = DataSet()
old = ds.uuid
new = ds.newUuid()
assert old != ds.uuid
assert old != new
assert ds.uuid == new
reallynew = ds.newUuid()
# Note that you can keep calling newUuid, and each time it will be
# different:
last = ds.uuid
for _ in range(10):
ds.newUuid()
assert ds.uuid != new
assert ds.uuid != last
last = ds.uuid
assert reallynew != new
assert reallynew != old
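# By default newUuid() appears to be deterministic (derived from the dataset
# content): identical copies regenerate identical uuids in lockstep, while
# random=True (or newRandomUuid) makes them diverge. Both behaviors are
# exercised in the next two tests.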
def test_newUuid_copy(self):
fn_orig = data.getXml(9)
ds = openDataSet(fn_orig)
fn1 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
fn2 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
ds.write(fn1.name)
ds.write(fn2.name)
ds1 = openDataSet(fn1.name)
ds2 = openDataSet(fn2.name)
assert ds1.uuid == ds2.uuid
for _ in range(10):
ds1.newUuid()
ds2.newUuid()
assert ds1.uuid == ds2.uuid
fn1.close()
fn2.close()
def test_newUuid_random(self):
fn_orig = data.getXml(9)
ds = openDataSet(fn_orig)
fn1 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
fn2 = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
ds.write(fn1.name)
ds.write(fn2.name)
ds1 = openDataSet(fn1.name)
original_uuid = ds1.uuid
ds2 = openDataSet(fn2.name)
ds3 = openDataSet(fn1.name)
assert ds1.uuid == ds2.uuid
for _ in range(10):
ds1.newUuid(random=True)
ds2.newUuid(random=True)
ds3.newRandomUuid()
for ds in [ds1, ds2, ds3]:
assert not original_uuid == ds.uuid
assert not ds1.uuid == ds2.uuid
assert not ds1.uuid == ds3.uuid
fn1.close()
fn2.close()
def test_bad_xml_extension(self):
fn = tempfile.NamedTemporaryFile(suffix=".alignmentset.xml.disabled")
with AlignmentSet(data.getXml(7)) as aln:
aln.write(fn.name)
with AlignmentSet(fn.name) as aln:
assert len(aln) == 92
shutil.copy(data.getBam(), fn.name)
with pytest.raises(IOError):
with AlignmentSet(fn.name) as aln:
assert len(aln) == 92
fn.close()
def test_write_to_stdout(self):
# open file:
fn = tempfile.NamedTemporaryFile(
suffix=".alignmentset.xml").name
ofh = open(fn, 'w')
with AlignmentSet(data.getXml(7)) as aln:
aln.write(ofh)
with AlignmentSet(fn) as aln:
assert len(aln) == 92
ofh.close()
# stdout:
# This is just going to be printed into the test output, but it is good
# to show that this doesn't error out
with AlignmentSet(data.getXml(7)) as aln:
aln.write(sys.stdout)
@pytest.mark.internal_data
def test_multi_movie_readsByName(self):
N_RECORDS = 1745161
test_file_1 = ("/pbi/dept/secondary/siv/testdata/SA3-DS/lambda/"
"2372215/0007/Analysis_Results/m150404_101626_42"
"267_c100807920800000001823174110291514_s1_p0.al"
"l.subreadset.xml")
test_file_2 = ("/pbi/dept/secondary/siv/testdata/SA3-DS/lambda/"
"2590980/0008/Analysis_Results/m141115_075238_et"
"han_c100699872550000001823139203261572_s1_p0.al"
"l.subreadset.xml")
ds1 = SubreadSet(test_file_1, test_file_2)
assert len(ds1) == N_RECORDS
queries = [('m150404_101626_42267_c1008079208000'
'00001823174110291514_s1_p0/7/*', 2),
('m141115_075238_ethan_c1006998725500'
'00001823139203261572_s1_p0/9/*', 39),
]
for query, count in queries:
reads = ds1.readsByName(query)
assert len(reads) == count
parts = query.split('/')
movie = parts[0]
hn = int(parts[1])
if len(parts) > 2:
qrange = parts[2]
for read in reads:
assert read.movieName == movie
assert read.holeNumber == hn
# TODO: test qrange/ccs
@pytest.mark.skip(reason="Too expensive")
def test_large_pbi(self):
pbiFn = ('/pbi/dept/secondary/siv/testdata/SA3-DS/lambda/simulated'
'/100Gb/alnsubreads/pbalchemy_100Gb_Seq_sim1_p0.'
'aligned.bam.pbi')
pbi = PacBioBamIndex(pbiFn)
assert pbi.aStart is not None
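# copy() should produce an equivalent but distinct object: same name,
# resources, filters and subdatasets, but a fresh UniqueId (hence objMetadata
# differs); copy(asType=...) casts, and incompatible casts raise TypeError.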
def test_copy(self):
ds1 = DataSet(data.getXml(11))
ds2 = ds1.copy()
assert not ds1 == ds2
assert not ds1.uuid == ds2.uuid
assert not ds1 is ds2
assert ds1.name == ds2.name
assert ds1.externalResources == ds2.externalResources
# The name and UniqueId are different:
assert not ds1.objMetadata == ds2.objMetadata
assert ds1.filters == ds2.filters
assert ds1.subdatasets == ds2.subdatasets
assert len(ds1.subdatasets) == 2
assert len(ds2.subdatasets) == 2
assert not reduce(lambda x, y: x or y,
[ds1d is ds2d for ds1d in
ds1.subdatasets for ds2d in
ds2.subdatasets])
# TODO: once simulated files are indexable, turn on strict:
ds1 = SubreadSet(data.getXml(9), strict=False)
assert type(ds1.metadata).__name__ == 'SubreadSetMetadata'
ds2 = ds1.copy()
assert type(ds2.metadata).__name__ == 'SubreadSetMetadata'
# Let's try casting
ds1 = DataSet(data.getBam())
assert type(ds1).__name__ == 'DataSet'
ds1 = ds1.copy(asType='SubreadSet')
assert type(ds1).__name__ == 'SubreadSet'
# Let's do some illicit casting
with pytest.raises(TypeError):
ds1 = ds1.copy(asType='ReferenceSet')
# Let's try not having to cast
ds1 = SubreadSet(data.getBam())
assert type(ds1).__name__ == 'SubreadSet'
def test_write(self):
outdir = tempfile.mkdtemp(suffix="dataset-unittest")
outfile = os.path.join(outdir, 'tempfile.xml')
ds1 = AlignmentSet(data.getBam())
ds1.write(outfile)
log.debug('Validated file: {f}'.format(f=outfile))
validateFile(outfile)
ds2 = AlignmentSet(outfile)
assert ds1 == ds2
# Should fail when strict:
ds3 = AlignmentSet(data.getBam())
ds3.write(outfile)
def test_addMetadata(self):
ds = DataSet()
ds.addMetadata(None, Name='LongReadsRock')
assert ds._metadata.getV(
container='attrib', tag='Name') == 'LongReadsRock'
ds2 = DataSet(data.getXml(7))
assert ds2._metadata.totalLength == 123588
ds2._metadata.totalLength = 100000
assert ds2._metadata.totalLength == 100000
ds2._metadata.totalLength += 100000
assert ds2._metadata.totalLength == 200000
def test_copyTo(self):
aln = AlignmentSet(data.getXml(7), strict=True)
explen = len(aln)
fn = tempfile.NamedTemporaryFile(suffix=".alignmentset.xml")
aln.copyTo(fn.name)
aln.close()
del aln
aln = AlignmentSet(fn.name, strict=True)
assert explen == len(aln)
fn.close()
outdir = tempfile.mkdtemp(suffix="dataset-unittest")
aln.copyTo(outdir)
fn = os.path.join(outdir, "test.alignmentset.xml")
aln.write(fn)
aln.close()
del aln
aln = AlignmentSet(fn, strict=True)
assert explen == len(aln)
# do it twice to same dir to induce collisions
aln = AlignmentSet(data.getXml(7), strict=True)
outdir = tempfile.mkdtemp(suffix="dataset-unittest")
aln.copyTo(outdir)
fn = os.path.join(outdir, "test.alignmentset.xml")
aln.write(fn)
aln = AlignmentSet(data.getXml(7), strict=True)
aln.copyTo(outdir)
fn2 = os.path.join(outdir, "test2.alignmentset.xml")
aln.write(fn2)
aln = AlignmentSet(fn, strict=True)
aln2 = AlignmentSet(fn2, strict=True)
assert explen == len(aln)
assert explen == len(aln2)
assert not sorted(aln.toExternalFiles()) == sorted(
aln2.toExternalFiles())
def test_mixed_pbi_columns(self):
import pbtestdata
inp1 = pbtestdata.get_file("barcoded-subreadset")
inp2 = pbtestdata.get_file("subreads-unbarcoded")
ds1 = SubreadSet(inp1, strict=True)
ds2 = SubreadSet(inp2, strict=True)
ds3 = SubreadSet(inp1, inp2)
for ds in [ds1, ds2, ds3]:
ds.updateCounts()
assert len(ds3) == len(ds1) + len(ds2)
assert ds1.isBarcoded
assert not ds2.isBarcoded
assert not ds3.isBarcoded
for ds in [ds1, ds2, ds3]:
assert ds.isIndexed
assert not ds.isMapped
def test_numbarcodes(self):
import pbtestdata
inp = pbtestdata.get_file("barcoded-subreadset")
ds = SubreadSet(inp, strict=True)
assert ds.numBarcodes == 3
@pytest.mark.internal_data
@pytest.mark.constools
def test_copyTo_same_base_names(self):
import pbtestdata
# see bug 33778
tmp_bam = tempfile.NamedTemporaryFile(suffix=".bam")
log.debug(tmp_bam.name)
ds = AlignmentSet(pbtestdata.get_file("aligned-ds-2"))
log.debug(pbtestdata.get_file("aligned-ds-2"))
consolidateXml(ds, tmp_bam.name, cleanup=True)
with AlignmentSet(tmp_bam.name) as f:
qnames = set()
for rec in f:
qnames.add(rec.qName)
assert len(qnames) == len([rec for rec in f])
assert len(qnames) == len(f)
tmp_bam.close()
def test_addExternalResources(self):
ds = DataSet()
er1 = ExternalResource()
er1.resourceId = "test1.bam"
er2 = ExternalResource()
er2.resourceId = "test2.bam"
er3 = ExternalResource()
er3.resourceId = "test1.bam"
ds.addExternalResources([er1], updateCount=False)
assert ds.numExternalResources == 1
# different resourceId: succeeds
ds.addExternalResources([er2], updateCount=False)
assert ds.numExternalResources == 2
# same resourceId: fails
ds.addExternalResources([er3], updateCount=False)
assert ds.numExternalResources == 2
for extRef in ds.externalResources:
assert type(extRef).__name__ == "ExternalResource"
extRef = ds.externalResources[0]
assert type(extRef).__name__ == "ExternalResource"
assert extRef.resourceId == 'test1.bam'
extRef = ds.externalResources[1]
assert type(extRef).__name__ == "ExternalResource"
assert extRef.resourceId == 'test2.bam'
def test_resourceReaders(self):
ds = AlignmentSet(data.getBam())
for seqFile in ds.resourceReaders():
assert len([row for row in seqFile]) == 92
def test_records(self):
ds = AlignmentSet(data.getXml(7))
assert len(list(ds.records)) == 92
def test_toFofn(self):
assert DataSet("bam1.bam", "bam2.bam", strict=False,
skipMissing=True).toFofn() == ['bam1.bam', 'bam2.bam']
realDS = DataSet(data.getXml(8))
files = realDS.toFofn()
assert len(files) == 1
assert os.path.exists(files[0])
assert os.path.isabs(files[0])
files = realDS.toFofn(relative=True)
assert len(files) == 1
assert os.path.exists(files[0])
assert not os.path.isabs(files[0])
def test_toExternalFiles(self):
bogusDS = DataSet("bam1.bam", "bam2.bam", strict=False,
skipMissing=True)
assert ['bam1.bam', 'bam2.bam'] == bogusDS.externalResources.resourceIds
assert DataSet("bam1.bam", "bam2.bam", strict=False,
skipMissing=True).toExternalFiles() == ['bam1.bam', 'bam2.bam']
realDS = DataSet(data.getXml(8))
files = realDS.toExternalFiles()
assert len(files) == 1
assert os.path.exists(files[0])
assert os.path.isabs(files[0])
def test_chunk_list(self):
test = [1, 2, 3, 4, 5]
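# _chunkList appears to balance greedily: items are taken largest-first and
# each is placed in the currently lightest chunk, so the totals below come
# out 5/5/5.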
chunks = DataSet()._chunkList(test, 3, balanceKey=lambda x: x)
assert chunks == [[5], [4, 1], [3, 2]]
def test_ref_names(self):
ds = AlignmentSet(data.getBam())
refNames = ds.refNames
assert sorted(refNames)[0] == 'A.baumannii.1'
assert len(refNames) == 59
def test_reads_in_range(self):
ds = AlignmentSet(data.getBam())
refNames = ds.refNames
rn = refNames[15]
reads = list(ds.readsInRange(rn, 10, 100))
assert len(reads) == 10
def lengthInWindow(hit, winStart, winEnd):
return min(hit.tEnd, winEnd) - max(hit.tStart, winStart)
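# With longest=True, readsInRange should yield hits in order of decreasing
# overlap with the window; the loop below checks that the overlap lengths
# are non-increasing.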
reads = list(ds.readsInRange(rn, 10, 100, longest=True))
last = None
for read in reads:
if last is None:
last = lengthInWindow(read, 10, 100)
else:
assert last >= lengthInWindow(read, 10, 100)
last = lengthInWindow(read, 10, 100)
reads = list(ds._pbiReadsInRange(rn, 10, 100))
assert len(reads) == 10
ds2 = AlignmentSet(data.getBam(0))
reads = list(ds2.readsInRange("E.faecalis.1", 0, 1400))
assert len(reads) == 20
lengths = ds.refLengths
for rname, rId in ds.refInfo('ID'):
rn = ds._idToRname(rId)
assert rname == rn
rlen = lengths[rn]
assert len(list(ds.readsInReference(rn))) == len(
list(ds.readsInReference(rId)))
assert len(list(ds.readsInRange(rn, 0, rlen))) == len(
list(ds.readsInRange(rId, 0, rlen)))
def test_reads_in_range_indices(self):
ds = AlignmentSet(data.getBam())
refNames = ds.refNames
rn = refNames[15]
read_indexes = list(ds.readsInRange(rn, 10, 100, justIndices=True))
assert len(read_indexes) == 10
for read in read_indexes:
assert isinstance(read, (int, np.int64))
read_index_records = ds.index[read_indexes]
reads = list(ds.readsInRange(rn, 10, 100, justIndices=False))
assert len(reads) == 10
for ri, rr in zip(ds[read_indexes], reads):
assert ri == rr
@pytest.mark.internal_data
def test_reads_in_range_order(self):
log.debug("Testing with one file")
testFile = ("/pbi/dept/secondary/siv/testdata/SA3-DS/lambda/"
"2372215/0007_tiny/Alignment_Results/m150404_101"
"626_42267_c1008079208000000018231741102915"
"14_s1_p0.1.alignmentset.xml")
aln = AlignmentSet(testFile)
reads1 = aln.readsInRange(aln.refNames[0], 0, 400,
usePbi=False)
reads2 = aln.readsInRange(aln.refNames[0], 0, 400,
usePbi=True)
num = 0
for r1, r2 in zip(reads1, reads2):
assert r1 == r2
num += 1
assert num == 28
log.debug("Testing with three files")
testFile = ("/pbi/dept/secondary/siv/testdata/SA3-DS/lambda/"
"2372215/0007_tiny/Alignment_Results/m150404_101"
"626_42267_c1008079208000000018231741102915"
"14_s1_p0.all.alignmentset.xml")
aln = AlignmentSet(testFile)
reads1 = aln.readsInRange(aln.refNames[0], 0, 400,
usePbi=False)
reads2 = aln.readsInRange(aln.refNames[0], 0, 400,
usePbi=True)
num = 0
for r1, r2 in zip(reads1, reads2):
assert r1 == r2
num += 1
assert num == 105
@pytest.mark.internal_data
def test_reads_in_range_order_large(self):
window = ('Staphylococcus_aureus_subsp_aureus_USA300_TCH1516',
558500,
559005)
log.debug("Testing with one file")
testFile = ("/pbi/dept/secondary/siv/testdata/"
"genomic_consensus-unittest/"
"Quiver/staph/m140911_084715_42139_c10070239048"
"0000001823141103261514_s1_p0.aligned_subreads.bam")
aln = AlignmentSet(testFile)
reads1 = aln.readsInRange(*window, usePbi=False)
reads2 = aln.readsInRange(*window, usePbi=True)
num = 0
for r1, r2 in zip(reads1, reads2):
assert r1 == r2
num += 1
assert num > 100
winId, winStart, winEnd = window
def lengthInWindow(hit):
return min(hit.tEnd, winEnd) - max(hit.tStart, winStart)
log.debug("Testing longest sort vs no pbi")
aln = AlignmentSet(testFile)
reads1 = aln.readsInRange(*window, usePbi=False)
reads2 = aln.readsInRange(*window, usePbi=True, longest=True)
reads1 = list(reads1)
reads2 = list(reads2)
assert len(reads1) == len(reads2)
reads1 = sorted(reads1, key=lengthInWindow, reverse=True)
for r1, r2 in zip(reads1, reads2):
assert r1 == r2
log.debug("Testing longest sort vs pbi")
aln = AlignmentSet(testFile)
reads1 = aln.readsInRange(*window, usePbi=True)
reads2 = aln.readsInRange(*window, usePbi=True, longest=True)
reads1 = list(reads1)
reads2 = list(reads2)
assert len(reads1) == len(reads2)
reads1 = sorted(reads1, key=lengthInWindow, reverse=True)
for r1, r2 in zip(reads1, reads2):
assert r1 == r2
# TODO: get this working again when adding manual subdatasets is good to go
@pytest.mark.skip(reason="broken")
def test_reads_in_subdataset(self):
ds = DataSet(data.getXml(8))
#refs = ['E.faecalis.1', 'E.faecalis.2']
#readRefs = ['E.faecalis.1'] * 2 + ['E.faecalis.2'] * 9
# ds.filters.removeRequirement('rname')
dss = list(ds.split(contigs=True))
assert len(dss) == 12
assert ['B.vulgatus.4', 'B.vulgatus.5',
'C.beijerinckii.13', 'C.beijerinckii.14',
'C.beijerinckii.9', 'E.coli.6', 'E.faecalis.1',
'E.faecalis.2', 'R.sphaeroides.1',
'S.epidermidis.2', 'S.epidermidis.3',
'S.epidermidis.4'] == sorted([ds.filters[0][0].value for ds in dss])
assert len(list(dss[0].readsInSubDatasets())) == 3
assert len(list(dss[1].readsInSubDatasets())) == 20
#ds2 = DataSet(data.getXml(13))
# ds2._makePerContigSubDatasets()
#assert sorted([read.referenceName for read in ds2.readsInSubDatasets()]) == sorted(readRefs)
#ds3 = DataSet(data.getXml(13))
#assert len(list(ds3.readsInSubDatasets())) == 2
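# intervalContour gives per-base coverage along a reference; summing it over
# the (possibly clipped) window should match the total aligned target length
# (tEnd - tStart), which is what the assertions below check.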
def test_intervalContour(self):
ds = AlignmentSet(data.getBam(0))
coverage = ds.intervalContour('E.faecalis.1')
ds.filters.addRequirement(rname=[('=', 'E.faecalis.1')])
# regular totalLength uses aEnd/aStart, which includes insertions
totalTargetLength = sum(ds.index.tEnd - ds.index.tStart)
assert totalTargetLength == sum(coverage)
# partial interval
ds = AlignmentSet(data.getBam(0))
coverage = ds.intervalContour('E.faecalis.1', tStart=100, tEnd=500)
ds.filters.addRequirement(rname=[('=', 'E.faecalis.1')],
tStart=[('<', '500')],
tEnd=[('>', '100')])
# regular totalLength uses aEnd/aStart, which includes insertions
ends = ds.index.tEnd
post = ends > 500
ends[post] = 500
starts = ds.index.tStart
pre = starts < 100
starts[pre] = 100
totalTargetLength = sum(ends - starts)
assert totalTargetLength == sum(coverage)
# test a second reference in this set
ds.filters.removeRequirement('rname')
coverage = ds.intervalContour('E.faecalis.2')
ds.filters.addRequirement(rname=[('=', 'E.faecalis.2')])
totalTargetLength = sum(ds.index.tEnd - ds.index.tStart)
assert totalTargetLength == sum(coverage)
# partial interval
ds = AlignmentSet(data.getBam(0))
coverage = ds.intervalContour('E.faecalis.2', tStart=100, tEnd=500)
ds.filters.addRequirement(rname=[('=', 'E.faecalis.2')],
tStart=[('<', '500')],
tEnd=[('>', '100')])
# regular totalLength uses aEnd/aStart, which includes insertions
ends = ds.index.tEnd
post = ends > 500
ends[post] = 500
starts = ds.index.tStart
pre = starts < 100
starts[pre] = 100
totalTargetLength = sum(ends - starts)
assert totalTargetLength == sum(coverage)
def test_refLengths(self):
ds = AlignmentSet(data.getBam(0))
random_few = {'B.cereus.6': 1472, 'S.agalactiae.1': 1470,
'B.cereus.4': 1472}
for (key, value) in random_few.items():
assert ds.refLengths[key] == value
# this is a hack to only emit refNames that actually have records
# associated with them:
dss = list(ds.split(contigs=True, chunks=1))[0]
assert dss.refLengths == {
'B.vulgatus.4': 1449,
'B.vulgatus.5': 1449,
'C.beijerinckii.13': 1433,
'C.beijerinckii.14': 1433,
'C.beijerinckii.9': 1433,
'E.coli.6': 1463,
'E.faecalis.1': 1482,
'E.faecalis.2': 1482,
'R.sphaeroides.1': 1386,
'S.epidermidis.2': 1472,
'S.epidermidis.3': 1472,
'S.epidermidis.4': 1472,
}
def test_reads_in_contig(self):
log.info("Testing reads in contigs")
ds = AlignmentSet(data.getXml(7))
dss = list(ds.split(contigs=True))
assert len(dss) == 12
efaec1TimesFound = 0
efaec1TotFound = 0
efaec2TimesFound = 0
efaec2TotFound = 0
for ds in dss:
ef1 = len(list(ds.readsInReference('E.faecalis.1')))
ef2 = len(list(ds.readsInReference('E.faecalis.2')))
if ef1:
efaec1TimesFound += 1
efaec1TotFound += ef1
if ef2:
efaec2TimesFound += 1
efaec2TotFound += ef2
assert efaec1TimesFound == 1
assert efaec1TotFound == 20
assert efaec2TimesFound == 1
assert efaec2TotFound == 3
ds = AlignmentSet(data.getXml(7))
filt = Filters()
filt.addRequirement(length=[('>', '100')])
ds.addFilters(filt)
dss = list(ds.split(contigs=True))
assert len(dss) == 12
efaec1TimesFound = 0
efaec1TotFound = 0
efaec2TimesFound = 0
efaec2TotFound = 0
for ds in dss:
ef1 = len(list(ds.readsInReference('E.faecalis.1')))
ef2 = len(list(ds.readsInReference('E.faecalis.2')))
if ef1:
efaec1TimesFound += 1
efaec1TotFound += ef1
if ef2:
efaec2TimesFound += 1
efaec2TotFound += ef2
assert efaec1TimesFound == 1
assert efaec1TotFound == 20
assert efaec2TimesFound == 1
assert efaec2TotFound == 3
ds = AlignmentSet(data.getXml(7))
filt = Filters()
filt.addRequirement(length=[('>', '1000')])
ds.addFilters(filt)
dss = list(ds.split(contigs=True))
assert len(dss) == 9
efaec1TimesFound = 0
efaec1TotFound = 0
efaec2TimesFound = 0
efaec2TotFound = 0
for ds in dss:
ef1 = len(list(ds.readsInReference('E.faecalis.1')))
ef2 = len(list(ds.readsInReference('E.faecalis.2')))
if ef1:
efaec1TimesFound += 1
efaec1TotFound += ef1
if ef2:
efaec2TimesFound += 1
efaec2TotFound += ef2
assert efaec1TimesFound == 1
assert efaec1TotFound == 20
assert efaec2TimesFound == 1
assert efaec2TotFound == 1
def test_get_item(self):
# Indexed files only for now:
# XXX Reactivate subreadsets when pbindex works for them
#toTest = [8, 10, 11, 12, 13, 15, 16]
toTest = [7, 10, 11, 14, 15]
for fileNo in toTest:
aln = openDataSet(data.getXml(fileNo))
items1 = [aln[i] for i in range(len(aln))]
aln = openDataSet(data.getXml(fileNo))
items2 = [aln[i] for i in range(len(aln))]
assert items1 == items2
aln = openDataSet(data.getXml(fileNo))
for i, item in enumerate(aln):
assert item == aln[i]
@pytest.mark.constools
def test_induce_indices(self):
# all of our test files are indexed. Copy just the main files to a temp
# location, open as dataset, assert unindexed, open with
# generateIndices=True, assert indexed
toTest = [8, 9, 10, 11, 12, 14, 15]
for fileNo in toTest:
outdir = tempfile.mkdtemp(suffix="dataset-unittest")
orig_dset = openDataSet(data.getXml(fileNo))
resfnames = orig_dset.toExternalFiles()
new_resfnames = []
for fname in resfnames:
newfname = os.path.join(outdir, os.path.basename(fname))
shutil.copy(fname, newfname)
new_resfnames.append(newfname)
dset = type(orig_dset)(*new_resfnames)
assert not dset.isIndexed
dset = type(orig_dset)(*new_resfnames, generateIndices=True)
assert dset.isIndexed
def test_reads_in_reference(self):
ds = AlignmentSet(data.getBam())
refNames = ds.refNames
# See test_ref_names for why this is expected:
rn = refNames[15]
reads = ds.readsInReference(rn)
assert len(list(reads)) == 11
ds2 = AlignmentSet(data.getBam(0))
reads = ds2.readsInReference("E.faecalis.1")
assert len(list(reads)) == 20
reads = ds2.readsInReference("E.faecalis.2")
assert len(list(reads)) == 3
ds2 = AlignmentSet(data.getXml(7))
reads = ds2.readsInReference("E.faecalis.1")
assert len(list(reads)) == 20
ds2.filters.addRequirement(rname=[('=', 'E.faecalis.1')])
# Because of the filter!
reads = ds2.readsInReference("E.faecalis.2")
assert len(list(reads)) == 0
def test_staggered_reads_in_range(self):
ds = AlignmentSet(data.getXml(7))
refNames = ds.refNames
rn = 'B.vulgatus.5'
reads = list(ds.readsInRange(rn, 0, 10000))
ds2 = AlignmentSet(data.getXml(10))
reads2 = list(ds2.readsInRange(rn, 0, 10000))
dsBoth = AlignmentSet(data.getXml(7), data.getXml(10))
readsBoth = list(dsBoth.readsInRange(rn, 0, 10000))
readsBothNoPbi = list(dsBoth.readsInRange(rn, 0, 10000, usePbi=False))
assert readsBoth == readsBothNoPbi
assert len(reads) == 2
assert len(reads2) == 5
assert len(readsBoth) == 7
read_starts = (0, 1053)
for read, start in zip(reads, read_starts):
assert read.tStart == start
read2_starts = (0, 0, 3, 3, 4)
for read, start in zip(reads2, read2_starts):
assert read.tStart == start
readboth_starts = (0, 0, 0, 3, 3, 4, 1053)
for read, start in zip(readsBoth, readboth_starts):
assert read.tStart == start
def test_referenceInfo(self):
aln = AlignmentSet(data.getBam(0))
readers = aln.resourceReaders()
assert len(readers[0].referenceInfoTable) == 59
obstbl = readers[0].referenceInfo('E.faecalis.1')
exptbl = np.rec.fromrecords(list(zip(
[27],
[27],
['E.faecalis.1'],
['E.faecalis.1'],
[1482],
np.zeros(1, dtype=np.uint32),
np.zeros(1, dtype=np.uint32))),
dtype=[
('ID', '<i8'),
('RefInfoID', '<i8'),
('Name', 'O'),
('FullName', 'O'),
('Length', '<i8'),
('StartRow', '<u4'),
('EndRow', '<u4')])
assert obstbl == exptbl
# TODO: add a bam with a different referenceInfoTable to check merging
# and id remapping:
#assert str(aln.referenceInfo('E.faecalis.1')) == "(27, 27, 'E.faecalis.1', 'E.faecalis.1', 1482, 0, 0)"
# TODO: turn this back on when a bam with a different referenceInfoTable is
# found
@pytest.mark.skip(reason="broken")
def test_referenceInfoTable(self):
aln = AlignmentSet(data.getBam(0), data.getBam(1), data.getBam(2))
readers = aln.resourceReaders()
assert len(readers[0].referenceInfoTable) == 1
assert len(readers[1].referenceInfoTable) == 59
assert len(readers[2].referenceInfoTable) == 1
assert readers[0].referenceInfoTable.Name == readers[2].referenceInfoTable.Name
assert len(aln.referenceInfoTable) == 60
# TODO: turn this back on when a bam with a different referenceInfoTable is
# found
@pytest.mark.skip(reason="broken")
def test_readGroupTable(self):
aln = AlignmentSet(data.getBam(0), data.getBam(1), data.getBam(2))
readers = aln.resourceReaders()
assert len(readers[0].readGroupTable) == 1
assert len(readers[1].readGroupTable) == 1
assert len(readers[2].readGroupTable) == 1
assert len(aln.readGroupTable) == 3
assert "BaseFeatures" in aln.readGroupTable.dtype.fields
def test_missing_file(self):
with pytest.raises(IOError):
aln = AlignmentSet("NOPE")
def test_repr(self):
ds = DataSet(data.getBam())
rep = str(ds)
assert re.search('DataSet', rep)
assert re.search('uuid:', rep)
assert re.search('pbalchemysim0.pbalign.bam', rep)
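# When merging summary stats, a readLenDist with binWidth 0 (i.e. empty)
# should be ignored in favor of the non-empty side, and merging two empty
# dists should simply stay empty; the combinations below exercise this.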
def test_stats_metadata_zero_binwidth(self):
# both zero
ds1 = DataSet(data.getXml(8))
ds1.loadStats(data.getStats())
ds2 = DataSet(data.getXml(10))
ds2.loadStats(data.getStats())
ds1.metadata.summaryStats.readLenDist.bins = (
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
assert ds1.metadata.summaryStats.readLenDist.bins == [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
ds1.metadata.summaryStats.readLenDist.minBinValue = 0
ds1.metadata.summaryStats.readLenDist.binWidth = 0
ds2.metadata.summaryStats.readLenDist.bins = (
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
assert ds2.metadata.summaryStats.readLenDist.bins == [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
ds2.metadata.summaryStats.readLenDist.minBinValue = 0
ds2.metadata.summaryStats.readLenDist.binWidth = 0
ds3 = ds1 + ds2
assert len(ds3.metadata.summaryStats.readLenDists) == 1
assert ds3.metadata.summaryStats.readLenDist.bins == [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# one zero
ds1 = DataSet(data.getXml(8))
ds1.loadStats(data.getStats())
ds2 = DataSet(data.getXml(10))
ds2.loadStats(data.getStats())
ds1.metadata.summaryStats.readLenDist.bins = (
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
assert ds1.metadata.summaryStats.readLenDist.bins == [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
ds1.metadata.summaryStats.readLenDist.minBinValue = 0
ds1.metadata.summaryStats.readLenDist.binWidth = 0
ds2.metadata.summaryStats.readLenDist.bins = (
[0, 10, 9, 8, 7, 6, 4, 2, 1, 0, 0, 1])
assert ds2.metadata.summaryStats.readLenDist.bins == [
0, 10, 9, 8, 7, 6, 4, 2, 1, 0, 0, 1]
ds2.metadata.summaryStats.readLenDist.minBinValue = 20
ds2.metadata.summaryStats.readLenDist.binWidth = 10
ds3 = ds1 + ds2
assert len(ds3.metadata.summaryStats.readLenDists) == 1
assert ds3.metadata.summaryStats.readLenDist.bins == [
0, 10, 9, 8, 7, 6, 4, 2, 1, 0, 0, 1]
# other zero
ds1 = DataSet(data.getXml(8))
ds1.loadStats(data.getStats())
ds2 = DataSet(data.getXml(10))
ds2.loadStats(data.getStats())
ds1.metadata.summaryStats.readLenDist.bins = (
[0, 10, 9, 8, 7, 6, 4, 2, 1, 0, 0, 1])
assert ds1.metadata.summaryStats.readLenDist.bins == [
0, 10, 9, 8, 7, 6, 4, 2, 1, 0, 0, 1]
ds1.metadata.summaryStats.readLenDist.minBinValue = 10
ds1.metadata.summaryStats.readLenDist.binWidth = 10
ds2.metadata.summaryStats.readLenDist.bins = (
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
assert ds2.metadata.summaryStats.readLenDist.bins == [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
ds2.metadata.summaryStats.readLenDist.minBinValue = 0
ds2.metadata.summaryStats.readLenDist.binWidth = 0
ds3 = ds1 + ds2
assert len(ds3.metadata.summaryStats.readLenDists) == 1
assert ds3.metadata.summaryStats.readLenDist.bins == [
0, 10, 9, 8, 7, 6, 4, 2, 1, 0, 0, 1]
# one zero more zero
ds1 = DataSet(data.getXml(8))
ds1.loadStats(data.getStats())
ds2 = DataSet(data.getXml(10))
ds2.loadStats(data.getStats())
ds3 = DataSet(data.getXml(10))
ds3.loadStats(data.getStats())
ds1.metadata.summaryStats.readLenDist.bins = (
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
assert ds1.metadata.summaryStats.readLenDist.bins == [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
ds1.metadata.summaryStats.readLenDist.minBinValue = 0
ds1.metadata.summaryStats.readLenDist.binWidth = 0
ds2.metadata.summaryStats.readLenDist.bins = (
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1])
assert ds2.metadata.summaryStats.readLenDist.bins == [
0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1]
ds2.metadata.summaryStats.readLenDist.minBinValue = 20
ds2.metadata.summaryStats.readLenDist.binWidth = 10
ds3.metadata.summaryStats.readLenDist.bins = (
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
assert ds3.metadata.summaryStats.readLenDist.bins == [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
ds3.metadata.summaryStats.readLenDist.minBinValue = 0
ds3.metadata.summaryStats.readLenDist.binWidth = 0
ds4 = ds1 + ds2 + ds3
assert len(ds4.metadata.summaryStats.readLenDists) == 1
assert ds4.metadata.summaryStats.readLenDist.bins == [
0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1]
# other zero
ds1 = DataSet(data.getXml(8))
ds1.loadStats(data.getStats())
ds2 = DataSet(data.getXml(10))
ds2.loadStats(data.getStats())
ds3 = DataSet(data.getXml(10))
ds3.loadStats(data.getStats())
ds1.metadata.summaryStats.readLenDist.bins = (
[0, 10, 9, 8, 7, 6, 4, 2, 1, 0, 0, 1])
assert ds1.metadata.summaryStats.readLenDist.bins == [
0, 10, 9, 8, 7, 6, 4, 2, 1, 0, 0, 1]
ds1.metadata.summaryStats.readLenDist.minBinValue = 10
ds1.metadata.summaryStats.readLenDist.binWidth = 10
ds2.metadata.summaryStats.readLenDist.bins = (
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
assert ds2.metadata.summaryStats.readLenDist.bins == [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
ds2.metadata.summaryStats.readLenDist.minBinValue = 0
ds2.metadata.summaryStats.readLenDist.binWidth = 0
ds3.metadata.summaryStats.readLenDist.bins = (
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
assert ds3.metadata.summaryStats.readLenDist.bins == [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
ds3.metadata.summaryStats.readLenDist.minBinValue = 0
ds3.metadata.summaryStats.readLenDist.binWidth = 0
ds4 = ds1 + ds2 + ds3
assert len(ds4.metadata.summaryStats.readLenDists) == 1
assert ds4.metadata.summaryStats.readLenDist.bins == [
0, 10, 9, 8, 7, 6, 4, 2, 1, 0, 0, 1]
def test_multi_channel_dists(self):
ds = DataSet(data.getBam())
ds.loadStats(data.getStats())
ds2 = DataSet(data.getBam())
ds2.loadStats(data.getStats())
assert list(ds.metadata.summaryStats.findChildren('BaselineLevelDist'))
assert ds.metadata.summaryStats.channelDists
assert ds.metadata.summaryStats.otherDists
assert ds.metadata.summaryStats.otherDists['PausinessDist']
assert ds.metadata.summaryStats.channelDists['HqBasPkMidDist']['G']
# merge two
ds3 = ds + ds2
# unmerged dists should increase in length:
assert len(ds3.metadata.summaryStats.channelDists['HqBasPkMidDist']['G']) == 2 * len(
ds.metadata.summaryStats.channelDists['HqBasPkMidDist']['G'])
assert len(ds3.metadata.summaryStats.otherDists['PausinessDist']) == 2 * len(
ds.metadata.summaryStats.otherDists['PausinessDist'])
# merged dists should not:
assert len(ds3.metadata.summaryStats.readLenDist) == len(
ds.metadata.summaryStats.readLenDist)
def test_distribution_name_accessor(self):
exp = ['MovieName', 'MovieLength', 'NumFramesDropped',
'NumSequencingZmws', 'TraceFileSize', 'PulseFileSize',
'BaseFileSize', 'AdapterDimerFraction', 'ShortInsertFraction',
'IsReadsFraction', 'FailedZmwClippedLowFraction',
'FailedZmwClippedHighFraction', 'ProdDist', 'ReadTypeDist',
'TotalBaseFractionPerChannel', 'TotalBaseFractionPerChannel',
'TotalBaseFractionPerChannel', 'TotalBaseFractionPerChannel',
'PkMidCVPerChannel', 'PkMidCVPerChannel', 'PkMidCVPerChannel',
'PkMidCVPerChannel', 'BaselineLevelDist', 'BaselineLevelDist',
'BaselineLevelDist', 'BaselineLevelDist', 'BaselineStdDist',
'BaselineStdDist', 'BaselineStdDist', 'BaselineStdDist',
'MovieReadQualDist', 'PulseRateDist', 'PulseWidthDist',
'BaseRateDist', 'BaseWidthDist', 'BaseIpdDist',
'LocalBaseRateDist', 'NumUnfilteredBasecallsDist', 'ReadLenDist',
'ReadQualDist', 'HqBaseFractionDist', 'RmBasQvDist',
'InsertReadLenDist', 'InsertReadQualDist', 'LocalYieldDist',
'LocalSnrDist', 'LocalSnrDist', 'LocalSnrDist', 'LocalSnrDist',
'TraceClippedFractionDist', 'TraceClippedFractionDist',
'TraceClippedFractionDist', 'TraceClippedFractionDist',
'TraceLowClippedFractionDist', 'TraceLowClippedFractionDist',
'TraceLowClippedFractionDist', 'TraceLowClippedFractionDist',
'TraceHighClippedFractionDist', 'TraceHighClippedFractionDist',
'TraceHighClippedFractionDist', 'TraceHighClippedFractionDist',
'PausinessDist', 'MedianInsertDist', 'SnrDist', 'SnrDist',
'SnrDist', 'SnrDist', 'HqRegionSnrDist', 'HqRegionSnrDist',
'HqRegionSnrDist', 'HqRegionSnrDist', 'HqBasPkMidDist',
'HqBasPkMidDist', 'HqBasPkMidDist', 'HqBasPkMidDist',
'BaselineLevelSequencingDist', 'BaselineLevelSequencingDist',
'BaselineLevelSequencingDist', 'BaselineLevelSequencingDist',
'BaselineLevelAntiholeDist', 'BaselineLevelAntiholeDist',
'BaselineLevelAntiholeDist', 'BaselineLevelAntiholeDist',
'BaselineLevelAntimirrorDist', 'BaselineLevelAntimirrorDist',
'BaselineLevelAntimirrorDist', 'BaselineLevelAntimirrorDist',
'BaselineLevelFiducialDist', 'BaselineLevelFiducialDist',
'BaselineLevelFiducialDist', 'BaselineLevelFiducialDist',
'SpectralDiagRRDist', 'SpectralDiagRRDist', 'SpectralDiagRRDist',
'SpectralDiagRRDist', 'MaxPauseFractionVsT', 'TMaxPauseFraction',
'MaxSlopePauseFractionVsT', 'TMaxSlopePauseFraction',
'MaxBaseRateRatioVsT', 'TMaxBaseRateRatio',
'MaxSlopeBaseRateRatioVsT', 'TMaxSlopeBaseRateRatio',
'SgnMaxSlopeBaseRateRatio', 'BaseRateChngStrtToEnd',
'YieldCvOverRegions', 'YieldChngCntrToEdge',
'SnrRatioEdgeToCntr_0', 'SnrRatioEdgeToCntr_2', 'PauseFractionVsT',
'BaseRateRatioVsT']
ds = DataSet(data.getBam())
ds.loadStats(data.getStats())
assert ds.metadata.summaryStats.availableDists() == exp
def test_distribution_accessors(self):
ds = DataSet(data.getBam())
ds.loadStats(data.getStats())
dist = ds.metadata.summaryStats.getDist('HqBaseFractionDist')
assert 0.8369355201721191 == pytest.approx(dist[0].sampleMean)
assert isinstance(dist[0], ContinuousDistribution)
dist = ds.metadata.summaryStats.getDist('NumUnfilteredBasecallsDist')
assert 5481.8447265625 == pytest.approx(dist[0].sampleMean)
dist = ds.metadata.summaryStats.getDist('NumUnfilteredBasecallsDist')
assert 5481.8447265625 == pytest.approx(dist[0].sampleMean)
dist = ds.metadata.summaryStats.getDist('ProdDist')
assert isinstance(dist, DiscreteDistribution)
dist = ds.metadata.summaryStats.getDist('ProdDist', unwrap=False)
assert isinstance(dist[0], DiscreteDistribution)
dist = ds.metadata.summaryStats.getDist('BaselineLevelDist')
assert isinstance(dist['A'][0], ContinuousDistribution)
dist = ds.metadata.summaryStats.getDist('BaselineLevelDist',
unwrap=False)
assert isinstance(dist['A'][0], ContinuousDistribution)
# merge two
ds2 = DataSet(data.getBam())
ds2.loadStats(data.getStats())
ds3 = ds + ds2
# should be unmerged
dist = ds3.metadata.summaryStats.getDist('HqBaseFractionDist')
assert 0.8369355201721191 == pytest.approx(dist[0].sampleMean)
assert isinstance(dist[0], ContinuousDistribution)
# should be merged
dist = ds3.metadata.summaryStats.getDist('ProdDist')
assert isinstance(dist, DiscreteDistribution)
# should be unmerged channel
dist = ds3.metadata.summaryStats.getDist('BaselineLevelDist')
assert isinstance(dist['A'][0], ContinuousDistribution)
# should be same as above (unmerged channel)
dist = ds3.metadata.summaryStats.getDist('BaselineLevelDist',
unwrap=False)
assert isinstance(dist['A'][0], ContinuousDistribution)
# what about subdatasets?
ds4 = SubreadSet(data.getBam())
ds5 = SubreadSet(data.getBam())
ds4.externalResources[0].sts = data.getStats()
ds5.externalResources[0].sts = data.getStats()
ds6 = ds4 + ds5
# but what happens when we write it out and read it again?
fn = tempfile.NamedTemporaryFile(suffix=".subreadset.xml")
ds6.write(fn.name)
ds6re = SubreadSet(fn.name)
dist = ds6re.metadata.summaryStats.getDist('ProdDist')
assert isinstance(dist, DiscreteDistribution)
dist = ds6re.subdatasets[0].metadata.summaryStats.getDist('ProdDist')
# it is empty: subdataset summaryStats aren't populated automatically
assert dist is None
# load them manually:
ds6re.loadStats()
# now the subdataset summaryStats are populated:
dist = ds6re.subdatasets[0].metadata.summaryStats.getDist('ProdDist')
assert isinstance(dist, DiscreteDistribution)
# let's just make sure the metadata object is the correct type:
assert isinstance(ds6re.subdatasets[0].metadata, SubreadSetMetadata)
fn.close()
def test_new_distribution(self):
ds = DataSet(data.getBam())
ds.loadStats(data.getStats())
dist = ds.metadata.summaryStats.getDist('ReadLenDist')
assert isinstance(dist, ContinuousDistribution)
assert 4528.69384765625 == pytest.approx(dist.sampleMean)
dist = ds.metadata.summaryStats['HqBaseFractionDist']
assert isinstance(dist[0], ContinuousDistribution)
assert 0.8369355201721191 == pytest.approx(dist[0].sampleMean)
def test_stats_metadata(self):
ds = DataSet(data.getBam())
ds.loadStats(data.getStats())
assert ds.metadata.summaryStats.prodDist.numBins == 4
assert ds.metadata.summaryStats.prodDist.bins == [1576, 901, 399, 0]
ds1 = DataSet(data.getXml(8))
ds1.loadStats(data.getStats())
ds2 = DataSet(data.getXml(10))
ds2.loadStats(data.getStats())
ds3 = ds1 + ds2
assert ds1.metadata.summaryStats.prodDist.bins == [1576, 901, 399, 0]
assert ds2.metadata.summaryStats.prodDist.bins == [1576, 901, 399, 0]
assert ds3.metadata.summaryStats.prodDist.bins == [3152, 1802, 798, 0]
assert ds1.metadata.summaryStats.readLenDist.bins == [
0, 62, 39, 36, 29, 37, 19, 29, 37, 32, 32, 40, 45,
54, 73, 77, 97, 95, 49, 17, 2, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
assert ds2.metadata.summaryStats.readLenDist.bins == [
0, 62, 39, 36, 29, 37, 19, 29, 37, 32, 32, 40, 45,
54, 73, 77, 97, 95, 49, 17, 2, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
assert ds3.metadata.summaryStats.readLenDist.bins == [
0, 124, 78, 72, 58, 74, 38, 58, 74, 64, 64, 80, 90,
108, 146, 154, 194, 190, 98, 34, 4, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0]
assert ds3.metadata.summaryStats.readLenDist.sampleSize == (
ds1.metadata.summaryStats.readLenDist.sampleSize +
ds2.metadata.summaryStats.readLenDist.sampleSize)
        # Let's check some manually constructed values
ds1 = DataSet(data.getXml(8))
ds1.loadStats(data.getStats())
ds2 = DataSet(data.getXml(10))
ds2.loadStats(data.getStats())
ds1.metadata.summaryStats.readLenDist.bins = (
[0, 10, 9, 8, 7, 6, 4, 2, 1, 0, 0, 1])
assert ds1.metadata.summaryStats.readLenDist.bins == [
0, 10, 9, 8, 7, 6, 4, 2, 1, 0, 0, 1]
ds1.metadata.summaryStats.readLenDist.minBinValue = 10
ds1.metadata.summaryStats.readLenDist.binWidth = 10
ds2.metadata.summaryStats.readLenDist.bins = (
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1])
assert ds2.metadata.summaryStats.readLenDist.bins == [
0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1]
ds2.metadata.summaryStats.readLenDist.minBinValue = 20
ds2.metadata.summaryStats.readLenDist.binWidth = 10
assert ds1.metadata.summaryStats.readLenDist.sampleStd == 2322.805559802698
assert ds2.metadata.summaryStats.readLenDist.sampleStd == 2322.805559802698
assert 4528.69384766 == pytest.approx(
ds1.metadata.summaryStats.readLenDist.sampleMean)
assert 4528.69384766 == pytest.approx(
ds2.metadata.summaryStats.readLenDist.sampleMean)
assert ds1.metadata.summaryStats.readLenDist.sampleSize == 901
assert ds2.metadata.summaryStats.readLenDist.sampleSize == 901
ds3 = ds1 + ds2
assert ds3.metadata.summaryStats.readLenDist.bins == [
0, 10, 10, 9, 8, 7, 5, 3, 2, 1, 0, 1, 1]
assert ds1.metadata.summaryStats.readLenDist.sampleSize == 901
assert ds2.metadata.summaryStats.readLenDist.sampleSize == 901
assert ds3.metadata.summaryStats.readLenDist.sampleSize == (
ds1.metadata.summaryStats.readLenDist.sampleSize +
ds2.metadata.summaryStats.readLenDist.sampleSize)
assert 4528.69384766 == pytest.approx(
ds1.metadata.summaryStats.readLenDist.sampleMean)
assert 4528.69384766 == pytest.approx(
ds2.metadata.summaryStats.readLenDist.sampleMean)
assert 4528.69384766 == pytest.approx(
ds3.metadata.summaryStats.readLenDist.sampleMean)
assert 2322.805559802698 == pytest.approx(
ds1.metadata.summaryStats.readLenDist.sampleStd)
assert 2322.805559802698 == pytest.approx(
ds2.metadata.summaryStats.readLenDist.sampleStd)
assert 2322.16060475 == pytest.approx(
ds3.metadata.summaryStats.readLenDist.sampleStd)
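        # A self-contained cross-check (not part of pbcore's API) that the
        # merged sampleStd asserted above is the (n - 1)-corrected pooled
        # standard deviation of the two component summaries:
        def _pooled_std(n1, m1, s1, n2, m2, s2):
            mean = (n1 * m1 + n2 * m2) / (n1 + n2)
            total_ss = ((n1 - 1) * s1 ** 2 + n1 * (m1 - mean) ** 2 +
                        (n2 - 1) * s2 ** 2 + n2 * (m2 - mean) ** 2)
            return (total_ss / (n1 + n2 - 1)) ** 0.5
        assert 2322.16060475 == pytest.approx(
            _pooled_std(901, 4528.69384766, 2322.805559802698,
                        901, 4528.69384766, 2322.805559802698))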
        # the merged median and percentiles are recomputed from the bins, not
        # carried over from the previously stored summary values:
assert ds3.metadata.summaryStats.readLenDist.sampleMed == 45
assert 105.0 == pytest.approx(
ds3.metadata.summaryStats.readLenDist.sample95thPct)
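        # A minimal sketch (not pbcore API) of how those two values fall out
        # of the merged bins alone, assuming the bin-midpoint convention:
        def _bin_percentile(bins, min_bin_value, bin_width, q):
            target = q * sum(bins)
            cumulative = 0
            for i, count in enumerate(bins):
                cumulative += count
                if cumulative >= target:
                    return min_bin_value + (i + 0.5) * bin_width
        merged_bins = [0, 10, 10, 9, 8, 7, 5, 3, 2, 1, 0, 1, 1]
        assert _bin_percentile(merged_bins, 10, 10, 0.50) == 45
        assert _bin_percentile(merged_bins, 10, 10, 0.95) == 105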
        # now let's swap the bin offsets
ds1 = DataSet(data.getXml(8))
ds1.loadStats(data.getStats())
ds2 = DataSet(data.getXml(10))
ds2.loadStats(data.getStats())
ds1.metadata.summaryStats.readLenDist.bins = (
[0, 10, 9, 8, 7, 6, 4, 2, 1, 0, 0, 1])
assert ds1.metadata.summaryStats.readLenDist.bins == [
0, 10, 9, 8, 7, 6, 4, 2, 1, 0, 0, 1]
ds1.metadata.summaryStats.readLenDist.minBinValue = 20
ds1.metadata.summaryStats.readLenDist.binWidth = 10
ds2.metadata.summaryStats.readLenDist.bins = (
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1])
assert ds2.metadata.summaryStats.readLenDist.bins == [
0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1]
ds2.metadata.summaryStats.readLenDist.minBinValue = 10
ds2.metadata.summaryStats.readLenDist.binWidth = 10
ds3 = ds1 + ds2
assert ds3.metadata.summaryStats.readLenDist.bins == [
0, 1, 11, 10, 9, 8, 7, 5, 3, 1, 0, 1, 1]
        # now let's try non-overlapping bin ranges
ds1 = DataSet(data.getXml(8))
ds1.loadStats(data.getStats())
ds2 = DataSet(data.getXml(10))
ds2.loadStats(data.getStats())
ds1.metadata.summaryStats.readLenDist.bins = (
[1, 1, 1])
assert ds1.metadata.summaryStats.readLenDist.bins == [1, 1, 1]
ds1.metadata.summaryStats.readLenDist.minBinValue = 10
ds1.metadata.summaryStats.readLenDist.binWidth = 10
ds2.metadata.summaryStats.readLenDist.bins = (
[2, 2, 2])
assert ds2.metadata.summaryStats.readLenDist.bins == [2, 2, 2]
ds2.metadata.summaryStats.readLenDist.minBinValue = 50
ds2.metadata.summaryStats.readLenDist.binWidth = 10
ds3 = ds1 + ds2
assert ds3.metadata.summaryStats.readLenDist.bins == [
1, 1, 1, 0, 2, 2, 2]
assert ds3.metadata.summaryStats.readLenDist.sampleMed == 55
assert ds3.metadata.summaryStats.readLenDist.sample95thPct == 75
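        # A stripped-down sketch (not pbcore's implementation) of the bin
        # alignment both merges above rely on: with a shared binWidth, the
        # later-starting distribution is offset onto the common grid and any
        # gap between the ranges stays zero-filled (assumes min1 <= min2):
        def _merge_bins(bins1, min1, bins2, min2, width):
            offset = (min2 - min1) // width
            merged = [0] * max(len(bins1), offset + len(bins2))
            for i, count in enumerate(bins1):
                merged[i] += count
            for i, count in enumerate(bins2):
                merged[offset + i] += count
            return merged
        assert _merge_bins([1, 1, 1], 10, [2, 2, 2], 50, 10) == [
            1, 1, 1, 0, 2, 2, 2]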
        # Subdataset metadata retention would be tested here, but the test is
        # disabled for performance reasons.
        # TODO: make this fast again, then re-enable; copying that much
        # metadata was prohibitively slow.
#ss = SubreadSet(data.getXml(10))
# ss.loadStats(data.getStats(0))
# ss.loadStats(data.getStats(1))
#assert 153168.0 == ss.metadata.summaryStats.numSequencingZmws
#assert 2876.0 == ss.subdatasets[0].metadata.summaryStats.numSequencingZmws
#assert 150292.0 == ss.subdatasets[1].metadata.summaryStats.numSequencingZmws
@pytest.mark.internal_data
def test_two_bam(self):
cmp1 = ("/pbi/dept/secondary/siv/testdata/SA3-RS/ecoli/"
"2590953/0001/Alignment_Results/"
"m140913_005018_42139_c100713652400000001823152"
"404301534_s1_p0.1.aligned.bam")
cmp2 = ("/pbi/dept/secondary/siv/testdata/SA3-RS/ecoli/"
"2590953/0001/Alignment_Results/"
"m140913_005018_42139_c100713652400000001823152"
"404301534_s1_p0.2.aligned.bam")
len1 = len(AlignmentSet(cmp1))
len2 = len(AlignmentSet(cmp2))
aln = AlignmentSet(cmp1, cmp2)
len3 = len(aln)
assert len1 + len2 == len3
assert len3 == 65346
obstbl = aln.referenceInfoTable
exptbl = np.rec.fromrecords(list(zip(
[0],
[0],
['ecoliK12_pbi_March2013'],
['ecoliK12_pbi_March2013'],
[4642522],
np.zeros(1, dtype=np.uint32),
np.zeros(1, dtype=np.uint32))),
dtype=[
('ID', '<i8'),
('RefInfoID', '<i8'),
('Name', 'O'),
('FullName', 'O'),
('Length', '<i8'),
('StartRow', '<u4'),
('EndRow', '<u4')])
assert obstbl == exptbl
assert set(aln.tId) == {0}
assert aln.referenceInfo(
'ecoliK12_pbi_March2013') == aln.referenceInfo(0)
@pytest.mark.internal_data
def test_two_xml(self):
cmp1 = ("/pbi/dept/secondary/siv/testdata/"
"SA3-DS/ecoli/2590953/0001/Alignment_Results/"
"m140913_005018_42139_c1007136524000000018231"
"52404301534_s1_p0.all.alignmentset.xml")
cmp2 = ("/pbi/dept/secondary/siv/testdata/"
"SA3-DS/ecoli/2590956/0003/Alignment_Results/"
"m140913_222218_42240_c1006999524000000018231"
"39203261564_s1_p0.all.alignmentset.xml")
len1 = len(AlignmentSet(cmp1))
len2 = len(AlignmentSet(cmp2))
aln = AlignmentSet(cmp1, cmp2)
len3 = len(aln)
assert len1 + len2 == len3
assert len3 == 160264
obstbl = aln.referenceInfoTable
exptbl = np.rec.fromrecords(list(zip(
[0],
[0],
['ecoliK12_pbi_March2013'],
['ecoliK12_pbi_March2013'],
[4642522],
np.zeros(1, dtype=np.uint32),
np.zeros(1, dtype=np.uint32))),
dtype=[
('ID', '<i8'),
('RefInfoID', '<i8'),
('Name', 'O'),
('FullName', 'O'),
('Length', '<i8'),
('StartRow', '<u4'),
('EndRow', '<u4')])
assert obstbl == exptbl
assert set(aln.tId) == {0}
assert aln.referenceInfo(
'ecoliK12_pbi_March2013') == aln.referenceInfo(0)
def assertListOfTuplesEqual(self, obslot, explot):
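        # elementwise helper: comparing numpy record rows one field at a time
        # avoids the ambiguous truth value of whole-array comparisons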
assert len(obslot) == len(explot)
for obs, exp in zip(obslot, explot):
for o, e in zip(obs, exp):
assert o == e
@pytest.mark.internal_data
def test_two_ref_bam(self):
cmp1 = upstreamdata.getAlignedBam()
        # this is supposedly the same data as above:
cmp2 = ("/pbi/dept/secondary/siv/testdata/"
"SA3-DS/ecoli/2590956/0003/Alignment_Results/"
"m140913_222218_42240_c1006999524000000018231"
"39203261564_s1_p0.all.alignmentset.xml")
len1 = len(AlignmentSet(cmp1))
len2 = len(AlignmentSet(cmp2))
aln = AlignmentSet(cmp1, cmp2)
len3 = len(aln)
assert len1 + len2 == len3
assert len3 == 57344
# TODO(mdsmith)(2016-01-25) I would like to be able to use the startrow
# and endrow fields for bams someday...
obstbl = aln.referenceInfoTable
exptbl0 = np.rec.fromrecords(list(zip(
[0],
[0],
['ecoliK12_pbi_March2013'],
['ecoliK12_pbi_March2013'],
[4642522],
np.zeros(1, dtype=np.uint32),
np.zeros(1, dtype=np.uint32))),
dtype=[
('ID', '<i8'),
('RefInfoID', '<i8'),
('Name', 'O'),
('FullName', 'O'),
('Length', '<i8'),
('StartRow', '<u4'),
('EndRow', '<u4')])
exptbl1 = np.rec.fromrecords(list(zip(
[1],
[1],
['lambda_NEB3011'],
['lambda_NEB3011'],
[48502],
np.zeros(1, dtype=np.uint32),
np.zeros(1, dtype=np.uint32))),
dtype=[
('ID', '<i8'),
('RefInfoID', '<i8'),
('Name', 'O'),
('FullName', 'O'),
('Length', '<i8'),
('StartRow', '<u4'),
('EndRow', '<u4')])
assert obstbl[0] == exptbl0
assert obstbl[1] == exptbl1
assert set(aln.tId) == {0, 1}
assert aln.referenceInfo(
'ecoliK12_pbi_March2013') == aln.referenceInfo(0)
assert aln.referenceInfo('lambda_NEB3011') == aln.referenceInfo(1)
@pytest.mark.internal_data
def test_two_ref_three_bam(self):
        # Here we test whether duplicate references remain duplicates or are
        # collapsed when the merged reference sets are not identical
cmp1 = upstreamdata.getAlignedBam()
        # this is supposedly the same data as above:
cmp2 = ("/pbi/dept/secondary/siv/testdata/"
"SA3-DS/ecoli/2590956/0003/Alignment_Results/"
"m140913_222218_42240_c1006999524000000018231"
"39203261564_s1_p0.all.alignmentset.xml")
cmp3 = ("/pbi/dept/secondary/siv/testdata/"
"SA3-DS/ecoli/2590953/0001/Alignment_Results/"
"m140913_005018_42139_c1007136524000000018231"
"52404301534_s1_p0.all.alignmentset.xml")
len1 = len(AlignmentSet(cmp1))
len2 = len(AlignmentSet(cmp2))
len3 = len(AlignmentSet(cmp3))
aln = AlignmentSet(cmp1, cmp2, cmp3)
len4 = len(aln)
assert len1 + len2 + len3 == len4
assert len4 == 160376
obstbl = aln.referenceInfoTable
exptbl0 = np.rec.fromrecords(list(zip(
[0],
[0],
['ecoliK12_pbi_March2013'],
['ecoliK12_pbi_March2013'],
[4642522],
np.zeros(1, dtype=np.uint32),
np.zeros(1, dtype=np.uint32))),
dtype=[
('ID', '<i8'),
('RefInfoID', '<i8'),
('Name', 'O'),
('FullName', 'O'),
('Length', '<i8'),
('StartRow', '<u4'),
('EndRow', '<u4')])
exptbl1 = np.rec.fromrecords(list(zip(
[1],
[1],
['lambda_NEB3011'],
['lambda_NEB3011'],
[48502],
np.zeros(1, dtype=np.uint32),
np.zeros(1, dtype=np.uint32))),
dtype=[
('ID', '<i8'),
('RefInfoID', '<i8'),
('Name', 'O'),
('FullName', 'O'),
('Length', '<i8'),
('StartRow', '<u4'),
('EndRow', '<u4')])
assert obstbl[0] == exptbl0
assert obstbl[1] == exptbl1
assert set(aln.tId) == {0, 1}
assert aln.referenceInfo(
'ecoliK12_pbi_March2013') == aln.referenceInfo(0)
assert aln.referenceInfo('lambda_NEB3011') == aln.referenceInfo(1)
def test_exceptions(self):
with pytest.raises(InvalidDataSetIOError) as e:
raise InvalidDataSetIOError("Wrong!")
assert 'Wrong!' in str(e.value)
def test_createdAt(self):
aln = AlignmentSet(data.getXml(7))
assert aln.createdAt == '2015-08-05T10:25:18'
@pytest.mark.internal_data
def test_load_sts_from_extres(self):
        # testdata has no subreadset.xml with an sts.xml already loaded,
        # so fabricate one here:
ss = SubreadSet(data.getXml(9))
ss.externalResources[0].sts = ('/pbi/dept/secondary/siv/testdata/'
'SA3-Sequel/lambda/roche_SAT/'
'm54013_151205_032353.sts.xml')
outdir = tempfile.mkdtemp(suffix="dataset-unittest")
outXml = os.path.join(outdir, 'tempfile.xml')
ss.write(outXml)
ss = SubreadSet(outXml)
assert ss.metadata.summaryStats
# test validation on write with loaded stats:
outXml = os.path.join(outdir, 'tempfileWithStats.xml')
ss.write(outXml, validate=False)
ss.write(outXml)
@pytest.mark.internal_data
def test_fixed_bin_sts(self):
        # testdata has no subreadset.xml with an sts.xml already loaded,
        # so fabricate one here:
ss = SubreadSet(data.getXml(9))
ss.externalResources[0].sts = ('/pbi/dept/secondary/siv/testdata/'
'pbreports-unittest/data/sts_xml/'
'3120134-r54009_20160323_173308-'
'1_A01-Bug30772/m54009_160323_'
'173323.sts.xml')
outdir = tempfile.mkdtemp(suffix="dataset-unittest")
outXml = os.path.join(outdir, 'tempfile.xml')
outXml2 = os.path.join(outdir, 'tempfile2.xml')
ss.write(outXml)
ss.write(outXml2)
ss = SubreadSet(outXml)
ss2 = SubreadSet(outXml2)
ss3 = ss + ss2
assert ss3.metadata.summaryStats.readLenDist.bins == [
b1 + b2 for b1, b2 in
zip(ss.metadata.summaryStats.readLenDist.bins,
ss2.metadata.summaryStats.readLenDist.bins)]
        # smoke tests: the merged insert-read distributions should still be
        # accessible
ss3.metadata.summaryStats.insertReadLenDists
ss3.metadata.summaryStats.insertReadQualDists
@pytest.mark.internal_data
def test_reduced_sts_merging(self):
        # testdata has no subreadset.xml with an sts.xml already loaded,
        # so fabricate one here:
full = ('/pbi/dept/secondary/siv/testdata/'
'pbreports-unittest/data/sts_xml/'
'3120134-r54009_20160323_173308-'
'1_A01-Bug30772/m54009_160323_'
'173323.sts.xml')
partial = ('/pbi/dept/secondary/siv/testdata/'
'pbreports-unittest/data/sts_xml/'
'32246/m54026_160402_062929.sts.xml')
# two partial
ss = SubreadSet(data.getXml(9))
ss.externalResources[0].sts = partial
outdir = tempfile.mkdtemp(suffix="dataset-unittest")
outXml = os.path.join(outdir, 'tempfile.xml')
outXml2 = os.path.join(outdir, 'tempfile2.xml')
ss.write(outXml)
ss.write(outXml2)
ss = SubreadSet(outXml)
ss2 = SubreadSet(outXml2)
ss3 = ss + ss2
assert ss3.metadata.summaryStats.readLenDist.bins == [
b1 + b2 for b1, b2 in
zip(ss.metadata.summaryStats.readLenDist.bins,
ss2.metadata.summaryStats.readLenDist.bins)]
ss4 = SubreadSet(outXml, outXml2)
# one partial one full
outdir = tempfile.mkdtemp(suffix="dataset-unittest")
outXml = os.path.join(outdir, 'tempfile.xml')
outXml2 = os.path.join(outdir, 'tempfile2.xml')
ss = SubreadSet(data.getXml(9))
ss.externalResources[0].sts = partial
ss.write(outXml)
ss.externalResources[0].sts = full
ss.write(outXml2)
ss = SubreadSet(outXml)
ss2 = SubreadSet(outXml2)
ss3 = ss + ss2
assert ss3.metadata.summaryStats.readLenDist.bins == [
b1 + b2 for b1, b2 in
zip_longest(
ss.metadata.summaryStats.readLenDist.bins,
ss2.metadata.summaryStats.readLenDist.bins,
fillvalue=0)]
ss4 = SubreadSet(outXml, outXml2)
# one full one partial
outdir = tempfile.mkdtemp(suffix="dataset-unittest")
outXml = os.path.join(outdir, 'tempfile.xml')
outXml2 = os.path.join(outdir, 'tempfile2.xml')
ss = SubreadSet(data.getXml(9))
ss.externalResources[0].sts = full
ss.write(outXml)
ss.externalResources[0].sts = partial
ss.write(outXml2)
ss = SubreadSet(outXml)
ss2 = SubreadSet(outXml2)
ss3 = ss + ss2
assert ss3.metadata.summaryStats.readLenDist.bins == [
b1 + b2 for b1, b2 in
zip_longest(
ss.metadata.summaryStats.readLenDist.bins,
ss2.metadata.summaryStats.readLenDist.bins,
fillvalue=0)]
ss4 = SubreadSet(outXml, outXml2)
# two full
outdir = tempfile.mkdtemp(suffix="dataset-unittest")
outXml = os.path.join(outdir, 'tempfile.xml')
outXml2 = os.path.join(outdir, 'tempfile2.xml')
ss = SubreadSet(data.getXml(9))
ss.externalResources[0].sts = full
ss.write(outXml)
ss.write(outXml2)
ss = SubreadSet(outXml)
ss2 = SubreadSet(outXml2)
ss3 = ss + ss2
assert ss3.metadata.summaryStats.readLenDist.bins == [
b1 + b2 for b1, b2 in
zip(ss.metadata.summaryStats.readLenDist.bins,
ss2.metadata.summaryStats.readLenDist.bins)]
ss4 = SubreadSet(outXml, outXml2)
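    def test_zip_longest_bin_merge_sketch(self):
        # A minimal illustration (hypothetical bin counts, not real sts data)
        # of why the partial/full merges above use zip_longest with
        # fillvalue=0: a reduced sts.xml carries fewer bins than a full one,
        # and plain zip would silently truncate the longer distribution.
        short_bins = [1, 2]
        full_bins = [1, 2, 3, 4]
        summed = [b1 + b2 for b1, b2 in
                  zip_longest(short_bins, full_bins, fillvalue=0)]
        assert summed == [2, 4, 3, 4]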
@pytest.mark.internal_data
def test_missing_extres(self):
        # relativize a dataset's paths so they break, then rescue the
        # ResourceIds
test_file = ('/pbi/dept/secondary/siv/testdata/'
'SA3-Sequel/lambda/roche_SAT/'
'm54013_151205_032353.subreadset.xml')
outdir = tempfile.mkdtemp(suffix="dataset-unittest")
outXml = os.path.join(outdir, 'tempfile.xml')
resXml = os.path.join(outdir, 'tempfile.rescued.xml')
sset = SubreadSet(test_file)
# record the original paths:
path_map = {}
        def recorder(x, m=path_map):
            return m.setdefault(os.path.split(x)[1], x)
sset._changePaths(recorder)
# make the paths relative and write out dataset with all missing:
sset.makePathsRelative(os.path.dirname(test_file))
sset.write(outXml, validate=False)
# check that it is broken:
with pytest.raises(InvalidDataSetIOError):
sset = SubreadSet(outXml)
# check that rescuing fixes it:
        def replacer(x, m=path_map):
            return m[x]
sset = SubreadSet(outXml, skipMissing=True)
sset._changePaths(replacer)
sset.write(resXml, validate=False)
sset = SubreadSet(resXml)
# check that removing any one breaks it:
for key in path_map:
mod_pmap = path_map.copy()
# remove a resourceId from the map:
mod_pmap.pop(key)
log.debug(key)
            # use dict.get so the removed entry stays broken:
            def replacer(x, m=mod_pmap):
                return m.get(x, x)
sset = SubreadSet(outXml, skipMissing=True)
sset._changePaths(replacer)
sset.write(resXml, validate=False)
with pytest.raises(InvalidDataSetIOError):
sset = SubreadSet(resXml)
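    def test_path_mapper_idiom_sketch(self):
        # A stripped-down sketch of the recorder/replacer idiom used above: a
        # default-argument dict lets a plain function serve as a stateful
        # callback for APIs (like _changePaths) that expect a one-argument
        # path mapper. The paths here are hypothetical.
        path_map = {}
        def recorder(x, m=path_map):
            return m.setdefault(os.path.split(x)[1], x)
        def replacer(x, m=path_map):
            return m.get(x, x)
        assert recorder('/abs/dir/movie.bam') == '/abs/dir/movie.bam'
        assert replacer('movie.bam') == '/abs/dir/movie.bam'
        assert replacer('unknown.bam') == 'unknown.bam'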
def test_opening(self):
""" Test whether relativizing paths is working. If your subdataset
objects contain the same external resource objects as your dataset, and
you make everything relative, paths will be relativized twice, making
them invalid. """
ifn1 = data.getXml(7)
ifn2 = data.getXml(10)
outdir = tempfile.mkdtemp(suffix="dataset-unittest")
ofn = os.path.join(outdir, 'test.alignmentset.xml')
log.info(ofn)
aset = AlignmentSet(ifn1, ifn2)
aset.write(ofn, validate=True,
relPaths=True)
naset = AlignmentSet(ofn)
@pytest.mark.internal_data
def test_length_0_bam_records(self):
ds_file1 = ('/pbi/dept/secondary/siv/testdata/SA3-Sequel/ecoli/'
'EmptyRecords/m54043_180414_094215.subreadset.xml')
ds1 = SubreadSet(ds_file1, strict=True)
scraps = IndexedBamReader(ds1.externalResources[0].scraps)
found = False
for read in scraps:
if len(read.read(aligned=False)) == 0:
found = True
assert found
def test_load_mock_collection_metadata(self):
md = loadMockCollectionMetadata()
assert md.wellSample.name == "unknown"
def test_supplemental_resources(self):
ds_file1 = op.join(op.dirname(__file__), "data",
"supplemental_resources.consensusreadset.xml")
ds_file2 = op.join(op.dirname(__file__), "data",
"supplemental_resources2.consensusreadset.xml")
ds1 = ConsensusReadSet(ds_file1, strict=True)
ds2 = ConsensusReadSet(ds_file2, strict=True)
assert len(ds1.supplementalResources) == 1
assert ds1.supplementalResources[0].metaType == "PacBio.FileTypes.txt"
assert op.isabs(ds1.supplementalResources[0].resourceId)
assert op.basename(ds1.supplementalResources[0].resourceId) == "report.txt"
ds3 = ds1.merge(ds2)
assert len(ds3.supplementalResources) == 2
# write/read recycle
tmp_file = tempfile.NamedTemporaryFile(suffix=".consensusreadset.xml").name
ds3.write(tmp_file)
ds4 = ConsensusReadSet(tmp_file)
assert len(ds4.supplementalResources) == 2
        # XXX this also modifies ds3, which shares the same resource objects!
ds2.makePathsRelative(op.dirname(ds_file2))
assert ds2.supplementalResources[0].resourceId == "report2.txt"
# add supplemental resource to dataset that didn't have any
ds5 = ConsensusReadSet(ds1.externalResources[0].bam)
ds5.supplementalResources.append(ds1.supplementalResources[0])
ds5.write(tmp_file)
ds6 = ConsensusReadSet(tmp_file)
assert len(ds6.supplementalResources) == 1
| [
"pbcore.data.getEmptyAlignedBam",
"os.remove",
"pbtestdata.get_file",
"os.path.islink",
"pbcore.data.getEmptyBam",
"pbcore.io.dataset.DataSetMetaTypes.InvalidDataSetIOError",
"shutil.rmtree",
"pytest.mark.skip",
"os.path.join",
"pbcore.data.datasets.getXml",
"subprocess.check_call",
"numpy.uni... | [((1207, 1234), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1224, 1234), False, 'import logging\n'), ((1381, 1401), 'os.path.basename', 'os.path.basename', (['fn'], {}), '(fn)\n', (1397, 1401), False, 'import os\n'), ((39435, 39475), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Too expensive"""'}), "(reason='Too expensive')\n", (39451, 39475), False, 'import pytest\n'), ((53076, 53109), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""broken"""'}), "(reason='broken')\n", (53092, 53109), False, 'import pytest\n'), ((64098, 64131), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""broken"""'}), "(reason='broken')\n", (64114, 64131), False, 'import pytest\n'), ((64688, 64721), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""broken"""'}), "(reason='broken')\n", (64704, 64721), False, 'import pytest\n'), ((1785, 1798), 'pbcore.data.datasets.getBam', 'data.getBam', ([], {}), '()\n', (1796, 1798), True, 'import pbcore.data.datasets as data\n'), ((1849, 1863), 'pbcore.io.DataSet', 'DataSet', (['inBam'], {}), '(inBam)\n', (1856, 1863), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((2084, 2127), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'suffix': '"""dataset-unittest"""'}), "(suffix='dataset-unittest')\n", (2100, 2127), False, 'import tempfile\n'), ((2145, 2181), 'os.path.join', 'os.path.join', (['outdir', '"""tempfile.xml"""'], {}), "(outdir, 'tempfile.xml')\n", (2157, 2181), False, 'import os\n'), ((2430, 2445), 'pbcore.io.DataSet', 'DataSet', (['outXml'], {}), '(outXml)\n', (2437, 2445), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((3491, 3500), 'pbcore.io.DataSet', 'DataSet', ([], {}), '()\n', (3498, 3500), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((5697, 5750), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (5724, 5750), False, 'import tempfile\n'), ((5765, 5818), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (5792, 5818), False, 'import tempfile\n'), ((5892, 5924), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['fn1.name', 'fn2.name'], {}), '(fn1.name, fn2.name)\n', (5904, 5924), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((6376, 6429), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (6403, 6429), False, 'import tempfile\n'), ((6444, 6497), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (6471, 6497), False, 'import tempfile\n'), ((6571, 6603), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['fn1.name', 'fn2.name'], {}), '(fn1.name, fn2.name)\n', (6583, 6603), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((7053, 7106), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (7080, 7106), False, 'import 
tempfile\n'), ((7121, 7174), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (7148, 7174), False, 'import tempfile\n'), ((7248, 7280), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['fn1.name', 'fn2.name'], {}), '(fn1.name, fn2.name)\n', (7260, 7280), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((7726, 7779), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (7753, 7779), False, 'import tempfile\n'), ((7794, 7847), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (7821, 7847), False, 'import tempfile\n'), ((7946, 7968), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['fn1.name'], {}), '(fn1.name)\n', (7958, 7968), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((8019, 8051), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['fn1.name', 'fn2.name'], {}), '(fn1.name, fn2.name)\n', (8031, 8051), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((8806, 8859), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (8833, 8859), False, 'import tempfile\n'), ((8874, 8927), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (8901, 8927), False, 'import tempfile\n'), ((9001, 9033), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['fn1.name', 'fn2.name'], {}), '(fn1.name, fn2.name)\n', (9013, 9033), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((9499, 9552), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (9526, 9552), False, 'import tempfile\n'), ((9567, 9620), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (9594, 9620), False, 'import tempfile\n'), ((9694, 9726), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['fn1.name', 'fn2.name'], {}), '(fn1.name, fn2.name)\n', (9706, 9726), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((10178, 10231), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (10205, 10231), False, 'import tempfile\n'), ((10246, 10299), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (10273, 10299), False, 'import tempfile\n'), ((10373, 10405), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['fn1.name', 'fn2.name'], {}), '(fn1.name, fn2.name)\n', (10385, 10405), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((10852, 10905), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (10879, 10905), False, 'import tempfile\n'), 
((10920, 10973), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (10947, 10973), False, 'import tempfile\n'), ((11072, 11094), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['fn1.name'], {}), '(fn1.name)\n', (11084, 11094), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((11149, 11181), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['fn1.name', 'fn2.name'], {}), '(fn1.name, fn2.name)\n', (11161, 11181), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((11669, 11722), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (11696, 11722), False, 'import tempfile\n'), ((11737, 11790), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (11764, 11790), False, 'import tempfile\n'), ((11889, 11911), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['fn1.name'], {}), '(fn1.name)\n', (11901, 11911), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((11966, 11998), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['fn1.name', 'fn2.name'], {}), '(fn1.name, fn2.name)\n', (11978, 11998), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((15973, 15986), 'pbcore.data.datasets.getBam', 'data.getBam', ([], {}), '()\n', (15984, 15986), True, 'import pbcore.data.datasets as data\n'), ((15999, 16013), 'pbcore.io.DataSet', 'DataSet', (['inBam'], {}), '(inBam)\n', (16006, 16013), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((16153, 16166), 'pbcore.data.datasets.getBam', 'data.getBam', ([], {}), '()\n', (16164, 16166), True, 'import pbcore.data.datasets as data\n'), ((16179, 16198), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['inBam'], {}), '(inBam)\n', (16191, 16198), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((18822, 18865), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'suffix': '"""dataset-unittest"""'}), "(suffix='dataset-unittest')\n", (18838, 18865), False, 'import tempfile\n'), ((18947, 18976), 'os.path.join', 'os.path.join', (['outdir', 'outfile'], {}), '(outdir, outfile)\n', (18959, 18976), False, 'import os\n'), ((19128, 19160), 'os.path.join', 'os.path.join', (['outdir', 'alnoutfile'], {}), '(outdir, alnoutfile)\n', (19140, 19160), False, 'import os\n'), ((19243, 19262), 'pbcore.io.SubreadSet', 'SubreadSet', (['outpath'], {}), '(outpath)\n', (19253, 19262), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((19624, 19653), 'pbcore.io.SubreadSet', 'SubreadSet', (['outpath', 'full_bam'], {}), '(outpath, full_bam)\n', (19634, 19653), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((20057, 20081), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['alnoutpath'], {}), '(alnoutpath)\n', (20069, 20081), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, 
AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((20854, 20879), 'pbcore.io.ConsensusReadSet', 'ConsensusReadSet', (['outpath'], {}), '(outpath)\n', (20870, 20879), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((21081, 21114), 'pbcore.io.ConsensusAlignmentSet', 'ConsensusAlignmentSet', (['alnoutpath'], {}), '(alnoutpath)\n', (21102, 21114), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((21344, 21377), 'pbcore.io.ConsensusAlignmentSet', 'ConsensusAlignmentSet', (['alnoutpath'], {}), '(alnoutpath)\n', (21365, 21377), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((21686, 21712), 'pbcore.data.getEmptyBam', 'upstreamdata.getEmptyBam', ([], {}), '()\n', (21710, 21712), True, 'import pbcore.data as upstreamdata\n'), ((21728, 21749), 'pbcore.io.SubreadSet', 'SubreadSet', (['empty_bam'], {}), '(empty_bam)\n', (21738, 21749), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((21766, 21794), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (21774, 21794), True, 'import numpy as np\n'), ((21959, 21996), 'numpy.array_equal', 'np.array_equal', (['sset.index.qId', 'empty'], {}), '(sset.index.qId, empty)\n', (21973, 21996), True, 'import numpy as np\n'), ((22091, 22121), 'pbcore.data.getUnalignedBam', 'upstreamdata.getUnalignedBam', ([], {}), '()\n', (22119, 22121), True, 'import pbcore.data as upstreamdata\n'), ((22137, 22168), 'pbcore.io.SubreadSet', 'SubreadSet', (['empty_bam', 'full_bam'], {}), '(empty_bam, full_bam)\n', (22147, 22168), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((22376, 22419), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'suffix': '"""dataset unittest"""'}), "(suffix='dataset unittest')\n", (22392, 22419), False, 'import tempfile\n'), ((22434, 22479), 'os.path.join', 'os.path.join', (['outdir', '"""spaced.subreadset.xml"""'], {}), "(outdir, 'spaced.subreadset.xml')\n", (22446, 22479), False, 'import os\n'), ((22569, 22597), 'pbcore.io.SubreadSet', 'SubreadSet', (['ofn'], {'strict': '(True)'}), '(ofn, strict=True)\n', (22579, 22597), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((23139, 23167), 'pbcore.io.SubreadSet', 'SubreadSet', (['ofn'], {'strict': '(True)'}), '(ofn, strict=True)\n', (23149, 23167), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((23176, 23197), 'shutil.rmtree', 'shutil.rmtree', (['outdir'], {}), '(outdir)\n', (23189, 23197), False, 'import shutil\n'), ((23329, 23354), 'pbcore.data.datasets.getEmptyAlignedBam', 'data.getEmptyAlignedBam', ([], {}), '()\n', (23352, 23354), True, 'import pbcore.data.datasets as data\n'), ((23373, 23396), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['empty_bam'], {}), '(empty_bam)\n', (23385, 23396), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((23413, 23441), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (23421, 
23441), True, 'import numpy as np\n'), ((23705, 23739), 'numpy.array_equal', 'np.array_equal', (['alnFile.tId', 'empty'], {}), '(alnFile.tId, empty)\n', (23719, 23739), True, 'import numpy as np\n'), ((23755, 23795), 'numpy.array_equal', 'np.array_equal', (['alnFile.index.tId', 'empty'], {}), '(alnFile.index.tId, empty)\n', (23769, 23795), True, 'import numpy as np\n'), ((23864, 23892), 'pbcore.data.getAlignedBam', 'upstreamdata.getAlignedBam', ([], {}), '()\n', (23890, 23892), True, 'import pbcore.data as upstreamdata\n'), ((23908, 23941), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['empty_bam', 'full_bam'], {}), '(empty_bam, full_bam)\n', (23920, 23941), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((24271, 24299), 'pbcore.data.getAlignedBam', 'upstreamdata.getAlignedBam', ([], {}), '()\n', (24297, 24299), True, 'import pbcore.data as upstreamdata\n'), ((24320, 24345), 'pbcore.data.datasets.getEmptyAlignedBam', 'data.getEmptyAlignedBam', ([], {}), '()\n', (24343, 24345), True, 'import pbcore.data.datasets as data\n'), ((26334, 26348), 'pbcore.data.datasets.getXml', 'data.getXml', (['(7)'], {}), '(7)\n', (26345, 26348), True, 'import pbcore.data.datasets as data\n'), ((26363, 26377), 'pbcore.data.datasets.getXml', 'data.getXml', (['(8)'], {}), '(8)\n', (26374, 26377), True, 'import pbcore.data.datasets as data\n'), ((26392, 26406), 'pbcore.data.datasets.getXml', 'data.getXml', (['(9)'], {}), '(9)\n', (26403, 26406), True, 'import pbcore.data.datasets as data\n'), ((26975, 26989), 'pbcore.data.datasets.getXml', 'data.getXml', (['(7)'], {}), '(7)\n', (26986, 26989), True, 'import pbcore.data.datasets as data\n'), ((27004, 27018), 'pbcore.data.datasets.getXml', 'data.getXml', (['(8)'], {}), '(8)\n', (27015, 27018), True, 'import pbcore.data.datasets as data\n'), ((27033, 27047), 'pbcore.data.datasets.getXml', 'data.getXml', (['(9)'], {}), '(9)\n', (27044, 27047), True, 'import pbcore.data.datasets as data\n'), ((27693, 27706), 'pbcore.data.datasets.getBam', 'data.getBam', ([], {}), '()\n', (27704, 27706), True, 'import pbcore.data.datasets as data\n'), ((27782, 27808), 'pbcore.io.DataSet', 'DataSet', (['bam'], {'strict': '(False)'}), '(bam, strict=False)\n', (27789, 27808), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((27817, 27845), 'pbcore.io.DataSet', 'DataSet', (['fasta'], {'strict': '(False)'}), '(fasta, strict=False)\n', (27824, 27845), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((28011, 28041), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['bam'], {'strict': '(True)'}), '(bam, strict=True)\n', (28023, 28041), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((28135, 28167), 'pbcore.io.ReferenceSet', 'ReferenceSet', (['fasta'], {'strict': '(True)'}), '(fasta, strict=True)\n', (28147, 28167), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((28370, 28384), 'pbcore.data.datasets.getBam', 'data.getBam', (['(0)'], {}), '(0)\n', (28381, 28384), True, 'import pbcore.data.datasets as data\n'), ((28402, 28445), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'suffix': '"""dataset-unittest"""'}), "(suffix='dataset-unittest')\n", (28418, 28445), False, 
'import tempfile\n'), ((28523, 28573), 'subprocess.check_call', 'subprocess.check_call', (["['cp', data_fname, tempout]"], {}), "(['cp', data_fname, tempout])\n", (28544, 28573), False, 'import subprocess\n'), ((28588, 28623), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['tempout'], {'strict': '(False)'}), '(tempout, strict=False)\n', (28600, 28623), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((29025, 29051), 'pbcore.io.IndexedBamReader', 'IndexedBamReader', (['testFile'], {}), '(testFile)\n', (29041, 29051), False, 'from pbcore.io import PacBioBamIndex, IndexedBamReader\n'), ((29066, 29099), 'pbcore.io.PacBioBamIndex', 'PacBioBamIndex', (["(testFile + '.pbi')"], {}), "(testFile + '.pbi')\n", (29080, 29099), False, 'from pbcore.io import PacBioBamIndex, IndexedBamReader\n'), ((29329, 29349), 'pbcore.io.SubreadSet', 'SubreadSet', (['testFile'], {}), '(testFile)\n', (29339, 29349), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((33377, 33401), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['bam1', 'bam2'], {}), '(bam1, bam2)\n', (33389, 33401), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((33595, 33671), 'functools.reduce', 'reduce', (['np.append', "[reader.referenceInfoTable['Name'] for reader in readers]"], {}), "(np.append, [reader.referenceInfoTable['Name'] for reader in readers])\n", (33601, 33671), False, 'from functools import partial, reduce\n'), ((33744, 33763), 'numpy.unique', 'np.unique', (['expNames'], {}), '(expNames)\n', (33753, 33763), True, 'import numpy as np\n'), ((33872, 33957), 'functools.reduce', 'reduce', (['np.append', "[reader.referenceInfoTable['FullName'] for reader in readers]"], {}), "(np.append, [reader.referenceInfoTable['FullName'] for reader in readers]\n )\n", (33878, 33957), False, 'from functools import partial, reduce\n'), ((34025, 34044), 'numpy.unique', 'np.unique', (['expNames'], {}), '(expNames)\n', (34034, 34044), True, 'import numpy as np\n'), ((34959, 34968), 'pbcore.io.DataSet', 'DataSet', ([], {}), '()\n', (34966, 34968), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((35095, 35104), 'pbcore.io.DataSet', 'DataSet', ([], {}), '()\n', (35102, 35104), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((35653, 35667), 'pbcore.data.datasets.getXml', 'data.getXml', (['(9)'], {}), '(9)\n', (35664, 35667), True, 'import pbcore.data.datasets as data\n'), ((35681, 35701), 'pbcore.io.openDataSet', 'openDataSet', (['fn_orig'], {}), '(fn_orig)\n', (35692, 35701), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((35716, 35769), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (35743, 35769), False, 'import tempfile\n'), ((35784, 35837), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (35811, 35837), False, 'import tempfile\n'), ((35906, 35927), 'pbcore.io.openDataSet', 'openDataSet', (['fn1.name'], {}), '(fn1.name)\n', (35917, 35927), False, 'from pbcore.io 
import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((35942, 35963), 'pbcore.io.openDataSet', 'openDataSet', (['fn2.name'], {}), '(fn2.name)\n', (35953, 35963), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((36214, 36228), 'pbcore.data.datasets.getXml', 'data.getXml', (['(9)'], {}), '(9)\n', (36225, 36228), True, 'import pbcore.data.datasets as data\n'), ((36242, 36262), 'pbcore.io.openDataSet', 'openDataSet', (['fn_orig'], {}), '(fn_orig)\n', (36253, 36262), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((36277, 36330), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (36304, 36330), False, 'import tempfile\n'), ((36345, 36398), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".subreadset.xml"""'}), "(suffix='.subreadset.xml')\n", (36372, 36398), False, 'import tempfile\n'), ((36467, 36488), 'pbcore.io.openDataSet', 'openDataSet', (['fn1.name'], {}), '(fn1.name)\n', (36478, 36488), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((36536, 36557), 'pbcore.io.openDataSet', 'openDataSet', (['fn2.name'], {}), '(fn2.name)\n', (36547, 36557), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((36572, 36593), 'pbcore.io.openDataSet', 'openDataSet', (['fn1.name'], {}), '(fn1.name)\n', (36583, 36593), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((37036, 37100), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".alignmentset.xml.disabled"""'}), "(suffix='.alignmentset.xml.disabled')\n", (37063, 37100), False, 'import tempfile\n'), ((38657, 38693), 'pbcore.io.SubreadSet', 'SubreadSet', (['test_file_1', 'test_file_2'], {}), '(test_file_1, test_file_2)\n', (38667, 38693), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((39699, 39720), 'pbcore.io.PacBioBamIndex', 'PacBioBamIndex', (['pbiFn'], {}), '(pbiFn)\n', (39713, 39720), False, 'from pbcore.io import PacBioBamIndex, IndexedBamReader\n'), ((41308, 41351), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'suffix': '"""dataset-unittest"""'}), "(suffix='dataset-unittest')\n", (41324, 41351), False, 'import tempfile\n'), ((41370, 41406), 'os.path.join', 'os.path.join', (['outdir', '"""tempfile.xml"""'], {}), "(outdir, 'tempfile.xml')\n", (41382, 41406), False, 'import os\n'), ((41543, 41564), 'pbcore.io.dataset.DataSetValidator.validateFile', 'validateFile', (['outfile'], {}), '(outfile)\n', (41555, 41564), False, 'from pbcore.io.dataset.DataSetValidator import validateFile\n'), ((41579, 41600), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['outfile'], {}), '(outfile)\n', (41591, 41600), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((41778, 41787), 'pbcore.io.DataSet', 'DataSet', ([], {}), '()\n', (41785, 41787), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, 
ConsensusAlignmentSet\n'), ((42337, 42392), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".alignmentset.xml"""'}), "(suffix='.alignmentset.xml')\n", (42364, 42392), False, 'import tempfile\n'), ((42471, 42505), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['fn.name'], {'strict': '(True)'}), '(fn.name, strict=True)\n', (42483, 42505), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((42577, 42620), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'suffix': '"""dataset-unittest"""'}), "(suffix='dataset-unittest')\n", (42593, 42620), False, 'import tempfile\n'), ((42661, 42706), 'os.path.join', 'os.path.join', (['outdir', '"""test.alignmentset.xml"""'], {}), "(outdir, 'test.alignmentset.xml')\n", (42673, 42706), False, 'import os\n'), ((42779, 42808), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['fn'], {'strict': '(True)'}), '(fn, strict=True)\n', (42791, 42808), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((42972, 43015), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'suffix': '"""dataset-unittest"""'}), "(suffix='dataset-unittest')\n", (42988, 43015), False, 'import tempfile\n'), ((43056, 43101), 'os.path.join', 'os.path.join', (['outdir', '"""test.alignmentset.xml"""'], {}), "(outdir, 'test.alignmentset.xml')\n", (43068, 43101), False, 'import os\n'), ((43222, 43268), 'os.path.join', 'os.path.join', (['outdir', '"""test2.alignmentset.xml"""'], {}), "(outdir, 'test2.alignmentset.xml')\n", (43234, 43268), False, 'import os\n'), ((43307, 43336), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['fn'], {'strict': '(True)'}), '(fn, strict=True)\n', (43319, 43336), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((43352, 43382), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['fn2'], {'strict': '(True)'}), '(fn2, strict=True)\n', (43364, 43382), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((43628, 43670), 'pbtestdata.get_file', 'pbtestdata.get_file', (['"""barcoded-subreadset"""'], {}), "('barcoded-subreadset')\n", (43647, 43670), False, 'import pbtestdata\n'), ((43686, 43728), 'pbtestdata.get_file', 'pbtestdata.get_file', (['"""subreads-unbarcoded"""'], {}), "('subreads-unbarcoded')\n", (43705, 43728), False, 'import pbtestdata\n'), ((43743, 43772), 'pbcore.io.SubreadSet', 'SubreadSet', (['inp1'], {'strict': '(True)'}), '(inp1, strict=True)\n', (43753, 43772), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((43787, 43816), 'pbcore.io.SubreadSet', 'SubreadSet', (['inp2'], {'strict': '(True)'}), '(inp2, strict=True)\n', (43797, 43816), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((43831, 43853), 'pbcore.io.SubreadSet', 'SubreadSet', (['inp1', 'inp2'], {}), '(inp1, inp2)\n', (43841, 43853), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((44239, 44281), 'pbtestdata.get_file', 'pbtestdata.get_file', (['"""barcoded-subreadset"""'], {}), "('barcoded-subreadset')\n", (44258, 44281), False, 'import pbtestdata\n'), ((44295, 44323), 'pbcore.io.SubreadSet', 
'SubreadSet', (['inp'], {'strict': '(True)'}), '(inp, strict=True)\n', (44305, 44323), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((44529, 44571), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".bam"""'}), "(suffix='.bam')\n", (44556, 44571), False, 'import tempfile\n'), ((44730, 44776), 'pbcore.io.dataset.utils.consolidateXml', 'consolidateXml', (['ds', 'tmp_bam.name'], {'cleanup': '(True)'}), '(ds, tmp_bam.name, cleanup=True)\n', (44744, 44776), False, 'from pbcore.io.dataset.utils import consolidateXml\n'), ((45092, 45101), 'pbcore.io.DataSet', 'DataSet', ([], {}), '()\n', (45099, 45101), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((45116, 45134), 'pbcore.io.dataset.DataSetMembers.ExternalResource', 'ExternalResource', ([], {}), '()\n', (45132, 45134), False, 'from pbcore.io.dataset.DataSetMembers import ExternalResource, Filters, ContinuousDistribution, DiscreteDistribution, SubreadSetMetadata\n'), ((45186, 45204), 'pbcore.io.dataset.DataSetMembers.ExternalResource', 'ExternalResource', ([], {}), '()\n', (45202, 45204), False, 'from pbcore.io.dataset.DataSetMembers import ExternalResource, Filters, ContinuousDistribution, DiscreteDistribution, SubreadSetMetadata\n'), ((45256, 45274), 'pbcore.io.dataset.DataSetMembers.ExternalResource', 'ExternalResource', ([], {}), '()\n', (45272, 45274), False, 'from pbcore.io.dataset.DataSetMembers import ExternalResource, Filters, ContinuousDistribution, DiscreteDistribution, SubreadSetMetadata\n'), ((46675, 46699), 'os.path.exists', 'os.path.exists', (['files[0]'], {}), '(files[0])\n', (46689, 46699), False, 'import os\n'), ((46715, 46738), 'os.path.isabs', 'os.path.isabs', (['files[0]'], {}), '(files[0])\n', (46728, 46738), False, 'import os\n'), ((46830, 46854), 'os.path.exists', 'os.path.exists', (['files[0]'], {}), '(files[0])\n', (46844, 46854), False, 'import os\n'), ((46953, 47016), 'pbcore.io.DataSet', 'DataSet', (['"""bam1.bam"""', '"""bam2.bam"""'], {'strict': '(False)', 'skipMissing': '(True)'}), "('bam1.bam', 'bam2.bam', strict=False, skipMissing=True)\n", (46960, 47016), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((47400, 47424), 'os.path.exists', 'os.path.exists', (['files[0]'], {}), '(files[0])\n', (47414, 47424), False, 'import os\n'), ((47440, 47463), 'os.path.isabs', 'os.path.isabs', (['files[0]'], {}), '(files[0])\n', (47453, 47463), False, 'import os\n'), ((50103, 50125), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['testFile'], {}), '(testFile)\n', (50115, 50125), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((50791, 50813), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['testFile'], {}), '(testFile)\n', (50803, 50813), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((51668, 51690), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['testFile'], {}), '(testFile)\n', (51680, 51690), False, 'from pbcore.io import DataSet, SubreadSet, ReferenceSet, AlignmentSet, openDataSet, ConsensusReadSet, ConsensusAlignmentSet\n'), ((52149, 52171), 'pbcore.io.AlignmentSet', 'AlignmentSet', (['testFile'], {}), '(testFile)\n', (52161, 52171), False, 'from pbcore.io 
import os
import numpy as np
from matplotlib import pyplot as plt, animation
from hyperverlet.energy import PendulumEnergy
from hyperverlet.plotting.energy import plot_energy, init_energy_plot, energy_animate_update  # assumed module path for the energy helpers used below
from hyperverlet.plotting.grid_spec import gs_3_2_3, gs_line
from hyperverlet.plotting.phasespace import init_phasespace_plot, update_phasespace_plot
from hyperverlet.plotting.utils import save_animation, create_gt_pred_legends, save_figure
from hyperverlet.utils.measures import print_z_loss
from hyperverlet.utils.misc import load_pickle, format_path
def pendulum_energy_plot(q, p, trajectory, m, length, g, plot_every=1):
    # Detach and trim data
q = q.cpu().detach().numpy()[::plot_every]
p = p.cpu().detach().numpy()[::plot_every]
trajectory = trajectory.cpu().detach().numpy()[::plot_every]
m = m.cpu().detach().numpy()
length = length.cpu().detach().numpy()
# Calculate energy of the system
energy = PendulumEnergy()
ke, pe, te = energy.all_energies(m, q, p, g=g, length=length)
plot_energy(trajectory, te, ke, pe)
def animate_pendulum(config, show_gt=False, show_plot=False, cfg=1):
plot_every = config["plotting"]["plot_every"]
result_path = format_path(config, config["result_path"])
result_dict = load_pickle(result_path)
save_plot = config["plotting"]["save_plot"]
result_dict = result_dict.get("train", result_dict)
#print_qp_mean_loss(result_dict["q"], result_dict["p"], result_dict["gt_q"], result_dict["gt_p"], label='total loss')
#print_qp_mean_loss(result_dict["q"][:, cfg], result_dict["p"][:, cfg], result_dict["gt_q"][:, cfg], result_dict["gt_p"][:, cfg], label='cfg loss')
q = result_dict["q"][::plot_every, cfg]
p = result_dict["p"][::plot_every, cfg]
trajectory = result_dict["trajectory"][::plot_every, cfg]
mass = result_dict["mass"][cfg]
length = result_dict["extra_args"]["length"][cfg]
g = result_dict["extra_args"]["g"][cfg]
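    # q is the pendulum angle measured from the downward vertical, so the
    # bob position in the plot plane is (length*sin(q), -length*cos(q)).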
x = length * np.sin(q)
y = -length * np.cos(q)
# Ground Truth
gt_q = result_dict["gt_q"][::plot_every, cfg]
gt_x = length * np.sin(gt_q)
gt_y = -length * np.cos(gt_q)
# Energy
energy = PendulumEnergy()
ke, pe, te = energy.all_energies(mass, q, p, g=g, length=length)
# Create grid spec
fig = plt.figure(figsize=(20, 15))
ax_pendulum, ax_energy, ax_phase_space = gs_3_2_3(fig)
# Initialize plots
init_pendulum_animate(ax_pendulum, x, length)
pe_plot, ke_plot, te_plot = init_energy_plot(ax_energy, trajectory, te, ke, pe)
ps_plot = init_phasespace_plot(ax_phase_space, q, p)
ax_energy.set_ylim(-0.3 * te.max(), te.max() * 1.05)
line, scatter = init_pendulum(ax_pendulum, x, y, color="red", zorder=1)
if show_gt:
gt_line, gt_scatter = init_pendulum(ax_pendulum, gt_x, gt_y, color="green", s=300, linewidth=2, zorder=2)
def animate(i):
update_pendulum(line, scatter, x, y, i)
if show_gt:
update_pendulum(gt_line, gt_scatter, gt_x, gt_y, i)
energy_animate_update(ax_energy, pe_plot, ke_plot, te_plot, trajectory, i, pe, ke, te)
update_phasespace_plot(ps_plot, q, p, i)
anim = animation.FuncAnimation(fig, animate, frames=q.shape[0], save_count=q.shape[0])
if show_plot:
plt.show()
if save_plot:
save_animation(anim, config)
def pendulum_snapshot(config, cfg=0, slices=6):
result_path = format_path(config, config["result_path"])
result_dict = load_pickle(result_path)
q = result_dict["q"][:, cfg]
gt_q = result_dict["gt_q"][:, cfg]
length = result_dict["extra_args"]["length"][cfg]
trajectory = result_dict["trajectory"][:, cfg]
x = length * np.sin(q)
y = -length * np.cos(q)
gt_x = length * np.sin(gt_q)
gt_y = -length * np.cos(gt_q)
fig = plt.figure(figsize=(20, 15))
ax_pendulums = gs_line(fig, slices)
step_size = (q.shape[0] - 1) // (slices - 1)
cm = ["green", "red"]
legend_handles = create_gt_pred_legends(q, cm)
    for idx, ax_pendulum in enumerate(ax_pendulums[:slices]):
        if idx == 0:
            ax_pendulum.legend(handles=legend_handles, loc='upper left', bbox_to_anchor=(0, 1), ncol=1, fancybox=True, shadow=True)
        index = step_size * idx
label = f"Time {int(trajectory[index])}"
init_pendulum_plot(ax_pendulum, x, length, title=label, xmargin=0.25, ymargin=0.25, set_ylabel=idx==0)
line, scatter = init_pendulum(ax_pendulum, x, y, color=cm[1], zorder=1)
gt_line, gt_scatter = init_pendulum(ax_pendulum, gt_x, gt_y, color=cm[0], s=300, linewidth=2, zorder=2)
update_pendulum(line, scatter, x, y, index)
update_pendulum(gt_line, gt_scatter, gt_x, gt_y, index)
config_name = config["train_args_path"].split('/')[-2]
solver_name = config["model_args"]["solver"]
save_figure(f"visualization/{config_name}", solver_name)
def update_pendulum(line, scatter, x, y, i):
x = x[i, 0]
y = y[i, 0]
line.set_data([0, x], [0, y])
scatter.set_offsets(np.array([x, y]))
def init_pendulum_plot(ax, x, length, xmargin=1.2, ymargin=1.05, title="Pendulum experiment", set_ylabel=True):
ax.set_title(title)
ax.set_xlabel('X')
if set_ylabel:
ax.set_ylabel('Y')
else:
ax.get_yaxis().set_visible(False)
ax.set_aspect('equal')
ax.set_xlim([-length - xmargin, length + xmargin])
ax.set_ylim([-length - ymargin, length + ymargin])
def init_pendulum_animate(ax, x, length, xmargin=1.2, ymargin=1.05, title="Pendulum experiment"):
ax.set_title(title)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_aspect('equal')
ax.set_xlim([x.min() * xmargin, x.max() * xmargin])
ax.set_ylim([-length * ymargin, length * (ymargin - 1)])
def init_pendulum(ax, x, y, color='red', s=500, linewidth=4, zorder=1):
x = x[0, 0]
y = y[0, 0]
lines = ax.plot([0, x], [0, y], linewidth=linewidth, color="k", zorder=zorder)
scatter = ax.scatter(x, y, color=color, marker='o', s=s, zorder=zorder + 1)
return lines[0], scatter
| [
"hyperverlet.plotting.utils.create_gt_pred_legends",
"hyperverlet.utils.misc.load_pickle",
"matplotlib.pyplot.show",
"hyperverlet.plotting.utils.save_figure",
"hyperverlet.plotting.grid_spec.gs_3_2_3",
"hyperverlet.plotting.phasespace.update_phasespace_plot",
"hyperverlet.plotting.utils.save_animation",... | [((866, 882), 'hyperverlet.energy.PendulumEnergy', 'PendulumEnergy', ([], {}), '()\n', (880, 882), False, 'from hyperverlet.energy import PendulumEnergy\n'), ((1129, 1171), 'hyperverlet.utils.misc.format_path', 'format_path', (['config', "config['result_path']"], {}), "(config, config['result_path'])\n", (1140, 1171), False, 'from hyperverlet.utils.misc import load_pickle, format_path\n'), ((1190, 1214), 'hyperverlet.utils.misc.load_pickle', 'load_pickle', (['result_path'], {}), '(result_path)\n', (1201, 1214), False, 'from hyperverlet.utils.misc import load_pickle, format_path\n'), ((2100, 2116), 'hyperverlet.energy.PendulumEnergy', 'PendulumEnergy', ([], {}), '()\n', (2114, 2116), False, 'from hyperverlet.energy import PendulumEnergy\n'), ((2220, 2248), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 15)'}), '(figsize=(20, 15))\n', (2230, 2248), True, 'from matplotlib import pyplot as plt, animation\n'), ((2294, 2307), 'hyperverlet.plotting.grid_spec.gs_3_2_3', 'gs_3_2_3', (['fig'], {}), '(fig)\n', (2302, 2307), False, 'from hyperverlet.plotting.grid_spec import gs_3_2_3, gs_line\n'), ((2480, 2522), 'hyperverlet.plotting.phasespace.init_phasespace_plot', 'init_phasespace_plot', (['ax_phase_space', 'q', 'p'], {}), '(ax_phase_space, q, p)\n', (2500, 2522), False, 'from hyperverlet.plotting.phasespace import init_phasespace_plot, update_phasespace_plot\n'), ((3097, 3176), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'animate'], {'frames': 'q.shape[0]', 'save_count': 'q.shape[0]'}), '(fig, animate, frames=q.shape[0], save_count=q.shape[0])\n', (3120, 3176), False, 'from matplotlib import pyplot as plt, animation\n'), ((3339, 3381), 'hyperverlet.utils.misc.format_path', 'format_path', (['config', "config['result_path']"], {}), "(config, config['result_path'])\n", (3350, 3381), False, 'from hyperverlet.utils.misc import load_pickle, format_path\n'), ((3400, 3424), 'hyperverlet.utils.misc.load_pickle', 'load_pickle', (['result_path'], {}), '(result_path)\n', (3411, 3424), False, 'from hyperverlet.utils.misc import load_pickle, format_path\n'), ((3737, 3765), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 15)'}), '(figsize=(20, 15))\n', (3747, 3765), True, 'from matplotlib import pyplot as plt, animation\n'), ((3785, 3805), 'hyperverlet.plotting.grid_spec.gs_line', 'gs_line', (['fig', 'slices'], {}), '(fig, slices)\n', (3792, 3805), False, 'from hyperverlet.plotting.grid_spec import gs_3_2_3, gs_line\n'), ((3903, 3932), 'hyperverlet.plotting.utils.create_gt_pred_legends', 'create_gt_pred_legends', (['q', 'cm'], {}), '(q, cm)\n', (3925, 3932), False, 'from hyperverlet.plotting.utils import save_animation, create_gt_pred_legends, save_figure\n'), ((4787, 4843), 'hyperverlet.plotting.utils.save_figure', 'save_figure', (['f"""visualization/{config_name}"""', 'solver_name'], {}), "(f'visualization/{config_name}', solver_name)\n", (4798, 4843), False, 'from hyperverlet.plotting.utils import save_animation, create_gt_pred_legends, save_figure\n'), ((1898, 1907), 'numpy.sin', 'np.sin', (['q'], {}), '(q)\n', (1904, 1907), True, 'import numpy as np\n'), ((1926, 1935), 'numpy.cos', 'np.cos', (['q'], {}), '(q)\n', (1932, 1935), True, 'import numpy as np\n'), ((2026, 2038), 'numpy.sin', 'np.sin', (['gt_q'], {}), '(gt_q)\n', (2032, 2038), True, 'import numpy as np\n'), ((2060, 2072), 'numpy.cos', 'np.cos', (['gt_q'], {}), '(gt_q)\n', (2066, 2072), True, 'import numpy as np\n'), ((3044, 3084), 
'hyperverlet.plotting.phasespace.update_phasespace_plot', 'update_phasespace_plot', (['ps_plot', 'q', 'p', 'i'], {}), '(ps_plot, q, p, i)\n', (3066, 3084), False, 'from hyperverlet.plotting.phasespace import init_phasespace_plot, update_phasespace_plot\n'), ((3204, 3214), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3212, 3214), True, 'from matplotlib import pyplot as plt, animation\n'), ((3242, 3270), 'hyperverlet.plotting.utils.save_animation', 'save_animation', (['anim', 'config'], {}), '(anim, config)\n', (3256, 3270), False, 'from hyperverlet.plotting.utils import save_animation, create_gt_pred_legends, save_figure\n'), ((3621, 3630), 'numpy.sin', 'np.sin', (['q'], {}), '(q)\n', (3627, 3630), True, 'import numpy as np\n'), ((3649, 3658), 'numpy.cos', 'np.cos', (['q'], {}), '(q)\n', (3655, 3658), True, 'import numpy as np\n'), ((3679, 3691), 'numpy.sin', 'np.sin', (['gt_q'], {}), '(gt_q)\n', (3685, 3691), True, 'import numpy as np\n'), ((3713, 3725), 'numpy.cos', 'np.cos', (['gt_q'], {}), '(gt_q)\n', (3719, 3725), True, 'import numpy as np\n'), ((4982, 4998), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (4990, 4998), True, 'import numpy as np\n')] |
import os
import numpy as np
from PIL import Image
from torch.utils import data
import dataset.transform as transform
from utils.func import recursive_glob
class Cityscapes(data.Dataset):
def __init__(self, root, distributed=False, train_transform=None, valid_transform=None):
self.root = root
self.imgs, self.labels = self.get_img_list(root)
self.dataset_size = len(self.imgs)
self.classes_name = self.get_classes_name()
self.num_classes = len(self.classes_name)
self.cmap = self.get_colormap()
self.train_transform = train_transform
self.valid_transform = valid_transform
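        # Cityscapes gtFine labelIds: ids in void_classes are mapped to
        # ignore_index (255); the 19 valid_classes are remapped to the
        # contiguous train ids 0-18 via class_map in __getitem__.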
self.void_classes = [0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1]
self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33]
self.ignore_index = 255
self.class_map = dict(zip(self.valid_classes, range(self.num_classes)))
self._split(distributed)
self.img_channels = self.__getitem__(0)[0].shape[0]
def get_img_list(self, root):
img_root = os.path.join(root, 'leftImg8bit')
label_root = os.path.join(root, 'gtFine')
imgs = []
labels = []
train_img_root = os.path.join(img_root, 'train')
train_label_root = os.path.join(label_root, 'train')
imgs += sorted(recursive_glob(root=train_img_root, suffix='leftImg8bit.png'))
labels += sorted(recursive_glob(root=train_label_root, suffix='gtFine_labelIds.png'))
train_split = len(imgs)
valid_img_root = os.path.join(img_root, 'val')
valid_label_root = os.path.join(label_root, 'val')
imgs += sorted(recursive_glob(root=valid_img_root, suffix='leftImg8bit.png'))
labels += sorted(recursive_glob(root=valid_label_root, suffix='gtFine_labelIds.png'))
valid_split = len(imgs)
test_img_root = os.path.join(img_root, 'test')
test_label_root = os.path.join(label_root, 'test')
imgs += sorted(recursive_glob(root=test_img_root, suffix='leftImg8bit.png'))
labels += sorted(recursive_glob(root=test_label_root, suffix='gtFine_labelIds.png'))
self.indices = list(range(len(imgs)))
self.train_indices = self.indices[:train_split]
self.valid_indices = self.indices[train_split:valid_split]
self.test_indices = self.indices[valid_split:]
return imgs, labels
def _split(self, distributed):
self.train_dataset = data.Subset(self, self.train_indices)
self.valid_dataset = data.Subset(self, self.valid_indices)
self.test_dataset = data.Subset(self, self.test_indices)
if distributed:
self.train_sampler = data.distributed.DistributedSampler(self.train_dataset)
else:
self.train_sampler = data.RandomSampler(self.train_dataset)
self.valid_sampler = data.SequentialSampler(self.valid_dataset)
self.test_sampler = data.SequentialSampler(self.test_dataset)
def get_dataloader(self, batch_size=1, num_workers=0, pin_memory=False):
train_loader = data.DataLoader(self.train_dataset, batch_size=batch_size, sampler=self.train_sampler,
num_workers=num_workers, pin_memory=pin_memory)
valid_loader = data.DataLoader(self.valid_dataset, batch_size=batch_size, sampler=self.valid_sampler,
num_workers=num_workers, pin_memory=pin_memory)
test_loader = data.DataLoader(self.test_dataset, batch_size=batch_size, sampler=self.test_sampler,
num_workers=num_workers, pin_memory=pin_memory)
return train_loader, valid_loader, test_loader
def get_colormap(self):
cmap = np.array([
[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[0, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32]]
).astype(np.uint8)
other_map = np.array([[0, 0, 0],] * (256 - cmap.shape[0]))
cmap = np.concatenate((cmap, other_map))
return cmap
def default_transform(self, data):
data = transform.to_tensor(data)
return data
def get_classes_name(self):
classes_name = ['road', 'sidewalk', 'building',
'wall', 'fence', 'pole',
'traffic_light', 'traffic_sign', 'vegetation',
'terrain', 'sky', 'person',
'rider', 'car', 'truck',
'bus', 'train', 'motorcycle',
'bicycle']
return classes_name
def vis_transform(self, imgs, labels, preds, to_plt=False):
cmap = self.get_colormap()
if imgs is not None:
if type(imgs).__module__ != np.__name__:
imgs = imgs.cpu().detach().numpy()
if to_plt is True:
imgs = imgs.transpose((0, 2, 3, 1))
if labels is not None:
if type(labels).__module__ != np.__name__:
labels = labels.cpu().detach().numpy().astype('int')
labels = cmap[labels]
labels = labels.transpose((0, 3, 1, 2))
if to_plt is True:
labels = labels.transpose((0, 2, 3, 1))
labels = labels / 255.
if preds is not None:
if type(preds).__module__ != np.__name__:
preds = preds.cpu().detach().numpy()
if preds.shape[1] == self.num_classes:
preds = preds.argmax(axis=1)
preds = cmap[preds]
preds = preds.transpose((0, 3, 1, 2))
if to_plt is True:
preds = preds.transpose((0, 2, 3, 1))
preds = preds / 255.
return imgs, labels, preds
def __getitem__(self, index):
img_path = self.imgs[index]
img = Image.open(img_path)
label_path = self.labels[index]
label = Image.open(label_path)
data = {'image': img, 'label': label}
if index in self.train_indices and self.train_transform is not None:
data = self.train_transform(data)
elif index in self.valid_indices and self.valid_transform is not None:
data = self.valid_transform(data)
else:
data = transform.to_numpy(data)
img = data['image']
label = data['label']
for _voidc in self.void_classes:
label[label == _voidc] = self.ignore_index
for _validc in self.valid_classes:
label[label == _validc] = self.class_map[_validc]
data = {'image': img, 'label': label}
data = self.default_transform(data)
img = data['image']
label = data['label']
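        # Assuming transform.to_tensor scales values to [0, 1] (like
        # torchvision's ToTensor), multiply by 255 to recover the integer
        # train ids before casting to long.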
label = (label * 255).long().view(label.shape[1], label.shape[2])
return img, label
def __len__(self):
return self.dataset_size
if __name__ == '__main__':
from utils.vis import imshow
from dataset.transform import *
root = os.path.expanduser('E:/pCloud/dataset/Cityscapes')
dataset_ = Cityscapes(root=root, train_transform=real_world_transform(output_size=256, type='train'),
valid_transform=real_world_transform(output_size=256, type='valid'))
train_loader, _, _ = dataset_.get_dataloader(batch_size=1)
for batch_idx, (img, label) in enumerate(train_loader):
imgs, labels, _ = dataset_.vis_transform(imgs=img, labels=label, preds=None, to_plt=True)
imshow(title='VOC2012Seg', imgs=(imgs[0], labels[0]))
break
| [
"torch.utils.data.Subset",
"dataset.transform.to_numpy",
"dataset.transform.to_tensor",
"os.path.join",
"torch.utils.data.DataLoader",
"torch.utils.data.RandomSampler",
"PIL.Image.open",
"utils.func.recursive_glob",
"utils.vis.imshow",
"torch.utils.data.distributed.DistributedSampler",
"torch.ut... | [((8676, 8726), 'os.path.expanduser', 'os.path.expanduser', (['"""E:/pCloud/dataset/Cityscapes"""'], {}), "('E:/pCloud/dataset/Cityscapes')\n", (8694, 8726), False, 'import os\n'), ((1135, 1168), 'os.path.join', 'os.path.join', (['root', '"""leftImg8bit"""'], {}), "(root, 'leftImg8bit')\n", (1147, 1168), False, 'import os\n'), ((1190, 1218), 'os.path.join', 'os.path.join', (['root', '"""gtFine"""'], {}), "(root, 'gtFine')\n", (1202, 1218), False, 'import os\n'), ((1292, 1323), 'os.path.join', 'os.path.join', (['img_root', '"""train"""'], {}), "(img_root, 'train')\n", (1304, 1323), False, 'import os\n'), ((1351, 1384), 'os.path.join', 'os.path.join', (['label_root', '"""train"""'], {}), "(label_root, 'train')\n", (1363, 1384), False, 'import os\n'), ((1624, 1653), 'os.path.join', 'os.path.join', (['img_root', '"""val"""'], {}), "(img_root, 'val')\n", (1636, 1653), False, 'import os\n'), ((1681, 1712), 'os.path.join', 'os.path.join', (['label_root', '"""val"""'], {}), "(label_root, 'val')\n", (1693, 1712), False, 'import os\n'), ((1951, 1981), 'os.path.join', 'os.path.join', (['img_root', '"""test"""'], {}), "(img_root, 'test')\n", (1963, 1981), False, 'import os\n'), ((2008, 2040), 'os.path.join', 'os.path.join', (['label_root', '"""test"""'], {}), "(label_root, 'test')\n", (2020, 2040), False, 'import os\n'), ((2550, 2587), 'torch.utils.data.Subset', 'data.Subset', (['self', 'self.train_indices'], {}), '(self, self.train_indices)\n', (2561, 2587), False, 'from torch.utils import data\n'), ((2617, 2654), 'torch.utils.data.Subset', 'data.Subset', (['self', 'self.valid_indices'], {}), '(self, self.valid_indices)\n', (2628, 2654), False, 'from torch.utils import data\n'), ((2683, 2719), 'torch.utils.data.Subset', 'data.Subset', (['self', 'self.test_indices'], {}), '(self, self.test_indices)\n', (2694, 2719), False, 'from torch.utils import data\n'), ((2949, 2991), 'torch.utils.data.SequentialSampler', 'data.SequentialSampler', (['self.valid_dataset'], {}), '(self.valid_dataset)\n', (2971, 2991), False, 'from torch.utils import data\n'), ((3020, 3061), 'torch.utils.data.SequentialSampler', 'data.SequentialSampler', (['self.test_dataset'], {}), '(self.test_dataset)\n', (3042, 3061), False, 'from torch.utils import data\n'), ((3167, 3306), 'torch.utils.data.DataLoader', 'data.DataLoader', (['self.train_dataset'], {'batch_size': 'batch_size', 'sampler': 'self.train_sampler', 'num_workers': 'num_workers', 'pin_memory': 'pin_memory'}), '(self.train_dataset, batch_size=batch_size, sampler=self.\n train_sampler, num_workers=num_workers, pin_memory=pin_memory)\n', (3182, 3306), False, 'from torch.utils import data\n'), ((3364, 3503), 'torch.utils.data.DataLoader', 'data.DataLoader', (['self.valid_dataset'], {'batch_size': 'batch_size', 'sampler': 'self.valid_sampler', 'num_workers': 'num_workers', 'pin_memory': 'pin_memory'}), '(self.valid_dataset, batch_size=batch_size, sampler=self.\n valid_sampler, num_workers=num_workers, pin_memory=pin_memory)\n', (3379, 3503), False, 'from torch.utils import data\n'), ((3560, 3697), 'torch.utils.data.DataLoader', 'data.DataLoader', (['self.test_dataset'], {'batch_size': 'batch_size', 'sampler': 'self.test_sampler', 'num_workers': 'num_workers', 'pin_memory': 'pin_memory'}), '(self.test_dataset, batch_size=batch_size, sampler=self.\n test_sampler, num_workers=num_workers, pin_memory=pin_memory)\n', (3575, 3697), False, 'from torch.utils import data\n'), ((4406, 4451), 'numpy.array', 'np.array', (['([[0, 0, 0]] * (256 - cmap.shape[0]))'], {}), '([[0, 
0, 0]] * (256 - cmap.shape[0]))\n', (4414, 4451), True, 'import numpy as np\n'), ((4468, 4501), 'numpy.concatenate', 'np.concatenate', (['(cmap, other_map)'], {}), '((cmap, other_map))\n', (4482, 4501), True, 'import numpy as np\n'), ((4581, 4606), 'dataset.transform.to_tensor', 'transform.to_tensor', (['data'], {}), '(data)\n', (4600, 4606), True, 'import dataset.transform as transform\n'), ((7503, 7523), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (7513, 7523), False, 'from PIL import Image\n'), ((7589, 7611), 'PIL.Image.open', 'Image.open', (['label_path'], {}), '(label_path)\n', (7599, 7611), False, 'from PIL import Image\n'), ((9162, 9215), 'utils.vis.imshow', 'imshow', ([], {'title': '"""VOC2012Seg"""', 'imgs': '(imgs[0], labels[0])'}), "(title='VOC2012Seg', imgs=(imgs[0], labels[0]))\n", (9168, 9215), False, 'from utils.vis import imshow\n'), ((1408, 1469), 'utils.func.recursive_glob', 'recursive_glob', ([], {'root': 'train_img_root', 'suffix': '"""leftImg8bit.png"""'}), "(root=train_img_root, suffix='leftImg8bit.png')\n", (1422, 1469), False, 'from utils.func import recursive_glob\n'), ((1496, 1563), 'utils.func.recursive_glob', 'recursive_glob', ([], {'root': 'train_label_root', 'suffix': '"""gtFine_labelIds.png"""'}), "(root=train_label_root, suffix='gtFine_labelIds.png')\n", (1510, 1563), False, 'from utils.func import recursive_glob\n'), ((1736, 1797), 'utils.func.recursive_glob', 'recursive_glob', ([], {'root': 'valid_img_root', 'suffix': '"""leftImg8bit.png"""'}), "(root=valid_img_root, suffix='leftImg8bit.png')\n", (1750, 1797), False, 'from utils.func import recursive_glob\n'), ((1824, 1891), 'utils.func.recursive_glob', 'recursive_glob', ([], {'root': 'valid_label_root', 'suffix': '"""gtFine_labelIds.png"""'}), "(root=valid_label_root, suffix='gtFine_labelIds.png')\n", (1838, 1891), False, 'from utils.func import recursive_glob\n'), ((2064, 2124), 'utils.func.recursive_glob', 'recursive_glob', ([], {'root': 'test_img_root', 'suffix': '"""leftImg8bit.png"""'}), "(root=test_img_root, suffix='leftImg8bit.png')\n", (2078, 2124), False, 'from utils.func import recursive_glob\n'), ((2151, 2217), 'utils.func.recursive_glob', 'recursive_glob', ([], {'root': 'test_label_root', 'suffix': '"""gtFine_labelIds.png"""'}), "(root=test_label_root, suffix='gtFine_labelIds.png')\n", (2165, 2217), False, 'from utils.func import recursive_glob\n'), ((2778, 2833), 'torch.utils.data.distributed.DistributedSampler', 'data.distributed.DistributedSampler', (['self.train_dataset'], {}), '(self.train_dataset)\n', (2813, 2833), False, 'from torch.utils import data\n'), ((2881, 2919), 'torch.utils.data.RandomSampler', 'data.RandomSampler', (['self.train_dataset'], {}), '(self.train_dataset)\n', (2899, 2919), False, 'from torch.utils import data\n'), ((3834, 4149), 'numpy.array', 'np.array', (['[[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [190, 153, \n 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], [107, 142, 35], [\n 152, 251, 152], [0, 130, 180], [220, 20, 60], [255, 0, 0], [0, 0, 142],\n [0, 0, 70], [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]]'], {}), '([[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [\n 190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], [107, \n 142, 35], [152, 251, 152], [0, 130, 180], [220, 20, 60], [255, 0, 0], [\n 0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, \n 11, 32]])\n', (3842, 4149), True, 'import numpy as np\n'), ((7948, 7972), 'dataset.transform.to_numpy', 
'transform.to_numpy', (['data'], {}), '(data)\n', (7966, 7972), True, 'import dataset.transform as transform\n')] |
import numpy as np
from ..tools import contains_nan
class _CheckInputs:
def __init__(self, inputs, indep_test=None, reps=None):
self.inputs = inputs
self.reps = reps
self.indep_test = indep_test
def __call__(self):
self._check_ndarray_inputs()
for i in self.inputs:
contains_nan(i)
self.inputs = self.check_dim()
self.inputs = self._convert_inputs_float64()
self._check_indep_test()
self._check_min_samples()
return self.inputs
def _check_ndarray_inputs(self):
if len(self.inputs) < 2:
raise ValueError("there must be at least 2 inputs")
for i in self.inputs:
if not isinstance(i, np.ndarray):
raise ValueError("x and y must be ndarrays")
def check_dim(self):
        # promote 1-D inputs to 2-D column arrays and verify matching dimensions
new_inputs = []
dims = []
for i in self.inputs:
# convert arrays of type (n,) to (n, 1)
if i.ndim == 1:
i = i[:, np.newaxis]
elif i.ndim != 2:
raise ValueError(
"Expected a 2-D array `i`, found shape " "{}".format(i.shape)
)
dims.append(i.shape[1])
new_inputs.append(i)
self._check_nd_ksampletest(dims)
return new_inputs
def _check_nd_ksampletest(self, dims):
if len(set(dims)) > 1:
raise ValueError(
"Shape mismatch, inputs must have shape " "[n, p] and [m, p]."
)
def _convert_inputs_float64(self):
return [np.asarray(i).astype(np.float64) for i in self.inputs]
def _check_indep_test(self):
tests = ["cca", "dcorr", "hhg", "rv", "hsic", "mgc", "kmerf"]
if self.indep_test not in tests and self.indep_test is not None:
raise ValueError("indep_test must be in {}".format(tests))
def _check_min_samples(self):
for i in self.inputs:
if i.shape[0] <= 3:
raise ValueError("Number of samples is too low")
def k_sample_transform(inputs, test_type="normal"):
"""
Computes a `k`-sample transform of the inputs.
    For :math:`k` groups, this creates two matrices: the first vertically
    stacks the inputs. In order to use this function, the inputs must have
    the same number of dimensions :math:`p` and can have varying numbers of
    samples :math:`n`. The second output is a label matrix that one-hot
    encodes the groups, so the outputs are ``(N, p)`` and ``(N, k)``, where
    `N` is the total number of samples (for :math:`k = 2` a single 0/1
    column of shape ``(N, 1)`` is used instead). In the case where the test
    is a random-forest-based test, it creates an ``(N, 1)`` matrix whose
    entries are values from 0 to :math:`k - 1` indicating group membership.
Parameters
----------
inputs : list of ndarray
A list of the inputs. All inputs must be ``(n, p)`` where `n` is the number
of samples and `p` is the number of dimensions. `n` can vary between samples,
but `p` must be the same among all the samples.
test_type : {"normal", "rf"}, default: "normal"
        Whether to one-hot encode the inputs ("normal") or use a one-dimensional
categorical encoding ("rf").
Returns
-------
u : ndarray
The matrix of concatenated inputs of shape ``(N, p)``.
v : ndarray
        The label matrix of shape ``(N, k)`` ("normal"; ``(N, 1)`` when
        :math:`k = 2`) or ``(N, 1)`` ("rf").
"""
n_inputs = len(inputs)
u = np.vstack(inputs)
if np.var(u) == 0:
raise ValueError("Test cannot be run, the inputs have 0 variance")
if test_type == "rf":
v = np.concatenate(
[np.repeat(i, inputs[i].shape[0]) for i in range(n_inputs)], axis=0
)
elif test_type == "normal":
if n_inputs == 2:
n1 = inputs[0].shape[0]
n2 = inputs[1].shape[0]
v = np.vstack([np.zeros((n1, 1)), np.ones((n2, 1))])
else:
vs = []
for i in range(n_inputs):
n = inputs[i].shape[0]
encode = np.zeros(shape=(n, n_inputs))
encode[:, i] = np.ones(shape=n)
vs.append(encode)
v = np.concatenate(vs)
else:
raise ValueError("test_type must be normal or rf")
return u, v
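# Minimal usage sketch (hypothetical arrays; shapes follow the code above):
#   u, v = k_sample_transform([np.random.rand(10, 3), np.random.rand(15, 3)])
#   u.shape == (25, 3); v.shape == (25, 1)  # two groups use a single 0/1 column
# With three groups of sizes (10, 15, 20), u is (45, p) and v is the
# (45, 3) one-hot label matrix.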
| [
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"numpy.vstack",
"numpy.var",
"numpy.concatenate",
"numpy.repeat"
] | [((3477, 3494), 'numpy.vstack', 'np.vstack', (['inputs'], {}), '(inputs)\n', (3486, 3494), True, 'import numpy as np\n'), ((3502, 3511), 'numpy.var', 'np.var', (['u'], {}), '(u)\n', (3508, 3511), True, 'import numpy as np\n'), ((3661, 3693), 'numpy.repeat', 'np.repeat', (['i', 'inputs[i].shape[0]'], {}), '(i, inputs[i].shape[0])\n', (3670, 3693), True, 'import numpy as np\n'), ((4197, 4215), 'numpy.concatenate', 'np.concatenate', (['vs'], {}), '(vs)\n', (4211, 4215), True, 'import numpy as np\n'), ((1615, 1628), 'numpy.asarray', 'np.asarray', (['i'], {}), '(i)\n', (1625, 1628), True, 'import numpy as np\n'), ((4069, 4098), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, n_inputs)'}), '(shape=(n, n_inputs))\n', (4077, 4098), True, 'import numpy as np\n'), ((4130, 4146), 'numpy.ones', 'np.ones', ([], {'shape': 'n'}), '(shape=n)\n', (4137, 4146), True, 'import numpy as np\n'), ((3895, 3912), 'numpy.zeros', 'np.zeros', (['(n1, 1)'], {}), '((n1, 1))\n', (3903, 3912), True, 'import numpy as np\n'), ((3914, 3930), 'numpy.ones', 'np.ones', (['(n2, 1)'], {}), '((n2, 1))\n', (3921, 3930), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
#
# Author: jon4hz
# Date: 17.03.2021
# Desc: Conway's Game of Life, implemented with pygame
#
#######################################################################################################################
# disable support prompt
import os
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import sys, pygame, argparse
import numpy as np
# pylint: disable=no-name-in-module
from pygame.constants import (
QUIT,
MOUSEBUTTONDOWN,
KEYDOWN,
K_p, K_c
)
# pylint: enable=no-name-in-module
from seeds import seeds
# CONSTANTS
RECT_SIZE = 10
SIZE = WIDTH, HEIGHT = 1000, 1000
WHITE = (200, 200, 200, 255)
BLACK = (0, 0, 0, 255)
TITLE = "Conways Game of Life"
def build_board(seed) -> pygame.Surface:
screen = pygame.display.set_mode(SIZE)
pygame.display.set_caption(TITLE)
screen.fill(WHITE)
pygame.mouse.set_visible(1)
if seed is not None and seed.any():
for i in range(seed.shape[0]):
for j in range(seed.shape[1]):
if seed[i, j] == 1:
rect = pygame.Rect(j*RECT_SIZE, i*RECT_SIZE, RECT_SIZE, RECT_SIZE)
pygame.draw.rect(screen, BLACK, rect, 0)
pygame.display.update(rect)
elif seed[i, j] == 0:
rect = pygame.Rect(j*RECT_SIZE, i*RECT_SIZE, RECT_SIZE, RECT_SIZE)
pygame.draw.rect(screen, WHITE, rect, 0)
pygame.draw.rect(screen, BLACK, rect, 1)
pygame.display.update(rect)
else:
        for y in range(HEIGHT // RECT_SIZE):
            for x in range(WIDTH // RECT_SIZE):
rect = pygame.Rect(x*RECT_SIZE, y*RECT_SIZE, RECT_SIZE, RECT_SIZE)
pygame.draw.rect(screen, BLACK, rect, 1)
return screen
def calculate_position(pos) -> tuple:
x = int(pos[0]/RECT_SIZE)
y = int(pos[1]/RECT_SIZE)
return (x, y)
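# Conway's rules (B3/S23): the 3x3 slice sum below counts the cell itself,
# so it is subtracted to get the live-neighbour count. A live cell survives
# with 2 or 3 live neighbours, a dead cell becomes alive with exactly 3,
# and every other cell keeps its current state via the fall-through return.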
def apply_rules(universe, x, y) -> int:
    # clamp the lower slice bounds so border cells do not wrap to an empty slice
    num_neighbours = np.sum(universe[max(0, x - 1) : x + 2, max(0, y - 1) : y + 2]) - universe[x, y]
if universe[x, y] and not 2 <= num_neighbours <= 3:
return 0
elif num_neighbours == 3:
return 1
else:
return universe[x, y]
def simulation(universe, screen) -> np.array:
next_universe = np.copy(universe)
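    # Two passes: the rules are evaluated against the frozen copy so the
    # update is synchronous, then only the cells whose state changed are redrawn.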
for i in range(universe.shape[0]):
for j in range(universe.shape[1]):
next_universe[i, j] = apply_rules(universe, i, j)
for i in range(universe.shape[0]):
for j in range(universe.shape[1]):
if next_universe[i, j] == 1 and universe[i, j] == 0:
rect = pygame.Rect(j*RECT_SIZE, i*RECT_SIZE, RECT_SIZE, RECT_SIZE)
pygame.draw.rect(screen, BLACK, rect, 0)
pygame.display.update(rect)
elif next_universe[i, j] == 0 and universe[i, j] == 1:
rect = pygame.Rect(j*RECT_SIZE, i*RECT_SIZE, RECT_SIZE, RECT_SIZE)
pygame.draw.rect(screen, WHITE, rect, 0)
pygame.draw.rect(screen, BLACK, rect, 1)
pygame.display.update(rect)
universe[:] = next_universe[:]
pygame.time.wait(50)
return universe
if __name__ == "__main__":
# argparser
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--seed', help="set a seed")
args = parser.parse_args()
if seed := args.seed:
try:
# make sure seed is available
if seeds[seed].get('seed') == None:
raise KeyError
arr = np.zeros((int(HEIGHT/RECT_SIZE), int(WIDTH/RECT_SIZE)))
try:
arr[seeds[seed].get('co')[0]:len(seeds[seed].get('seed'))+seeds[seed].get('co')[0],seeds[seed].get('co')[1]:len(seeds[seed].get('seed')[0])+seeds[seed].get('co')[1]] = seeds[seed].get('seed')
except ValueError:
raise ValueError('Please create a bigger board if you want to use that seed!')
# build the inital board
screen = build_board(arr)
pygame.display.update()
except Exception as e:
print(f'Error - {e}')
screen = build_board(None)
pygame.display.update()
universe = np.zeros((int(HEIGHT/RECT_SIZE), int(WIDTH/RECT_SIZE)))
else:
screen = build_board(None)
pygame.display.update()
universe = np.zeros((int(HEIGHT/RECT_SIZE), int(WIDTH/RECT_SIZE)))
# set game status initally to 0
game_status = 0
# init numpy array with size of the board
try:
universe = np.copy(arr)
except NameError:
universe = np.zeros((int(HEIGHT/RECT_SIZE), int(WIDTH/RECT_SIZE)))
# pylint: disable=no-member
pygame.init()
# pylint: enable=no-member
# start main loop
pygame.display.update()
while True:
for event in pygame.event.get():
if event.type == QUIT:
# pylint: disable=no-member
pygame.quit()
# pylint: enable=no-member
sys.exit()
elif event.type == KEYDOWN:
if event.key == K_p:
if game_status == 0:
game_status = 1
else:
game_status = 0
# reset board
elif event.key == K_c and game_status == 0:
screen = build_board(None)
universe = np.zeros((int(HEIGHT/RECT_SIZE), int(WIDTH/RECT_SIZE)))
pygame.display.update()
if game_status == 0:
if event.type == MOUSEBUTTONDOWN:
pos = pygame.mouse.get_pos()
array_pos = calculate_position(pos)
color = screen.get_at(pos)
if color == WHITE:
universe[array_pos[1], array_pos[0]] = 1
rect = pygame.Rect(array_pos[0]*RECT_SIZE, array_pos[1]*RECT_SIZE, RECT_SIZE, RECT_SIZE)
pygame.draw.rect(screen, BLACK, rect, 0)
pygame.display.update(rect)
elif color == BLACK:
universe[array_pos[1], array_pos[0]] = 0
rect = pygame.Rect(array_pos[0]*RECT_SIZE, array_pos[1]*RECT_SIZE, RECT_SIZE, RECT_SIZE)
pygame.draw.rect(screen, WHITE, rect, 0)
pygame.draw.rect(screen, BLACK, rect, 1)
pygame.display.update(rect)
if game_status == 1:
prev_universe = np.copy(universe)
universe = simulation(universe, screen)
# set game_status automatically to 0 if universe didn't change
if (prev_universe==universe).all():
game_status = 0
| [
"pygame.quit",
"numpy.sum",
"argparse.ArgumentParser",
"numpy.copy",
"pygame.mouse.set_visible",
"pygame.display.set_mode",
"pygame.event.get",
"pygame.Rect",
"pygame.draw.rect",
"pygame.init",
"pygame.time.wait",
"pygame.display.update",
"pygame.mouse.get_pos",
"pygame.display.set_caption... | [((757, 786), 'pygame.display.set_mode', 'pygame.display.set_mode', (['SIZE'], {}), '(SIZE)\n', (780, 786), False, 'import sys, pygame, argparse\n'), ((791, 824), 'pygame.display.set_caption', 'pygame.display.set_caption', (['TITLE'], {}), '(TITLE)\n', (817, 824), False, 'import sys, pygame, argparse\n'), ((852, 879), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(1)'], {}), '(1)\n', (876, 879), False, 'import sys, pygame, argparse\n'), ((2239, 2256), 'numpy.copy', 'np.copy', (['universe'], {}), '(universe)\n', (2246, 2256), True, 'import numpy as np\n'), ((3079, 3099), 'pygame.time.wait', 'pygame.time.wait', (['(50)'], {}), '(50)\n', (3095, 3099), False, 'import sys, pygame, argparse\n'), ((3177, 3202), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3200, 3202), False, 'import sys, pygame, argparse\n'), ((4638, 4651), 'pygame.init', 'pygame.init', ([], {}), '()\n', (4649, 4651), False, 'import sys, pygame, argparse\n'), ((4709, 4732), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (4730, 4732), False, 'import sys, pygame, argparse\n'), ((1948, 1990), 'numpy.sum', 'np.sum', (['universe[x - 1:x + 2, y - 1:y + 2]'], {}), '(universe[x - 1:x + 2, y - 1:y + 2])\n', (1954, 1990), True, 'import numpy as np\n'), ((4261, 4284), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (4282, 4284), False, 'import sys, pygame, argparse\n'), ((4492, 4504), 'numpy.copy', 'np.copy', (['arr'], {}), '(arr)\n', (4499, 4504), True, 'import numpy as np\n'), ((4770, 4788), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (4786, 4788), False, 'import sys, pygame, argparse\n'), ((3965, 3988), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (3986, 3988), False, 'import sys, pygame, argparse\n'), ((6543, 6560), 'numpy.copy', 'np.copy', (['universe'], {}), '(universe)\n', (6550, 6560), True, 'import numpy as np\n'), ((1629, 1692), 'pygame.Rect', 'pygame.Rect', (['(x * RECT_SIZE)', '(y * RECT_SIZE)', 'RECT_SIZE', 'RECT_SIZE'], {}), '(x * RECT_SIZE, y * RECT_SIZE, RECT_SIZE, RECT_SIZE)\n', (1640, 1692), False, 'import sys, pygame, argparse\n'), ((1705, 1745), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'BLACK', 'rect', '(1)'], {}), '(screen, BLACK, rect, 1)\n', (1721, 1745), False, 'import sys, pygame, argparse\n'), ((2571, 2634), 'pygame.Rect', 'pygame.Rect', (['(j * RECT_SIZE)', '(i * RECT_SIZE)', 'RECT_SIZE', 'RECT_SIZE'], {}), '(j * RECT_SIZE, i * RECT_SIZE, RECT_SIZE, RECT_SIZE)\n', (2582, 2634), False, 'import sys, pygame, argparse\n'), ((2647, 2687), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'BLACK', 'rect', '(0)'], {}), '(screen, BLACK, rect, 0)\n', (2663, 2687), False, 'import sys, pygame, argparse\n'), ((2704, 2731), 'pygame.display.update', 'pygame.display.update', (['rect'], {}), '(rect)\n', (2725, 2731), False, 'import sys, pygame, argparse\n'), ((4105, 4128), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (4126, 4128), False, 'import sys, pygame, argparse\n'), ((4885, 4898), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (4896, 4898), False, 'import sys, pygame, argparse\n'), ((4958, 4968), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4966, 4968), False, 'import sys, pygame, argparse\n'), ((1065, 1128), 'pygame.Rect', 'pygame.Rect', (['(j * RECT_SIZE)', '(i * RECT_SIZE)', 'RECT_SIZE', 'RECT_SIZE'], {}), '(j * RECT_SIZE, i * RECT_SIZE, RECT_SIZE, RECT_SIZE)\n', (1076, 1128), False, 'import sys, pygame, argparse\n'), ((1145, 1185), 
'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'BLACK', 'rect', '(0)'], {}), '(screen, BLACK, rect, 0)\n', (1161, 1185), False, 'import sys, pygame, argparse\n'), ((1206, 1233), 'pygame.display.update', 'pygame.display.update', (['rect'], {}), '(rect)\n', (1227, 1233), False, 'import sys, pygame, argparse\n'), ((2822, 2885), 'pygame.Rect', 'pygame.Rect', (['(j * RECT_SIZE)', '(i * RECT_SIZE)', 'RECT_SIZE', 'RECT_SIZE'], {}), '(j * RECT_SIZE, i * RECT_SIZE, RECT_SIZE, RECT_SIZE)\n', (2833, 2885), False, 'import sys, pygame, argparse\n'), ((2898, 2938), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'WHITE', 'rect', '(0)'], {}), '(screen, WHITE, rect, 0)\n', (2914, 2938), False, 'import sys, pygame, argparse\n'), ((2955, 2995), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'BLACK', 'rect', '(1)'], {}), '(screen, BLACK, rect, 1)\n', (2971, 2995), False, 'import sys, pygame, argparse\n'), ((3012, 3039), 'pygame.display.update', 'pygame.display.update', (['rect'], {}), '(rect)\n', (3033, 3039), False, 'import sys, pygame, argparse\n'), ((5595, 5617), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (5615, 5617), False, 'import sys, pygame, argparse\n'), ((1299, 1362), 'pygame.Rect', 'pygame.Rect', (['(j * RECT_SIZE)', '(i * RECT_SIZE)', 'RECT_SIZE', 'RECT_SIZE'], {}), '(j * RECT_SIZE, i * RECT_SIZE, RECT_SIZE, RECT_SIZE)\n', (1310, 1362), False, 'import sys, pygame, argparse\n'), ((1379, 1419), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'WHITE', 'rect', '(0)'], {}), '(screen, WHITE, rect, 0)\n', (1395, 1419), False, 'import sys, pygame, argparse\n'), ((1440, 1480), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'BLACK', 'rect', '(1)'], {}), '(screen, BLACK, rect, 1)\n', (1456, 1480), False, 'import sys, pygame, argparse\n'), ((1501, 1528), 'pygame.display.update', 'pygame.display.update', (['rect'], {}), '(rect)\n', (1522, 1528), False, 'import sys, pygame, argparse\n'), ((5856, 5945), 'pygame.Rect', 'pygame.Rect', (['(array_pos[0] * RECT_SIZE)', '(array_pos[1] * RECT_SIZE)', 'RECT_SIZE', 'RECT_SIZE'], {}), '(array_pos[0] * RECT_SIZE, array_pos[1] * RECT_SIZE, RECT_SIZE,\n RECT_SIZE)\n', (5867, 5945), False, 'import sys, pygame, argparse\n'), ((5962, 6002), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'BLACK', 'rect', '(0)'], {}), '(screen, BLACK, rect, 0)\n', (5978, 6002), False, 'import sys, pygame, argparse\n'), ((6027, 6054), 'pygame.display.update', 'pygame.display.update', (['rect'], {}), '(rect)\n', (6048, 6054), False, 'import sys, pygame, argparse\n'), ((5437, 5460), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (5458, 5460), False, 'import sys, pygame, argparse\n'), ((6192, 6281), 'pygame.Rect', 'pygame.Rect', (['(array_pos[0] * RECT_SIZE)', '(array_pos[1] * RECT_SIZE)', 'RECT_SIZE', 'RECT_SIZE'], {}), '(array_pos[0] * RECT_SIZE, array_pos[1] * RECT_SIZE, RECT_SIZE,\n RECT_SIZE)\n', (6203, 6281), False, 'import sys, pygame, argparse\n'), ((6298, 6338), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'WHITE', 'rect', '(0)'], {}), '(screen, WHITE, rect, 0)\n', (6314, 6338), False, 'import sys, pygame, argparse\n'), ((6363, 6403), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'BLACK', 'rect', '(1)'], {}), '(screen, BLACK, rect, 1)\n', (6379, 6403), False, 'import sys, pygame, argparse\n'), ((6428, 6455), 'pygame.display.update', 'pygame.display.update', (['rect'], {}), '(rect)\n', (6449, 6455), False, 'import sys, pygame, argparse\n')] |
"""
This displays the user-based filtering page
"""
from ast import literal_eval
from collections import defaultdict
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import numpy as np
from app import APP
import global_record
import grab_list
import display_final_movie
COLORS = {
'background': '#111111',
'text': '#000080'
}
NUM_FINAL_RECOMMEND = 10
COP = defaultdict(list)
df = pd.read_csv('movies-dataset/source/collaborative_result.csv',
header=None, index_col=0, converters={1: literal_eval})
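# Each CSV row maps a user id to a list of recommended items; keep only the
# first field of each item (presumably the movie title, later resolved via
# id_title_set), keyed by integer user id.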
for row in df.iterrows():
# print([item for item in row[1][0]])
tmp_list = list(row[1])
COP[int(row[0])] = [item[0] for item in tmp_list[0]]
obs = grab_list.read_csv()
id_set = obs.id_set
id_title_set = obs.id_title_set
user_val = []
user_val.extend([{'label': str(i), 'value': i} for i in np.arange(1, 467, 1)])
def main():
"""
:return: a html div with all content to be displayed for the user-based filtering,
including, a div with drop down boxes, find user button, and movies with their info
"""
movie_div = display_final_movie.add_final_movies(zip(range(NUM_FINAL_RECOMMEND),
global_record.INITIAL_MOVIE_ID_LIST[10:(10 + NUM_FINAL_RECOMMEND)]))
search_bar = html.Div(
children=[
html.Div(children='Please type a user ID',
style={'text-align': 'center',
'font-size': '16px',
'margin-bottom': '20px'}),
html.Div(dcc.Input(
                id='user_id_dropdown',
type='number',
placeholder="Please enter a user id",
style={'font-size': '13px',
'width': '100%'}
))
]
)
search_button_div = html.Div(html.Button(id='user_id_button',
children='Find User',
style={'font-size': '13px'}),
style={'margin-top': '50px',
'margin-bottom': '20px',
'width': '40%',
'margin-left': '30%',
'text-align': 'center'})
app_recommender_tab = html.Div(children=[])
app_recommender_tab.children.append(html.Div(html.H1('User-based Filtering'),
className='wrap'))
app_recommender_tab.children.append(html.Div(search_bar, style={'margin-top': '15px'}))
app_recommender_tab.children.append(search_button_div)
app_recommender_tab.children.append(html.Div(id='recommend_main_div', children=movie_div))
return app_recommender_tab
def call_back_recom():
"""
A call back function for the find user button in the html div.
:return: a updated html div with after the filter button is clicked
"""
list_state = [State('user_id_dropdown', 'value')]
@APP.callback(
Output('recommend_main_div', 'children'),
[Input('user_id_button', 'n_clicks')],
list_state)
def update_multi_output(n_clicks, *input_value):
ctx = dash.callback_context
if not ctx.triggered:
user_click = 'No clicks yet'
else:
user_click = ctx.triggered[0]['prop_id'].split('.')[0]
if n_clicks is not None and n_clicks > 0:
list_filter = list(input_value)
user_id = int(list_filter[0])
print(user_id)
list_next_movie_id = []
movie_names = COP[user_id]
# movie_names = COP.user_recommendation_dic[user_id]
for mn in movie_names:
print(mn)
if mn in id_title_set:
#print(id_title_set[mn])
list_next_movie_id.append(int(id_title_set[mn]))
print(list_next_movie_id)
ls = []
for ids in list_next_movie_id:
if ids in id_set:
ls.append(ids)
list_next_movie_id = ls
num_movie_rate = len(list_next_movie_id)
print(list_next_movie_id)
result = display_final_movie.add_final_movies(zip(range(num_movie_rate), list_next_movie_id))
return result
else:
raise PreventUpdate
if __name__ == '__main__':
main()
| [
"grab_list.read_csv",
"dash_html_components.Button",
"pandas.read_csv",
"dash_html_components.Div",
"dash.dependencies.State",
"collections.defaultdict",
"dash.dependencies.Input",
"numpy.arange",
"dash_html_components.H1",
"dash.dependencies.Output"
] | [((513, 530), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (524, 530), False, 'from collections import defaultdict\n'), ((536, 659), 'pandas.read_csv', 'pd.read_csv', (['"""movies-dataset/source/collaborative_result.csv"""'], {'header': 'None', 'index_col': '(0)', 'converters': '{(1): literal_eval}'}), "('movies-dataset/source/collaborative_result.csv', header=None,\n index_col=0, converters={(1): literal_eval})\n", (547, 659), True, 'import pandas as pd\n'), ((831, 851), 'grab_list.read_csv', 'grab_list.read_csv', ([], {}), '()\n', (849, 851), False, 'import grab_list\n'), ((2505, 2526), 'dash_html_components.Div', 'html.Div', ([], {'children': '[]'}), '(children=[])\n', (2513, 2526), True, 'import dash_html_components as html\n'), ((1994, 2081), 'dash_html_components.Button', 'html.Button', ([], {'id': '"""user_id_button"""', 'children': '"""Find User"""', 'style': "{'font-size': '13px'}"}), "(id='user_id_button', children='Find User', style={'font-size':\n '13px'})\n", (2005, 2081), True, 'import dash_html_components as html\n'), ((2717, 2767), 'dash_html_components.Div', 'html.Div', (['search_bar'], {'style': "{'margin-top': '15px'}"}), "(search_bar, style={'margin-top': '15px'})\n", (2725, 2767), True, 'import dash_html_components as html\n'), ((2868, 2921), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""recommend_main_div"""', 'children': 'movie_div'}), "(id='recommend_main_div', children=movie_div)\n", (2876, 2921), True, 'import dash_html_components as html\n'), ((3152, 3186), 'dash.dependencies.State', 'State', (['"""user_id_dropdown"""', '"""value"""'], {}), "('user_id_dropdown', 'value')\n", (3157, 3186), False, 'from dash.dependencies import Input, Output, State\n'), ((3216, 3256), 'dash.dependencies.Output', 'Output', (['"""recommend_main_div"""', '"""children"""'], {}), "('recommend_main_div', 'children')\n", (3222, 3256), False, 'from dash.dependencies import Input, Output, State\n'), ((976, 996), 'numpy.arange', 'np.arange', (['(1)', '(467)', '(1)'], {}), '(1, 467, 1)\n', (985, 996), True, 'import numpy as np\n'), ((2576, 2607), 'dash_html_components.H1', 'html.H1', (['"""User-based Filtering"""'], {}), "('User-based Filtering')\n", (2583, 2607), True, 'import dash_html_components as html\n'), ((3267, 3302), 'dash.dependencies.Input', 'Input', (['"""user_id_button"""', '"""n_clicks"""'], {}), "('user_id_button', 'n_clicks')\n", (3272, 3302), False, 'from dash.dependencies import Input, Output, State\n'), ((1482, 1606), 'dash_html_components.Div', 'html.Div', ([], {'children': '"""Please type a user ID"""', 'style': "{'text-align': 'center', 'font-size': '16px', 'margin-bottom': '20px'}"}), "(children='Please type a user ID', style={'text-align': 'center',\n 'font-size': '16px', 'margin-bottom': '20px'})\n", (1490, 1606), True, 'import dash_html_components as html\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import tensorflow as tf
import tensorflow.keras as keras
from .building_blocks import Conv3dAdaIn, Conv2dAdaIn, AdaIn
from ..confignet_utils import euler_angles_to_matrix, transform_3d_grid_tf
import numpy as np
class HologanGenerator(tf.keras.models.Model):
def __init__(self, latent_dim, output_shape, n_adain_mlp_units, n_adain_mlp_layers, gen_output_activation):
super(HologanGenerator, self).__init__()
self.output_img_shape = output_shape
self.const_shape = (4, 4, 4, 512)
n_features_in_first_layer = 256
nl_f = keras.layers.LeakyReLU
mlp_nl = lambda: keras.layers.LeakyReLU(alpha=0.2)
self.zero_input = keras.layers.Lambda(lambda x: self.get_zero_inputs(x), name="zero_input")
self.learned_input_layer = keras.layers.Dense(np.prod(self.const_shape),
kernel_initializer="zeros",
bias_initializer=tf.compat.v1.initializers.ones(),
name="learned_input")
#pre-rotation function
self.map_3d_0 = Conv3dAdaIn(
num_feature_maps = n_features_in_first_layer, kernel_size = 3, double_conv = False,
non_linear_after = None,
z_size = latent_dim,
mlp_num_units = n_adain_mlp_units,
mlp_num_layers = n_adain_mlp_layers,
mlp_non_linear = mlp_nl, conv_non_linear = nl_f)
self.map_3d_1 = Conv3dAdaIn(
num_feature_maps = n_features_in_first_layer // 2, kernel_size = 3, double_conv = False,
non_linear_after = None,
z_size = latent_dim,
mlp_num_units = n_adain_mlp_units,
mlp_num_layers = n_adain_mlp_layers,
mlp_non_linear = mlp_nl, conv_non_linear = nl_f)
#NOTE: rotation (done in call)
#post-rotation function
self.map_3d_post = keras.models.Sequential([
keras.layers.Conv3D(n_features_in_first_layer // 4, 3, padding="same"),
nl_f(),
keras.layers.Conv3D(n_features_in_first_layer // 4, 3, padding="same"),
nl_f()
])
self.projection_conv = keras.layers.Conv2D(512, 1, activation=tf.nn.leaky_relu, padding="same")
#2d mapping
self.map_2d_0 = Conv2dAdaIn(
num_feature_maps = n_features_in_first_layer, kernel_size = 4, double_conv = False,
non_linear_after = None,
z_size = latent_dim,
mlp_num_units = n_adain_mlp_units,
mlp_num_layers = n_adain_mlp_layers,
mlp_non_linear = mlp_nl, conv_non_linear = nl_f)
self.map_2d_1 = Conv2dAdaIn(
num_feature_maps = n_features_in_first_layer // 4, kernel_size = 4, double_conv = False,
non_linear_after = None,
z_size = latent_dim,
mlp_num_units = n_adain_mlp_units,
mlp_num_layers = n_adain_mlp_layers,
mlp_non_linear = mlp_nl, conv_non_linear = nl_f)
self.map_2d_2 = Conv2dAdaIn(
num_feature_maps = n_features_in_first_layer // 8, kernel_size = 4, double_conv = False,
non_linear_after = None,
z_size = latent_dim,
mlp_num_units = n_adain_mlp_units,
mlp_num_layers = n_adain_mlp_layers,
mlp_non_linear = mlp_nl, conv_non_linear = nl_f)
if self.output_img_shape[0] > 128:
self.map_2d_2b = Conv2dAdaIn(
num_feature_maps = n_features_in_first_layer // 8, kernel_size = 4, double_conv = False,
non_linear_after = None,
z_size = latent_dim,
mlp_num_units = n_adain_mlp_units,
mlp_num_layers = n_adain_mlp_layers,
mlp_non_linear = mlp_nl, conv_non_linear = nl_f)
if self.output_img_shape[0] > 256:
self.map_2d_2c = Conv2dAdaIn(
num_feature_maps = n_features_in_first_layer // 16, kernel_size = 4, double_conv = False,
non_linear_after = None,
z_size = latent_dim,
mlp_num_units = n_adain_mlp_units,
mlp_num_layers = n_adain_mlp_layers,
mlp_non_linear = mlp_nl, conv_non_linear = nl_f)
self.map_final = keras.layers.Conv2D(3, 4, activation=gen_output_activation, padding="same")
def get_zero_inputs(self, input_layer):
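        # Tile a constant scalar to the current batch size so the learned
        # constant tensor produced below has one copy per sample.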
zero_input = tf.constant(0.0, shape = (1, 1), name='HoloGanConstantInput')
zero_input = tf.tile(zero_input, (tf.shape(input_layer)[0], 1))
return zero_input
def build_input_dict(self, latent_vector, rotation):
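        # A single latent vector is broadcast to every AdaIN stage; a list
        # supplies one latent per stage (two 3D and three 2D mappings).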
input_dict = {}
if isinstance(latent_vector, list):
input_dict["z_3d_0"] = latent_vector[0]
input_dict["z_3d_1"] = latent_vector[1]
input_dict["z_2d_0"] = latent_vector[2]
input_dict["z_2d_1"] = latent_vector[3]
input_dict["z_2d_2"] = latent_vector[4]
else:
input_dict["z_3d_0"] = latent_vector
input_dict["z_3d_1"] = latent_vector
input_dict["z_2d_0"] = latent_vector
input_dict["z_2d_1"] = latent_vector
input_dict["z_2d_2"] = latent_vector
input_dict["rotation"] = rotation
return input_dict
def call(self, inputs):
if not isinstance(inputs, dict):
inputs = self.build_input_dict(inputs[0], inputs[1])
zeros = self.zero_input(inputs['z_3d_0'])
x = self.learned_input_layer(zeros)
x = keras.layers.Reshape(self.const_shape, name="learned_input_reshape")(x)
#upsample by factor of 2
x = keras.layers.UpSampling3D()(x)
#transform
x = self.map_3d_0({'x': x, 'z' : inputs['z_3d_0']})
x = keras.layers.UpSampling3D()(x)
x = self.map_3d_1({'x': x, 'z' : inputs['z_3d_1']})
#rotate in 3d
transforms = euler_angles_to_matrix(inputs['rotation'])
x = transform_3d_grid_tf(x, transforms)
#'rendering' layers
x = self.map_3d_post(x)
#...including the reshape
x_s = list(x.shape)
if x_s[0] is None:
x_s[0] = -1
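        # Collapse the depth axis into the channel axis so the 1x1 conv
        # below can act as HoloGAN's projection unit, turning the rotated
        # 3D feature volume into a 2D feature map.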
x = tf.reshape(x, (x_s[0], x_s[1], x_s[2], x_s[3] * x_s[4]))
x = self.projection_conv(x)
x = self.map_2d_0({'x': x, 'z' : inputs['z_2d_0']})
x = keras.layers.UpSampling2D()(x)
x = self.map_2d_1({'x': x, 'z' : inputs['z_2d_1']})
x = keras.layers.UpSampling2D()(x)
x = self.map_2d_2({'x': x, 'z' : inputs['z_2d_2']})
x = keras.layers.UpSampling2D()(x)
if self.output_img_shape[0] > 128:
x = self.map_2d_2b({'x': x, 'z' : inputs['z_2d_2']})
x = keras.layers.UpSampling2D()(x)
if self.output_img_shape[0] > 256:
x = self.map_2d_2c({'x': x, 'z' : inputs['z_2d_2']})
x = keras.layers.UpSampling2D()(x)
x = self.map_final(x)
return x
| [
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Reshape",
"tensorflow.reshape",
"tensorflow.keras.layers.UpSampling3D",
"tensorflow.keras.layers.Conv3D",
"tensorflow.constant",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.compat.v1.initializers.ones",
"tensorflow.shape",
"tensorflow... | [((2357, 2429), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(512)', '(1)'], {'activation': 'tf.nn.leaky_relu', 'padding': '"""same"""'}), "(512, 1, activation=tf.nn.leaky_relu, padding='same')\n", (2376, 2429), True, 'import tensorflow.keras as keras\n'), ((4491, 4566), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(3)', '(4)'], {'activation': 'gen_output_activation', 'padding': '"""same"""'}), "(3, 4, activation=gen_output_activation, padding='same')\n", (4510, 4566), True, 'import tensorflow.keras as keras\n'), ((4636, 4695), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'shape': '(1, 1)', 'name': '"""HoloGanConstantInput"""'}), "(0.0, shape=(1, 1), name='HoloGanConstantInput')\n", (4647, 4695), True, 'import tensorflow as tf\n'), ((6462, 6518), 'tensorflow.reshape', 'tf.reshape', (['x', '(x_s[0], x_s[1], x_s[2], x_s[3] * x_s[4])'], {}), '(x, (x_s[0], x_s[1], x_s[2], x_s[3] * x_s[4]))\n', (6472, 6518), True, 'import tensorflow as tf\n'), ((709, 742), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (731, 742), True, 'import tensorflow.keras as keras\n'), ((901, 926), 'numpy.prod', 'np.prod', (['self.const_shape'], {}), '(self.const_shape)\n', (908, 926), True, 'import numpy as np\n'), ((5788, 5856), 'tensorflow.keras.layers.Reshape', 'keras.layers.Reshape', (['self.const_shape'], {'name': '"""learned_input_reshape"""'}), "(self.const_shape, name='learned_input_reshape')\n", (5808, 5856), True, 'import tensorflow.keras as keras\n'), ((5909, 5936), 'tensorflow.keras.layers.UpSampling3D', 'keras.layers.UpSampling3D', ([], {}), '()\n', (5934, 5936), True, 'import tensorflow.keras as keras\n'), ((6036, 6063), 'tensorflow.keras.layers.UpSampling3D', 'keras.layers.UpSampling3D', ([], {}), '()\n', (6061, 6063), True, 'import tensorflow.keras as keras\n'), ((6632, 6659), 'tensorflow.keras.layers.UpSampling2D', 'keras.layers.UpSampling2D', ([], {}), '()\n', (6657, 6659), True, 'import tensorflow.keras as keras\n'), ((6737, 6764), 'tensorflow.keras.layers.UpSampling2D', 'keras.layers.UpSampling2D', ([], {}), '()\n', (6762, 6764), True, 'import tensorflow.keras as keras\n'), ((6842, 6869), 'tensorflow.keras.layers.UpSampling2D', 'keras.layers.UpSampling2D', ([], {}), '()\n', (6867, 6869), True, 'import tensorflow.keras as keras\n'), ((1083, 1115), 'tensorflow.compat.v1.initializers.ones', 'tf.compat.v1.initializers.ones', ([], {}), '()\n', (1113, 1115), True, 'import tensorflow as tf\n'), ((2113, 2183), 'tensorflow.keras.layers.Conv3D', 'keras.layers.Conv3D', (['(n_features_in_first_layer // 4)', '(3)'], {'padding': '"""same"""'}), "(n_features_in_first_layer // 4, 3, padding='same')\n", (2132, 2183), True, 'import tensorflow.keras as keras\n'), ((2219, 2289), 'tensorflow.keras.layers.Conv3D', 'keras.layers.Conv3D', (['(n_features_in_first_layer // 4)', '(3)'], {'padding': '"""same"""'}), "(n_features_in_first_layer // 4, 3, padding='same')\n", (2238, 2289), True, 'import tensorflow.keras as keras\n'), ((7000, 7027), 'tensorflow.keras.layers.UpSampling2D', 'keras.layers.UpSampling2D', ([], {}), '()\n', (7025, 7027), True, 'import tensorflow.keras as keras\n'), ((7158, 7185), 'tensorflow.keras.layers.UpSampling2D', 'keras.layers.UpSampling2D', ([], {}), '()\n', (7183, 7185), True, 'import tensorflow.keras as keras\n'), ((4741, 4762), 'tensorflow.shape', 'tf.shape', (['input_layer'], {}), '(input_layer)\n', (4749, 4762), True, 'import tensorflow as tf\n')] |
import time
import numpy as np
import math
import numba as nb
from numba import cuda
from numba.cuda.random import create_xoroshiro128p_states,xoroshiro128p_uniform_float64,xoroshiro128p_normal_float64
import matplotlib.pyplot as mpl
################################################################################
spec=[
('pi',nb.float64),
('Mass',nb.float64[:]),
('LJ_E',nb.float64[:]),
('LJ_S',nb.float64[:]),
('fcc_lattice',nb.float64),
('kB',nb.float64),
('T',nb.float64[:]),
('mp_V',nb.float64[:]),
('nd_Mass',nb.float64),
('nd_Energy',nb.float64),
('nd_Length',nb.float64),
('nd_Velocity',nb.float64),
('nd_Time',nb.float64),
('nd_Acceleration',nb.float64),
('cutoff',nb.float64),
('d',nb.float64),
('spr_k',nb.float64),
('dt',nb.float64),
('Tt',nb.int64),
('Pt_I',nb.int64),
('Pt_J',nb.int64),
('Pt_K',nb.int64),
('Pt_N',nb.int64),
('Ar_N',nb.int64),
('All_N',nb.int64),
('Box',nb.float64[:,:]),
('Pt_ePos',nb.float64[:,:]),
('All_type',nb.float64[:]),
('state',nb.boolean),
('dumpstep',nb.int64),
('Pt_argVel',nb.float64[:]),
('Pt_V2',nb.float64[:]),
]
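#NOTE (illustrative): every attribute assigned in __init__ must be declared in spec
#with its Numba type, e.g. nb.float64[:] for a 1-D float64 array and nb.float64[:,:]
#for a 2-D one; an attribute missing from spec raises a typing error at compile time.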
@nb.jitclass(spec)
class Parameters():
def __init__(self):
        #physical parameters
        self.pi=3.14159265
        self.Mass=np.array([39.95,195.08])/6.02*1E-26#unit: kg
        self.LJ_E=np.array([1.654E-21,5.207E-20,1.093E-21])#unit: J
        self.LJ_S=np.array([3.40,2.47,2.94])*1E-10#unit: m
self.fcc_lattice=3.93E-10
self.kB=1.38E-23
self.T=np.array([300.,300.])
        self.mp_V=np.array([np.sqrt(2*self.kB*self.T[0]/self.Mass[0]),np.sqrt(3*self.kB*self.T[1]/self.Mass[1])])#most probable speed of the gas, RMS speed of the solid
        #nondimensionalization parameters
self.nd_Mass=self.Mass[1]
self.nd_Energy=self.LJ_E[1]
self.nd_Length=self.LJ_S[1]
self.nd_Velocity=np.sqrt(self.nd_Energy/self.nd_Mass)
self.nd_Time=self.nd_Length/self.nd_Velocity
self.nd_Acceleration=self.nd_Energy/(self.nd_Mass*self.nd_Length)
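        #NOTE (approximate worked numbers): with the Pt scales used here,
        #nd_Velocity=sqrt(5.207E-20/3.24E-25)~4.0E2 m/s and nd_Time=2.47E-10/4.0E2~6.2E-13 s,
        #so the reduced timestep dt=0.001 set below corresponds to roughly 0.6 fs.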
        #nondimensionalize
self.Mass/=self.nd_Mass
self.LJ_E/=self.nd_Energy
self.LJ_S/=self.nd_Length
self.cutoff=10*1E-10/self.nd_Length
self.fcc_lattice/=self.nd_Length
self.mp_V/=self.nd_Velocity
self.d=5.0
self.spr_k=5000.
self.dt=0.001
self.Tt=3500000
        #box parameters
self.Pt_I=6
self.Pt_J=6
self.Pt_K=3
self.Pt_N=4*self.Pt_I*self.Pt_J*self.Pt_K
self.Ar_N=1
self.All_N=self.Pt_N+self.Ar_N
self.Box=np.zeros((3,3))
self.Pt_ePos=np.zeros((self.Pt_N,3))
self.All_type=np.zeros(self.All_N)
        #state parameters
self.state=True
self.dumpstep=100
################################################################################
def Initialization(Pars):
Pars.Box[0,0]=0
Pars.Box[0,1]=Pars.Pt_I*Pars.fcc_lattice
Pars.Box[0,2]=Pars.Box[0,1]-Pars.Box[0,0]
Pars.Box[1,0]=0
Pars.Box[1,1]=Pars.Pt_J*Pars.fcc_lattice
Pars.Box[1,2]=Pars.Box[1,1]-Pars.Box[1,0]
Pars.Box[2,0]=-(Pars.Pt_K-0.5)*Pars.fcc_lattice
Pars.Box[2,1]=Pars.d
Pars.Box[2,2]=Pars.Box[2,1]-Pars.Box[2,0]
    print('Computational domain X: ',Pars.Box[0,0],', ',Pars.Box[0,1])
    print('Computational domain Y: ',Pars.Box[1,0],', ',Pars.Box[1,1])
    print('Computational domain Z: ',Pars.Box[2,0],', ',Pars.Box[2,1])
timestep=0
    #initialize positions and velocities
All_Pos=np.zeros((Pars.All_N,3))
All_Vel=np.zeros((Pars.All_N,3))
d_All_Pos=cuda.to_device(All_Pos)
d_All_Vel=cuda.to_device(All_Vel)
    seedt=int(time.time())#integer seed, as documented for create_xoroshiro128p_states
rng_states=create_xoroshiro128p_states(Pars.All_N,seedt)
Initialization_Kernel[1,Pars.All_N](d_All_Pos,d_All_Vel,Pars.All_type,Pars.Pt_N,Pars.Pt_I,Pars.Pt_J,Pars.Pt_K,Pars.fcc_lattice,rng_states,Pars.mp_V,Pars.Box,Pars.pi)
    #first periodic wrap of positions
temp=np.array([np.nan])
Pos_period[1,Pars.All_N](d_All_Pos,Pars.All_N,Pars.Box,Pars.All_type,Pars.Pt_ePos,temp)
    #first temperature rescale
Pt_argVel=np.zeros(3)
Pt_V2=np.zeros(1)
Pt_T=np.zeros(1)
rescale_T[1,Pars.Pt_N](d_All_Vel,Pt_argVel,Pars.Pt_N,Pt_V2,Pt_T,Pars.nd_Velocity,Pars.Mass,Pars.nd_Mass,Pars.kB,Pars.T)
    #first acceleration evaluation with periodic boundaries
All_Acc=np.zeros((Pars.All_N,3))
d_All_Acc=cuda.to_device(All_Acc)
Acceleration_period[1,Pars.All_N](d_All_Pos,d_All_Acc,Pars.All_N,Pars.All_type,Pars.LJ_E,Pars.LJ_S,Pars.Box,Pars.cutoff,Pars.Pt_ePos,Pars.spr_k,Pars.Mass)
All_Pos=d_All_Pos.copy_to_host()
All_Vel=d_All_Vel.copy_to_host()
All_Acc=d_All_Acc.copy_to_host()
Pars.Pt_ePos=All_Pos[:Pars.Pt_N,:]
    #initial info
print('Created ',Pars.Pt_N,' Pt')
print('Created ',Pars.Ar_N,' Ar')
    print('Pt bulk mean velocity in x',Pt_argVel[0])
    print('Pt bulk mean velocity in y',Pt_argVel[1])
    print('Pt bulk mean velocity in z',Pt_argVel[2])
    print('Pt temperature',Pt_T[0])
    print('Ar incident velocity:',All_Vel[Pars.Pt_N,0],',',All_Vel[Pars.Pt_N,1],',',All_Vel[Pars.Pt_N,2])
print('*******Model Initialization Done!*******')
return(All_Pos,All_Vel,All_Acc,timestep)
################################################################################
@cuda.jit
def Initialization_Kernel(All_Pos,All_Vel,All_type,Pt_N,Pt_I,Pt_J,Pt_K,fcc_lattice,rng_states,mp_V,Box,pi):
tx=cuda.threadIdx.x
bx=cuda.blockIdx.x
tpb=cuda.blockDim.x
t_pos=tx+tpb*bx
if(t_pos<Pt_N):#Pt
i=int(t_pos/(2*Pt_J*Pt_K))
jk=t_pos%(2*Pt_J*Pt_K)
j=int(jk/Pt_K)
k=jk%Pt_K
if(((i%2)+(j%2))%2==0):
k=2*k
else:
k=2*k+1
All_Pos[t_pos,0]=i/2*fcc_lattice
All_Pos[t_pos,1]=j/2*fcc_lattice
All_Pos[t_pos,2]=(k/2-2.5)*fcc_lattice
for dim in range(3):
R=0
while R==0:
R=xoroshiro128p_normal_float64(rng_states, t_pos)
            All_Vel[t_pos,dim]=mp_V[1]/math.sqrt(3.0)*R#Gaussian, mean 0, std chosen so the RMS speed equals mp_V[1] for thermostatting
All_type[t_pos]=1.0
if(t_pos==Pt_N):#Ar
Rx=xoroshiro128p_uniform_float64(rng_states, t_pos)
Ry=xoroshiro128p_uniform_float64(rng_states, t_pos)
All_Pos[t_pos,0]=Box[0,0]+Box[0,2]*Rx
All_Pos[t_pos,1]=Box[1,0]+Box[1,2]*Ry
All_Pos[t_pos,2]=Box[2,1]
R1=0
while R1==0:
R1=xoroshiro128p_uniform_float64(rng_states, t_pos)
R2=0
while R2==0:
R2=xoroshiro128p_uniform_float64(rng_states, t_pos)
        All_Vel[t_pos,0]=mp_V[0]*math.sqrt(-math.log(R1))*math.cos(2*pi*R2)#Maxwell distribution
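        #NOTE (sketch): with mp_V[0]=sqrt(2*kB*T/m), mp_V[0]*sqrt(-log(R1))*cos(2*pi*R2)
        #equals sqrt(kB*T/m)*sqrt(-2*log(R1))*cos(2*pi*R2), i.e. a Box-Muller sample of a
        #Gaussian velocity component with the Maxwell-Boltzmann standard deviation.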
R1=0
while R1==0:
R1=xoroshiro128p_uniform_float64(rng_states, t_pos)
R2=0
while R2==0:
R2=xoroshiro128p_uniform_float64(rng_states, t_pos)
All_Vel[t_pos,1]=mp_V[0]*math.sqrt(-math.log(R1))*math.sin(2*pi*R2)
R1=0
while R1==0:
R1=xoroshiro128p_uniform_float64(rng_states, t_pos)
R2=0
while R2==0:
R2=xoroshiro128p_uniform_float64(rng_states, t_pos)
All_Vel[t_pos,2]=-mp_V[0]*math.sqrt(-math.log(R1))
All_type[t_pos]=0.0
cuda.syncthreads()
################################################################################
@cuda.jit
def Pos_period(All_Pos,All_N,Box,All_type,Pt_ePos,temp):
tx=cuda.threadIdx.x
bx=cuda.blockIdx.x
tpb=cuda.blockDim.x
t_pos=tx+tpb*bx
if(t_pos<All_N):
        #periodic wrap in X and Y
for axis in range(2):
if(All_Pos[t_pos,axis]<Box[axis,0]):
All_Pos[t_pos,axis]+=Box[axis,2]
if(All_type[t_pos]==1):
Pt_ePos[t_pos,axis]+=Box[axis,2]
elif(All_Pos[t_pos,axis]>=Box[axis,1]):
All_Pos[t_pos,axis]-=Box[axis,2]
if(All_type[t_pos]==1):
Pt_ePos[t_pos,axis]-=Box[axis,2]
        #update the lower boundary in Z
cuda.atomic.min(temp,0,All_Pos[t_pos,2])
cuda.syncthreads()
Box[2,0]=temp[0]
#Pars.Box[2,2]=Pars.Box[2,1]-Pars.Box[2,0]
################################################################################
@cuda.jit
def Acceleration_period(All_Pos,All_Acc,All_N,All_type,LJ_E,LJ_S,Box,cutoff,Pt_ePos,spr_k,Mass):
tx=cuda.threadIdx.x
bx=cuda.blockIdx.x
tpb=cuda.blockDim.x
t_pos=tx+tpb*bx
if(t_pos<All_N):
Atom_Fx=0.
Atom_Fy=0.
Atom_Fz=0.
for j in range(All_N):
if(All_type[t_pos]==1 and All_type[j]==1):
LJ_pair=1
elif(All_type[t_pos]==0 and All_type[j]==0):
LJ_pair=0
else:
LJ_pair=2
Epair=LJ_E[LJ_pair]
Spair=LJ_S[LJ_pair]
            #minimum-image relative position
Pairx=All_Pos[t_pos,0]-All_Pos[j,0]
Pairy=All_Pos[t_pos,1]-All_Pos[j,1]
Pairz=All_Pos[t_pos,2]-All_Pos[j,2]
if(abs(Pairx)>=Box[0,2]-cutoff):
Pairx=Pairx-Box[0,2]*Pairx/abs(Pairx)
if(abs(Pairy)>=Box[1,2]-cutoff):
Pairy=Pairy-Box[1,2]*Pairy/abs(Pairy)
            #minimum-image distance
Dispair=math.sqrt(Pairx**2+Pairy**2+Pairz**2)
if(Dispair>0 and Dispair<=cutoff):
Fpair=48*Epair*(Spair**12/Dispair**13-0.5*Spair**6/Dispair**7)
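                #NOTE: Fpair=-dU/dr for U(r)=4*Epair*((Spair/r)**12-(Spair/r)**6);
                #dividing by Dispair below projects it onto each Cartesian axis.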
Atom_Fx+=Pairx*Fpair/Dispair
Atom_Fy+=Pairy*Fpair/Dispair
Atom_Fz+=Pairz*Fpair/Dispair
if(All_type[t_pos]==1):
            #Pt elastic restoring force toward its equilibrium site
Spring_Disx=All_Pos[t_pos,0]-Pt_ePos[t_pos,0]
Spring_Fx=-spr_k*Spring_Disx
Pt_Fx=Atom_Fx+Spring_Fx
All_Acc[t_pos,0]=Pt_Fx/Mass[1]
Spring_Disy=All_Pos[t_pos,1]-Pt_ePos[t_pos,1]
Spring_Fy=-spr_k*Spring_Disy
Pt_Fy=Atom_Fy+Spring_Fy
All_Acc[t_pos,1]=Pt_Fy/Mass[1]
Spring_Disz=All_Pos[t_pos,2]-Pt_ePos[t_pos,2]
Spring_Fz=-spr_k*Spring_Disz
Pt_Fz=Atom_Fz+Spring_Fz
All_Acc[t_pos,2]=Pt_Fz/Mass[1]
else:
Ar_Fx=Atom_Fx
All_Acc[t_pos,0]=Ar_Fx/Mass[0]
Ar_Fy=Atom_Fy
All_Acc[t_pos,1]=Ar_Fy/Mass[0]
Ar_Fz=Atom_Fz
All_Acc[t_pos,2]=Ar_Fz/Mass[0]
cuda.syncthreads()
################################################################################
@cuda.jit
def Verlet_Pos(All_Pos,All_Vel,All_Acc,All_N,dt):
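    #NOTE (sketch): Verlet_Pos and Verlet_Vel together implement velocity Verlet:
    #x(t+dt)=x(t)+v(t)*dt+0.5*a(t)*dt**2, then a(t+dt) is recomputed, then
    #v(t+dt)=v(t)+0.5*(a(t)+a(t+dt))*dt, matching the call order in the main loop.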
tx=cuda.threadIdx.x
bx=cuda.blockIdx.x
tpb=cuda.blockDim.x
t_pos=tx+tpb*bx
if(t_pos<All_N):
for axis in range(3):
All_Pos[t_pos,axis]+=All_Vel[t_pos,axis]*dt+0.5*All_Acc[t_pos,axis]*dt**2
cuda.syncthreads()
################################################################################
@cuda.jit
def Verlet_Vel(All_Vel,All_Acc_temp,All_Acc,All_N,dt):
tx=cuda.threadIdx.x
bx=cuda.blockIdx.x
tpb=cuda.blockDim.x
t_pos=tx+tpb*bx
if(t_pos<All_N):
for axis in range(3):
All_Vel[t_pos,axis]+=0.5*(All_Acc_temp[t_pos,axis]+All_Acc[t_pos,axis])*dt
cuda.syncthreads()
################################################################################
@cuda.jit
def rescale_T(All_Vel,Pt_argVel,Pt_N,Pt_V2,Pt_T,nd_Velocity,Mass,nd_Mass,kB,T):
tx=cuda.threadIdx.x
bx=cuda.blockIdx.x
tpb=cuda.blockDim.x
t_pos=tx+tpb*bx
if(t_pos<Pt_N):
cuda.atomic.add(Pt_argVel,0,(All_Vel[t_pos,0])/Pt_N)
cuda.atomic.add(Pt_argVel,1,(All_Vel[t_pos,1])/Pt_N)
cuda.atomic.add(Pt_argVel,2,(All_Vel[t_pos,2])/Pt_N)
cuda.syncthreads()
        #keep only the thermal velocities
All_Vel[t_pos,0]-=Pt_argVel[0]
All_Vel[t_pos,1]-=Pt_argVel[1]
All_Vel[t_pos,2]-=Pt_argVel[2]
cuda.atomic.add(Pt_V2,0,All_Vel[t_pos,0]**2+All_Vel[t_pos,1]**2+All_Vel[t_pos,2]**2)
cuda.syncthreads()
Pt_T[0]=Pt_V2[0]*nd_Velocity**2*Mass[1]*nd_Mass/(3*Pt_N*kB)
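        #NOTE: equipartition, T=m*<v**2>/(3*kB); the sqrt(T_target/T_current) factors
        #below rescale the thermal velocities to the set temperature T[1].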
All_Vel[t_pos,0]*=math.sqrt(T[1]/Pt_T[0])
All_Vel[t_pos,1]*=math.sqrt(T[1]/Pt_T[0])
All_Vel[t_pos,2]*=math.sqrt(T[1]/Pt_T[0])
cuda.syncthreads()
################################################################################
def Dump(All_Pos,Ar_Z,Ar_t,Pars,timestep,dumpstep=1):
if(timestep%dumpstep==0):
with open('Kernel_MD_JIT_CUDA.dump','a') as MD:
print('ITEM: TIMESTEP',file=MD)
print(timestep,file=MD)
print('ITEM: NUMBER OF ATOMS',file=MD)
print(Pars.All_N,file=MD)
print('ITEM: BOX BOUNDS pp pp ff',file=MD)
print(Pars.Box[0,0],Pars.Box[0,1],file=MD)
print(Pars.Box[1,0],Pars.Box[1,1],file=MD)
print(Pars.Box[2,0],Pars.Box[2,1],file=MD)
print('ITEM: ATOMS id type x y z',file=MD)
for i in range(Pars.All_N):
print(i+1,int(Pars.All_type[i])+1,All_Pos[i,0],All_Pos[i,1],All_Pos[i,2],file=MD)
Ar_Z.append(All_Pos[Pars.Pt_N,2])
Ar_t.append(timestep*Pars.dt)
return(Ar_Z,Ar_t)
################################################################################
def Exit(All_Pos,Ar_Z,Ar_t,Pars,timestep):
if(All_Pos[Pars.Pt_N,2]>Pars.d or timestep>=Pars.Tt):
Pars.state=False
(Ar_Z,Ar_t)=Dump(All_Pos,Ar_Z,Ar_t,Pars,timestep)
else:
(Ar_Z,Ar_t)=Dump(All_Pos,Ar_Z,Ar_t,Pars,timestep,Pars.dumpstep)
################################################################################
#***********************************Others*************************************#
################################################################################
################################################################################
def Fplot(Pars,All_Vel,Ar_Z,Ar_t,timestep,argtime):
nacc=25
mp_V2=[]
for i in range(Pars.Pt_N):
mp_V2.append(All_Vel[i,0]**2+All_Vel[i,1]**2+All_Vel[i,2]**2)
mp_V=[math.sqrt(i) for i in mp_V2]
    print('mean velocity in x '+str(np.sum(All_Vel[:Pars.Pt_N,0])/Pars.Pt_N))
    print('mean velocity in y '+str(np.sum(All_Vel[:Pars.Pt_N,1])/Pars.Pt_N))
    print('mean velocity in z '+str(np.sum(All_Vel[:Pars.Pt_N,2])/Pars.Pt_N))
    print('temperature '+str(np.sum(mp_V2)*Pars.nd_Velocity**2*Pars.Mass[1]*Pars.nd_Mass/(3*Pars.Pt_N*Pars.kB)))
test=[int(round(i*nacc)) for i in mp_V]
maxu=np.max(test)
minu=np.min(test)
print(maxu/nacc,minu/nacc)
fu=np.zeros((maxu-minu+1,2))
thex=[x/nacc/10 for x in range(minu*10,(maxu+1)*10)]
thefmax=[4*Pars.pi*(3/(2*Pars.pi*Pars.mp_V[1]**2))**(3/2)*np.exp(-3*thexi**2/(2*Pars.mp_V[1]**2))*thexi**2 for thexi in thex]
du=minu
while minu<=du<=maxu:
fu[du-minu,0]=du/nacc
fu[du-minu,1]=test.count(du)*nacc/Pars.Pt_N
du+=1
print(np.sum(fu[:,1])/nacc)
    #Figure 1
fig1=mpl.figure()
mpl.plot(fu[:,0],fu[:,1],'ro',markersize=1,label='Random')
mpx=[Pars.mp_V[1] for x in range(250)]
mpy=[y/100 for y in range(250)]
mpl.plot(mpx,mpy,'g--',markersize=1,label='RMS')
mpl.plot(thex,thefmax,'b',markersize=1,label='Maxwell')
mpl.legend(loc='upper left')
mpl.xlabel('$v^{*}$')
mpl.ylabel('$f(v^{*})$')
mpl.savefig('Pt_T_Distribution.png',dpi=600)
    #Figure 2
fig2=mpl.figure()
eff=str(timestep)+' TimeSteps; ArgTime: '+str(round(argtime,10))+' Seconds;'
mpl.plot(Ar_t,Ar_Z,'ro',markersize=1,label=eff)
mpl.legend(loc='upper center')
mpl.xlabel('$t$')
mpl.ylabel('$Z$')
mpl.savefig('Ar_Z_t.png',dpi=600)
mpl.show()
################################################################################
def Bar_show(Pars,current,start_time,Name):
bar_length=10
pre=' |'
suf='| '
fill='◉'
emp='◯'
per=int(current*100/Pars.Tt)
n=int(current*bar_length/Pars.Tt)
rn=int(bar_length-n)
step_time=time.time()
argtime=(step_time-start_time)/current
show_string=Name+': '+pre+fill*n+emp*rn+suf+str(current)+' / '+str(Pars.Tt)+' ArgTime: '+str(round(argtime,10))+' Seconds'
if(current!=Pars.Tt):
print(show_string, end='\r', flush=True)
#time.sleep(0.01)
return(argtime)
################################################################################
def Bar_close(start_time,Name):
end_time=time.time()
alltime=end_time-start_time
close_string='\n'+'*******'+Name+' '+'Done! '+'AllTime: '+str(round(alltime,10))+' Seconds!'+'*******'
print(close_string)
#time.sleep(1)
################################################################################
#*************************************Main*************************************#
################################################################################
Pars=Parameters()
(All_Pos,All_Vel,All_Acc,timestep)=Initialization(Pars)
Ar_Z=[]
Ar_t=[]
Dump(All_Pos,Ar_Z,Ar_t,Pars,timestep)
start_time=time.time()
while(Pars.state):
d_All_Pos=cuda.to_device(All_Pos)
d_All_Vel=cuda.to_device(All_Vel)
d_All_Acc=cuda.to_device(All_Acc)
Verlet_Pos[1,Pars.All_N](d_All_Pos,d_All_Vel,d_All_Acc,Pars.All_N,Pars.dt)
temp=np.array([np.nan])
Pos_period[1,Pars.All_N](d_All_Pos,Pars.All_N,Pars.Box,Pars.All_type,Pars.Pt_ePos,temp)
    d_All_Acc_temp=cuda.to_device(All_Acc)#independent device copy of a(t); plain assignment would alias d_All_Acc, which Acceleration_period overwrites
Acceleration_period[1,Pars.All_N](d_All_Pos,d_All_Acc,Pars.All_N,Pars.All_type,Pars.LJ_E,Pars.LJ_S,Pars.Box,Pars.cutoff,Pars.Pt_ePos,Pars.spr_k,Pars.Mass)
Verlet_Vel[1,Pars.All_N](d_All_Vel,d_All_Acc_temp,d_All_Acc,Pars.All_N,Pars.dt)
Pt_argVel=np.zeros(3)
Pt_V2=np.zeros(1)
Pt_T=np.zeros(1)
rescale_T[1,Pars.Pt_N](d_All_Vel,Pt_argVel,Pars.Pt_N,Pt_V2,Pt_T,Pars.nd_Velocity,Pars.Mass,Pars.nd_Mass,Pars.kB,Pars.T)
timestep+=1
All_Pos=d_All_Pos.copy_to_host()
All_Vel=d_All_Vel.copy_to_host()
All_Acc=d_All_Acc.copy_to_host()
Exit(All_Pos,Ar_Z,Ar_t,Pars,timestep)
argtime=Bar_show(Pars,timestep,start_time,'Kernel_MD_JIT_CUDA')
Bar_close(start_time,'Kernel_MD_JIT_CUDA')
Fplot(Pars,All_Vel,Ar_Z,Ar_t,timestep,argtime)
| [
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.exp",
"numpy.max",
"math.cos",
"math.log",
"numba.cuda.random.xoroshiro128p_normal_float64",
"matplotlib.pyplot.show",
"math.sqrt",
"numba.jitclass",
"matplotlib.pyplot.legend",
"math.sin",
"numpy.min",
"numba.cuda.random.xoroshiro128p_unifor... | [((1193, 1210), 'numba.jitclass', 'nb.jitclass', (['spec'], {}), '(spec)\n', (1204, 1210), True, 'import numba as nb\n'), ((16417, 16428), 'time.time', 'time.time', ([], {}), '()\n', (16426, 16428), False, 'import time\n'), ((3361, 3386), 'numpy.zeros', 'np.zeros', (['(Pars.All_N, 3)'], {}), '((Pars.All_N, 3))\n', (3369, 3386), True, 'import numpy as np\n'), ((3398, 3423), 'numpy.zeros', 'np.zeros', (['(Pars.All_N, 3)'], {}), '((Pars.All_N, 3))\n', (3406, 3423), True, 'import numpy as np\n'), ((3437, 3460), 'numba.cuda.to_device', 'cuda.to_device', (['All_Pos'], {}), '(All_Pos)\n', (3451, 3460), False, 'from numba import cuda\n'), ((3475, 3498), 'numba.cuda.to_device', 'cuda.to_device', (['All_Vel'], {}), '(All_Vel)\n', (3489, 3498), False, 'from numba import cuda\n'), ((3546, 3592), 'numba.cuda.random.create_xoroshiro128p_states', 'create_xoroshiro128p_states', (['Pars.All_N', 'seedt'], {}), '(Pars.All_N, seedt)\n', (3573, 3592), False, 'from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((3783, 3801), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (3791, 3801), True, 'import numpy as np\n'), ((3918, 3929), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3926, 3929), True, 'import numpy as np\n'), ((3940, 3951), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (3948, 3951), True, 'import numpy as np\n'), ((3961, 3972), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (3969, 3972), True, 'import numpy as np\n'), ((4122, 4147), 'numpy.zeros', 'np.zeros', (['(Pars.All_N, 3)'], {}), '((Pars.All_N, 3))\n', (4130, 4147), True, 'import numpy as np\n'), ((4161, 4184), 'numba.cuda.to_device', 'cuda.to_device', (['All_Acc'], {}), '(All_Acc)\n', (4175, 4184), False, 'from numba import cuda\n'), ((6901, 6919), 'numba.cuda.syncthreads', 'cuda.syncthreads', ([], {}), '()\n', (6917, 6919), False, 'from numba import cuda\n'), ((13932, 13944), 'numpy.max', 'np.max', (['test'], {}), '(test)\n', (13938, 13944), True, 'import numpy as np\n'), ((13954, 13966), 'numpy.min', 'np.min', (['test'], {}), '(test)\n', (13960, 13966), True, 'import numpy as np\n'), ((14005, 14035), 'numpy.zeros', 'np.zeros', (['(maxu - minu + 1, 2)'], {}), '((maxu - minu + 1, 2))\n', (14013, 14035), True, 'import numpy as np\n'), ((14401, 14413), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {}), '()\n', (14411, 14413), True, 'import matplotlib.pyplot as mpl\n'), ((14418, 14482), 'matplotlib.pyplot.plot', 'mpl.plot', (['fu[:, 0]', 'fu[:, 1]', '"""ro"""'], {'markersize': '(1)', 'label': '"""Random"""'}), "(fu[:, 0], fu[:, 1], 'ro', markersize=1, label='Random')\n", (14426, 14482), True, 'import matplotlib.pyplot as mpl\n'), ((14560, 14612), 'matplotlib.pyplot.plot', 'mpl.plot', (['mpx', 'mpy', '"""g--"""'], {'markersize': '(1)', 'label': '"""RMS"""'}), "(mpx, mpy, 'g--', markersize=1, label='RMS')\n", (14568, 14612), True, 'import matplotlib.pyplot as mpl\n'), ((14613, 14672), 'matplotlib.pyplot.plot', 'mpl.plot', (['thex', 'thefmax', '"""b"""'], {'markersize': '(1)', 'label': '"""Maxwell"""'}), "(thex, thefmax, 'b', markersize=1, label='Maxwell')\n", (14621, 14672), True, 'import matplotlib.pyplot as mpl\n'), ((14673, 14701), 'matplotlib.pyplot.legend', 'mpl.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (14683, 14701), True, 'import matplotlib.pyplot as mpl\n'), ((14706, 14727), 'matplotlib.pyplot.xlabel', 'mpl.xlabel', (['"""$v^{*}$"""'], {}), "('$v^{*}$')\n", (14716, 
14727), True, 'import matplotlib.pyplot as mpl\n'), ((14732, 14756), 'matplotlib.pyplot.ylabel', 'mpl.ylabel', (['"""$f(v^{*})$"""'], {}), "('$f(v^{*})$')\n", (14742, 14756), True, 'import matplotlib.pyplot as mpl\n'), ((14761, 14806), 'matplotlib.pyplot.savefig', 'mpl.savefig', (['"""Pt_T_Distribution.png"""'], {'dpi': '(600)'}), "('Pt_T_Distribution.png', dpi=600)\n", (14772, 14806), True, 'import matplotlib.pyplot as mpl\n'), ((14823, 14835), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {}), '()\n', (14833, 14835), True, 'import matplotlib.pyplot as mpl\n'), ((14921, 14972), 'matplotlib.pyplot.plot', 'mpl.plot', (['Ar_t', 'Ar_Z', '"""ro"""'], {'markersize': '(1)', 'label': 'eff'}), "(Ar_t, Ar_Z, 'ro', markersize=1, label=eff)\n", (14929, 14972), True, 'import matplotlib.pyplot as mpl\n'), ((14973, 15003), 'matplotlib.pyplot.legend', 'mpl.legend', ([], {'loc': '"""upper center"""'}), "(loc='upper center')\n", (14983, 15003), True, 'import matplotlib.pyplot as mpl\n'), ((15008, 15025), 'matplotlib.pyplot.xlabel', 'mpl.xlabel', (['"""$t$"""'], {}), "('$t$')\n", (15018, 15025), True, 'import matplotlib.pyplot as mpl\n'), ((15030, 15047), 'matplotlib.pyplot.ylabel', 'mpl.ylabel', (['"""$Z$"""'], {}), "('$Z$')\n", (15040, 15047), True, 'import matplotlib.pyplot as mpl\n'), ((15052, 15086), 'matplotlib.pyplot.savefig', 'mpl.savefig', (['"""Ar_Z_t.png"""'], {'dpi': '(600)'}), "('Ar_Z_t.png', dpi=600)\n", (15063, 15086), True, 'import matplotlib.pyplot as mpl\n'), ((15091, 15101), 'matplotlib.pyplot.show', 'mpl.show', ([], {}), '()\n', (15099, 15101), True, 'import matplotlib.pyplot as mpl\n'), ((15408, 15419), 'time.time', 'time.time', ([], {}), '()\n', (15417, 15419), False, 'import time\n'), ((15839, 15850), 'time.time', 'time.time', ([], {}), '()\n', (15848, 15850), False, 'import time\n'), ((16462, 16485), 'numba.cuda.to_device', 'cuda.to_device', (['All_Pos'], {}), '(All_Pos)\n', (16476, 16485), False, 'from numba import cuda\n'), ((16500, 16523), 'numba.cuda.to_device', 'cuda.to_device', (['All_Vel'], {}), '(All_Vel)\n', (16514, 16523), False, 'from numba import cuda\n'), ((16538, 16561), 'numba.cuda.to_device', 'cuda.to_device', (['All_Acc'], {}), '(All_Acc)\n', (16552, 16561), False, 'from numba import cuda\n'), ((16650, 16668), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (16658, 16668), True, 'import numpy as np\n'), ((17047, 17058), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (17055, 17058), True, 'import numpy as np\n'), ((17069, 17080), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (17077, 17080), True, 'import numpy as np\n'), ((17090, 17101), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (17098, 17101), True, 'import numpy as np\n'), ((1374, 1417), 'numpy.array', 'np.array', (['[1.654e-21, 5.207e-20, 1.093e-21]'], {}), '([1.654e-21, 5.207e-20, 1.093e-21])\n', (1382, 1417), True, 'import numpy as np\n'), ((1549, 1573), 'numpy.array', 'np.array', (['[300.0, 300.0]'], {}), '([300.0, 300.0])\n', (1557, 1573), True, 'import numpy as np\n'), ((1847, 1885), 'numpy.sqrt', 'np.sqrt', (['(self.nd_Energy / self.nd_Mass)'], {}), '(self.nd_Energy / self.nd_Mass)\n', (1854, 1885), True, 'import numpy as np\n'), ((2536, 2552), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (2544, 2552), True, 'import numpy as np\n'), ((2573, 2597), 'numpy.zeros', 'np.zeros', (['(self.Pt_N, 3)'], {}), '((self.Pt_N, 3))\n', (2581, 2597), True, 'import numpy as np\n'), ((2619, 2639), 'numpy.zeros', 'np.zeros', (['self.All_N'], {}), '(self.All_N)\n', (2627, 2639), 
True, 'import numpy as np\n'), ((5825, 5873), 'numba.cuda.random.xoroshiro128p_uniform_float64', 'xoroshiro128p_uniform_float64', (['rng_states', 't_pos'], {}), '(rng_states, t_pos)\n', (5854, 5873), False, 'from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((5885, 5933), 'numba.cuda.random.xoroshiro128p_uniform_float64', 'xoroshiro128p_uniform_float64', (['rng_states', 't_pos'], {}), '(rng_states, t_pos)\n', (5914, 5933), False, 'from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((7640, 7683), 'numba.cuda.atomic.min', 'cuda.atomic.min', (['temp', '(0)', 'All_Pos[t_pos, 2]'], {}), '(temp, 0, All_Pos[t_pos, 2])\n', (7655, 7683), False, 'from numba import cuda\n'), ((7689, 7707), 'numba.cuda.syncthreads', 'cuda.syncthreads', ([], {}), '()\n', (7705, 7707), False, 'from numba import cuda\n'), ((9961, 9979), 'numba.cuda.syncthreads', 'cuda.syncthreads', ([], {}), '()\n', (9977, 9979), False, 'from numba import cuda\n'), ((10359, 10377), 'numba.cuda.syncthreads', 'cuda.syncthreads', ([], {}), '()\n', (10375, 10377), False, 'from numba import cuda\n'), ((10763, 10781), 'numba.cuda.syncthreads', 'cuda.syncthreads', ([], {}), '()\n', (10779, 10781), False, 'from numba import cuda\n'), ((11074, 11129), 'numba.cuda.atomic.add', 'cuda.atomic.add', (['Pt_argVel', '(0)', '(All_Vel[t_pos, 0] / Pt_N)'], {}), '(Pt_argVel, 0, All_Vel[t_pos, 0] / Pt_N)\n', (11089, 11129), False, 'from numba import cuda\n'), ((11135, 11190), 'numba.cuda.atomic.add', 'cuda.atomic.add', (['Pt_argVel', '(1)', '(All_Vel[t_pos, 1] / Pt_N)'], {}), '(Pt_argVel, 1, All_Vel[t_pos, 1] / Pt_N)\n', (11150, 11190), False, 'from numba import cuda\n'), ((11196, 11251), 'numba.cuda.atomic.add', 'cuda.atomic.add', (['Pt_argVel', '(2)', '(All_Vel[t_pos, 2] / Pt_N)'], {}), '(Pt_argVel, 2, All_Vel[t_pos, 2] / Pt_N)\n', (11211, 11251), False, 'from numba import cuda\n'), ((11257, 11275), 'numba.cuda.syncthreads', 'cuda.syncthreads', ([], {}), '()\n', (11273, 11275), False, 'from numba import cuda\n'), ((11419, 11522), 'numba.cuda.atomic.add', 'cuda.atomic.add', (['Pt_V2', '(0)', '(All_Vel[t_pos, 0] ** 2 + All_Vel[t_pos, 1] ** 2 + All_Vel[t_pos, 2] ** 2)'], {}), '(Pt_V2, 0, All_Vel[t_pos, 0] ** 2 + All_Vel[t_pos, 1] ** 2 +\n All_Vel[t_pos, 2] ** 2)\n', (11434, 11522), False, 'from numba import cuda\n'), ((11512, 11530), 'numba.cuda.syncthreads', 'cuda.syncthreads', ([], {}), '()\n', (11528, 11530), False, 'from numba import cuda\n'), ((11625, 11650), 'math.sqrt', 'math.sqrt', (['(T[1] / Pt_T[0])'], {}), '(T[1] / Pt_T[0])\n', (11634, 11650), False, 'import math\n'), ((11675, 11700), 'math.sqrt', 'math.sqrt', (['(T[1] / Pt_T[0])'], {}), '(T[1] / Pt_T[0])\n', (11684, 11700), False, 'import math\n'), ((11725, 11750), 'math.sqrt', 'math.sqrt', (['(T[1] / Pt_T[0])'], {}), '(T[1] / Pt_T[0])\n', (11734, 11750), False, 'import math\n'), ((11757, 11775), 'numba.cuda.syncthreads', 'cuda.syncthreads', ([], {}), '()\n', (11773, 11775), False, 'from numba import cuda\n'), ((13549, 13561), 'math.sqrt', 'math.sqrt', (['i'], {}), '(i)\n', (13558, 13561), False, 'import math\n'), ((1438, 1465), 'numpy.array', 'np.array', (['[3.4, 2.47, 2.94]'], {}), '([3.4, 2.47, 2.94])\n', (1446, 1465), True, 'import numpy as np\n'), ((3517, 3528), 'time.time', 'time.time', ([], {}), '()\n', (3526, 3528), False, 'import time\n'), ((6109, 6157), 'numba.cuda.random.xoroshiro128p_uniform_float64', 'xoroshiro128p_uniform_float64', 
(['rng_states', 't_pos'], {}), '(rng_states, t_pos)\n', (6138, 6157), False, 'from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((6207, 6255), 'numba.cuda.random.xoroshiro128p_uniform_float64', 'xoroshiro128p_uniform_float64', (['rng_states', 't_pos'], {}), '(rng_states, t_pos)\n', (6236, 6255), False, 'from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((6314, 6335), 'math.cos', 'math.cos', (['(2 * pi * R2)'], {}), '(2 * pi * R2)\n', (6322, 6335), False, 'import math\n'), ((6391, 6439), 'numba.cuda.random.xoroshiro128p_uniform_float64', 'xoroshiro128p_uniform_float64', (['rng_states', 't_pos'], {}), '(rng_states, t_pos)\n', (6420, 6439), False, 'from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((6489, 6537), 'numba.cuda.random.xoroshiro128p_uniform_float64', 'xoroshiro128p_uniform_float64', (['rng_states', 't_pos'], {}), '(rng_states, t_pos)\n', (6518, 6537), False, 'from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((6596, 6617), 'math.sin', 'math.sin', (['(2 * pi * R2)'], {}), '(2 * pi * R2)\n', (6604, 6617), False, 'import math\n'), ((6663, 6711), 'numba.cuda.random.xoroshiro128p_uniform_float64', 'xoroshiro128p_uniform_float64', (['rng_states', 't_pos'], {}), '(rng_states, t_pos)\n', (6692, 6711), False, 'from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((6761, 6809), 'numba.cuda.random.xoroshiro128p_uniform_float64', 'xoroshiro128p_uniform_float64', (['rng_states', 't_pos'], {}), '(rng_states, t_pos)\n', (6790, 6809), False, 'from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((8846, 8893), 'math.sqrt', 'math.sqrt', (['(Pairx ** 2 + Pairy ** 2 + Pairz ** 2)'], {}), '(Pairx ** 2 + Pairy ** 2 + Pairz ** 2)\n', (8855, 8893), False, 'import math\n'), ((14362, 14378), 'numpy.sum', 'np.sum', (['fu[:, 1]'], {}), '(fu[:, 1])\n', (14368, 14378), True, 'import numpy as np\n'), ((1315, 1340), 'numpy.array', 'np.array', (['[39.95, 195.08]'], {}), '([39.95, 195.08])\n', (1323, 1340), True, 'import numpy as np\n'), ((1599, 1646), 'numpy.sqrt', 'np.sqrt', (['(2 * self.kB * self.T[0] / self.Mass[0])'], {}), '(2 * self.kB * self.T[0] / self.Mass[0])\n', (1606, 1646), True, 'import numpy as np\n'), ((1641, 1688), 'numpy.sqrt', 'np.sqrt', (['(3 * self.kB * self.T[1] / self.Mass[1])'], {}), '(3 * self.kB * self.T[1] / self.Mass[1])\n', (1648, 1688), True, 'import numpy as np\n'), ((5634, 5681), 'numba.cuda.random.xoroshiro128p_normal_float64', 'xoroshiro128p_normal_float64', (['rng_states', 't_pos'], {}), '(rng_states, t_pos)\n', (5662, 5681), False, 'from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((14150, 14199), 'numpy.exp', 'np.exp', (['(-3 * thexi ** 2 / (2 * Pars.mp_V[1] ** 2))'], {}), '(-3 * thexi ** 2 / (2 * Pars.mp_V[1] ** 2))\n', (14156, 14199), True, 'import numpy as np\n'), ((5721, 5735), 'math.sqrt', 'math.sqrt', (['(3.0)'], {}), '(3.0)\n', (5730, 5735), False, 'import math\n'), ((6855, 6867), 'math.log', 'math.log', (['R1'], {}), '(R1)\n', (6863, 6867), False, 'import math\n'), ((13614, 13644), 'numpy.sum', 'np.sum', (['All_Vel[:Pars.Pt_N, 0]'], {}), '(All_Vel[:Pars.Pt_N, 0])\n', 
(13620, 13644), True, 'import numpy as np\n'), ((13680, 13710), 'numpy.sum', 'np.sum', (['All_Vel[:Pars.Pt_N, 1]'], {}), '(All_Vel[:Pars.Pt_N, 1])\n', (13686, 13710), True, 'import numpy as np\n'), ((13746, 13776), 'numpy.sum', 'np.sum', (['All_Vel[:Pars.Pt_N, 2]'], {}), '(All_Vel[:Pars.Pt_N, 2])\n', (13752, 13776), True, 'import numpy as np\n'), ((6300, 6312), 'math.log', 'math.log', (['R1'], {}), '(R1)\n', (6308, 6312), False, 'import math\n'), ((6582, 6594), 'math.log', 'math.log', (['R1'], {}), '(R1)\n', (6590, 6594), False, 'import math\n'), ((13799, 13812), 'numpy.sum', 'np.sum', (['mp_V2'], {}), '(mp_V2)\n', (13805, 13812), True, 'import numpy as np\n')] |
"""
Version: 1.5
Summary: Automatic image brightness adjustment based on gamma correction method
Author: <NAME>
Author-email: <EMAIL>
USAGE:
python3 gamma_correction.py -p ~/plant-image-analysis/test/ -ft jpg
argument:
("-p", "--path", required = True, help="path to image file")
("-ft", "--filetype", required=True, help="Image filetype")
"""
#!/usr/bin/python
# Standard Libraries
import os,fnmatch
import argparse
import shutil
import cv2
import numpy as np
import glob
import multiprocessing
from multiprocessing import Pool
from contextlib import closing
import resource
from PIL import Image, ImageEnhance
# create result folder
def mkdir(path):
# import module
#import os
# remove space at the beginning
path=path.strip()
# remove slash at the end
path=path.rstrip("\\")
    # does the path already exist?
isExists=os.path.exists(path)
# process
if not isExists:
# construct the path and folder
#print path + ' folder constructed!'
# make dir
os.makedirs(path)
return True
else:
# if exists, return
#print path+' path exists!'
return False
#adjust the gamma value to increase the brightness of image
def adjust_gamma(image, gamma):
# build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
# apply gamma correction using the lookup table
return cv2.LUT(image, table)
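#NOTE (worked example): gamma=2.0 gives invGamma=0.5, so a mid-tone pixel 64 maps to
#255*(64/255)**0.5~128 (brighter); the script default gamma=0.5 maps 64 to
#255*(64/255)**2~16, i.e. it darkens instead.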
#apply CLAHE (Contrast Limited Adaptive Histogram Equalization) to perform image enhancement
def image_enhance(img):
# CLAHE (Contrast Limited Adaptive Histogram Equalization)
clahe = cv2.createCLAHE(clipLimit=3., tileGridSize=(8,8))
# convert from BGR to LAB color space
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
# split on 3 different channels
l, a, b = cv2.split(lab)
# apply CLAHE to the L-channel
l2 = clahe.apply(l)
# merge channels
lab = cv2.merge((l2,a,b))
# convert from LAB to BGR
img_enhance = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
return img_enhance
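#NOTE (usage sketch, hypothetical file names):
# img = cv2.imread('in.jpg')
# cv2.imwrite('out.jpg', image_enhance(adjust_gamma(img, gamma=1.5)))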
# Convert it to LAB color space to access the luminous channel which is independent of colors.
def isbright(image_file):
# Set up threshold value for luminous channel, can be adjusted and generalized
thresh = 0.1
# Load image file
orig = cv2.imread(image_file)
# Make backup image
image = orig.copy()
# Get file name
#abs_path = os.path.abspath(image_file)
#filename, file_extension = os.path.splitext(abs_path)
#base_name = os.path.splitext(os.path.basename(filename))[0]
#image_file_name = Path(image_file).name
# Convert color space to LAB format and extract L channel
L, A, B = cv2.split(cv2.cvtColor(image, cv2.COLOR_BGR2LAB))
# Normalize L channel by dividing all pixel values with maximum pixel value
L = L/np.max(L)
text_bool = "bright" if np.mean(L) < thresh else "dark"
print(np.mean(L))
return np.mean(L) > thresh
def gamma_correction(image_file):
#parse the file name
path, filename = os.path.split(image_file)
#filename, file_extension = os.path.splitext(image_file)
# construct the result file path
result_img_path = save_path + str(filename[0:-4]) + '.' + ext
print("Enhancing image : {0} \n".format(str(filename)))
# Load the image
image = cv2.imread(image_file)
#get size of image
img_height, img_width = image.shape[:2]
#image = cv2.resize(image, (0,0), fx = scale_factor, fy = scale_factor)
gamma = args['gamma']
# apply gamma correction and show the images
gamma = gamma if gamma > 0 else 0.1
adjusted = adjust_gamma(image, gamma=gamma)
enhanced_image = image_enhance(adjusted)
# save result as images for reference
cv2.imwrite(result_img_path,enhanced_image)
def image_enhance_pil(image_file):#renamed: a second def image_enhance would shadow the CLAHE-based image_enhance above, breaking gamma_correction
#parse the file name
path, filename = os.path.split(image_file)
#filename, file_extension = os.path.splitext(image_file)
# construct the result file path
result_img_path = save_path + str(filename[0:-4]) + '.' + ext
print("Enhancing image : {0} \n".format(str(filename)))
im = Image.open(image_file)
im_sharpness = ImageEnhance.Sharpness(im).enhance(1.5)
im_contrast = ImageEnhance.Contrast(im_sharpness).enhance(1.5)
im_out = ImageEnhance.Brightness(im_contrast).enhance(0.8)
im_out.save(result_img_path)
def Adaptive_Histogram_Equalization(image_file):
#parse the file name
path, filename = os.path.split(image_file)
#filename, file_extension = os.path.splitext(image_file)
# construct the result file path
result_img_path = save_path + str(filename[0:-4]) + '.' + ext
print("AHE image : {0} \n".format(str(filename)))
#print(isbright(image_file))
# Load the image
bgr = cv2.imread(image_file)
lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
lab_planes = cv2.split(lab)
clahe = cv2.createCLAHE(clipLimit=2.0,tileGridSize=(8,8))
lab_planes[0] = clahe.apply(lab_planes[0])
lab = cv2.merge(lab_planes)
out = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
# save result as images for reference
cv2.imwrite(result_img_path, out)
if __name__ == '__main__':
# construct the argument and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--path", required = True, help = "path to image file")
ap.add_argument("-ft", "--filetype", required = False, default = 'jpg', help = "image filetype")
ap.add_argument("-gamma", "--gamma", type = float, required = False, default = 0.5, help = "gamma value")
args = vars(ap.parse_args())
# setting path to model file
file_path = args["path"]
ext = args['filetype']
    #acquire image file list
    filetype = '*.' + ext
    image_file_path = file_path + filetype
imgList = sorted(glob.glob(image_file_path))
#print((imgList))
# make the folder to store the results
parent_path = os.path.abspath(os.path.join(file_path, os.pardir))
#mkpath = parent_path + '/' + str('gamma_correction')
mkpath = file_path + '/' + str('gamma_correction')
mkdir(mkpath)
save_path = mkpath + '/'
#print "results_folder: " + save_path
# Loop execute
for image in imgList:
Adaptive_Histogram_Equalization(image)
'''
# get cpu number for parallel processing
#agents = psutil.cpu_count()
agents = multiprocessing.cpu_count()-1
print("Using {0} cores to perfrom parallel processing... \n".format(int(agents)))
# Create a pool of processes. By default, one is created for each CPU in the machine.
# extract the bouding box for each image in file list
with closing(Pool(processes = agents)) as pool:
result = pool.map(Adaptive_Histogram_Equalization, imgList)
pool.terminate()
'''
# monitor memory usage
rusage_denom = 1024.0
print("Memory usage: {0} MB\n".format(int(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom)))
| [
"PIL.ImageEnhance.Brightness",
"argparse.ArgumentParser",
"numpy.mean",
"numpy.arange",
"glob.glob",
"resource.getrusage",
"os.path.join",
"cv2.cvtColor",
"cv2.imwrite",
"os.path.exists",
"numpy.max",
"cv2.LUT",
"cv2.split",
"PIL.ImageEnhance.Sharpness",
"cv2.createCLAHE",
"cv2.merge",... | [((880, 900), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (894, 900), False, 'import os, fnmatch\n'), ((1575, 1596), 'cv2.LUT', 'cv2.LUT', (['image', 'table'], {}), '(image, table)\n', (1582, 1596), False, 'import cv2\n'), ((1791, 1842), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(3.0)', 'tileGridSize': '(8, 8)'}), '(clipLimit=3.0, tileGridSize=(8, 8))\n', (1806, 1842), False, 'import cv2\n'), ((1894, 1930), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2LAB'], {}), '(img, cv2.COLOR_BGR2LAB)\n', (1906, 1930), False, 'import cv2\n'), ((1988, 2002), 'cv2.split', 'cv2.split', (['lab'], {}), '(lab)\n', (1997, 2002), False, 'import cv2\n'), ((2099, 2120), 'cv2.merge', 'cv2.merge', (['(l2, a, b)'], {}), '((l2, a, b))\n', (2108, 2120), False, 'import cv2\n'), ((2174, 2210), 'cv2.cvtColor', 'cv2.cvtColor', (['lab', 'cv2.COLOR_LAB2BGR'], {}), '(lab, cv2.COLOR_LAB2BGR)\n', (2186, 2210), False, 'import cv2\n'), ((2508, 2530), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (2518, 2530), False, 'import cv2\n'), ((3287, 3312), 'os.path.split', 'os.path.split', (['image_file'], {}), '(image_file)\n', (3300, 3312), False, 'import os, fnmatch\n'), ((3590, 3612), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (3600, 3612), False, 'import cv2\n'), ((4052, 4096), 'cv2.imwrite', 'cv2.imwrite', (['result_img_path', 'enhanced_image'], {}), '(result_img_path, enhanced_image)\n', (4063, 4096), False, 'import cv2\n'), ((4181, 4206), 'os.path.split', 'os.path.split', (['image_file'], {}), '(image_file)\n', (4194, 4206), False, 'import os, fnmatch\n'), ((4460, 4482), 'PIL.Image.open', 'Image.open', (['image_file'], {}), '(image_file)\n', (4470, 4482), False, 'from PIL import Image, ImageEnhance\n'), ((4829, 4854), 'os.path.split', 'os.path.split', (['image_file'], {}), '(image_file)\n', (4842, 4854), False, 'import os, fnmatch\n'), ((5155, 5177), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (5165, 5177), False, 'import cv2\n'), ((5189, 5225), 'cv2.cvtColor', 'cv2.cvtColor', (['bgr', 'cv2.COLOR_BGR2LAB'], {}), '(bgr, cv2.COLOR_BGR2LAB)\n', (5201, 5225), False, 'import cv2\n'), ((5244, 5258), 'cv2.split', 'cv2.split', (['lab'], {}), '(lab)\n', (5253, 5258), False, 'import cv2\n'), ((5272, 5323), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(2.0)', 'tileGridSize': '(8, 8)'}), '(clipLimit=2.0, tileGridSize=(8, 8))\n', (5287, 5323), False, 'import cv2\n'), ((5381, 5402), 'cv2.merge', 'cv2.merge', (['lab_planes'], {}), '(lab_planes)\n', (5390, 5402), False, 'import cv2\n'), ((5414, 5450), 'cv2.cvtColor', 'cv2.cvtColor', (['lab', 'cv2.COLOR_LAB2BGR'], {}), '(lab, cv2.COLOR_LAB2BGR)\n', (5426, 5450), False, 'import cv2\n'), ((5498, 5531), 'cv2.imwrite', 'cv2.imwrite', (['result_img_path', 'out'], {}), '(result_img_path, out)\n', (5509, 5531), False, 'import cv2\n'), ((5624, 5649), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5647, 5649), False, 'import argparse\n'), ((1050, 1067), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1061, 1067), False, 'import os, fnmatch\n'), ((2919, 2957), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2LAB'], {}), '(image, cv2.COLOR_BGR2LAB)\n', (2931, 2957), False, 'import cv2\n'), ((3054, 3063), 'numpy.max', 'np.max', (['L'], {}), '(L)\n', (3060, 3063), True, 'import numpy as np\n'), ((3144, 3154), 'numpy.mean', 'np.mean', (['L'], {}), '(L)\n', (3151, 3154), True, 'import numpy as np\n'), ((3172, 3182), 
'numpy.mean', 'np.mean', (['L'], {}), '(L)\n', (3179, 3182), True, 'import numpy as np\n'), ((6225, 6251), 'glob.glob', 'glob.glob', (['image_file_path'], {}), '(image_file_path)\n', (6234, 6251), False, 'import glob\n'), ((6355, 6389), 'os.path.join', 'os.path.join', (['file_path', 'os.pardir'], {}), '(file_path, os.pardir)\n', (6367, 6389), False, 'import os, fnmatch\n'), ((3097, 3107), 'numpy.mean', 'np.mean', (['L'], {}), '(L)\n', (3104, 3107), True, 'import numpy as np\n'), ((4507, 4533), 'PIL.ImageEnhance.Sharpness', 'ImageEnhance.Sharpness', (['im'], {}), '(im)\n', (4529, 4533), False, 'from PIL import Image, ImageEnhance\n'), ((4570, 4605), 'PIL.ImageEnhance.Contrast', 'ImageEnhance.Contrast', (['im_sharpness'], {}), '(im_sharpness)\n', (4591, 4605), False, 'from PIL import Image, ImageEnhance\n'), ((4633, 4669), 'PIL.ImageEnhance.Brightness', 'ImageEnhance.Brightness', (['im_contrast'], {}), '(im_contrast)\n', (4656, 4669), False, 'from PIL import Image, ImageEnhance\n'), ((1474, 1491), 'numpy.arange', 'np.arange', (['(0)', '(256)'], {}), '(0, 256)\n', (1483, 1491), True, 'import numpy as np\n'), ((7360, 7400), 'resource.getrusage', 'resource.getrusage', (['resource.RUSAGE_SELF'], {}), '(resource.RUSAGE_SELF)\n', (7378, 7400), False, 'import resource\n')] |
import argparse
import math
import os, sys
import random
import datetime
import time
from typing import List
import json
import numpy as np
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.parallel
from torch.optim import lr_scheduler
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.tensorboard import SummaryWriter
import _init_paths
from dataset.get_dataset import get_datasets
from utils.logger import setup_logger
import models
import models.aslloss
from models.query2label import build_q2l
from utils.metric import voc_mAP
from utils.misc import clean_state_dict
from utils.slconfig import get_raw_dict
def parser_args():
parser = argparse.ArgumentParser(description='Query2Label MSCOCO Training')
parser.add_argument('--dataname', help='dataname', default='coco14', choices=['coco14'])
parser.add_argument('--dataset_dir', help='dir of dataset', default='/comp_robot/liushilong/data/COCO14/')
parser.add_argument('--img_size', default=448, type=int,
help='size of input images')
    parser.add_argument('--keep_only', type=float, default=1.0, help='scale down dataset to this portion')
parser.add_argument('--output', metavar='DIR',
help='path to output folder')
parser.add_argument('--num_class', default=80, type=int,
help="Number of query slots")
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model. default is False. ')
parser.add_argument('--optim', default='AdamW', type=str, choices=['AdamW', 'Adam_twd'],
help='which optim to use')
# loss
parser.add_argument('--eps', default=1e-5, type=float,
help='eps for focal loss (default: 1e-5)')
parser.add_argument('--dtgfl', action='store_true', default=False,
help='disable_torch_grad_focal_loss in asl')
parser.add_argument('--gamma_pos', default=0, type=float,
metavar='gamma_pos', help='gamma pos for simplified asl loss')
parser.add_argument('--gamma_neg', default=2, type=float,
metavar='gamma_neg', help='gamma neg for simplified asl loss')
parser.add_argument('--loss_dev', default=-1, type=float,
help='scale factor for loss')
parser.add_argument('-j', '--workers', default=32, type=int, metavar='N',
help='number of data loading workers (default: 32)')
parser.add_argument('--epochs', default=80, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--val_interval', default=1, type=int, metavar='N',
help='interval of validation')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--wd', '--weight-decay', default=1e-2, type=float,
metavar='W', help='weight decay (default: 1e-2)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--resume_omit', default=[], type=str, nargs='*')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--ema-decay', default=0.9997, type=float, metavar='M',
help='decay of model ema')
parser.add_argument('--ema-epoch', default=0, type=int, metavar='M',
help='start ema epoch')
# distribution training
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='env://', type=str,
help='url used to set up distributed training')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument("--local_rank", type=int, help='local rank for DistributedDataParallel')
# data aug
parser.add_argument('--cutout', action='store_true', default=False,
help='apply cutout')
parser.add_argument('--n_holes', type=int, default=1,
help='number of holes to cut out from image')
parser.add_argument('--length', type=int, default=-1,
help='length of the holes. suggest to use default setting -1.')
parser.add_argument('--cut_fact', type=float, default=0.5,
help='mutual exclusion with length. ')
parser.add_argument('--orid_norm', action='store_true', default=False,
help='using mean [0,0,0] and std [1,1,1] to normalize input images')
# * Transformer
parser.add_argument('--enc_layers', default=1, type=int,
help="Number of encoding layers in the transformer")
parser.add_argument('--dec_layers', default=2, type=int,
help="Number of decoding layers in the transformer")
parser.add_argument('--dim_feedforward', default=8192, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=2048, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.1, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=4, type=int,
help="Number of attention heads inside the transformer's attentions")
parser.add_argument('--pre_norm', action='store_true')
    parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine',),
help="Type of positional embedding to use on top of the image features")
parser.add_argument('--backbone', default='resnet101', type=str,
help="Name of the convolutional backbone to use")
parser.add_argument('--keep_other_self_attn_dec', action='store_true',
help='keep the other self attention modules in transformer decoders, which will be removed default.')
parser.add_argument('--keep_first_self_attn_dec', action='store_true',
help='keep the first self attention module in transformer decoders, which will be removed default.')
parser.add_argument('--keep_input_proj', action='store_true',
help="keep the input projection layer. Needed when the channel of image features is different from hidden_dim of Transformer layers.")
    # * Training
parser.add_argument('--amp', action='store_true', default=False,
help='apply amp')
parser.add_argument('--early-stop', action='store_true', default=False,
help='apply early stop')
    parser.add_argument('--kill-stop', action='store_true', default=False,
                        help='kill all processes of this job when early stop triggers')
args = parser.parse_args()
return args
def get_args():
args = parser_args()
return args
best_mAP = 0
def main():
args = get_args()
if 'WORLD_SIZE' in os.environ:
assert args.world_size > 0, 'please set --world-size and --rank in the command line'
# launch by torch.distributed.launch
# Single node
# python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 1 --rank 0 ...
# Multi nodes
# python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 0 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ...
# python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 1 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ...
local_world_size = int(os.environ['WORLD_SIZE'])
args.world_size = args.world_size * local_world_size
args.rank = args.rank * local_world_size + args.local_rank
print('world size: {}, world rank: {}, local rank: {}'.format(args.world_size, args.rank, args.local_rank))
print('os.environ:', os.environ)
else:
# single process, useful for debugging
# python main.py ...
args.world_size = 1
args.rank = 0
args.local_rank = 0
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
torch.cuda.set_device(args.local_rank)
print('| distributed init (local_rank {}): {}'.format(
args.local_rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend='nccl', init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
cudnn.benchmark = True
os.makedirs(args.output, exist_ok=True)
logger = setup_logger(output=args.output, distributed_rank=dist.get_rank(), color=False, name="Q2L")
logger.info("Command: "+' '.join(sys.argv))
if dist.get_rank() == 0:
path = os.path.join(args.output, "config.json")
with open(path, 'w') as f:
json.dump(get_raw_dict(args), f, indent=2)
logger.info("Full config saved to {}".format(path))
logger.info('world size: {}'.format(dist.get_world_size()))
logger.info('dist.get_rank(): {}'.format(dist.get_rank()))
logger.info('local_rank: {}'.format(args.local_rank))
return main_worker(args, logger)
def main_worker(args, logger):
global best_mAP
# build model
model = build_q2l(args)
model = model.cuda()
ema_m = ModelEma(model, args.ema_decay) # 0.9997
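    # NOTE (sketch): ModelEma tracks an exponential moving average of the weights,
    # roughly w_ema = decay*w_ema + (1-decay)*w per update; decay=0.9997 averages
    # over about 1/(1-decay) ~ 3300 recent steps.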
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], broadcast_buffers=False)
# criterion
criterion = models.aslloss.AsymmetricLossOptimized(
gamma_neg=args.gamma_neg, gamma_pos=args.gamma_pos,
disable_torch_grad_focal_loss=args.dtgfl,
eps=args.eps,
)
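    # NOTE (sketch): the asymmetric loss focuses positives and negatives separately,
    # roughly -(1-p)**gamma_pos*log(p) on positive labels and -p**gamma_neg*log(1-p)
    # on negatives; the defaults here are gamma_pos=0, gamma_neg=2.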
# optimizer
args.lr_mult = args.batch_size / 256
if args.optim == 'AdamW':
param_dicts = [
{"params": [p for n, p in model.module.named_parameters() if p.requires_grad]},
]
optimizer = getattr(torch.optim, args.optim)(
param_dicts,
args.lr_mult * args.lr,
betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay
)
elif args.optim == 'Adam_twd':
parameters = add_weight_decay(model, args.weight_decay)
optimizer = torch.optim.Adam(
parameters,
args.lr_mult * args.lr,
betas=(0.9, 0.999), eps=1e-08, weight_decay=0
)
else:
raise NotImplementedError
# tensorboard
if dist.get_rank() == 0:
summary_writer = SummaryWriter(log_dir=args.output)
else:
summary_writer = None
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
logger.info("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location=torch.device(dist.get_rank()))
if 'state_dict' in checkpoint:
state_dict = clean_state_dict(checkpoint['state_dict'])
elif 'model' in checkpoint:
state_dict = clean_state_dict(checkpoint['model'])
else:
raise ValueError("No model or state_dicr Found!!!")
logger.info("Omitting {}".format(args.resume_omit))
# import ipdb; ipdb.set_trace()
for omit_name in args.resume_omit:
del state_dict[omit_name]
model.module.load_state_dict(state_dict, strict=False)
# model.module.load_state_dict(checkpoint['state_dict'])
logger.info("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
del checkpoint
del state_dict
torch.cuda.empty_cache()
else:
logger.info("=> no checkpoint found at '{}'".format(args.resume))
# Data loading code
train_dataset, val_dataset = get_datasets(args)
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
assert args.batch_size // dist.get_world_size() == args.batch_size / dist.get_world_size(), 'Batch size is not divisible by num of gpus.'
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size // dist.get_world_size(), shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=args.batch_size // dist.get_world_size(), shuffle=False,
num_workers=args.workers, pin_memory=True, sampler=val_sampler)
if args.evaluate:
_, mAP = validate(val_loader, model, criterion, args, logger)
logger.info(' * mAP {mAP:.5f}'
.format(mAP=mAP))
return
epoch_time = AverageMeterHMS('TT')
eta = AverageMeterHMS('ETA', val_only=True)
losses = AverageMeter('Loss', ':5.3f', val_only=True)
losses_ema = AverageMeter('Loss_ema', ':5.3f', val_only=True)
mAPs = AverageMeter('mAP', ':5.5f', val_only=True)
mAPs_ema = AverageMeter('mAP_ema', ':5.5f', val_only=True)
progress = ProgressMeter(
args.epochs,
[eta, epoch_time, losses, mAPs, losses_ema, mAPs_ema],
prefix='=> Test Epoch: ')
# one cycle learning rate
scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=args.lr, steps_per_epoch=len(train_loader), epochs=args.epochs, pct_start=0.2)
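    # NOTE: OneCycleLR ramps the lr up to max_lr over the first pct_start=0.2 of all
    # len(train_loader)*epochs steps and then anneals it down, which is why
    # scheduler.step() is called once per batch inside train().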
end = time.time()
best_epoch = -1
best_regular_mAP = 0
best_regular_epoch = -1
best_ema_mAP = 0
regular_mAP_list = []
ema_mAP_list = []
torch.cuda.empty_cache()
for epoch in range(args.start_epoch, args.epochs):
train_sampler.set_epoch(epoch)
if args.ema_epoch == epoch:
ema_m = ModelEma(model.module, args.ema_decay)
torch.cuda.empty_cache()
torch.cuda.empty_cache()
# train for one epoch
loss = train(train_loader, model, ema_m, criterion, optimizer, scheduler, epoch, args, logger)
if summary_writer:
# tensorboard logger
summary_writer.add_scalar('train_loss', loss, epoch)
# summary_writer.add_scalar('train_acc1', acc1, epoch)
summary_writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
if epoch % args.val_interval == 0:
# evaluate on validation set
loss, mAP = validate(val_loader, model, criterion, args, logger)
loss_ema, mAP_ema = validate(val_loader, ema_m.module, criterion, args, logger)
losses.update(loss)
mAPs.update(mAP)
losses_ema.update(loss_ema)
mAPs_ema.update(mAP_ema)
epoch_time.update(time.time() - end)
end = time.time()
eta.update(epoch_time.avg * (args.epochs - epoch - 1))
regular_mAP_list.append(mAP)
ema_mAP_list.append(mAP_ema)
progress.display(epoch, logger)
if summary_writer:
# tensorboard logger
summary_writer.add_scalar('val_loss', loss, epoch)
summary_writer.add_scalar('val_mAP', mAP, epoch)
summary_writer.add_scalar('val_loss_ema', loss_ema, epoch)
summary_writer.add_scalar('val_mAP_ema', mAP_ema, epoch)
# remember best (regular) mAP and corresponding epochs
if mAP > best_regular_mAP:
best_regular_mAP = max(best_regular_mAP, mAP)
best_regular_epoch = epoch
if mAP_ema > best_ema_mAP:
best_ema_mAP = max(mAP_ema, best_ema_mAP)
if mAP_ema > mAP:
mAP = mAP_ema
state_dict = ema_m.module.state_dict()
else:
state_dict = model.state_dict()
is_best = mAP > best_mAP
if is_best:
best_epoch = epoch
best_mAP = max(mAP, best_mAP)
logger.info("{} | Set best mAP {} in ep {}".format(epoch, best_mAP, best_epoch))
logger.info(" | best regular mAP {} in ep {}".format(best_regular_mAP, best_regular_epoch))
if dist.get_rank() == 0:
save_checkpoint({
'epoch': epoch + 1,
'backbone': args.backbone,
'state_dict': state_dict,
'best_mAP': best_mAP,
'optimizer' : optimizer.state_dict(),
}, is_best=is_best, filename=os.path.join(args.output, 'checkpoint.pth.tar'))
# filename=os.path.join(args.output, 'checkpoint_{:04d}.pth.tar'.format(epoch))
if math.isnan(loss) or math.isnan(loss_ema):
save_checkpoint({
'epoch': epoch + 1,
'backbone': args.backbone,
'state_dict': model.state_dict(),
'best_mAP': best_mAP,
'optimizer' : optimizer.state_dict(),
}, is_best=is_best, filename=os.path.join(args.output, 'checkpoint_nan.pth.tar'))
logger.info('Loss is NaN, break')
sys.exit(1)
# early stop
if args.early_stop:
if best_epoch >= 0 and epoch - max(best_epoch, best_regular_epoch) > 8:
if len(ema_mAP_list) > 1 and ema_mAP_list[-1] < best_ema_mAP:
logger.info("epoch - best_epoch = {}, stop!".format(epoch - best_epoch))
if dist.get_rank() == 0 and args.kill_stop:
filename = sys.argv[0].split(' ')[0].strip()
killedlist = kill_process(filename, os.getpid())
logger.info("Kill all process of {}: ".format(filename) + " ".join(killedlist))
break
print("Best mAP:", best_mAP)
if summary_writer:
summary_writer.close()
return 0
def train(train_loader, model, ema_m, criterion, optimizer, scheduler, epoch, args, logger):
scaler = torch.cuda.amp.GradScaler(enabled=args.amp)
batch_time = AverageMeter('T', ':5.3f')
data_time = AverageMeter('DT', ':5.3f')
speed_gpu = AverageMeter('S1', ':.1f')
speed_all = AverageMeter('SA', ':.1f')
losses = AverageMeter('Loss', ':5.3f')
lr = AverageMeter('LR', ':.3e', val_only=True)
mem = AverageMeter('Mem', ':.0f', val_only=True)
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, speed_gpu, speed_all, lr, losses, mem],
prefix="Epoch: [{}/{}]".format(epoch, args.epochs))
def get_learning_rate(optimizer):
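        # all param groups follow the same scheduler here, so report the first one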
for param_group in optimizer.param_groups:
return param_group['lr']
lr.update(get_learning_rate(optimizer))
logger.info("lr:{}".format(get_learning_rate(optimizer)))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
images = images.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
with torch.cuda.amp.autocast(enabled=args.amp):
output = model(images)
loss = criterion(output, target)
if args.loss_dev > 0:
loss *= args.loss_dev
# record loss
losses.update(loss.item(), images.size(0))
mem.update(torch.cuda.max_memory_allocated() / 1024.0 / 1024.0)
# compute gradient and do SGD step
optimizer.zero_grad()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
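        # GradScaler skips optimizer.step() and shrinks the loss scale when the
        # scaled gradients contain inf/NaN; otherwise the scale may grow again.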
# one cycle learning rate
scheduler.step()
lr.update(get_learning_rate(optimizer))
if epoch >= args.ema_epoch:
ema_m.update(model)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
speed_gpu.update(images.size(0) / batch_time.val, batch_time.val)
speed_all.update(images.size(0) * dist.get_world_size() / batch_time.val, batch_time.val)
if i % args.print_freq == 0:
progress.display(i, logger)
return losses.avg
@torch.no_grad()
def validate(val_loader, model, criterion, args, logger):
batch_time = AverageMeter('Time', ':5.3f')
losses = AverageMeter('Loss', ':5.3f')
# Acc1 = AverageMeter('Acc@1', ':5.2f')
# top5 = AverageMeter('Acc@5', ':5.2f')
mem = AverageMeter('Mem', ':.0f', val_only=True)
# mAP = AverageMeter('mAP', ':5.3f', val_only=)
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, mem],
prefix='Test: ')
# switch to evaluate mode
saveflag = False
model.eval()
saved_data = []
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
images = images.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
with torch.cuda.amp.autocast(enabled=args.amp):
output = model(images)
loss = criterion(output, target)
if args.loss_dev > 0:
loss *= args.loss_dev
output_sm = nn.functional.sigmoid(output)
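                # nn.functional.sigmoid is deprecated; torch.sigmoid(output) is the modern spelling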
if torch.isnan(loss):
saveflag = True
# record loss
losses.update(loss.item(), images.size(0))
mem.update(torch.cuda.max_memory_allocated() / 1024.0 / 1024.0)
# save some data
# output_sm = nn.functional.sigmoid(output)
_item = torch.cat((output_sm.detach().cpu(), target.detach().cpu()), 1)
# del output_sm
# del target
saved_data.append(_item)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 and dist.get_rank() == 0:
progress.display(i, logger)
logger.info('=> synchronize...')
if dist.get_world_size() > 1:
dist.barrier()
    loss_avg = _meter_reduce(losses) if dist.get_world_size() > 1 else losses.avg
# import ipdb; ipdb.set_trace()
# calculate mAP
saved_data = torch.cat(saved_data, 0).numpy()
saved_name = 'saved_data_tmp.{}.txt'.format(dist.get_rank())
np.savetxt(os.path.join(args.output, saved_name), saved_data)
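    # every rank dumps its sigmoid outputs and targets to its own text file;
    # rank 0 then reads all shards back to compute a single global mAP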
if dist.get_world_size() > 1:
dist.barrier()
if dist.get_rank() == 0:
print("Calculating mAP:")
filenamelist = ['saved_data_tmp.{}.txt'.format(ii) for ii in range(dist.get_world_size())]
metric_func = voc_mAP
mAP, aps = metric_func([os.path.join(args.output, _filename) for _filename in filenamelist], args.num_class, return_each=True)
logger.info(" mAP: {}".format(mAP))
logger.info(" aps: {}".format(np.array2string(aps, precision=5)))
else:
mAP = 0
if dist.get_world_size() > 1:
dist.barrier()
return loss_avg, mAP
##################################################################################
def add_weight_decay(model, weight_decay=1e-4, skip_list=()):
decay = []
no_decay = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
no_decay.append(param)
else:
decay.append(param)
return [
{'params': no_decay, 'weight_decay': 0.},
{'params': decay, 'weight_decay': weight_decay}]
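# Illustrative usage (optimizer choice and values assumed, not taken from this file):
#   param_groups = add_weight_decay(model, weight_decay=1e-4)
#   optimizer = torch.optim.AdamW(param_groups, lr=1e-4)
# Biases and 1-D tensors (e.g. norm scales) are exempted from weight decay.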
class ModelEma(torch.nn.Module):
def __init__(self, model, decay=0.9997, device=None):
super(ModelEma, self).__init__()
# make a copy of the model for accumulating moving average of weights
self.module = deepcopy(model)
self.module.eval()
# import ipdb; ipdb.set_trace()
self.decay = decay
self.device = device # perform ema on different device from model if set
if self.device is not None:
self.module.to(device=device)
def _update(self, model, update_fn):
with torch.no_grad():
for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
if self.device is not None:
model_v = model_v.to(device=self.device)
ema_v.copy_(update_fn(ema_v, model_v))
def update(self, model):
self._update(model, update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m)
def set(self, model):
self._update(model, update_fn=lambda e, m: m)
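# Illustrative ModelEma usage (names assumed):
#   ema = ModelEma(model, decay=0.9997)
#   ema.update(model)  # per step: ema_v = decay * ema_v + (1 - decay) * model_v
#   ema.set(model)     # hard copy of the current weights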
def _meter_reduce(meter):
meter_sum = torch.FloatTensor([meter.sum]).cuda()
meter_count = torch.FloatTensor([meter.count]).cuda()
torch.distributed.reduce(meter_sum, 0)
torch.distributed.reduce(meter_count, 0)
meter_avg = meter_sum / meter_count
return meter_avg.item()
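# Note: torch.distributed.reduce defaults to op=SUM and gathers onto rank 0,
# so the average returned by _meter_reduce is only meaningful on rank 0.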
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
# torch.save(state, filename)
if is_best:
torch.save(state, os.path.split(filename)[0] + '/model_best.pth.tar')
# shutil.copyfile(filename, os.path.split(filename)[0] + '/model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f', val_only=False):
self.name = name
self.fmt = fmt
self.val_only = val_only
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
if self.val_only:
fmtstr = '{name} {val' + self.fmt + '}'
else:
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
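# Illustrative: str(AverageMeter('Loss', ':5.3f')) renders as "Loss 0.123 (0.456)",
# or just "Loss 0.123" when val_only=True.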
class AverageMeterHMS(AverageMeter):
"""Meter for timer in HH:MM:SS format"""
def __str__(self):
if self.val_only:
fmtstr = '{name} {val}'
else:
fmtstr = '{name} {val} ({sum})'
return fmtstr.format(name=self.name,
val=str(datetime.timedelta(seconds=int(self.val))),
sum=str(datetime.timedelta(seconds=int(self.sum))))
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch, logger):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
logger.info(' '.join(entries))
def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
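        # e.g. _get_batch_fmtstr(100) -> '[{:3d}/100]', so display(7, ...) prints "[  7/100] ..."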
def kill_process(filename:str, holdpid:int) -> List[str]:
import subprocess, signal
res = subprocess.check_output("ps aux | grep {} | grep -v grep | awk '{{print $2}}'".format(filename), shell=True, cwd="./")
res = res.decode('utf-8')
idlist = [i.strip() for i in res.split('\n') if i != '']
print("kill: {}".format(idlist))
for idname in idlist:
if idname != str(holdpid):
os.kill(int(idname), signal.SIGKILL)
return idlist
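# Note: the ps | grep match is a plain substring match, so any unrelated process
# whose command line happens to contain `filename` would be killed as well.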
if __name__ == '__main__':
main()
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"models.aslloss.AsymmetricLossOptimized",
"torch.cuda.max_memory_allocated",
"torch.cat",
"os.path.isfile",
"torch.distributed.get_world_size",
"torch.nn.functional.sigmoid",
"torch.no_grad",
"os.path.join",
"torch.isnan",
"torch.cuda.amp.autocas... | [((21698, 21713), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21711, 21713), False, 'import torch\n'), ((827, 893), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Query2Label MSCOCO Training"""'}), "(description='Query2Label MSCOCO Training')\n", (850, 893), False, 'import argparse\n'), ((9578, 9616), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (9599, 9616), False, 'import torch\n'), ((9733, 9861), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""', 'init_method': 'args.dist_url', 'world_size': 'args.world_size', 'rank': 'args.rank'}), "(backend='nccl', init_method=args.\n dist_url, world_size=args.world_size, rank=args.rank)\n", (9769, 9861), False, 'import torch\n'), ((9926, 9965), 'os.makedirs', 'os.makedirs', (['args.output'], {'exist_ok': '(True)'}), '(args.output, exist_ok=True)\n', (9937, 9965), False, 'import os, sys\n'), ((10661, 10676), 'models.query2label.build_q2l', 'build_q2l', (['args'], {}), '(args)\n', (10670, 10676), False, 'from models.query2label import build_q2l\n'), ((10767, 10875), 'torch.nn.parallel.DistributedDataParallel', 'torch.nn.parallel.DistributedDataParallel', (['model'], {'device_ids': '[args.local_rank]', 'broadcast_buffers': '(False)'}), '(model, device_ids=[args.\n local_rank], broadcast_buffers=False)\n', (10808, 10875), False, 'import torch\n'), ((10904, 11055), 'models.aslloss.AsymmetricLossOptimized', 'models.aslloss.AsymmetricLossOptimized', ([], {'gamma_neg': 'args.gamma_neg', 'gamma_pos': 'args.gamma_pos', 'disable_torch_grad_focal_loss': 'args.dtgfl', 'eps': 'args.eps'}), '(gamma_neg=args.gamma_neg, gamma_pos=\n args.gamma_pos, disable_torch_grad_focal_loss=args.dtgfl, eps=args.eps)\n', (10942, 11055), False, 'import models\n'), ((13241, 13259), 'dataset.get_dataset.get_datasets', 'get_datasets', (['args'], {}), '(args)\n', (13253, 13259), False, 'from dataset.get_dataset import get_datasets\n'), ((13281, 13343), 'torch.utils.data.distributed.DistributedSampler', 'torch.utils.data.distributed.DistributedSampler', (['train_dataset'], {}), '(train_dataset)\n', (13328, 13343), False, 'import torch\n'), ((13756, 13831), 'torch.utils.data.distributed.DistributedSampler', 'torch.utils.data.distributed.DistributedSampler', (['val_dataset'], {'shuffle': '(False)'}), '(val_dataset, shuffle=False)\n', (13803, 13831), False, 'import torch\n'), ((14882, 14893), 'time.time', 'time.time', ([], {}), '()\n', (14891, 14893), False, 'import time\n'), ((15040, 15064), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (15062, 15064), False, 'import torch\n'), ((19502, 19545), 'torch.cuda.amp.GradScaler', 'torch.cuda.amp.GradScaler', ([], {'enabled': 'args.amp'}), '(enabled=args.amp)\n', (19527, 19545), False, 'import torch\n'), ((20352, 20363), 'time.time', 'time.time', ([], {}), '()\n', (20361, 20363), False, 'import time\n'), ((26483, 26521), 'torch.distributed.reduce', 'torch.distributed.reduce', (['meter_sum', '(0)'], {}), '(meter_sum, 0)\n', (26507, 26521), False, 'import torch\n'), ((26526, 26566), 'torch.distributed.reduce', 'torch.distributed.reduce', (['meter_count', '(0)'], {}), '(meter_count, 0)\n', (26550, 26566), False, 'import torch\n'), ((9474, 9496), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (9485, 9496), False, 'import random\n'), ((9505, 9533), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), 
'(args.seed)\n', (9522, 9533), False, 'import torch\n'), ((9542, 9567), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (9556, 9567), True, 'import numpy as np\n'), ((10126, 10141), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (10139, 10141), True, 'import torch.distributed as dist\n'), ((10163, 10203), 'os.path.join', 'os.path.join', (['args.output', '"""config.json"""'], {}), "(args.output, 'config.json')\n", (10175, 10203), False, 'import os, sys\n'), ((11841, 11856), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (11854, 11856), True, 'import torch.distributed as dist\n'), ((11888, 11922), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'args.output'}), '(log_dir=args.output)\n', (11901, 11922), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((12037, 12064), 'os.path.isfile', 'os.path.isfile', (['args.resume'], {}), '(args.resume)\n', (12051, 12064), False, 'import os, sys\n'), ((15299, 15323), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (15321, 15323), False, 'import torch\n'), ((21409, 21420), 'time.time', 'time.time', ([], {}), '()\n', (21418, 21420), False, 'import time\n'), ((22269, 22284), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22282, 22284), False, 'import torch\n'), ((22300, 22311), 'time.time', 'time.time', ([], {}), '()\n', (22309, 22311), False, 'import time\n'), ((25530, 25545), 'copy.deepcopy', 'deepcopy', (['model'], {}), '(model)\n', (25538, 25545), False, 'from copy import deepcopy\n'), ((10029, 10044), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (10042, 10044), True, 'import torch.distributed as dist\n'), ((10395, 10416), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (10414, 10416), True, 'import torch.distributed as dist\n'), ((10464, 10479), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (10477, 10479), True, 'import torch.distributed as dist\n'), ((11614, 11717), 'torch.optim.Adam', 'torch.optim.Adam', (['parameters', '(args.lr_mult * args.lr)'], {'betas': '(0.9, 0.999)', 'eps': '(1e-08)', 'weight_decay': '(0)'}), '(parameters, args.lr_mult * args.lr, betas=(0.9, 0.999),\n eps=1e-08, weight_decay=0)\n', (11630, 11717), False, 'import torch\n'), ((13065, 13089), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (13087, 13089), False, 'import torch\n'), ((13374, 13395), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (13393, 13395), True, 'import torch.distributed as dist\n'), ((13417, 13438), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (13436, 13438), True, 'import torch.distributed as dist\n'), ((15266, 15290), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (15288, 15290), False, 'import torch\n'), ((16206, 16217), 'time.time', 'time.time', ([], {}), '()\n', (16215, 16217), False, 'import time\n'), ((20636, 20677), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {'enabled': 'args.amp'}), '(enabled=args.amp)\n', (20659, 20677), False, 'import torch\n'), ((23385, 23396), 'time.time', 'time.time', ([], {}), '()\n', (23394, 23396), False, 'import time\n'), ((23561, 23582), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (23580, 23582), True, 'import torch.distributed as dist\n'), ((23600, 23614), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (23612, 23614), True, 'import torch.distributed as 
dist\n'), ((23927, 23942), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (23940, 23942), True, 'import torch.distributed as dist\n'), ((23963, 24000), 'os.path.join', 'os.path.join', (['args.output', 'saved_name'], {}), '(args.output, saved_name)\n', (23975, 24000), False, 'import os, sys\n'), ((24025, 24046), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (24044, 24046), True, 'import torch.distributed as dist\n'), ((24064, 24078), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (24076, 24078), True, 'import torch.distributed as dist\n'), ((24091, 24106), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (24104, 24106), True, 'import torch.distributed as dist\n'), ((24631, 24652), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (24650, 24652), True, 'import torch.distributed as dist\n'), ((24670, 24684), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (24682, 24684), True, 'import torch.distributed as dist\n'), ((25857, 25872), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25870, 25872), False, 'import torch\n'), ((26383, 26413), 'torch.FloatTensor', 'torch.FloatTensor', (['[meter.sum]'], {}), '([meter.sum])\n', (26400, 26413), False, 'import torch\n'), ((26439, 26471), 'torch.FloatTensor', 'torch.FloatTensor', (['[meter.count]'], {}), '([meter.count])\n', (26456, 26471), False, 'import torch\n'), ((10261, 10279), 'utils.slconfig.get_raw_dict', 'get_raw_dict', (['args'], {}), '(args)\n', (10273, 10279), False, 'from utils.slconfig import get_raw_dict\n'), ((12306, 12348), 'utils.misc.clean_state_dict', 'clean_state_dict', (["checkpoint['state_dict']"], {}), "(checkpoint['state_dict'])\n", (12322, 12348), False, 'from utils.misc import clean_state_dict\n'), ((13587, 13608), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (13606, 13608), True, 'import torch.distributed as dist\n'), ((13929, 13950), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (13948, 13950), True, 'import torch.distributed as dist\n'), ((17619, 17634), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (17632, 17634), True, 'import torch.distributed as dist\n'), ((18110, 18126), 'math.isnan', 'math.isnan', (['loss'], {}), '(loss)\n', (18120, 18126), False, 'import math\n'), ((18130, 18150), 'math.isnan', 'math.isnan', (['loss_ema'], {}), '(loss_ema)\n', (18140, 18150), False, 'import math\n'), ((18591, 18602), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (18599, 18602), False, 'import os, sys\n'), ((20481, 20492), 'time.time', 'time.time', ([], {}), '()\n', (20490, 20492), False, 'import time\n'), ((21376, 21387), 'time.time', 'time.time', ([], {}), '()\n', (21385, 21387), False, 'import time\n'), ((22521, 22562), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {'enabled': 'args.amp'}), '(enabled=args.amp)\n', (22544, 22562), False, 'import torch\n'), ((22760, 22789), 'torch.nn.functional.sigmoid', 'nn.functional.sigmoid', (['output'], {}), '(output)\n', (22781, 22789), True, 'import torch.nn as nn\n'), ((22809, 22826), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (22820, 22826), False, 'import torch\n'), ((23842, 23866), 'torch.cat', 'torch.cat', (['saved_data', '(0)'], {}), '(saved_data, 0)\n', (23851, 23866), False, 'import torch\n'), ((12418, 12455), 'utils.misc.clean_state_dict', 'clean_state_dict', (["checkpoint['model']"], {}), "(checkpoint['model'])\n", (12434, 12455), False, 'from 
utils.misc import clean_state_dict\n'), ((16169, 16180), 'time.time', 'time.time', ([], {}), '()\n', (16178, 16180), False, 'import time\n'), ((20924, 20957), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', ([], {}), '()\n', (20955, 20957), False, 'import torch\n'), ((21537, 21558), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (21556, 21558), True, 'import torch.distributed as dist\n'), ((23348, 23359), 'time.time', 'time.time', ([], {}), '()\n', (23357, 23359), False, 'import time\n'), ((23442, 23457), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (23455, 23457), True, 'import torch.distributed as dist\n'), ((23669, 23690), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (23688, 23690), True, 'import torch.distributed as dist\n'), ((24340, 24376), 'os.path.join', 'os.path.join', (['args.output', '_filename'], {}), '(args.output, _filename)\n', (24352, 24376), False, 'import os, sys\n'), ((24549, 24582), 'numpy.array2string', 'np.array2string', (['aps'], {'precision': '(5)'}), '(aps, precision=5)\n', (24564, 24582), True, 'import numpy as np\n'), ((26782, 26805), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (26795, 26805), False, 'import os, sys\n'), ((12215, 12230), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (12228, 12230), True, 'import torch.distributed as dist\n'), ((17953, 18000), 'os.path.join', 'os.path.join', (['args.output', '"""checkpoint.pth.tar"""'], {}), "(args.output, 'checkpoint.pth.tar')\n", (17965, 18000), False, 'import os, sys\n'), ((18472, 18523), 'os.path.join', 'os.path.join', (['args.output', '"""checkpoint_nan.pth.tar"""'], {}), "(args.output, 'checkpoint_nan.pth.tar')\n", (18484, 18523), False, 'import os, sys\n'), ((22969, 23002), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', ([], {}), '()\n', (23000, 23002), False, 'import torch\n'), ((24230, 24251), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (24249, 24251), True, 'import torch.distributed as dist\n'), ((18956, 18971), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (18969, 18971), True, 'import torch.distributed as dist\n'), ((19134, 19145), 'os.getpid', 'os.getpid', ([], {}), '()\n', (19143, 19145), False, 'import os, sys\n')] |
import os
import torch
import torch.utils.data as data
from PIL import Image
import numpy as np
from .utility import download_url, check_integrity
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def has_file_allowed_extension(filename, extensions):
"""Checks if a file is an allowed extension.
Args:
filename (string): path to a file
extensions (iterable of strings): extensions to consider (lowercase)
Returns:
bool: True if the filename ends with one of given extensions
"""
filename_lower = filename.lower()
return any(filename_lower.endswith(ext) for ext in extensions)
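# e.g. has_file_allowed_extension('img_001.JPG', IMG_EXTENSIONS) -> True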
def is_image_file(filename):
"""Checks if a file is an allowed image extension.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
return has_file_allowed_extension(filename, IMG_EXTENSIONS)
def find_classes(dir):
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
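# find_classes assigns integer labels in sorted directory order, e.g. for CUB:
# {'001.Black_footed_Albatross': 0, '002.Laysan_Albatross': 1, ...}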
def make_dataset(dir, class_to_idx, extensions):
images = []
dir = os.path.expanduser(dir)
for target in sorted(os.listdir(dir)):
d = os.path.join(dir, target)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in sorted(fnames):
if has_file_allowed_extension(fname, extensions):
path = os.path.join(root, fname)
item = (path, class_to_idx[target])
images.append(item)
return images
class CUB2011( data.Dataset ):
base_folder = 'CUB_200_2011'
url = 'http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz'
filename = 'CUB_200_2011.tgz'
tgz_md5 = '97eceeb196236b17998738112f37df78'
train_test_split='train_test_split'
def __init__(self,
root,
train,
extensions=IMG_EXTENSIONS,
loader=pil_loader,
transform=None,
target_transform=None,
download=False
):
self.root = os.path.expanduser( root )
if download:
self.download()
pathimage = os.path.join( self.root, self.base_folder, 'images' )
classes, class_to_idx = find_classes( pathimage )
samples = make_dataset(pathimage, class_to_idx, extensions)
if len(samples) == 0:
raise RuntimeError( 'Dataset not found or corrupted. You can use download=True to download it' )
ids, train_index = np.loadtxt( os.path.join(self.root, self.base_folder,'{}.txt'.format( self.train_test_split ) ), unpack=True )
self.index = np.where(train_index == train)[0]
self.loader = loader
self.extensions = extensions
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.targets = np.array([s[1] for s in samples])
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, idx):
"""
Args:
idx (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
path, target = self.samples[ self.index[idx] ]
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.index)
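    # Illustrative usage (local paths assumed):
    #   ds = CUB2011(root='~/data', train=1, download=True)
    #   img, label = ds[0]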
def _check_integrity(self):
return os.path.exists( os.path.join(self.root, self.filename))
# for fentry in (self.train_list + self.test_list):
# filename, md5 = fentry[0], fentry[1]
# fpath = os.path.join(root, self.base_folder, filename)
# if not check_integrity(fpath, md5):
# return False
# return True
def download(self):
import tarfile
if self._check_integrity():
print('Files already downloaded and verified')
return
root = self.root
download_url(self.url, root, self.filename, self.tgz_md5)
# extract file
cwd = os.getcwd()
tar = tarfile.open(os.path.join(root, self.filename), "r:gz")
os.chdir(root)
tar.extractall()
tar.close()
os.chdir(cwd)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
return fmt_str
class CUB2011MetricLearning( CUB2011 ):
num_training_classes = 100
def __init__(self,
root,
train,
extensions=IMG_EXTENSIONS,
loader=pil_loader ,
transform=None,
target_transform=None,
download=False,
):
super(CUB2011MetricLearning, self).__init__( root, True, transform=transform, target_transform=target_transform, download=download )
        classes = self.classes[:self.num_training_classes] if train else self.classes[self.num_training_classes:2 * self.num_training_classes]
index = np.array([], dtype=int)
for c in classes:
index = np.append(index, np.where( self.targets == self.class_to_idx[c] )[0], axis=0 )
class_to_idx = {classes[i]: i for i in range(len(classes))}
samples = []
for i in index:
path = self.samples[i][0]
c = self.classes[self.samples[i][1]]
samples.append( (path, class_to_idx[c]) )
self.index = np.array( [ i for i in range(len(index)) ] )
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.targets = np.array([s[1] for s in samples])
#self.class_to_idx = {class_label: class_label_ind for class_label, class_label_ind in self.class_to_idx.items() if class_label in self.classes}
#self.imgs = [(image_file_path, class_label_ind) for image_file_path, class_label_ind in self.imgs if class_label_ind in self.class_to_idx.values()]
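    # Note: the split here is by class, not by image: the first 100 classes form
    # the training set and classes 100-199 the evaluation set, the usual
    # zero-shot protocol for deep metric learning on CUB.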
| [
"os.path.expanduser",
"os.getcwd",
"os.path.isdir",
"os.walk",
"PIL.Image.open",
"numpy.where",
"numpy.array",
"os.chdir",
"os.path.join",
"os.listdir"
] | [((1468, 1491), 'os.path.expanduser', 'os.path.expanduser', (['dir'], {}), '(dir)\n', (1486, 1491), False, 'import os\n'), ((396, 409), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (406, 409), False, 'from PIL import Image\n'), ((1517, 1532), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (1527, 1532), False, 'import os\n'), ((1547, 1572), 'os.path.join', 'os.path.join', (['dir', 'target'], {}), '(dir, target)\n', (1559, 1572), False, 'import os\n'), ((2513, 2537), 'os.path.expanduser', 'os.path.expanduser', (['root'], {}), '(root)\n', (2531, 2537), False, 'import os\n'), ((2608, 2659), 'os.path.join', 'os.path.join', (['self.root', 'self.base_folder', '"""images"""'], {}), "(self.root, self.base_folder, 'images')\n", (2620, 2659), False, 'import os\n'), ((3333, 3366), 'numpy.array', 'np.array', (['[s[1] for s in samples]'], {}), '([s[1] for s in samples])\n', (3341, 3366), True, 'import numpy as np\n'), ((4673, 4684), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4682, 4684), False, 'import os\n'), ((4763, 4777), 'os.chdir', 'os.chdir', (['root'], {}), '(root)\n', (4771, 4777), False, 'import os\n'), ((4831, 4844), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (4839, 4844), False, 'import os\n'), ((5620, 5643), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (5628, 5643), True, 'import numpy as np\n'), ((6232, 6265), 'numpy.array', 'np.array', (['[s[1] for s in samples]'], {}), '([s[1] for s in samples])\n', (6240, 6265), True, 'import numpy as np\n'), ((1219, 1234), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (1229, 1234), False, 'import os\n'), ((1588, 1604), 'os.path.isdir', 'os.path.isdir', (['d'], {}), '(d)\n', (1601, 1604), False, 'import os\n'), ((1666, 1676), 'os.walk', 'os.walk', (['d'], {}), '(d)\n', (1673, 1676), False, 'import os\n'), ((3101, 3131), 'numpy.where', 'np.where', (['(train_index == train)'], {}), '(train_index == train)\n', (3109, 3131), True, 'import numpy as np\n'), ((4071, 4109), 'os.path.join', 'os.path.join', (['self.root', 'self.filename'], {}), '(self.root, self.filename)\n', (4083, 4109), False, 'import os\n'), ((4712, 4745), 'os.path.join', 'os.path.join', (['root', 'self.filename'], {}), '(root, self.filename)\n', (4724, 4745), False, 'import os\n'), ((1252, 1272), 'os.path.join', 'os.path.join', (['dir', 'd'], {}), '(dir, d)\n', (1264, 1272), False, 'import os\n'), ((1813, 1838), 'os.path.join', 'os.path.join', (['root', 'fname'], {}), '(root, fname)\n', (1825, 1838), False, 'import os\n'), ((5707, 5753), 'numpy.where', 'np.where', (['(self.targets == self.class_to_idx[c])'], {}), '(self.targets == self.class_to_idx[c])\n', (5715, 5753), True, 'import numpy as np\n')] |
"""
PySCeS - Python Simulator for Cellular Systems (http://sourceforge.net)
Copyright (C) 2004-2020 <NAME>, <NAME>, <NAME> all rights reserved,
<NAME> (<EMAIL>)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
<NAME>
"""
from __future__ import division, print_function
from __future__ import absolute_import
from __future__ import unicode_literals
__doc__ = '''PySCeS unittest module'''
import os, sys, shutil
import unittest
from time import sleep
from . import PyscesStoich, pitcon
from . import install_dir as INSTALL_DIR
from . import model_dir as MODEL_DIR
from . import model as PSCMODEL
from .version import __version__
# evaluation comparison stuff
from numpy.testing import assert_array_equal, assert_equal
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import array, zeros, ones, logspace
from scipy.optimize import fsolve
from scipy.integrate import odeint
def CopyTestModels(
dirIn=os.path.join(INSTALL_DIR, 'pscmodels'), dirOut=MODEL_DIR, overwrite=0,
):
"""
CopyTestModels(dirIn=os.path.join(pysces_install,'pscmodels'),dirOut=pysces_model,overwrite=0)
    Copy all PySCeS test model files in dirIn to dirOut, defaults to:
in: pysces/pscmodels
out: model_dir
Arguments:
=========
dirIn [default=os.path.join(pysces_install,'pscmodels')]: target directory
dirOut [default=pysces_model]: destination directory
    overwrite [default=0]: automatically (1) overwrite target files
"""
if not os.path.exists(dirOut):
os.makedirs(dirOut)
if os.path.exists(dirIn):
if os.path.exists(dirOut):
print('src : ' + dirIn)
print('dest: ' + dirOut)
flist = os.listdir(dirIn)
if overwrite == 0:
flist2 = os.listdir(dirOut)
maxlen = 0
for x in flist:
if len(x) > maxlen:
maxlen = len(x)
if len(flist) != 0:
for File in flist:
if File[:12] == 'pysces_test_' and File[-4:] == '.psc':
if overwrite == 0:
try:
a = flist2.index(File)
# print File + (maxlen-len(File))*'.' + ' skipped'
except:
shutil.copy(
os.path.join(dirIn, File),
os.path.join(dirOut, File),
)
print(File + (maxlen - len(File)) * '.' + ' ok')
else:
shutil.copy(
os.path.join(dirIn, File), os.path.join(dirOut, File)
)
print(File + (maxlen - len(File)) * '.' + ' ok')
else:
print('Empty directory?')
else:
print(dirOut + ' does not exist')
else:
print(dirIn + ' does not exist')
class PyscesTest:
'''PySCeS test suite: takes a test level as an argument'''
__version__ = __version__
model_dir = os.path.join(MODEL_DIR, 'tests')
def __init__(self, lvl=2, std2file=0):
# copy models from server to local model store
print('\nCopying pysces_test models if necessary ...')
CopyTestModels()
print('done.')
self.basic_runner = unittest.TextTestRunner()
self.BasicTest = unittest.makeSuite(PyscesBasicTest, 'test')
self.ExtendedTest = unittest.makeSuite(PyscesExtendedTest, 'test')
self.ExternalTest = unittest.makeSuite(PyscesExternalTest, 'test')
if lvl > 3:
lvl = 3
class NullWriter:
def __init__(self, dres=0):
if dres:
self.Fout = open(
os.path.join(os.getcwd(), 'pysces_test_results.txt'), 'w'
)
self.fof = 1
else:
self.fof = 0
def write(self, s):
if self.fof:
self.Fout.write(s)
def close(self):
if self.fof:
self.Fout.flush()
self.Fout.close()
self.__dfi__ = NullWriter(std2file)
tmpSTD = sys.stdout
for x in range(lvl):
print('\nLevel ' + str(x + 1) + ' tests')
sys.stdout = self.__dfi__
getattr(self, 'lvl_' + str(x + 1))()
sys.stdout = tmpSTD
sleep(1)
self.__dfi__.close()
def lvl_1(self):
self.basic_runner.run(self.BasicTest)
def lvl_2(self):
self.basic_runner.run(self.ExtendedTest)
def lvl_3(self):
self.basic_runner.run(self.ExternalTest)
class PyscesBasicTest(unittest.TestCase):
'''Basic test class, tests low-level numerical algorithms'''
__version__ = __version__
model_dir = os.path.join(MODEL_DIR, 'tests')
MathArrayTest = PyscesStoich.MathArrayFunc()
def test_swaprow_d(self):
arr = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 'd')
karr = array([[7, 8, 9], [4, 5, 6], [1, 2, 3]], 'd')
arr = self.MathArrayTest.SwapRowd(arr, 0, 2)
assert_array_equal(arr, karr)
def test_swapcol_d(self):
arr = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 'd')
karr = array([[3, 2, 1], [6, 5, 4], [9, 8, 7]], 'd')
arr = self.MathArrayTest.SwapCold(arr, 0, 2)
assert_array_equal(arr, karr)
def test_swaprow_z(self):
arr = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 'D')
karr = array([[7, 8, 9], [4, 5, 6], [1, 2, 3]], 'D')
arr = self.MathArrayTest.SwapRowz(arr, 0, 2)
assert_array_equal(arr, karr)
def test_swapcol_z(self):
arr = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 'D')
karr = array([[3, 2, 1], [6, 5, 4], [9, 8, 7]], 'D')
arr = self.MathArrayTest.SwapColz(arr, 0, 2)
assert_array_equal(arr, karr)
def test_matrixfloatfix(self):
arr = array([[1.0e-15, 2, 3], [4, -0.0, 6], [7, 8, 1.0e-15]])
karr = array([[0.0, 2, 3], [4, 0.0, 6], [7, 8, 0.0]])
self.MathArrayTest.MatrixFloatFix(arr, val=1.0e-13)
assert_array_equal(arr, karr)
def test_FSOLVE_linear(self):
ds = zeros((3), 'd')
s = zeros((3), 'd')
def linear1_ode(s):
ds[0] = 100.0 - 6.0 * s[0] + s[1]
ds[1] = 5.0 * s[0] - 4.0 * s[1] + s[2]
ds[2] = 3.0 * s[1] - 3.0 * s[2] + 1.0
return ds
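        # fsolve locates the steady state: the s at which all three rates in ds vanish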
res = fsolve(linear1_ode, [1.0, 1.0, 1.0])
known = [23.1025641, 38.61538462, 38.94871795]
assert_array_almost_equal(res, known)
def test_ODEINT_linear(self):
ds = zeros((3), 'd')
s = zeros((3), 'd')
def linear1_ode(s, t):
ds[0] = 100.0 - 6.0 * s[0] + s[1]
ds[1] = 5.0 * s[0] - 4.0 * s[1] + s[2]
ds[2] = 3.0 * s[1] - 3.0 * s[2] + 1.0
return ds
sim_res = odeint(
linear1_ode, [1.0, 1.0, 1.0], array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
)
known = array(
[
[1.0, 1.0, 1.0],
[20.70698909, 27.55450322, 20.9819434],
[22.4418045, 35.47695146, 33.60751644],
[22.91227529, 37.71013683, 37.40388317],
[23.04761842, 38.35410015, 38.50268374],
[23.08668173, 38.53997688, 38.81993704],
[23.09809306, 38.59337738, 38.91160251],
[23.10124276, 38.60909619, 38.93798616],
[23.10217397, 38.61358525, 38.94561234],
[23.10245088, 38.61486396, 38.94781836],
[23.10253812, 38.61526501, 38.94851136],
]
)
assert_array_almost_equal(sim_res, known, 3)
def test_ODEINT_moiety(self):
"""This set of ODE's is numerically unstable and might fail without error"""
ds = zeros((3), 'd')
s = zeros((3), 'd')
def moibranch_ode(s, t):
s[2] = 1.0 - s[1]
ds[0] = s[1] / (2.0 + 2.0 * s[1]) - 2.0 * s[0] / (1.0 + s[0])
ds[1] = 10.0 * s[2] / (2.0 + 2.0 * s[2]) - s[1] / (2.0 + 2.0 * s[1])
ds[2] = s[1] / (2.0 + 2.0 * s[1]) - 10.0 * s[2] / (2.0 + 2.0 * s[2])
return ds
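        # s[2] is pinned by the moiety conservation s[1] + s[2] = 1 inside the rate function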
sim_res = odeint(
moibranch_ode, [0.7, 1.0, 0.3], array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
)
known = array(
[
[0.7, 1.0, 0.3],
[0.43521812, 0.94587168, 0.05412832],
[0.21547096, 0.94879365, 0.05120635],
[0.15590709, 0.94882109, 0.05117891],
[0.14233313, 0.9488214, 0.0511786],
[0.13938884, 0.94882111, 0.05117889],
[0.13875741, 0.94882111, 0.05117889],
[0.1386223, 0.94882133, 0.05117867],
[0.1385936, 0.94882134, 0.05117866],
[0.13858742, 0.94882133, 0.05117867],
[0.13858601, 0.94882133, 0.05117867],
]
)
assert_array_almost_equal(sim_res[-5:, :], known[-5:, :], 3)
class PyscesExtendedTest(unittest.TestCase):
'''Extended test class, tests modelling related methods'''
__version__ = __version__
model_dir = os.path.join(MODEL_DIR, 'tests')
def test_statemetab_linear1(self):
lin = PSCMODEL('pysces_test_linear1.psc', self.model_dir)
lin.doLoad()
lin.hybrd_mesg = 0
lin.State()
linmet = array([23.1025641, 38.61538462, 38.94871795], 'd')
assert_array_almost_equal(lin.state_species, linmet)
def test_statemetab_branch1(self):
bra = PSCMODEL('pysces_test_branch1.psc', self.model_dir)
bra.doLoad()
bra.hybrd_mesg = 0
bra.State()
bramet = array([4.8583996, 1.88547254, 1.49124431, 1.49124431], 'd')
assert_array_almost_equal(bra.state_species, bramet)
def test_statemetab_moiety1(self):
moi = PSCMODEL('pysces_test_moiety1.psc', self.model_dir)
moi.doLoad()
moi.hybrd_mesg = 0
moi.State()
moimet = array(
[
3.6886875,
16.25569882,
7.3113125,
4.39229787,
41.02504596,
2.60770213,
0.42718994,
2.57281006,
2.44791155,
17.0012171,
],
'd',
)
assert_array_almost_equal(moi.state_species, moimet)
def test_stateflux_linear1(self):
lin = PSCMODEL('pysces_test_linear1.psc', self.model_dir)
lin.doLoad()
lin.hybrd_mesg = 0
lin.State()
linflux = array([76.8974359, 76.8974359, 76.8974359, 76.8974359], 'd')
assert_array_almost_equal(lin.state_flux, linflux)
def test_stateflux_branch1(self):
bra = PSCMODEL('pysces_test_branch1.psc', self.model_dir)
bra.doLoad()
bra.hybrd_mesg = 0
bra.State()
braflux = array(
[2.42139889, 2.42139889, 1.21069945, 1.21069945, 1.21069945, 1.21069945],
'd',
)
assert_array_almost_equal(bra.state_flux, braflux)
def test_stateflux_moiety1(self):
moi = PSCMODEL('pysces_test_moiety1.psc', self.model_dir)
moi.doLoad()
moi.hybrd_mesg = 0
moi.State()
moiflux = array(
[
250.01825652,
250.01825652,
250.01825652,
250.01825652,
250.01825652,
250.01825652,
250.01825652,
],
'd',
)
assert_array_almost_equal(moi.state_flux, moiflux)
def test_elas_linear1(self):
lin = PSCMODEL('pysces_test_linear1.psc', self.model_dir)
lin.doLoad()
lin.hybrd_mesg = 0
lin.State()
lin.EvalEvar()
line = [
-0.30043348,
0.0,
0.0,
1.50216739,
-0.50216739,
0.0,
0.0,
1.50650217,
-0.50650217,
0.0,
0.0,
1.01300433,
]
line_new = []
for x in range(lin.elas_var.shape[0]):
for y in range(lin.elas_var.shape[1]):
line_new.append(round(lin.elas_var[x, y], 2))
for x in range(len(line)):
line[x] = round(line[x], 2)
assert_array_almost_equal(line, line_new)
def test_elas_branch1(self):
bra = PSCMODEL('pysces_test_branch1.psc', self.model_dir)
bra.doLoad()
bra.hybrd_mesg = 0
bra.State()
bra.EvalEvar()
brae = [
-0.66930781448548349,
0.0,
0.0,
0.0,
0.78845903183743249,
-0.52920041809870422,
0.0,
0.0,
0.0,
0.95441603882382564,
-0.60578219088834051,
0.0,
0.0,
0.95441603882382564,
0.0,
-0.60578219088834051,
0.0,
0.0,
0.9421058792599355,
0.0,
0.0,
0.0,
0.0,
0.9421058792599355,
]
brae_new = []
for x in range(bra.elas_var.shape[0]):
for y in range(bra.elas_var.shape[1]):
brae_new.append(round(bra.elas_var[x, y], 2))
for x in range(len(brae)):
brae[x] = round(brae[x], 2)
assert_array_almost_equal(brae, brae_new)
def test_elas_moiety1(self):
moi = PSCMODEL('pysces_test_moiety1.psc', self.model_dir)
moi.doLoad()
moi.hybrd_mesg = 0
moi.State()
moi.EvalEvar()
moie = [
1.4753672613878632,
-0.47536726138786306,
-0.47536726138786312,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.4278931518740323,
0.0,
1.4278931518740325,
-0.42789315187403271,
-0.42789315187403271,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0514524437327983,
0.0,
1.0514524437327983,
-0.051452443732798468,
0.0,
0.0,
0.0,
0.0,
0.0,
-0.04300468618304179,
0.0,
1.0430046861830418,
0.0,
0.0,
-0.043004686183041797,
0.0,
-0.073768363069393092,
0.0,
1.0737683630693931,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0737683630693931,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
-0.029048874655981143,
1.0290488746559812,
0.0,
-0.029048874655981143,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0199985395848277,
]
moie_new = []
for x in range(moi.elas_var.shape[0]):
for y in range(moi.elas_var.shape[1]):
moie_new.append(round(moi.elas_var[x, y], 2))
for x in range(len(moie)):
moie[x] = round(moie[x], 2)
assert_array_almost_equal(moie, moie_new)
def test_cc_linear1(self):
lin = PSCMODEL('pysces_test_linear1.psc', self.model_dir)
lin.doLoad()
lin.hybrd_mesg = 0
lin.State()
lin.EvalEvar()
lin.EvalCC()
lincc = [
0.02564102564102570300,
0.05128205128205126600,
0.15384615384615383000,
0.76923076923076916000,
0.02564102564102571000,
0.05128205128205128000,
0.15384615384615385000,
0.76923076923076938000,
0.02564102564102571000,
0.05128205128205128000,
0.15384615384615385000,
0.76923076923076938000,
0.02564102564102570300,
0.05128205128205126600,
0.15384615384615383000,
0.76923076923076916000,
-0.30636428644396779000,
-0.61272857288793536000,
0.15318214322198387000,
0.76591071610991934000,
-0.08534676570192656400,
-0.17069353140385327000,
-0.51208059421155983000,
0.76812089131733974000,
-0.96185074526088343000,
0.05062372343478333000,
0.15187117030435002000,
0.75935585152175000000,
]
lincc_new = []
for x in range(lin.cc_all.shape[0]):
for y in range(lin.cc_all.shape[1]):
lincc_new.append(round(lin.cc_all[x, y], 2))
for x in range(len(lincc)):
lincc[x] = round(lincc[x], 2)
assert_array_almost_equal(lincc, lincc_new)
def test_cc_branch1(self):
bra = PSCMODEL('pysces_test_branch1.psc', self.model_dir)
bra.doLoad()
bra.hybrd_mesg = 0
bra.State()
bra.EvalEvar()
bra.EvalCC()
bracc = [
0.25338970963029361000,
-0.13797075277724413000,
-0.21457061516904127000,
0.32372624345386269000,
0.39406892242342095000,
0.38135649243870806000,
-0.13797075277724413000,
0.25338970963029356000,
0.39406892242342095000,
0.32372624345386258000,
-0.21457061516904130000,
0.38135649243870812000,
-0.13797075277724413000,
0.25338970963029356000,
0.39406892242342095000,
0.32372624345386258000,
-0.21457061516904130000,
0.38135649243870812000,
0.05770947842652475500,
0.05770947842652472700,
0.08974915362718985400,
0.32372624345386269000,
0.08974915362718984000,
0.38135649243870817000,
0.25338970963029361000,
-0.13797075277724413000,
-0.21457061516904127000,
0.32372624345386269000,
0.39406892242342095000,
0.38135649243870806000,
0.05770947842652474100,
0.05770947842652471300,
0.08974915362718984000,
0.32372624345386264000,
0.08974915362718982600,
0.38135649243870806000,
-0.23751396180748979000,
-0.23751396180748979000,
-0.36937913195668803000,
0.55728841856739109000,
-0.36937913195668803000,
0.65649776896096446000,
-0.08622262758262820600,
-0.08622262758262820600,
-0.13409249329650016000,
-0.48367318660804903000,
-0.13409249329650016000,
0.92430342836630586000,
-0.79249085140642661000,
-0.14644930661681688000,
-0.22775636995026044000,
0.34361981023636490000,
0.41828517483934929000,
0.40479154289778968000,
-0.14644930661681685000,
-0.79249085140642661000,
0.41828517483934929000,
0.34361981023636484000,
-0.22775636995026050000,
0.40479154289778979000,
]
bracc_new = []
for x in range(bra.cc_all.shape[0]):
for y in range(bra.cc_all.shape[1]):
bracc_new.append(round(bra.cc_all[x, y], 2))
for x in range(len(bracc)):
bracc[x] = round(bracc[x], 2)
# os.sys.stderr.write(str(bracc[x]) + ' --> ' + str(bracc_new[x])+'\n')
assert_array_almost_equal(bracc, bracc_new)
def test_cc_moiety1(self):
moi = PSCMODEL('pysces_test_moiety1.psc', self.model_dir)
moi.doLoad()
moi.hybrd_mesg = 0
moi.State()
moi.EvalEvar()
moi.EvalCC()
        moicc = [
0.01114694087227875000,
0.07381787576415140000,
0.18139104475836643000,
0.04685617079784056700,
0.25208717198140801000,
0.04329618737852313600,
0.39140460844743269000,
0.01114694087227862500,
0.07381787576415056700,
0.18139104475836440000,
0.04685617079784004000,
0.25208717198140518000,
0.04329618737852265100,
0.39140460844742830000,
0.01114694087227874000,
0.07381787576415133100,
0.18139104475836623000,
0.04685617079784051800,
0.25208717198140773000,
0.04329618737852309500,
0.39140460844743230000,
0.01114694087227875000,
0.07381787576415140000,
0.18139104475836643000,
0.04685617079784056700,
0.25208717198140801000,
0.04329618737852313600,
0.39140460844743269000,
0.01114694087227875000,
0.07381787576415140000,
0.18139104475836643000,
0.04685617079784056700,
0.25208717198140801000,
0.04329618737852313600,
0.39140460844743269000,
0.01114694087227876100,
0.07381787576415146900,
0.18139104475836659000,
0.04685617079784060900,
0.25208717198140823000,
0.04329618737852317800,
0.39140460844743302000,
0.01114694087227875200,
0.07381787576415141400,
0.18139104475836645000,
0.04685617079784057400,
0.25208717198140806000,
0.04329618737852314300,
0.39140460844743280000,
-0.96946517151898726000,
0.07237057005414701500,
0.17783461222620814000,
0.04593748812318156800,
0.24714464011293155000,
0.04244730330314599300,
0.38373055769937375000,
-0.00493043463469603270,
-0.03265059135931840800,
-0.08023158100034956400,
0.14913606725290945000,
0.02650472034900546900,
0.11529508789995771000,
-0.17312326850750914000,
-0.00258941341724495350,
-0.01714775382110165000,
-0.04213679882643606200,
0.25950964810000904000,
-0.07785637143685617000,
-0.02885675558547331700,
-0.09092255501289707400,
-0.00651179467430067560,
-0.04312275948862360300,
-0.10596460973080281000,
-0.02003169777867189200,
0.40783108111031047000,
-0.00355036168876400080,
-0.22864985774914801000,
-0.07520202175595115700,
-0.49800690277268156000,
1.11329060303376930000,
0.28758054022385904000,
1.54718927875470390000,
0.26573108181778565000,
-2.64058257930148920000,
0.08511194652599511600,
-0.37976718223865702000,
-0.93319355560031703000,
-0.24105862936563618000,
-1.29690043219020780000,
-0.22274375836758617000,
2.98855161123640300000,
-0.01413200623441426100,
0.06305662607990611400,
0.15494766227242857000,
0.04002542759392844300,
0.21533763168639128000,
0.03698442240380646300,
-0.49621976380204558000,
0.01332315621836589900,
0.08822932693215754200,
0.21680398717628285000,
-0.25121007495788511000,
0.32322678367369212000,
-0.85819164176559659000,
0.46781846272298522000,
0.01096817827331699600,
0.07263406439629339900,
0.17848209108569948000,
0.03374050370795465800,
-0.68693259335574453000,
0.00598007183654026410,
0.38512768405594056000,
0.00513245176028354240,
0.03398840011328187900,
0.08351894906745090100,
-0.51437161070192172000,
0.15431837495286327000,
0.05719670138973560700,
0.18021673341830685000,
]
moicc_new = []
for x in range(moi.cc_all.shape[0]):
for y in range(moi.cc_all.shape[1]):
moicc_new.append(round(moi.cc_all[x, y], 2))
for x in range(len(moicc)):
moicc[x] = round(moicc[x], 2)
# os.sys.stderr.write(str(moicc[x]) + ' --> ' + str(moicc_new[x])+'\n')
assert_array_almost_equal(moicc, moicc_new)
class PyscesExternalTest(unittest.TestCase):
'''Extended test class, tests external/add-in numerical algorithms'''
__version__ = __version__
model_dir = os.path.join(MODEL_DIR, 'tests')
def test_PITCON1(self):
import scipy
print(
'''
C PCPRB1.FOR The Freudenstein-Roth function.
C Limit points in the first variable occur at:
C (14.28309, -1.741377, 0.2585779)
C (61.66936, 1.983801, -0.6638797)
'''
)
def fx(X):
FX[0] = (
X[0] - ((X[1] - 5.0) * X[1] + 2.0) * X[1] - 13.0 + 34.0 * (X[2] - 1.0)
)
FX[1] = (
X[0] + ((X[1] + 1.0) * X[1] - 14.0) * X[1] - 29.0 + 10.0 * (X[2] - 1.0)
)
return FX
def fjac(s):
return
parm = 3
iwork = scipy.zeros((30 + parm), 'i')
rwork = scipy.zeros((30 + (6 * parm) * parm), 'd')
ipar = scipy.zeros((parm), 'i')
fpar = scipy.zeros((parm), 'd')
xr = scipy.zeros((parm), 'd') # output array
FX = scipy.zeros((3), 'd') # function array 'v'
iwork[0] = 0
iwork[1] = 2
iwork[2] = 0
iwork[3] = 0
iwork[4] = 0
iwork[5] = 1
iwork[6] = 0
iwork[7] = 6
iwork[8] = 2
iwork[9] = 0
iwork[10] = 0
iwork[11] = 0
iwork[12] = 30
iwork[13] = len(iwork)
iwork[14] = 30 + 4 * parm
iwork[15] = len(rwork)
iwork[16] = 20
rwork[0] = 0.00001
rwork[1] = 0.00001
rwork[2] = 0.01
rwork[3] = 20.0
rwork[4] = 0.3
rwork[5] = 1.0
rwork[6] = 1.0
rwork[7] = 0.0
rwork[19] = 3.0
xr[0] = 15.0
xr[1] = -2.0
xr[2] = 0.0
output = []
limits = []
iterations = 10
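        # each pitcon1 call advances the continuation one step; iwork[0] == 4
        # signals that a limit point was crossed on that step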
for run in range(iterations):
ierror, iwork, rwork, xrout = pitcon.pitcon1(
fjac, fpar, fx, ipar, iwork, rwork, xr
)
if iwork[0] == 4:
print('\nLimit point in run = ' + repr(run + 1))
print(xrout)
limits.append(xrout)
output.append(xrout)
output = scipy.array(output)
limit0 = [14.28309125, -1.74137688, 0.25857788]
limit1 = [61.66936258, 1.98380112, -0.66387974]
out6 = [4.48781323e001, 4.87659925e-001, 5.95322940e-002]
output6 = []
for x in range(3):
output6.append(output[6, x])
assert_array_almost_equal(limits[0], limit0, 4)
assert_array_almost_equal(limits[1], limit1, 4)
assert_array_almost_equal(output6, out6, 4)
# def test_PITCON2(self): # unreliable
# mod = model('pysces_test_pitcon.psc', self.model_dir)
# mod.doLoad()
# mod.V2 = mod.V3 = 100.0
# mod.A05 = 10.0
# mod.pitcon_iter = 5
# mod.pitcon_par_space = logspace(0,1,10)
# res = mod.PITCON('P')
## pysces.plt.plot2D(res, 0, style='.'); pysces.plt.logxy()
# os.sys.stderr.write('\n\"Iteration terminates after NITMAX and Newton method fails to converge\" warnings can be ignored\n')
# benchmark = array([
# [8.39949877e-01,1.61211501e+01,4.84068424e+00,7.12011640e+01,7.12011640e+01,7.12011640e+01],
# [5.40984421e-01,1.57720807e+01,4.31351073e+00,7.27545617e+01,7.27545617e+01,7.27545617e+01],
# [-5.14362451e-02,1.45210385e+01,3.06361620e+00,7.64860963e+01,7.64860963e+01,7.64860963e+01],
# [-4.66310864e-01,1.31962735e+01,1.95980785e+00,8.04668684e+01,8.04668684e+01,8.04668683e+01],
# [1.13190191e+00,1.61011691e+01,5.26326909e+00,6.96411063e+01,6.96411063e+01,6.96411063e+01],
# [8.30873009e-01,1.61149804e+01,4.82588194e+00,7.12478479e+01,7.12478479e+01,7.12478479e+01],
# [4.39606946e-01,1.56001789e+01,4.11842961e+00,7.33077027e+01,7.33077027e+01,7.33077027e+01],
# [-3.07339668e-01,1.37808508e+01,2.41799659e+00,7.87206889e+01,7.87206889e+01,7.87206889e+01],
# [1.64608357e+00,1.37635551e+01,5.46388538e+00,6.53427535e+01,6.53427535e+01,6.53427535e+01],
# [1.57039736e+00,1.45248830e+01,5.53382469e+00,6.63454792e+01,6.63454792e+01,6.63454792e+01],
# [1.36033858e+00,1.56423070e+01,5.48938893e+00,6.81979734e+01,6.81979734e+01,6.81979734e+01],
# [3.90033764e-01,1.55086998e+01,4.02030887e+00,7.35869382e+01,7.35869382e+01,7.35869382e+01],
# [1.90300355e+00,4.61218050e-01,8.11483528e-01,1.67232557e+01,1.67232557e+01,1.67232557e+01],
# [1.65604156e+00,1.14527548e+00,1.32711044e+00,2.91604808e+01,2.91604810e+01,2.91604810e+01],
# [1.60792245e+00,3.36846985e+00,2.47903648e+00,4.55723011e+01,4.55723011e+01,4.55723011e+01],
# [1.76260920e+00,1.02529273e+01,4.77285351e+00,6.09994737e+01,6.09994737e+01,6.09994737e+01],
# [2.48410121e+00,1.76577346e-01,5.36816396e-01,7.17264824e+00,7.17264824e+00,7.17264824e+00],
# [2.04216203e+00,3.36094901e-01,6.94802141e-01,1.31279273e+01,1.31279273e+01,1.31279273e+01],
# [1.68318189e+00,9.87364532e-01,1.22120154e+00,2.69666997e+01,2.69666997e+01,2.69666997e+01],
# [1.60195518e+00,3.09604637e+00,2.35831199e+00,4.43144773e+01,4.43144773e+01,4.43144773e+01],
# [3.29451248e+00,1.10989590e-01,5.07996700e-01,3.71775348e+00,3.71775351e+00,3.71775348e+00],
# [2.53835638e+00,1.67389490e-01,5.28506741e-01,6.75388116e+00,6.75388117e+00,6.75388117e+00],
# [2.13066473e+00,2.84747678e-01,6.44229065e-01,1.14218470e+01,1.14218470e+01,1.14218470e+01],
# [1.85930898e+00,5.18944892e-01,8.62231652e-01,1.81726016e+01,1.81726016e+01,1.81726016e+01],
# [4.93995542e+00,1.06865480e-01,6.50716299e-01,2.37791780e+00,2.37791781e+00,2.37791780e+00],
# [5.51998987e+00,1.12930767e-01,7.15671948e-01,2.26203167e+00,2.26203167e+00,2.26203167e+00],
# [7.20197168e+00,1.35245848e-01,9.14818799e-01,2.13476038e+00,2.13476038e+00,2.13476038e+00],
# [1.21464994e+01,2.10315787e-01,1.52485159e+00,2.11433598e+00,2.11433598e+00,2.11433598e+00],
# [6.29277796e+00,1.22633783e-01,8.05857247e-01,2.18036060e+00,2.18036060e+00,2.18036060e+00],
# [6.71463987e+00,1.28373921e-01,8.56137906e-01,2.15469266e+00,2.15469266e+00,2.15469266e+00],
# [7.96638113e+00,1.46372121e-01,1.00775931e+00,2.11668562e+00,2.11668562e+00,2.11668562e+00],
# [1.16768618e+01,2.02994936e-01,1.46632082e+00,2.11150946e+00,2.11150946e+00,2.11150946e+00],
# [8.04040339e+00,1.47466151e-01,1.01680363e+00,2.11553054e+00,2.11553054e+00,2.11553055e+00],
# [8.46158253e+00,1.53734006e-01,1.06838323e+00,2.11040547e+00,2.11040547e+00,2.11040547e+00],
# [9.21463978e+00,1.65086727e-01,1.16102019e+00,2.10586653e+00,2.10586653e+00,2.10586653e+00],
# [1.07956247e+01,1.89324600e-01,1.35672050e+00,2.10728985e+00,2.10728985e+00,2.10728985e+00],
# [1.02976931e+01,1.81645169e-01,1.29494069e+00,2.10576586e+00,2.10576586e+00,2.10576586e+00],
# [1.07186771e+01,1.88135560e-01,1.34716589e+00,2.10700665e+00,2.10700665e+00,2.10700665e+00],
# [1.13140312e+01,1.97355266e-01,1.42115741e+00,2.10957637e+00,2.10957637e+00,2.10957637e+00],
# [1.21560089e+01,2.10464256e-01,1.52603757e+00,2.11439651e+00,2.11439651e+00,2.11439651e+00]])
# assert_array_almost_equal(benchmark[:5,:],res[:5,:],3)
# assert_array_almost_equal(benchmark[-5:,:],res[-5:,:],1)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"os.makedirs",
"unittest.TextTestRunner",
"os.getcwd",
"numpy.testing.assert_array_equal",
"os.path.exists",
"unittest.makeSuite",
"scipy.optimize.fsolve",
"time.sleep",
"scipy.zeros",
"scipy.array",
"numpy.testing.assert_array_almost_equal",
"os.path.join",
"os.listdir"
] | [((1221, 1259), 'os.path.join', 'os.path.join', (['INSTALL_DIR', '"""pscmodels"""'], {}), "(INSTALL_DIR, 'pscmodels')\n", (1233, 1259), False, 'import os, sys, shutil\n'), ((1832, 1853), 'os.path.exists', 'os.path.exists', (['dirIn'], {}), '(dirIn)\n', (1846, 1853), False, 'import os, sys, shutil\n'), ((3445, 3477), 'os.path.join', 'os.path.join', (['MODEL_DIR', '"""tests"""'], {}), "(MODEL_DIR, 'tests')\n", (3457, 3477), False, 'import os, sys, shutil\n'), ((5250, 5282), 'os.path.join', 'os.path.join', (['MODEL_DIR', '"""tests"""'], {}), "(MODEL_DIR, 'tests')\n", (5262, 5282), False, 'import os, sys, shutil\n'), ((9623, 9655), 'os.path.join', 'os.path.join', (['MODEL_DIR', '"""tests"""'], {}), "(MODEL_DIR, 'tests')\n", (9635, 9655), False, 'import os, sys, shutil\n'), ((25458, 25490), 'os.path.join', 'os.path.join', (['MODEL_DIR', '"""tests"""'], {}), "(MODEL_DIR, 'tests')\n", (25470, 25490), False, 'import os, sys, shutil\n'), ((32635, 32650), 'unittest.main', 'unittest.main', ([], {}), '()\n', (32648, 32650), False, 'import unittest\n'), ((1772, 1794), 'os.path.exists', 'os.path.exists', (['dirOut'], {}), '(dirOut)\n', (1786, 1794), False, 'import os, sys, shutil\n'), ((1804, 1823), 'os.makedirs', 'os.makedirs', (['dirOut'], {}), '(dirOut)\n', (1815, 1823), False, 'import os, sys, shutil\n'), ((1866, 1888), 'os.path.exists', 'os.path.exists', (['dirOut'], {}), '(dirOut)\n', (1880, 1888), False, 'import os, sys, shutil\n'), ((3716, 3741), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (3739, 3741), False, 'import unittest\n'), ((3767, 3810), 'unittest.makeSuite', 'unittest.makeSuite', (['PyscesBasicTest', '"""test"""'], {}), "(PyscesBasicTest, 'test')\n", (3785, 3810), False, 'import unittest\n'), ((3839, 3885), 'unittest.makeSuite', 'unittest.makeSuite', (['PyscesExtendedTest', '"""test"""'], {}), "(PyscesExtendedTest, 'test')\n", (3857, 3885), False, 'import unittest\n'), ((3914, 3960), 'unittest.makeSuite', 'unittest.makeSuite', (['PyscesExternalTest', '"""test"""'], {}), "(PyscesExternalTest, 'test')\n", (3932, 3960), False, 'import unittest\n'), ((5378, 5423), 'scipy.array', 'array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]', '"""d"""'], {}), "([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 'd')\n", (5383, 5423), False, 'from scipy import array, zeros, ones, logspace\n'), ((5439, 5484), 'scipy.array', 'array', (['[[7, 8, 9], [4, 5, 6], [1, 2, 3]]', '"""d"""'], {}), "([[7, 8, 9], [4, 5, 6], [1, 2, 3]], 'd')\n", (5444, 5484), False, 'from scipy import array, zeros, ones, logspace\n'), ((5546, 5575), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['arr', 'karr'], {}), '(arr, karr)\n', (5564, 5575), False, 'from numpy.testing import assert_array_equal, assert_equal\n'), ((5621, 5666), 'scipy.array', 'array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]', '"""d"""'], {}), "([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 'd')\n", (5626, 5666), False, 'from scipy import array, zeros, ones, logspace\n'), ((5682, 5727), 'scipy.array', 'array', (['[[3, 2, 1], [6, 5, 4], [9, 8, 7]]', '"""d"""'], {}), "([[3, 2, 1], [6, 5, 4], [9, 8, 7]], 'd')\n", (5687, 5727), False, 'from scipy import array, zeros, ones, logspace\n'), ((5789, 5818), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['arr', 'karr'], {}), '(arr, karr)\n', (5807, 5818), False, 'from numpy.testing import assert_array_equal, assert_equal\n'), ((5864, 5909), 'scipy.array', 'array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]', '"""D"""'], {}), "([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 'D')\n", (5869, 5909), False, 'from scipy 
import array, zeros, ones, logspace\n'), ((5925, 5970), 'scipy.array', 'array', (['[[7, 8, 9], [4, 5, 6], [1, 2, 3]]', '"""D"""'], {}), "([[7, 8, 9], [4, 5, 6], [1, 2, 3]], 'D')\n", (5930, 5970), False, 'from scipy import array, zeros, ones, logspace\n'), ((6032, 6061), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['arr', 'karr'], {}), '(arr, karr)\n', (6050, 6061), False, 'from numpy.testing import assert_array_equal, assert_equal\n'), ((6107, 6152), 'scipy.array', 'array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]', '"""D"""'], {}), "([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 'D')\n", (6112, 6152), False, 'from scipy import array, zeros, ones, logspace\n'), ((6168, 6213), 'scipy.array', 'array', (['[[3, 2, 1], [6, 5, 4], [9, 8, 7]]', '"""D"""'], {}), "([[3, 2, 1], [6, 5, 4], [9, 8, 7]], 'D')\n", (6173, 6213), False, 'from scipy import array, zeros, ones, logspace\n'), ((6275, 6304), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['arr', 'karr'], {}), '(arr, karr)\n', (6293, 6304), False, 'from numpy.testing import assert_array_equal, assert_equal\n'), ((6355, 6406), 'scipy.array', 'array', (['[[1e-15, 2, 3], [4, -0.0, 6], [7, 8, 1e-15]]'], {}), '([[1e-15, 2, 3], [4, -0.0, 6], [7, 8, 1e-15]])\n', (6360, 6406), False, 'from scipy import array, zeros, ones, logspace\n'), ((6426, 6472), 'scipy.array', 'array', (['[[0.0, 2, 3], [4, 0.0, 6], [7, 8, 0.0]]'], {}), '([[0.0, 2, 3], [4, 0.0, 6], [7, 8, 0.0]])\n', (6431, 6472), False, 'from scipy import array, zeros, ones, logspace\n'), ((6541, 6570), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['arr', 'karr'], {}), '(arr, karr)\n', (6559, 6570), False, 'from numpy.testing import assert_array_equal, assert_equal\n'), ((6619, 6632), 'scipy.zeros', 'zeros', (['(3)', '"""d"""'], {}), "(3, 'd')\n", (6624, 6632), False, 'from scipy import array, zeros, ones, logspace\n'), ((6647, 6660), 'scipy.zeros', 'zeros', (['(3)', '"""d"""'], {}), "(3, 'd')\n", (6652, 6660), False, 'from scipy import array, zeros, ones, logspace\n'), ((6876, 6912), 'scipy.optimize.fsolve', 'fsolve', (['linear1_ode', '[1.0, 1.0, 1.0]'], {}), '(linear1_ode, [1.0, 1.0, 1.0])\n', (6882, 6912), False, 'from scipy.optimize import fsolve\n'), ((6976, 7013), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'known'], {}), '(res, known)\n', (7001, 7013), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((7062, 7075), 'scipy.zeros', 'zeros', (['(3)', '"""d"""'], {}), "(3, 'd')\n", (7067, 7075), False, 'from scipy import array, zeros, ones, logspace\n'), ((7090, 7103), 'scipy.zeros', 'zeros', (['(3)', '"""d"""'], {}), "(3, 'd')\n", (7095, 7103), False, 'from scipy import array, zeros, ones, logspace\n'), ((7444, 7905), 'scipy.array', 'array', (['[[1.0, 1.0, 1.0], [20.70698909, 27.55450322, 20.9819434], [22.4418045, \n 35.47695146, 33.60751644], [22.91227529, 37.71013683, 37.40388317], [\n 23.04761842, 38.35410015, 38.50268374], [23.08668173, 38.53997688, \n 38.81993704], [23.09809306, 38.59337738, 38.91160251], [23.10124276, \n 38.60909619, 38.93798616], [23.10217397, 38.61358525, 38.94561234], [\n 23.10245088, 38.61486396, 38.94781836], [23.10253812, 38.61526501, \n 38.94851136]]'], {}), '([[1.0, 1.0, 1.0], [20.70698909, 27.55450322, 20.9819434], [22.4418045,\n 35.47695146, 33.60751644], [22.91227529, 37.71013683, 37.40388317], [\n 23.04761842, 38.35410015, 38.50268374], [23.08668173, 38.53997688, \n 38.81993704], [23.09809306, 38.59337738, 38.91160251], [23.10124276, \n 38.60909619, 38.93798616], 
[23.10217397, 38.61358525, 38.94561234], [\n 23.10245088, 38.61486396, 38.94781836], [23.10253812, 38.61526501, \n 38.94851136]])\n', (7449, 7905), False, 'from scipy import array, zeros, ones, logspace\n'), ((8099, 8143), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['sim_res', 'known', '(3)'], {}), '(sim_res, known, 3)\n', (8124, 8143), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((8277, 8290), 'scipy.zeros', 'zeros', (['(3)', '"""d"""'], {}), "(3, 'd')\n", (8282, 8290), False, 'from scipy import array, zeros, ones, logspace\n'), ((8305, 8318), 'scipy.zeros', 'zeros', (['(3)', '"""d"""'], {}), "(3, 'd')\n", (8310, 8318), False, 'from scipy import array, zeros, ones, logspace\n'), ((8782, 9206), 'scipy.array', 'array', (['[[0.7, 1.0, 0.3], [0.43521812, 0.94587168, 0.05412832], [0.21547096, \n 0.94879365, 0.05120635], [0.15590709, 0.94882109, 0.05117891], [\n 0.14233313, 0.9488214, 0.0511786], [0.13938884, 0.94882111, 0.05117889],\n [0.13875741, 0.94882111, 0.05117889], [0.1386223, 0.94882133, \n 0.05117867], [0.1385936, 0.94882134, 0.05117866], [0.13858742, \n 0.94882133, 0.05117867], [0.13858601, 0.94882133, 0.05117867]]'], {}), '([[0.7, 1.0, 0.3], [0.43521812, 0.94587168, 0.05412832], [0.21547096, \n 0.94879365, 0.05120635], [0.15590709, 0.94882109, 0.05117891], [\n 0.14233313, 0.9488214, 0.0511786], [0.13938884, 0.94882111, 0.05117889],\n [0.13875741, 0.94882111, 0.05117889], [0.1386223, 0.94882133, \n 0.05117867], [0.1385936, 0.94882134, 0.05117866], [0.13858742, \n 0.94882133, 0.05117867], [0.13858601, 0.94882133, 0.05117867]])\n', (8787, 9206), False, 'from scipy import array, zeros, ones, logspace\n'), ((9405, 9465), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['sim_res[-5:, :]', 'known[-5:, :]', '(3)'], {}), '(sim_res[-5:, :], known[-5:, :], 3)\n', (9430, 9465), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((9847, 9897), 'scipy.array', 'array', (['[23.1025641, 38.61538462, 38.94871795]', '"""d"""'], {}), "([23.1025641, 38.61538462, 38.94871795], 'd')\n", (9852, 9897), False, 'from scipy import array, zeros, ones, logspace\n'), ((9906, 9958), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['lin.state_species', 'linmet'], {}), '(lin.state_species, linmet)\n', (9931, 9958), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((10150, 10209), 'scipy.array', 'array', (['[4.8583996, 1.88547254, 1.49124431, 1.49124431]', '"""d"""'], {}), "([4.8583996, 1.88547254, 1.49124431, 1.49124431], 'd')\n", (10155, 10209), False, 'from scipy import array, zeros, ones, logspace\n'), ((10218, 10270), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['bra.state_species', 'bramet'], {}), '(bra.state_species, bramet)\n', (10243, 10270), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((10462, 10599), 'scipy.array', 'array', (['[3.6886875, 16.25569882, 7.3113125, 4.39229787, 41.02504596, 2.60770213, \n 0.42718994, 2.57281006, 2.44791155, 17.0012171]', '"""d"""'], {}), "([3.6886875, 16.25569882, 7.3113125, 4.39229787, 41.02504596, \n 2.60770213, 0.42718994, 2.57281006, 2.44791155, 17.0012171], 'd')\n", (10467, 10599), False, 'from scipy import array, zeros, ones, logspace\n'), ((10813, 10865), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['moi.state_species', 'moimet'], {}), '(moi.state_species, moimet)\n', (10838, 
10865), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((11057, 11117), 'scipy.array', 'array', (['[76.8974359, 76.8974359, 76.8974359, 76.8974359]', '"""d"""'], {}), "([76.8974359, 76.8974359, 76.8974359, 76.8974359], 'd')\n", (11062, 11117), False, 'from scipy import array, zeros, ones, logspace\n'), ((11126, 11176), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['lin.state_flux', 'linflux'], {}), '(lin.state_flux, linflux)\n', (11151, 11176), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((11368, 11457), 'scipy.array', 'array', (['[2.42139889, 2.42139889, 1.21069945, 1.21069945, 1.21069945, 1.21069945]', '"""d"""'], {}), "([2.42139889, 2.42139889, 1.21069945, 1.21069945, 1.21069945, \n 1.21069945], 'd')\n", (11373, 11457), False, 'from scipy import array, zeros, ones, logspace\n'), ((11496, 11546), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['bra.state_flux', 'braflux'], {}), '(bra.state_flux, braflux)\n', (11521, 11546), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((11738, 11852), 'scipy.array', 'array', (['[250.01825652, 250.01825652, 250.01825652, 250.01825652, 250.01825652, \n 250.01825652, 250.01825652]', '"""d"""'], {}), "([250.01825652, 250.01825652, 250.01825652, 250.01825652, 250.01825652,\n 250.01825652, 250.01825652], 'd')\n", (11743, 11852), False, 'from scipy import array, zeros, ones, logspace\n'), ((12019, 12069), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['moi.state_flux', 'moiflux'], {}), '(moi.state_flux, moiflux)\n', (12044, 12069), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((12803, 12844), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['line', 'line_new'], {}), '(line, line_new)\n', (12828, 12844), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((13882, 13923), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['brae', 'brae_new'], {}), '(brae, brae_new)\n', (13907, 13923), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((15922, 15963), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['moie', 'moie_new'], {}), '(moie, moie_new)\n', (15947, 15963), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((17480, 17523), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['lincc', 'lincc_new'], {}), '(lincc, lincc_new)\n', (17505, 17523), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((20293, 20336), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['bracc', 'bracc_new'], {}), '(bracc, bracc_new)\n', (20318, 20336), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((25246, 25289), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['moicc', 'moicc_new'], {}), '(moicc, moicc_new)\n', (25271, 25289), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((26164, 26191), 'scipy.zeros', 'scipy.zeros', (['(30 + parm)', '"""i"""'], {}), "(30 + parm, 'i')\n", (26175, 26191), False, 'import scipy\n'), ((26210, 26248), 'scipy.zeros', 'scipy.zeros', (['(30 + 6 * parm * parm)', '"""d"""'], {}), "(30 + 6 * parm * parm, 'd')\n", (26221, 26248), False, 'import scipy\n'), ((26268, 
26290), 'scipy.zeros', 'scipy.zeros', (['parm', '"""i"""'], {}), "(parm, 'i')\n", (26279, 26290), False, 'import scipy\n'), ((26308, 26330), 'scipy.zeros', 'scipy.zeros', (['parm', '"""d"""'], {}), "(parm, 'd')\n", (26319, 26330), False, 'import scipy\n'), ((26346, 26368), 'scipy.zeros', 'scipy.zeros', (['parm', '"""d"""'], {}), "(parm, 'd')\n", (26357, 26368), False, 'import scipy\n'), ((26400, 26419), 'scipy.zeros', 'scipy.zeros', (['(3)', '"""d"""'], {}), "(3, 'd')\n", (26411, 26419), False, 'import scipy\n'), ((27566, 27585), 'scipy.array', 'scipy.array', (['output'], {}), '(output)\n', (27577, 27585), False, 'import scipy\n'), ((27862, 27909), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['limits[0]', 'limit0', '(4)'], {}), '(limits[0], limit0, 4)\n', (27887, 27909), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((27918, 27965), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['limits[1]', 'limit1', '(4)'], {}), '(limits[1], limit1, 4)\n', (27943, 27965), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((27974, 28017), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['output6', 'out6', '(4)'], {}), '(output6, out6, 4)\n', (27999, 28017), False, 'from numpy.testing import assert_almost_equal, assert_array_almost_equal\n'), ((1983, 2000), 'os.listdir', 'os.listdir', (['dirIn'], {}), '(dirIn)\n', (1993, 2000), False, 'import os, sys, shutil\n'), ((4846, 4854), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (4851, 4854), False, 'from time import sleep\n'), ((7376, 7417), 'scipy.array', 'array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n', (7381, 7417), False, 'from scipy import array, zeros, ones, logspace\n'), ((8714, 8755), 'scipy.array', 'array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n', (8719, 8755), False, 'from scipy import array, zeros, ones, logspace\n'), ((2057, 2075), 'os.listdir', 'os.listdir', (['dirOut'], {}), '(dirOut)\n', (2067, 2075), False, 'import os, sys, shutil\n'), ((4168, 4179), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4177, 4179), False, 'import os, sys, shutil\n'), ((2983, 3008), 'os.path.join', 'os.path.join', (['dirIn', 'File'], {}), '(dirIn, File)\n', (2995, 3008), False, 'import os, sys, shutil\n'), ((3010, 3036), 'os.path.join', 'os.path.join', (['dirOut', 'File'], {}), '(dirOut, File)\n', (3022, 3036), False, 'import os, sys, shutil\n'), ((2674, 2699), 'os.path.join', 'os.path.join', (['dirIn', 'File'], {}), '(dirIn, File)\n', (2686, 2699), False, 'import os, sys, shutil\n'), ((2737, 2763), 'os.path.join', 'os.path.join', (['dirOut', 'File'], {}), '(dirOut, File)\n', (2749, 2763), False, 'import os, sys, shutil\n')] |
from numpy.distutils.misc_util import Configuration
def configuration(parent_package='', top_path=None):
config = Configuration('special', parent_package, top_path)
config.add_data_dir('tests')
    return config

# Conventional setup.py entry point for this configuration (a sketch, not part
# of the original snippet; assumes a numpy.distutils-based build):
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict()) | [
"numpy.distutils.misc_util.Configuration"
] | [((119, 169), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['"""special"""', 'parent_package', 'top_path'], {}), "('special', parent_package, top_path)\n", (132, 169), False, 'from numpy.distutils.misc_util import Configuration\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2020 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Provide support utilities for time lagging ensembles"""
import numpy as np
from improver import BasePlugin
from improver.metadata.forecast_times import rebadge_forecasts_as_latest_cycle
from improver.utilities.cube_manipulation import MergeCubes
class GenerateTimeLaggedEnsemble(BasePlugin):
"""Combine realizations from different forecast cycles into one cube"""
def process(self, cubelist):
"""
Take an input cubelist containing forecasts from different cycles and
merges them into a single cube.
The steps taken are:
1. Update forecast reference time and period to match the latest
contributing cycle.
2. Check for duplicate realization numbers. If a duplicate is
found, renumber all of the realizations uniquely.
3. Concatenate into one cube along the realization axis.
Args:
cubelist (iris.cube.CubeList or list of iris.cube.Cube):
List of input forecasts
Returns:
iris.cube.Cube:
Concatenated forecasts
"""
cubelist = rebadge_forecasts_as_latest_cycle(cubelist)
# Take all the realizations from all the input cube and
# put in one array
all_realizations = [cube.coord("realization").points for cube in cubelist]
all_realizations = np.concatenate(all_realizations)
# Find unique realizations
unique_realizations = np.unique(all_realizations)
# If we have fewer unique realizations than total realizations we have
# duplicate realizations so we rebadge all realizations in the cubelist
if len(unique_realizations) < len(all_realizations):
first_realization = 0
for cube in cubelist:
n_realization = len(cube.coord("realization").points)
cube.coord("realization").points = np.arange(
first_realization, first_realization + n_realization, dtype=np.int32
)
first_realization = first_realization + n_realization
# slice over realization to deal with cases where direct concatenation
# would result in a non-monotonic coordinate
lagged_ensemble = MergeCubes()(cubelist, slice_over_realization=True)
return lagged_ensemble
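
# Minimal usage sketch (hypothetical cubes; assumes iris forecast cubes from
# different cycles, each carrying a "realization" coordinate):
#   plugin = GenerateTimeLaggedEnsemble()
#   lagged_cube = plugin.process([cube_cycle_t0, cube_cycle_t1])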
| [
"numpy.concatenate",
"numpy.arange",
"improver.utilities.cube_manipulation.MergeCubes",
"improver.metadata.forecast_times.rebadge_forecasts_as_latest_cycle",
"numpy.unique"
] | [((2791, 2834), 'improver.metadata.forecast_times.rebadge_forecasts_as_latest_cycle', 'rebadge_forecasts_as_latest_cycle', (['cubelist'], {}), '(cubelist)\n', (2824, 2834), False, 'from improver.metadata.forecast_times import rebadge_forecasts_as_latest_cycle\n'), ((3037, 3069), 'numpy.concatenate', 'np.concatenate', (['all_realizations'], {}), '(all_realizations)\n', (3051, 3069), True, 'import numpy as np\n'), ((3135, 3162), 'numpy.unique', 'np.unique', (['all_realizations'], {}), '(all_realizations)\n', (3144, 3162), True, 'import numpy as np\n'), ((3920, 3932), 'improver.utilities.cube_manipulation.MergeCubes', 'MergeCubes', ([], {}), '()\n', (3930, 3932), False, 'from improver.utilities.cube_manipulation import MergeCubes\n'), ((3573, 3652), 'numpy.arange', 'np.arange', (['first_realization', '(first_realization + n_realization)'], {'dtype': 'np.int32'}), '(first_realization, first_realization + n_realization, dtype=np.int32)\n', (3582, 3652), True, 'import numpy as np\n')] |
import os
import sys
import time
import logging
import numpy as np
from datetime import datetime
import torch
import flags
import datacode
import worlds
import trainers
import agents
import teachers
from misc import util
def main():
config = configure()
datasets = datacode.load(config)
trainer = trainers.load(config)
student = agents.load(config)
teacher = teachers.load(config)
with torch.cuda.device(config.device_id):
trainer.train(datasets, student, teacher)
def configure():
config = flags.make_config()
config.command_line = 'python -u ' + ' '.join(sys.argv)
config.data_dir = "%s/%s" % (config.data_path, config.task)
output_dir = "%s/experiments" % config.save_path
print("Data directory is ", config.data_dir)
print("Output direction is ", output_dir)
print("Old data is ", os.getenv('PT_OUTPUT_DIR', 'experiments'))
config.experiment_dir = "%s/%s" % (output_dir, config.name)
if config.resume:
ckpt_file = '%s/%s' % (config.experiment_dir, 'last.ckpt')
if os.path.exists(ckpt_file):
print('Resume from %s' % ckpt_file)
config.student.model.load_from = ckpt_file
elif not os.path.exists(config.experiment_dir):
print('Did not find a checkpoint to resume from')
os.makedirs(config.experiment_dir)
else:
assert not os.path.exists(config.experiment_dir), \
"Experiment %s already exists!" % config.experiment_dir
os.makedirs(config.experiment_dir)
torch.manual_seed(config.seed)
random = np.random.RandomState(config.seed)
config.random = random
config.device = torch.device('cuda', config.device_id)
config.start_time = time.time()
log_file = "%s/run.log" % config.experiment_dir
util.config_logging(log_file)
logging.info(str(datetime.now()))
logging.info(config.command_line)
logging.info('Write log to %s' % log_file)
logging.info(str(config))
return config
if __name__ == '__main__':
main()
| [
"teachers.load",
"os.makedirs",
"flags.make_config",
"torch.manual_seed",
"misc.util.config_logging",
"os.path.exists",
"numpy.random.RandomState",
"time.time",
"datacode.load",
"agents.load",
"logging.info",
"datetime.datetime.now",
"torch.device",
"torch.cuda.device",
"os.getenv",
"t... | [((280, 301), 'datacode.load', 'datacode.load', (['config'], {}), '(config)\n', (293, 301), False, 'import datacode\n'), ((317, 338), 'trainers.load', 'trainers.load', (['config'], {}), '(config)\n', (330, 338), False, 'import trainers\n'), ((354, 373), 'agents.load', 'agents.load', (['config'], {}), '(config)\n', (365, 373), False, 'import agents\n'), ((389, 410), 'teachers.load', 'teachers.load', (['config'], {}), '(config)\n', (402, 410), False, 'import teachers\n'), ((540, 559), 'flags.make_config', 'flags.make_config', ([], {}), '()\n', (557, 559), False, 'import flags\n'), ((1551, 1581), 'torch.manual_seed', 'torch.manual_seed', (['config.seed'], {}), '(config.seed)\n', (1568, 1581), False, 'import torch\n'), ((1595, 1629), 'numpy.random.RandomState', 'np.random.RandomState', (['config.seed'], {}), '(config.seed)\n', (1616, 1629), True, 'import numpy as np\n'), ((1678, 1716), 'torch.device', 'torch.device', (['"""cuda"""', 'config.device_id'], {}), "('cuda', config.device_id)\n", (1690, 1716), False, 'import torch\n'), ((1742, 1753), 'time.time', 'time.time', ([], {}), '()\n', (1751, 1753), False, 'import time\n'), ((1811, 1840), 'misc.util.config_logging', 'util.config_logging', (['log_file'], {}), '(log_file)\n', (1830, 1840), False, 'from misc import util\n'), ((1883, 1916), 'logging.info', 'logging.info', (['config.command_line'], {}), '(config.command_line)\n', (1895, 1916), False, 'import logging\n'), ((1921, 1963), 'logging.info', 'logging.info', (["('Write log to %s' % log_file)"], {}), "('Write log to %s' % log_file)\n", (1933, 1963), False, 'import logging\n'), ((421, 456), 'torch.cuda.device', 'torch.cuda.device', (['config.device_id'], {}), '(config.device_id)\n', (438, 456), False, 'import torch\n'), ((861, 902), 'os.getenv', 'os.getenv', (['"""PT_OUTPUT_DIR"""', '"""experiments"""'], {}), "('PT_OUTPUT_DIR', 'experiments')\n", (870, 902), False, 'import os\n'), ((1070, 1095), 'os.path.exists', 'os.path.exists', (['ckpt_file'], {}), '(ckpt_file)\n', (1084, 1095), False, 'import os\n'), ((1511, 1545), 'os.makedirs', 'os.makedirs', (['config.experiment_dir'], {}), '(config.experiment_dir)\n', (1522, 1545), False, 'import os\n'), ((1394, 1431), 'os.path.exists', 'os.path.exists', (['config.experiment_dir'], {}), '(config.experiment_dir)\n', (1408, 1431), False, 'import os\n'), ((1862, 1876), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1874, 1876), False, 'from datetime import datetime\n'), ((1217, 1254), 'os.path.exists', 'os.path.exists', (['config.experiment_dir'], {}), '(config.experiment_dir)\n', (1231, 1254), False, 'import os\n'), ((1330, 1364), 'os.makedirs', 'os.makedirs', (['config.experiment_dir'], {}), '(config.experiment_dir)\n', (1341, 1364), False, 'import os\n')] |
import netCDF4 as nc
import os
import re
import numpy as np
from matplotlib.patches import Ellipse, Circle
from matplotlib.collections import EllipseCollection
class nc_reader:
def __init__(self):
self._ncfile = None
# either a field or parcel file
self._nctype = None
def open(self, fname):
if not os.path.exists(fname):
raise IOError("File '" + fname + "' does not exist.")
self._ncfile = nc.Dataset(fname, "r", format="NETCDF4")
self._nctype = self.get_global_attribute("file_type")
# if we read in a parcel file we pre-evaluate the number of
# steps
if self.is_parcel_file:
self._loaded_step = -1
basename = os.path.basename(fname)
# 14 Feb 2022
# https://stackoverflow.com/questions/15340582/python-extract-pattern-matches
            p = re.compile(r"(.*)_(\d*)_parcels.nc")
result = p.search(basename)
self._basename = result.group(1)
self._dirname = os.path.dirname(fname)
if self._dirname == '':
self._dirname = '.'
self._n_parcel_files = 0
for ff in os.listdir(self._dirname):
if self._basename in ff and '_parcels.nc' in ff:
self._n_parcel_files += 1
def close(self):
self._ncfile.close()
@property
def is_parcel_file(self):
return self._nctype == "parcels"
@property
def is_parcel_stats_file(self):
return self._nctype == "parcel_stats"
@property
def is_field_stats_file(self):
return self._nctype == "field_stats"
@property
def is_field_file(self):
return self._nctype == "fields"
def get_num_steps(self):
if self.is_parcel_file:
return self._n_parcel_files
else:
return self._ncfile.dimensions['t'].size
def get_box_extent(self):
return self.get_global_attribute("extent")
def get_box_ncells(self):
return self.get_global_attribute("ncells")
def get_box_origin(self):
return self.get_global_attribute("origin")
def get_all(self, name):
if self.is_parcel_file:
raise IOError("This function is not availble for parcel files.")
if not name in self._ncfile.variables.keys():
raise IOError("Dataset '" + name + "' unknown.")
return np.array(self._ncfile.variables[name])
def get_dataset(self, step, name, indices=None):
if not name in self._ncfile.variables.keys():
raise IOError("Dataset '" + name + "' unknown.")
nsteps = self.get_num_steps()
if step > nsteps - 1:
raise ValueError("Dataset has only steps 0 to " + str(nsteps - 1) + ".")
if self.is_parcel_file and name == 't':
# parcel files store the time as a global attribute
step = step + 1
self._load_parcel_file(step)
return self._ncfile.variables[name]
if self.is_parcel_file:
step = step + 1
self._load_parcel_file(step)
if indices is not None:
return self._ncfile.variables[name][indices, ...]
else:
return np.array(self._ncfile.variables[name])
else:
if indices is not None:
return self._ncfile.variables[name][step, ...][indices, ...]
else:
return np.array(self._ncfile.variables[name][step, ...])
def get_dataset_attribute(self, name, attr):
if not name in self._ncfile.variables.keys():
raise IOError("Dataset '" + name + "' unknown.")
if not attr in self._ncfile.variables[name].ncattrs():
raise IOError("Dataset attribute '" + name + "' unknown.")
return self._ncfile.variables[name].getncattr(attr)
def get_dataset_min_max(self, name, indices=None):
nsteps = self.get_num_steps()
data = self.get_dataset(0, name, indices=indices)
vmax = data.max()
vmin = data.min()
for step in range(1, nsteps):
data = self.get_dataset(step, name, indices=indices)
vmax = max(vmax, data.max())
vmin = min(vmin, data.min())
return vmin, vmax
def get_global_attribute_names(self):
return list(self._ncfile.ncattrs())
def get_global_attribute(self, name):
if not name in self._ncfile.ncattrs():
raise IOError("Global attribute '" + name + "' unknown.")
attr = self._ncfile.getncattr(name)
if isinstance(attr, np.bytes_):
attr = attr.decode()
return attr
def get_diagnostic(self, name):
if not name in self._ncfile.variables.keys():
raise IOError("Dataset '" + name + "' unknown.")
return np.array(self._ncfile.variables[name])
def get_num_parcels(self, step):
if not self.is_parcel_file:
raise IOError("Not a parcel output file.")
self._load_parcel_file(step)
return self._ncfile.dimensions['n_parcels'].size
def get_ellipses(self, step, indices=None):
if not self.is_parcel_file:
raise IOError("Not a parcel output file.")
x_pos = self.get_dataset(step, "x_position", indices=indices)
z_pos = self.get_dataset(step, "z_position", indices=indices)
position = np.empty((len(x_pos), 2))
position[:, 0] = x_pos
position[:, 1] = z_pos
V = self.get_dataset(step, "volume", indices=indices)
B11 = self.get_dataset(step, "B11", indices=indices)
B12 = self.get_dataset(step, "B12", indices=indices)
B22 = self._get_B22(B11, B12, V)
a2 = self._get_eigenvalue(B11, B12, B22)
angle = self._get_angle(B11, B12, B22, a2)
b2 = (V / np.pi) ** 2 / a2
# 4 Feb 2022
# https://matplotlib.org/stable/gallery/shapes_and_collections/ellipse_collection.html
return EllipseCollection(widths=2 * np.sqrt(a2),
heights=2 * np.sqrt(b2),
angles=np.rad2deg(angle),
units='xy',
offsets=position)
def get_ellipses_for_bokeh(self, step, indices=None):
if not self.is_parcel_file:
raise IOError("Not a parcel output file.")
x_pos = self.get_dataset(step, "x_position", indices=indices)
z_pos = self.get_dataset(step, "z_position", indices=indices)
V = self.get_dataset(step, "volume", indices=indices)
B11 = self.get_dataset(step, "B11", indices=indices)
B12 = self.get_dataset(step, "B12", indices=indices)
B22 = self._get_B22(B11, B12, V)
a2 = self._get_eigenvalue(B11, B12, B22)
angle = self._get_angle(B11, B12, B22, a2)
b2 = (V / np.pi) ** 2 / a2
return (
x_pos[:],
z_pos[:],
2 * np.sqrt(a2[:]),
2 * np.sqrt(b2[:]),
angle[:],
)
def get_aspect_ratio(self, step, indices=None):
if not self.is_parcel_file:
raise IOError("Not a parcel output file.")
V = self.get_dataset(step, "volume", indices=indices)
B11 = self.get_dataset(step, "B11", indices=indices)
B12 = self.get_dataset(step, "B12", indices=indices)
B22 = self._get_B22(B11, B12, V)
a2 = self._get_eigenvalue(B11, B12, B22)
return a2 / V * np.pi
def _get_B22(self, B11, B12, V):
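        # B22 follows from the area constraint det(B) = B11 * B22 - B12**2 = (V / pi)**2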
return ((V / np.pi) ** 2 + B12 ** 2) / B11
def _get_eigenvalue(self, B11, B12, B22):
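        # Larger eigenvalue of the symmetric 2x2 matrix [[B11, B12], [B12, B22]],
        # i.e. the squared semi-major axis a**2 of the ellipse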
return 0.5 * (B11 + B22) + np.sqrt(0.25 * (B11 - B22) ** 2 + B12 ** 2)
def _get_eigenvector(self, a2, B11, B12, B22):
evec = np.array([a2 - B22, B12])
for i in range(evec.shape[1]):
if abs(evec[0, i]) + abs(evec[1, i]) == 0.0:
if B11[i] > B22[i]:
evec[0, i] = evec[0, i] + np.finfo(np.float64).eps
else:
evec[1, i] = evec[1, i] + np.finfo(np.float64).eps
return evec / np.linalg.norm(evec, 2)
def _get_angle(self, B11, B12, B22, a2=None):
if a2 is None:
a2 = self._get_eigenvalue(B11, B12, B22)
evec = self._get_eigenvector(a2, B11, B12, B22)
return np.arctan2(evec[1, :], evec[0, :])
def _get_step_string(self, step):
return str(step).zfill(10)
def _load_parcel_file(self, step):
if self._loaded_step == step:
return
self._loaded_step = step
self._ncfile.close()
s = self._get_step_string(step)
fname = os.path.join(self._dirname, self._basename + '_' + s + '_parcels.nc')
self._ncfile = nc.Dataset(fname, "r", format="NETCDF4")
# 18 Feb 2022
# https://stackoverflow.com/questions/8450472/how-to-print-a-string-at-a-fixed-width
# 19 Feb 2022
# https://stackoverflow.com/questions/873327/pythons-most-efficient-way-to-choose-longest-string-in-list
def __str__(self):
print("=" * 80)
# print global attributes
print("GLOBAL ATTRIBUTES:")
l = len(max(self._ncfile.ncattrs(), key=len))
fmt = '{0: <' + str(l) + '}'
for key in self._ncfile.ncattrs():
print(fmt.format(key), "\t", self._ncfile.getncattr(key))
print("-" * 80)
print("DIMENSIONS:")
for dim in self._ncfile.dimensions:
print(" ", dim, "=", self._ncfile.dimensions[dim].size)
print("-" * 80)
print("VARIABLES:")
# get first variable name
name = list(self._ncfile.variables.keys())[0]
# get length of longest attribute string
l = len(max(self._ncfile.variables[name].ncattrs(), key=len))
fmt = '{0: <' + str(l) + '}'
# print variables and their attributes
for var in self._ncfile.variables:
print(" ", var)
for attr in self._ncfile.variables[var].ncattrs():
print("\t", fmt.format(attr), "\t", self._ncfile.variables[var].getncattr(attr))
print("=" * 80)
return ""
| [
"netCDF4.Dataset",
"numpy.arctan2",
"os.path.basename",
"os.path.dirname",
"os.path.exists",
"numpy.rad2deg",
"numpy.finfo",
"numpy.array",
"numpy.linalg.norm",
"numpy.sqrt",
"os.path.join",
"os.listdir",
"re.compile"
] | [((455, 495), 'netCDF4.Dataset', 'nc.Dataset', (['fname', '"""r"""'], {'format': '"""NETCDF4"""'}), "(fname, 'r', format='NETCDF4')\n", (465, 495), True, 'import netCDF4 as nc\n'), ((2431, 2469), 'numpy.array', 'np.array', (['self._ncfile.variables[name]'], {}), '(self._ncfile.variables[name])\n', (2439, 2469), True, 'import numpy as np\n'), ((4853, 4891), 'numpy.array', 'np.array', (['self._ncfile.variables[name]'], {}), '(self._ncfile.variables[name])\n', (4861, 4891), True, 'import numpy as np\n'), ((7787, 7812), 'numpy.array', 'np.array', (['[a2 - B22, B12]'], {}), '([a2 - B22, B12])\n', (7795, 7812), True, 'import numpy as np\n'), ((8355, 8389), 'numpy.arctan2', 'np.arctan2', (['evec[1, :]', 'evec[0, :]'], {}), '(evec[1, :], evec[0, :])\n', (8365, 8389), True, 'import numpy as np\n'), ((8679, 8748), 'os.path.join', 'os.path.join', (['self._dirname', "(self._basename + '_' + s + '_parcels.nc')"], {}), "(self._dirname, self._basename + '_' + s + '_parcels.nc')\n", (8691, 8748), False, 'import os\n'), ((8772, 8812), 'netCDF4.Dataset', 'nc.Dataset', (['fname', '"""r"""'], {'format': '"""NETCDF4"""'}), "(fname, 'r', format='NETCDF4')\n", (8782, 8812), True, 'import netCDF4 as nc\n'), ((343, 364), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (357, 364), False, 'import os\n'), ((733, 756), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (749, 756), False, 'import os\n'), ((889, 925), 're.compile', 're.compile', (['"""(.*)_(\\\\d*)_parcels.nc"""'], {}), "('(.*)_(\\\\d*)_parcels.nc')\n", (899, 925), False, 'import re\n'), ((1038, 1060), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (1053, 1060), False, 'import os\n'), ((1192, 1217), 'os.listdir', 'os.listdir', (['self._dirname'], {}), '(self._dirname)\n', (1202, 1217), False, 'import os\n'), ((7676, 7719), 'numpy.sqrt', 'np.sqrt', (['(0.25 * (B11 - B22) ** 2 + B12 ** 2)'], {}), '(0.25 * (B11 - B22) ** 2 + B12 ** 2)\n', (7683, 7719), True, 'import numpy as np\n'), ((8133, 8156), 'numpy.linalg.norm', 'np.linalg.norm', (['evec', '(2)'], {}), '(evec, 2)\n', (8147, 8156), True, 'import numpy as np\n'), ((3269, 3307), 'numpy.array', 'np.array', (['self._ncfile.variables[name]'], {}), '(self._ncfile.variables[name])\n', (3277, 3307), True, 'import numpy as np\n'), ((3476, 3525), 'numpy.array', 'np.array', (['self._ncfile.variables[name][step, ...]'], {}), '(self._ncfile.variables[name][step, ...])\n', (3484, 3525), True, 'import numpy as np\n'), ((6135, 6152), 'numpy.rad2deg', 'np.rad2deg', (['angle'], {}), '(angle)\n', (6145, 6152), True, 'import numpy as np\n'), ((6977, 6991), 'numpy.sqrt', 'np.sqrt', (['a2[:]'], {}), '(a2[:])\n', (6984, 6991), True, 'import numpy as np\n'), ((7009, 7023), 'numpy.sqrt', 'np.sqrt', (['b2[:]'], {}), '(b2[:])\n', (7016, 7023), True, 'import numpy as np\n'), ((6024, 6035), 'numpy.sqrt', 'np.sqrt', (['a2'], {}), '(a2)\n', (6031, 6035), True, 'import numpy as np\n'), ((6082, 6093), 'numpy.sqrt', 'np.sqrt', (['b2'], {}), '(b2)\n', (6089, 6093), True, 'import numpy as np\n'), ((7992, 8012), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (8000, 8012), True, 'import numpy as np\n'), ((8085, 8105), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (8093, 8105), True, 'import numpy as np\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import math
import tvm.contrib.ethosu.cascader as cs
from .infra import make_matrices
@pytest.mark.parametrize(
"test_id, op_type, activation, kernel, stride, dilation, padding, in_shape, out_shape",
[
# Conv2D
(
0,
"ethosu_conv2d",
"NONE",
(34, 19),
(2, 2),
(1, 1),
(0, 0, 0, 0),
(1, 266, 111, 15),
(1, 117, 47, 15),
),
(
1,
"ethosu_conv2d",
"NONE",
(14, 14),
(1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 125, 63, 64),
(1, 112, 50, 128),
),
(
2,
"ethosu_conv2d",
"NONE",
(7, 1),
(2, 1),
(1, 1),
(0, 0, 0, 0),
(1, 13, 4, 12),
(1, 4, 4, 511),
),
(
3,
"ethosu_conv2d",
"NONE",
(5, 5),
(1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 96, 16, 276),
(1, 92, 12, 16),
),
(
4,
"ethosu_conv2d",
"NONE",
(5, 5),
(1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 96, 16, 276),
(1, 92, 12, 1),
),
(
5,
"ethosu_conv2d",
"NONE",
(3, 3),
(1, 1),
(2, 2),
(0, 0, 0, 0),
(1, 62, 94, 32),
(1, 58, 90, 16),
),
# Depthwise Conv2D
(
6,
"ethosu_depthwise_conv2d",
"NONE",
(3, 5),
(1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 77, 23, 18),
(1, 75, 19, 18),
),
(
7,
"ethosu_depthwise_conv2d",
"NONE",
(3, 3),
(2, 2),
(1, 1),
(1, 1, 1, 1),
(1, 25, 10, 276),
(1, 13, 5, 276),
),
# Pooling
(
8,
"ethosu_pooling",
"NONE",
(13, 5),
(1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 13, 5, 276),
(1, 1, 1, 276),
),
(
9,
"ethosu_pooling",
"NONE",
(7, 3),
(2, 1),
(1, 1),
(0, 0, 0, 0),
(1, 317, 14, 21),
(1, 156, 12, 21),
),
],
)
@pytest.mark.parametrize(
"layouts",
[
("NHWC", "NHWC"),
("NHCWB16", "NHCWB16"),
("NHWC", "NHCWB16"),
("NHCWB16", "NHWC"),
],
)
@pytest.mark.parametrize(
"acc_config, expected_block_configs",
[
(
"ethos-u55-32",
[
# Conv2D
((1, 8, 4, 16), (1, 8, 1, 4, 16)),
((1, 6, 5, 16), (1, 6, 1, 5, 16)),
((1, 4, 4, 16), (1, 4, 1, 4, 16)),
((1, 8, 4, 16), (1, 8, 1, 4, 16)),
((1, 10, 6, 4), (1, 5, 1, 12, 4), (1, 16, 1, 4, 4)),
((1, 6, 5, 16), (1, 6, 1, 5, 16)),
# Depthwise Conv2D
((1, 6, 10, 16), (1, 6, 1, 10, 16)),
((1, 7, 5, 16), (1, 7, 1, 5, 16)),
# Pooling
((1, 1, 1, 16), (1, 1, 1, 1, 16)),
((1, 9, 6, 16), (1, 9, 1, 6, 16)),
],
),
(
"ethos-u55-64",
[
# Conv2D
((1, 8, 4, 16), (1, 8, 1, 4, 16)),
((1, 6, 5, 16), (1, 6, 1, 5, 16)),
((1, 4, 4, 16), (1, 4, 1, 4, 16)),
((1, 8, 4, 16), (1, 8, 1, 4, 16)),
((1, 10, 6, 8), (1, 16, 1, 4, 8)),
((1, 6, 5, 16), (1, 6, 1, 5, 16)),
# Depthwise Conv2D
((1, 6, 10, 16), (1, 6, 1, 10, 16)),
((1, 7, 5, 16), (1, 7, 1, 5, 16)),
# Pooling
((1, 1, 1, 16), (1, 1, 1, 1, 16)),
((1, 9, 6, 16), (1, 9, 1, 6, 16)),
],
),
(
"ethos-u55-128",
[
# Conv2D
((1, 7, 6, 16), (1, 7, 1, 6, 16)),
((1, 5, 8, 16), (1, 5, 1, 8, 16)),
((1, 4, 4, 16), (1, 4, 1, 4, 16)),
((1, 16, 4, 16), (1, 16, 1, 4, 16)),
((1, 8, 12, 8), (1, 8, 1, 12, 8)),
((1, 10, 6, 16), (1, 10, 1, 6, 16)),
# Depthwise Conv2D
((1, 7, 10, 16), (1, 7, 1, 10, 16)),
((1, 7, 6, 16), (1, 7, 1, 6, 16)),
# Pooling
((1, 1, 2, 80), (1, 1, 5, 2, 16)),
((1, 10, 6, 16), (1, 10, 1, 6, 16)),
],
),
(
"ethos-u55-256",
[
# Conv2D
((1, 14, 8, 16), (1, 14, 1, 8, 16)),
((1, 16, 8, 16), (1, 16, 1, 8, 16)),
((1, 4, 4, 16), (1, 4, 1, 4, 16)),
((1, 32, 4, 16), (1, 10, 12, 16), (1, 32, 1, 4, 16), (1, 10, 1, 12, 16)),
((1, 20, 12, 8), (1, 20, 1, 12, 8)),
((1, 12, 10, 16), (1, 12, 1, 10, 16)),
# Depthwise Conv2D
((1, 8, 20, 16), (1, 8, 1, 20, 16)),
((1, 14, 6, 16), (1, 14, 1, 6, 16)),
# Pooling
((1, 2, 2, 48), (1, 2, 3, 2, 16)),
((1, 10, 12, 16), (1, 10, 1, 12, 16)),
],
),
],
)
def test_best_block_config(
test_id,
op_type,
activation,
kernel,
stride,
dilation,
padding,
in_shape,
out_shape,
layouts,
acc_config,
expected_block_configs,
):
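    # Homogeneous affine map taking an NHWC shape (n, h, w, c, 1) to NHCWB16 (n, h, c/16, w, 16, 1)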
nhwc_to_nhcwb16 = [
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 1 / 16, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 16],
[0, 0, 0, 0, 1],
]
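    # Inverse map taking an NHCWB16 shape (n, h, c1, w, b, 1) back to NHWC, with c = 16*c1 + b - 16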
nhcwb16_to_nhwc = [
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 16, 0, 1, -16],
[0, 0, 0, 0, 0, 1],
]
ifm_matrix, ifm_offset, weight_matrix, weight_offset, _, _ = make_matrices(
op_type, kernel, stride, padding, layouts[0], layouts[1], dilation, in_shape[3]
)
ofm_channels = out_shape[3]
ifm_channels = in_shape[3]
if layouts[0] == "NHCWB16":
in_shape = [
int(math.ceil(n)) for n in np.matmul(nhwc_to_nhcwb16, in_shape + (1,)).tolist()[:-1]
]
if layouts[1] == "NHCWB16":
out_shape = [
int(math.ceil(n)) for n in np.matmul(nhwc_to_nhcwb16, out_shape + (1,)).tolist()[:-1]
]
propagator = cs.Propagator(ifm_matrix, ifm_offset)
weight_propagator = cs.Propagator(weight_matrix, weight_offset)
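    # Number of 8x8 subkernels needed to cover the kernel (ceiling division)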
subkernels = ((kernel[0] + 7) // 8) * ((kernel[1] + 7) // 8)
op_attrs = {
"op": op_type,
"activation": activation,
"stride_h": stride[0],
"stride_w": stride[1],
"dilation_h": dilation[0],
"dilation_w": dilation[1],
}
device_config = cs.EthosuDeviceConfig(acc_config)
block_configs = device_config.get_valid_block_configs(
propagator,
op_attrs,
out_shape,
ofm_channels,
ifm_channels,
layouts[1],
layouts[0],
"int8",
"int8",
kernel[0],
kernel[1],
)
output_quantum = [1, 1, 2, 8]
if layouts[1] == "NHCWB16":
output_quantum = [1, 1, 1, 2, 8]
# Create EthosUPart
te_subgraph = cs.TESubgraph([], None)
part = cs.EthosuPart(
te_subgraph,
[propagator, weight_propagator],
output_quantum,
subkernels,
block_configs,
1,
)
order = [1, 2, 3, 4] if layouts[1] == "NHCWB16" else [1, 2, 4, 3, 0]
stripes = [1] * len(output_quantum)
offset = [0] * len(output_quantum)
stripe_config = cs.StripeConfig(out_shape, out_shape, out_shape, order, stripes, offset)
block = part.get_block_config(stripe_config)
block_shape = tuple(int(a) for a in block.output_shape)
assert block_shape in expected_block_configs[test_id]
if __name__ == "__main__":
pytest.main([__file__])
| [
"pytest.importorskip",
"tvm.contrib.ethosu.cascader.EthosuPart",
"tvm.contrib.ethosu.cascader.TESubgraph",
"math.ceil",
"pytest.main",
"tvm.contrib.ethosu.cascader.Propagator",
"numpy.matmul",
"tvm.contrib.ethosu.cascader.EthosuDeviceConfig",
"tvm.contrib.ethosu.cascader.StripeConfig",
"pytest.mar... | [((800, 834), 'pytest.importorskip', 'pytest.importorskip', (['"""ethosu.vela"""'], {}), "('ethosu.vela')\n", (819, 834), False, 'import pytest\n'), ((946, 2182), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_id, op_type, activation, kernel, stride, dilation, padding, in_shape, out_shape"""', "[(0, 'ethosu_conv2d', 'NONE', (34, 19), (2, 2), (1, 1), (0, 0, 0, 0), (1, \n 266, 111, 15), (1, 117, 47, 15)), (1, 'ethosu_conv2d', 'NONE', (14, 14),\n (1, 1), (1, 1), (0, 0, 0, 0), (1, 125, 63, 64), (1, 112, 50, 128)), (2,\n 'ethosu_conv2d', 'NONE', (7, 1), (2, 1), (1, 1), (0, 0, 0, 0), (1, 13, \n 4, 12), (1, 4, 4, 511)), (3, 'ethosu_conv2d', 'NONE', (5, 5), (1, 1), (\n 1, 1), (0, 0, 0, 0), (1, 96, 16, 276), (1, 92, 12, 16)), (4,\n 'ethosu_conv2d', 'NONE', (5, 5), (1, 1), (1, 1), (0, 0, 0, 0), (1, 96, \n 16, 276), (1, 92, 12, 1)), (5, 'ethosu_conv2d', 'NONE', (3, 3), (1, 1),\n (2, 2), (0, 0, 0, 0), (1, 62, 94, 32), (1, 58, 90, 16)), (6,\n 'ethosu_depthwise_conv2d', 'NONE', (3, 5), (1, 1), (1, 1), (0, 0, 0, 0),\n (1, 77, 23, 18), (1, 75, 19, 18)), (7, 'ethosu_depthwise_conv2d',\n 'NONE', (3, 3), (2, 2), (1, 1), (1, 1, 1, 1), (1, 25, 10, 276), (1, 13,\n 5, 276)), (8, 'ethosu_pooling', 'NONE', (13, 5), (1, 1), (1, 1), (0, 0,\n 0, 0), (1, 13, 5, 276), (1, 1, 1, 276)), (9, 'ethosu_pooling', 'NONE',\n (7, 3), (2, 1), (1, 1), (0, 0, 0, 0), (1, 317, 14, 21), (1, 156, 12, 21))]"], {}), "(\n 'test_id, op_type, activation, kernel, stride, dilation, padding, in_shape, out_shape'\n , [(0, 'ethosu_conv2d', 'NONE', (34, 19), (2, 2), (1, 1), (0, 0, 0, 0),\n (1, 266, 111, 15), (1, 117, 47, 15)), (1, 'ethosu_conv2d', 'NONE', (14,\n 14), (1, 1), (1, 1), (0, 0, 0, 0), (1, 125, 63, 64), (1, 112, 50, 128)),\n (2, 'ethosu_conv2d', 'NONE', (7, 1), (2, 1), (1, 1), (0, 0, 0, 0), (1, \n 13, 4, 12), (1, 4, 4, 511)), (3, 'ethosu_conv2d', 'NONE', (5, 5), (1, 1\n ), (1, 1), (0, 0, 0, 0), (1, 96, 16, 276), (1, 92, 12, 16)), (4,\n 'ethosu_conv2d', 'NONE', (5, 5), (1, 1), (1, 1), (0, 0, 0, 0), (1, 96, \n 16, 276), (1, 92, 12, 1)), (5, 'ethosu_conv2d', 'NONE', (3, 3), (1, 1),\n (2, 2), (0, 0, 0, 0), (1, 62, 94, 32), (1, 58, 90, 16)), (6,\n 'ethosu_depthwise_conv2d', 'NONE', (3, 5), (1, 1), (1, 1), (0, 0, 0, 0),\n (1, 77, 23, 18), (1, 75, 19, 18)), (7, 'ethosu_depthwise_conv2d',\n 'NONE', (3, 3), (2, 2), (1, 1), (1, 1, 1, 1), (1, 25, 10, 276), (1, 13,\n 5, 276)), (8, 'ethosu_pooling', 'NONE', (13, 5), (1, 1), (1, 1), (0, 0,\n 0, 0), (1, 13, 5, 276), (1, 1, 1, 276)), (9, 'ethosu_pooling', 'NONE',\n (7, 3), (2, 1), (1, 1), (0, 0, 0, 0), (1, 317, 14, 21), (1, 156, 12, 21))])\n", (969, 2182), False, 'import pytest\n'), ((3465, 3590), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""layouts"""', "[('NHWC', 'NHWC'), ('NHCWB16', 'NHCWB16'), ('NHWC', 'NHCWB16'), ('NHCWB16',\n 'NHWC')]"], {}), "('layouts', [('NHWC', 'NHWC'), ('NHCWB16', 'NHCWB16'\n ), ('NHWC', 'NHCWB16'), ('NHCWB16', 'NHWC')])\n", (3488, 3590), False, 'import pytest\n'), ((3637, 5368), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""acc_config, expected_block_configs"""', "[('ethos-u55-32', [((1, 8, 4, 16), (1, 8, 1, 4, 16)), ((1, 6, 5, 16), (1, 6,\n 1, 5, 16)), ((1, 4, 4, 16), (1, 4, 1, 4, 16)), ((1, 8, 4, 16), (1, 8, 1,\n 4, 16)), ((1, 10, 6, 4), (1, 5, 1, 12, 4), (1, 16, 1, 4, 4)), ((1, 6, 5,\n 16), (1, 6, 1, 5, 16)), ((1, 6, 10, 16), (1, 6, 1, 10, 16)), ((1, 7, 5,\n 16), (1, 7, 1, 5, 16)), ((1, 1, 1, 16), (1, 1, 1, 1, 16)), ((1, 9, 6, \n 16), (1, 9, 1, 6, 16))]), ('ethos-u55-64', [((1, 8, 4, 16), (1, 8, 1, 4,\n 
16)), ((1, 6, 5, 16), (1, 6, 1, 5, 16)), ((1, 4, 4, 16), (1, 4, 1, 4, \n 16)), ((1, 8, 4, 16), (1, 8, 1, 4, 16)), ((1, 10, 6, 8), (1, 16, 1, 4, \n 8)), ((1, 6, 5, 16), (1, 6, 1, 5, 16)), ((1, 6, 10, 16), (1, 6, 1, 10, \n 16)), ((1, 7, 5, 16), (1, 7, 1, 5, 16)), ((1, 1, 1, 16), (1, 1, 1, 1, \n 16)), ((1, 9, 6, 16), (1, 9, 1, 6, 16))]), ('ethos-u55-128', [((1, 7, 6,\n 16), (1, 7, 1, 6, 16)), ((1, 5, 8, 16), (1, 5, 1, 8, 16)), ((1, 4, 4, \n 16), (1, 4, 1, 4, 16)), ((1, 16, 4, 16), (1, 16, 1, 4, 16)), ((1, 8, 12,\n 8), (1, 8, 1, 12, 8)), ((1, 10, 6, 16), (1, 10, 1, 6, 16)), ((1, 7, 10,\n 16), (1, 7, 1, 10, 16)), ((1, 7, 6, 16), (1, 7, 1, 6, 16)), ((1, 1, 2, \n 80), (1, 1, 5, 2, 16)), ((1, 10, 6, 16), (1, 10, 1, 6, 16))]), (\n 'ethos-u55-256', [((1, 14, 8, 16), (1, 14, 1, 8, 16)), ((1, 16, 8, 16),\n (1, 16, 1, 8, 16)), ((1, 4, 4, 16), (1, 4, 1, 4, 16)), ((1, 32, 4, 16),\n (1, 10, 12, 16), (1, 32, 1, 4, 16), (1, 10, 1, 12, 16)), ((1, 20, 12, 8\n ), (1, 20, 1, 12, 8)), ((1, 12, 10, 16), (1, 12, 1, 10, 16)), ((1, 8, \n 20, 16), (1, 8, 1, 20, 16)), ((1, 14, 6, 16), (1, 14, 1, 6, 16)), ((1, \n 2, 2, 48), (1, 2, 3, 2, 16)), ((1, 10, 12, 16), (1, 10, 1, 12, 16))])]"], {}), "('acc_config, expected_block_configs', [(\n 'ethos-u55-32', [((1, 8, 4, 16), (1, 8, 1, 4, 16)), ((1, 6, 5, 16), (1,\n 6, 1, 5, 16)), ((1, 4, 4, 16), (1, 4, 1, 4, 16)), ((1, 8, 4, 16), (1, 8,\n 1, 4, 16)), ((1, 10, 6, 4), (1, 5, 1, 12, 4), (1, 16, 1, 4, 4)), ((1, 6,\n 5, 16), (1, 6, 1, 5, 16)), ((1, 6, 10, 16), (1, 6, 1, 10, 16)), ((1, 7,\n 5, 16), (1, 7, 1, 5, 16)), ((1, 1, 1, 16), (1, 1, 1, 1, 16)), ((1, 9, 6,\n 16), (1, 9, 1, 6, 16))]), ('ethos-u55-64', [((1, 8, 4, 16), (1, 8, 1, 4,\n 16)), ((1, 6, 5, 16), (1, 6, 1, 5, 16)), ((1, 4, 4, 16), (1, 4, 1, 4, \n 16)), ((1, 8, 4, 16), (1, 8, 1, 4, 16)), ((1, 10, 6, 8), (1, 16, 1, 4, \n 8)), ((1, 6, 5, 16), (1, 6, 1, 5, 16)), ((1, 6, 10, 16), (1, 6, 1, 10, \n 16)), ((1, 7, 5, 16), (1, 7, 1, 5, 16)), ((1, 1, 1, 16), (1, 1, 1, 1, \n 16)), ((1, 9, 6, 16), (1, 9, 1, 6, 16))]), ('ethos-u55-128', [((1, 7, 6,\n 16), (1, 7, 1, 6, 16)), ((1, 5, 8, 16), (1, 5, 1, 8, 16)), ((1, 4, 4, \n 16), (1, 4, 1, 4, 16)), ((1, 16, 4, 16), (1, 16, 1, 4, 16)), ((1, 8, 12,\n 8), (1, 8, 1, 12, 8)), ((1, 10, 6, 16), (1, 10, 1, 6, 16)), ((1, 7, 10,\n 16), (1, 7, 1, 10, 16)), ((1, 7, 6, 16), (1, 7, 1, 6, 16)), ((1, 1, 2, \n 80), (1, 1, 5, 2, 16)), ((1, 10, 6, 16), (1, 10, 1, 6, 16))]), (\n 'ethos-u55-256', [((1, 14, 8, 16), (1, 14, 1, 8, 16)), ((1, 16, 8, 16),\n (1, 16, 1, 8, 16)), ((1, 4, 4, 16), (1, 4, 1, 4, 16)), ((1, 32, 4, 16),\n (1, 10, 12, 16), (1, 32, 1, 4, 16), (1, 10, 1, 12, 16)), ((1, 20, 12, 8\n ), (1, 20, 1, 12, 8)), ((1, 12, 10, 16), (1, 12, 1, 10, 16)), ((1, 8, \n 20, 16), (1, 8, 1, 20, 16)), ((1, 14, 6, 16), (1, 14, 1, 6, 16)), ((1, \n 2, 2, 48), (1, 2, 3, 2, 16)), ((1, 10, 12, 16), (1, 10, 1, 12, 16))])])\n", (3660, 5368), False, 'import pytest\n'), ((7652, 7689), 'tvm.contrib.ethosu.cascader.Propagator', 'cs.Propagator', (['ifm_matrix', 'ifm_offset'], {}), '(ifm_matrix, ifm_offset)\n', (7665, 7689), True, 'import tvm.contrib.ethosu.cascader as cs\n'), ((7714, 7757), 'tvm.contrib.ethosu.cascader.Propagator', 'cs.Propagator', (['weight_matrix', 'weight_offset'], {}), '(weight_matrix, weight_offset)\n', (7727, 7757), True, 'import tvm.contrib.ethosu.cascader as cs\n'), ((8058, 8091), 'tvm.contrib.ethosu.cascader.EthosuDeviceConfig', 'cs.EthosuDeviceConfig', (['acc_config'], {}), '(acc_config)\n', (8079, 8091), True, 'import tvm.contrib.ethosu.cascader as cs\n'), ((8519, 8542), 
'tvm.contrib.ethosu.cascader.TESubgraph', 'cs.TESubgraph', (['[]', 'None'], {}), '([], None)\n', (8532, 8542), True, 'import tvm.contrib.ethosu.cascader as cs\n'), ((8554, 8663), 'tvm.contrib.ethosu.cascader.EthosuPart', 'cs.EthosuPart', (['te_subgraph', '[propagator, weight_propagator]', 'output_quantum', 'subkernels', 'block_configs', '(1)'], {}), '(te_subgraph, [propagator, weight_propagator], output_quantum,\n subkernels, block_configs, 1)\n', (8567, 8663), True, 'import tvm.contrib.ethosu.cascader as cs\n'), ((8889, 8961), 'tvm.contrib.ethosu.cascader.StripeConfig', 'cs.StripeConfig', (['out_shape', 'out_shape', 'out_shape', 'order', 'stripes', 'offset'], {}), '(out_shape, out_shape, out_shape, order, stripes, offset)\n', (8904, 8961), True, 'import tvm.contrib.ethosu.cascader as cs\n'), ((9164, 9187), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (9175, 9187), False, 'import pytest\n'), ((7381, 7393), 'math.ceil', 'math.ceil', (['n'], {}), '(n)\n', (7390, 7393), False, 'import math\n'), ((7542, 7554), 'math.ceil', 'math.ceil', (['n'], {}), '(n)\n', (7551, 7554), False, 'import math\n'), ((7404, 7447), 'numpy.matmul', 'np.matmul', (['nhwc_to_nhcwb16', '(in_shape + (1,))'], {}), '(nhwc_to_nhcwb16, in_shape + (1,))\n', (7413, 7447), True, 'import numpy as np\n'), ((7565, 7609), 'numpy.matmul', 'np.matmul', (['nhwc_to_nhcwb16', '(out_shape + (1,))'], {}), '(nhwc_to_nhcwb16, out_shape + (1,))\n', (7574, 7609), True, 'import numpy as np\n')] |
import pyrender
import numpy as np
from matplotlib import pyplot
import math
# render settings
img_h = 480
img_w = 480
fx = 480.
fy = 480.
cx = 240
cy = 240
def model():
# note that xx is height here!
xx = -0.2
yy = -0.2
zz = -0.2
v000 = (xx, yy, zz) # 0
v001 = (xx, yy, zz + 0.4) # 1
v010 = (xx, yy + 0.4, zz) # 2
v011 = (xx, yy + 0.4, zz + 0.4) # 3
v100 = (xx + 0.4, yy, zz) # 4
v101 = (xx + 0.4, yy, zz + 0.4) # 5
v110 = (xx + 0.4, yy + 0.4, zz) # 6
v111 = (xx + 0.4, yy + 0.4, zz + 0.4) # 7
f1 = [0, 2, 4]
f2 = [4, 2, 6]
f3 = [1, 3, 5]
f4 = [5, 3, 7]
f5 = [0, 1, 2]
f6 = [1, 3, 2]
f7 = [4, 5, 7]
f8 = [4, 7, 6]
f9 = [4, 0, 1]
f10 = [4, 5, 1]
f11 = [2, 3, 6]
f12 = [3, 7, 6]
vertices = []
vertices.append(v000)
vertices.append(v001)
vertices.append(v010)
vertices.append(v011)
vertices.append(v100)
vertices.append(v101)
vertices.append(v110)
vertices.append(v111)
faces = []
faces.append(f1)
faces.append(f2)
faces.append(f3)
faces.append(f4)
faces.append(f5)
faces.append(f6)
faces.append(f7)
faces.append(f8)
faces.append(f9)
faces.append(f10)
faces.append(f11)
faces.append(f12)
return vertices, faces
def render(vertices, faces):
x = 0
y = math.pi/4
z = 0
R_x = np.array([[1, 0, 0], [0, math.cos(x), -math.sin(x)], [0, math.sin(x), math.cos(x)]])
R_y = np.array([[math.cos(y), 0, math.sin(y)], [0, 1, 0], [-math.sin(y), 0, math.cos(y)]])
R_z = np.array([[math.cos(z), -math.sin(z), 0], [math.sin(z), math.cos(z), 0], [0, 0, 1]])
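    # Compose the rotations: apply R_x, then R_y, then R_z about fixed world axes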
R = R_z.dot(R_y.dot(R_x))
np_vertices = np.array(vertices).astype(np.float64)
np_vertices = R.dot(np_vertices.T).T
np_vertices[:, 2] += 1.5
np_faces = np.array(faces).astype(np.float64)
np_faces += 1
depthmap, mask, img = pyrender.render(np_vertices.T.copy(), np_faces.T.copy(), np.array([fx, fy, cx, cy]), np.array([1., 2.]), np.array([img_h, img_w], dtype=np.int32))
pyplot.imshow(depthmap)
pyplot.show()
pyplot.imshow(img)
pyplot.show()
if __name__ == '__main__':
vertices, faces = model()
render(vertices, faces)
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"math.sin",
"numpy.array",
"math.cos"
] | [((2071, 2094), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['depthmap'], {}), '(depthmap)\n', (2084, 2094), False, 'from matplotlib import pyplot\n'), ((2099, 2112), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (2110, 2112), False, 'from matplotlib import pyplot\n'), ((2117, 2135), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['img'], {}), '(img)\n', (2130, 2135), False, 'from matplotlib import pyplot\n'), ((2140, 2153), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (2151, 2153), False, 'from matplotlib import pyplot\n'), ((1977, 2003), 'numpy.array', 'np.array', (['[fx, fy, cx, cy]'], {}), '([fx, fy, cx, cy])\n', (1985, 2003), True, 'import numpy as np\n'), ((2005, 2025), 'numpy.array', 'np.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (2013, 2025), True, 'import numpy as np\n'), ((2025, 2065), 'numpy.array', 'np.array', (['[img_h, img_w]'], {'dtype': 'np.int32'}), '([img_h, img_w], dtype=np.int32)\n', (2033, 2065), True, 'import numpy as np\n'), ((1717, 1735), 'numpy.array', 'np.array', (['vertices'], {}), '(vertices)\n', (1725, 1735), True, 'import numpy as np\n'), ((1840, 1855), 'numpy.array', 'np.array', (['faces'], {}), '(faces)\n', (1848, 1855), True, 'import numpy as np\n'), ((1418, 1429), 'math.cos', 'math.cos', (['x'], {}), '(x)\n', (1426, 1429), False, 'import math\n'), ((1450, 1461), 'math.sin', 'math.sin', (['x'], {}), '(x)\n', (1458, 1461), False, 'import math\n'), ((1463, 1474), 'math.cos', 'math.cos', (['x'], {}), '(x)\n', (1471, 1474), False, 'import math\n'), ((1499, 1510), 'math.cos', 'math.cos', (['y'], {}), '(y)\n', (1507, 1510), False, 'import math\n'), ((1515, 1526), 'math.sin', 'math.sin', (['y'], {}), '(y)\n', (1523, 1526), False, 'import math\n'), ((1558, 1569), 'math.cos', 'math.cos', (['y'], {}), '(y)\n', (1566, 1569), False, 'import math\n'), ((1594, 1605), 'math.cos', 'math.cos', (['z'], {}), '(z)\n', (1602, 1605), False, 'import math\n'), ((1626, 1637), 'math.sin', 'math.sin', (['z'], {}), '(z)\n', (1634, 1637), False, 'import math\n'), ((1639, 1650), 'math.cos', 'math.cos', (['z'], {}), '(z)\n', (1647, 1650), False, 'import math\n'), ((1432, 1443), 'math.sin', 'math.sin', (['x'], {}), '(x)\n', (1440, 1443), False, 'import math\n'), ((1542, 1553), 'math.sin', 'math.sin', (['y'], {}), '(y)\n', (1550, 1553), False, 'import math\n'), ((1608, 1619), 'math.sin', 'math.sin', (['z'], {}), '(z)\n', (1616, 1619), False, 'import math\n')] |
import numpy as np
FABRIC_WIDTH = 1000
FABRIC_HEIGHT = 1000
inputs = open('../input.txt', 'r')
data = inputs.readlines()
def parse_claim(current_claim):
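    # Claims look like "#123 @ 3,2: 5x4" -> id 123, offset (3, 2), size 5x4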
[raw_id, _, raw_offset, raw_dimensions] = current_claim.split(' ')
claim_id = int(raw_id.lstrip('#'))
[x_offset_raw, y_offset_raw] = raw_offset.rstrip(':').split(',')
offsets = (int(x_offset_raw), int(y_offset_raw))
[width_raw, height_raw] = raw_dimensions.split('x')
claim_size = (int(width_raw), int(height_raw))
return claim_id, offsets, claim_size
def apply_claim(fabric_matrix, offsets, claim_size, current_claim_id):
(o_x, o_y) = offsets
(width, height) = claim_size
x_max = o_x + width
y_max = o_y + height
current_claim = np.full(claim_size, current_claim_id, dtype=int)
existing_claims = np.unique(fabric_matrix[o_x:x_max, o_y:y_max].flatten())
existing_claims = existing_claims[np.where(existing_claims > 0)]
fabric_matrix[o_x:x_max, o_y:y_max] += current_claim
return existing_claims
fabric = np.zeros((FABRIC_WIDTH, FABRIC_HEIGHT), dtype=int)
valid_claims = set()
for claim in data:
claim_id, offset_coords, size = parse_claim(claim)
valid_claims.add(claim_id)
(min_x, min_y) = offset_coords
(w, h) = size
max_x = min_x + w
max_y = min_y + h
remove_claims = apply_claim(fabric, offset_coords, size, claim_id)
if remove_claims.size > 0:
valid_claims.remove(claim_id)
for c in remove_claims.tolist():
if c in valid_claims:
valid_claims.remove(c)
if len(valid_claims) > 0:
remaining_claim = valid_claims.pop()
print('remaining set: {}'.format(remaining_claim)) | [
"numpy.full",
"numpy.where",
"numpy.zeros"
] | [((1029, 1079), 'numpy.zeros', 'np.zeros', (['(FABRIC_WIDTH, FABRIC_HEIGHT)'], {'dtype': 'int'}), '((FABRIC_WIDTH, FABRIC_HEIGHT), dtype=int)\n', (1037, 1079), True, 'import numpy as np\n'), ((737, 785), 'numpy.full', 'np.full', (['claim_size', 'current_claim_id'], {'dtype': 'int'}), '(claim_size, current_claim_id, dtype=int)\n', (744, 785), True, 'import numpy as np\n'), ((903, 932), 'numpy.where', 'np.where', (['(existing_claims > 0)'], {}), '(existing_claims > 0)\n', (911, 932), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from tqdm.auto import trange
class EpochLoggerCallback(tf.keras.callbacks.Callback):
"""
Log the result every epoch instead of every step
"""
def __init__(self, keys, epochs, logger=None, decs='Training', decimal=2):
"""
Args:
            keys: pairs mapping a display key to a logs key,
                e.g. [('TrainLoss', 'loss'), ('ValLoss', 'val_loss')]
            epochs: total number of epochs
            logger: epoch logger with a ``store`` method, or None
            decs: description shown on the progress bar
            decimal: number of decimals displayed for each logged value
"""
self.logger = logger
self.epochs = epochs
self.keys = keys
self.decs = decs
self.decimal = decimal
super(EpochLoggerCallback, self).__init__()
def on_train_begin(self, logs=None):
self.t = trange(self.epochs, desc=self.decs, leave=True)
def on_train_end(self, logs=None):
self.t.close()
def on_epoch_end(self, epoch, logs=None):
display = []
for display_key, log_key in self.keys:
display.append((display_key, logs[log_key]))
assert not np.isnan(logs[log_key])
if self.logger is not None:
display = {key: value for key, value in display}
self.logger.store(**display)
else:
display = [f'{key}: {value:.{self.decimal}f}' for key, value in display]
message = ', '.join(display)
self.t.set_description(message)
self.t.update(1)
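# Minimal usage sketch (assumes a compiled tf.keras model `model` and numpy
# arrays `x`, `y`; the names are illustrative only):
#   cb = EpochLoggerCallback(keys=[('TrainLoss', 'loss')], epochs=10)
#   model.fit(x, y, epochs=10, verbose=0, callbacks=[cb])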
| [
"numpy.isnan",
"tqdm.auto.trange"
] | [((786, 833), 'tqdm.auto.trange', 'trange', (['self.epochs'], {'desc': 'self.decs', 'leave': '(True)'}), '(self.epochs, desc=self.decs, leave=True)\n', (792, 833), False, 'from tqdm.auto import trange\n'), ((1092, 1115), 'numpy.isnan', 'np.isnan', (['logs[log_key]'], {}), '(logs[log_key])\n', (1100, 1115), True, 'import numpy as np\n')] |
import numpy as np
import torch
import matplotlib.pyplot as plt
import matplotlib.pylab as pl
from BoManifolds.Riemannian_utils.utils import rotation_matrix_from_axis_angle
from BoManifolds.plot_utils.manifolds_plots import plot_spd_cone
plt.rcParams['text.usetex'] = True
plt.rcParams['text.latex.preamble'] = r'\usepackage{bm}'
'''
This file is part of the GaBOtorch library.
Authors: <NAME> and <NAME>, 2020
License: MIT
Contact: <EMAIL>, <EMAIL>
The functions of this file are based on the function of botorch (in botorch.optim).
'''
def bo_plot_function_sphere(ax, function, true_opt_x=None, true_opt_y=None, xs=None, max_colors=None,
alpha=0.4, elev=30, azim=-60, n_elems=100):
"""
Plot a function on the surface of a 2-sphere
Parameters
----------
:param ax: figure axis
:param function: function to plot
Optional parameters
-------------------
:param true_opt_x: true minimum point on the sphere [1 x 3]
:param true_opt_y: true minimum value
:param xs: samples of the BO [n x 3]
:param max_colors: maximum value (to bound the colors)
:param alpha: transparency
:param elev: axis elevation
    :param azim: axis azimuth
:param n_elems: number of elements to approximate the sphere
Returns
-------
:return: max_colors
"""
# Make the panes transparent
ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# Make the grid lines transparent
ax.xaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
ax.yaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
ax.zaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
# Remove axis
ax._axis3don = False
# Initial view
ax.view_init(elev=elev, azim=azim)
# Sphere
u = np.linspace(0.0001, 2 * np.pi, n_elems)
v = np.linspace(0.0001, np.pi, n_elems)
r = 1
x_sphere = r * np.outer(np.cos(u), np.sin(v))
y_sphere = r * np.outer(np.sin(u), np.sin(v))
z_sphere = r * np.outer(np.ones(np.size(u)), np.cos(v))
# Colors in function of the function
colors = np.zeros(x_sphere.shape)
for i in range(x_sphere.shape[0]):
for j in range(x_sphere.shape[1]):
data_tmp = torch.Tensor([[x_sphere[i, j], y_sphere[i, j], z_sphere[i, j]]])
colors[i, j] = function(data_tmp).detach().numpy()
if true_opt_y is not None:
min_colors = true_opt_y
else:
min_colors = np.min(colors)
colors = colors - min_colors
if max_colors is None:
max_colors = np.max(colors)
else:
        colors = np.min([colors, max_colors * np.ones(colors.shape)], axis=0)  # clip at max_colors
colors = pl.cm.inferno(np.ones(colors.shape) - colors / max_colors)
ax.plot_surface(x_sphere, y_sphere, z_sphere, rstride=4, cstride=4, facecolors=colors, linewidth=0., alpha=alpha)
# Plots xs
if xs is not None:
for n in range(xs.shape[0]):
ax.scatter(xs[n, 0], xs[n, 1], xs[n, 2], c='k')
# Plot true minimum
if true_opt_x is not None:
ax.scatter(true_opt_x[0, 0], true_opt_x[0, 1], true_opt_x[0, 2], s=100, c='limegreen', marker='*')
# Limits
lim = 1.1
ax.set_xlim([-lim, lim])
ax.set_ylim([-lim, lim])
ax.set_zlim([-lim, lim])
return max_colors
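# Usage sketch for the function above (`fct` is a hypothetical objective that
# maps a [1 x 3] torch.Tensor on the sphere to a scalar):
#   fig = plt.figure(figsize=(6, 6))
#   ax = fig.add_subplot(111, projection='3d')
#   bo_plot_function_sphere(ax, fct, n_elems=50)
#   plt.show()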
def bo_plot_function_sphere_planar(fig, function, xs=None, ys=None, true_opt_x=None, true_opt_y=None,
max_colors=None, alpha=0.2, n_elems=100):
"""
Plot a function on the sphere as 2d-projections
Parameters
----------
:param fig: figure
:param function: function to plot
Optional parameters
-------------------
:param xs: observations of the GP (samples of the BO) [n x 3]
:param ys: value of the observations
:param true_opt_x: true optimum [1 x 3]
:param true_opt_y: true optimum value
:param max_colors: maximum value (to bound the colors)
:param n_elems: number of elements to approximate the sphere
Returns
-------
:return: axis of the two subplots
"""
# Sphere
u = np.linspace(0, 2 * np.pi, n_elems)
v = np.linspace(0, np.pi, n_elems)
r = 1
x_sphere = r * np.outer(np.cos(u), np.sin(v))
y_sphere = r * np.outer(np.sin(u), np.sin(v))
z_sphere = r * np.outer(np.ones(np.size(u)), np.cos(v))
# Value of the function
fmean = np.zeros(x_sphere.shape)
for i in range(x_sphere.shape[0]):
for j in range(x_sphere.shape[1]):
data_tmp = torch.Tensor([[x_sphere[i, j], y_sphere[i, j], z_sphere[i, j]]])
fmean[i, j] = function(data_tmp).detach().numpy()
if true_opt_y is not None:
min_colors = true_opt_y
else:
min_colors = np.min(fmean)
colors = fmean - min_colors
if max_colors is None:
max_colors = np.max(colors)
else:
        colors = np.min([colors, max_colors * np.ones(colors.shape)], axis=0)  # clip at max_colors
colors = colors / max_colors
colors = pl.cm.inferno(np.ones(colors.shape) - colors)
if ys is not None:
colors_ys = pl.cm.inferno(np.ones(ys.shape) - (ys - min_colors) / max_colors)
# Plot x vs y
ax1 = fig.add_subplot(121, projection='3d')
# Make the panes transparent
ax1.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.plot_surface(x_sphere, y_sphere, fmean, rstride=8, cstride=8, facecolors=colors, alpha=alpha,
edgecolor='white', linewidth=0.3)
# ax1.plot_surface(x_sphere, y_sphere, fmean, rstride=8, cstride=8, color='deepskyblue', alpha=0.2,
# edgecolor='gray', linewidth=0.3)
# Plots xs
if xs is not None and ys is not None:
for n in range(xs.shape[0]):
ax1.scatter(xs[n, 0], xs[n, 1], ys[n], c='darkblue', s=25)
# ax1.scatter(xs[n, 0], xs[n, 1], ys[n], c=colors_ys[n], s=25)
# Plot true minimum
if true_opt_x is not None and true_opt_y is not None:
ax1.scatter(true_opt_x[0, 0], true_opt_x[0, 1], true_opt_y, s=100, c='limegreen', marker='*')
ax1.locator_params(axis='x', nbins=4)
ax1.locator_params(axis='y', nbins=4)
ax1.locator_params(axis='z', nbins=4)
ax1.tick_params(labelsize=16)
ax1.set_xlabel(r'$x_1$', fontsize=24)
ax1.set_ylabel(r'$x_2$', fontsize=24)
ax1.set_zlabel(r'$f(\bm{x})$', fontsize=24)
# ax1.zaxis.set_rotate_label(False) # disable automatic rotation
# ax1.set_zlabel('Cost value', fontsize=20, rotation=90)
# Plot y vs z
ax2 = fig.add_subplot(122, projection='3d')
# Make the panes transparent
ax2.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax2.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax2.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax2.plot_surface(y_sphere, z_sphere, fmean, rstride=8, cstride=8, facecolors=colors, alpha=alpha, edgecolor='white',
linewidth=0.3)
# ax2.plot_surface(y_sphere, z_sphere, fmean, rstride=8, cstride=8, color='deepskyblue', alpha=0.2,
# edgecolor='gray', linewidth=0.3)
# Plots xs
if xs is not None and ys is not None:
for n in range(xs.shape[0]):
ax2.scatter(xs[n, 1], xs[n, 2], ys[n], c='darkblue', s=25)
# ax2.scatter(xs[n, 1], xs[n, 2], ys[n], c=colors_ys[n], s=25)
# Plot true minimum
if true_opt_x is not None and true_opt_y is not None:
ax2.scatter(true_opt_x[0, 1], true_opt_x[0, 2], true_opt_y, s=100, c='limegreen', marker='*')
ax2.locator_params(axis='x', nbins=4)
ax2.locator_params(axis='y', nbins=4)
ax2.locator_params(axis='z', nbins=4)
ax2.tick_params(labelsize=16)
ax2.set_xlabel(r'$x_2$', fontsize=24)
ax2.set_ylabel(r'$x_3$', fontsize=24)
ax2.set_zlabel(r'$f(\bm{x})$', fontsize=24)
# ax2.zaxis.set_rotate_label(False) # disable automatic rotation
# ax2.set_zlabel('Cost value', fontsize=20, rotation=90)
return ax1, ax2
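# Usage sketch (same hypothetical objective `fct` as above; the planar variant
# creates its own 3d subplots on the given figure):
#   fig = plt.figure(figsize=(12, 6))
#   ax1, ax2 = bo_plot_function_sphere_planar(fig, fct, n_elems=50)
#   plt.show()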
def bo_plot_acquisition_sphere(ax, acq_fct, xs=None, opt_x=None, true_opt_x=None, alpha=0.4, elev=30, azim=-60,
n_elems=100):
"""
Plot an acquisition function at the surface of the sphere
Parameters
----------
:param ax: figure axis
:param acq_fct: acquisition function
Optional parameters
-------------------
:param xs: samples of the BO [n x 3]
:param opt_x: current best optimizer of the BO [1 x 3]
:param true_opt_x: true best optimizer [1 x 3]
:param alpha: transparency
:param elev: axis elevation
:param azim: axis azimut
:param n_elems: number of elements to approximate the sphere
Returns
-------
:return: -
"""
# Make the panes transparent
ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# Make the grid lines transparent
ax.xaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
ax.yaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
ax.zaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
# Remove axis
ax._axis3don = False
# Initial view
ax.view_init(elev=elev, azim=azim)
# Sphere
u = np.linspace(0, 2 * np.pi, n_elems)
v = np.linspace(0, np.pi, n_elems)
r = 1
x_sphere = r * np.outer(np.cos(u), np.sin(v))
y_sphere = r * np.outer(np.sin(u), np.sin(v))
z_sphere = r * np.outer(np.ones(np.size(u)), np.cos(v))
# Colors in function of acquisition function
colors = np.zeros(x_sphere.shape)
for i in range(x_sphere.shape[0]):
for j in range(x_sphere.shape[1]):
data_tmp = torch.Tensor([[x_sphere[i, j], y_sphere[i, j], z_sphere[i, j]]]).double()
colors[i, j] = acq_fct(data_tmp).detach().numpy()
colors = colors - np.min(colors)
colors = colors / np.max(colors)
colors = pl.cm.inferno(np.ones(colors.shape) - colors)
ax.plot_surface(x_sphere, y_sphere, z_sphere, rstride=4, cstride=4, facecolors=colors, linewidth=0., alpha=alpha)
# Plots xs
if xs is not None:
for n in range(xs.shape[0]):
ax.scatter(xs[n, 0], xs[n, 1], xs[n, 2], c='k')
# Plot opt x
if opt_x is not None:
ax.scatter(opt_x[0, 0], opt_x[0, 1], opt_x[0, 2], s=30, c='deepskyblue', marker='D')
# Plot true minimum
if true_opt_x is not None:
ax.scatter(true_opt_x[0, 0], true_opt_x[0, 1], true_opt_x[0, 2], s=100, c='limegreen', marker='*')
# Limits
lim = 1.1
ax.set_xlim([-lim, lim])
ax.set_ylim([-lim, lim])
ax.set_zlim([-lim, lim])
def bo_plot_gp_sphere(ax, model, xs=None, opt_x=None, true_opt_x=None, true_opt_y=None, max_colors=None,
elev=30, azim=-60, n_elems=100):
"""
Plot a GP at the surface of the sphere
Parameters
----------
:param ax: figure axis
:param model: GP model
Optional parameters
-------------------
:param xs: observations of the GP (samples of the BO) [n x 3]
:param opt_x: current best optimizer (of the BO) [1 x 3]
:param true_opt_x: true optimum [1 x 3]
:param true_opt_y: true optimum value
:param max_colors: maximum value (to bound the colors)
:param elev: axis elevation
    :param azim: axis azimuth
:param n_elems: number of elements to approximate the sphere
Returns
-------
:return: -
"""
# Make the panes transparent
ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# Make the grid lines transparent
ax.xaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
ax.yaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
ax.zaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
# Remove axis
# ax._axis3don = False
# Initial view
ax.view_init(elev=elev, azim=azim)
# Sphere
u = np.linspace(0, 2 * np.pi, n_elems)
v = np.linspace(0, np.pi, n_elems)
r = 1
x_sphere = r * np.outer(np.cos(u), np.sin(v))
y_sphere = r * np.outer(np.sin(u), np.sin(v))
z_sphere = r * np.outer(np.ones(np.size(u)), np.cos(v))
# Colors in function of acquisition function
colors = np.zeros(x_sphere.shape)
for i in range(x_sphere.shape[0]):
for j in range(x_sphere.shape[1]):
data_tmp = torch.Tensor([[x_sphere[i, j], y_sphere[i, j], z_sphere[i, j]]]).double()
colors[i, j] = model(data_tmp).mean.detach().numpy()
if true_opt_y is not None:
min_colors = true_opt_y
else:
min_colors = np.min(colors)
colors = colors - min_colors
if max_colors is None:
max_colors = np.max(colors)
else:
        colors = np.min([colors, max_colors * np.ones(colors.shape)], axis=0)  # clip at max_colors
    colors = colors / max_colors
colors = pl.cm.inferno(np.ones(colors.shape) - colors)
ax.plot_surface(x_sphere, y_sphere, z_sphere, rstride=4, cstride=4, facecolors=colors, linewidth=0., alpha=0.4)
# Plots xs
if xs is not None:
for n in range(xs.shape[0]):
ax.scatter(xs[n, 0], xs[n, 1], xs[n, 2], c='k')
# Plot opt x
if opt_x is not None:
ax.scatter(opt_x[0, 0], opt_x[0, 1], opt_x[0, 2], s=50, c='deepskyblue', marker='D')
# Plot true minimum
if true_opt_x is not None:
ax.scatter(true_opt_x[0, 0], true_opt_x[0, 1], true_opt_x[0, 2], s=100, c='limegreen', marker='*')
# Limits
lim = 1.1
ax.set_xlim([-lim, lim])
ax.set_ylim([-lim, lim])
ax.set_zlim([-lim, lim])
# Labels
ax.locator_params(axis='x', nbins=4)
ax.locator_params(axis='y', nbins=4)
ax.locator_params(axis='z', nbins=4)
ax.tick_params(labelsize=16)
ax.set_xlabel(r'$x_1$', fontsize=24)
ax.set_ylabel(r'$x_2$', fontsize=24)
ax.set_zlabel(r'$x_3$', fontsize=24)
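# Usage sketch (assumes a fitted GP `model` whose call returns an object with
# a `.mean` tensor, as used above; `x_obs` is a hypothetical [n x 3] array):
#   fig = plt.figure(figsize=(6, 6))
#   ax = fig.add_subplot(111, projection='3d')
#   bo_plot_gp_sphere(ax, model, xs=x_obs, n_elems=50)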
def bo_plot_gp_sphere_planar(fig, model, var_fact=2., xs=None, ys=None, opt_x=None, opt_y=None, true_opt_x=None,
                             true_opt_y=None, max_colors=None, n_elems=100):
"""
Plot a GP on the sphere as 2d-projections
Parameters
----------
:param fig: figure
:param model: GP model
Optional parameters
-------------------
:param var_fact: displayed variance factor
:param xs: observations of the GP (samples of the BO) [n x 3]
:param ys: value of the observations
:param opt_x: current best optimizer (of the BO) [1 x 3]
:param opt_y: value of the current best optimizer (of the BO)
:param true_opt_x: true optimum [1 x 3]
:param true_opt_y: true optimum value
:param max_colors: maximum value (to bound the colors)
:param n_elems: number of elements to approximate the sphere
Returns
-------
:return: axis of the two subplots
"""
# Sphere
u = np.linspace(0, 2 * np.pi, n_elems)
v = np.linspace(0, np.pi, n_elems)
r = 1
x_sphere = r * np.outer(np.cos(u), np.sin(v))
y_sphere = r * np.outer(np.sin(u), np.sin(v))
z_sphere = r * np.outer(np.ones(np.size(u)), np.cos(v))
# Values and color in function of function
fmean = np.zeros(x_sphere.shape)
fvar = np.zeros(x_sphere.shape)
for i in range(x_sphere.shape[0]):
for j in range(x_sphere.shape[1]):
data_tmp = torch.tensor([[x_sphere[i, j], y_sphere[i, j], z_sphere[i, j]]]).double()
data_tmp_vals = model(data_tmp)
fmean[i, j] = data_tmp_vals.mean.detach().numpy()
fvar[i, j] = data_tmp_vals.variance.detach().numpy()
if true_opt_y is not None:
min_colors = true_opt_y
else:
min_colors = np.min(fmean)
colors = fmean - min_colors
if max_colors is None:
max_colors = np.max(colors)
else:
        colors = np.min([colors, max_colors * np.ones(colors.shape)], axis=0)  # clip at max_colors
colors = colors / max_colors
colors = pl.cm.inferno(np.ones(colors.shape) - colors)
if ys is not None:
colors_ys = pl.cm.inferno(np.ones(ys.shape) - (ys - min_colors) / max_colors)
# Plot x vs y
ax1 = fig.add_subplot(121, projection='3d')
# Make the panes transparent
ax1.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.plot_surface(x_sphere, y_sphere, fmean, rstride=8, cstride=8, facecolors=colors, alpha=0.2, edgecolor='white',
linewidth=0.3)
# ax1.plot_surface(x_sphere, y_sphere, fmean, rstride=8, cstride=8, color='deepskyblue', alpha=0.2,
# edgecolor='gray', linewidth=0.3)
ax1.plot_surface(x_sphere, y_sphere, fmean + var_fact*fvar, rstride=4, cstride=4, color=[0.5, 0.5, 0.5], alpha=0.1)
ax1.plot_surface(x_sphere, y_sphere, fmean - var_fact*fvar, rstride=4, cstride=4, color=[0.5, 0.5, 0.5], alpha=0.1)
# Plots xs
if xs is not None and ys is not None:
for n in range(xs.shape[0]):
# ax1.scatter(xs[n, 0], xs[n, 1], ys[n], c='darkblue', s=25)
ax1.scatter(xs[n, 0], xs[n, 1], ys[n], c=colors_ys[n], s=25)
# Plot opt x
if opt_x is not None and opt_y is not None:
ax1.scatter(opt_x[0, 0], opt_x[0, 1], opt_y, s=50, c='deepskyblue', marker='D')
# Plot true minimum
if true_opt_x is not None and true_opt_y is not None:
ax1.scatter(true_opt_x[0, 0], true_opt_x[0, 1], true_opt_y, s=100, c='limegreen', marker='*')
ax1.locator_params(axis='x', nbins=4)
ax1.locator_params(axis='y', nbins=4)
ax1.locator_params(axis='z', nbins=4)
ax1.tick_params(labelsize=16)
ax1.set_xlabel(r'$x_1$', fontsize=24)
ax1.set_ylabel(r'$x_2$', fontsize=24)
ax1.set_zlabel(r'$f(\bm{x})$', fontsize=24)
# ax1.zaxis.set_rotate_label(False) # disable automatic rotation
# ax1.set_zlabel('Cost value', fontsize=20, rotation=90)
# Plot y vs z
ax2 = fig.add_subplot(122, projection='3d')
# Make the panes transparent
ax2.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax2.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax2.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax2.plot_surface(y_sphere, z_sphere, fmean, rstride=8, cstride=8, facecolors=colors, alpha=0.2,
edgecolor='white', linewidth=0.3)
# ax2.plot_surface(y_sphere, z_sphere, fmean, rstride=8, cstride=8, color='deepskyblue', alpha=0.2,
# edgecolor='gray', linewidth=0.3)
ax2.plot_surface(y_sphere, z_sphere, fmean + var_fact*fvar, rstride=4, cstride=4, color=[0.5, 0.5, 0.5], alpha=0.1)
ax2.plot_surface(y_sphere, z_sphere, fmean - var_fact*fvar, rstride=4, cstride=4, color=[0.5, 0.5, 0.5], alpha=0.1)
# Plots xs
if xs is not None and ys is not None:
for n in range(xs.shape[0]):
# ax2.scatter(xs[n, 1], xs[n, 2], ys[n], c='darkblue', s=25)
ax2.scatter(xs[n, 1], xs[n, 2], ys[n], c=colors_ys[n], s=25)
# Plot opt x
if opt_x is not None and opt_y is not None:
ax2.scatter(opt_x[0, 1], opt_x[0, 2], opt_y, s=50, c='deepskyblue', marker='D')
# Plot true minimum
if true_opt_x is not None and true_opt_y is not None:
ax2.scatter(true_opt_x[0, 1], true_opt_x[0, 2], true_opt_y, s=100, c='limegreen', marker='*')
ax2.locator_params(axis='x', nbins=4)
ax2.locator_params(axis='y', nbins=4)
ax2.locator_params(axis='z', nbins=4)
ax2.tick_params(labelsize=16)
ax2.set_xlabel(r'$x_2$', fontsize=24)
ax2.set_ylabel(r'$x_3$', fontsize=24)
ax2.set_zlabel(r'$f(\bm{x})$', fontsize=24)
# ax2.zaxis.set_rotate_label(False) # disable automatic rotation
# ax2.set_zlabel('Cost value', fontsize=20, rotation=90)
return ax1, ax2
def bo_plot_function_spd(ax, function, r_cone, true_opt_x=None, true_opt_y=None, chol=False, max_colors=None,
alpha=0.3, elev=10, azim=-20, n_elems=100, n_elems_h=10):
"""
Plot a function in the SPD cone
Parameters
----------
:param ax: figure axis
:param function: function
:param r_cone: cone radius
Optional parameters
-------------------
    :param true_opt_x: true minimum point on the manifold (2 x 2 SPD matrix; indexed as such below)
:param true_opt_y: true minimum value
:param chol: if True, the Cholesky decomposition is used
:param max_colors: maximum value (to bound the colors)
:param alpha: transparency
:param elev: axis elevation
    :param azim: axis azimuth
:param n_elems: number of elements to plot in a slice of the cone
:param n_elems_h: number of slices of the cone to plot
Returns
-------
:return: max_colors
"""
# Make the panes transparent
ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# Make the grid lines transparent
ax.xaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
ax.yaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
ax.zaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
# Remove axis
ax._axis3don = False
# Initial view
ax.view_init(elev=elev, azim=azim)
# ax.view_init(elev=10, azim=50.)
# Plot SPD cone
plot_spd_cone(ax, r=r_cone, lim_fact=0.8)
# Values of test function for points on the manifold
phi = np.linspace(0, 2 * np.pi, n_elems)
# Matrix for rotation of 45° of the cone
    rot_axis = np.cross(np.array([1, 0, 0]), np.array([1., 1., 0.]))
    R = rotation_matrix_from_axis_angle(rot_axis, np.pi / 4.)
# Points of the cone
h = np.linspace(0.01, r_cone, n_elems_h)
x_cone = np.zeros((n_elems_h, n_elems, n_elems))
y_cone = np.zeros((n_elems_h, n_elems, n_elems))
z_cone = np.zeros((n_elems_h, n_elems, n_elems))
colors = np.zeros((n_elems_h, n_elems, n_elems))
for k in range(n_elems_h):
r = np.linspace(0, h[k] - 0.01, n_elems)
for i in range(n_elems):
# Points on a plane cutting the cone
xyz = np.vstack((h[k] * np.ones(n_elems), r[i] * np.sin(phi), r[i] / np.sqrt(2) * np.cos(phi)))
# Rotation
xyz = R.dot(xyz)
# Coordinates
x_cone[k, i] = xyz[0]
y_cone[k, i] = xyz[1]
z_cone[k, i] = xyz[2]
# Compute the function values at given points
for i in range(n_elems):
for j in range(n_elems):
if not chol:
data_tmp = torch.tensor([[x_cone[k, i, j], y_cone[k, i, j], z_cone[k, i, j] * np.sqrt(2)]]).double()
colors[k, i, j] = function(data_tmp).detach().numpy()
else:
indices = np.tril_indices(2)
data_tmp = np.array([[x_cone[k, i, j], z_cone[k, i, j]], [z_cone[k, i, j], y_cone[k, i, j]]])
data_chol_tmp = torch.tensor(np.linalg.cholesky(data_tmp), dtype=torch.float64)
colors[k, i, j] = function(data_chol_tmp[indices]).detach().numpy()
# Rescale the colors
if true_opt_y is not None:
min_colors = true_opt_y
else:
min_colors = np.min(colors)
colors = (colors - min_colors)
if max_colors is None:
max_colors = np.max(colors)
else:
        colors = np.min([colors, max_colors * np.ones(colors.shape)], axis=0)  # clip at max_colors
colors = colors / max_colors
# Plot surfaces
for k in range(n_elems_h):
colors_plot = pl.cm.inferno(np.ones((n_elems, n_elems)) - colors[k])
ax.plot_surface(x_cone[k], y_cone[k], z_cone[k], rstride=4, cstride=4, facecolors=colors_plot, linewidth=0., alpha=alpha)
# Plot optimal point
if true_opt_x is not None:
ax.scatter(true_opt_x[0, 0], true_opt_x[1, 1], true_opt_x[0, 1], s=100, c='g', marker='*')
return max_colors
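# Usage sketch (`fct` is a hypothetical objective on 2x2 SPD matrices in the
# [x, y, z*sqrt(2)] vector parametrisation used above):
#   fig = plt.figure(figsize=(6, 6))
#   ax = fig.add_subplot(111, projection='3d')
#   bo_plot_function_spd(ax, fct, r_cone=5., n_elems=50, n_elems_h=5)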
def bo_plot_acquisition_spd(ax, acq_fct, r_cone, xs=None, ys=None, opt_x=None, true_opt_x=None, chol=False, alpha=0.3,
elev=10, azim=-20, n_elems=100, n_elems_h=10):
"""
Plot an acquisition function in the SPD cone
Parameters
----------
:param ax: figure axis
:param acq_fct: acquisition function
:param r_cone: cone radius
Optional parameters
-------------------
:param xs: samples of the BO [n x 3]
:param ys: value of the samples of the BO
:param opt_x: current best optimizer of the BO [1 x 3]
:param true_opt_x: true minimum point [1 x 3]
:param chol: if True, the Cholesky decomposition is used
:param alpha: transparency
:param elev: axis elevation
    :param azim: axis azimuth
:param n_elems: number of elements to plot in a slice of the cone
:param n_elems_h: number of slices of the cone to plot
Returns
-------
:return: -
"""
# Make the panes transparent
ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# Make the grid lines transparent
ax.xaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
ax.yaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
ax.zaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
# Remove axis
ax._axis3don = False
# Initial view
ax.view_init(elev=elev, azim=azim)
# Plot SPD cone
plot_spd_cone(ax, r=r_cone, lim_fact=0.8)
# Values of test function for points on the manifold
phi = np.linspace(0, 2 * np.pi, n_elems)
# Matrix for rotation of 45° of the cone
    rot_axis = np.cross(np.array([1, 0, 0]), np.array([1., 1., 0.]))
    R = rotation_matrix_from_axis_angle(rot_axis, np.pi / 4.)
# Points of the cone
h = np.linspace(0.01, r_cone, n_elems_h)
x_cone = np.zeros((n_elems_h, n_elems, n_elems))
y_cone = np.zeros((n_elems_h, n_elems, n_elems))
z_cone = np.zeros((n_elems_h, n_elems, n_elems))
colors = np.zeros((n_elems_h, n_elems, n_elems))
for k in range(n_elems_h):
r = np.linspace(0, h[k] - 0.01, n_elems)
for i in range(n_elems):
# Points on a plane cutting the cone
xyz = np.vstack((h[k] * np.ones(n_elems), r[i] * np.sin(phi), r[i] / np.sqrt(2) * np.cos(phi)))
# Rotation
xyz = R.dot(xyz)
# Coordinates
x_cone[k, i] = xyz[0]
y_cone[k, i] = xyz[1]
z_cone[k, i] = xyz[2]
for i in range(n_elems):
for j in range(n_elems):
if not chol:
data_tmp = torch.tensor([[x_cone[k, i, j], y_cone[k, i, j], z_cone[k, i, j] * np.sqrt(2)]]).double()
colors[k, i, j] = acq_fct(data_tmp).detach().numpy()
else:
indices = np.tril_indices(2)
data_tmp = np.array([[x_cone[k, i, j], z_cone[k, i, j]], [z_cone[k, i, j], y_cone[k, i, j]]])
data_chol_tmp = torch.tensor(np.linalg.cholesky(data_tmp), dtype=torch.float64)
colors[k, i, j] = acq_fct(data_chol_tmp[indices][None]).detach().numpy()
min_colors = np.min(colors)
colors = (colors - min_colors)
max_colors = np.max(colors)
colors = np.min([max_colors * np.ones(colors.shape), colors], axis=0)
colors = colors / max_colors
if ys is not None:
colors_ys = pl.cm.inferno(np.ones(ys.shape) - (ys - min_colors) / max_colors)
for k in range(n_elems_h):
colors_plot = pl.cm.inferno(np.ones((n_elems, n_elems)) - colors[k])
ax.plot_surface(x_cone[k], y_cone[k], z_cone[k], rstride=4, cstride=4, facecolors=colors_plot, linewidth=0.,
alpha=alpha)
# Plots xs
if xs is not None and ys is not None:
for n in range(xs.shape[0]):
ax.scatter(xs[n, 0], xs[n, 1], xs[n, 2] / np.sqrt(2), s=30, c='k')
# ax.scatter(xs[n, 0], xs[n, 1], xs[n, 2] / np.sqrt(2), s=30, c=colors_ys[n])
# Plot opt x
if opt_x is not None:
ax.scatter(opt_x[0, 0], opt_x[0, 1], opt_x[0, 2] / np.sqrt(2), s=60, c='deepskyblue', marker='D')
# Plot true minimum
if true_opt_x is not None:
ax.scatter(true_opt_x[0, 0], true_opt_x[0, 1], true_opt_x[0, 2] / np.sqrt(2), s=100, c='g', marker='*')
def bo_plot_gp_spd(ax, model, r_cone, xs=None, ys=None, opt_x=None, true_opt_x=None, true_opt_y=None, chol=False,
max_colors=None, alpha=0.3, elev=10, azim=-20, n_elems=100, n_elems_h=10):
"""
Plot a GP in the SPD cone
Parameters
----------
:param ax: figure axis
:param model: GP model
:param r_cone: cone radius
Optional parameters
-------------------
:param xs: samples of the BO [n x 3]
:param ys: value of the samples of the BO
:param opt_x: current best optimizer of the BO [1 x 3]
:param true_opt_x: true minimum point [1 x 3]
:param true_opt_y: true minimum value
:param chol: if True, the Cholesky decomposition is used
:param max_colors: maximum value (to bound the colors)
:param alpha: transparency
:param elev: axis elevation
    :param azim: axis azimuth
:param n_elems: number of elements to plot in a slice of the cone
:param n_elems_h: number of slices of the cone to plot
Returns
-------
:return: -
"""
# Make the panes transparent
ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# Make the grid lines transparent
ax.xaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
ax.yaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
ax.zaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
# Remove axis
ax._axis3don = False
# Initial view
ax.view_init(elev=elev, azim=azim)
# Plot SPD cone
plot_spd_cone(ax, r=r_cone, lim_fact=0.8)
    # Values of test function for points on the manifold
phi = np.linspace(0, 2 * np.pi, n_elems)
# Matrix for rotation of 45° of the cone
    rot_axis = np.cross(np.array([1, 0, 0]), np.array([1., 1., 0.]))
    R = rotation_matrix_from_axis_angle(rot_axis, np.pi / 4.)
# Points of the cone
h = np.linspace(0.01, r_cone, n_elems_h)
x_cone = np.zeros((n_elems_h, n_elems, n_elems))
y_cone = np.zeros((n_elems_h, n_elems, n_elems))
z_cone = np.zeros((n_elems_h, n_elems, n_elems))
colors = np.zeros((n_elems_h, n_elems, n_elems))
var = np.zeros((n_elems_h, n_elems, n_elems))
for k in range(n_elems_h):
r = np.linspace(0, h[k] - 0.01, n_elems)
for i in range(n_elems):
# Points on a plane cutting the cone
xyz = np.vstack((h[k] * np.ones(n_elems), r[i] * np.sin(phi), r[i] / np.sqrt(2) * np.cos(phi)))
# Rotation
xyz = R.dot(xyz)
# Coordinates
x_cone[k, i] = xyz[0]
y_cone[k, i] = xyz[1]
z_cone[k, i] = xyz[2]
for i in range(n_elems):
for j in range(n_elems):
if not chol:
data_tmp = torch.tensor([[x_cone[k, i, j], y_cone[k, i, j], z_cone[k, i, j] * np.sqrt(2)]]).double()
data_tmp_vals = model(data_tmp)
colors[k, i, j] = data_tmp_vals.mean.detach().numpy()
var[k, i, j] = data_tmp_vals.variance.detach().numpy()
else:
indices = np.tril_indices(2)
data_tmp = np.array([[x_cone[k, i, j], z_cone[k, i, j]], [z_cone[k, i, j], y_cone[k, i, j]]])
data_chol_tmp = np.linalg.cholesky(data_tmp)
data_tmp_vals = model(data_chol_tmp[indices][None])
colors[k, i, j] = data_tmp_vals.mean.detach().numpy()
var[k, i, j] = data_tmp_vals.variance.detach().numpy()
if true_opt_y is not None:
min_colors = true_opt_y
else:
min_colors = np.min(colors)
colors = (colors - min_colors)
if max_colors is None:
max_colors = np.max(colors)
else:
        colors = np.min([colors, max_colors * np.ones(colors.shape)], axis=0)  # clip at max_colors
colors = colors / max_colors
if ys is not None:
colors_ys = pl.cm.inferno(np.ones(ys.shape) - (ys - min_colors) / max_colors)
for k in range(n_elems_h):
colors_plot = pl.cm.inferno(np.ones((n_elems, n_elems)) - colors[k])
ax.plot_surface(x_cone[k], y_cone[k], z_cone[k], rstride=4, cstride=4, facecolors=colors_plot, linewidth=0., alpha=alpha)
# Plots xs
if xs is not None and ys is not None:
for n in range(xs.shape[0]):
ax.scatter(xs[n, 0], xs[n, 1], xs[n, 2] / np.sqrt(2), s=30, c='k')
# ax.scatter(xs[n, 0], xs[n, 1], xs[n, 2] / np.sqrt(2), s=30, c=colors_ys[n])
# Plot opt x
if opt_x is not None:
ax.scatter(opt_x[0, 0], opt_x[0, 1], opt_x[0, 2] / np.sqrt(2), s=60, c='deepskyblue', marker='D')
# Plot true minimum
if true_opt_x is not None:
ax.scatter(true_opt_x[0, 0], true_opt_x[0, 1], true_opt_x[0, 2] / np.sqrt(2), s=100, c='g', marker='*')
def bo_plot_gp_spd_planar(fig, model, r_cone, var_fact=2., xs=None, ys=None, opt_x=None, opt_y=None, true_opt_x=None,
true_opt_y=None, max_colors=None, n_elems=10):
    # Not implemented yet: placeholder kept so callers have a stable API.
    return 0
| [
"numpy.tril_indices",
"numpy.size",
"BoManifolds.plot_utils.manifolds_plots.plot_spd_cone",
"numpy.zeros",
"numpy.ones",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"torch.Tensor",
"numpy.sqrt",
"numpy.linalg.cholesky",
"torch.tensor",
"BoManifo... | [((1902, 1941), 'numpy.linspace', 'np.linspace', (['(0.0001)', '(2 * np.pi)', 'n_elems'], {}), '(0.0001, 2 * np.pi, n_elems)\n', (1913, 1941), True, 'import numpy as np\n'), ((1950, 1985), 'numpy.linspace', 'np.linspace', (['(0.0001)', 'np.pi', 'n_elems'], {}), '(0.0001, np.pi, n_elems)\n', (1961, 1985), True, 'import numpy as np\n'), ((2212, 2236), 'numpy.zeros', 'np.zeros', (['x_sphere.shape'], {}), '(x_sphere.shape)\n', (2220, 2236), True, 'import numpy as np\n'), ((4229, 4263), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'n_elems'], {}), '(0, 2 * np.pi, n_elems)\n', (4240, 4263), True, 'import numpy as np\n'), ((4272, 4302), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'n_elems'], {}), '(0, np.pi, n_elems)\n', (4283, 4302), True, 'import numpy as np\n'), ((4515, 4539), 'numpy.zeros', 'np.zeros', (['x_sphere.shape'], {}), '(x_sphere.shape)\n', (4523, 4539), True, 'import numpy as np\n'), ((9441, 9475), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'n_elems'], {}), '(0, 2 * np.pi, n_elems)\n', (9452, 9475), True, 'import numpy as np\n'), ((9484, 9514), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'n_elems'], {}), '(0, np.pi, n_elems)\n', (9495, 9514), True, 'import numpy as np\n'), ((9749, 9773), 'numpy.zeros', 'np.zeros', (['x_sphere.shape'], {}), '(x_sphere.shape)\n', (9757, 9773), True, 'import numpy as np\n'), ((12189, 12223), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'n_elems'], {}), '(0, 2 * np.pi, n_elems)\n', (12200, 12223), True, 'import numpy as np\n'), ((12232, 12262), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'n_elems'], {}), '(0, np.pi, n_elems)\n', (12243, 12262), True, 'import numpy as np\n'), ((12497, 12521), 'numpy.zeros', 'np.zeros', (['x_sphere.shape'], {}), '(x_sphere.shape)\n', (12505, 12521), True, 'import numpy as np\n'), ((15135, 15169), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'n_elems'], {}), '(0, 2 * np.pi, n_elems)\n', (15146, 15169), True, 'import numpy as np\n'), ((15178, 15208), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'n_elems'], {}), '(0, np.pi, n_elems)\n', (15189, 15208), True, 'import numpy as np\n'), ((15440, 15464), 'numpy.zeros', 'np.zeros', (['x_sphere.shape'], {}), '(x_sphere.shape)\n', (15448, 15464), True, 'import numpy as np\n'), ((15476, 15500), 'numpy.zeros', 'np.zeros', (['x_sphere.shape'], {}), '(x_sphere.shape)\n', (15484, 15500), True, 'import numpy as np\n'), ((21468, 21509), 'BoManifolds.plot_utils.manifolds_plots.plot_spd_cone', 'plot_spd_cone', (['ax'], {'r': 'r_cone', 'lim_fact': '(0.8)'}), '(ax, r=r_cone, lim_fact=0.8)\n', (21481, 21509), False, 'from BoManifolds.plot_utils.manifolds_plots import plot_spd_cone\n'), ((21578, 21612), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'n_elems'], {}), '(0, 2 * np.pi, n_elems)\n', (21589, 21612), True, 'import numpy as np\n'), ((21731, 21780), 'BoManifolds.Riemannian_utils.utils.rotation_matrix_from_axis_angle', 'rotation_matrix_from_axis_angle', (['dir', '(np.pi / 4.0)'], {}), '(dir, np.pi / 4.0)\n', (21762, 21780), False, 'from BoManifolds.Riemannian_utils.utils import rotation_matrix_from_axis_angle\n'), ((21814, 21850), 'numpy.linspace', 'np.linspace', (['(0.01)', 'r_cone', 'n_elems_h'], {}), '(0.01, r_cone, n_elems_h)\n', (21825, 21850), True, 'import numpy as np\n'), ((21864, 21903), 'numpy.zeros', 'np.zeros', (['(n_elems_h, n_elems, n_elems)'], {}), '((n_elems_h, n_elems, n_elems))\n', (21872, 21903), True, 'import numpy as np\n'), ((21917, 21956), 'numpy.zeros', 'np.zeros', 
(['(n_elems_h, n_elems, n_elems)'], {}), '((n_elems_h, n_elems, n_elems))\n', (21925, 21956), True, 'import numpy as np\n'), ((21970, 22009), 'numpy.zeros', 'np.zeros', (['(n_elems_h, n_elems, n_elems)'], {}), '((n_elems_h, n_elems, n_elems))\n', (21978, 22009), True, 'import numpy as np\n'), ((22023, 22062), 'numpy.zeros', 'np.zeros', (['(n_elems_h, n_elems, n_elems)'], {}), '((n_elems_h, n_elems, n_elems))\n', (22031, 22062), True, 'import numpy as np\n'), ((25530, 25571), 'BoManifolds.plot_utils.manifolds_plots.plot_spd_cone', 'plot_spd_cone', (['ax'], {'r': 'r_cone', 'lim_fact': '(0.8)'}), '(ax, r=r_cone, lim_fact=0.8)\n', (25543, 25571), False, 'from BoManifolds.plot_utils.manifolds_plots import plot_spd_cone\n'), ((25640, 25674), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'n_elems'], {}), '(0, 2 * np.pi, n_elems)\n', (25651, 25674), True, 'import numpy as np\n'), ((25793, 25842), 'BoManifolds.Riemannian_utils.utils.rotation_matrix_from_axis_angle', 'rotation_matrix_from_axis_angle', (['dir', '(np.pi / 4.0)'], {}), '(dir, np.pi / 4.0)\n', (25824, 25842), False, 'from BoManifolds.Riemannian_utils.utils import rotation_matrix_from_axis_angle\n'), ((25876, 25912), 'numpy.linspace', 'np.linspace', (['(0.01)', 'r_cone', 'n_elems_h'], {}), '(0.01, r_cone, n_elems_h)\n', (25887, 25912), True, 'import numpy as np\n'), ((25926, 25965), 'numpy.zeros', 'np.zeros', (['(n_elems_h, n_elems, n_elems)'], {}), '((n_elems_h, n_elems, n_elems))\n', (25934, 25965), True, 'import numpy as np\n'), ((25979, 26018), 'numpy.zeros', 'np.zeros', (['(n_elems_h, n_elems, n_elems)'], {}), '((n_elems_h, n_elems, n_elems))\n', (25987, 26018), True, 'import numpy as np\n'), ((26032, 26071), 'numpy.zeros', 'np.zeros', (['(n_elems_h, n_elems, n_elems)'], {}), '((n_elems_h, n_elems, n_elems))\n', (26040, 26071), True, 'import numpy as np\n'), ((26085, 26124), 'numpy.zeros', 'np.zeros', (['(n_elems_h, n_elems, n_elems)'], {}), '((n_elems_h, n_elems, n_elems))\n', (26093, 26124), True, 'import numpy as np\n'), ((27267, 27281), 'numpy.min', 'np.min', (['colors'], {}), '(colors)\n', (27273, 27281), True, 'import numpy as np\n'), ((27334, 27348), 'numpy.max', 'np.max', (['colors'], {}), '(colors)\n', (27340, 27348), True, 'import numpy as np\n'), ((30002, 30043), 'BoManifolds.plot_utils.manifolds_plots.plot_spd_cone', 'plot_spd_cone', (['ax'], {'r': 'r_cone', 'lim_fact': '(0.8)'}), '(ax, r=r_cone, lim_fact=0.8)\n', (30015, 30043), False, 'from BoManifolds.plot_utils.manifolds_plots import plot_spd_cone\n'), ((30168, 30202), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'n_elems'], {}), '(0, 2 * np.pi, n_elems)\n', (30179, 30202), True, 'import numpy as np\n'), ((30321, 30370), 'BoManifolds.Riemannian_utils.utils.rotation_matrix_from_axis_angle', 'rotation_matrix_from_axis_angle', (['dir', '(np.pi / 4.0)'], {}), '(dir, np.pi / 4.0)\n', (30352, 30370), False, 'from BoManifolds.Riemannian_utils.utils import rotation_matrix_from_axis_angle\n'), ((30404, 30440), 'numpy.linspace', 'np.linspace', (['(0.01)', 'r_cone', 'n_elems_h'], {}), '(0.01, r_cone, n_elems_h)\n', (30415, 30440), True, 'import numpy as np\n'), ((30454, 30493), 'numpy.zeros', 'np.zeros', (['(n_elems_h, n_elems, n_elems)'], {}), '((n_elems_h, n_elems, n_elems))\n', (30462, 30493), True, 'import numpy as np\n'), ((30507, 30546), 'numpy.zeros', 'np.zeros', (['(n_elems_h, n_elems, n_elems)'], {}), '((n_elems_h, n_elems, n_elems))\n', (30515, 30546), True, 'import numpy as np\n'), ((30560, 30599), 'numpy.zeros', 'np.zeros', (['(n_elems_h, 
n_elems, n_elems)'], {}), '((n_elems_h, n_elems, n_elems))\n', (30568, 30599), True, 'import numpy as np\n'), ((30613, 30652), 'numpy.zeros', 'np.zeros', (['(n_elems_h, n_elems, n_elems)'], {}), '((n_elems_h, n_elems, n_elems))\n', (30621, 30652), True, 'import numpy as np\n'), ((30663, 30702), 'numpy.zeros', 'np.zeros', (['(n_elems_h, n_elems, n_elems)'], {}), '((n_elems_h, n_elems, n_elems))\n', (30671, 30702), True, 'import numpy as np\n'), ((2565, 2579), 'numpy.min', 'np.min', (['colors'], {}), '(colors)\n', (2571, 2579), True, 'import numpy as np\n'), ((2663, 2677), 'numpy.max', 'np.max', (['colors'], {}), '(colors)\n', (2669, 2677), True, 'import numpy as np\n'), ((4867, 4880), 'numpy.min', 'np.min', (['fmean'], {}), '(fmean)\n', (4873, 4880), True, 'import numpy as np\n'), ((4963, 4977), 'numpy.max', 'np.max', (['colors'], {}), '(colors)\n', (4969, 4977), True, 'import numpy as np\n'), ((10038, 10052), 'numpy.min', 'np.min', (['colors'], {}), '(colors)\n', (10044, 10052), True, 'import numpy as np\n'), ((10075, 10089), 'numpy.max', 'np.max', (['colors'], {}), '(colors)\n', (10081, 10089), True, 'import numpy as np\n'), ((12861, 12875), 'numpy.min', 'np.min', (['colors'], {}), '(colors)\n', (12867, 12875), True, 'import numpy as np\n'), ((12959, 12973), 'numpy.max', 'np.max', (['colors'], {}), '(colors)\n', (12965, 12973), True, 'import numpy as np\n'), ((15946, 15959), 'numpy.min', 'np.min', (['fmean'], {}), '(fmean)\n', (15952, 15959), True, 'import numpy as np\n'), ((16042, 16056), 'numpy.max', 'np.max', (['colors'], {}), '(colors)\n', (16048, 16056), True, 'import numpy as np\n'), ((21678, 21697), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (21686, 21697), True, 'import numpy as np\n'), ((21699, 21724), 'numpy.array', 'np.array', (['[1.0, 1.0, 0.0]'], {}), '([1.0, 1.0, 0.0])\n', (21707, 21724), True, 'import numpy as np\n'), ((22106, 22142), 'numpy.linspace', 'np.linspace', (['(0)', '(h[k] - 0.01)', 'n_elems'], {}), '(0, h[k] - 0.01, n_elems)\n', (22117, 22142), True, 'import numpy as np\n'), ((23357, 23371), 'numpy.min', 'np.min', (['colors'], {}), '(colors)\n', (23363, 23371), True, 'import numpy as np\n'), ((23455, 23469), 'numpy.max', 'np.max', (['colors'], {}), '(colors)\n', (23461, 23469), True, 'import numpy as np\n'), ((25740, 25759), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (25748, 25759), True, 'import numpy as np\n'), ((25761, 25786), 'numpy.array', 'np.array', (['[1.0, 1.0, 0.0]'], {}), '([1.0, 1.0, 0.0])\n', (25769, 25786), True, 'import numpy as np\n'), ((26168, 26204), 'numpy.linspace', 'np.linspace', (['(0)', '(h[k] - 0.01)', 'n_elems'], {}), '(0, h[k] - 0.01, n_elems)\n', (26179, 26204), True, 'import numpy as np\n'), ((30268, 30287), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (30276, 30287), True, 'import numpy as np\n'), ((30289, 30314), 'numpy.array', 'np.array', (['[1.0, 1.0, 0.0]'], {}), '([1.0, 1.0, 0.0])\n', (30297, 30314), True, 'import numpy as np\n'), ((30746, 30782), 'numpy.linspace', 'np.linspace', (['(0)', '(h[k] - 0.01)', 'n_elems'], {}), '(0, h[k] - 0.01, n_elems)\n', (30757, 30782), True, 'import numpy as np\n'), ((32143, 32157), 'numpy.min', 'np.min', (['colors'], {}), '(colors)\n', (32149, 32157), True, 'import numpy as np\n'), ((32241, 32255), 'numpy.max', 'np.max', (['colors'], {}), '(colors)\n', (32247, 32255), True, 'import numpy as np\n'), ((2025, 2034), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (2031, 2034), True, 'import numpy as np\n'), ((2036, 2045), 'numpy.sin', 
'np.sin', (['v'], {}), '(v)\n', (2042, 2045), True, 'import numpy as np\n'), ((2075, 2084), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (2081, 2084), True, 'import numpy as np\n'), ((2086, 2095), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (2092, 2095), True, 'import numpy as np\n'), ((2146, 2155), 'numpy.cos', 'np.cos', (['v'], {}), '(v)\n', (2152, 2155), True, 'import numpy as np\n'), ((2342, 2406), 'torch.Tensor', 'torch.Tensor', (['[[x_sphere[i, j], y_sphere[i, j], z_sphere[i, j]]]'], {}), '([[x_sphere[i, j], y_sphere[i, j], z_sphere[i, j]]])\n', (2354, 2406), False, 'import torch\n'), ((2785, 2806), 'numpy.ones', 'np.ones', (['colors.shape'], {}), '(colors.shape)\n', (2792, 2806), True, 'import numpy as np\n'), ((4342, 4351), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (4348, 4351), True, 'import numpy as np\n'), ((4353, 4362), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (4359, 4362), True, 'import numpy as np\n'), ((4392, 4401), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (4398, 4401), True, 'import numpy as np\n'), ((4403, 4412), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (4409, 4412), True, 'import numpy as np\n'), ((4463, 4472), 'numpy.cos', 'np.cos', (['v'], {}), '(v)\n', (4469, 4472), True, 'import numpy as np\n'), ((4645, 4709), 'torch.Tensor', 'torch.Tensor', (['[[x_sphere[i, j], y_sphere[i, j], z_sphere[i, j]]]'], {}), '([[x_sphere[i, j], y_sphere[i, j], z_sphere[i, j]]])\n', (4657, 4709), False, 'import torch\n'), ((5118, 5139), 'numpy.ones', 'np.ones', (['colors.shape'], {}), '(colors.shape)\n', (5125, 5139), True, 'import numpy as np\n'), ((9554, 9563), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (9560, 9563), True, 'import numpy as np\n'), ((9565, 9574), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (9571, 9574), True, 'import numpy as np\n'), ((9604, 9613), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (9610, 9613), True, 'import numpy as np\n'), ((9615, 9624), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (9621, 9624), True, 'import numpy as np\n'), ((9675, 9684), 'numpy.cos', 'np.cos', (['v'], {}), '(v)\n', (9681, 9684), True, 'import numpy as np\n'), ((10117, 10138), 'numpy.ones', 'np.ones', (['colors.shape'], {}), '(colors.shape)\n', (10124, 10138), True, 'import numpy as np\n'), ((12302, 12311), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (12308, 12311), True, 'import numpy as np\n'), ((12313, 12322), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (12319, 12322), True, 'import numpy as np\n'), ((12352, 12361), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (12358, 12361), True, 'import numpy as np\n'), ((12363, 12372), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (12369, 12372), True, 'import numpy as np\n'), ((12423, 12432), 'numpy.cos', 'np.cos', (['v'], {}), '(v)\n', (12429, 12432), True, 'import numpy as np\n'), ((13111, 13132), 'numpy.ones', 'np.ones', (['colors.shape'], {}), '(colors.shape)\n', (13118, 13132), True, 'import numpy as np\n'), ((15248, 15257), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (15254, 15257), True, 'import numpy as np\n'), ((15259, 15268), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (15265, 15268), True, 'import numpy as np\n'), ((15298, 15307), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (15304, 15307), True, 'import numpy as np\n'), ((15309, 15318), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (15315, 15318), True, 'import numpy as np\n'), ((15369, 15378), 'numpy.cos', 'np.cos', (['v'], {}), '(v)\n', (15375, 15378), True, 'import numpy as np\n'), ((16197, 16218), 'numpy.ones', 'np.ones', (['colors.shape'], {}), 
'(colors.shape)\n', (16204, 16218), True, 'import numpy as np\n'), ((2133, 2143), 'numpy.size', 'np.size', (['u'], {}), '(u)\n', (2140, 2143), True, 'import numpy as np\n'), ((4450, 4460), 'numpy.size', 'np.size', (['u'], {}), '(u)\n', (4457, 4460), True, 'import numpy as np\n'), ((5208, 5225), 'numpy.ones', 'np.ones', (['ys.shape'], {}), '(ys.shape)\n', (5215, 5225), True, 'import numpy as np\n'), ((9662, 9672), 'numpy.size', 'np.size', (['u'], {}), '(u)\n', (9669, 9672), True, 'import numpy as np\n'), ((12410, 12420), 'numpy.size', 'np.size', (['u'], {}), '(u)\n', (12417, 12420), True, 'import numpy as np\n'), ((15356, 15366), 'numpy.size', 'np.size', (['u'], {}), '(u)\n', (15363, 15366), True, 'import numpy as np\n'), ((16287, 16304), 'numpy.ones', 'np.ones', (['ys.shape'], {}), '(ys.shape)\n', (16294, 16304), True, 'import numpy as np\n'), ((23670, 23697), 'numpy.ones', 'np.ones', (['(n_elems, n_elems)'], {}), '((n_elems, n_elems))\n', (23677, 23697), True, 'import numpy as np\n'), ((27383, 27404), 'numpy.ones', 'np.ones', (['colors.shape'], {}), '(colors.shape)\n', (27390, 27404), True, 'import numpy as np\n'), ((27514, 27531), 'numpy.ones', 'np.ones', (['ys.shape'], {}), '(ys.shape)\n', (27521, 27531), True, 'import numpy as np\n'), ((27634, 27661), 'numpy.ones', 'np.ones', (['(n_elems, n_elems)'], {}), '((n_elems, n_elems))\n', (27641, 27661), True, 'import numpy as np\n'), ((28196, 28206), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (28203, 28206), True, 'import numpy as np\n'), ((28373, 28383), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (28380, 28383), True, 'import numpy as np\n'), ((32426, 32443), 'numpy.ones', 'np.ones', (['ys.shape'], {}), '(ys.shape)\n', (32433, 32443), True, 'import numpy as np\n'), ((32546, 32573), 'numpy.ones', 'np.ones', (['(n_elems, n_elems)'], {}), '((n_elems, n_elems))\n', (32553, 32573), True, 'import numpy as np\n'), ((33085, 33095), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (33092, 33095), True, 'import numpy as np\n'), ((33262, 33272), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (33269, 33272), True, 'import numpy as np\n'), ((2725, 2746), 'numpy.ones', 'np.ones', (['colors.shape'], {}), '(colors.shape)\n', (2732, 2746), True, 'import numpy as np\n'), ((5025, 5046), 'numpy.ones', 'np.ones', (['colors.shape'], {}), '(colors.shape)\n', (5032, 5046), True, 'import numpy as np\n'), ((9879, 9943), 'torch.Tensor', 'torch.Tensor', (['[[x_sphere[i, j], y_sphere[i, j], z_sphere[i, j]]]'], {}), '([[x_sphere[i, j], y_sphere[i, j], z_sphere[i, j]]])\n', (9891, 9943), False, 'import torch\n'), ((12627, 12691), 'torch.Tensor', 'torch.Tensor', (['[[x_sphere[i, j], y_sphere[i, j], z_sphere[i, j]]]'], {}), '([[x_sphere[i, j], y_sphere[i, j], z_sphere[i, j]]])\n', (12639, 12691), False, 'import torch\n'), ((13021, 13042), 'numpy.ones', 'np.ones', (['colors.shape'], {}), '(colors.shape)\n', (13028, 13042), True, 'import numpy as np\n'), ((15606, 15670), 'torch.tensor', 'torch.tensor', (['[[x_sphere[i, j], y_sphere[i, j], z_sphere[i, j]]]'], {}), '([[x_sphere[i, j], y_sphere[i, j], z_sphere[i, j]]])\n', (15618, 15670), False, 'import torch\n'), ((16104, 16125), 'numpy.ones', 'np.ones', (['colors.shape'], {}), '(colors.shape)\n', (16111, 16125), True, 'import numpy as np\n'), ((22916, 22934), 'numpy.tril_indices', 'np.tril_indices', (['(2)'], {}), '(2)\n', (22931, 22934), True, 'import numpy as np\n'), ((22966, 23052), 'numpy.array', 'np.array', (['[[x_cone[k, i, j], z_cone[k, i, j]], [z_cone[k, i, j], y_cone[k, i, j]]]'], {}), '([[x_cone[k, i, 
j], z_cone[k, i, j]], [z_cone[k, i, j], y_cone[k, i,\n j]]])\n', (22974, 23052), True, 'import numpy as np\n'), ((23517, 23538), 'numpy.ones', 'np.ones', (['colors.shape'], {}), '(colors.shape)\n', (23524, 23538), True, 'import numpy as np\n'), ((26923, 26941), 'numpy.tril_indices', 'np.tril_indices', (['(2)'], {}), '(2)\n', (26938, 26941), True, 'import numpy as np\n'), ((26973, 27059), 'numpy.array', 'np.array', (['[[x_cone[k, i, j], z_cone[k, i, j]], [z_cone[k, i, j], y_cone[k, i, j]]]'], {}), '([[x_cone[k, i, j], z_cone[k, i, j]], [z_cone[k, i, j], y_cone[k, i,\n j]]])\n', (26981, 27059), True, 'import numpy as np\n'), ((27978, 27988), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (27985, 27988), True, 'import numpy as np\n'), ((31629, 31647), 'numpy.tril_indices', 'np.tril_indices', (['(2)'], {}), '(2)\n', (31644, 31647), True, 'import numpy as np\n'), ((31679, 31765), 'numpy.array', 'np.array', (['[[x_cone[k, i, j], z_cone[k, i, j]], [z_cone[k, i, j], y_cone[k, i, j]]]'], {}), '([[x_cone[k, i, j], z_cone[k, i, j]], [z_cone[k, i, j], y_cone[k, i,\n j]]])\n', (31687, 31765), True, 'import numpy as np\n'), ((31798, 31826), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['data_tmp'], {}), '(data_tmp)\n', (31816, 31826), True, 'import numpy as np\n'), ((32303, 32324), 'numpy.ones', 'np.ones', (['colors.shape'], {}), '(colors.shape)\n', (32310, 32324), True, 'import numpy as np\n'), ((32867, 32877), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (32874, 32877), True, 'import numpy as np\n'), ((22261, 22277), 'numpy.ones', 'np.ones', (['n_elems'], {}), '(n_elems)\n', (22268, 22277), True, 'import numpy as np\n'), ((22286, 22297), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (22292, 22297), True, 'import numpy as np\n'), ((22319, 22330), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (22325, 22330), True, 'import numpy as np\n'), ((23098, 23126), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['data_tmp'], {}), '(data_tmp)\n', (23116, 23126), True, 'import numpy as np\n'), ((26323, 26339), 'numpy.ones', 'np.ones', (['n_elems'], {}), '(n_elems)\n', (26330, 26339), True, 'import numpy as np\n'), ((26348, 26359), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (26354, 26359), True, 'import numpy as np\n'), ((26381, 26392), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (26387, 26392), True, 'import numpy as np\n'), ((27105, 27133), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['data_tmp'], {}), '(data_tmp)\n', (27123, 27133), True, 'import numpy as np\n'), ((30901, 30917), 'numpy.ones', 'np.ones', (['n_elems'], {}), '(n_elems)\n', (30908, 30917), True, 'import numpy as np\n'), ((30926, 30937), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (30932, 30937), True, 'import numpy as np\n'), ((30959, 30970), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (30965, 30970), True, 'import numpy as np\n'), ((22306, 22316), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (22313, 22316), True, 'import numpy as np\n'), ((26368, 26378), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (26375, 26378), True, 'import numpy as np\n'), ((30946, 30956), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (30953, 30956), True, 'import numpy as np\n'), ((22767, 22777), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (22774, 22777), True, 'import numpy as np\n'), ((26775, 26785), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (26782, 26785), True, 'import numpy as np\n'), ((31353, 31363), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (31360, 31363), True, 'import numpy as np\n')] |
import numpy as np
try:
from sklearn.model_selection import cross_validate
from sklearn.model_selection import GridSearchCV
from ..api import (multinomial,
multinomial_lagrange,
multinomial_classifier,
multinomial_classifier_lagrange)
have_sklearn = True
except ImportError:
have_sklearn = False
from ...api import l1_l2, group_lasso  # group_lasso assumed exported here; used by atom_constructor below
from ...tests.decorators import set_seed_for_test
@np.testing.dec.skipif(True or not have_sklearn, msg='multinomial not working on its own yet')
@set_seed_for_test()
def test_multinomial():
n, p, q = 100, 20, 3
X = np.random.standard_normal((n, p))
Z = np.random.standard_normal((n, q))
A = np.argmax(Z, 1)
Y = np.zeros_like(Z)
for i in range(n):
Y[i,A[i]] = 1
response = Y
atom = l1_l2
atom_args = {'shape':(p, q), 'bound':3}
multinomial_lasso = multinomial(atom,
atom_args)
multinomial_lasso.fit(X, response)
print(cross_validate(multinomial_lasso, X, response, cv=10))
# grid search
params = {'atom_params':[{'shape':(p, q),
'bound': b} for b in [3, 4, 5]]}
lasso_cv = GridSearchCV(multinomial_lasso, params, cv=3)
lasso_cv.fit(X, response)
multinomial_lasso_offset = multinomial(atom,
atom_args,
offset=True)
response_offset = np.hstack([response, np.random.standard_normal((n, 1))])
multinomial_lasso_offset.fit(X, response_offset)
multinomial_lasso_weights = multinomial(atom,
atom_args,
case_weights=True)
response_weights = np.hstack([response, np.ones((n, 1))])
multinomial_lasso_weights.fit(X, response_weights)
np.testing.assert_allclose(multinomial_lasso_weights._coefs,
multinomial_lasso._coefs)
GridSearchCV(multinomial_lasso_offset, params, cv=3).fit(X, response_offset)
GridSearchCV(multinomial_lasso_weights, params, cv=3).fit(X, response_weights)
def atom_constructor(null_grad, **args):
return group_lasso(**args)
multinomial_lasso_lag = multinomial_lagrange(atom_constructor,
atom_args,
case_weights=True,
score_method='R2')
GridSearchCV(multinomial_lasso_lag, params, cv=3).fit(X, response_weights)
| [
"sklearn.model_selection.GridSearchCV",
"numpy.zeros_like",
"numpy.argmax",
"sklearn.model_selection.cross_validate",
"numpy.ones",
"numpy.random.standard_normal",
"numpy.testing.assert_allclose",
"numpy.testing.dec.skipif"
] | [((463, 561), 'numpy.testing.dec.skipif', 'np.testing.dec.skipif', (['(True or not have_sklearn)'], {'msg': '"""multinomial not working on its own yet"""'}), "(True or not have_sklearn, msg=\n 'multinomial not working on its own yet')\n", (484, 561), True, 'import numpy as np\n'), ((636, 669), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(n, p)'], {}), '((n, p))\n', (661, 669), True, 'import numpy as np\n'), ((678, 711), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(n, q)'], {}), '((n, q))\n', (703, 711), True, 'import numpy as np\n'), ((720, 735), 'numpy.argmax', 'np.argmax', (['Z', '(1)'], {}), '(Z, 1)\n', (729, 735), True, 'import numpy as np\n'), ((744, 760), 'numpy.zeros_like', 'np.zeros_like', (['Z'], {}), '(Z)\n', (757, 760), True, 'import numpy as np\n'), ((1223, 1268), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['multinomial_lasso', 'params'], {'cv': '(3)'}), '(multinomial_lasso, params, cv=3)\n', (1235, 1268), False, 'from sklearn.model_selection import GridSearchCV\n'), ((1883, 1973), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['multinomial_lasso_weights._coefs', 'multinomial_lasso._coefs'], {}), '(multinomial_lasso_weights._coefs,\n multinomial_lasso._coefs)\n', (1909, 1973), True, 'import numpy as np\n'), ((1025, 1078), 'sklearn.model_selection.cross_validate', 'cross_validate', (['multinomial_lasso', 'X', 'response'], {'cv': '(10)'}), '(multinomial_lasso, X, response, cv=10)\n', (1039, 1078), False, 'from sklearn.model_selection import cross_validate\n'), ((1502, 1535), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(n, 1)'], {}), '((n, 1))\n', (1527, 1535), True, 'import numpy as np\n'), ((1805, 1820), 'numpy.ones', 'np.ones', (['(n, 1)'], {}), '((n, 1))\n', (1812, 1820), True, 'import numpy as np\n'), ((2006, 2058), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['multinomial_lasso_offset', 'params'], {'cv': '(3)'}), '(multinomial_lasso_offset, params, cv=3)\n', (2018, 2058), False, 'from sklearn.model_selection import GridSearchCV\n'), ((2087, 2140), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['multinomial_lasso_weights', 'params'], {'cv': '(3)'}), '(multinomial_lasso_weights, params, cv=3)\n', (2099, 2140), False, 'from sklearn.model_selection import GridSearchCV\n'), ((2514, 2563), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['multinomial_lasso_lag', 'params'], {'cv': '(3)'}), '(multinomial_lasso_lag, params, cv=3)\n', (2526, 2563), False, 'from sklearn.model_selection import GridSearchCV\n')] |
import dlr
import cv2
import numpy as np
from dlr import DLRModel
label_map = {
0: 'brown_abnormal_chinese',
1: 'brown_abnormal_korean',
2: 'brown_normal_chinese',
3: 'brown_normal_korean',
4: 'no_box',
5: 'red_abnormal',
6: 'red_normal'
}
def softmax(x):
x_exp = np.exp(x - np.max(x))
f_x = x_exp / np.sum(x_exp)
return f_x
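# e.g. softmax(np.array([0., 1.])) -> array([0.2689, 0.7311]); entries sum to 1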
def predict_from_image(image):
cvimage = cv2.resize(image, config_utils.SHAPE)
config_utils.logger.info("img shape after resize: '{}'.".format(cvimage.shape))
img = np.asarray(cvimage, dtype='float32')
img /= 255.0 # scale 0 to 1
    mean = np.array([0.485, 0.456, 0.406])  # ImageNet channel means
    std = np.array([0.229, 0.224, 0.225])  # ImageNet channel standard deviations
img = (img - mean) / std
img = np.transpose(img, (2,0,1))
img = np.expand_dims(img, axis=0) # e.g., [1x3x224x224]
config_utils.logger.info("img shape final: '{}'.".format(img.shape))
predict(img)
def preprocess_image(image):
cvimage = cv2.resize(image, (224,224))
#config_utils.logger.info("img shape after resize: '{}'.".format(cvimage.shape))
img = np.asarray(cvimage, dtype='float32')
img /= 255.0 # scale 0 to 1
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
img = (img - mean) / std
img = np.transpose(img, (2,0,1))
img = np.expand_dims(img, axis=0) # e.g., [1x3x224x224]
return img
image_data = cv2.imread('sample_images/red_normal.jpeg')
image_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)
image_data = preprocess_image(image_data)
device = 'cpu'
model = DLRModel(f'model_{device}', device)
output = model.run(image_data)
probs = softmax(output[0][0])
sort_classes_by_probs = np.argsort(probs)[::-1]
idx = sort_classes_by_probs[0]
msg = f'predicted = {label_map[idx]}, {probs[idx]*100:.2f}%'
print('ground_truth = red_normal')
print(msg) | [
"numpy.sum",
"cv2.cvtColor",
"numpy.asarray",
"numpy.transpose",
"numpy.expand_dims",
"dlr.DLRModel",
"cv2.imread",
"numpy.argsort",
"numpy.max",
"numpy.array",
"cv2.resize"
] | [((1403, 1446), 'cv2.imread', 'cv2.imread', (['"""sample_images/red_normal.jpeg"""'], {}), "('sample_images/red_normal.jpeg')\n", (1413, 1446), False, 'import cv2\n'), ((1460, 1503), 'cv2.cvtColor', 'cv2.cvtColor', (['image_data', 'cv2.COLOR_BGR2RGB'], {}), '(image_data, cv2.COLOR_BGR2RGB)\n', (1472, 1503), False, 'import cv2\n'), ((1582, 1617), 'dlr.DLRModel', 'DLRModel', (['f"""model_{device}"""', 'device'], {}), "(f'model_{device}', device)\n", (1590, 1617), False, 'from dlr import DLRModel\n'), ((416, 453), 'cv2.resize', 'cv2.resize', (['image', 'config_utils.SHAPE'], {}), '(image, config_utils.SHAPE)\n', (426, 453), False, 'import cv2\n'), ((549, 585), 'numpy.asarray', 'np.asarray', (['cvimage'], {'dtype': '"""float32"""'}), "(cvimage, dtype='float32')\n", (559, 585), True, 'import numpy as np\n'), ((629, 660), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (637, 660), True, 'import numpy as np\n'), ((672, 703), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (680, 703), True, 'import numpy as np\n'), ((743, 771), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (755, 771), True, 'import numpy as np\n'), ((781, 808), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (795, 808), True, 'import numpy as np\n'), ((968, 997), 'cv2.resize', 'cv2.resize', (['image', '(224, 224)'], {}), '(image, (224, 224))\n', (978, 997), False, 'import cv2\n'), ((1092, 1128), 'numpy.asarray', 'np.asarray', (['cvimage'], {'dtype': '"""float32"""'}), "(cvimage, dtype='float32')\n", (1102, 1128), True, 'import numpy as np\n'), ((1172, 1203), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (1180, 1203), True, 'import numpy as np\n'), ((1215, 1246), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (1223, 1246), True, 'import numpy as np\n'), ((1286, 1314), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (1298, 1314), True, 'import numpy as np\n'), ((1324, 1351), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1338, 1351), True, 'import numpy as np\n'), ((1706, 1723), 'numpy.argsort', 'np.argsort', (['probs'], {}), '(probs)\n', (1716, 1723), True, 'import numpy as np\n'), ((341, 354), 'numpy.sum', 'np.sum', (['x_exp'], {}), '(x_exp)\n', (347, 354), True, 'import numpy as np\n'), ((312, 321), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (318, 321), True, 'import numpy as np\n')] |
import matplotlib
matplotlib.use('PDF')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
m_dpi = 600
def create_font(fontname='Tahoma', fontsize=10):
"""
Create a font object to be used in matplotlib figures.
Parameters
----------
fontname : string
The desired font, i.e., Times New Roman, Verdana, etc.
fontsize : integer
The fontsize used for display on the figure.
Returns
-------
mpl_font : dictionary
A font dictionary used by matplotlib figures.
"""
return { 'fontname': fontname, 'fontsize': fontsize }
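# Illustrative usage (mirrors the plt.title calls below):
#   plt.title("Eigenfaces", create_font('Tahoma', 12))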
def plot_faces(title, images, rows=3, cols=6, sptitle="subplot", sptitles=[], colormap=cm.jet, filename=None):
"""
Plots a grid of faces, useful for plotting eigenfaces/fisherfaces.
Parameters
----------
title : string
Title for the whole set of plots.
images : array-like
The set of images to be plotted.
rows : integer
Number of rows in face image grid.
cols : integer
Number of columns in the face image grid.
sptitle : string
The title to be numbered and used on each subplot.
sptitles : list of strings
A set of different titles to be called for each sequential subplot.
colormap : matplotlib cm
The color gradient that represents intensity on the figure.
filename : string
Output filename to save the image to disk.
Returns
-------
None
"""
plt.clf()
fig = plt.figure(figsize=(15, 8), dpi=m_dpi)
fig.text(.5, .95, title, horizontalalignment='center')
    for i in range(len(images)):
ax0 = fig.add_subplot(rows,cols,(i+1))
plt.setp(ax0.get_xticklabels(), visible=False)
plt.setp(ax0.get_yticklabels(), visible=False)
if len(sptitles) == len(images):
plt.title(r'%s %s' % (sptitle, str(sptitles[i])), create_font('Tahoma', 10))
else:
plt.title(r'%s %d' % (sptitle, (i+1)), create_font('Tahoma',10))
plt.imshow(np.asarray(images[i]), cmap=colormap)
if filename is None:
plt.show()
else:
fig.savefig(filename)
plt.close()
def normalize(a, lo, hi):
"""Maps an array linear into a new range.
Parameters
----------
a : array-like
The data to be adjusted
lo : float
The lowest desired value
hi : float
The highest desired value
Returns
-------
a : array-like
The remapped input array.
"""
amin = np.min(a)
amax = np.max(a)
sf = float(hi - lo) / (amax - amin)
a -= amin
a *= sf
return a
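# Illustrative example: normalize(np.array([0.0, 5.0, 10.0]), 0, 255) returns
# array([0., 127.5, 255.]); note the input ndarray is also modified in place.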
def scatter_face(title, faceweights, x1=1, x2=2, filename=None):
"""Create a nice plot meant to show clustering in the PCA weights.
Parameters
----------
title : string
The title of the plot
faceweights : array-like
The projected data going into the plot.
x1 : integer
The principal component index on the x-axis
x2 : integer
The principal component index on the y-axis
filename : string
If given, the plot will be saved as this file.
Returns
-------
None
"""
plt.clf()
fig = plt.figure(figsize=(12, 8), dpi=m_dpi)
#fig.text(0.5, 0.95, title, horizontalalignment='center')
colors = cm.gist_ncar(np.linspace(0, 1, len(faceweights)))
# Need to find average distance, and I can't think of a better way
davg = 0
dcount = 0
for name in faceweights:
for vec in faceweights[name]:
davg += np.sum(vec[2])
dcount += 1
davg /= dcount
# Now loop and plot all the points
for name, color in zip(faceweights, colors):
x = []
y = []
s = []
for vec in faceweights[name]:
x.append(vec[0])
y.append(vec[1])
s.append(5 + 295 * (np.tanh(vec[2] / davg - 1) + 1.0))
plt.scatter(x, y, s=s, c=color, alpha=0.5, label=name)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.tick_params(axis='x',labelbottom='off')
plt.tick_params(axis='y',labelleft='off')
plt.xlim(plt.xlim()[0], plt.xlim()[1] + 0.2 * (plt.xlim()[1] - plt.xlim()[0]))
plt.xlabel(r'$\omega_' + str(x1) + r'$', fontsize=16)
plt.ylabel(r'$\omega_' + str(x2) + r'$', fontsize=16)
plt.legend(loc=4)
if filename is None:
plt.show()
else:
fig.savefig(filename)
plt.close()
| [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.tanh",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.scatter",
"numpy.asarray",
"matplotlib.pyplot.figure",
"matplotlib.use",
"numpy.min",
"numpy.max",
"matplotlib.p... | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""PDF"""'], {}), "('PDF')\n", (32, 39), False, 'import matplotlib\n'), ((1368, 1377), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1375, 1377), True, 'import matplotlib.pyplot as plt\n'), ((1386, 1424), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 8)', 'dpi': 'm_dpi'}), '(figsize=(15, 8), dpi=m_dpi)\n', (1396, 1424), True, 'import matplotlib.pyplot as plt\n'), ((1969, 1980), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1978, 1980), True, 'import matplotlib.pyplot as plt\n'), ((2283, 2292), 'numpy.min', 'np.min', (['a'], {}), '(a)\n', (2289, 2292), True, 'import numpy as np\n'), ((2301, 2310), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (2307, 2310), True, 'import numpy as np\n'), ((2878, 2887), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2885, 2887), True, 'import matplotlib.pyplot as plt\n'), ((2896, 2934), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)', 'dpi': 'm_dpi'}), '(figsize=(12, 8), dpi=m_dpi)\n', (2906, 2934), True, 'import matplotlib.pyplot as plt\n'), ((3564, 3591), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (3570, 3591), True, 'import matplotlib.pyplot as plt\n'), ((3593, 3623), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (3599, 3623), True, 'import matplotlib.pyplot as plt\n'), ((3626, 3670), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'labelbottom': '"""off"""'}), "(axis='x', labelbottom='off')\n", (3641, 3670), True, 'import matplotlib.pyplot as plt\n'), ((3671, 3713), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'labelleft': '"""off"""'}), "(axis='y', labelleft='off')\n", (3686, 3713), True, 'import matplotlib.pyplot as plt\n'), ((3904, 3921), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (3914, 3921), True, 'import matplotlib.pyplot as plt\n'), ((3992, 4003), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4001, 4003), True, 'import matplotlib.pyplot as plt\n'), ((1924, 1934), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1932, 1934), True, 'import matplotlib.pyplot as plt\n'), ((3507, 3561), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'s': 's', 'c': 'color', 'alpha': '(0.5)', 'label': 'name'}), '(x, y, s=s, c=color, alpha=0.5, label=name)\n', (3518, 3561), True, 'import matplotlib.pyplot as plt\n'), ((3947, 3957), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3955, 3957), True, 'import matplotlib.pyplot as plt\n'), ((1860, 1881), 'numpy.asarray', 'np.asarray', (['images[i]'], {}), '(images[i])\n', (1870, 1881), True, 'import numpy as np\n'), ((3215, 3229), 'numpy.sum', 'np.sum', (['vec[2]'], {}), '(vec[2])\n', (3221, 3229), True, 'import numpy as np\n'), ((3723, 3733), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (3731, 3733), True, 'import matplotlib.pyplot as plt\n'), ((3738, 3748), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (3746, 3748), True, 'import matplotlib.pyplot as plt\n'), ((3761, 3771), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (3769, 3771), True, 'import matplotlib.pyplot as plt\n'), ((3777, 3787), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (3785, 3787), True, 'import matplotlib.pyplot as plt\n'), ((3469, 3495), 'numpy.tanh', 'np.tanh', (['(vec[2] / davg - 1)'], {}), '(vec[2] / davg - 1)\n', (3476, 
3495), True, 'import numpy as np\n')] |
import numpy as np
import itertools
import math
from tqdm import tqdm
import scipy
import functools
import operator
from sympy import primefactors, sieve
import random
from sympy import *
from itertools import combinations
from itertools import permutations
def forfor(a):
return [item for sublist in a for item in sublist]
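# Illustrative example: forfor([[1, 2], [3]]) -> [1, 2, 3] (flattens one level).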
def comd_12():
dict_n = {}
arr_out = []
numbers = list(range(1, 100))
#print(math.comb(200, 12))
for k in tqdm(range(10_000_000)):
i = np.random.randint(0, 350, 9)
a = i[0]**3 + i[1]**3 + i[2]**3
b = i[3]**3 + i[4]**3 + i[5]**3
if a != b:
continue
c = i[6]**3 + i[7]**3 + i[8]**3
if b == c:
print(a, i)
def comb_cube():
dict_n = {}
arr_out = []
numbers = list(range(1, 250))
for i in combinations(numbers, 3):
n = i[0]**3 + i[1]**3 + i[2]**3
v = dict_n.get(n, 0)
if v != 0:
dict_n[n] = v + [i]
elif n > 0:
dict_n[n] = [i]
for i, (k, v) in enumerate(dict_n.items()):
if len(v) >= 3 and len(set(forfor(v))) == len(forfor(v)):
arr_out.append((k, v, len(v)))
print('done combinations')
arr_out = sorted(arr_out, key = lambda x: x[0])
print('done sort')
k = 0
for i in arr_out:
print (i, ',')
# dict_of_min_n = {i: (0, 0, 0) for i in range(100)}
# for i in arr_out:
# if dict_of_min_n[i[2]] [0] == 0 or dict_of_min_n[i[2]] [0] > i[0]:
# dict_of_min_n[i[2]] = i
# for i, (k, v) in enumerate(dict_of_min_n.items()):
# if v != (0, 0, 0):
# print(v)
def per_cube():
    up_to_n = 100_000
cubes = [i**3 for i in range(1, int(math.pow(up_to_n, 1/3)) + 2)]
#print(cubes)
min_dict = {}
for i in tqdm(range(1, up_to_n)):
arr_tripl = [i]
for x in cubes:
if i - x > 1:
for y in cubes:
if i - x - y > 0:
for z in cubes:
if i - x - y - z == 0 and x > y and y > z :
arr_tripl.append( (x, y, z) )
if arr_tripl != [i] and len(set( forfor (arr_tripl[1:]) )) == len (forfor (arr_tripl[1:]) ):
k = len(arr_tripl[1:])
v = min_dict.get(k, 0)
if v != 0:
if v[0] > arr_tripl[0]:
min_dict[k] = arr_tripl
else:
min_dict[k] = arr_tripl
for i, (k, v) in enumerate(min_dict.items()):
print(v)
def per_cube_range():
    low_b = 100_000 - 1
up_to_n = 746_992 + 100
#cubes = [i**3 for i in range(1, int(math.pow(up_to_n, 1/3)) + 2)]
#print(cubes)
min_dict = {}
for i in tqdm(range(1, up_to_n)):
arr_tripl = [i]
for x in range(1, int(math.pow(i, 1/3)) + 2 ):
if i - x**3 > 1:
for y in range(1, x):
for z in range(1, y):
if i - x**3 - y**3 - z**3 == 0 :
arr_tripl.append( (x, y, z) )
if arr_tripl != [i] and len(set( forfor (arr_tripl[1:]) )) == len (forfor (arr_tripl[1:]) ):
k = len(arr_tripl[1:])
v = min_dict.get(k, 0)
if v != 0:
if v[0] > arr_tripl[0]:
min_dict[k] = arr_tripl
else:
min_dict[k] = arr_tripl
for i, (k, v) in enumerate(min_dict.items()):
print(v)
arr_cubes = [i for i in range(1, int(math.pow(177663375, 1/3)) + 4)]
def decomp_n(n):
arr_tripl = []
for x in arr_cubes:
if n - x**3 > 1:
for y in arr_cubes:
if n - x**3 - y**3 > 0:
for z in arr_cubes:
if x>=y and y>= z and n - x**3 - y**3 - z**3 == 0 :
arr_tripl.append( (x, y, z) )
return arr_tripl
def dec_from_file():
with open('five or more ways.txt') as reader:
for line in reader.readlines()[::-1]:
line = line.replace("(", ' ')
line = line.replace(")", ' ')
line = line.replace(",", ' ')
line = line.split()
nums = [int(i) for i in line]
dec_n = nums[1]
decomp_arr = decomp_n(dec_n)
if len(set( forfor (decomp_arr) )) == len (forfor (decomp_arr) ):
print(dec_n, len (decomp_arr))
def check_remainders_of_sum_cubes(mod):
rems = [i for i in range(1, mod+1)]
possible_rems_of_n = []
for i in itertools.combinations_with_replacement(rems, 3):
temp_rem = ( i[0]**3+ i[1]**3+i[2]**3)%mod
if temp_rem not in possible_rems_of_n:
possible_rems_of_n.append(temp_rem)
return possible_rems_of_n
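# Note: cubes are congruent to 0, 1 or 8 (mod 9), so a sum of three cubes is
# never 4 or 5 (mod 9); check_remainders_of_sum_cubes(9) accordingly yields
# the seven residues {0, 1, 2, 3, 6, 7, 8}.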
if __name__ == "__main__":
#print(decomp_n(5104))
#dec_from_file()
sieve._reset() # this line for doctest only
sieve.extend_to_no(50)
primes = sieve._list
arr_of_mod = []
for i in primes:
rems = check_remainders_of_sum_cubes(i)
effectivnes = len(rems)/i
arr_of_mod.append((i, effectivnes, sorted(rems)))
arr_of_mod = sorted(arr_of_mod, key = lambda x: x[1])
for i in arr_of_mod:
print(i[:2:])
# def forfor(a):
# return [item for sublist in a for item in sublist]
# up_to_n = 1_000_00
# min_dict = {}
# for i in tqdm(range(1, up_to_n)):
# arr_tripl = [i]
# for x in range(1, int(math.pow(i, 1/3)) + 2 ):
# if i - x**3 > 1:
# for y in range(1, x):
# for z in range(1, y):
# if i - x**3 - y**3 - z**3 == 0 :
# arr_tripl.append( (x, y, z) )
# if arr_tripl != [i] and len(set( forfor (arr_tripl[1:]) )) == len (forfor (arr_tripl[1:]) ):
# k = len(arr_tripl[1:])
# v = min_dict.get(k, 0)
# if v != 0:
# if v[0] > arr_tripl[0]:
# min_dict[k] = arr_tripl
# else:
# min_dict[k] = arr_tripl
# for i, (k, v) in enumerate(min_dict.items()):
# print(v)
| [
"math.pow",
"sympy.sieve.extend_to_no",
"itertools.combinations_with_replacement",
"itertools.combinations",
"sympy.sieve._reset",
"numpy.random.randint"
] | [((848, 872), 'itertools.combinations', 'combinations', (['numbers', '(3)'], {}), '(numbers, 3)\n', (860, 872), False, 'from itertools import combinations\n'), ((4624, 4672), 'itertools.combinations_with_replacement', 'itertools.combinations_with_replacement', (['rems', '(3)'], {}), '(rems, 3)\n', (4663, 4672), False, 'import itertools\n'), ((4936, 4950), 'sympy.sieve._reset', 'sieve._reset', ([], {}), '()\n', (4948, 4950), False, 'from sympy import primefactors, sieve\n'), ((4984, 5006), 'sympy.sieve.extend_to_no', 'sieve.extend_to_no', (['(50)'], {}), '(50)\n', (5002, 5006), False, 'from sympy import primefactors, sieve\n'), ((506, 534), 'numpy.random.randint', 'np.random.randint', (['(0)', '(350)', '(9)'], {}), '(0, 350, 9)\n', (523, 534), True, 'import numpy as np\n'), ((2881, 2899), 'math.pow', 'math.pow', (['i', '(1 / 3)'], {}), '(i, 1 / 3)\n', (2889, 2899), False, 'import math\n'), ((3579, 3605), 'math.pow', 'math.pow', (['(177663375)', '(1 / 3)'], {}), '(177663375, 1 / 3)\n', (3587, 3605), False, 'import math\n'), ((1764, 1788), 'math.pow', 'math.pow', (['up_to_n', '(1 / 3)'], {}), '(up_to_n, 1 / 3)\n', (1772, 1788), False, 'import math\n')] |
# core/_base.py
"""Abstract base classes for reduced-order models."""
__all__ = []
import abc
import numpy as np
from . import operators
_isparametricop = operators.is_parametric_operator
class _BaseROM(abc.ABC):
"""Base class for all rom_operator_inference reduced model classes."""
_MODELFORM_KEYS = "cAHGB" # Constant, Linear, Quadratic, Cubic, Input.
_LHS_ARGNAME = "lhs"
_LHS_LABEL = None
_STATE_LABEL = None
_INPUT_LABEL = None
def __init__(self, modelform):
"""Set the model form (ROM structure)."""
self.modelform = modelform
def __str__(self):
"""String representation: structure of the model, dimensions, etc."""
# Build model structure.
lhs, q, u = self._LHS_LABEL, self._STATE_LABEL, self._INPUT_LABEL
out, terms = [], []
if 'c' in self.modelform:
terms.append("c")
if 'A' in self.modelform:
terms.append(f"A{q}")
if 'H' in self.modelform:
terms.append(f"H[{q} ⊗ {q}]")
if 'G' in self.modelform:
terms.append(f"G[{q} ⊗ {q} ⊗ {q}]")
if 'B' in self.modelform:
terms.append(f"B{u}")
structure = " + ".join(terms)
out.append(f"Reduced-order model structure: {lhs} = {structure}")
# Report dimensions.
if self.n:
out.append(f"Full-order dimension n = {self.n:d}")
if self.m:
out.append(f"Input/control dimension m = {self.m:d}")
if self.r:
out.append(f"Reduced-order dimension r = {self.r:d}")
# TODO: out.append(f"Total degrees of freedom = {}")
return '\n'.join(out)
def __repr__(self):
"""Unique ID + string representation."""
uniqueID = f"<{self.__class__.__name__} object at {hex(id(self))}>"
return f"{uniqueID}\n{str(self)}"
def _clear(self):
"""Set private attributes as None, erasing any previously stored basis,
dimensions, or ROM operators.
"""
self.__m = None if 'B' in self.modelform else 0
self.__r = None
self.__basis = None
self.__c_ = None
self.__A_ = None
self.__H_ = None
self.__G_ = None
self.__B_ = None
self._projected_operators_ = ""
# Properties: modelform ---------------------------------------------------
@property
def modelform(self):
"""Structure of the reduced-order model."""
return self.__form
@modelform.setter
def modelform(self, form):
"""Set the modelform, which – if successful – resets the entire ROM."""
form = ''.join(sorted(form,
key=lambda k: self._MODELFORM_KEYS.find(k)))
for key in form:
if key not in self._MODELFORM_KEYS:
raise ValueError(f"invalid modelform key '{key}'; options "
"are " + ', '.join(self._MODELFORM_KEYS))
self.__form = form
self._clear()
# Properties: dimensions --------------------------------------------------
@property
def n(self):
"""Dimension of the full-order model."""
return self.basis.shape[0] if self.basis is not None else None
@n.setter
def n(self, n):
"""Setting this dimension is not allowed."""
raise AttributeError("can't set attribute (n = basis.shape[0])")
@property
def m(self):
"""Dimension of the input term, if present."""
return self.__m
@m.setter
def m(self, m):
"""Set input dimension; only allowed if 'B' in modelform
and the operator B_ is None.
"""
if 'B' not in self.modelform and m != 0:
raise AttributeError("can't set attribute ('B' not in modelform)")
elif self.B_ is not None:
raise AttributeError("can't set attribute (m = B_.shape[1])")
self.__m = m
@property
def r(self):
"""Dimension of the reduced-order model."""
return self.__r
@r.setter
def r(self, r):
"""Set ROM dimension; only allowed if the basis is None."""
if self.basis is not None:
raise AttributeError("can't set attribute (r = basis.shape[1])")
if any(op is not None for op in self):
raise AttributeError("can't set attribute (call fit() to reset)")
self.__r = r
@property
def _r2(self):
"""Dimension of the compact quadratic Kronecker product."""
r = self.r
return r * (r + 1) // 2
@property
def _r3(self):
"""Dimension of the compact cubic Kronecker product."""
r = self.r
return r * (r + 1) * (r + 2) // 6
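    # For example, r = 10 gives _r2 = 10*11//2 = 55 and _r3 = 10*11*12//6 = 220.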
# Properties: basis -------------------------------------------------------
@property
def basis(self):
"""Basis for the linear reduced space (e.g., POD ), of shape (n, r)."""
return self.__basis
@basis.setter
def basis(self, basis):
"""Set the basis, thereby fixing the dimensions n and r."""
self.__basis = basis
if basis is not None:
if basis.shape[0] < basis.shape[1]:
raise ValueError("basis must be n x r with n > r")
self.__r = basis.shape[1]
@basis.deleter
def basis(self):
self.__basis = None
# Properties: reduced-order operators -------------------------------------
@property
def c_(self):
"""ROM constant operator, of shape (r,)."""
return self.__c_
@c_.setter
def c_(self, c_):
self._check_operator_matches_modelform(c_, 'c')
if c_ is not None:
if not operators.is_operator(c_):
c_ = operators.ConstantOperator(c_)
self._check_rom_operator_shape(c_, 'c')
self.__c_ = c_
@property
def A_(self):
"""ROM linear state operator, of shape (r, r)."""
return self.__A_
@A_.setter
def A_(self, A_):
# TODO: what happens if model.A_ = something but model.r is None?
self._check_operator_matches_modelform(A_, 'A')
if A_ is not None:
if not operators.is_operator(A_):
A_ = operators.LinearOperator(A_)
self._check_rom_operator_shape(A_, 'A')
self.__A_ = A_
@property
def H_(self):
"""ROM quadratic state opeator, of shape (r, r(r+1)/2)."""
return self.__H_
@H_.setter
def H_(self, H_):
self._check_operator_matches_modelform(H_, 'H')
if H_ is not None:
if not operators.is_operator(H_):
H_ = operators.QuadraticOperator(H_)
self._check_rom_operator_shape(H_, 'H')
self.__H_ = H_
@property
def G_(self):
"""ROM cubic state operator, of shape (r, r(r+1)(r+2)/6)."""
return self.__G_
@G_.setter
def G_(self, G_):
self._check_operator_matches_modelform(G_, 'G')
if G_ is not None:
if not operators.is_operator(G_):
G_ = operators.CubicOperator(G_)
self._check_rom_operator_shape(G_, 'G')
self.__G_ = G_
@property
def B_(self):
"""ROM input operator, of shape (r, m)."""
return self.__B_
@B_.setter
def B_(self, B_):
self._check_operator_matches_modelform(B_, 'B')
if B_ is not None:
if not operators.is_operator(B_):
B_ = operators.LinearOperator(B_)
self._check_rom_operator_shape(B_, 'B')
self.__B_ = B_
def _set_operators(self, basis,
c_=None, A_=None, H_=None, G_=None, B_=None):
"""Set the ROM operators and corresponding dimensions.
Parameters
----------
basis : (n, r) ndarray or None
Basis for the linear reduced space (e.g., POD basis matrix).
If None, then r is inferred from one of the reduced operators.
c_ : (r,) ndarray or None
Reduced-order constant term.
A_ : (r, r) ndarray or None
Reduced-order linear state matrix.
H_ : (r, r(r+1)/2) ndarray or None
Reduced-order (compact) quadratic state matrix.
G_ : (r, r(r+1)(r+2)/6) ndarray or None
Reduced-order (compact) cubic state matrix.
B_ : (r, m) ndarray or None
Reduced-order input matrix.
Returns
-------
self
"""
self._clear()
operators = [c_, A_, H_, G_, B_]
# Save the low-dimensional basis. Sets self.n and self.r if given.
self.basis = basis
# Set the input dimension 'm'.
if 'B' in self.modelform:
if B_ is not None:
self.m = 1 if len(B_.shape) == 1 else B_.shape[1]
else:
self.m = 0
# Determine the ROM dimension 'r' if no basis was given.
if basis is None:
self.r = None
for op in operators:
if op is not None:
self.r = op.shape[0]
break
# Insert the operators. Raises exceptions if shapes are bad, etc.
        self.c_, self.A_, self.H_, self.G_, self.B_ = c_, A_, H_, G_, B_
return self
def __iter__(self):
for key in self.modelform:
yield getattr(self, f"{key}_")
def __eq__(self, other):
"""Two ROMs are equal if they are of the same type and have the same
bases and reduced-order operators.
"""
if self.__class__ != other.__class__:
return False
if self.modelform != other.modelform:
return False
if self.basis is None:
if other.basis is not None:
return False
else:
if other.basis is None:
return False
if not np.allclose(self.basis, other.basis):
return False
for opL, opR in zip(self, other):
if not (opL is opR is None) and opL != opR:
return False
return True
# Validation methods ------------------------------------------------------
def _check_operator_matches_modelform(self, operator, key):
"""Raise a TypeError if the given operator is incompatible with the
modelform.
Parameters
----------
operator : ndarray or None
Operator (ndarray, etc.) data to be attached as an attribute.
key : str
A single character from 'cAHGB', indicating which operator to set.
"""
if (key in self.modelform) and (operator is None):
raise TypeError(f"'{key}' in modelform requires {key}_ != None")
if (key not in self.modelform) and (operator is not None):
raise TypeError(f"'{key}' not in modelform requires {key}_ = None")
def _check_rom_operator_shape(self, operator, key):
"""Ensure that the given operator has the correct shape."""
# First, check that the required dimensions exist.
if self.r is None:
raise AttributeError("no reduced dimension 'r' (call fit())")
if key == 'B' and (self.m is None):
raise AttributeError("no input dimension 'm' (call fit())")
r, m = self.r, self.m
# Check operator shape.
if key == "c" and operator.shape != (r,):
raise ValueError(f"c_.shape = {operator.shape}, "
f"must be (r,) with r = {r}")
elif key == "A" and operator.shape != (r, r):
raise ValueError(f"A_.shape = {operator.shape}, "
f"must be (r, r) with r = {r}")
elif key == "H" and operator.shape != (r, r*(r + 1)//2):
raise ValueError(f"H_.shape = {operator.shape}, must be "
f"(r, r(r+1)/2) with r = {r}")
elif key == "G" and operator.shape != (r, r*(r + 1)*(r + 2)//6):
raise ValueError(f"G_.shape = {operator.shape}, must be "
f"(r, r(r+1)(r+2)/6) with r = {r}")
elif key == "B" and operator.shape != (r, m):
raise ValueError(f"B_.shape = {operator.shape}, must be "
f"(r, m) with r = {r}, m = {m}")
def _check_inputargs(self, u, argname):
"""Check that the modelform agrees with input arguments."""
if 'B' in self.modelform and u is None:
raise ValueError(f"argument '{argname}' required"
" since 'B' in modelform")
if 'B' not in self.modelform and u is not None:
raise ValueError(f"argument '{argname}' invalid"
" since 'B' in modelform")
def _check_is_trained(self):
"""Ensure that the model is trained and ready for prediction."""
try:
for key in self.modelform:
op = getattr(self, key+'_')
self._check_operator_matches_modelform(op, key)
self._check_rom_operator_shape(op, key)
except Exception as e:
raise AttributeError("model not trained (call fit())") from e
# Projection / reconstruction ---------------------------------------------
def _project_operators(self, known_operators):
"""Project known full-order operators to the reduced-order space.
Parameters
----------
known_operators : dict(str -> ndarray)
Dictionary of known full-order or reduced-order operators.
Corresponding reduced-order operators are computed directly
through projection; remaining operators are inferred.
Keys must match the modelform, values are ndarrays:
* 'c': (n,) constant term c.
* 'A': (n, n) linear state matrix A.
* 'H': (n, n**2) quadratic state matrix H.
* 'G': (n, n**3) cubic state matrix G.
* 'B': (n, m) input matrix B.
"""
# Do nothing if there are no operators to project.
if known_operators is None or len(known_operators) == 0:
return
        # If there is no basis, we must have only reduced-order operators.
if self.basis is None:
# Require r so we can tell between full and reduced order.
if self.r is None:
raise ValueError("dimension r required to use known operators")
elif not (all(op.shape[0] == self.r
for op in known_operators.values())):
raise ValueError("basis required "
"to project full-order operators")
# Validate the keys of the operator dictionary.
surplus = [repr(key)
for key in known_operators.keys()
if key not in self.modelform]
if surplus:
_noun = "key" + ('' if len(surplus) == 1 else 's')
raise KeyError(f"invalid operator {_noun} {', '.join(surplus)}")
# Project full-order operators.
if ('H' in self.modelform) or ('G' in self.modelform):
basis2 = np.kron(self.basis, self.basis)
if 'c' in known_operators: # Constant term.
c = known_operators['c'] # c = multiple of vector of ones.
if np.isscalar(c):
c = c * self.basis.sum(axis=0)
if c.shape[0] != self.r:
c = self.basis.T @ c
self.c_ = c
if 'A' in known_operators: # Linear state matrix.
A = known_operators['A']
if isinstance(A, str) and A.lower() in ("i", "id", "identity"):
A = 1
if np.isscalar(A): # A = multiple of identity.
A = A * np.eye(self.r)
if A.shape[0] != self.r:
A = self.basis.T @ A @ self.basis
self.A_ = A
if 'H' in known_operators: # Quadratic state matrix.
H = known_operators['H']
# TODO: fast projection.
# TODO: special case for q^2.
if H.shape[0] != self.r:
H = self.basis.T @ H @ basis2
self.H_ = H
if 'G' in known_operators: # Cubic state matrix.
G = known_operators['G']
# TODO: fast projection?
# TODO: special case for q^3.
if G.shape[0] != self.r:
G = self.basis.T @ G @ np.kron(self.basis, basis2)
self.G_ = G
if 'B' in known_operators: # Linear input matrix.
B = known_operators['B']
if B.ndim == 1:
B = B.reshape((-1, 1))
self.m = B.shape[1]
if B.shape[0] != self.r:
B = self.basis.T @ B
self.B_ = B
# Save keys of known operators.
self._projected_operators_ = ''.join(known_operators.keys())
def project(self, state, label="argument"):
"""Project a high-dimensional state to its low-dimensional
representation.
Parameters
----------
state : (n, ...) or (r, ...) ndarray
High- or low-dimensional state vector or a collection of these.
If state.shape[0] == r (already low-dimensional), do nothing.
label : str
Name for state (used only in error reporting).
Returns
-------
state_ : (r, ...) ndarray
Low-dimensional projection of state.
"""
if self.r is None:
raise AttributeError("reduced dimension not set")
if state.shape[0] not in (self.r, self.n):
if self.basis is None:
raise AttributeError("basis not set")
raise ValueError(f"{label} not aligned with basis")
return (self.basis.T @ state) if state.shape[0] == self.n else state
def reconstruct(self, state_, label="argument"):
"""Reconstruct a high-dimensional state from its low-dimensional
representation.
Parameters
----------
state_ : (r, ...) ndarray
Low-dimensional state vector or a collection of these.
label : str
Name for state_ (used only in error reporting).
Returns
-------
state : (n, ...) ndarray
High-dimensional reconstruction of state_.
"""
if self.basis is None:
raise AttributeError("basis not set")
if state_.shape[0] != self.r:
raise ValueError(f"{label} not aligned with basis")
return self.basis @ state_
# ROM evaluation ----------------------------------------------------------
def evaluate(self, state_, input_=None):
"""Evaluate the right-hand side of the model, i.e., the f() of
* g = f(q, u) (steady state)
* q_{j+1} = f(q_{j}, u_{j}) (discrete time)
* dq / dt = f(q(t), u(t)) (continuous time)
Parameters
----------
state_ : (r,) ndarray
Low-dimensional state vector.
input_ : (m,) ndarray or None
Input vector corresponding to the state.
Returns
-------
f(state_, input_): (r,) ndarray
Evaluation of the right-hand side of the model.
"""
out = np.zeros(self.r, dtype=float)
if 'c' in self.modelform:
out += self.c_()
if 'A' in self.modelform:
out += self.A_(state_)
if 'H' in self.modelform:
out += self.H_(state_)
if 'G' in self.modelform:
out += self.G_(state_)
if 'B' in self.modelform:
out += self.B_(input_)
return out
# TODO: jacobian(self, state_)
# Abstract public methods (must be implemented by child classes) ----------
@abc.abstractmethod
def fit(*args, **kwargs):
"""Train the reduced-order model with the specified data."""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def predict(*args, **kwargs):
"""Solve the reduced-order model under specified conditions."""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def save(*args, **kwargs):
"""Save the reduced-order structure / operators in HDF5 format."""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def load(*args, **kwargs):
"""Load a previously saved reduced-order model from an HDF5 file."""
raise NotImplementedError # pragma: no cover
class _BaseParametricROM(_BaseROM):
"""Base class for all parametric reduced-order model classes."""
# Must be specified by child classes.
_ModelClass = NotImplemented
# ModelClass properties ---------------------------------------------------
@property
def _LHS_ARGNAME(self): # pragma: no cover
return self._ModelClass._LHS_ARGNAME
@property
def _LHS_LABEL(self): # pragma: no cover
return self._ModelClass._LHS_LABEL
@property
def _STATE_LABEL(self): # pragma: no cover
return self._ModelClass._STATE_LABEL
@property
def _INPUT_LABEL(self): # pragma: no cover
return self._ModelClass._INPUT_LABEL
@property
def ModelClass(self):
"""Class of nonparametric ROM to represent this parametric ROM
at a particular parameter, a subclass of core._base._BaseROM:
>>> type(MyParametricROM(init_args).fit(fit_args)(parameter_value)).
"""
return self._ModelClass
# Constructor -------------------------------------------------------------
def __init__(self, modelform):
"""Set the modelform.
Parameters
----------
modelform : str
See _BaseROM.modelform.
"""
_BaseROM.__init__(self, modelform)
        # Validate the ModelClass.
if not issubclass(self.ModelClass, _BaseROM):
raise RuntimeError("invalid ModelClass "
f"'{self.ModelClass.__name__}'")
def _clear(self):
"""Set private attributes as None, erasing any previously stored basis,
dimensions, or ROM operators.
"""
_BaseROM._clear(self)
self.__p = None
# Properties: dimensions --------------------------------------------------
@property
def p(self):
"""Dimension of the parameter space."""
return self.__p
def _set_parameter_dimension(self, parameters):
"""Extract and save the dimension of the parameter space."""
shape = np.shape(parameters)
if len(shape) == 1:
self.__p = 1
elif len(shape) == 2:
self.__p = shape[1]
else:
raise ValueError("parameter values must be scalars or 1D arrays")
# Parametric evaluation ---------------------------------------------------
def __call__(self, parameter):
"""Construct a non-parametric ROM at the given parameter value."""
# Evaluate the parametric operators at the parameter value.
c_ = self.c_(parameter) if _isparametricop(self.c_) else self.c_
A_ = self.A_(parameter) if _isparametricop(self.A_) else self.A_
H_ = self.H_(parameter) if _isparametricop(self.H_) else self.H_
G_ = self.G_(parameter) if _isparametricop(self.G_) else self.G_
B_ = self.B_(parameter) if _isparametricop(self.B_) else self.B_
# Construct a nonparametric ROM with the evaluated operators.
rom = self.ModelClass(self.modelform)
return rom._set_operators(basis=self.basis,
c_=c_, A_=A_, H_=H_, G_=G_, B_=B_)
def evaluate(self, parameter, *args, **kwargs):
"""Evaluate the right-hand side of the model at the given parameter."""
return self(parameter).evaluate(*args, **kwargs)
def predict(self, parameter, *args, **kwargs):
"""Solve the reduced-order model at the given parameter."""
return self(parameter).predict(*args, **kwargs)
| [
"numpy.isscalar",
"numpy.allclose",
"numpy.zeros",
"numpy.shape",
"numpy.kron",
"numpy.eye"
] | [((19236, 19265), 'numpy.zeros', 'np.zeros', (['self.r'], {'dtype': 'float'}), '(self.r, dtype=float)\n', (19244, 19265), True, 'import numpy as np\n'), ((22697, 22717), 'numpy.shape', 'np.shape', (['parameters'], {}), '(parameters)\n', (22705, 22717), True, 'import numpy as np\n'), ((15045, 15076), 'numpy.kron', 'np.kron', (['self.basis', 'self.basis'], {}), '(self.basis, self.basis)\n', (15052, 15076), True, 'import numpy as np\n'), ((15232, 15246), 'numpy.isscalar', 'np.isscalar', (['c'], {}), '(c)\n', (15243, 15246), True, 'import numpy as np\n'), ((15611, 15625), 'numpy.isscalar', 'np.isscalar', (['A'], {}), '(A)\n', (15622, 15625), True, 'import numpy as np\n'), ((9821, 9857), 'numpy.allclose', 'np.allclose', (['self.basis', 'other.basis'], {}), '(self.basis, other.basis)\n', (9832, 9857), True, 'import numpy as np\n'), ((15692, 15706), 'numpy.eye', 'np.eye', (['self.r'], {}), '(self.r)\n', (15698, 15706), True, 'import numpy as np\n'), ((16371, 16398), 'numpy.kron', 'np.kron', (['self.basis', 'basis2'], {}), '(self.basis, basis2)\n', (16378, 16398), True, 'import numpy as np\n')] |
#File: setup.py
#!/usr/bin/python
from distutils.core import setup, Extension
# Third-party modules - we depend on numpy for everything
import numpy
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
pht_module = Extension('_bilateralfilter',
sources=['bilateralfilter_wrap.cxx',
'bilateralfilter.cpp',
'permutohedral.cpp'
],
extra_compile_args = ["-fopenmp"],
include_dirs = [numpy_include]
)
setup(name = 'bilateralfilter',
version = '0.1',
author = 'SWIG Docs',
description = 'Simple swig pht from docs',
ext_modules = [pht_module],
py_modules = ['bilateralfilter'],
)
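# Illustrative build command: python setup.py build_ext --inplace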
| [
"distutils.core.Extension",
"numpy.get_numpy_include",
"numpy.get_include",
"distutils.core.setup"
] | [((362, 545), 'distutils.core.Extension', 'Extension', (['"""_bilateralfilter"""'], {'sources': "['bilateralfilter_wrap.cxx', 'bilateralfilter.cpp', 'permutohedral.cpp']", 'extra_compile_args': "['-fopenmp']", 'include_dirs': '[numpy_include]'}), "('_bilateralfilter', sources=['bilateralfilter_wrap.cxx',\n 'bilateralfilter.cpp', 'permutohedral.cpp'], extra_compile_args=[\n '-fopenmp'], include_dirs=[numpy_include])\n", (371, 545), False, 'from distutils.core import setup, Extension\n'), ((733, 904), 'distutils.core.setup', 'setup', ([], {'name': '"""bilateralfilter"""', 'version': '"""0.1"""', 'author': '"""SWIG Docs"""', 'description': '"""Simple swig pht from docs"""', 'ext_modules': '[pht_module]', 'py_modules': "['bilateralfilter']"}), "(name='bilateralfilter', version='0.1', author='SWIG Docs',\n description='Simple swig pht from docs', ext_modules=[pht_module],\n py_modules=['bilateralfilter'])\n", (738, 904), False, 'from distutils.core import setup, Extension\n'), ((255, 274), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (272, 274), False, 'import numpy\n'), ((318, 343), 'numpy.get_numpy_include', 'numpy.get_numpy_include', ([], {}), '()\n', (341, 343), False, 'import numpy\n')] |
import tensorflow as tf
import numpy as np
import os, json, uuid
from flask import jsonify
embedding_dim = 256
units = 512
max_length = 20
attention_features_shape = 64
data_dir = "/data"
images_dir = os.path.join(data_dir, "images")
# model_dir = os.path.join("model")
model_dir = "/app/model"
ckp_path = os.path.join(model_dir, "ckpt-58")
annotation_file = os.path.join(model_dir, "captions_title_LECO2019.json")
checkpoint_path = model_dir
if (not os.path.exists(images_dir)):
os.mkdir(images_dir)
# VOCABULARY FROM CAPTIONS
with open(annotation_file, 'r') as f:
annotations = json.load(f)
all_captions = []
MAX_CAPTION_LENGTH = 100
for annot in annotations['annotations']:
caption = '<start> ' + annot['caption'] + ' <end>'
if (len(caption) < MAX_CAPTION_LENGTH):
all_captions.append(caption)
train_captions = all_captions
num_examples = 1298560
train_captions = train_captions[:num_examples]
top_k = 23000
tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=top_k,
oov_token="<unk>",
filters='!"#$%&()*+.,-/:;=?@[\]^_`{|}~ ')
tokenizer.fit_on_texts(train_captions)
train_seqs = tokenizer.texts_to_sequences(train_captions)
vocab_size = len(tokenizer.word_index)
# CREATE MODEL
class BahdanauAttention(tf.keras.Model):
def __init__(self, units):
super(BahdanauAttention, self).__init__()
self.W1 = tf.keras.layers.Dense(units)
self.W2 = tf.keras.layers.Dense(units)
self.V = tf.keras.layers.Dense(1)
def call(self, features, hidden):
hidden_with_time_axis = tf.expand_dims(hidden, 1)
score = tf.nn.tanh(self.W1(features) + self.W2(hidden_with_time_axis))
attention_weights = tf.nn.softmax(self.V(score), axis=1)
context_vector = attention_weights * features
context_vector = tf.reduce_sum(context_vector, axis=1)
return context_vector, attention_weights
class CNN_Encoder(tf.keras.Model):
def __init__(self, embedding_dim):
super(CNN_Encoder, self).__init__()
self.fc = tf.keras.layers.Dense(embedding_dim)
def call(self, x):
x = self.fc(x)
x = tf.nn.relu(x)
return x
class RNN_Decoder(tf.keras.Model):
def __init__(self, embedding_dim, units, vocab_size):
super(RNN_Decoder, self).__init__()
self.units = units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(self.units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
self.fc1 = tf.keras.layers.Dense(self.units)
self.fc2 = tf.keras.layers.Dense(vocab_size)
self.attention = BahdanauAttention(self.units)
def call(self, x, features, hidden):
context_vector, attention_weights = self.attention(features, hidden)
x = self.embedding(x)
x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
output, state = self.gru(x)
x = self.fc1(output)
x = tf.reshape(x, (-1, x.shape[2]))
x = self.fc2(x)
return x, state, attention_weights
def reset_state(self, batch_size):
return tf.zeros((batch_size, self.units))
encoder = CNN_Encoder(embedding_dim)
decoder = RNN_Decoder(embedding_dim, units, vocab_size)
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction='none')
def loss_function(real, pred):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_mean(loss_)
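# Note: the mask zeroes the loss wherever the target token id is 0 (padding),
# so padded positions do not contribute to the training signal.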
# IMPORT CHECKPOINT
ckpt = tf.train.Checkpoint(encoder=encoder,
decoder=decoder,
optimizer = optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=60)
start_epoch = 0
if ckpt_manager.latest_checkpoint:
status = ckpt.restore(ckpt_manager.latest_checkpoint)
# print (status)
start_epoch = int(ckpt_manager.latest_checkpoint.split('-')[-1])
# print (start_epoch)
status = ckpt.restore(ckp_path)
# EVALUATE
def evaluate(image_path):
attention_plot = np.zeros((max_length, attention_features_shape))
hidden = decoder.reset_state(batch_size=1)
temp_input = tf.expand_dims(load_image(image_path)[0], 0)
img_tensor_val = image_features_extract_model(temp_input)
img_tensor_val = tf.reshape(img_tensor_val, (img_tensor_val.shape[0], -1, img_tensor_val.shape[3]))
features = encoder(img_tensor_val)
dec_input = tf.expand_dims([tokenizer.word_index['<start>']], 0)
result = []
for i in range(max_length):
predictions, hidden, attention_weights = decoder(dec_input, features, hidden)
attention_plot[i] = tf.reshape(attention_weights, (-1, )).numpy()
predicted_id = tf.argmax(predictions[0]).numpy()
result.append(tokenizer.index_word[predicted_id])
if tokenizer.index_word[predicted_id] == '<end>':
return result, attention_plot
dec_input = tf.expand_dims([predicted_id], 0)
attention_plot = attention_plot[:len(result), :]
return result, attention_plot
# LOAD AND MODIFY IMAGE
def load_image(image_path):
img = tf.io.read_file(image_path)
img = tf.image.decode_jpeg(img, channels=3)
img = tf.image.resize(img, (299, 299))
img = tf.keras.applications.inception_v3.preprocess_input(img)
return img, image_path
image_model = tf.keras.applications.InceptionV3(include_top=False,
weights='imagenet')
new_input = image_model.input
hidden_layer = image_model.layers[-1].output
image_features_extract_model = tf.keras.Model(new_input, hidden_layer)
# GENERATE CAPTION
def generate_caption(file):
file_id = str(uuid.uuid4())
image_path = os.path.join(images_dir, file_id + ".jpg")
file.save(image_path)
result_list, _ = evaluate(image_path)
result = " ".join(result_list[:-1])
return jsonify(**{"status": 200, "response": result,})
| [
"os.mkdir",
"tensorflow.reduce_sum",
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"flask.jsonify",
"os.path.join",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.preprocessing.text.Tokenizer",
"tensorflow.nn.relu",
"tensorflow.train.Checkpoint",
"os.path.exi... | [((214, 246), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""'], {}), "(data_dir, 'images')\n", (226, 246), False, 'import os, json, uuid\n'), ((326, 360), 'os.path.join', 'os.path.join', (['model_dir', '"""ckpt-58"""'], {}), "(model_dir, 'ckpt-58')\n", (338, 360), False, 'import os, json, uuid\n'), ((380, 435), 'os.path.join', 'os.path.join', (['model_dir', '"""captions_title_LECO2019.json"""'], {}), "(model_dir, 'captions_title_LECO2019.json')\n", (392, 435), False, 'import os, json, uuid\n'), ((997, 1117), 'tensorflow.keras.preprocessing.text.Tokenizer', 'tf.keras.preprocessing.text.Tokenizer', ([], {'num_words': 'top_k', 'oov_token': '"""<unk>"""', 'filters': '"""!"#$%&()*+.,-/:;=?@[\\\\]^_`{|}~ """'}), '(num_words=top_k, oov_token=\'<unk>\',\n filters=\'!"#$%&()*+.,-/:;=?@[\\\\]^_`{|}~ \')\n', (1034, 1117), True, 'import tensorflow as tf\n'), ((3481, 3507), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (3505, 3507), True, 'import tensorflow as tf\n'), ((3523, 3609), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': '"""none"""'}), "(from_logits=True, reduction=\n 'none')\n", (3568, 3609), True, 'import tensorflow as tf\n'), ((3865, 3939), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'encoder': 'encoder', 'decoder': 'decoder', 'optimizer': 'optimizer'}), '(encoder=encoder, decoder=decoder, optimizer=optimizer)\n', (3884, 3939), True, 'import tensorflow as tf\n'), ((4014, 4079), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['ckpt', 'checkpoint_path'], {'max_to_keep': '(60)'}), '(ckpt, checkpoint_path, max_to_keep=60)\n', (4040, 4079), True, 'import tensorflow as tf\n'), ((5745, 5817), 'tensorflow.keras.applications.InceptionV3', 'tf.keras.applications.InceptionV3', ([], {'include_top': '(False)', 'weights': '"""imagenet"""'}), "(include_top=False, weights='imagenet')\n", (5778, 5817), True, 'import tensorflow as tf\n'), ((5978, 6017), 'tensorflow.keras.Model', 'tf.keras.Model', (['new_input', 'hidden_layer'], {}), '(new_input, hidden_layer)\n', (5992, 6017), True, 'import tensorflow as tf\n'), ((476, 502), 'os.path.exists', 'os.path.exists', (['images_dir'], {}), '(images_dir)\n', (490, 502), False, 'import os, json, uuid\n'), ((508, 528), 'os.mkdir', 'os.mkdir', (['images_dir'], {}), '(images_dir)\n', (516, 528), False, 'import os, json, uuid\n'), ((621, 633), 'json.load', 'json.load', (['f'], {}), '(f)\n', (630, 633), False, 'import os, json, uuid\n'), ((3746, 3778), 'tensorflow.cast', 'tf.cast', (['mask'], {'dtype': 'loss_.dtype'}), '(mask, dtype=loss_.dtype)\n', (3753, 3778), True, 'import tensorflow as tf\n'), ((3808, 3829), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss_'], {}), '(loss_)\n', (3822, 3829), True, 'import tensorflow as tf\n'), ((4411, 4459), 'numpy.zeros', 'np.zeros', (['(max_length, attention_features_shape)'], {}), '((max_length, attention_features_shape))\n', (4419, 4459), True, 'import numpy as np\n'), ((4656, 4743), 'tensorflow.reshape', 'tf.reshape', (['img_tensor_val', '(img_tensor_val.shape[0], -1, img_tensor_val.shape[3])'], {}), '(img_tensor_val, (img_tensor_val.shape[0], -1, img_tensor_val.\n shape[3]))\n', (4666, 4743), True, 'import tensorflow as tf\n'), ((4800, 4852), 'tensorflow.expand_dims', 'tf.expand_dims', (["[tokenizer.word_index['<start>']]", '(0)'], {}), "([tokenizer.word_index['<start>']], 0)\n", (4814, 4852), True, 'import tensorflow as 
tf\n'), ((5511, 5538), 'tensorflow.io.read_file', 'tf.io.read_file', (['image_path'], {}), '(image_path)\n', (5526, 5538), True, 'import tensorflow as tf\n'), ((5550, 5587), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['img'], {'channels': '(3)'}), '(img, channels=3)\n', (5570, 5587), True, 'import tensorflow as tf\n'), ((5599, 5631), 'tensorflow.image.resize', 'tf.image.resize', (['img', '(299, 299)'], {}), '(img, (299, 299))\n', (5614, 5631), True, 'import tensorflow as tf\n'), ((5643, 5699), 'tensorflow.keras.applications.inception_v3.preprocess_input', 'tf.keras.applications.inception_v3.preprocess_input', (['img'], {}), '(img)\n', (5694, 5699), True, 'import tensorflow as tf\n'), ((6120, 6162), 'os.path.join', 'os.path.join', (['images_dir', "(file_id + '.jpg')"], {}), "(images_dir, file_id + '.jpg')\n", (6132, 6162), False, 'import os, json, uuid\n'), ((6282, 6328), 'flask.jsonify', 'jsonify', ([], {}), "(**{'status': 200, 'response': result})\n", (6289, 6328), False, 'from flask import jsonify\n'), ((1510, 1538), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['units'], {}), '(units)\n', (1531, 1538), True, 'import tensorflow as tf\n'), ((1554, 1582), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['units'], {}), '(units)\n', (1575, 1582), True, 'import tensorflow as tf\n'), ((1597, 1621), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (1618, 1621), True, 'import tensorflow as tf\n'), ((1690, 1715), 'tensorflow.expand_dims', 'tf.expand_dims', (['hidden', '(1)'], {}), '(hidden, 1)\n', (1704, 1715), True, 'import tensorflow as tf\n'), ((1927, 1964), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['context_vector'], {'axis': '(1)'}), '(context_vector, axis=1)\n', (1940, 1964), True, 'import tensorflow as tf\n'), ((2153, 2189), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['embedding_dim'], {}), '(embedding_dim)\n', (2174, 2189), True, 'import tensorflow as tf\n'), ((2253, 2266), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (2263, 2266), True, 'import tensorflow as tf\n'), ((2467, 2519), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['vocab_size', 'embedding_dim'], {}), '(vocab_size, embedding_dim)\n', (2492, 2519), True, 'import tensorflow as tf\n'), ((2536, 2653), 'tensorflow.keras.layers.GRU', 'tf.keras.layers.GRU', (['self.units'], {'return_sequences': '(True)', 'return_state': '(True)', 'recurrent_initializer': '"""glorot_uniform"""'}), "(self.units, return_sequences=True, return_state=True,\n recurrent_initializer='glorot_uniform')\n", (2555, 2653), True, 'import tensorflow as tf\n'), ((2774, 2807), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self.units'], {}), '(self.units)\n', (2795, 2807), True, 'import tensorflow as tf\n'), ((2824, 2857), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['vocab_size'], {}), '(vocab_size)\n', (2845, 2857), True, 'import tensorflow as tf\n'), ((3189, 3220), 'tensorflow.reshape', 'tf.reshape', (['x', '(-1, x.shape[2])'], {}), '(x, (-1, x.shape[2]))\n', (3199, 3220), True, 'import tensorflow as tf\n'), ((3334, 3368), 'tensorflow.zeros', 'tf.zeros', (['(batch_size, self.units)'], {}), '((batch_size, self.units))\n', (3342, 3368), True, 'import tensorflow as tf\n'), ((3675, 3697), 'tensorflow.math.equal', 'tf.math.equal', (['real', '(0)'], {}), '(real, 0)\n', (3688, 3697), True, 'import tensorflow as tf\n'), ((5315, 5348), 'tensorflow.expand_dims', 'tf.expand_dims', (['[predicted_id]', '(0)'], {}), 
'([predicted_id], 0)\n', (5329, 5348), True, 'import tensorflow as tf\n'), ((6090, 6102), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6100, 6102), False, 'import os, json, uuid\n'), ((3073, 3106), 'tensorflow.expand_dims', 'tf.expand_dims', (['context_vector', '(1)'], {}), '(context_vector, 1)\n', (3087, 3106), True, 'import tensorflow as tf\n'), ((5023, 5059), 'tensorflow.reshape', 'tf.reshape', (['attention_weights', '(-1,)'], {}), '(attention_weights, (-1,))\n', (5033, 5059), True, 'import tensorflow as tf\n'), ((5095, 5120), 'tensorflow.argmax', 'tf.argmax', (['predictions[0]'], {}), '(predictions[0])\n', (5104, 5120), True, 'import tensorflow as tf\n')] |
import os
import star
import numpy as np
import tables
from numpy.linalg import lstsq
from phd.thunderstorm import atmosphere
import matplotlib.pyplot as plt
def get_minimal_field(height = 0.0):
"""
:param height: meters
:return:
"""
material = star.electron.PredefinedMaterials.AIR_DRY_NEAR_SEA_LEVEL
density = atmosphere.ISACalculator.density(height) # kg/m3
data = star.electron.calculate_estar_table(material)
stopPower = data["stopping_power_total"].min()
energy_index = data["stopping_power_total"].argmin()
energy = data["energy"][energy_index]
return stopPower*density
def get_group(path):
with tables.open_file(path) as h5file:
result = {}
for group in h5file.root:
table = h5file.get_node(group, "stacking_simple")
# data = table.read()
field = table.attrs["values_gdml_field"][0]
height = table.attrs["values_gdml_height"][0]
key = height
if key in result.keys():
result[key].append((field, group._v_name))
else:
result[key] = [(field, group._v_name)]
for value in result.values():
value.sort(key=lambda x: x[0])
return result
def plot_minimal_field_production(path, output="plot"):
if not os.path.exists(output):
os.mkdir(output)
groups = get_group(path)
bins = np.arange(-500.0, 501, 1)
x = bins[:-1]
with tables.open_file(path) as h5file:
for height, value in groups.items():
plt.clf()
for field, group_name in value:
table: tables.Table = h5file.get_node("/{}".format(group_name), "stacking_simple")
data = table.read()
number = table.attrs["values_macros_number"]
temp, _ = np.histogram(data["z"], bins=bins)
temp = np.cumsum(temp[::-1])
y = temp / number
plt.plot(x, y, label="{:.2f}kV/m".format(field*1e4))
path = os.path.join(output, "{}m.png".format(height))
plt.xlabel("Height, meters")
plt.ylabel("Cumulative number of electron")
plt.legend()
# plt.yscale("log")
plt.tight_layout()
plt.savefig(path, format="png", transparent=True, dpi = 600)
return 0
def find_minimal_field(path):
groups = get_group(path)
bins = np.arange(-500.0, 501, 1)
x = bins[:-1]
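    # M below is the design matrix [1, x] for straight-line least-squares fits.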
M = x[:, np.newaxis] ** [0, 1]
result = []
dtype = np.dtype(
[
("height", "d"),
("field", "d"),
("baseline", "d"),
("coverage", np.bool_)
]
)
with tables.open_file(path) as h5file:
for height, value in groups.items():
res_height = []
baseline = get_minimal_field(height)*1e-4
for field, group_name in value:
table: tables.Table = h5file.get_node("/{}".format(group_name), "stacking_simple")
data = table.read()
number = table.attrs["values_macros_number"]
temp, _ = np.histogram(data["z"], bins=bins)
temp = np.cumsum(temp[::-1])
y = temp / number
res_height.append((field, y))
field, y = res_height[-1]
if y[-1] <= 2:
result.append((height, field, baseline, False))
else:
prev_field = res_height[-1][0]
for field, y in res_height[::-1]:
p, res, rnk, s = lstsq(M, y)
k = p[1]
if k<0.001:
field = (field + prev_field)/2
result.append((height, field, baseline, True))
break
return np.array(result, dtype=dtype)
| [
"os.mkdir",
"phd.thunderstorm.atmosphere.ISACalculator.density",
"numpy.linalg.lstsq",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.legend",
"numpy.dtype",
"os.path.exists",
"numpy.cumsum",
"numpy.histogram",
"numpy.array",
"numpy.arange",
"star.electron.calculate_estar_table",
"matplotlib.py... | [((339, 379), 'phd.thunderstorm.atmosphere.ISACalculator.density', 'atmosphere.ISACalculator.density', (['height'], {}), '(height)\n', (371, 379), False, 'from phd.thunderstorm import atmosphere\n'), ((400, 445), 'star.electron.calculate_estar_table', 'star.electron.calculate_estar_table', (['material'], {}), '(material)\n', (435, 445), False, 'import star\n'), ((1412, 1437), 'numpy.arange', 'np.arange', (['(-500.0)', '(501)', '(1)'], {}), '(-500.0, 501, 1)\n', (1421, 1437), True, 'import numpy as np\n'), ((2424, 2449), 'numpy.arange', 'np.arange', (['(-500.0)', '(501)', '(1)'], {}), '(-500.0, 501, 1)\n', (2433, 2449), True, 'import numpy as np\n'), ((2531, 2621), 'numpy.dtype', 'np.dtype', (["[('height', 'd'), ('field', 'd'), ('baseline', 'd'), ('coverage', np.bool_)]"], {}), "([('height', 'd'), ('field', 'd'), ('baseline', 'd'), ('coverage',\n np.bool_)])\n", (2539, 2621), True, 'import numpy as np\n'), ((3809, 3838), 'numpy.array', 'np.array', (['result'], {'dtype': 'dtype'}), '(result, dtype=dtype)\n', (3817, 3838), True, 'import numpy as np\n'), ((658, 680), 'tables.open_file', 'tables.open_file', (['path'], {}), '(path)\n', (674, 680), False, 'import tables\n'), ((1323, 1345), 'os.path.exists', 'os.path.exists', (['output'], {}), '(output)\n', (1337, 1345), False, 'import os\n'), ((1355, 1371), 'os.mkdir', 'os.mkdir', (['output'], {}), '(output)\n', (1363, 1371), False, 'import os\n'), ((1465, 1487), 'tables.open_file', 'tables.open_file', (['path'], {}), '(path)\n', (1481, 1487), False, 'import tables\n'), ((2700, 2722), 'tables.open_file', 'tables.open_file', (['path'], {}), '(path)\n', (2716, 2722), False, 'import tables\n'), ((1556, 1565), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1563, 1565), True, 'import matplotlib.pyplot as plt\n'), ((2093, 2121), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Height, meters"""'], {}), "('Height, meters')\n", (2103, 2121), True, 'import matplotlib.pyplot as plt\n'), ((2134, 2177), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative number of electron"""'], {}), "('Cumulative number of electron')\n", (2144, 2177), True, 'import matplotlib.pyplot as plt\n'), ((2190, 2202), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2200, 2202), True, 'import matplotlib.pyplot as plt\n'), ((2247, 2265), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2263, 2265), True, 'import matplotlib.pyplot as plt\n'), ((2278, 2336), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'format': '"""png"""', 'transparent': '(True)', 'dpi': '(600)'}), "(path, format='png', transparent=True, dpi=600)\n", (2289, 2336), True, 'import matplotlib.pyplot as plt\n'), ((1832, 1866), 'numpy.histogram', 'np.histogram', (["data['z']"], {'bins': 'bins'}), "(data['z'], bins=bins)\n", (1844, 1866), True, 'import numpy as np\n'), ((1890, 1911), 'numpy.cumsum', 'np.cumsum', (['temp[::-1]'], {}), '(temp[::-1])\n', (1899, 1911), True, 'import numpy as np\n'), ((3127, 3161), 'numpy.histogram', 'np.histogram', (["data['z']"], {'bins': 'bins'}), "(data['z'], bins=bins)\n", (3139, 3161), True, 'import numpy as np\n'), ((3185, 3206), 'numpy.cumsum', 'np.cumsum', (['temp[::-1]'], {}), '(temp[::-1])\n', (3194, 3206), True, 'import numpy as np\n'), ((3569, 3580), 'numpy.linalg.lstsq', 'lstsq', (['M', 'y'], {}), '(M, y)\n', (3574, 3580), False, 'from numpy.linalg import lstsq\n')] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pa
# local_conn = mu.get_conn()
# local_conn = create_engine('mysql+pymysql://root:root@localhost:3306/test?charset=utf8')
# show all columns
pa.set_option('display.max_columns', None)
# show all rows
pa.set_option('display.max_rows', None)
path = r'C:\Users\AL\Desktop\test\text\text_data.csv'
text_df = pa.read_csv(path)
text_df.info()
text_df.head()
text_df.shape
text_df.count()
temp_data = text_df.groupby('user_id').apply(lambda t: t[t.buy_time == t.buy_time.max()])
temp_data.shape
text_df.info()
use_clo = ['send_text', 's_time', 's_u', 're_u']
temp_data = text_df[text_df['s_u'] == 47]
temp_data.size
text_df.groupby('text_t').size()
temp_data = text_df[use_clo][text_df['s_u'] == 47].head(5)
temp_data.head()
text_df[text_df['s_u'] == 47].head(5)
temp_data = text_df[((text_df['s_u'] == 47) & (text_df['re_u'] == 4003)) | (
(text_df['s_u'] == 4003) & (text_df['re_u'] == 47))]
temp_data = text_df[(text_df['s_u'] == 47) | (text_df['re_u'] == 4003)]
null_data = text_df[text_df['send_text'].isna()]
not_null_data = text_df[text_df['send_text'].notna()]
temp_data.groupby('text_t').size()
temp_data.groupby('re_u').size()
temp_data.groupby('text_t').count()
temp_data.groupby('text_t')['text_t'].count()
temp_data.groupby('text_t').agg({'s_time': np.mean, 'text_t': np.size})
# text_df.to_sql('text_data', con=local_conn, if_exists='replace')
df1 = pa.DataFrame({'key': ['A', 'B', 'C', 'D'],
'value': np.random.randn(4)})
df2 = pa.DataFrame({'key': ['B', 'D', 'D', 'E'],
'value': np.random.randn(4)})
pa.merge(df1, df2, on='key')
pa.concat([df1, df2])
pa.concat([df1, df2]).drop_duplicates()
# temp_data.nlargest(10 + 1, columns='re_u').tail(10)
path_random = r'C:\Users\AL\Desktop\test\test.csv'
test_data_df = pa.read_csv(path_random)
test_data_df.head()
# get duplicate rows
test_data_df[test_data_df.duplicated()]
np.sum(test_data_df.duplicated())
# drop duplicate rows
test_data_df.drop_duplicates(inplace=True)
test_data_df.isnull
test_data_df.isna
test_data_df.prod
np.sum(test_data_df.isnull(), axis=1)
test_data_df.apply(lambda x: sum(x.isnull()) / len(x), axis=1)
# drop missing values
test_data_df.dropna(inplace=True)
# fill missing values with the column means
test_data_df.fillna(test_data_df.mean())
# fill missing values in different columns with column-specific defaults
test_data_df.fillna(value={"name1": 123, "name2": test_data_df.name2.mean()})
# forward-fill missing values from the previous row
test_data_df.fillna(method="ffill")
# outlier handling
s_mean = test_data_df['age'].mean()
s_std = test_data_df['age'].std()
s_mean + s_std * 2
s_mean - s_std * 2
test_data_df['age'] > s_mean + s_std * 2
test_data_df['age'] < s_mean - s_std * 2
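# Sketch (my addition, not in the original script): the two boolean masks above
# can be combined to actually drop the 2-sigma outliers:
# mask = ((test_data_df['age'] <= s_mean + s_std * 2)
#         & (test_data_df['age'] >= s_mean - s_std * 2))
# test_data_df = test_data_df[mask]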
test_data_df['age'].plot(kind="hist")
plt.show()
text_df.dtypes
text_df.head()
text_df.describe()
# delete a columns
text_df = text_df.drop(['diff_date'], axis=1)
# text_df = text_df.drop(columns=['time_stamp'], axis=1)
# convert the Unix timestamp to a datetime
text_df['s_time'] = pa.to_datetime(text_df['s_time'], unit='s')
# text_df['s_time'] = pa.to_timedelta(text_df['s_time'],unit='s')
# date format conversion
# method 1
# text_df['s_time'] = text_df['s_time'].apply(lambda x : x.strftime('%Y-%m-%d'))
# method 2: the period alias M means month, Q quarter, A year and D day; these are the most common.
text_df['test_time'] = text_df['s_time'].dt.to_period('D')
text_df['test_price'] = text_df['s_u'].astype(float)
text_df['diff_date'] = pa.datetime.today() - text_df['s_time']
text_df['diff_year'] = pa.datetime.today().year - text_df['s_time'].dt.year
# apply
text_df['total_price'] = text_df[['test_price', 're_u']].apply(np.prod, axis=1)
# groupby
text_df_group = text_df.groupby(by='test_time').count()
text_df_group = text_df.groupby(by='test_time').sum()
# take some columns
col_n = ['test_time', 'test_price', 'total_price']
temp_df = pa.DataFrame(text_df, columns=col_n)
temp_df.head()
temp_df = temp_df.groupby(by='test_time').sum()
temp_df.index = pa.to_datetime(temp_df.index)
# percentage change of each row relative to the previous one
temp_df['总价变化率%'] = temp_df['total_price'].pct_change()
temp_df.rename(columns={'总价变化率': '总价变化率%'}, inplace=True)
temp_df.drop(index=[7], axis=1)
# rolling mean over every 5 rows
temp_df['sma_5'] = temp_df['total_price'].rolling(5).mean()
temp_df['sma_10'] = temp_df['total_price'].rolling(10).mean()
temp_df[['sma_5', 'sma_10']].plot()
plt.show()
# shift a column up/down by one row
temp_df['total_price_before'] = temp_df['总价变化率%'].shift(-1)
temp_df['total_price_diff%'] = (temp_df['总价变化率%'].shift(-1) - temp_df['total_price']) / temp_df['总价变化率%']
| [
"pandas.DataFrame",
"matplotlib.pyplot.show",
"numpy.random.randn",
"pandas.read_csv",
"pandas.datetime.today",
"pandas.merge",
"pandas.to_datetime",
"pandas.set_option",
"pandas.concat"
] | [((202, 244), 'pandas.set_option', 'pa.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (215, 244), True, 'import pandas as pa\n'), ((253, 292), 'pandas.set_option', 'pa.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (266, 292), True, 'import pandas as pa\n'), ((358, 375), 'pandas.read_csv', 'pa.read_csv', (['path'], {}), '(path)\n', (369, 375), True, 'import pandas as pa\n'), ((1619, 1647), 'pandas.merge', 'pa.merge', (['df1', 'df2'], {'on': '"""key"""'}), "(df1, df2, on='key')\n", (1627, 1647), True, 'import pandas as pa\n'), ((1649, 1670), 'pandas.concat', 'pa.concat', (['[df1, df2]'], {}), '([df1, df2])\n', (1658, 1670), True, 'import pandas as pa\n'), ((1833, 1857), 'pandas.read_csv', 'pa.read_csv', (['path_random'], {}), '(path_random)\n', (1844, 1857), True, 'import pandas as pa\n'), ((2646, 2656), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2654, 2656), True, 'import matplotlib.pyplot as plt\n'), ((2865, 2908), 'pandas.to_datetime', 'pa.to_datetime', (["text_df['s_time']"], {'unit': '"""s"""'}), "(text_df['s_time'], unit='s')\n", (2879, 2908), True, 'import pandas as pa\n'), ((3666, 3702), 'pandas.DataFrame', 'pa.DataFrame', (['text_df'], {'columns': 'col_n'}), '(text_df, columns=col_n)\n', (3678, 3702), True, 'import pandas as pa\n'), ((3783, 3812), 'pandas.to_datetime', 'pa.to_datetime', (['temp_df.index'], {}), '(temp_df.index)\n', (3797, 3812), True, 'import pandas as pa\n'), ((4149, 4159), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4157, 4159), True, 'import matplotlib.pyplot as plt\n'), ((3258, 3277), 'pandas.datetime.today', 'pa.datetime.today', ([], {}), '()\n', (3275, 3277), True, 'import pandas as pa\n'), ((1498, 1516), 'numpy.random.randn', 'np.random.randn', (['(4)'], {}), '(4)\n', (1513, 1516), True, 'import numpy as np\n'), ((1598, 1616), 'numpy.random.randn', 'np.random.randn', (['(4)'], {}), '(4)\n', (1613, 1616), True, 'import numpy as np\n'), ((1671, 1692), 'pandas.concat', 'pa.concat', (['[df1, df2]'], {}), '([df1, df2])\n', (1680, 1692), True, 'import pandas as pa\n'), ((3321, 3340), 'pandas.datetime.today', 'pa.datetime.today', ([], {}), '()\n', (3338, 3340), True, 'import pandas as pa\n')] |
#!/usr/bin/env python3
# coding=utf-8
"""
A potential tutorial for GRUCell
https://towardsdatascience.com/encoder-decoder-model-for-multistep-time-series-forecasting-using-pytorch-5d54c6af6e60
"""
import copy
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
from ..mimic3.dataset import HypotensionDataset, HypotensionWithBCProbDataset
from ..random_search import RandomSearch
from .utils import create_fc_layer
class HypotensionGRULightning(pl.LightningModule):
monitor_metric = 'val_loss'
monitor_mode = 'min'
def __init__(self, hparams):
super().__init__()
self.save_hyperparameters(hparams)
self.init_model()
def init_model(self):
# Initialize the action embedding
act_in_dim = HypotensionDataset.NUM_FLUID_BINS + HypotensionDataset.NUM_VASO_BINS
self.act_embedding = nn.Identity()
if self.hparams.act_n_layer > 0:
self.act_embedding = create_fc_layer(
n_in=act_in_dim,
n_layer=self.hparams.act_n_layer,
n_hidden=self.hparams.act_n_hidden,
n_out=self.hparams.act_n_out,
dropout=self.hparams.act_dropout,
is_3d_input=True,
)
act_in_dim = self.hparams.act_n_out
self.gru = nn.GRU(
len(HypotensionDataset.all_cols) + act_in_dim,
self.hparams.n_hidden,
self.hparams.n_layer,
batch_first=True,
dropout=self.hparams.dropout,
)
# Arch for final MLP: with BN, Dropout and ELU...
self.out = create_fc_layer(
n_in=self.hparams.n_hidden,
n_layer=self.hparams.fc_n_layer,
n_hidden=self.hparams.fc_n_hidden,
n_out=2 * HypotensionDataset.len_f if self.hparams.obj == 'gaussian'
else HypotensionDataset.len_f,
dropout=self.hparams.fc_dropout,
in_bn=True,
is_3d_input=True,
)
def forward(self, x):
        raise NotImplementedError('Do not call this')
def unzip(self, output):
num_f = output.shape[-1] // 2
return output[..., :num_f], output[..., num_f:]
def simulate(self, exp_states, actions_fn=None, rollout=False, sample_stdev=0.):
'''
exp_states: [B, T, D] the expert states
actions_fn: take in cur obs and gru hidden state up to t-1
and output action indexes
        sample_stdev: when sampling from the Gaussian, the ratio applied to the
            predicted stdev. If 0., just return the mean; if 1., sample from the
            full Gaussian. At test time we usually want it < 1 to get more-likely
            outcomes, much like sampling from a tempered prior in a VAE.
'''
hiddens = None
if actions_fn is None:
exp_act = HypotensionDataset.extract_cur_a(exp_states, form='twohot')
states = exp_states.clone()
T = exp_states.shape[1]
preds = []
for t in range(T-1):
# Run through DQN to get actions
if actions_fn is None:
cur_act = exp_act[:, t, :]
else:
cur_act_idx = actions_fn(states[:, t, :])
cur_act = HypotensionDataset.convert_act_idx_to_twohot(cur_act_idx)
# Convert into 3-d to pass through MLP...
cur_act_emb = self.act_embedding(cur_act.unsqueeze(dim=1)).squeeze_(dim=1)
cur_s_and_a = torch.cat([states[:, t, :], cur_act_emb], dim=-1).unsqueeze(dim=1)
out, hiddens = self.gru(cur_s_and_a, hx=hiddens)
pred = self.out(out)
val = pred[:, 0, :] # for l1 or l2 loss
if 'obj' in self.hparams and self.hparams.obj == 'gaussian':
mu, logvar = self.unzip(val)
val = self.reparametrize(mu, logvar, sample_stdev=sample_stdev)
preds.append(val)
if rollout:
### Modify next-state
states[:, (t+1), HypotensionDataset.f_idxes] = val
# Copy and modify next-state summary actions
states[:, (t+1), HypotensionDataset.f_actions_idxes] = \
HypotensionDataset.update_act_summary(states[:, :(t+1)], cur_act)[:, HypotensionDataset.f_actions_idxes]
if rollout:
return states
preds = torch.stack(preds, dim=1)
states[:, 1:, HypotensionDataset.f_idxes] = preds
return states
@staticmethod
def reparametrize(mu, logvar, sample_stdev=1.):
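        # Reparameterization trick: z = mu + sample_stdev * eps * std with
        # eps ~ N(0, 1), which keeps the sample differentiable w.r.t. mu and logvar.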
std = logvar.mul(0.5).exp_()
eps = sample_stdev * std.new(std.size()).normal_()
return eps.mul(std).add_(mu)
def training_step(self, batch, batch_idx, hiddens=None):
ret_dict = self._step(batch, hiddens, is_training=True)
avg_reg_loss = (ret_dict['sum_reg_loss'] / ret_dict['num_reg'])
loss = avg_reg_loss
# alpha = self.hparams.reg_alpha
# loss = alpha * avg_reg_loss + (1. - alpha) * avg_ind_loss
logs = {'train_loss': loss, 'train_reg_loss': avg_reg_loss}
self.log_dict(logs)
result = {'loss': loss, 'log': logs, 'progress_bar': logs}
return result
def validation_step(self, batch, batch_nb):
print('Come in')
batch_cp = copy.deepcopy(batch)
del batch
return self._step(batch_cp)
def test_step(self, batch, batch_nb):
return self._step(batch)
def _step(self, batch, hiddens=None, is_training=False):
x_list = batch['x_list']
x_len = [v.size(0) for v in x_list]
x_pad = pad_sequence(x_list, batch_first=True)
# Get current action in each x
cur_act = HypotensionDataset.extract_cur_a(x_pad, form='twohot')
cur_act = self.act_embedding(cur_act)
# Append a last time dimension
cur_act = torch.cat([cur_act, cur_act.new_zeros(cur_act.shape[0], 1, cur_act.shape[2])], dim=1)
x_pad = torch.cat([x_pad, cur_act], dim=-1)
# Just change to always use one_step_pred
preds = self.one_step_ahead_pred(x_pad, x_len, hiddens=hiddens)
propensity = None
if 'bc_prob' in batch and self.hparams.iptw:
cur_act_idx = HypotensionDataset.extract_cur_a(x_pad, form='act_idx')
bc_prob = pad_sequence(batch['bc_prob'], batch_first=True)
bc_prob = bc_prob[:, :-1, :]
propensity = bc_prob.gather(-1, cur_act_idx.unsqueeze(-1)).squeeze(-1)
# Calculate marginal probability to calculate stabilized weights
marginal_p = HypotensionWithBCProbDataset.get_marginal_prob(self.device)
tmp = marginal_p.unsqueeze(0).unsqueeze(0).expand(*cur_act_idx.shape[:2], marginal_p.shape[0])
marginal = tmp.gather(-1, cur_act_idx.unsqueeze(-1)).squeeze(-1)
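            # Stabilized IPTW weights are w_t = p_marginal(a_t) / p(a_t | s_t); using the
            # marginal action probability in the numerator keeps the weights' variance
            # bounded compared with plain inverse propensity 1 / p(a_t | s_t).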
return self.cal_loss(preds, x_pad, propensity, marginal)
def one_step_ahead_pred(self, x_pad, x_len, hiddens=None):
# Implement BPTT here if needed
x_packed = pack_padded_sequence(
x_pad, x_len, enforce_sorted=False, batch_first=True)
out, hiddens = self.gru(x_packed, hiddens)
out_padded, _ = pad_packed_sequence(out, batch_first=True)
# ^-- [batch_size, max_len, hidden dim]
pred = self.out(out_padded)
return pred[:, :-1, :] # Ignore the last pred
def rollout(self, x_pad, tf_ratio=0.):
preds = []
hiddens = None
in_data = x_pad[:, 0:1, :].clone() # First time step
for t in range(x_pad.shape[1] - 1):
out, hiddens = self.gru(in_data, hx=hiddens)
pred = self.out(out)
preds.append(pred)
val = pred # for l1 or l2 loss
if self.hparams.obj == 'gaussian':
mu, logvar = self.unzip(pred)
val = self.reparametrize(mu, logvar)
if tf_ratio > 0:
cond = torch.bernoulli(
self.hparams.tf_ratio * torch.ones(*val.shape, device=self.device))
val = torch.where(
cond == 1, x_pad[:, (t + 1):(t + 2), HypotensionDataset.f_idxes], val)
if self.hparams.bptt_steps > 0 and t % self.hparams.bptt_steps == 0:
hiddens = hiddens.detach()
val = val.detach()
in_data = x_pad[:, (t + 1):(t + 2), :].clone() # Later time step
in_data[:, :, HypotensionDataset.f_idxes] = val
preds = torch.cat(preds, dim=1)
return preds
def cal_loss(self, pred, x_pad, propensity=None, marginal=None):
'''
pred has only T-1 time steps, while x_pad has T time steps
'''
y = x_pad[:, 1:, HypotensionDataset.f_idxes]
# Note ind is also padded with 0, so rest of dim isn't included in loss
ind = x_pad[:, 1:, HypotensionDataset.f_ind_idxes]
# Loss averaged per-prediction
ret_dict = {}
if self.hparams.obj == 'gaussian':
mu, logvar = self.unzip(pred)
# Calculate unnormalized prediction Gaussian loss
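            # i.e. the Gaussian negative log-likelihood up to the constant 0.5*log(2*pi):
            # 0.5 * (log sigma^2 + (y - mu)^2 / sigma^2), masked by the observation indicator.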
loss = (0.5 * (logvar + (y - mu) ** 2 * torch.exp(-logvar))) * ind
elif self.hparams.obj == 'l2':
loss = F.mse_loss(pred, y, reduction='none') * ind
else: # smooth_l1
loss = F.smooth_l1_loss(pred, y, reduction='none') * ind
num_reg = torch.sum(ind)
if propensity is not None:
# To zero-out the padding time, we check if the sum of ind is zero
is_pad = (torch.sum(ind, dim=-1) == 0).float()
weights = (marginal / (propensity + 1e-4)) * (1. - is_pad)
loss *= weights.unsqueeze(-1)
num_reg = torch.sum(ind * weights.unsqueeze(-1))
loss = torch.sum(loss)
ret_dict['sum_reg_loss'] = loss
ret_dict['num_reg'] = num_reg
return ret_dict
def validation_epoch_end(self, outputs):
return self._epoch_end(outputs, prefix='val')
def test_epoch_end(self, outputs):
return self._epoch_end(outputs, prefix='test')
def _epoch_end(self, outputs, prefix='val'):
def get_loss(name='reg'):
all_loss = torch.stack([x[f'sum_{name}_loss'] for x in outputs])
all_num = torch.stack([x[f'num_{name}'] for x in outputs])
avg_loss = torch.sum(all_loss) / torch.sum(all_num)
return avg_loss
avg_reg_loss = get_loss('reg')
tensorboard_logs = {f'{prefix}_loss': avg_reg_loss}
self.log_dict(tensorboard_logs)
result = {'log': tensorboard_logs}
result.update(tensorboard_logs)
return result
def configure_optimizers(self): # REQUIRED
return torch.optim.Adam(self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.wd)
def train_dataloader(self):
cls = HypotensionWithBCProbDataset if self.hparams.iptw else HypotensionDataset
return cls.make_loader(
split='train',
batch_size=self.hparams.batch_size,
shuffle=True,
num_workers=self.hparams.get('workers', 0),
debug=self.hparams.name.startswith('debug'),
)
def val_dataloader(self):
cls = HypotensionWithBCProbDataset if self.hparams.iptw else HypotensionDataset
return cls.make_loader(
split='val',
batch_size=2 * self.hparams.batch_size,
shuffle=False,
num_workers=self.hparams.get('workers', 0),
debug=self.hparams.name.startswith('debug'),
)
def test_dataloader(self):
cls = HypotensionWithBCProbDataset if self.hparams.iptw else HypotensionDataset
return cls.make_loader(
split='test',
batch_size=2 * self.hparams.batch_size,
shuffle=False,
num_workers=self.hparams.get('workers', 0),
debug=self.hparams.name.startswith('debug'),
)
@classmethod
def get_rs_loader(cls, args):
rs = RandomSearch(hparams=args, seed=args.seed)
# rs.add_rs_hparams('seed', short_name='s', chose_from=[321])
rs.add_rs_hparams('seed', short_name='s', gen=lambda hparams: rs.np_gen.randint(200))
rs.add_rs_hparams('lr', chose_from=[5e-4, 1e-3])
rs.add_rs_hparams('wd', short_name='wd', chose_from=[0., 1e-5])
rs.add_rs_hparams('batch_size', short_name='bs', chose_from=[128, 256])
rs.add_rs_hparams('n_hidden', short_name='nh', chose_from=[64])
rs.add_rs_hparams('n_layer', short_name='nl', chose_from=[1])
rs.add_rs_hparams('dropout', short_name='dr',
gen=lambda hparams: 0. if hparams.n_layer <= 1 else rs.np_gen.choice([0.3, 0.5]))
rs.add_rs_hparams('fc_n_hidden', short_name='fnh', chose_from=[256, 384, 512])
rs.add_rs_hparams('fc_n_layer', short_name='fnl', chose_from=[2])
rs.add_rs_hparams('fc_dropout', short_name='fdr', chose_from=[0.15]) # better than 0.3
rs.add_rs_hparams('act_n_hidden', short_name='anh',
gen=lambda hparams: 0 if hparams.act_n_layer <= 1 else rs.np_gen.choice([64, 128]))
rs.add_rs_hparams('act_n_layer', short_name='anl', chose_from=[0, 1, 2]) # No 4
rs.add_rs_hparams('act_n_out', short_name='ano', chose_from=[32, 64, 96])
rs.add_rs_hparams('act_dropout', short_name='adr',
gen=lambda hparams: 0. if hparams.act_n_layer <= 1 else rs.np_gen.choice([0.3]))
# rs.add_rs_hparams('tf_epochs', short_name='tfe', chose_from=[10, 20, 40])
# rs.add_rs_hparams('tf_ratio', short_name='tfr', chose_from=[0.])
return rs
@staticmethod
def add_model_specific_args(parser):
"""
Specify the hyperparams for this LightningModule
"""
# MODEL specific
parser.add_argument('--workers', type=int, default=0)
parser.add_argument('--epochs', type=int, default=300)
parser.add_argument('--patience', type=int, default=50)
parser.add_argument('--lr', default=1e-3, type=float)
parser.add_argument('--wd', default=0., type=float)
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--n_hidden', default=16, type=int)
parser.add_argument('--n_layer', default=1, type=int)
parser.add_argument('--dropout', default=0., type=float)
parser.add_argument('--fc_n_hidden', default=16, type=int)
parser.add_argument('--fc_n_layer', default=2, type=int)
parser.add_argument('--fc_dropout', default=0.1, type=float)
parser.add_argument('--act_n_hidden', default=16, type=int)
parser.add_argument('--act_n_layer', default=2, type=int)
parser.add_argument('--act_n_out', default=16, type=int)
parser.add_argument('--act_dropout', default=0.5, type=float)
parser.add_argument('--bptt_steps', default=0, type=int)
# Teacher forcing flag......
# parser.add_argument('--tf_epochs', default=1, type=int,
        #                     help='In first few epochs, use teacher forcing to train')
# parser.add_argument('--tf_ratio', default=0.5, type=float,
# help='After first X epochs of teacher forcing, do student '
# 'learning with random masks of 0.5. Maybe anneal it to 0?')
parser.add_argument('--obj', default='gaussian', type=str, choices=['gaussian', 'l1', 'l2'],
help='The objective would be Gaussian likelihood, l1 loss or l2 loss')
parser.add_argument('--iptw', default=0, type=int,
                            help='do inverse propensity weighting or not')
# Indicator loss: is it helpful?
# parser.add_argument('--reg_alpha', default=0.9, type=float,
# help='If 1, no indicator loss. If 0 no regression loss. '
# 'Should be > 0.5.')
# parser.add_argument('--use_ind_in_test', default=0, type=int,
# help='If 1, use the indicator prediction to roll-out.')
return parser
def trainer_args(self):
return dict(
gradient_clip_val=1,
stochastic_weight_avg=True, # Did not test if it improves but seems cool
)
class HypotensionLRLightning(HypotensionGRULightning):
def init_model(self):
self.model = nn.Linear(len(HypotensionDataset.all_cols), 2*HypotensionDataset.len_f)
def forward(self, x):
return self.model(x)
def _step(self, batch):
x_list = batch['x_list']
x_pad = pad_sequence(x_list, batch_first=True)
# ^-- [batch_size, max_len, input dim]
out = self.model(x_pad)
# ^-- [batch_size, max_len, out dim]
return self.cal_loss(out, x_pad)
@classmethod
def get_rs_loader(cls, args):
rs = RandomSearch(hparams=args, seed=args.seed)
# rs.add_rs_hparams('seed', short_name='s', chose_from=[321])
rs.add_rs_hparams('seed', short_name='s', gen=lambda hparams: np.random.randint(100))
rs.add_rs_hparams('lr', chose_from=[2e-4, 5e-4, 1e-3, 2e-3])
rs.add_rs_hparams('batch_size', short_name='bs', chose_from=[16, 32, 64])
return rs
@staticmethod
def add_model_specific_args(parser):
"""
Specify the hyperparams for this LightningModule
"""
# MODEL specific
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--lr', default=1e-3, type=float)
parser.add_argument('--batch_size', default=16, type=int)
return parser
def trainer_args(self):
return dict(
stochastic_weight_avg=True, # Did not test if it improves but seems cool
)
| [
"torch.ones",
"copy.deepcopy",
"torch.stack",
"torch.nn.utils.rnn.pad_sequence",
"torch.where",
"torch.nn.functional.mse_loss",
"torch.nn.functional.smooth_l1_loss",
"torch.cat",
"torch.exp",
"numpy.random.randint",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.utils.rnn.pack_padded_sequ... | [((1003, 1016), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (1014, 1016), True, 'import torch.nn as nn\n'), ((4503, 4528), 'torch.stack', 'torch.stack', (['preds'], {'dim': '(1)'}), '(preds, dim=1)\n', (4514, 4528), False, 'import torch\n'), ((5430, 5450), 'copy.deepcopy', 'copy.deepcopy', (['batch'], {}), '(batch)\n', (5443, 5450), False, 'import copy\n'), ((5737, 5775), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['x_list'], {'batch_first': '(True)'}), '(x_list, batch_first=True)\n', (5749, 5775), False, 'from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence\n'), ((6095, 6130), 'torch.cat', 'torch.cat', (['[x_pad, cur_act]'], {'dim': '(-1)'}), '([x_pad, cur_act], dim=-1)\n', (6104, 6130), False, 'import torch\n'), ((7147, 7221), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (['x_pad', 'x_len'], {'enforce_sorted': '(False)', 'batch_first': '(True)'}), '(x_pad, x_len, enforce_sorted=False, batch_first=True)\n', (7167, 7221), False, 'from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence\n'), ((7310, 7352), 'torch.nn.utils.rnn.pad_packed_sequence', 'pad_packed_sequence', (['out'], {'batch_first': '(True)'}), '(out, batch_first=True)\n', (7329, 7352), False, 'from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence\n'), ((8596, 8619), 'torch.cat', 'torch.cat', (['preds'], {'dim': '(1)'}), '(preds, dim=1)\n', (8605, 8619), False, 'import torch\n'), ((9498, 9512), 'torch.sum', 'torch.sum', (['ind'], {}), '(ind)\n', (9507, 9512), False, 'import torch\n'), ((9877, 9892), 'torch.sum', 'torch.sum', (['loss'], {}), '(loss)\n', (9886, 9892), False, 'import torch\n'), ((16742, 16780), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['x_list'], {'batch_first': '(True)'}), '(x_list, batch_first=True)\n', (16754, 16780), False, 'from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence\n'), ((6438, 6486), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (["batch['bc_prob']"], {'batch_first': '(True)'}), "(batch['bc_prob'], batch_first=True)\n", (6450, 6486), False, 'from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence\n'), ((10297, 10350), 'torch.stack', 'torch.stack', (["[x[f'sum_{name}_loss'] for x in outputs]"], {}), "([x[f'sum_{name}_loss'] for x in outputs])\n", (10308, 10350), False, 'import torch\n'), ((10373, 10421), 'torch.stack', 'torch.stack', (["[x[f'num_{name}'] for x in outputs]"], {}), "([x[f'num_{name}'] for x in outputs])\n", (10384, 10421), False, 'import torch\n'), ((8176, 8254), 'torch.where', 'torch.where', (['(cond == 1)', 'x_pad[:, t + 1:t + 2, HypotensionDataset.f_idxes]', 'val'], {}), '(cond == 1, x_pad[:, t + 1:t + 2, HypotensionDataset.f_idxes], val)\n', (8187, 8254), False, 'import torch\n'), ((10445, 10464), 'torch.sum', 'torch.sum', (['all_loss'], {}), '(all_loss)\n', (10454, 10464), False, 'import torch\n'), ((10467, 10485), 'torch.sum', 'torch.sum', (['all_num'], {}), '(all_num)\n', (10476, 10485), False, 'import torch\n'), ((3609, 3658), 'torch.cat', 'torch.cat', (['[states[:, t, :], cur_act_emb]'], {'dim': '(-1)'}), '([states[:, t, :], cur_act_emb], dim=-1)\n', (3618, 3658), False, 'import torch\n'), ((9340, 9377), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['pred', 'y'], {'reduction': '"""none"""'}), "(pred, y, reduction='none')\n", (9350, 9377), True, 'from torch.nn import functional as F\n'), ((9429, 9472), 
'torch.nn.functional.smooth_l1_loss', 'F.smooth_l1_loss', (['pred', 'y'], {'reduction': '"""none"""'}), "(pred, y, reduction='none')\n", (9445, 9472), True, 'from torch.nn import functional as F\n'), ((17195, 17217), 'numpy.random.randint', 'np.random.randint', (['(100)'], {}), '(100)\n', (17212, 17217), True, 'import numpy as np\n'), ((8110, 8152), 'torch.ones', 'torch.ones', (['*val.shape'], {'device': 'self.device'}), '(*val.shape, device=self.device)\n', (8120, 8152), False, 'import torch\n'), ((9649, 9671), 'torch.sum', 'torch.sum', (['ind'], {'dim': '(-1)'}), '(ind, dim=-1)\n', (9658, 9671), False, 'import torch\n'), ((9255, 9273), 'torch.exp', 'torch.exp', (['(-logvar)'], {}), '(-logvar)\n', (9264, 9273), False, 'import torch\n')] |
# Authors: <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
#
# Copyright (c) 2020. Johns Hopkins University - All rights reserved.
import os
import numpy as np
import torch.utils.data as data
from PIL import Image
from albumentations import Compose
from natsort import natsorted
from dataset.preprocess import augment, normalization
from dataset.stereo_albumentation import RGBShiftStereo, RandomBrightnessContrastStereo, random_crop
class KITTIBaseDataset(data.Dataset):
def __init__(self, datadir, split='train'):
super(KITTIBaseDataset, self).__init__()
self.datadir = datadir
self.split = split
if split == 'train' or split == 'validation' or split == 'validation_all':
self.sub_folder = 'training/'
elif split == 'test':
self.sub_folder = 'testing/'
# to be set by child classes
self.left_fold = None
self.right_fold = None
self.disp_fold = None
self._augmentation()
def _read_data(self):
assert self.left_fold is not None
self.left_data = natsorted([os.path.join(self.datadir, self.sub_folder, self.left_fold, img) for img in
os.listdir(os.path.join(self.datadir, self.sub_folder, self.left_fold)) if
img.find('_10') > -1])
self.right_data = [img.replace(self.left_fold, self.right_fold) for img in self.left_data]
self.disp_data = [img.replace(self.left_fold, self.disp_fold) for img in self.left_data]
self._split_data()
def _split_data(self):
train_val_frac = 0.95
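        # 95% of the *_10 frames go to the training split, the remaining 5% to validation.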
# split data
if len(self.left_data) > 1:
if self.split == 'train':
self.left_data = self.left_data[:int(len(self.left_data) * train_val_frac)]
self.right_data = self.right_data[:int(len(self.right_data) * train_val_frac)]
self.disp_data = self.disp_data[:int(len(self.disp_data) * train_val_frac)]
elif self.split == 'validation':
self.left_data = self.left_data[int(len(self.left_data) * train_val_frac):]
self.right_data = self.right_data[int(len(self.right_data) * train_val_frac):]
self.disp_data = self.disp_data[int(len(self.disp_data) * train_val_frac):]
def _augmentation(self):
if self.split == 'train':
self.transformation = Compose([
RGBShiftStereo(always_apply=True, p_asym=0.5),
RandomBrightnessContrastStereo(always_apply=True, p_asym=0.5)
])
elif self.split == 'validation' or self.split == 'test' or self.split == 'validation_all':
self.transformation = None
else:
raise Exception("Split not recognized")
def __len__(self):
return len(self.left_data)
def __getitem__(self, idx):
input_data = {}
# left
left_fname = self.left_data[idx]
left = np.array(Image.open(left_fname)).astype(np.uint8)
input_data['left'] = left
# right
right_fname = self.right_data[idx]
right = np.array(Image.open(right_fname)).astype(np.uint8)
input_data['right'] = right
# disp
if not self.split == 'test': # no disp for test files
disp_fname = self.disp_data[idx]
            disp = np.array(Image.open(disp_fname)).astype(np.float64) / 256.
            input_data['disp'] = disp
            input_data['occ_mask'] = np.zeros_like(disp).astype(bool)
if self.split == 'train':
input_data = random_crop(200, 640, input_data, self.split)
input_data = augment(input_data, self.transformation)
else:
input_data = normalization(**input_data)
return input_data
class KITTI2015Dataset(KITTIBaseDataset):
def __init__(self, datadir, split='train'):
super(KITTI2015Dataset, self).__init__(datadir, split)
self.left_fold = 'image_2/'
self.right_fold = 'image_3/'
self.disp_fold = 'disp_occ_0/' # we read disp data with occlusion since we compute occ directly
self._read_data()
class KITTI2012Dataset(KITTIBaseDataset):
def __init__(self, datadir, split='train'):
super(KITTI2012Dataset, self).__init__(datadir, split)
self.left_fold = 'colored_0/'
self.right_fold = 'colored_1/'
self.disp_fold = 'disp_occ/' # we read disp data with occlusion since we compute occ directly
self._read_data()
class KITTIDataset(KITTIBaseDataset):
"""
Merged KITTI dataset with 2015 and 2012 data
"""
def __init__(self, datadir, split='train'):
super(KITTIDataset, self).__init__(datadir, split)
self.left_fold_2015 = 'image_2'
self.right_fold_2015 = 'image_3'
self.disp_fold_2015 = 'disp_occ_0' # we read disp data with occlusion since we compute occ directly
self.preprend_2015 = '2015'
self.left_fold_2012 = 'colored_0'
self.right_fold_2012 = 'colored_1'
        self.disp_fold_2012 = 'disp_occ'  # we read disp data with occlusion since we compute occ directly
self.preprend_2012 = '2012'
self._read_data()
def _read_data(self):
assert self.left_fold_2015 is not None
assert self.left_fold_2012 is not None
left_data_2015 = [os.path.join(self.datadir, self.preprend_2015, self.sub_folder, self.left_fold_2015, img) for
img in os.listdir(os.path.join(self.datadir, '2015', self.sub_folder, self.left_fold_2015)) if
img.find('_10') > -1]
left_data_2015 = natsorted(left_data_2015)
right_data_2015 = [img.replace(self.left_fold_2015, self.right_fold_2015) for img in left_data_2015]
disp_data_2015 = [img.replace(self.left_fold_2015, self.disp_fold_2015) for img in left_data_2015]
left_data_2012 = [os.path.join(self.datadir, self.preprend_2012, self.sub_folder, self.left_fold_2012, img) for
img in os.listdir(os.path.join(self.datadir, '2012', self.sub_folder, self.left_fold_2012)) if
img.find('_10') > -1]
left_data_2012 = natsorted(left_data_2012)
right_data_2012 = [img.replace(self.left_fold_2012, self.right_fold_2012) for img in left_data_2012]
disp_data_2012 = [img.replace(self.left_fold_2012, self.disp_fold_2012) for img in left_data_2012]
self.left_data = natsorted(left_data_2015 + left_data_2012)
self.right_data = natsorted(right_data_2015 + right_data_2012)
self.disp_data = natsorted(disp_data_2015 + disp_data_2012)
self._split_data()
| [
"numpy.zeros_like",
"dataset.stereo_albumentation.RandomBrightnessContrastStereo",
"dataset.stereo_albumentation.random_crop",
"PIL.Image.open",
"dataset.preprocess.normalization",
"os.path.join",
"dataset.preprocess.augment",
"natsort.natsorted",
"dataset.stereo_albumentation.RGBShiftStereo"
] | [((5667, 5692), 'natsort.natsorted', 'natsorted', (['left_data_2015'], {}), '(left_data_2015)\n', (5676, 5692), False, 'from natsort import natsorted\n'), ((6224, 6249), 'natsort.natsorted', 'natsorted', (['left_data_2012'], {}), '(left_data_2012)\n', (6233, 6249), False, 'from natsort import natsorted\n'), ((6492, 6534), 'natsort.natsorted', 'natsorted', (['(left_data_2015 + left_data_2012)'], {}), '(left_data_2015 + left_data_2012)\n', (6501, 6534), False, 'from natsort import natsorted\n'), ((6561, 6605), 'natsort.natsorted', 'natsorted', (['(right_data_2015 + right_data_2012)'], {}), '(right_data_2015 + right_data_2012)\n', (6570, 6605), False, 'from natsort import natsorted\n'), ((6631, 6673), 'natsort.natsorted', 'natsorted', (['(disp_data_2015 + disp_data_2012)'], {}), '(disp_data_2015 + disp_data_2012)\n', (6640, 6673), False, 'from natsort import natsorted\n'), ((3673, 3713), 'dataset.preprocess.augment', 'augment', (['input_data', 'self.transformation'], {}), '(input_data, self.transformation)\n', (3680, 3713), False, 'from dataset.preprocess import augment, normalization\n'), ((3753, 3780), 'dataset.preprocess.normalization', 'normalization', ([], {}), '(**input_data)\n', (3766, 3780), False, 'from dataset.preprocess import augment, normalization\n'), ((5379, 5473), 'os.path.join', 'os.path.join', (['self.datadir', 'self.preprend_2015', 'self.sub_folder', 'self.left_fold_2015', 'img'], {}), '(self.datadir, self.preprend_2015, self.sub_folder, self.\n left_fold_2015, img)\n', (5391, 5473), False, 'import os\n'), ((5936, 6030), 'os.path.join', 'os.path.join', (['self.datadir', 'self.preprend_2012', 'self.sub_folder', 'self.left_fold_2012', 'img'], {}), '(self.datadir, self.preprend_2012, self.sub_folder, self.\n left_fold_2012, img)\n', (5948, 6030), False, 'import os\n'), ((1092, 1156), 'os.path.join', 'os.path.join', (['self.datadir', 'self.sub_folder', 'self.left_fold', 'img'], {}), '(self.datadir, self.sub_folder, self.left_fold, img)\n', (1104, 1156), False, 'import os\n'), ((3601, 3646), 'dataset.stereo_albumentation.random_crop', 'random_crop', (['(200)', '(640)', 'input_data', 'self.split'], {}), '(200, 640, input_data, self.split)\n', (3612, 3646), False, 'from dataset.stereo_albumentation import RGBShiftStereo, RandomBrightnessContrastStereo, random_crop\n'), ((2442, 2487), 'dataset.stereo_albumentation.RGBShiftStereo', 'RGBShiftStereo', ([], {'always_apply': '(True)', 'p_asym': '(0.5)'}), '(always_apply=True, p_asym=0.5)\n', (2456, 2487), False, 'from dataset.stereo_albumentation import RGBShiftStereo, RandomBrightnessContrastStereo, random_crop\n'), ((2505, 2566), 'dataset.stereo_albumentation.RandomBrightnessContrastStereo', 'RandomBrightnessContrastStereo', ([], {'always_apply': '(True)', 'p_asym': '(0.5)'}), '(always_apply=True, p_asym=0.5)\n', (2535, 2566), False, 'from dataset.stereo_albumentation import RGBShiftStereo, RandomBrightnessContrastStereo, random_crop\n'), ((2983, 3005), 'PIL.Image.open', 'Image.open', (['left_fname'], {}), '(left_fname)\n', (2993, 3005), False, 'from PIL import Image\n'), ((3143, 3166), 'PIL.Image.open', 'Image.open', (['right_fname'], {}), '(right_fname)\n', (3153, 3166), False, 'from PIL import Image\n'), ((3497, 3516), 'numpy.zeros_like', 'np.zeros_like', (['disp'], {}), '(disp)\n', (3510, 3516), True, 'import numpy as np\n'), ((5517, 5589), 'os.path.join', 'os.path.join', (['self.datadir', '"""2015"""', 'self.sub_folder', 'self.left_fold_2015'], {}), "(self.datadir, '2015', self.sub_folder, self.left_fold_2015)\n", (5529, 
5589), False, 'import os\n'), ((6074, 6146), 'os.path.join', 'os.path.join', (['self.datadir', '"""2012"""', 'self.sub_folder', 'self.left_fold_2012'], {}), "(self.datadir, '2012', self.sub_folder, self.left_fold_2012)\n", (6086, 6146), False, 'import os\n'), ((1215, 1274), 'os.path.join', 'os.path.join', (['self.datadir', 'self.sub_folder', 'self.left_fold'], {}), '(self.datadir, self.sub_folder, self.left_fold)\n', (1227, 1274), False, 'import os\n'), ((3374, 3396), 'PIL.Image.open', 'Image.open', (['disp_fname'], {}), '(disp_fname)\n', (3384, 3396), False, 'from PIL import Image\n')] |
from numpy import array, isclose, pi
from scipy.constants import epsilon_0
from ..egs import egs_force
def test_egs_force():
"""Test the calculation of the bare egs force."""
r = 2.0
alpha = 1.3616
lambda_p = 1.778757e-09
lambda_m = 4.546000e-09
charge = 1.440961e-09
c_const = charge**2 / (4.0 * pi * epsilon_0)
pot_mat = array([c_const * 0.5, 1.0 + alpha, 1.0 - alpha, 1.0 / lambda_m, 1.0 / lambda_p, 1.0e-14])
potential, force = egs_force(r, pot_mat)
assert isclose(potential, -0.9067719924627385)
assert isclose(force, 270184640.33105946)
# def test_update_params():
# # TODO: write a test for update_params
# pass
| [
"numpy.isclose",
"numpy.array"
] | [((358, 449), 'numpy.array', 'array', (['[c_const * 0.5, 1.0 + alpha, 1.0 - alpha, 1.0 / lambda_m, 1.0 / lambda_p, 1e-14\n ]'], {}), '([c_const * 0.5, 1.0 + alpha, 1.0 - alpha, 1.0 / lambda_m, 1.0 /\n lambda_p, 1e-14])\n', (363, 449), False, 'from numpy import array, isclose, pi\n'), ((506, 545), 'numpy.isclose', 'isclose', (['potential', '(-0.9067719924627385)'], {}), '(potential, -0.9067719924627385)\n', (513, 545), False, 'from numpy import array, isclose, pi\n'), ((558, 592), 'numpy.isclose', 'isclose', (['force', '(270184640.33105946)'], {}), '(force, 270184640.33105946)\n', (565, 592), False, 'from numpy import array, isclose, pi\n')] |
"""Model size metrics
"""
import numpy as np
from . import nonzero, dtype2bits
def model_size(model, as_bits=False):
"""Returns absolute and nonzero model size
Arguments:
model {torch.nn.Module} -- Network to compute model size over
Keyword Arguments:
as_bits {bool} -- Whether to account for the size of dtype
Returns:
int -- Total number of weight & bias params
        int -- Of total_params, exactly how many are nonzero
"""
total_params = 0
nonzero_params = 0
for tensor in model.parameters():
t = np.prod(tensor.shape)
nz = nonzero(tensor.detach().cpu().numpy())
if as_bits:
bits = dtype2bits[tensor.dtype]
t *= bits
nz *= bits
total_params += t
nonzero_params += nz
return int(total_params), int(nonzero_params)
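

# Minimal usage sketch (my addition; torchvision and resnet18 are assumptions,
# not part of this module):
#   import torchvision
#   model = torchvision.models.resnet18()
#   total, nz = model_size(model)
#   print(f"sparsity: {1 - nz / total:.2%}")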
| [
"numpy.prod"
] | [((574, 595), 'numpy.prod', 'np.prod', (['tensor.shape'], {}), '(tensor.shape)\n', (581, 595), True, 'import numpy as np\n')] |
import random
import numpy as np
from collections import deque
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from memory import ReplayMemory
from model_noisy import *
from utils import *
from config import *
import pdb
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
def __init__(self, action_size, epsilon=1.0, load_model=False, model_path=None):
self.load_model = load_model
self.action_size = action_size
# These are hyper parameters for the DQN
self.discount_factor = 0.99
self.epsilon = epsilon
self.epsilon_min = 0.01
self.explore_step = 1000000
self.epsilon_decay = (self.epsilon - self.epsilon_min) / self.explore_step
self.train_start = 100000
self.update_target = 1000
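        # Note: get_action below is purely greedy over the noisy Q-values (NoisyNet
        # replaces epsilon-greedy exploration), so the epsilon schedule above is
        # effectively unused inside this class.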
# Generate the memory
self.memory = ReplayMemory()
# Create the policy net and the target net
self.policy_net = NoisyDQN(action_size)
self.policy_net.to(device)
self.target_net = NoisyDQN(action_size)
self.target_net.to(device)
self.optimizer = optim.Adam(params=self.policy_net.parameters(), lr=learning_rate)
# initialize target net
self.update_target_net()
if self.load_model:
self.policy_net = torch.load(model_path, map_location=device)
self.target_net = torch.load(model_path, map_location=device)
self.target_net.eval()
# after some time interval update the target net to be same with policy net
def update_target_net(self):
self.target_net.load_state_dict(self.policy_net.state_dict())
def get_action(self, state):
state = torch.from_numpy(state).unsqueeze(0).to(device)
#pdb.set_trace()
with torch.no_grad():
a = self.policy_net(state).argmax(dim=1).detach().cpu().numpy()[0]
return a
# pick samples randomly from replay memory (with batch_size)
def train_policy_net(self, frame):
mini_batch = self.memory.sample_mini_batch(frame)
mini_batch = np.array(mini_batch).transpose()
history = np.stack(mini_batch[0], axis=0) #shape: (batch_size,5,84,84)
states = np.float32(history[:, :4, :, :]) / 255. #current state consists of frame(0 to 3)
actions = list(mini_batch[1])
rewards = list(mini_batch[2])
next_states = np.float32(history[:, 1:, :, :]) / 255. #next state consists of frame(1 to 4)
dones = mini_batch[3] # checks if the game is over
#pdb.set_trace()
current_q_values = QValues.get_current(self.policy_net, states, actions)
next_q_values = QValues.get_next(self.target_net, next_states, dones)
rewards = torch.from_numpy(np.float32(np.array(rewards))).to(device)
target_q_values = (next_q_values * self.discount_factor) + rewards
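        # The Bellman target above is y = r + gamma * max_a' Q_target(s', a');
        # next_q_values is already 0 for terminal states, so y reduces to the reward there.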
loss = F.mse_loss(current_q_values, target_q_values.unsqueeze(1))
#loss = F.smooth_l1_loss(current_q_values, target_q_values.unsqueeze(1))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.policy_net.reset_noise()
self.target_net.reset_noise()
return loss
class QValues():
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@staticmethod
def get_current(policy_net, states, actions):
states = torch.from_numpy(states).to(device)
actions = torch.from_numpy(np.array(actions)).to(device)
return policy_net(states).gather(dim=1, index=actions.unsqueeze(-1))
@staticmethod
# find q_values of states that are NOT terminal states
# q_values of terminal states are kept at 0
def get_next(target_net, next_states, dones):
next_states = torch.from_numpy(next_states).to(device)
dones = torch.from_numpy(dones.astype(bool)).to(device)
non_final_state_locations = (dones == False)
non_final_states = next_states[non_final_state_locations]
batch_size = next_states.shape[0]
values = torch.zeros(batch_size).to(QValues.device)
values[non_final_state_locations] = target_net(non_final_states).max(dim=1)[0].detach()
return values
| [
"numpy.stack",
"memory.ReplayMemory",
"numpy.array",
"numpy.float32"
] | [((857, 871), 'memory.ReplayMemory', 'ReplayMemory', ([], {}), '()\n', (869, 871), False, 'from memory import ReplayMemory\n'), ((1978, 2009), 'numpy.stack', 'np.stack', (['mini_batch[0]'], {'axis': '(0)'}), '(mini_batch[0], axis=0)\n', (1986, 2009), True, 'import numpy as np\n'), ((2050, 2082), 'numpy.float32', 'np.float32', (['history[:, :4, :, :]'], {}), '(history[:, :4, :, :])\n', (2060, 2082), True, 'import numpy as np\n'), ((2211, 2243), 'numpy.float32', 'np.float32', (['history[:, 1:, :, :]'], {}), '(history[:, 1:, :, :])\n', (2221, 2243), True, 'import numpy as np\n'), ((1932, 1952), 'numpy.array', 'np.array', (['mini_batch'], {}), '(mini_batch)\n', (1940, 1952), True, 'import numpy as np\n'), ((3177, 3194), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (3185, 3194), True, 'import numpy as np\n'), ((2551, 2568), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (2559, 2568), True, 'import numpy as np\n')] |
# Using chrisjmccormick's github for the basic word2vec import
from gensim import utils, matutils
from itertools import chain
import logging
from six import string_types
import numpy as np
import os
import sys
import pandas as pd
import time
from os.path import exists
from os import mkdir
from models import vocabulary_model, load_model, MODELS
## Modified version of gensim; it may be worth checking exactly what the changes are (the goal is mainly to allow the vanilla version)
def most_similar(model, positive=None, negative=None, topn=10, restrict_vocab=None, indexer=None, ignore=True):
if positive is None:
positive = []
if negative is None:
negative = []
model.init_sims()
if isinstance(positive, string_types) and not negative:
# allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
positive = [positive]
# add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
positive = [
(word, 1.0) if isinstance(word, string_types + (np.ndarray,)) else word
for word in positive
]
negative = [
(word, -1.0) if isinstance(word, string_types + (np.ndarray,)) else word
for word in negative
]
# compute the weighted average of all words
all_words, mean = set(), []
for word, weight in positive + negative:
if isinstance(word, np.ndarray):
mean.append(weight * word)
else:
mean.append(weight * model.word_vec(word, use_norm=True))
if word in model.vocab:
all_words.add(model.vocab[word].index)
if not mean:
raise ValueError("cannot compute similarity with no input")
mean = matutils.unitvec(np.array(mean).mean(axis=0)).astype(np.float32)
if indexer is not None:
return indexer.most_similar(mean, topn)
limited = model.vectors_norm if restrict_vocab is None else model.vectors_norm[:restrict_vocab]
dists = np.dot(limited, mean)
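    # dists holds the cosine similarity of every (unit-norm) vocab vector to the unit
    # offset vector built from b - a + c, i.e. the 3CosAdd analogy score "a : b :: c : ?".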
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
if ignore:
result = [(model.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
else:
result = [(model.index2word[sim], float(dists[sim])) for sim in best]
return result[:topn]
def evaluate_word_analogies_bats(model, directory, restrict_vocab=300000, case_insensitive=True, dummy4unknown=False):
logger = logging.getLogger(__name__)
print("# Computing analogy scores for category type: ", str(directory))
ok_vocab = [(w, model.vocab[w]) for w in model.index2word[:restrict_vocab]]
ok_vocab = {w.upper(): v for w, v in reversed(ok_vocab)} if case_insensitive else dict(ok_vocab)
oov = 0
# logger.info("Evaluating word analogies for top %i words in the model on %s", restrict_vocab, analogies)
sections, section = [], None
quadruplets_no = 0
directions_names_bats = []
pairs_sets = []
scores_bats = [] #dict()
scores_bats_vanilla = [] #dict()
    for f in os.listdir('BATS_3.0/' + str(directory)):
directions_names_bats.append(str(f)[:-4])
pairs_sets.append(set())
with utils.open_file('BATS_3.0/' + str(directory) + '/' + str(f)) as fin:
for line_no, line in enumerate(fin):
line = utils.to_unicode(line)
a, b = [word.lower() for word in line.split()]
list_b = b.split('/')
if list_b[0] != a:
pairs_sets[-1].add((a.upper(), list_b[0].upper()))
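                    # BATS lines look like "a  b1/b2/..."; only the first candidate b1
                    # is kept here, and degenerate pairs with b1 == a are skipped.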
for i in range(len(directions_names_bats)):
if section:
# store the last section, too
sections.append(section)
# model._log_evaluate_word_analogies(section)
correct, incorrect = len(section['correct']), len(section['incorrect'])
if correct + incorrect > 0:
score = correct / (correct + incorrect)
logger.info("%s: %.1f%% (%i/%i)", section['section'], 100.0 * score, correct, correct + incorrect)
scores_bats.append([section['section'], score, correct, correct + incorrect])
else:
print('No score for ', section['section'])
correct, incorrect = len(section['correct_vanilla']), len(section['incorrect_vanilla'])
if correct + incorrect > 0:
score = correct / (correct + incorrect)
logger.info("%s: %.1f%% (%i/%i) VANILLA", section['section'], 100.0 * score, correct,
correct + incorrect)
scores_bats_vanilla.append([section['section'], score, correct, correct + incorrect])
total_section = len(section['correct_vanilla']) + len(section['incorrect_vanilla'])
if total_section > 0:
logger.info('Number of predictions equal to a: %i (%d), a*: %i (%d), b: %i (%d)',
section['n_a'], section['n_a'] / total_section,
section['n_a*'], section['n_a*'] / total_section,
section['n_b'], section['n_b'] / total_section)
section = {'section': directions_names_bats[i], 'correct': [], 'incorrect': [],
'correct_vanilla': [], 'incorrect_vanilla': [], 'n_a': 0, 'n_a*': 0, 'n_b': 0,
'cd': [], 'badc': [], 'bac': [], 'n/cba': [], 'n/c': [], 'n/d': []}
tuples = pairs_sets[i]
for t1 in tuples:
for t2 in tuples:
a, b = t1
c, expected = t2
if a != c:
quadruplets_no += 1
if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
oov += 1
# if dummy4unknown:
# logger.debug('Zero accuracy for line #%d with OOV words: %s', line_no, line.strip())
# section['incorrect'].append((a, b, c, expected))
# else:
# logger.debug("Skipping line with OOV words")
continue
original_vocab = model.vocab
model.vocab = ok_vocab
predicted = None
ignore = {a, b, c} # input words to be ignored
ignore_bool = False
positive = [b, c]
negative = [a]
# find the most likely prediction using 3CosAdd (vector offset) method
# TODO: implement 3CosMul and set-based methods for solving analogies
sims = most_similar(model, positive=positive, negative=negative, topn=5,
restrict_vocab=restrict_vocab, ignore=ignore_bool)
model.vocab = original_vocab
# predicted = sims[0][0].upper() if case_insensitive else sims[0][0]
for element in sims:
predicted = element[0].upper() if case_insensitive else element[0]
if predicted in ok_vocab and predicted not in ignore:
break
for element in sims:
predicted_ignore = element[0].upper() if case_insensitive else element[0]
if predicted_ignore in ok_vocab:
break
if predicted == expected:
section['correct'].append((a, b, c, expected))
else:
section['incorrect'].append((a, b, c, expected))
if predicted_ignore == expected:
section['correct_vanilla'].append((a, b, c, expected))
else:
section['incorrect_vanilla'].append((a, b, c, expected))
if predicted_ignore == a:
section['n_a'] += 1
if predicted_ignore == b:
section['n_a*'] += 1
if predicted_ignore == c:
section['n_b'] += 1
if section:
# store the last section, too
sections.append(section)
# model._log_evaluate_word_analogies(section)
correct, incorrect = len(section['correct']), len(section['incorrect'])
if correct + incorrect > 0:
score = correct / (correct + incorrect)
logger.info("%s: %.1f%% (%i/%i)", section['section'], 100.0 * score, correct, correct + incorrect)
scores_bats.append([section['section'], score, correct, correct + incorrect])
else:
print('No score for ', section['section'])
correct, incorrect = len(section['correct_vanilla']), len(section['incorrect_vanilla'])
if correct + incorrect > 0:
score = correct / (correct + incorrect)
logger.info("%s: %.1f%% (%i/%i) VANILLA", section['section'], 100.0 * score, correct, correct + incorrect)
scores_bats_vanilla.append([section['section'], score, correct, correct + incorrect])
total_section = len(section['correct_vanilla']) + len(section['incorrect_vanilla'])
if total_section > 0:
logger.info('Number of predictions equal to a: %i (%d), a*: %i (%d), b: %i (%d)',
section['n_a'], section['n_a'] / total_section,
section['n_a*'], section['n_a*'] / total_section,
section['n_b'], section['n_b'] / total_section)
total = {
'section': 'Total accuracy',
'correct': list(chain.from_iterable(s['correct'] for s in sections)),
'incorrect': list(chain.from_iterable(s['incorrect'] for s in sections)),
'correct_vanilla': list(chain.from_iterable(s['correct_vanilla'] for s in sections)),
'incorrect_vanilla': list(chain.from_iterable(s['incorrect_vanilla'] for s in sections)),
}
oov_ratio = float(oov) / quadruplets_no * 100
logger.info('Quadruplets with out-of-vocabulary words: %.1f%%', oov_ratio)
if not dummy4unknown:
logger.info(
'NB: analogies containing OOV words were skipped from evaluation! '
'To change this behavior, use "dummy4unknown=True"'
)
# analogies_score = model._log_evaluate_word_analogies(total)
correct, incorrect = len(total['correct']), len(total['incorrect'])
# print(total)
if correct + incorrect > 0:
score = correct / (correct + incorrect)
logger.info("%s: %.1f%% (%i/%i)", total['section'], 100.0 * score, correct, correct + incorrect)
total_score = ["# Total " + str(directory), score, correct, correct + incorrect]
analogies_score = score
correct_vanilla, incorrect_vanilla = len(total['correct_vanilla']), len(total['incorrect_vanilla'])
# print(total)
if correct_vanilla + incorrect_vanilla > 0:
score = correct_vanilla / (correct_vanilla + incorrect_vanilla)
logger.info("%s: %.1f%% (%i/%i) VANILLA", total['section'], 100.0 * score, correct_vanilla,
correct_vanilla + incorrect_vanilla)
total_score_vanilla = ["# Total " + str(directory), score, correct_vanilla, correct_vanilla + incorrect_vanilla]
analogies_score = score
sections.append(total)
bats_scores = [total_score, total_score_vanilla, scores_bats, scores_bats_vanilla]
# Return the overall score and the full lists of correct and incorrect analogies
return bats_scores #[analogies_score, sections, bats_scores]
def bats_test(model):
results = []
for d in os.listdir('../BATS_3.0'):
if d != 'metadata.json':
results.append(evaluate_word_analogies_bats(model, directory=d))
return (results)
def save_analogy_test(results):
total_results = []
total_results_vanilla = []
for r in results:
t, t_v, s, s_v = r[0], r[1], r[2], r[3]
for si in s:
total_results.append(si)
total_results.append(t)
for si_v in s_v:
total_results_vanilla.append(si_v)
total_results_vanilla.append(t_v)
columns = ['Categories', 'Accuracy', 'Nb correct', 'Nb total']
df = pd.DataFrame(total_results, columns=columns)
df_v = pd.DataFrame(total_results_vanilla, columns=columns)
if not exists('results'):
print("# ", str('results'), "not found, creating dir.")
mkdir('results')
timestr = time.strftime("%Y%m%d-%H%M%S")
namepath = 'results/' + 'analogy_test' + '-' + str(timestr) + '.csv'
namepath_v = 'results/' + 'analogy_test_vanilla' + '-' + str(timestr) + '.csv'
df.to_csv(namepath, index=False)
df_v.to_csv(namepath_v, index=False)
print("# Successfully saved the analogy tests to ", str(namepath), "and ", str(namepath_v))
if __name__ == "__main__":
# execute only if run as a script
if len(sys.argv) < 2:
raise("# Please provide a model (all, name, or filename for a custom model)")
name = sys.argv[1]
if name == 'all':
for name in MODELS:
model = load_model(name)
print("# Computing the analogy test accuracy from ", str(name))
results = bats_test(model)
print("# Sucessfully computed the analogy test accuracy from ", str(name))
save_analogy_test(results)
else:
model = load_model(name)
print("# Computing the analogy test accuracy from ", str(name))
results = bats_test(model)
print("# Sucessfully computed the analogy test accuracy from ", str(name))
save_analogy_test(results)
| [
"pandas.DataFrame",
"os.mkdir",
"os.path.exists",
"time.strftime",
"models.load_model",
"numpy.array",
"numpy.dot",
"itertools.chain.from_iterable",
"os.listdir",
"logging.getLogger",
"gensim.utils.to_unicode"
] | [((2019, 2040), 'numpy.dot', 'np.dot', (['limited', 'mean'], {}), '(limited, mean)\n', (2025, 2040), True, 'import numpy as np\n'), ((2569, 2596), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2586, 2596), False, 'import logging\n'), ((11779, 11804), 'os.listdir', 'os.listdir', (['"""../BATS_3.0"""'], {}), "('../BATS_3.0')\n", (11789, 11804), False, 'import os\n'), ((12375, 12419), 'pandas.DataFrame', 'pd.DataFrame', (['total_results'], {'columns': 'columns'}), '(total_results, columns=columns)\n', (12387, 12419), True, 'import pandas as pd\n'), ((12431, 12483), 'pandas.DataFrame', 'pd.DataFrame', (['total_results_vanilla'], {'columns': 'columns'}), '(total_results_vanilla, columns=columns)\n', (12443, 12483), True, 'import pandas as pd\n'), ((12618, 12648), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (12631, 12648), False, 'import time\n'), ((12496, 12513), 'os.path.exists', 'exists', (['"""results"""'], {}), "('results')\n", (12502, 12513), False, 'from os.path import exists\n'), ((12587, 12603), 'os.mkdir', 'mkdir', (['"""results"""'], {}), "('results')\n", (12592, 12603), False, 'from os import mkdir\n'), ((13538, 13554), 'models.load_model', 'load_model', (['name'], {}), '(name)\n', (13548, 13554), False, 'from models import vocabulary_model, load_model, MODELS\n'), ((9779, 9830), 'itertools.chain.from_iterable', 'chain.from_iterable', (["(s['correct'] for s in sections)"], {}), "(s['correct'] for s in sections)\n", (9798, 9830), False, 'from itertools import chain\n'), ((9859, 9912), 'itertools.chain.from_iterable', 'chain.from_iterable', (["(s['incorrect'] for s in sections)"], {}), "(s['incorrect'] for s in sections)\n", (9878, 9912), False, 'from itertools import chain\n'), ((9947, 10006), 'itertools.chain.from_iterable', 'chain.from_iterable', (["(s['correct_vanilla'] for s in sections)"], {}), "(s['correct_vanilla'] for s in sections)\n", (9966, 10006), False, 'from itertools import chain\n'), ((10043, 10104), 'itertools.chain.from_iterable', 'chain.from_iterable', (["(s['incorrect_vanilla'] for s in sections)"], {}), "(s['incorrect_vanilla'] for s in sections)\n", (10062, 10104), False, 'from itertools import chain\n'), ((13253, 13269), 'models.load_model', 'load_model', (['name'], {}), '(name)\n', (13263, 13269), False, 'from models import vocabulary_model, load_model, MODELS\n'), ((3449, 3471), 'gensim.utils.to_unicode', 'utils.to_unicode', (['line'], {}), '(line)\n', (3465, 3471), False, 'from gensim import utils, matutils\n'), ((1781, 1795), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (1789, 1795), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
def _of_broadcast_to_compatible_with(x, compatible_shape, x_shape=None):
assert isinstance(compatible_shape, (list, tuple))
if x_shape is None:
x_shape = x.shape
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
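    # Mirrored view: each device consumes its own local numpy slice, which is what the oft.ListNumpy placeholder below expects.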
@flow.global_function(function_config=func_config)
def broadcast_to_compatible_with_fn(
x_def: oft.ListNumpy.Placeholder(shape=x_shape, dtype=flow.float)
):
compatible_var = [
flow.get_variable(
"compatible_var_{}".format(i),
shape=cp_shape,
dtype=flow.float,
initializer=flow.random_normal_initializer(),
trainable=False,
)
for (i, cp_shape) in enumerate(compatible_shape)
]
return flow.broadcast_to_compatible_with(x_def, compatible_var)
return broadcast_to_compatible_with_fn([x]).get().numpy_list()[0]
def _of_broadcast_to_compatible_with_dynamic(
x, a, b, x_shape=None, a_shape=None, b_shape=None
):
if x_shape is None:
x_shape = x.shape
if a_shape is None:
a_shape = a.shape
if b_shape is None:
b_shape = b.shape
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def broadcast_to_compatible_with_fn(
x_def: oft.ListNumpy.Placeholder(x_shape, dtype=flow.float),
a_def: oft.ListNumpy.Placeholder(a_shape, dtype=flow.float),
b_def: oft.ListNumpy.Placeholder(b_shape, dtype=flow.float),
):
return flow.broadcast_to_compatible_with(
x_def, [flow.identity(a_def), flow.identity(b_def)]
)
return broadcast_to_compatible_with_fn([x], [a], [b]).get().numpy_list()[0]
def _of_broadcast_to_compatible_with_grad(x, compatible_shape, dx_watcher):
assert isinstance(compatible_shape, (list, tuple))
assert callable(dx_watcher)
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
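    # Consistent view: inputs are plain (non-list) numpy arrays, matching the oft.Numpy placeholder used by this train job.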
@flow.global_function(type="train", function_config=func_config)
def broadcast_to_compatible_with_fn(
x_def: oft.Numpy.Placeholder(x.shape, dtype=flow.float)
):
x_var = flow.get_variable(
"x_var",
shape=x.shape,
dtype=flow.float,
initializer=flow.constant_initializer(0),
trainable=True,
)
compatible_var = [
flow.get_variable(
"compatible_var_{}".format(i),
shape=cp_shape,
dtype=flow.float,
initializer=flow.random_normal_initializer(),
trainable=False,
)
for (i, cp_shape) in enumerate(compatible_shape)
]
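        # Route the input through x_var so the SGD step and watch_diff below see a trainable variable.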
x_var = x_var + x_def
y = flow.broadcast_to_compatible_with(x_var, compatible_var)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
).minimize(y)
flow.watch_diff(x_var, dx_watcher)
return y
return broadcast_to_compatible_with_fn(x).get().numpy()
@flow.unittest.skip_unless_1n1d()
class TestBroadcastToCompatibleWith(flow.unittest.TestCase):
def test_broadcast_to_compatible_with(test_case):
x = np.random.standard_normal((5, 2)).astype(np.float32)
compatible_shape = [[4, 5, 2], [4, 5, 1]]
ret = _of_broadcast_to_compatible_with(x, compatible_shape)
expected_ret = np.broadcast_to(x, [4, 5, 2])
test_case.assertTrue(np.array_equal(expected_ret, ret))
def test_dynamic_broadcast_to_compatible_with(test_case):
x = np.random.standard_normal((10, 6)).astype(np.float32)
x_static_shape = (15, 6)
a = np.random.standard_normal((3, 10, 6)).astype(np.float32)
a_static_shape = (3, 15, 6)
b = np.random.standard_normal((3, 10, 1)).astype(np.float32)
b_static_shape = (3, 15, 1)
ret = _of_broadcast_to_compatible_with_dynamic(
x, a, b, x_static_shape, a_static_shape, b_static_shape
)
expected_ret = np.broadcast_to(x, [3, 10, 6])
test_case.assertTrue(np.array_equal(expected_ret, ret))
def test_dynamic_broadcast_to_compatible_with_case_2(test_case):
x = np.random.standard_normal((20, 1, 1)).astype(np.float32)
x_static_shape = (23, 1, 1)
a = np.random.standard_normal((11, 1)).astype(np.float32)
a_static_shape = (15, 1)
b = np.random.standard_normal((7,)).astype(np.float32)
b_static_shape = (8,)
ret = _of_broadcast_to_compatible_with_dynamic(
x, a, b, x_static_shape, a_static_shape, b_static_shape
)
expected_ret = np.broadcast_to(x, [20, 11, 7])
test_case.assertTrue(np.array_equal(expected_ret, ret))
def test_broadcast_to_compatible_with_grad(test_case):
x = np.random.standard_normal((7, 1, 4)).astype(np.float32)
compatible_shape = [[7, 1, 4], [5, 4]]
def compare_dy(dx_blob):
dx = np.ones([7, 5, 4], dtype=np.float32).sum(axis=1).reshape(x.shape)
test_case.assertTrue(np.array_equal(dx, dx_blob.numpy()))
ret = _of_broadcast_to_compatible_with_grad(x, compatible_shape, compare_dy)
exp_ret = np.broadcast_to(x, [7, 5, 4])
test_case.assertTrue(np.array_equal(exp_ret, ret))
def test_broadcast_to_compatible_with_grad_case_2(test_case):
x = np.random.standard_normal((7, 1, 4)).astype(np.float32)
compatible_shape = [[1, 7, 5, 4]]
def compare_dy(dx_blob):
dx = np.ones([7, 5, 4], dtype=np.float32).sum(axis=1).reshape(x.shape)
test_case.assertTrue(np.array_equal(dx, dx_blob.numpy()))
ret = _of_broadcast_to_compatible_with_grad(x, compatible_shape, compare_dy)
exp_ret = np.broadcast_to(x, [1, 7, 5, 4])
test_case.assertTrue(np.array_equal(exp_ret, ret))
def test_broadcast_to_compatible_with_no_broadcast(test_case):
x = np.random.standard_normal((9, 9, 6)).astype(np.float32)
x_static_shape = (10, 9, 6)
compatible_shape = [[6], [9, 1]]
ret = _of_broadcast_to_compatible_with(x, compatible_shape, x_static_shape)
test_case.assertTrue(np.array_equal(x, ret))
if __name__ == "__main__":
unittest.main()
| [
"oneflow.compatible.single_client.typing.ListNumpy.Placeholder",
"numpy.ones",
"oneflow.compatible.single_client.constant_initializer",
"oneflow.compatible.single_client.watch_diff",
"unittest.main",
"oneflow.compatible.single_client.FunctionConfig",
"oneflow.compatible.single_client.broadcast_to_compat... | [((4221, 4253), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (4251, 4253), True, 'from oneflow.compatible import single_client as flow\n'), ((973, 1001), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (999, 1001), True, 'from oneflow.compatible import single_client as flow\n'), ((1020, 1041), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1039, 1041), True, 'from oneflow.compatible import single_client as flow\n'), ((1159, 1208), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1179, 1208), True, 'from oneflow.compatible import single_client as flow\n'), ((2084, 2112), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2110, 2112), True, 'from oneflow.compatible import single_client as flow\n'), ((2131, 2152), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2150, 2152), True, 'from oneflow.compatible import single_client as flow\n'), ((2270, 2319), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (2290, 2319), True, 'from oneflow.compatible import single_client as flow\n'), ((2949, 2977), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2975, 2977), True, 'from oneflow.compatible import single_client as flow\n'), ((2996, 3017), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (3015, 3017), True, 'from oneflow.compatible import single_client as flow\n'), ((3137, 3200), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (3157, 3200), True, 'from oneflow.compatible import single_client as flow\n'), ((7411, 7426), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7424, 7426), False, 'import unittest\n'), ((1125, 1151), 'oneflow.compatible.single_client.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (1149, 1151), True, 'from oneflow.compatible import single_client as flow\n'), ((1697, 1753), 'oneflow.compatible.single_client.broadcast_to_compatible_with', 'flow.broadcast_to_compatible_with', (['x_def', 'compatible_var'], {}), '(x_def, compatible_var)\n', (1730, 1753), True, 'from oneflow.compatible import single_client as flow\n'), ((2236, 2262), 'oneflow.compatible.single_client.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (2260, 2262), True, 'from oneflow.compatible import single_client as flow\n'), ((3101, 3129), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (3127, 3129), True, 'from oneflow.compatible import single_client as flow\n'), ((3911, 3967), 'oneflow.compatible.single_client.broadcast_to_compatible_with', 'flow.broadcast_to_compatible_with', (['x_var', 'compatible_var'], {}), '(x_var, compatible_var)\n', (3944, 3967), True, 'from oneflow.compatible import single_client as flow\n'), ((4105, 4139), 'oneflow.compatible.single_client.watch_diff', 'flow.watch_diff', (['x_var', 'dx_watcher'], {}), '(x_var, 
dx_watcher)\n', (4120, 4139), True, 'from oneflow.compatible import single_client as flow\n'), ((4575, 4604), 'numpy.broadcast_to', 'np.broadcast_to', (['x', '[4, 5, 2]'], {}), '(x, [4, 5, 2])\n', (4590, 4604), True, 'import numpy as np\n'), ((5198, 5228), 'numpy.broadcast_to', 'np.broadcast_to', (['x', '[3, 10, 6]'], {}), '(x, [3, 10, 6])\n', (5213, 5228), True, 'import numpy as np\n'), ((5817, 5848), 'numpy.broadcast_to', 'np.broadcast_to', (['x', '[20, 11, 7]'], {}), '(x, [20, 11, 7])\n', (5832, 5848), True, 'import numpy as np\n'), ((6379, 6408), 'numpy.broadcast_to', 'np.broadcast_to', (['x', '[7, 5, 4]'], {}), '(x, [7, 5, 4])\n', (6394, 6408), True, 'import numpy as np\n'), ((6936, 6968), 'numpy.broadcast_to', 'np.broadcast_to', (['x', '[1, 7, 5, 4]'], {}), '(x, [1, 7, 5, 4])\n', (6951, 6968), True, 'import numpy as np\n'), ((1265, 1323), 'oneflow.compatible.single_client.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', ([], {'shape': 'x_shape', 'dtype': 'flow.float'}), '(shape=x_shape, dtype=flow.float)\n', (1290, 1323), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2376, 2428), 'oneflow.compatible.single_client.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['x_shape'], {'dtype': 'flow.float'}), '(x_shape, dtype=flow.float)\n', (2401, 2428), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2445, 2497), 'oneflow.compatible.single_client.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['a_shape'], {'dtype': 'flow.float'}), '(a_shape, dtype=flow.float)\n', (2470, 2497), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2514, 2566), 'oneflow.compatible.single_client.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['b_shape'], {'dtype': 'flow.float'}), '(b_shape, dtype=flow.float)\n', (2539, 2566), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((3257, 3305), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['x.shape'], {'dtype': 'flow.float'}), '(x.shape, dtype=flow.float)\n', (3278, 3305), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((4634, 4667), 'numpy.array_equal', 'np.array_equal', (['expected_ret', 'ret'], {}), '(expected_ret, ret)\n', (4648, 4667), True, 'import numpy as np\n'), ((5258, 5291), 'numpy.array_equal', 'np.array_equal', (['expected_ret', 'ret'], {}), '(expected_ret, ret)\n', (5272, 5291), True, 'import numpy as np\n'), ((5878, 5911), 'numpy.array_equal', 'np.array_equal', (['expected_ret', 'ret'], {}), '(expected_ret, ret)\n', (5892, 5911), True, 'import numpy as np\n'), ((6438, 6466), 'numpy.array_equal', 'np.array_equal', (['exp_ret', 'ret'], {}), '(exp_ret, ret)\n', (6452, 6466), True, 'import numpy as np\n'), ((6998, 7026), 'numpy.array_equal', 'np.array_equal', (['exp_ret', 'ret'], {}), '(exp_ret, ret)\n', (7012, 7026), True, 'import numpy as np\n'), ((7354, 7376), 'numpy.array_equal', 'np.array_equal', (['x', 'ret'], {}), '(x, ret)\n', (7368, 7376), True, 'import numpy as np\n'), ((2645, 2665), 'oneflow.compatible.single_client.identity', 'flow.identity', (['a_def'], {}), '(a_def)\n', (2658, 2665), True, 'from oneflow.compatible import single_client as flow\n'), ((2667, 2687), 'oneflow.compatible.single_client.identity', 'flow.identity', (['b_def'], {}), '(b_def)\n', (2680, 2687), True, 'from oneflow.compatible import single_client as flow\n'), ((3450, 3478), 'oneflow.compatible.single_client.constant_initializer', 'flow.constant_initializer', 
(['(0)'], {}), '(0)\n', (3475, 3478), True, 'from oneflow.compatible import single_client as flow\n'), ((4381, 4414), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(5, 2)'], {}), '((5, 2))\n', (4406, 4414), True, 'import numpy as np\n'), ((4744, 4778), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(10, 6)'], {}), '((10, 6))\n', (4769, 4778), True, 'import numpy as np\n'), ((4843, 4880), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(3, 10, 6)'], {}), '((3, 10, 6))\n', (4868, 4880), True, 'import numpy as np\n'), ((4948, 4985), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(3, 10, 1)'], {}), '((3, 10, 1))\n', (4973, 4985), True, 'import numpy as np\n'), ((5375, 5412), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(20, 1, 1)'], {}), '((20, 1, 1))\n', (5400, 5412), True, 'import numpy as np\n'), ((5480, 5514), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(11, 1)'], {}), '((11, 1))\n', (5505, 5514), True, 'import numpy as np\n'), ((5579, 5610), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(7,)'], {}), '((7,))\n', (5604, 5610), True, 'import numpy as np\n'), ((5985, 6021), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(7, 1, 4)'], {}), '((7, 1, 4))\n', (6010, 6021), True, 'import numpy as np\n'), ((6547, 6583), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(7, 1, 4)'], {}), '((7, 1, 4))\n', (6572, 6583), True, 'import numpy as np\n'), ((7108, 7144), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(9, 9, 6)'], {}), '((9, 9, 6))\n', (7133, 7144), True, 'import numpy as np\n'), ((1530, 1562), 'oneflow.compatible.single_client.random_normal_initializer', 'flow.random_normal_initializer', ([], {}), '()\n', (1560, 1562), True, 'from oneflow.compatible import single_client as flow\n'), ((3717, 3749), 'oneflow.compatible.single_client.random_normal_initializer', 'flow.random_normal_initializer', ([], {}), '()\n', (3747, 3749), True, 'from oneflow.compatible import single_client as flow\n'), ((4008, 4062), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (4049, 4062), True, 'from oneflow.compatible import single_client as flow\n'), ((6139, 6175), 'numpy.ones', 'np.ones', (['[7, 5, 4]'], {'dtype': 'np.float32'}), '([7, 5, 4], dtype=np.float32)\n', (6146, 6175), True, 'import numpy as np\n'), ((6696, 6732), 'numpy.ones', 'np.ones', (['[7, 5, 4]'], {'dtype': 'np.float32'}), '([7, 5, 4], dtype=np.float32)\n', (6703, 6732), True, 'import numpy as np\n')] |
# Libraries for system and debug
from ast import Param
import sys
import pdb
import os
from datetime import datetime
# Class for converting sequences to tensors
from seq2tensor import s2t
# Libraries for neural network training
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GRU, LSTM, Bidirectional, Input, Conv1D, Conv2D
from tensorflow.keras.layers import Add, Flatten, subtract, multiply, concatenate
from tensorflow.keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, MaxPooling2D
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.layers import Dropout, BatchNormalization
from tensorflow.keras.utils import Sequence
from tensorflow.keras import mixed_precision
from tensorflow import keras
from tensorboard.plugins.hparams import api as hp
from tensorflow.keras.utils import get_custom_objects
from tensorflow.keras.layers import Activation
from keras.callbacks import ModelCheckpoint
from tensorflow.keras import regularizers
import tensorflow_addons as tfa
from sklearn.model_selection import KFold, ShuffleSplit
from sklearn.model_selection import train_test_split
# Import accessory modules
import numpy as np
import h5py
import gc
from tqdm import tqdm
# Generator helpers: generator_pair yields ({"seq1", "seq2"} input dict, label) pairs; generator_pair_predict yields the input dict only
def generator_pair(seq_tensor, class_labels, pair_index):
for index in pair_index:
yield {"seq1": seq_tensor[seq_index1[index]], "seq2": seq_tensor[seq_index2[index]]}, class_labels[index]
def generator_pair_predict(seq_tensor, class_labels, pair_index):
for index in pair_index:
yield {"seq1": seq_tensor[seq_index1[index]], "seq2": seq_tensor[seq_index2[index]]}
def input_preprocess(id2seq_file, ds_file, use_emb):
id2index = {}
seqs = []
index = 0
sid1_index = 0
sid2_index = 1
label_index = 2
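    # Column layout of the tab-separated pair file: sequence id 1, sequence id 2, class label.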
for line in open(id2seq_file):
line = line.strip().split('\t')
id2index[line[0]] = index
seqs.append(line[1])
index += 1
seq_array = []
id2_aid = {}
sid = 0
seq2t = s2t(use_emb)
max_data = -1
limit_data = max_data > 0
raw_data = []
skip_head = True
x = None
count = 0
# Create sequence array as a list of protein strings
for line in tqdm(open(ds_file)):
if skip_head:
skip_head = False
continue
line = line.rstrip('\n').rstrip('\r').split('\t')
if id2index.get(line[sid1_index]) is None or id2index.get(line[sid2_index]) is None:
continue
if id2_aid.get(line[sid1_index]) is None:
id2_aid[line[sid1_index]] = sid
sid += 1
seq_array.append(seqs[id2index[line[sid1_index]]])
line[sid1_index] = id2_aid[line[sid1_index]]
if id2_aid.get(line[sid2_index]) is None:
id2_aid[line[sid2_index]] = sid
sid += 1
seq_array.append(seqs[id2index[line[sid2_index]]])
line[sid2_index] = id2_aid[line[sid2_index]]
raw_data.append(line)
if limit_data:
count += 1
if count >= max_data:
break
len_m_seq = np.array([len(line.split()) for line in seq_array])
avg_m_seq = int(np.average(len_m_seq)) + 1
max_m_seq = max(len_m_seq)
dim = seq2t.dim
# seq_tensor is tensor representation of dataset having shape of (number_of_sequences, padding_length, embedding_dim_of_aa)
# Random for distribution of class labels
seq_tensor = np.array([seq2t.embed_normalized(line, seq_size)
for line in tqdm(seq_array)]).astype('float16')
# Extract index of 1st and 2nd sequences in pairs
seq_index1 = np.array([line[sid1_index] for line in tqdm(raw_data)])
seq_index2 = np.array([line[sid2_index] for line in tqdm(raw_data)])
# Assign labels for pairs of sequences
class_map = {'0': 1, '1': 0}
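    # One-hot encode the string labels: a '1' in the file sets column 0, a '0' sets column 1.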
class_labels = np.zeros((len(raw_data), 2))
for i in range(len(raw_data)):
class_labels[i][class_map[raw_data[i][label_index]]] = 1
return seq_tensor, seq_index1, seq_index2, class_labels
def build_model(hparams):
# Input of sequence tensor representations
seq_input1 = Input(shape=(seq_size, dim), name='seq1')
seq_input2 = Input(shape=(seq_size, dim), name='seq2')
# Define Conv1D and Bi-RNN (GRU/LSTM) use in architecture
l1 = Conv1D(hparams[HP_CONV_HIDDEN_DIM], hparams[HP_KERNEL_SIZE],
activation=hparams[HP_ACTIVATION_CONV], padding=hparams[HP_CONV_PADDING])
r1 = Bidirectional(GRU(hparams[HP_RNN_HIDDEN_DIM], return_sequences=True))
l2 = Conv1D(hparams[HP_CONV_HIDDEN_DIM], hparams[HP_KERNEL_SIZE],
activation=hparams[HP_ACTIVATION_CONV], padding=hparams[HP_CONV_PADDING])
r2 = Bidirectional(GRU(hparams[HP_RNN_HIDDEN_DIM], return_sequences=True))
l3 = Conv1D(hparams[HP_CONV_HIDDEN_DIM], hparams[HP_KERNEL_SIZE],
activation=hparams[HP_ACTIVATION_CONV], padding=hparams[HP_CONV_PADDING])
r3 = Bidirectional(GRU(hparams[HP_RNN_HIDDEN_DIM], return_sequences=True))
l4 = Conv1D(hparams[HP_CONV_HIDDEN_DIM], hparams[HP_KERNEL_SIZE],
activation=hparams[HP_ACTIVATION_CONV], padding=hparams[HP_CONV_PADDING])
r4 = Bidirectional(GRU(hparams[HP_RNN_HIDDEN_DIM], return_sequences=True))
l5 = Conv1D(hparams[HP_CONV_HIDDEN_DIM], hparams[HP_KERNEL_SIZE],
activation=hparams[HP_ACTIVATION_CONV], padding=hparams[HP_CONV_PADDING])
r5 = Bidirectional(GRU(hparams[HP_RNN_HIDDEN_DIM], return_sequences=True))
l6 = Conv1D(hparams[HP_CONV_HIDDEN_DIM], hparams[HP_KERNEL_SIZE],
activation=hparams[HP_ACTIVATION_CONV], padding=hparams[HP_CONV_PADDING])
# Siamese architecture
### 1st sibling
# 1st Block RCNN
s1 = MaxPooling1D(hparams[HP_POOLING_KERNEL])(l1(seq_input1))
s1 = concatenate([r1(s1), s1])
# 2nd Block RCNN
s1 = MaxPooling1D(hparams[HP_POOLING_KERNEL])(l2(s1))
s1 = concatenate([r2(s1), s1])
# 3rd Block RCNN
s1 = MaxPooling1D(hparams[HP_POOLING_KERNEL])(l3(s1))
s1 = concatenate([r3(s1), s1])
# 4th Block RCNN
s1 = MaxPooling1D(hparams[HP_POOLING_KERNEL])(l4(s1))
s1 = concatenate([r4(s1), s1])
# 5th Block RCNN
s1 = MaxPooling1D(hparams[HP_POOLING_KERNEL])(l5(s1))
s1 = concatenate([r5(s1), s1])
# Last convolution
s1 = l6(s1)
s1 = GlobalAveragePooling1D()(s1)
### 2nd sibling
# 1st block RCNN
s2 = MaxPooling1D(hparams[HP_POOLING_KERNEL])(l1(seq_input2))
s2 = concatenate([r1(s2), s2])
# 2nd block RCNN
s2 = MaxPooling1D(hparams[HP_POOLING_KERNEL])(l2(s2))
s2 = concatenate([r2(s2), s2])
# 3rd block RCNN
s2 = MaxPooling1D(hparams[HP_POOLING_KERNEL])(l3(s2))
s2 = concatenate([r3(s2), s2])
# 4th block RCNN
s2 = MaxPooling1D(hparams[HP_POOLING_KERNEL])(l4(s2))
s2 = concatenate([r4(s2), s2])
# 5th block RCNN
s2 = MaxPooling1D(hparams[HP_POOLING_KERNEL])(l5(s2))
s2 = concatenate([r5(s2), s2])
# Last convolution
s2 = l6(s2)
s2 = GlobalAveragePooling1D()(s2)
### Combine two siblings of siamese architecture
merge_text = multiply([s1, s2])
#### MLP Part
# Set initializer
# First dense
x = Dense(hparams[HP_FIRST_DENSE],
activation=hparams[HP_ACTIVATION])(merge_text)
# x = tf.keras.layers.LeakyReLU(alpha=.3)(x)
x = Dropout(hparams[HP_DROPOUT])(x)
# Second dense
x = Dense(int((hparams[HP_CONV_HIDDEN_DIM]+7)/2),
activation=hparams[HP_ACTIVATION])(x)
# x = tf.keras.layers.LeakyReLU(alpha=.3)(x)
x = Dropout(hparams[HP_DROPOUT])(x)
# Last softmax
main_output = Dense(2, activation='softmax')(x)
# Combine to form functional model
merge_model = Model(inputs=[seq_input1, seq_input2], outputs=[main_output])
return merge_model
if __name__ == "main":
# ============================================
# Optimisation Flags - Do not remove
# ============================================
# Disables caching (when set to 1) or enables caching (when set to 0) for just-in-time-compilation. When disabled,
# no binary code is added to or retrieved from the cache.
os.environ['CUDA_CACHE_DISABLE'] = '0' # orig is 0
# When set to 1, forces the device driver to ignore any binary code embedded in an application
# (see Application Compatibility) and to just-in-time compile embedded PTX code instead.
# If a kernel does not have embedded PTX code, it will fail to load. This environment variable can be used to
# validate that PTX code is embedded in an application and that its just-in-time compilation works as expected to guarantee application
# forward compatibility with future architectures.
os.environ['CUDA_FORCE_PTX_JIT'] = '1' # no orig
os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
os.environ['TF_GPU_THREAD_COUNT'] = '1'
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
os.environ['TF_ADJUST_HUE_FUSED'] = '1'
os.environ['TF_ADJUST_SATURATION_FUSED'] = '1'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '2'
os.environ['TF_DISABLE_NVTX_RANGES'] = '1'
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1"
# =================================================
# mixed_precision.set_global_policy('mixed_float16')
### Setting RAM GPU for training growth
gpus = tf.config.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
| [
"tqdm.tqdm",
"numpy.average",
"tensorflow.keras.layers.multiply",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.layers.GRU",
"tensorflow.config.list_logical_devices",
"tensorflow.keras.layers.MaxPooling1D",
"tensorflow.confi... | [((2014, 2026), 'seq2tensor.s2t', 's2t', (['use_emb'], {}), '(use_emb)\n', (2017, 2026), False, 'from seq2tensor import s2t\n'), ((4034, 4075), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(seq_size, dim)', 'name': '"""seq1"""'}), "(shape=(seq_size, dim), name='seq1')\n", (4039, 4075), False, 'from tensorflow.keras.layers import Dense, GRU, LSTM, Bidirectional, Input, Conv1D, Conv2D\n'), ((4093, 4134), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(seq_size, dim)', 'name': '"""seq2"""'}), "(shape=(seq_size, dim), name='seq2')\n", (4098, 4134), False, 'from tensorflow.keras.layers import Dense, GRU, LSTM, Bidirectional, Input, Conv1D, Conv2D\n'), ((4207, 4346), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['hparams[HP_CONV_HIDDEN_DIM]', 'hparams[HP_KERNEL_SIZE]'], {'activation': 'hparams[HP_ACTIVATION_CONV]', 'padding': 'hparams[HP_CONV_PADDING]'}), '(hparams[HP_CONV_HIDDEN_DIM], hparams[HP_KERNEL_SIZE], activation=\n hparams[HP_ACTIVATION_CONV], padding=hparams[HP_CONV_PADDING])\n', (4213, 4346), False, 'from tensorflow.keras.layers import Dense, GRU, LSTM, Bidirectional, Input, Conv1D, Conv2D\n'), ((4446, 4585), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['hparams[HP_CONV_HIDDEN_DIM]', 'hparams[HP_KERNEL_SIZE]'], {'activation': 'hparams[HP_ACTIVATION_CONV]', 'padding': 'hparams[HP_CONV_PADDING]'}), '(hparams[HP_CONV_HIDDEN_DIM], hparams[HP_KERNEL_SIZE], activation=\n hparams[HP_ACTIVATION_CONV], padding=hparams[HP_CONV_PADDING])\n', (4452, 4585), False, 'from tensorflow.keras.layers import Dense, GRU, LSTM, Bidirectional, Input, Conv1D, Conv2D\n'), ((4685, 4824), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['hparams[HP_CONV_HIDDEN_DIM]', 'hparams[HP_KERNEL_SIZE]'], {'activation': 'hparams[HP_ACTIVATION_CONV]', 'padding': 'hparams[HP_CONV_PADDING]'}), '(hparams[HP_CONV_HIDDEN_DIM], hparams[HP_KERNEL_SIZE], activation=\n hparams[HP_ACTIVATION_CONV], padding=hparams[HP_CONV_PADDING])\n', (4691, 4824), False, 'from tensorflow.keras.layers import Dense, GRU, LSTM, Bidirectional, Input, Conv1D, Conv2D\n'), ((4924, 5063), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['hparams[HP_CONV_HIDDEN_DIM]', 'hparams[HP_KERNEL_SIZE]'], {'activation': 'hparams[HP_ACTIVATION_CONV]', 'padding': 'hparams[HP_CONV_PADDING]'}), '(hparams[HP_CONV_HIDDEN_DIM], hparams[HP_KERNEL_SIZE], activation=\n hparams[HP_ACTIVATION_CONV], padding=hparams[HP_CONV_PADDING])\n', (4930, 5063), False, 'from tensorflow.keras.layers import Dense, GRU, LSTM, Bidirectional, Input, Conv1D, Conv2D\n'), ((5163, 5302), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['hparams[HP_CONV_HIDDEN_DIM]', 'hparams[HP_KERNEL_SIZE]'], {'activation': 'hparams[HP_ACTIVATION_CONV]', 'padding': 'hparams[HP_CONV_PADDING]'}), '(hparams[HP_CONV_HIDDEN_DIM], hparams[HP_KERNEL_SIZE], activation=\n hparams[HP_ACTIVATION_CONV], padding=hparams[HP_CONV_PADDING])\n', (5169, 5302), False, 'from tensorflow.keras.layers import Dense, GRU, LSTM, Bidirectional, Input, Conv1D, Conv2D\n'), ((5402, 5541), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['hparams[HP_CONV_HIDDEN_DIM]', 'hparams[HP_KERNEL_SIZE]'], {'activation': 'hparams[HP_ACTIVATION_CONV]', 'padding': 'hparams[HP_CONV_PADDING]'}), '(hparams[HP_CONV_HIDDEN_DIM], hparams[HP_KERNEL_SIZE], activation=\n hparams[HP_ACTIVATION_CONV], padding=hparams[HP_CONV_PADDING])\n', (5408, 5541), False, 'from tensorflow.keras.layers import Dense, GRU, LSTM, Bidirectional, Input, Conv1D, Conv2D\n'), ((7016, 7034), 'tensorflow.keras.layers.multiply', 'multiply', (['[s1, s2]'], 
{}), '([s1, s2])\n', (7024, 7034), False, 'from tensorflow.keras.layers import Add, Flatten, subtract, multiply, concatenate\n'), ((7629, 7690), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[seq_input1, seq_input2]', 'outputs': '[main_output]'}), '(inputs=[seq_input1, seq_input2], outputs=[main_output])\n', (7634, 7690), False, 'from tensorflow.keras.models import Model\n'), ((9404, 9442), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (9435, 9442), True, 'import tensorflow as tf\n'), ((4381, 4435), 'tensorflow.keras.layers.GRU', 'GRU', (['hparams[HP_RNN_HIDDEN_DIM]'], {'return_sequences': '(True)'}), '(hparams[HP_RNN_HIDDEN_DIM], return_sequences=True)\n', (4384, 4435), False, 'from tensorflow.keras.layers import Dense, GRU, LSTM, Bidirectional, Input, Conv1D, Conv2D\n'), ((4620, 4674), 'tensorflow.keras.layers.GRU', 'GRU', (['hparams[HP_RNN_HIDDEN_DIM]'], {'return_sequences': '(True)'}), '(hparams[HP_RNN_HIDDEN_DIM], return_sequences=True)\n', (4623, 4674), False, 'from tensorflow.keras.layers import Dense, GRU, LSTM, Bidirectional, Input, Conv1D, Conv2D\n'), ((4859, 4913), 'tensorflow.keras.layers.GRU', 'GRU', (['hparams[HP_RNN_HIDDEN_DIM]'], {'return_sequences': '(True)'}), '(hparams[HP_RNN_HIDDEN_DIM], return_sequences=True)\n', (4862, 4913), False, 'from tensorflow.keras.layers import Dense, GRU, LSTM, Bidirectional, Input, Conv1D, Conv2D\n'), ((5098, 5152), 'tensorflow.keras.layers.GRU', 'GRU', (['hparams[HP_RNN_HIDDEN_DIM]'], {'return_sequences': '(True)'}), '(hparams[HP_RNN_HIDDEN_DIM], return_sequences=True)\n', (5101, 5152), False, 'from tensorflow.keras.layers import Dense, GRU, LSTM, Bidirectional, Input, Conv1D, Conv2D\n'), ((5337, 5391), 'tensorflow.keras.layers.GRU', 'GRU', (['hparams[HP_RNN_HIDDEN_DIM]'], {'return_sequences': '(True)'}), '(hparams[HP_RNN_HIDDEN_DIM], return_sequences=True)\n', (5340, 5391), False, 'from tensorflow.keras.layers import Dense, GRU, LSTM, Bidirectional, Input, Conv1D, Conv2D\n'), ((5633, 5673), 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['hparams[HP_POOLING_KERNEL]'], {}), '(hparams[HP_POOLING_KERNEL])\n', (5645, 5673), False, 'from tensorflow.keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, MaxPooling2D\n'), ((5756, 5796), 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['hparams[HP_POOLING_KERNEL]'], {}), '(hparams[HP_POOLING_KERNEL])\n', (5768, 5796), False, 'from tensorflow.keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, MaxPooling2D\n'), ((5871, 5911), 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['hparams[HP_POOLING_KERNEL]'], {}), '(hparams[HP_POOLING_KERNEL])\n', (5883, 5911), False, 'from tensorflow.keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, MaxPooling2D\n'), ((5986, 6026), 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['hparams[HP_POOLING_KERNEL]'], {}), '(hparams[HP_POOLING_KERNEL])\n', (5998, 6026), False, 'from tensorflow.keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, MaxPooling2D\n'), ((6101, 6141), 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['hparams[HP_POOLING_KERNEL]'], {}), '(hparams[HP_POOLING_KERNEL])\n', (6113, 6141), False, 'from tensorflow.keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, MaxPooling2D\n'), ((6234, 6258), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), '()\n', (6256, 6258), False, 'from 
tensorflow.keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, MaxPooling2D\n'), ((6315, 6355), 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['hparams[HP_POOLING_KERNEL]'], {}), '(hparams[HP_POOLING_KERNEL])\n', (6327, 6355), False, 'from tensorflow.keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, MaxPooling2D\n'), ((6438, 6478), 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['hparams[HP_POOLING_KERNEL]'], {}), '(hparams[HP_POOLING_KERNEL])\n', (6450, 6478), False, 'from tensorflow.keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, MaxPooling2D\n'), ((6553, 6593), 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['hparams[HP_POOLING_KERNEL]'], {}), '(hparams[HP_POOLING_KERNEL])\n', (6565, 6593), False, 'from tensorflow.keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, MaxPooling2D\n'), ((6668, 6708), 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['hparams[HP_POOLING_KERNEL]'], {}), '(hparams[HP_POOLING_KERNEL])\n', (6680, 6708), False, 'from tensorflow.keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, MaxPooling2D\n'), ((6783, 6823), 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['hparams[HP_POOLING_KERNEL]'], {}), '(hparams[HP_POOLING_KERNEL])\n', (6795, 6823), False, 'from tensorflow.keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, MaxPooling2D\n'), ((6916, 6940), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), '()\n', (6938, 6940), False, 'from tensorflow.keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, MaxPooling2D\n'), ((7103, 7168), 'tensorflow.keras.layers.Dense', 'Dense', (['hparams[HP_FIRST_DENSE]'], {'activation': 'hparams[HP_ACTIVATION]'}), '(hparams[HP_FIRST_DENSE], activation=hparams[HP_ACTIVATION])\n', (7108, 7168), False, 'from tensorflow.keras.layers import Dense, GRU, LSTM, Bidirectional, Input, Conv1D, Conv2D\n'), ((7252, 7280), 'tensorflow.keras.layers.Dropout', 'Dropout', (['hparams[HP_DROPOUT]'], {}), '(hparams[HP_DROPOUT])\n', (7259, 7280), False, 'from tensorflow.keras.layers import Dropout, BatchNormalization\n'), ((7467, 7495), 'tensorflow.keras.layers.Dropout', 'Dropout', (['hparams[HP_DROPOUT]'], {}), '(hparams[HP_DROPOUT])\n', (7474, 7495), False, 'from tensorflow.keras.layers import Dropout, BatchNormalization\n'), ((7537, 7567), 'tensorflow.keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (7542, 7567), False, 'from tensorflow.keras.layers import Dense, GRU, LSTM, Bidirectional, Input, Conv1D, Conv2D\n'), ((9653, 9690), 'tensorflow.config.list_logical_devices', 'tf.config.list_logical_devices', (['"""GPU"""'], {}), "('GPU')\n", (9683, 9690), True, 'import tensorflow as tf\n'), ((3064, 3085), 'numpy.average', 'np.average', (['len_m_seq'], {}), '(len_m_seq)\n', (3074, 3085), True, 'import numpy as np\n'), ((9576, 9627), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (9616, 9627), True, 'import tensorflow as tf\n'), ((3568, 3582), 'tqdm.tqdm', 'tqdm', (['raw_data'], {}), '(raw_data)\n', (3572, 3582), False, 'from tqdm import tqdm\n'), ((3641, 3655), 'tqdm.tqdm', 'tqdm', (['raw_data'], {}), '(raw_data)\n', (3645, 3655), False, 'from tqdm import tqdm\n'), ((3421, 3436), 'tqdm.tqdm', 'tqdm', (['seq_array'], {}), '(seq_array)\n', (3425, 3436), False, 'from 
tqdm import tqdm\n')] |
#!/usr/bin/env python
import numpy as np, os, sys
from scipy.io import loadmat
from run_12ECG_classifier import load_12ECG_artifacts, run_12ECG_classifier
import tqdm
from os.path import join
from pathlib import Path
def load_challenge_data(filename):
x = loadmat(filename)
data = np.asarray(x['val'], dtype=np.float64)
new_file = filename.replace('.mat','.hea')
input_header_file = os.path.join(new_file)
with open(input_header_file,'r') as f:
header_data=f.readlines()
return data, header_data
def save_challenge_predictions(output_directory, filename, scores, labels, classes):
recording = os.path.splitext(filename)[0]
new_filename = filename.replace('.mat','.csv')
output_file = os.path.join(output_directory, new_filename)
# Include the filename as the recording number
recording_string = '#{}'.format(recording)
class_string = ','.join(classes)
label_string = ','.join(str(i) for i in labels)
score_string = ','.join(str(i) for i in scores)
with open(output_file, 'w') as f:
f.write(recording_string + '\n' + class_string + '\n' + label_string + '\n' + score_string + '\n')
if __name__ == '__main__':
# Parse arguments.
if len(sys.argv) != 4:
raise Exception('Include the model, input and output directories as arguments, e.g., python driver.py model input output.')
model_input = sys.argv[1]
input_directory = sys.argv[2]
output_directory = sys.argv[3]
# Find files.
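    # Use the precomputed header list saved next to the model when present; otherwise scan the input directory for .mat recordings.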
if not os.path.isfile(join(model_input, "header_files_test.npy")):
input_files = []
for f in os.listdir(input_directory):
if os.path.isfile(os.path.join(input_directory, f)) and not f.lower().startswith('.') and f.lower().endswith('mat'):
input_files.append(f)
else:
input_files = list(np.load(join(model_input, "header_files_test.npy")))
input_files = [f.replace(".hea", ".mat") for f in input_files if f.lower().endswith(".hea") and not f.startswith('.')]
if not os.path.isdir(output_directory):
os.mkdir(output_directory)
# Load model.
print('Loading 12ECG model...')
artifacts = load_12ECG_artifacts(model_input)
print("Making predictions...")
num_files = len(input_files)
for i, f in enumerate(input_files):
if i % 1000 == 1:
print('\t{}/{}...'.format(i+1, num_files))
data,header_data = load_challenge_data(f)
current_label, current_score, classes = run_12ECG_classifier(data, header_data, artifacts)
# Save results.
save_challenge_predictions(output_directory, Path(f).name, current_score, current_label, classes)
print('Done.')
| [
"os.mkdir",
"run_12ECG_classifier.run_12ECG_classifier",
"scipy.io.loadmat",
"os.path.isdir",
"numpy.asarray",
"pathlib.Path",
"os.path.splitext",
"run_12ECG_classifier.load_12ECG_artifacts",
"os.path.join",
"os.listdir"
] | [((263, 280), 'scipy.io.loadmat', 'loadmat', (['filename'], {}), '(filename)\n', (270, 280), False, 'from scipy.io import loadmat\n'), ((292, 330), 'numpy.asarray', 'np.asarray', (["x['val']"], {'dtype': 'np.float64'}), "(x['val'], dtype=np.float64)\n", (302, 330), True, 'import numpy as np, os, sys\n'), ((403, 425), 'os.path.join', 'os.path.join', (['new_file'], {}), '(new_file)\n', (415, 425), False, 'import numpy as np, os, sys\n'), ((737, 781), 'os.path.join', 'os.path.join', (['output_directory', 'new_filename'], {}), '(output_directory, new_filename)\n', (749, 781), False, 'import numpy as np, os, sys\n'), ((2180, 2213), 'run_12ECG_classifier.load_12ECG_artifacts', 'load_12ECG_artifacts', (['model_input'], {}), '(model_input)\n', (2200, 2213), False, 'from run_12ECG_classifier import load_12ECG_artifacts, run_12ECG_classifier\n'), ((638, 664), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (654, 664), False, 'import numpy as np, os, sys\n'), ((1616, 1643), 'os.listdir', 'os.listdir', (['input_directory'], {}), '(input_directory)\n', (1626, 1643), False, 'import numpy as np, os, sys\n'), ((2041, 2072), 'os.path.isdir', 'os.path.isdir', (['output_directory'], {}), '(output_directory)\n', (2054, 2072), False, 'import numpy as np, os, sys\n'), ((2082, 2108), 'os.mkdir', 'os.mkdir', (['output_directory'], {}), '(output_directory)\n', (2090, 2108), False, 'import numpy as np, os, sys\n'), ((2504, 2554), 'run_12ECG_classifier.run_12ECG_classifier', 'run_12ECG_classifier', (['data', 'header_data', 'artifacts'], {}), '(data, header_data, artifacts)\n', (2524, 2554), False, 'from run_12ECG_classifier import load_12ECG_artifacts, run_12ECG_classifier\n'), ((1529, 1571), 'os.path.join', 'join', (['model_input', '"""header_files_test.npy"""'], {}), "(model_input, 'header_files_test.npy')\n", (1533, 1571), False, 'from os.path import join\n'), ((1857, 1899), 'os.path.join', 'join', (['model_input', '"""header_files_test.npy"""'], {}), "(model_input, 'header_files_test.npy')\n", (1861, 1899), False, 'from os.path import join\n'), ((2632, 2639), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (2636, 2639), False, 'from pathlib import Path\n'), ((1675, 1707), 'os.path.join', 'os.path.join', (['input_directory', 'f'], {}), '(input_directory, f)\n', (1687, 1707), False, 'import numpy as np, os, sys\n')] |
import numpy as np
from attackgraph.gambit_analysis import do_gambit_analysis
cost = 0.5
ub_p = 10
discount = 0.5
bankrupt_threshold1 = -3
bankrupt_threshold2 = -3
bankrupt_penalty = -100
lb_q = 0
ub_q = 11
step_size = 0.5
mono_q = 9.5/2
mono_util = (9.5/2)**2
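# Monopoly benchmark: with inverse demand p = ub_p - q and cost 0.5, the surviving firm's optimal quantity is q* = (ub_p - cost)/2 = 9.5/2 and its profit is q***2.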
def bankrpt(u1, u2):
if u1 > bankrupt_threshold1 and u2 < bankrupt_threshold2:
u1 += discount * mono_util
u2 += bankrupt_penalty
elif u1 < bankrupt_threshold1 and u2 > bankrupt_threshold2:
u1 += bankrupt_penalty
u2 += discount * mono_util
else:
u1 += bankrupt_penalty
u2 += bankrupt_penalty
return u1, u2
def utility(q1, q2):
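    # Inverse linear demand: p = ub_p - q1 - q2. Joint overproduction (q1 + q2 > ub_p) leaves only production costs, and the bankruptcy rule settles payoffs.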
if q1 + q2 > ub_p:
u1 = -cost * q1
u2 = -cost * q2
u1, u2 = bankrpt(u1, u2)
else:
p = ub_p - q1 - q2
u1 = q1 * (p - cost)
u2 = q2 * (p - cost)
return u1, u2
def BR(nash_idx, nash, p1_payoff):
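    # Best response: expected payoff of every pure quantity against the opponent's mixed strategy; the argmax index is halved to map back to a quantity (step_size = 0.5).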
dim = len(np.arange(lb_q, ub_q, step_size))
act = np.zeros(dim)
for idx, ne in zip(nash_idx, nash):
act[int(idx)] = ne
util_vect = np.sum(p1_payoff * act, axis=1)
util_vect = np.reshape(util_vect, newshape=(len(util_vect),))
x = np.argmax(util_vect)
return x/2
def beneficial_dev(nash_idx, nash, p1_payoff):
dim = len(np.arange(lb_q, ub_q, step_size))
act = np.zeros(dim)
for idx, ne in zip(nash_idx, nash):
act[int(idx)] = ne
util_vect = np.sum(p1_payoff * act, axis=1)
util_vect = np.reshape(util_vect, newshape=(len(util_vect),))
x = np.argsort(util_vect)[-2]
return x / 2
def rand(str):
all_acts = np.arange(lb_q, ub_q, step_size)
diff = np.setdiff1d(all_acts, str)
x = np.random.choice(diff)
return x
def create_payoff_matrix():
dim = len(np.arange(lb_q, ub_q, step_size))
p1_payoff = np.zeros((dim, dim))
p2_payoff = np.zeros((dim, dim))
i = 0
for q1 in np.arange(lb_q, ub_q, step_size):
j = 0
for q2 in np.arange(lb_q, ub_q, step_size):
u1, u2 = utility(q1, q2)
p1_payoff[i, j] = u1
p2_payoff[i, j] = u2
j += 1
i += 1
# print(p1_payoff)
return p1_payoff, p2_payoff
def extract_submatrix(idx_x, idx_y, matrix):
submatrix = np.zeros((len(idx_x), len(idx_y)))
for i, idx in enumerate(idx_x):
for j, idy in enumerate(idx_y):
submatrix[i,j] = matrix[int(idx), int(idy)]
return submatrix
def regret(nash_1, nash_2, str_p1, str_p2, subgame_u1, subgame_u2, p1_payoff, p2_payoff):
nash_1 = np.reshape(nash_1, newshape=(len(nash_1),1))
ne_u1 = np.sum(nash_1 * subgame_u1 * nash_2)
ne_u2 = np.sum(nash_1 * subgame_u2 * nash_2)
dim, _ = np.shape(p1_payoff)
ne_1 = np.zeros(dim)
ne_2 = np.zeros(dim)
for i, value in zip(str_p1, nash_1):
ne_1[int(i*2)] = value
ne_1 = np.reshape(ne_1, newshape=(len(ne_1), 1))
for i, value in zip(str_p2, nash_2):
ne_2[int(i*2)] = value
max_u1 = np.max(np.sum(p1_payoff * ne_2, axis=1))
max_u2 = np.max(np.sum(ne_1 * p2_payoff, axis=0))
regret_p1 = np.maximum(max_u1 - ne_u1, 0)
regret_p2 = np.maximum(max_u2 - ne_u2, 0)
return np.maximum(regret_p1, regret_p2)
def NashConv(nash_1, nash_2, str_p1, str_p2, subgame_u1, subgame_u2, p1_payoff, p2_payoff):
nash_1 = np.reshape(nash_1, newshape=(len(nash_1),1))
ne_u1 = np.sum(nash_1 * subgame_u1 * nash_2)
ne_u2 = np.sum(nash_1 * subgame_u2 * nash_2)
dim, _ = np.shape(p1_payoff)
ne_1 = np.zeros(dim)
ne_2 = np.zeros(dim)
for i, value in zip(str_p1, nash_1):
ne_1[int(i*2)] = value
ne_1 = np.reshape(ne_1, newshape=(len(ne_1), 1))
for i, value in zip(str_p2, nash_2):
ne_2[int(i*2)] = value
max_u1 = np.max(np.sum(p1_payoff * ne_2, axis=1))
max_u2 = np.max(np.sum(ne_1 * p2_payoff, axis=0))
regret_p1 = np.maximum(max_u1 - ne_u1, 0)
regret_p2 = np.maximum(max_u2 - ne_u2, 0)
return regret_p1 + regret_p2
def run(p1_payoff, p2_payoff):
np.random.seed(0)
regret_list = []
str_p1 = []
str_p2 = []
epoch = 0
x1, x2 = 0, 0
str_p1.append(x1)
str_p2.append(x2)
subgame_u1 = extract_submatrix(np.array(str_p1) * 2, np.array(str_p2) * 2, p1_payoff)
subgame_u2 = extract_submatrix(np.array(str_p1) * 2, np.array(str_p2) * 2, p2_payoff)
is_terminal = True
switch = False
while is_terminal:
epoch += 1
nelist = do_gambit_analysis(subgame_u1, subgame_u2, return_list=True)
# nash_2, nash_1 = do_gambit_analysis(subgame_u1, subgame_u2, maxent=False, minent=True)
nash_2, nash_1 = do_gambit_analysis(subgame_u1, subgame_u2, maxent=True, minent=False)
regret_list.append(regret(nash_1, nash_2, np.array(str_p1), np.array(str_p2), subgame_u1, subgame_u2, p1_payoff, p2_payoff))
# DO solver
if switch:
x1 = BR(np.array(str_p2) * 2, nash_2, p1_payoff)
x2 = BR(np.array(str_p1) * 2, nash_1, p1_payoff)
# Beneficial Deviation
if not switch:
x1 = beneficial_dev(np.array(str_p2) * 2, nash_2, p1_payoff)
x2 = beneficial_dev(np.array(str_p1) * 2, nash_1, p1_payoff)
# random
# x1 = rand(np.array(str_p1))
# x2 = rand(np.array(str_p2))
if epoch == 10:
switch = True
str_p1.append(x1)
str_p2.append(x2)
print("--------------------------------")
print("Current Epoch is ", epoch)
print("ne_list:", nelist)
print("Current NE is ", nash_1, nash_2)
print("x1:", str_p1)
print("x2:", str_p2)
# if x1 not in str_p1:
# str_p1.append(x1)
# if x2 not in str_p2:
# str_p2.append(x2)
subgame_u1 = extract_submatrix(np.array(str_p1) * 2, np.array(str_p2) * 2, p1_payoff)
subgame_u2 = extract_submatrix(np.array(str_p1) * 2, np.array(str_p2) * 2, p2_payoff)
if epoch == 20:
is_terminal = False
print(regret_list)
print("x1:", str_p1)
print("x2:", str_p2)
# p1_payoff, p2_payoff = create_payoff_matrix()
# run(p1_payoff, p2_payoff)
| [
"numpy.sum",
"numpy.maximum",
"numpy.random.seed",
"numpy.argmax",
"numpy.setdiff1d",
"numpy.zeros",
"numpy.shape",
"numpy.argsort",
"attackgraph.gambit_analysis.do_gambit_analysis",
"numpy.arange",
"numpy.array",
"numpy.random.choice"
] | [((969, 982), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (977, 982), True, 'import numpy as np\n'), ((1066, 1097), 'numpy.sum', 'np.sum', (['(p1_payoff * act)'], {'axis': '(1)'}), '(p1_payoff * act, axis=1)\n', (1072, 1097), True, 'import numpy as np\n'), ((1172, 1192), 'numpy.argmax', 'np.argmax', (['util_vect'], {}), '(util_vect)\n', (1181, 1192), True, 'import numpy as np\n'), ((1314, 1327), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (1322, 1327), True, 'import numpy as np\n'), ((1411, 1442), 'numpy.sum', 'np.sum', (['(p1_payoff * act)'], {'axis': '(1)'}), '(p1_payoff * act, axis=1)\n', (1417, 1442), True, 'import numpy as np\n'), ((1593, 1625), 'numpy.arange', 'np.arange', (['lb_q', 'ub_q', 'step_size'], {}), '(lb_q, ub_q, step_size)\n', (1602, 1625), True, 'import numpy as np\n'), ((1637, 1664), 'numpy.setdiff1d', 'np.setdiff1d', (['all_acts', 'str'], {}), '(all_acts, str)\n', (1649, 1664), True, 'import numpy as np\n'), ((1673, 1695), 'numpy.random.choice', 'np.random.choice', (['diff'], {}), '(diff)\n', (1689, 1695), True, 'import numpy as np\n'), ((1803, 1823), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (1811, 1823), True, 'import numpy as np\n'), ((1840, 1860), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (1848, 1860), True, 'import numpy as np\n'), ((1886, 1918), 'numpy.arange', 'np.arange', (['lb_q', 'ub_q', 'step_size'], {}), '(lb_q, ub_q, step_size)\n', (1895, 1918), True, 'import numpy as np\n'), ((2591, 2627), 'numpy.sum', 'np.sum', (['(nash_1 * subgame_u1 * nash_2)'], {}), '(nash_1 * subgame_u1 * nash_2)\n', (2597, 2627), True, 'import numpy as np\n'), ((2640, 2676), 'numpy.sum', 'np.sum', (['(nash_1 * subgame_u2 * nash_2)'], {}), '(nash_1 * subgame_u2 * nash_2)\n', (2646, 2676), True, 'import numpy as np\n'), ((2691, 2710), 'numpy.shape', 'np.shape', (['p1_payoff'], {}), '(p1_payoff)\n', (2699, 2710), True, 'import numpy as np\n'), ((2723, 2736), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (2731, 2736), True, 'import numpy as np\n'), ((2748, 2761), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (2756, 2761), True, 'import numpy as np\n'), ((3088, 3117), 'numpy.maximum', 'np.maximum', (['(max_u1 - ne_u1)', '(0)'], {}), '(max_u1 - ne_u1, 0)\n', (3098, 3117), True, 'import numpy as np\n'), ((3134, 3163), 'numpy.maximum', 'np.maximum', (['(max_u2 - ne_u2)', '(0)'], {}), '(max_u2 - ne_u2, 0)\n', (3144, 3163), True, 'import numpy as np\n'), ((3176, 3208), 'numpy.maximum', 'np.maximum', (['regret_p1', 'regret_p2'], {}), '(regret_p1, regret_p2)\n', (3186, 3208), True, 'import numpy as np\n'), ((3372, 3408), 'numpy.sum', 'np.sum', (['(nash_1 * subgame_u1 * nash_2)'], {}), '(nash_1 * subgame_u1 * nash_2)\n', (3378, 3408), True, 'import numpy as np\n'), ((3421, 3457), 'numpy.sum', 'np.sum', (['(nash_1 * subgame_u2 * nash_2)'], {}), '(nash_1 * subgame_u2 * nash_2)\n', (3427, 3457), True, 'import numpy as np\n'), ((3472, 3491), 'numpy.shape', 'np.shape', (['p1_payoff'], {}), '(p1_payoff)\n', (3480, 3491), True, 'import numpy as np\n'), ((3504, 3517), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (3512, 3517), True, 'import numpy as np\n'), ((3529, 3542), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (3537, 3542), True, 'import numpy as np\n'), ((3869, 3898), 'numpy.maximum', 'np.maximum', (['(max_u1 - ne_u1)', '(0)'], {}), '(max_u1 - ne_u1, 0)\n', (3879, 3898), True, 'import numpy as np\n'), ((3915, 3944), 'numpy.maximum', 'np.maximum', (['(max_u2 - ne_u2)', '(0)'], {}), '(max_u2 - ne_u2, 0)\n', (3925, 3944), True, 'import numpy as np\n'), ((4015, 4032), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4029, 4032), True, 'import numpy as np\n'), ((925, 957), 'numpy.arange', 'np.arange', (['lb_q', 'ub_q', 'step_size'], {}), '(lb_q, ub_q, step_size)\n', (934, 957), True, 'import numpy as np\n'), ((1270, 1302), 'numpy.arange', 'np.arange', (['lb_q', 'ub_q', 'step_size'], {}), '(lb_q, ub_q, step_size)\n', (1279, 1302), True, 'import numpy as np\n'), ((1518, 1539), 'numpy.argsort', 'np.argsort', (['util_vect'], {}), '(util_vect)\n', (1528, 1539), True, 'import numpy as np\n'), ((1753, 1785), 'numpy.arange', 'np.arange', (['lb_q', 'ub_q', 'step_size'], {}), '(lb_q, ub_q, step_size)\n', (1762, 1785), True, 'import numpy as np\n'), ((1952, 1984), 'numpy.arange', 'np.arange', (['lb_q', 'ub_q', 'step_size'], {}), '(lb_q, ub_q, step_size)\n', (1961, 1984), True, 'import numpy as np\n'), ((2983, 3015), 'numpy.sum', 'np.sum', (['(p1_payoff * ne_2)'], {'axis': '(1)'}), '(p1_payoff * ne_2, axis=1)\n', (2989, 3015), True, 'import numpy as np\n'), ((3037, 3069), 'numpy.sum', 'np.sum', (['(ne_1 * p2_payoff)'], {'axis': '(0)'}), '(ne_1 * p2_payoff, axis=0)\n', (3043, 3069), True, 'import numpy as np\n'), ((3764, 3796), 'numpy.sum', 'np.sum', (['(p1_payoff * ne_2)'], {'axis': '(1)'}), '(p1_payoff * ne_2, axis=1)\n', (3770, 3796), True, 'import numpy as np\n'), ((3818, 3850), 'numpy.sum', 'np.sum', (['(ne_1 * p2_payoff)'], {'axis': '(0)'}), '(ne_1 * p2_payoff, axis=0)\n', (3824, 3850), True, 'import numpy as np\n'), ((4443, 4503), 'attackgraph.gambit_analysis.do_gambit_analysis', 'do_gambit_analysis', (['subgame_u1', 'subgame_u2'], {'return_list': '(True)'}), '(subgame_u1, subgame_u2, return_list=True)\n', (4461, 4503), False, 'from attackgraph.gambit_analysis import do_gambit_analysis\n'), ((4626, 4695), 'attackgraph.gambit_analysis.do_gambit_analysis', 'do_gambit_analysis', (['subgame_u1', 'subgame_u2'], {'maxent': '(True)', 'minent': '(False)'}), '(subgame_u1, subgame_u2, maxent=True, minent=False)\n', (4644, 4695), False, 'from attackgraph.gambit_analysis import do_gambit_analysis\n'), ((4197, 4213), 'numpy.array', 'np.array', (['str_p1'], {}), '(str_p1)\n', (4205, 4213), True, 'import numpy as np\n'), ((4219, 4235), 'numpy.array', 'np.array', (['str_p2'], {}), '(str_p2)\n', (4227, 4235), True, 'import numpy as np\n'), ((4287, 4303), 'numpy.array', 'np.array', (['str_p1'], {}), '(str_p1)\n', (4295, 4303), True, 'import numpy as np\n'), ((4309, 4325), 'numpy.array', 'np.array', (['str_p2'], {}), '(str_p2)\n', (4317, 4325), True, 'import numpy as np\n'), ((4746, 4762), 'numpy.array', 'np.array', (['str_p1'], {}), '(str_p1)\n', (4754, 4762), True, 'import numpy as np\n'), ((4764, 4780), 'numpy.array', 'np.array', (['str_p2'], {}), '(str_p2)\n', (4772, 4780), True, 'import numpy as np\n'), ((5793, 5809), 'numpy.array', 'np.array', (['str_p1'], {}), '(str_p1)\n', (5801, 5809), True, 'import numpy as np\n'), ((5815, 5831), 'numpy.array', 'np.array', (['str_p2'], {}), '(str_p2)\n', (5823, 5831), True, 'import numpy as np\n'), ((5887, 5903), 'numpy.array', 'np.array', (['str_p1'], {}), '(str_p1)\n', (5895, 5903), True, 'import numpy as np\n'), ((5909, 5925), 'numpy.array', 'np.array', (['str_p2'], {}), '(str_p2)\n', (5917, 5925), True, 'import numpy as np\n'), ((4889, 4905), 'numpy.array', 'np.array', (['str_p2'], {}), '(str_p2)\n', (4897, 4905), True, 'import numpy as np\n'), ((4950, 4966), 'numpy.array', 'np.array', (['str_p1'], {}), '(str_p1)\n', (4958, 4966), True, 'import numpy as np\n'), ((5078, 5094), 'numpy.array', 'np.array', (['str_p2'], {}), '(str_p2)\n', (5086, 5094), True, 'import numpy as np\n'), ((5151, 5167), 'numpy.array', 'np.array', (['str_p1'], {}), '(str_p1)\n', (5159, 5167), True, 'import numpy as np\n')]
"Layer visualization of Comics Net models"
import random
import matplotlib.pyplot as plt
import numpy as np
import torch
from IPython.display import clear_output
from torch import Tensor, tensor
def init_pixel_buf(size: int, cuda: bool = False, seed=None) -> Tensor:
"""Initialize a pixel buffer.
Args:
size (int): Size of buffer
Returns:
Tensor: Pixel buffer
"""
if seed is not None:
np.random.seed(seed)
img_buf = torch.empty(1, 3, size, size).normal_(mean=0, std=0.01)
if cuda:
img_buf = torch.sigmoid(tensor(img_buf)).cuda()
else:
img_buf = torch.sigmoid(tensor(img_buf))
return img_buf
def init_fft_buf(size: int, sd: float = 0.01, cuda: bool = False, seed=None) -> Tensor:
"""Initialize a Discrete Fourier Transform buffer.
Args:
size (int): Size of buffer
Returns:
Tensor: Discrete Fourier Transform buffer
"""
if seed is not None:
np.random.seed(seed)
# size => output shape; scale => standard deviation of gaussian distribution
img_buf = np.random.normal(size=(1, 3, size, size // 2 + 1, 2), scale=sd).astype(
np.float32
)
if cuda:
spectrum_t = tensor(img_buf).float().cuda()
else:
spectrum_t = tensor(img_buf).float()
return spectrum_t
def fft_to_rgb(
t: Tensor,
d: float = 0.5,
decay_power: int = 1,
cuda: bool = False,
seed=None,
**kwargs,
) -> Tensor:
"""Transform a Discrete Fourier Transform to an RGB object.
Args:
t: Tensor to transform
d (optional): Sample spacing (inverse of the sampling rate). Defaults to 0.5.
decay_power (optional): Applies to scale rate. Defaults to 1.
Returns:
        Tensor: Spatial-domain RGB image recovered by the inverse real FFT.
"""
if seed is not None:
np.random.seed(seed)
size = t.shape[-3]
fy = np.fft.fftfreq(size, d=d)[:, None]
fx = np.fft.fftfreq(size, d=d)[: size // 2 + 1]
freqs = np.sqrt(fx * fx + fy * fy)
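    # Radial frequency magnitudes; amplitudes are scaled ~1/f below so the optimized image starts from a natural 1/f spectrum.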
scale = 1.0 / np.maximum(freqs, 1.0 / size) ** decay_power
if cuda:
scale = tensor(scale).float()[None, None, ..., None].cuda()
else:
scale = tensor(scale).float()[None, None, ..., None]
scale *= size
t = scale * t
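    # Note: torch.irfft was removed in PyTorch 1.8; newer versions would use torch.fft.irfft2 instead.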
image_t = torch.irfft(t, signal_ndim=2, signal_sizes=(size, size))
image_t = image_t / 4.0
return image_t
def lucid_to_rgb(t: Tensor, cuda: bool = False, seed=None) -> Tensor:
"""Decorrelate color of an RGB tensor.
Args:
t (Tensor): Tensor to decorrelate
Returns:
Tensor: Decorrelated tensor
"""
if seed is not None:
np.random.seed(seed)
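    # Fixed 3x3 color correlation matrix (the constant used in the Lucid feature-visualization library); mapping through it decorrelates the RGB channels.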
color_correlation_svd_sqrt = np.asarray(
[[0.26, 0.09, 0.02], [0.27, 0.00, -0.05], [0.27, -0.09, 0.03]]
).astype(np.float32)
max_norm_svd_sqrt = np.max(np.linalg.norm(color_correlation_svd_sqrt, axis=0))
t_flat = t.permute(0, 2, 3, 1)
if cuda:
color_correlation_normalized = tensor(
color_correlation_svd_sqrt / max_norm_svd_sqrt
).cuda()
else:
color_correlation_normalized = tensor(
color_correlation_svd_sqrt / max_norm_svd_sqrt
)
t_flat = torch.matmul(t_flat, color_correlation_normalized.T)
t = t_flat.permute(0, 3, 1, 2)
return t
def image_buf_to_rgb(
img_buf: Tensor, jitter: float, decorrelate=True, fft=True, **kwargs
) -> Tensor:
"""Transform an image buffer to RGB buffer
Args:
img_buf (Tensor): Image buffer to transform
jitter (float): Amount to jitter pixel locations by
        decorrelate (bool, optional): Whether the buffer lives in the decorrelated color space and must be mapped back to RGB. Defaults to True.
        fft (bool, optional): Whether the buffer is parameterized in Fourier space and needs an inverse FFT. Defaults to True.
Returns:
Tensor: Transformed RGB tensor
"""
img = img_buf.detach()
if fft:
img = fft_to_rgb(img, **kwargs)
size = img.shape[-1]
x_off, y_off = jitter // 2, jitter // 2
if decorrelate:
img = lucid_to_rgb(img)
img = torch.sigmoid(img)
img = img[
:, :, x_off : x_off + size - jitter, y_off : y_off + size - jitter
] # jitter
img = img.squeeze()
return img
def show_rgb(img: Tensor, label=None, ax=None, dpi=25, **kwargs) -> None:
"""Plot an image.
Args:
        img (Tensor): Image tensor of shape (channels, height, width).
        label (str, optional): Title drawn above the image. Defaults to None.
        ax (matplotlib axes, optional): Axes to draw into; a new figure is
            created when None. Defaults to None.
        dpi (int, optional): Dots per inch used to size the created figure. Defaults to 25.
"""
    plt_show = ax is None
    if ax is None:
_, ax = plt.subplots(figsize=(img.shape[1] / dpi, img.shape[2] / dpi))
x = img.cpu().permute(1, 2, 0).numpy()
ax.imshow(x)
ax.axis("off")
ax.set_title(label)
if plt_show:
plt.show()
def visualize_feature(
model,
layer,
feature,
size=200,
jitter=25,
steps=2000,
lr=0.05,
decorrelate=True,
fft=True,
debug=False,
frames=10,
show=True,
**kwargs,
):
img_buf = init_fft_buf(size + jitter) if fft else init_pixel_buf(size + jitter)
img_buf.requires_grad_()
opt = torch.optim.Adam([img_buf], lr=lr)
hook_out = None
def callback(m, i, o):
nonlocal hook_out
hook_out = o
hook = layer.register_forward_hook(callback)
for i in range(1, steps + 1):
x_off, y_off = (
int(np.random.random() * jitter),
int(np.random.random() * jitter),
)
img = fft_to_rgb(img_buf, **kwargs) if fft else img_buf
img = img[:, :, x_off : x_off + size + 1, y_off : y_off + size + 1] # jitter
if decorrelate:
img = lucid_to_rgb(img) # decorrelate color
model(img.cuda())
opt.zero_grad()
loss = -1 * hook_out[0][feature].mean()
loss.backward()
opt.step()
        if debug and i % (steps // frames) == 0:
clear_output(wait=True)
show_rgb(
image_buf_to_rgb(
img_buf, jitter, decorrelate=decorrelate, fft=fft, **kwargs
),
label=f"step: {i} loss: {loss}",
**kwargs,
)
hook.remove()
retval = image_buf_to_rgb(
img_buf, jitter, decorrelate=decorrelate, fft=fft, **kwargs
)
if show:
if not debug:
show_rgb(retval, **kwargs)
else:
return retval
| [
"numpy.random.seed",
"matplotlib.pyplot.show",
"numpy.maximum",
"torch.irfft",
"numpy.asarray",
"torch.empty",
"torch.sigmoid",
"torch.optim.Adam",
"numpy.fft.fftfreq",
"numpy.linalg.norm",
"numpy.random.normal",
"numpy.random.random",
"IPython.display.clear_output",
"torch.matmul",
"mat... | [((1966, 1992), 'numpy.sqrt', 'np.sqrt', (['(fx * fx + fy * fy)'], {}), '(fx * fx + fy * fy)\n', (1973, 1992), True, 'import numpy as np\n'), ((2259, 2315), 'torch.irfft', 'torch.irfft', (['t'], {'signal_ndim': '(2)', 'signal_sizes': '(size, size)'}), '(t, signal_ndim=2, signal_sizes=(size, size))\n', (2270, 2315), False, 'import torch\n'), ((3182, 3234), 'torch.matmul', 'torch.matmul', (['t_flat', 'color_correlation_normalized.T'], {}), '(t_flat, color_correlation_normalized.T)\n', (3194, 3234), False, 'import torch\n'), ((3948, 3966), 'torch.sigmoid', 'torch.sigmoid', (['img'], {}), '(img)\n', (3961, 3966), False, 'import torch\n'), ((5079, 5113), 'torch.optim.Adam', 'torch.optim.Adam', (['[img_buf]'], {'lr': 'lr'}), '([img_buf], lr=lr)\n', (5095, 5113), False, 'import torch\n'), ((435, 455), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (449, 455), True, 'import numpy as np\n'), ((972, 992), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (986, 992), True, 'import numpy as np\n'), ((1812, 1832), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1826, 1832), True, 'import numpy as np\n'), ((1867, 1892), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['size'], {'d': 'd'}), '(size, d=d)\n', (1881, 1892), True, 'import numpy as np\n'), ((1911, 1936), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['size'], {'d': 'd'}), '(size, d=d)\n', (1925, 1936), True, 'import numpy as np\n'), ((2623, 2643), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2637, 2643), True, 'import numpy as np\n'), ((2817, 2867), 'numpy.linalg.norm', 'np.linalg.norm', (['color_correlation_svd_sqrt'], {'axis': '(0)'}), '(color_correlation_svd_sqrt, axis=0)\n', (2831, 2867), True, 'import numpy as np\n'), ((3091, 3145), 'torch.tensor', 'tensor', (['(color_correlation_svd_sqrt / max_norm_svd_sqrt)'], {}), '(color_correlation_svd_sqrt / max_norm_svd_sqrt)\n', (3097, 3145), False, 'from torch import Tensor, tensor\n'), ((4536, 4598), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(img.shape[1] / dpi, img.shape[2] / dpi)'}), '(figsize=(img.shape[1] / dpi, img.shape[2] / dpi))\n', (4548, 4598), True, 'import matplotlib.pyplot as plt\n'), ((4727, 4737), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4735, 4737), True, 'import matplotlib.pyplot as plt\n'), ((471, 500), 'torch.empty', 'torch.empty', (['(1)', '(3)', 'size', 'size'], {}), '(1, 3, size, size)\n', (482, 500), False, 'import torch\n'), ((639, 654), 'torch.tensor', 'tensor', (['img_buf'], {}), '(img_buf)\n', (645, 654), False, 'from torch import Tensor, tensor\n'), ((1089, 1152), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 3, size, size // 2 + 1, 2)', 'scale': 'sd'}), '(size=(1, 3, size, size // 2 + 1, 2), scale=sd)\n', (1105, 1152), True, 'import numpy as np\n'), ((2011, 2040), 'numpy.maximum', 'np.maximum', (['freqs', '(1.0 / size)'], {}), '(freqs, 1.0 / size)\n', (2021, 2040), True, 'import numpy as np\n'), ((2678, 2751), 'numpy.asarray', 'np.asarray', (['[[0.26, 0.09, 0.02], [0.27, 0.0, -0.05], [0.27, -0.09, 0.03]]'], {}), '([[0.26, 0.09, 0.02], [0.27, 0.0, -0.05], [0.27, -0.09, 0.03]])\n', (2688, 2751), True, 'import numpy as np\n'), ((5856, 5879), 'IPython.display.clear_output', 'clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (5868, 5879), False, 'from IPython.display import clear_output\n'), ((1283, 1298), 'torch.tensor', 'tensor', (['img_buf'], {}), '(img_buf)\n', (1289, 1298), False, 'from torch import Tensor, tensor\n'), ((2958, 
3012), 'torch.tensor', 'tensor', (['(color_correlation_svd_sqrt / max_norm_svd_sqrt)'], {}), '(color_correlation_svd_sqrt / max_norm_svd_sqrt)\n', (2964, 3012), False, 'from torch import Tensor, tensor\n'), ((573, 588), 'torch.tensor', 'tensor', (['img_buf'], {}), '(img_buf)\n', (579, 588), False, 'from torch import Tensor, tensor\n'), ((2163, 2176), 'torch.tensor', 'tensor', (['scale'], {}), '(scale)\n', (2169, 2176), False, 'from torch import Tensor, tensor\n'), ((5336, 5354), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5352, 5354), True, 'import numpy as np\n'), ((5382, 5400), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5398, 5400), True, 'import numpy as np\n'), ((1221, 1236), 'torch.tensor', 'tensor', (['img_buf'], {}), '(img_buf)\n', (1227, 1236), False, 'from torch import Tensor, tensor\n'), ((2085, 2098), 'torch.tensor', 'tensor', (['scale'], {}), '(scale)\n', (2091, 2098), False, 'from torch import Tensor, tensor\n')] |
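The fft_to_rgb routine above weights each spatial frequency by roughly 1/frequency before the inverse transform, which biases the optimization toward smooth, low-frequency images. A minimal numpy-only sketch of that scaling step, with toy values (size=8, d=0.5, decay_power=1) chosen for illustration rather than taken from the source:

import numpy as np

size, d, decay_power = 8, 0.5, 1                     # toy values for illustration
fy = np.fft.fftfreq(size, d=d)[:, None]              # vertical frequencies, shape (8, 1)
fx = np.fft.fftfreq(size, d=d)[: size // 2 + 1]      # rfft keeps size // 2 + 1 horizontal bins
freqs = np.sqrt(fx * fx + fy * fy)                   # radial frequency per spectrum cell
scale = size / np.maximum(freqs, 1.0 / size) ** decay_power
print(scale.round(1))                                # largest weights sit at the lowest frequencies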
import json
import numpy as np
import os
import random
from tqdm import tqdm
RATIO1 = 0.8
RATIO2 = 0.9
weapon_index_dict = {}
def weapon2index(weapon_list):
# global w
global weapon_index_dict
res = []
for weapon in weapon_list:
if weapon in weapon_index_dict:
res.append(weapon_index_dict[weapon])
# sort
res.sort()
return res
def process_data(data):
# data for each player (10 in total)
processed_data = {} # [data, label]
prev_round_score = {}
for round in range(2, 31):
if str(round) not in data:
if round == 2:
return None
break
if round != 2 and len(processed_data) != 10:
return None
round_valid = True
round_data = {}
teams = data[str(round)]["teams"]
for _, team in teams.items():
players = team["players"]
for _, player in players.items():
player_name = player["player_name"]
if round == 2:
processed_data[player_name] = []
if player["team_number"] is None:
return None
is_terrorist = int(player["team_number"]) == 2
round_start = player["round_start"]
if round_start["weapons"] is None:
round_valid = False
continue
weapon_start = round_start["weapons"].split(',')
if round_start["has_defuser"]:
weapon_start.append("defuser")
if round_start["armor"] > 0:
if round_start["has_helmet"]:
weapon_start.append("vesthelm")
else:
weapon_start.append("vest")
weapon_start = weapon2index(weapon_start)
if round == 16:
continue
# round is not 1 or 16, add round data to result only if data is valid
player_data = []
# player's team, 0 for terrorist and 1 for counter terrorist
player_data.append([0 if is_terrorist else 1])
# player's weapons at round start
player_data.append(weapon_start)
# player's money at round start, divided by 1k for normalization
player_data.append([int(player["round_start"]["account"]) / 1000])
# player's performance score at round start, divided by 10*round_num for normalization
player_score = int(player["round_start"]["player_score"])
prev_round_score[player_name] = player_score
player_data.append([player_score / (round * 10)])
# team vs opponent score
if data[str(round)]["TvsCT"] is None or not isinstance(data[str(round)]["TvsCT"], str):
# data anomaly
round_valid = False
continue
# T VS CT score
T, CT = data[str(round)]["TvsCT"].split("vs")
if is_terrorist:
player_data.append([int(T) / 15, int(CT) / 15])
else:
player_data.append([int(CT) / 15, int(T) / 15])
teammate_data = []
valid = True
for _, p2 in players.items():
if p2["round_start"]["weapons"] is None:
# data anomaly
valid = False
break
weapon_start = p2["round_start"]["weapons"].split(',')
if p2["round_start"]["has_defuser"]:
weapon_start.append("defuser")
if p2["round_start"]["armor"] > 0:
if p2["round_start"]["has_helmet"]:
weapon_start.append("vesthelm")
else:
weapon_start.append("vest")
teammate_weapons = weapon2index(weapon_start)
teammate_money = [int(p2["round_start"]["account"]) / 1000]
if p2["round_start"]["player_score"] is None:
# data anomaly
valid = False
break
teammate_score = [int(p2["round_start"]["player_score"]) / (round * 10)]
                # teammates' weapons, money and score at round start
teammate_data.append([teammate_weapons, teammate_money, teammate_score])
if not valid:
round_valid = False
continue
player_data.append(teammate_data)
            # opponents' data
valid = True
opponents_data = []
for _, t2 in teams.items():
for _, p2 in t2["players"].items():
if p2["team_number"] is None:
valid = False
break
if int(p2["team_number"]) != int(player["team_number"]):
opponent_money = [int(p2["round_start"]["account"]) / 1000]
opponent_score = [int(p2["round_start"]["player_score"]) / (round * 10)]
                            # opponents' money and score at round start, weapons at round start
if p2["round_start"]["weapons"] is None:
# data anomaly
valid = False
break
weapon_start = p2["round_start"]["weapons"].split(',')
if p2["round_start"]["has_defuser"]:
weapon_start.append("defuser")
if p2["round_start"]["armor"] > 0:
if p2["round_start"]["has_helmet"]:
weapon_start.append("vesthelm")
else:
weapon_start.append("vest")
opponent_weapons = weapon2index(weapon_start)
opponents_data.append([opponent_weapons, opponent_money, opponent_score])
if not valid:
round_valid = False
continue
player_data.append(opponents_data)
# weapons round_freeze_end
round_freeze_end = player["round_freeze_end"]
if round_freeze_end["weapons"] is None:
# data anomaly
round_valid = False
continue
weapon_freeze_end = round_freeze_end["weapons"].split(',')
if round_freeze_end["has_defuser"]:
weapon_freeze_end.append("defuser")
if round_freeze_end["armor"] > 0:
if round_freeze_end["has_helmet"]:
weapon_freeze_end.append("vesthelm")
else:
weapon_freeze_end.append("vest")
weapon_freeze_end = weapon2index(weapon_freeze_end)
# player's purchasing actions
pickups = []
for _, pickup in player["pickup"].items():
if pickup["price"] is not None and pickup["price"] > 0:
pickups.append(pickup)
pickups.sort(key=lambda x: x["timestamp"])
player_label = []
for pickup in pickups:
for weapon in pickup["equip_names"]:
player_label.append(weapon)
if len(player_label) > 10:
                # might be noisy data
round_valid = False
continue
player_label = weapon2index(player_label)
# add data to round_data
player_score_cur = [player_score - prev_round_score[player_name]]
round_data[player_name] = [player_data, player_label, weapon_freeze_end, player_score_cur]
# add data of this round to result
if round_valid:
for player_name, r_data in round_data.items():
processed_data[player_name].append(r_data)
for player_name, p_data in processed_data.items():
if len(p_data) < 7:
return None
return processed_data
def process_dataset(dataset_dir):
global weapon_index_dict
with open("../data/weapon_index.json") as f:
weapon_index_dict = json.load(f)
processed_data = []
for file in tqdm(os.listdir(dataset_dir + "raw/")):
with open(os.path.join(dataset_dir + "raw/", file)) as f:
match = json.load(f)
match_data = process_data(match) # len == 10
if match_data is None:
continue
if len(match_data) != 10:
break
processed_data.append(match_data)
random.seed(4164)
random.shuffle(processed_data)
train_set = []
val_set = []
test_set = []
total = len(processed_data)
for i, match_data in enumerate(processed_data):
md = []
for _, pd in match_data.items():
md.append(pd)
if 0 <= i < int(RATIO1 * total):
train_set.append(np.asarray(md))
elif int(RATIO1 * total) <= i < int(RATIO2 * total):
val_set.append(np.asarray(md))
else:
test_set.append(np.asarray(md))
print("train set: ", len(train_set), end=" ")
print("val set: ", len(val_set), end=" ")
print("test set: ", len(test_set))
np.save(dataset_dir + "processed.npy", (train_set, val_set, test_set))
def read_dataset(dataset_dir):
train_set, val_set, test_set = np.load(dataset_dir + "processed.npy", allow_pickle=True)
print("train set: ", len(train_set), end=" ")
print("val set: ", len(val_set), end=" ")
print("test set: ", len(test_set))
return train_set, val_set, test_set
if __name__ == "__main__":
dataset_dir = "../data/dataset/"
process_dataset(dataset_dir)
# train_set, val_set, test_set = read_dataset(dataset_dir) | [
"numpy.load",
"numpy.save",
"json.load",
"random.shuffle",
"numpy.asarray",
"random.seed",
"os.path.join",
"os.listdir"
] | [((9208, 9225), 'random.seed', 'random.seed', (['(4164)'], {}), '(4164)\n', (9219, 9225), False, 'import random\n'), ((9230, 9260), 'random.shuffle', 'random.shuffle', (['processed_data'], {}), '(processed_data)\n', (9244, 9260), False, 'import random\n'), ((9886, 9956), 'numpy.save', 'np.save', (["(dataset_dir + 'processed.npy')", '(train_set, val_set, test_set)'], {}), "(dataset_dir + 'processed.npy', (train_set, val_set, test_set))\n", (9893, 9956), True, 'import numpy as np\n'), ((10024, 10081), 'numpy.load', 'np.load', (["(dataset_dir + 'processed.npy')"], {'allow_pickle': '(True)'}), "(dataset_dir + 'processed.npy', allow_pickle=True)\n", (10031, 10081), True, 'import numpy as np\n'), ((8797, 8809), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8806, 8809), False, 'import json\n'), ((8856, 8888), 'os.listdir', 'os.listdir', (["(dataset_dir + 'raw/')"], {}), "(dataset_dir + 'raw/')\n", (8866, 8888), False, 'import os\n'), ((8977, 8989), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8986, 8989), False, 'import json\n'), ((8909, 8949), 'os.path.join', 'os.path.join', (["(dataset_dir + 'raw/')", 'file'], {}), "(dataset_dir + 'raw/', file)\n", (8921, 8949), False, 'import os\n'), ((9567, 9581), 'numpy.asarray', 'np.asarray', (['md'], {}), '(md)\n', (9577, 9581), True, 'import numpy as np\n'), ((9671, 9685), 'numpy.asarray', 'np.asarray', (['md'], {}), '(md)\n', (9681, 9685), True, 'import numpy as np\n'), ((9729, 9743), 'numpy.asarray', 'np.asarray', (['md'], {}), '(md)\n', (9739, 9743), True, 'import numpy as np\n')] |
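The RATIO1/RATIO2 constants above slice the shuffled match list into contiguous train/validation/test subsets. A quick sketch of the boundary arithmetic, using a hypothetical 100-match dataset (the count is illustrative, not from the source):

RATIO1, RATIO2 = 0.8, 0.9
total = 100                                                 # hypothetical match count
train_ids = range(0, int(RATIO1 * total))                   # matches 0..79
val_ids = range(int(RATIO1 * total), int(RATIO2 * total))   # matches 80..89
test_ids = range(int(RATIO2 * total), total)                # matches 90..99
print(len(train_ids), len(val_ids), len(test_ids))          # 80 10 10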
# Author: <NAME>
# Date: 2020, November 1st
# Location: China Ningxia Yinchuan
import numpy as np
class Agent(object):
def __init__(self, alpha, gamma, epsilon = 0):
self.alpha = alpha
self.gamma = gamma
self.epsilon = epsilon
def optimal_states(self, state_options):
# state_options is a dictionary with:
# key: state pointer
# value: return, i.e. Q(s,a)
ans = []
temp = max(state_options, key = state_options.get)
for i in state_options:
if state_options[i] == state_options[temp]:
ans.append(i)
return ans
class Sarsa_Agent(Agent):
def selectAction(self, state_options):
        # this is an epsilon-greedy action selection
# state_options is a dictionary with:
# key: state pointer
# value: return, i.e. Q(s,a)
opt_option = np.random.choice(self.optimal_states(state_options)) # random tie breaking
p = np.random.rand() # returns random decimal between 0 and 1
if p < self.epsilon: # do random action
return np.random.choice(list(state_options.keys()))
else: # do optimal option
return opt_option
def updateActionValue(self, savt, cur_state, cur_action, next_state, next_action, ret):
# i.e Q(s,a) <- Q(s,a) + alpha[ret + gamma*Q(s',a') - Q(s,a)]
cur_stateActionValue = savt.getStateActionValue(cur_state, cur_action)
next_stateActionValue = savt.getStateActionValue(next_state, next_action)
td_error = ret + self.gamma * next_stateActionValue - cur_stateActionValue
new_value = cur_stateActionValue + self.alpha * td_error
savt.setStateActionValue(cur_state, cur_action, new_value)
class Q_Learning_Agent(Agent):
def selectAction(self, state_options):
        # this is an epsilon-greedy action selection
# state_options is a dictionary with:
# key: state pointer
# value: return, i.e. Q(s,a)
opt_option = np.random.choice(self.optimal_states(state_options)) # random tie breaking
p = np.random.rand() # returns random decimal between 0 and 1
if p < self.epsilon: # do random action
return np.random.choice(list(state_options.keys()))
else: # do optimal option
return opt_option
def updateActionValue(self, savt, cur_state, cur_action, opt_next_state, opt_next_action, ret):
# i.e Q(s,a) <- Q(s,a) + alpha[ret + gamma * max(Q(s',a')) - Q(s,a)]
cur_stateActionValue = savt.getStateActionValue(cur_state, cur_action)
opt_next_stateActionValue = savt.getStateActionValue(opt_next_state, opt_next_action)
td_error = ret + self.gamma * opt_next_stateActionValue - cur_stateActionValue
new_value = cur_stateActionValue + self.alpha * td_error
savt.setStateActionValue(cur_state, cur_action, new_value)
| [
"numpy.random.rand"
] | [((843, 859), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (857, 859), True, 'import numpy as np\n'), ((1855, 1871), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1869, 1871), True, 'import numpy as np\n')] |
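The two updateActionValue implementations above differ only in the TD target: SARSA bootstraps from the action actually taken next (on-policy), while Q-learning bootstraps from the greedy next action (off-policy). A one-step numeric sketch with made-up values:

alpha, gamma = 0.1, 0.9
q_sa, reward = 0.5, 1.0
q_next_taken, q_next_max = 0.2, 0.7       # Q(s', a') for the sampled action vs. the greedy one

sarsa_q = q_sa + alpha * (reward + gamma * q_next_taken - q_sa)    # on-policy target
qlearning_q = q_sa + alpha * (reward + gamma * q_next_max - q_sa)  # off-policy target
print(sarsa_q, qlearning_q)                # ~0.568 ~0.613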
# -*- coding: utf-8 -*-
import numpy as np
import os
from pprint import pprint
import pyopencl as cl
import sys
from lib.clip import *
os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'
def loadMakeImageProgram(width, height, pcount, colorDimensions, precision):
precisionMultiplier = int(10 ** precision)
# the kernel function
srcCode = """
static float normF(float value, float a, float b) {
float n = (value - a) / (b - a);
return n;
}
static int4 blendColors(int4 color1, int4 color2, float amount) {
float invAmount = 1.0 - amount;
// x, y, z, w = r, g, b, a
int r = (int) round(((float) color1.x * amount) + ((float) color2.x * invAmount));
int g = (int) round(((float) color1.y * amount) + ((float) color2.y * invAmount));
int b = (int) round(((float) color1.z * amount) + ((float) color2.z * invAmount));
int a = (int) round(((float) color1.w * amount) + ((float) color2.w * invAmount));
return (int4)(r, g, b, a);
}
static int4 setBrightness(int4 color, float brightness) {
// x, y, z, w = r, g, b, a
int r = (int) round((float) color.x * brightness);
int g = (int) round((float) color.y * brightness);
int b = (int) round((float) color.z * brightness);
int a = color.w; // retain alpha
return (int4)(r, g, b, a);
}
int4 getPixel(__global uchar *pdata, int x, int y, int h, int w, int dim, int offset);
int4 getPixelF(__global uchar *pdata, float xF, float yF, int h, int w, int dim, int offset);
int4 getPixel(__global uchar *pdata, int x, int y, int h, int w, int dim, int offset) {
// check bounds; retain rgb color of edge, but make alpha=0
bool isVisible = true;
if (x < 0) { isVisible = false; x = 0; }
if (y < 0) { isVisible = false; y = 0; }
if (x >= w) { isVisible = false; x = w-1; }
if (y >= h) { isVisible = false; y = h-1; }
int index = y * w * dim + x * dim + offset;
int r = pdata[index];
int g = pdata[index+1];
int b = pdata[index+2];
int a = 255;
if (dim > 3) {
a = pdata[index+3];
}
if (!isVisible) {
a = 0;
}
return (int4)(r, g, b, a);
}
int4 getPixelF(__global uchar *pdata, float xF, float yF, int h, int w, int dim, int offset) {
if (xF < -1.0) { xF = -1.0; }
if (yF < -1.0) { yF = -1.0; }
if (xF > (float)(w+1)) { xF = (float)(w+1); }
if (yF > (float)(h+1)) { yF = (float)(h+1); }
int x0 = (int) floor(xF);
int x1 = (int) ceil(xF);
float xLerp = xF - (float) x0;
int y0 = (int) floor(yF);
int y1 = (int) ceil(yF);
float yLerp = yF - (float) y0;
xLerp = 1.0 - xLerp;
yLerp = 1.0 - yLerp;
int4 colorTL = getPixel(pdata, x0, y0, h, w, dim, offset);
int4 colorTR = getPixel(pdata, x1, y0, h, w, dim, offset);
int4 colorBL = getPixel(pdata, x0, y1, h, w, dim, offset);
int4 colorBR = getPixel(pdata, x1, y1, h, w, dim, offset);
int4 colorT = blendColors(colorTL, colorTR, xLerp);
int4 colorB = blendColors(colorBL, colorBR, xLerp);
int4 finalcolor = blendColors(colorT, colorB, yLerp);
// avoid dark corners
//if (colorT.w < 255 && colorB.w < 255) {
// finalcolor.w = max(colorT.w, colorB.w);
//}
return finalcolor;
}
__kernel void makeImage(__global uchar *pdata, __global int *props, __global int *zvalues, __global uchar *result){
int canvasW = %d;
int canvasH = %d;
int i = get_global_id(0);
int pcount = %d;
int colorDimensions = %d;
int precisionMultiplier = %d;
int offset = props[i*pcount];
float xF = (float) props[i*pcount+1] / (float) precisionMultiplier;
float yF = (float) props[i*pcount+2] / (float) precisionMultiplier;
int x = (int) floor(xF);
int y = (int) floor(yF);
float remainderX = xF - (float) x;
float remainderY = yF - (float) y;
int w = props[i*pcount+3];
int h = props[i*pcount+4];
float twF = (float) props[i*pcount+5] / (float) precisionMultiplier;
float thF = (float) props[i*pcount+6] / (float) precisionMultiplier;
float remainderW = (remainderX+twF) - floor(remainderX+twF);
float remainderH = (remainderY+thF) - floor(remainderY+thF);
//int tw = (int) ceil(twF);
//int th = (int) ceil(thF);
int tw = (int) ceil(remainderX+twF);
int th = (int) ceil(remainderY+thF);
float falpha = (float) props[i*pcount+7] / (float) precisionMultiplier;
int alpha = (int)round(falpha*(float)255.0);
int zdindex = props[i*pcount+8];
float fbrightness = (float) props[i*pcount+9] / (float) precisionMultiplier;
for (int row=0; row<th; row++) {
for (int col=0; col<tw; col++) {
int dstX = col + x;
int dstY = row + y;
float srcNX = normF((float) col, remainderX, remainderX+twF-1.0);
float srcNY = normF((float) row, remainderY, remainderY+thF-1.0);
float srcXF = srcNX * (float) (w-1);
float srcYF = srcNY * (float) (h-1);
//float srcXF = normF((float) col, remainderX, remainderX+twF) * (float) (w-1);
//float srcYF = normF((float) row, remainderY, remainderY+thF) * (float) (h-1);
if (srcNX < 0.0) { srcXF = -remainderX; }
if (srcNY < 0.0) { srcYF = -remainderY; }
if (srcNX > 1.0) { srcXF = (float) (w-1) + (1.0-remainderW); }
if (srcNY > 1.0) { srcYF = (float) (h-1) + (1.0-remainderH); }
if (dstX >= 0 && dstX < canvasW && dstY >= 0 && dstY < canvasH) {
int4 srcColor = getPixelF(pdata, srcXF, srcYF, h, w, colorDimensions, offset);
if (fbrightness < 1.0) {
srcColor = setBrightness(srcColor, fbrightness);
}
int destIndex = dstY * canvasW * 3 + dstX * 3;
int destZIndex = dstY * canvasW * 2 + dstX * 2;
int destZValue = zvalues[destZIndex];
int destZAlpha = zvalues[destZIndex+1];
// nothing is there yet, give it full opacity
if (destZIndex <= 0) {
destZAlpha = 255;
}
float dalpha = (float) destZAlpha / (float) 255.0;
float salpha = (float) srcColor.w / (float) 255.0;
float talpha = salpha * falpha;
// r, g, b, a = x, y, z, w
// if alpha is greater than zero and there's not already a pixel there with full opacity and higher zindex
if (talpha > 0.0 && (zdindex > destZValue || dalpha < 1.0)) {
// there's already a pixel there; place it behind it using its alpha
if (zdindex < destZValue) {
talpha = (1.0 - dalpha) * talpha;
}
// mix the existing color with new color if necessary
int dr = result[destIndex];
int dg = result[destIndex+1];
int db = result[destIndex+2];
int4 destColor = (int4)(dr, dg, db, destZAlpha);
int4 blendedColor = blendColors(srcColor, destColor, talpha);
result[destIndex] = blendedColor.x;
result[destIndex+1] = blendedColor.y;
result[destIndex+2] = blendedColor.z;
// assign new zindex if it's greater
if (zdindex > destZValue) {
zvalues[destZIndex] = zdindex;
zvalues[destZIndex+1] = blendedColor.w;
}
}
}
}
}
}
""" % (width, height, pcount, colorDimensions, precisionMultiplier)
return loadGPUProgram(srcCode)
def clipsToImageGPU(width, height, flatPixelData, properties, colorDimensions, precision, gpuProgram=None, baseImage=None):
count, pcount = properties.shape
# blank image if no clip data
if count <= 0 and baseImage is None:
return np.zeros((height, width, 3), dtype=np.uint8)
# base image if exists
elif count <= 0:
return np.array(baseImage, dtype=np.uint8)
properties = properties.reshape(-1)
zvalues = np.zeros(width * height * 2, dtype=np.int32)
result = np.zeros(width * height * 3, dtype=np.uint8) if baseImage is None else np.array(baseImage, dtype=np.uint8).reshape(-1)
# baseImage = np.copy(result)
ctx = prg = None
if gpuProgram is not None:
ctx, prg = gpuProgram
else:
ctx, prg = loadMakeImageProgram(width, height, pcount, colorDimensions, precision)
# Create queue for each kernel execution
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
bufIn1 = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=flatPixelData)
bufIn2 = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=properties)
bufInZ = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=zvalues)
# bufInB = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=baseImage)
bufOut = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=result)
prg.makeImage(queue, (count, ), None , bufIn1, bufIn2, bufInZ, bufOut)
# Copy result
cl.enqueue_copy(queue, result, bufOut)
result = result.reshape(height, width, 3)
return result
def clipsToImageGPULite(width, height, flatPixelData, properties):
count, pcount = properties.shape
properties = properties.reshape(-1)
result = np.zeros(width * height * 3, dtype=np.uint8)
# the kernel function
srcCode = """
int4 getPixel(__global uchar *pdata, int x, int y, int h, int w, int dim, int offset);
int4 getPixel(__global uchar *pdata, int x, int y, int h, int w, int dim, int offset) {
if (x < 0 || y < 0 || x >= w || y >= h) {
return (int4)(0, 0, 0, 0);
}
int index = y * w * dim + x * dim + offset;
int r = pdata[index];
int g = pdata[index+1];
int b = pdata[index+2];
return (int4)(r, g, b, 0);
}
__kernel void makeImageLite(__global uchar *pdata, __global int *props, __global uchar *result){
int canvasW = %d;
int canvasH = %d;
int i = get_global_id(0);
int pcount = %d;
int colorDimensions = 3;
int offset = props[i*pcount];
int x = props[i*pcount+1];
int y = props[i*pcount+2];
int w = props[i*pcount+3];
int h = props[i*pcount+4];
for (int row=0; row<h; row++) {
for (int col=0; col<w; col++) {
int dstX = col + x;
int dstY = row + y;
if (dstX >= 0 && dstX < canvasW && dstY >= 0 && dstY < canvasH) {
int4 srcColor = getPixel(pdata, col, row, h, w, colorDimensions, offset);
int destIndex = dstY * canvasW * 3 + dstX * 3;
result[destIndex] = srcColor.x;
result[destIndex+1] = srcColor.y;
result[destIndex+2] = srcColor.z;
}
}
}
}
""" % (width, height, pcount)
ctx, prg = loadGPUProgram(srcCode)
# Create queue for each kernel execution
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
bufIn1 = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=flatPixelData)
bufIn2 = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=properties)
bufOut = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=result)
prg.makeImageLite(queue, (count, ), None , bufIn1, bufIn2, bufOut)
# Copy result
cl.enqueue_copy(queue, result, bufOut)
result = result.reshape(height, width, 3)
return result
def loadGPUProgram(srcCode):
# Get platforms, both CPU and GPU
plat = cl.get_platforms()
GPUs = plat[0].get_devices(device_type=cl.device_type.GPU)
CPU = plat[0].get_devices()
# prefer GPUs
if GPUs and len(GPUs) > 0:
ctx = cl.Context(devices=GPUs)
else:
print("Warning: using CPU instead of GPU")
ctx = cl.Context(CPU)
# Kernel function instantiation
prg = cl.Program(ctx, srcCode).build()
return (ctx, prg)
| [
"pyopencl.get_platforms",
"pyopencl.enqueue_copy",
"pyopencl.Context",
"numpy.zeros",
"pyopencl.CommandQueue",
"pyopencl.Buffer",
"pyopencl.Program",
"numpy.array"
] | [((8723, 8767), 'numpy.zeros', 'np.zeros', (['(width * height * 2)'], {'dtype': 'np.int32'}), '(width * height * 2, dtype=np.int32)\n', (8731, 8767), True, 'import numpy as np\n'), ((9176, 9196), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['ctx'], {}), '(ctx)\n', (9191, 9196), True, 'import pyopencl as cl\n'), ((9234, 9304), 'pyopencl.Buffer', 'cl.Buffer', (['ctx', '(mf.READ_ONLY | mf.COPY_HOST_PTR)'], {'hostbuf': 'flatPixelData'}), '(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=flatPixelData)\n', (9243, 9304), True, 'import pyopencl as cl\n'), ((9319, 9386), 'pyopencl.Buffer', 'cl.Buffer', (['ctx', '(mf.READ_ONLY | mf.COPY_HOST_PTR)'], {'hostbuf': 'properties'}), '(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=properties)\n', (9328, 9386), True, 'import pyopencl as cl\n'), ((9400, 9465), 'pyopencl.Buffer', 'cl.Buffer', (['ctx', '(mf.READ_WRITE | mf.COPY_HOST_PTR)'], {'hostbuf': 'zvalues'}), '(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=zvalues)\n', (9409, 9465), True, 'import pyopencl as cl\n'), ((9562, 9626), 'pyopencl.Buffer', 'cl.Buffer', (['ctx', '(mf.READ_WRITE | mf.COPY_HOST_PTR)'], {'hostbuf': 'result'}), '(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=result)\n', (9571, 9626), True, 'import pyopencl as cl\n'), ((9725, 9763), 'pyopencl.enqueue_copy', 'cl.enqueue_copy', (['queue', 'result', 'bufOut'], {}), '(queue, result, bufOut)\n', (9740, 9763), True, 'import pyopencl as cl\n'), ((9986, 10030), 'numpy.zeros', 'np.zeros', (['(width * height * 3)'], {'dtype': 'np.uint8'}), '(width * height * 3, dtype=np.uint8)\n', (9994, 10030), True, 'import numpy as np\n'), ((11707, 11727), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['ctx'], {}), '(ctx)\n', (11722, 11727), True, 'import pyopencl as cl\n'), ((11765, 11835), 'pyopencl.Buffer', 'cl.Buffer', (['ctx', '(mf.READ_ONLY | mf.COPY_HOST_PTR)'], {'hostbuf': 'flatPixelData'}), '(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=flatPixelData)\n', (11774, 11835), True, 'import pyopencl as cl\n'), ((11850, 11917), 'pyopencl.Buffer', 'cl.Buffer', (['ctx', '(mf.READ_ONLY | mf.COPY_HOST_PTR)'], {'hostbuf': 'properties'}), '(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=properties)\n', (11859, 11917), True, 'import pyopencl as cl\n'), ((11931, 11995), 'pyopencl.Buffer', 'cl.Buffer', (['ctx', '(mf.READ_WRITE | mf.COPY_HOST_PTR)'], {'hostbuf': 'result'}), '(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=result)\n', (11940, 11995), True, 'import pyopencl as cl\n'), ((12090, 12128), 'pyopencl.enqueue_copy', 'cl.enqueue_copy', (['queue', 'result', 'bufOut'], {}), '(queue, result, bufOut)\n', (12105, 12128), True, 'import pyopencl as cl\n'), ((12272, 12290), 'pyopencl.get_platforms', 'cl.get_platforms', ([], {}), '()\n', (12288, 12290), True, 'import pyopencl as cl\n'), ((8524, 8568), 'numpy.zeros', 'np.zeros', (['(height, width, 3)'], {'dtype': 'np.uint8'}), '((height, width, 3), dtype=np.uint8)\n', (8532, 8568), True, 'import numpy as np\n'), ((8781, 8825), 'numpy.zeros', 'np.zeros', (['(width * height * 3)'], {'dtype': 'np.uint8'}), '(width * height * 3, dtype=np.uint8)\n', (8789, 8825), True, 'import numpy as np\n'), ((12449, 12473), 'pyopencl.Context', 'cl.Context', ([], {'devices': 'GPUs'}), '(devices=GPUs)\n', (12459, 12473), True, 'import pyopencl as cl\n'), ((12549, 12564), 'pyopencl.Context', 'cl.Context', (['CPU'], {}), '(CPU)\n', (12559, 12564), True, 'import pyopencl as cl\n'), ((8632, 8667), 'numpy.array', 'np.array', (['baseImage'], {'dtype': 'np.uint8'}), '(baseImage, dtype=np.uint8)\n', (8640, 8648), True, 'import numpy as np\n'), ((12612, 12636), 'pyopencl.Program', 'cl.Program', (['ctx', 'srcCode'], {}), '(ctx, srcCode)\n', (12622, 12636), True, 'import pyopencl as cl\n'), ((8852, 8887), 'numpy.array', 'np.array', (['baseImage'], {'dtype': 'np.uint8'}), '(baseImage, dtype=np.uint8)\n', (8860, 8887), True, 'import numpy as np\n')] |
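getPixelF in the kernels above implements bilinear sampling: two horizontal blends followed by one vertical blend. A CPU-side numpy sketch of the same arithmetic on an arbitrary 2x2 patch (values chosen purely for illustration):

import numpy as np

patch = np.array([[10.0, 20.0],    # top-left, top-right
                  [30.0, 40.0]])   # bottom-left, bottom-right
frac_x, frac_y = 0.25, 0.75         # fractional position inside the patch
top = patch[0, 0] * (1 - frac_x) + patch[0, 1] * frac_x
bottom = patch[1, 0] * (1 - frac_x) + patch[1, 1] * frac_x
print(top * (1 - frac_y) + bottom * frac_y)  # 27.5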
import psycopg2
import numpy as np
import pandas as pd
from shutil import copyfile
import faiss as fs
import os
class Faisser:
def __init__(self, faiss_path):
if not os.path.exists(faiss_path):
message = {
'status': 'error',
'message': 'NO FAISS FILE FOUND, PLEASE CHECK LOCATION OF INDEX'
}
print(message)
else:
self.faiss_index = fs.read_index(faiss_path, fs.IO_FLAG_ONDISK_SAME_DIR)
self.faiss_path = faiss_path
def get_records_amount(self):
"""Getting records amount stored in faiss index
Returns
-------
amount : int
amount of records in faiss index
"""
amount = self.faiss_index.ntotal
return amount
def save_faiss_index(self, path_to_save):
"""Saving modified faiss index in the give location
Parameters
----------
path_to_save : str
Path to the location where to save the file
Returns
-------
boolean : bool
            if successful returns True, else False
"""
try:
fs.write_index(self.faiss_index, path_to_save)
return True
except:
return False
def delete_from_faiss(self, ud_code):
"""Delete record from faiss index by id
Parameters
----------
ud_code : str
Id of the current image obtained from the name of image
Returns
-------
boolean : bool
if successful returns True, else False
"""
try:
self.faiss_index.remove_ids(np.array([int(ud_code)]))
return True
except:
return False
def search_from_faiss_top_n(self, one_vector, top_n):
"""Search top 1 record from faiss index that matches given embedding
Parameters
----------
one_vector : np.array
(1,512) array with feature embedding
top_n : int
integer in range 1 to size of index
Returns
-------
distance, index : tuple
tuple with distances and indices of records or (None, None) if not found
"""
try:
topn = 1
if self.faiss_index.ntotal >= top_n:
topn = top_n
else:
topn = self.faiss_index.ntotal
if self.faiss_index.ntotal > 1000000:
self.faiss_index.nprobe = 4096
else:
self.faiss_index.nprobe = 256
query = np.array([one_vector], dtype=np.float32)
D, I = self.faiss_index.search(query, topn)
return D, I
except:
return None, None
def search_from_faiss_top_1(self, one_vector, threshold):
"""Search top 1 record from faiss index that matches given embedding
Parameters
----------
one_vector : np.array
(1,512) array with feature embedding
threshold : int
integer in range 0 to 100
Returns
-------
distance, index : tuple
tuple with distance and index of the record or (None, None) if not found
"""
try:
topn = 1
if self.faiss_index.ntotal > 1000000:
self.faiss_index.nprobe = 4096
else:
self.faiss_index.nprobe = 256
query = np.array([one_vector], dtype=np.float32)
D, I = self.faiss_index.search(query, topn)
distance = float(threshold)/100
if D[0][0] >= distance:
return D[0], I[0]
else:
return None, None
except:
return None, None
def insert_into_faiss(self, ud_code, feature):
"""Inserting id and embedding into faiss index
Parameters
----------
ud_code: str
id of the current image
feature : np.array
(1,512) sized feature embedding of the image
Returns
-------
boolean : bool
            if successful returns True, else False
"""
try:
vector = np.array([feature], dtype=np.float32)
ids = np.array([int(ud_code)])
# self.faiss_index.train(vector)
self.faiss_index.add_with_ids(vector, ids)
return True
except:
return False
def search_new_person(self, vector, distance):
res = None
try:
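            # NOTE: assumes self.engine (a SQLAlchemy engine or DB connection)
            # is configured elsewhere; it is not created in __init__ above.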
vectors_archive = pd.read_sql('SELECT * FROM fr.vectors_archive', self.engine)
# if this does not work, try to divide table into multiple dataframes
unique_id = vectors_archive['unique_id']
vectors = vectors_archive['vector']
cameras = vectors_archive['camera_id']
# servers = vectors_archive['server_id']
except:
print('Could not get data from fr.vectors_archive')
return res
distance = float(distance)/100
try:
res = {'person': []}
for i in range(len(vectors)):
vec = np.fromstring(vectors[i][1:-1], dtype=float, sep=',')
dist = np.dot(vector, vec)
if dist > distance:
dct = {'unique_id': unique_id[i], 'score': dist, 'camera': cameras[i]}
res['person'].append(dct)
copyfile('/home/dastrix/PROJECTS/face_reco_2_loc/face_reco_2_loc/application/static/frames_folder/' + str(unique_id[i]) + '_' + str(cameras[i]) + '.jpg', '/home/dastrix/PROJECTS/face_reco_2_loc/face_reco_2_loc/application/static/search_result/' + str(unique_id[i]) + '_' + str(cameras[i]) + '.jpg')
except:
print('Could not compare features')
return res
def insert_person_into_faiss(self, ud_code, feature):
"""Inserting id and embedding into faiss index
Parameters
----------
ud_code: str
id of the current image
feature : np.array
(1,512) sized feature embedding of the image
Returns
-------
message : dict
dictionary with statuses
"""
message = None
current_faiss = self.get_records_amount()
if current_faiss > 0:
faiss_res = self.insert_into_faiss(ud_code, feature)
if faiss_res:
save = self.save_faiss_index(self.faiss_path)
if save:
if self.get_records_amount() > current_faiss:
message = { 'status': 'success',
'message': 'Record successfully inserted into INDEX',
'ud_code': ud_code,
'previous_amount': current_faiss,
'updated_amount': self.get_records_amount()
}
else:
message = {
'status': 'error',
'message': 'INDEX NOT CHANGED'
}
else:
message = {
'status': 'error',
'message': 'COULD NOT SAVE INDEX'
}
return message
else:
message = {
'status': 'error',
'message': 'INDEX IS EMPTY OR NOT PROPERLY READ'
}
return message
def delete_person_from_faiss(self, ud_code):
"""Delete person by id from faiss index
Parameters
----------
ud_code : str
Id of the current image obtained from the name of image
Returns
-------
message : dict
dictionary with statuses
"""
message = None
previous = self.get_records_amount()
# faiss_index.remove_ids(np.array([int(ud_code)]))
        delete = self.delete_from_faiss(ud_code)
if delete:
if self.get_records_amount() < previous:
save = self.save_faiss_index(self.faiss_path)
if save:
message = {
'status': 'success',
'message': 'Person deleted from INDEX',
'ud_code': ud_code,
'number of people in faiss': self.get_records_amount()
}
else:
message = {
'status': 'error',
'message': 'Deletion not successful. Index not changed.'
}
return message
else:
message = {
'status': 'error',
'message': 'Could not delete from FAISS index. Person is already deleted or does not exist.'
}
return message
else:
message = {
'status': 'error',
'message': 'Deletion not successful. Index not changed.'
}
return message
def search_person_from_faiss(self, feature, threshold):
"""Test search of the person from faiss index
Parameters
----------
feature : np.array
(1,512) array with feature embedding
threshold : int
integer in range 0 to 100
Returns
-------
message : dict
dictionary with statuses
"""
message = None
if len(feature) > 0:
distance, index = self.search_from_faiss_top_1(feature, threshold)
if index is not None:
message = {
'status': 'success',
'distance': distance,
'ud_code': index,
'message': 'Record successfully found'
}
return message
else:
message = {
'status': 'error',
'ud_code': None,
'message': 'No person found'
}
return message
else:
message = {'status': 'error', 'message': 'wrong embedding'}
return message
| [
"faiss.write_index",
"faiss.read_index",
"os.path.exists",
"numpy.array",
"numpy.dot",
"numpy.fromstring"
] | [((160, 186), 'os.path.exists', 'os.path.exists', (['faiss_path'], {}), '(faiss_path)\n', (174, 186), False, 'import os\n'), ((430, 483), 'faiss.read_index', 'fs.read_index', (['faiss_path', 'fs.IO_FLAG_ONDISK_SAME_DIR'], {}), '(faiss_path, fs.IO_FLAG_ONDISK_SAME_DIR)\n', (443, 483), True, 'import faiss as fs\n'), ((1160, 1206), 'faiss.write_index', 'fs.write_index', (['self.faiss_index', 'path_to_save'], {}), '(self.faiss_index, path_to_save)\n', (1174, 1206), True, 'import faiss as fs\n'), ((2602, 2642), 'numpy.array', 'np.array', (['[one_vector]'], {'dtype': 'np.float32'}), '([one_vector], dtype=np.float32)\n', (2610, 2642), True, 'import numpy as np\n'), ((3468, 3508), 'numpy.array', 'np.array', (['[one_vector]'], {'dtype': 'np.float32'}), '([one_vector], dtype=np.float32)\n', (3476, 3508), True, 'import numpy as np\n'), ((4220, 4257), 'numpy.array', 'np.array', (['[feature]'], {'dtype': 'np.float32'}), '([feature], dtype=np.float32)\n', (4228, 4257), True, 'import numpy as np\n'), ((5181, 5234), 'numpy.fromstring', 'np.fromstring', (['vectors[i][1:-1]'], {'dtype': 'float', 'sep': '""","""'}), "(vectors[i][1:-1], dtype=float, sep=',')\n", (5194, 5234), True, 'import numpy as np\n'), ((5258, 5277), 'numpy.dot', 'np.dot', (['vector', 'vec'], {}), '(vector, vec)\n', (5264, 5277), True, 'import numpy as np\n')] |
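For reference, the insert/search round trip used by the class above can be reproduced against a small in-memory index. This sketch assumes a flat inner-product index; the type of the on-disk index read in __init__ is not shown in the source:

import faiss
import numpy as np

dim = 512
index = faiss.IndexIDMap(faiss.IndexFlatIP(dim))        # flat IP index with explicit ids
vec = np.random.rand(1, dim).astype(np.float32)
vec /= np.linalg.norm(vec)                              # normalized, so IP behaves like cosine
index.add_with_ids(vec, np.array([42], dtype=np.int64))
D, I = index.search(vec, 1)
print(D[0][0], I[0][0])                                # ~1.0 42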
#!/usr/bin/env python
"""
flow_director_mfd.py: provides the component FlowDirectorMFD.
This components finds the steepest single-path steepest descent flow
directions. It is equivalent to D4 method in the special case of a raster grid
in that it does not consider diagonal links between nodes. For that capability,
use FlowDirectorD8.
"""
from landlab.components.flow_director.flow_director_to_many import _FlowDirectorToMany
from landlab.components.flow_director import flow_direction_mfd
from landlab import VoronoiDelaunayGrid
from landlab import FIXED_VALUE_BOUNDARY, FIXED_GRADIENT_BOUNDARY, BAD_INDEX_VALUE
import numpy
class FlowDirectorMFD(_FlowDirectorToMany):
"""Multiple-path flow direction with or without out diagonals.
Directs flow by the multiple flow direction method. Each node is assigned
multiple flow directions, toward all of the N neighboring nodes that are
lower than it. If none of the neighboring nodes are lower, the location is
identified as a pit. Flow proportions can be calculated as proportional to
slope or proportional to the square root of slope, which is the solution to
a steady kinematic wave.
Specifically, it stores as ModelGrid fields:
- Node array of receivers (nodes that receive flow), or ITS OWN ID if
there is no receiver: *'flow__receiver_node'*. This array is 2D, and is
of dimension (number of nodes x max number of receivers).
- Node array of flow proportions: *'flow__receiver_proportions'*. This
array is 2D, and is of dimension (number of nodes x max number of
receivers).
- Node array of links carrying flow: *'flow__link_to_receiver_node'*.
This array is 2D, and is of dimension (number of nodes x max number of
receivers).
- Node array of downhill slopes corresponding to each receiver.:
*'topographic__steepest_slope'* This array is 2D, and is
of dimension (number of nodes x max number of receivers). If the slope is
uphill or flat, the value is assigned zero.
- Boolean node array of all local lows: *'flow__sink_flag'*
The primary method of this class is :func:`run_one_step`.
Examples
--------
This method works for both raster and irregular grids. First we will look
at a raster example, and then an irregular example.
    >>> import numpy
>>> from landlab import RasterModelGrid
>>> from landlab.components import FlowDirectorMFD
>>> mg = RasterModelGrid((3,3), spacing=(1, 1))
>>> mg.set_closed_boundaries_at_grid_edges(True, True, True, False)
>>> _ = mg.add_field('topographic__elevation',
... mg.node_x + mg.node_y,
... at = 'node')
    The MFD flow director can be used for raster and irregular grids. For
raster grids, use of diagonal links is specified with the keyword
*diagonals* (default is False).
>>> fd = FlowDirectorMFD(mg, 'topographic__elevation', diagonals = True)
>>> fd.surface_values
array([ 0., 1., 2., 1., 2., 3., 2., 3., 4.])
>>> fd.run_one_step()
Unlike flow directors that only direct flow to one node, FlowDirectorMFD
directs flow to all downstream nodes. It stores the receiver information
    in a (number of nodes x maximum number of receivers) shape field at nodes.
>>> mg.at_node['flow__receiver_node']
array([[ 0, -1, -1, -1, -1, -1, -1, -1],
[ 1, -1, -1, -1, -1, -1, -1, -1],
[ 2, -1, -1, -1, -1, -1, -1, -1],
[ 3, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, 1, -1, -1, 0, -1],
[ 5, -1, -1, -1, -1, -1, -1, -1],
[ 6, -1, -1, -1, -1, -1, -1, -1],
[ 7, -1, -1, -1, -1, -1, -1, -1],
[ 8, -1, -1, -1, -1, -1, -1, -1]])
It also stores the proportions of flow going to each receiver, the link on
which the flow moves in at node arrays, and the slope of each link.
>>> mg.at_node['flow__receiver_proportions'] # doctest: +NORMALIZE_WHITESPACE
array([[ 1. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 1. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 1. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 1. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0.41421356, 0. ,
0. , 0.58578644, 0. ],
[ 1. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 1. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 1. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 1. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ]])
>>> mg.at_node['flow__link_to_receiver_node']
array([[-1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, 3, -1, -1, 12, -1],
[-1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1]])
>>> mg.at_node['topographic__steepest_slope'] # doctest: +NORMALIZE_WHITESPACE
array([[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 1. , 0. ,
0. , 1.41421356, 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. ]])
Finally, FlowDirectorMFD identifies sinks, or local lows.
>>> mg.at_node['flow__sink_flag']
array([1, 1, 1, 1, 0, 1, 1, 1, 1], dtype=int8)
The flow directors also have the ability to return the flow receiver nodes.
For this example, we will turn the diagonals off. This is the default
value.
>>> fd = FlowDirectorMFD(mg, 'topographic__elevation')
>>> fd.run_one_step()
>>> receivers, proportions = fd.direct_flow()
>>> receivers
array([[ 0, -1, -1, -1],
[ 1, -1, -1, -1],
[ 2, -1, -1, -1],
[ 3, -1, -1, -1],
[-1, -1, -1, 1],
[ 5, -1, -1, -1],
[ 6, -1, -1, -1],
[ 7, -1, -1, -1],
[ 8, -1, -1, -1]])
>>> proportions # doctest: +NORMALIZE_WHITESPACE
array([[ 1., 0., 0., 0.],
[ 1., 0., 0., 0.],
[ 1., 0., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.],
[ 1., 0., 0., 0.],
[ 1., 0., 0., 0.],
[ 1., 0., 0., 0.],
[ 1., 0., 0., 0.]])
For each donor node (represented by each row) the proportions should sum to
one.
>>> proportions.sum(axis=1)
array([ 1., 1., 1., 1., 1., 1., 1., 1., 1.])
For the second example we will use a Hexagonal Model Grid, a special type
    of Voronoi Grid that has regularly spaced hexagonal cells. FlowDirectorMFD
    has multiple ways to partition flow based on slope. The default method is
    based on the slope angle. A secondary method is to partition based on the
square root of slope. This represents the solution to a steady kinematic
wave.
>>> from landlab import HexModelGrid
>>> mg = HexModelGrid(5,3)
>>> _ = mg.add_field('topographic__elevation',
... mg.node_x + numpy.round(mg.node_y),
... at = 'node')
>>> fd = FlowDirectorMFD(mg, 'topographic__elevation',
... partition_method='square_root_of_slope')
>>> fd.surface_values # doctest: +NORMALIZE_WHITESPACE
array([ 0. , 1. , 2. ,
0.5, 1.5, 2.5, 3.5,
1. , 2. , 3. , 4. , 5. ,
2.5, 3.5, 4.5, 5.5,
3. , 4. , 5. ])
>>> fd.run_one_step()
>>> mg.at_node['flow__receiver_node']
array([[ 0, -1, -1, -1, -1, -1],
[ 1, -1, -1, -1, -1, -1],
[ 2, -1, -1, -1, -1, -1],
[ 3, -1, -1, -1, -1, -1],
[-1, -1, -1, 3, 0, 1],
[-1, -1, -1, 4, 1, 2],
[ 6, -1, -1, -1, -1, -1],
[ 7, -1, -1, -1, -1, -1],
[-1, -1, -1, 7, 3, 4],
[-1, -1, -1, 8, 4, 5],
[-1, -1, -1, 9, 5, 6],
[11, -1, -1, -1, -1, -1],
[12, -1, -1, -1, -1, -1],
[-1, -1, 16, 12, 8, 9],
[-1, -1, 17, 13, 9, 10],
[15, -1, -1, -1, -1, -1],
[16, -1, -1, -1, -1, -1],
[17, -1, -1, -1, -1, -1],
[18, -1, -1, -1, -1, -1]])
>>> mg.at_node['flow__receiver_proportions'] # doctest: +NORMALIZE_WHITESPACE
array([[ 1. , 0. , 0. , 0. , 0. ,
0. ],
[ 1. , 0. , 0. , 0. , 0. ,
0. ],
[ 1. , 0. , 0. , 0. , 0. ,
0. ],
[ 1. , 0. , 0. , 0. , 0. ,
0. ],
[ 0. , 0. , 0. , 0.34108138, 0.41773767,
0.24118095],
[ 0. , 0. , 0. , 0.34108138, 0.41773767,
0.24118095],
[ 1. , 0. , 0. , 0. , 0. ,
0. ],
[ 1. , 0. , 0. , 0. , 0. ,
0. ],
[ 0. , 0. , 0. , 0.34108138, 0.41773767,
0.24118095],
[ 0. , 0. , 0. , 0.34108138, 0.41773767,
0.24118095],
[ 0. , 0. , 0. , 0.34108138, 0.41773767,
0.24118095],
[ 1. , 0. , 0. , 0. , 0. ,
0. ],
[ 1. , 0. , 0. , 0. , 0. ,
0. ],
[ 0. , 0. , 0.19431571, 0.27480391, 0.33656468,
0.19431571],
[ 0. , 0. , 0.19431571, 0.27480391, 0.33656468,
0.19431571],
[ 1. , 0. , 0. , 0. , 0. ,
0. ],
[ 1. , 0. , 0. , 0. , 0. ,
0. ],
[ 1. , 0. , 0. , 0. , 0. ,
0. ],
[ 1. , 0. , 0. , 0. , 0. ,
0. ]])
>>> mg.at_node['flow__link_to_receiver_node']
array([[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, 8, 3, 4],
[-1, -1, -1, 9, 5, 6],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, 19, 12, 13],
[-1, -1, -1, 20, 14, 15],
[-1, -1, -1, 21, 16, 17],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
[-1, -1, 35, 31, 25, 26],
[-1, -1, 37, 32, 27, 28],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1]])
>>> mg.at_node['topographic__steepest_slope'] # doctest: +NORMALIZE_WHITESPACE
array([[ 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 1. , 1.5, 0.5],
[ 0. , 0. , 0. , 1. , 1.5, 0.5],
[ 0. , 0. , 1. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 1. , 1.5, 0.5],
[ 0. , 0. , 0. , 1. , 1.5, 0.5],
[ 0. , 0. , 0. , 1. , 1.5, 0.5],
[ 0. , 1. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0.5, 0. , 0. ],
[ 0. , 0. , 0.5, 1. , 1.5, 0.5],
[ 0. , 0. , 0.5, 1. , 1.5, 0.5],
[ 0. , 1. , 1.5, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0.5, 0. , 0. , 0. ],
[ 0. , 0.5, 0. , 0. , 0. , 0. ]])
>>> mg.at_node['flow__sink_flag']
array([1, 1, 1,
1, 0, 0, 1,
1, 0, 0, 0, 1,
1, 0, 0, 1,
1, 1, 1], dtype=int8)
"""
_name = "FlowDirectorMFD"
def __init__(self, grid, surface="topographic__elevation", **kwargs):
"""
Parameters
----------
grid : ModelGrid
A grid.
surface : field name at node or array of length node, optional
The surface to direct flow across, default is field at node:
            topographic__elevation.
partition_method: string, optional
Method for partitioning flow. Options include 'slope' (default) and
'square_root_of_slope'.
"""
# unpack kwargs:
partition_method = kwargs.get("partition_method", "slope")
diagonals = kwargs.get("diagonals", False)
self.method = "MFD"
super(FlowDirectorMFD, self).__init__(grid, surface)
self._is_Voroni = isinstance(self._grid, VoronoiDelaunayGrid)
if self._is_Voroni:
diagonals = False
self.updated_boundary_conditions()
self.partition_method = partition_method
self.diagonals = diagonals
        if not self._is_Voroni and not diagonals:
            self.max_receivers = 4
        elif not self._is_Voroni and diagonals:
            self.max_receivers = 8
        else:
            self.max_receivers = self._grid.adjacent_nodes_at_node.shape[1]
# set the number of recievers, proportions, and receiver links with the
# right size.
self.receivers = grid.add_field(
"flow__receiver_node",
BAD_INDEX_VALUE
* numpy.ones((self._grid.number_of_nodes, self.max_receivers), dtype=int),
at="node",
dtype=int,
noclobber=False,
)
self.steepest_slope = grid.add_field(
"topographic__steepest_slope",
BAD_INDEX_VALUE
* numpy.ones((self._grid.number_of_nodes, self.max_receivers), dtype=float),
at="node",
dtype=float,
noclobber=False,
)
self.receiver_links = grid.add_field(
"flow__link_to_receiver_node",
BAD_INDEX_VALUE
* numpy.ones((self._grid.number_of_nodes, self.max_receivers), dtype=int),
at="node",
dtype=int,
noclobber=False,
)
self.proportions = grid.add_field(
"flow__receiver_proportions",
BAD_INDEX_VALUE
* numpy.ones((self._grid.number_of_nodes, self.max_receivers), dtype=float),
at="node",
dtype=int,
noclobber=False,
)
def updated_boundary_conditions(self):
"""
Method to update FlowDirectorMFD when boundary conditions change.
Call this if boundary conditions on the grid are updated after the
component is instantiated.
"""
self._active_links = self.grid.active_links
self._activelink_tail = self.grid.node_at_link_tail[self.grid.active_links]
self._activelink_head = self.grid.node_at_link_head[self.grid.active_links]
def run_one_step(self):
"""
Find flow directions and save to the model grid.
run_one_step() checks for updated boundary conditions, calculates
        slopes on links, finds baselevel nodes based on the
status at node, calculates flow directions, and saves results to the
grid.
An alternative to run_one_step() is direct_flow() which does the same
        things but also returns the receiver node and proportion arrays.
"""
self.direct_flow()
def direct_flow(self):
"""
Find flow directions, save to the model grid, and return receivers.
direct_flow() checks for updated boundary conditions, calculates
        slopes on links, finds baselevel nodes based on the status at node,
        calculates flow directions, saves results to the grid, and returns an
        at-node array of receiver nodes. This array is stored in the grid at:
        grid['node']['flow__receiver_node']
        An alternative to direct_flow() is run_one_step(), which does the same
        things but does not return the receiver array. The array is still
        stored in the grid at:
        grid['node']['flow__receiver_node']
"""
self._check_updated_bc()
        # Step 1. Required inputs for flow_directions_MFD
# this is where diagonals are or are not included in
# flow direction calculations
# Option for no diagonals (default)
        if not self.diagonals:
neighbors_at_node = self.grid.adjacent_nodes_at_node
links_at_node = self.grid.links_at_node
active_link_dir_at_node = self.grid.active_link_dirs_at_node
# this needs to be the gradient
link_slope = self.grid.calc_grad_at_link(self.surface_values)
# Option with diagonals.
else:
# need to create a list of diagonal links since it doesn't exist.
diag_links = numpy.sort(numpy.unique(self.grid.d8s_at_node[:, 4:]))
diag_links = diag_links[diag_links > 0]
# get diagonal active links (though this actually includes ALL
# active links)
dal = self.grid.active_d8
            # calculate gradients across diagonals
diag_grads = numpy.zeros(diag_links.shape)
where_active_diag = dal >= diag_links.min()
active_diags_inds = dal[where_active_diag] - diag_links.min()
active_diag_grads = self.grid._calculate_gradients_at_d8_active_links(
self.surface_values
)
diag_grads[active_diags_inds] = active_diag_grads[where_active_diag]
# calculate gradients on orthogonal links
ortho_grads = self.grid.calc_grad_at_link(self.surface_values)
# concatenate the diagonal and orthogonal grid elements
neighbors_at_node = numpy.hstack(
(
self.grid.adjacent_nodes_at_node,
self.grid.diagonal_adjacent_nodes_at_node,
)
)
active_link_dir_at_node = numpy.hstack(
(
self.grid.active_link_dirs_at_node,
self.grid.active_diagonal_dirs_at_node,
)
)
link_slope = numpy.hstack((ortho_grads, diag_grads))
links_at_node = self.grid.d8s_at_node
# Step 2. Find and save base level nodes.
(baselevel_nodes,) = numpy.where(
numpy.logical_or(
self._grid.status_at_node == FIXED_VALUE_BOUNDARY,
self._grid.status_at_node == FIXED_GRADIENT_BOUNDARY,
)
)
# Calculate flow directions
(
self.receivers,
self.proportions,
slopes_to_receivers,
steepest_slope,
steepest_receiver,
sink,
receiver_links,
steepest_link,
) = flow_direction_mfd.flow_directions_mfd(
self.surface_values,
neighbors_at_node,
links_at_node,
active_link_dir_at_node,
link_slope,
baselevel_nodes=baselevel_nodes,
partition_method=self.partition_method,
)
# Save the four ouputs of this component.
self._grid["node"]["flow__receiver_node"][:] = self.receivers
self._grid["node"]["flow__receiver_proportions"][:] = self.proportions
self._grid["node"]["topographic__steepest_slope"][:] = slopes_to_receivers
self._grid["node"]["flow__link_to_receiver_node"][:] = receiver_links
self._grid["node"]["flow__sink_flag"][:] = False
self._grid["node"]["flow__sink_flag"][sink] = True
return (self.receivers, self.proportions)
if __name__ == "__main__": # pragma: no cover
import doctest
doctest.testmod()
| [
"numpy.unique",
"numpy.zeros",
"numpy.ones",
"numpy.hstack",
"numpy.logical_or",
"landlab.components.flow_director.flow_direction_mfd.flow_directions_mfd",
"doctest.testmod"
] | [((21834, 21851), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (21849, 21851), False, 'import doctest\n'), ((20934, 21145), 'landlab.components.flow_director.flow_direction_mfd.flow_directions_mfd', 'flow_direction_mfd.flow_directions_mfd', (['self.surface_values', 'neighbors_at_node', 'links_at_node', 'active_link_dir_at_node', 'link_slope'], {'baselevel_nodes': 'baselevel_nodes', 'partition_method': 'self.partition_method'}), '(self.surface_values,\n neighbors_at_node, links_at_node, active_link_dir_at_node, link_slope,\n baselevel_nodes=baselevel_nodes, partition_method=self.partition_method)\n', (20972, 21145), False, 'from landlab.components.flow_director import flow_direction_mfd\n'), ((19248, 19277), 'numpy.zeros', 'numpy.zeros', (['diag_links.shape'], {}), '(diag_links.shape)\n', (19259, 19277), False, 'import numpy\n'), ((19853, 19949), 'numpy.hstack', 'numpy.hstack', (['(self.grid.adjacent_nodes_at_node, self.grid.diagonal_adjacent_nodes_at_node)'], {}), '((self.grid.adjacent_nodes_at_node, self.grid.\n diagonal_adjacent_nodes_at_node))\n', (19865, 19949), False, 'import numpy\n'), ((20072, 20167), 'numpy.hstack', 'numpy.hstack', (['(self.grid.active_link_dirs_at_node, self.grid.active_diagonal_dirs_at_node)'], {}), '((self.grid.active_link_dirs_at_node, self.grid.\n active_diagonal_dirs_at_node))\n', (20084, 20167), False, 'import numpy\n'), ((20277, 20316), 'numpy.hstack', 'numpy.hstack', (['(ortho_grads, diag_grads)'], {}), '((ortho_grads, diag_grads))\n', (20289, 20316), False, 'import numpy\n'), ((20473, 20599), 'numpy.logical_or', 'numpy.logical_or', (['(self._grid.status_at_node == FIXED_VALUE_BOUNDARY)', '(self._grid.status_at_node == FIXED_GRADIENT_BOUNDARY)'], {}), '(self._grid.status_at_node == FIXED_VALUE_BOUNDARY, self.\n _grid.status_at_node == FIXED_GRADIENT_BOUNDARY)\n', (20489, 20599), False, 'import numpy\n'), ((15425, 15496), 'numpy.ones', 'numpy.ones', (['(self._grid.number_of_nodes, self.max_receivers)'], {'dtype': 'int'}), '((self._grid.number_of_nodes, self.max_receivers), dtype=int)\n', (15435, 15496), False, 'import numpy\n'), ((15715, 15788), 'numpy.ones', 'numpy.ones', (['(self._grid.number_of_nodes, self.max_receivers)'], {'dtype': 'float'}), '((self._grid.number_of_nodes, self.max_receivers), dtype=float)\n', (15725, 15788), False, 'import numpy\n'), ((16009, 16080), 'numpy.ones', 'numpy.ones', (['(self._grid.number_of_nodes, self.max_receivers)'], {'dtype': 'int'}), '((self._grid.number_of_nodes, self.max_receivers), dtype=int)\n', (16019, 16080), False, 'import numpy\n'), ((16295, 16368), 'numpy.ones', 'numpy.ones', (['(self._grid.number_of_nodes, self.max_receivers)'], {'dtype': 'float'}), '((self._grid.number_of_nodes, self.max_receivers), dtype=float)\n', (16305, 16368), False, 'import numpy\n'), ((18933, 18975), 'numpy.unique', 'numpy.unique', (['self.grid.d8s_at_node[:, 4:]'], {}), '(self.grid.d8s_at_node[:, 4:])\n', (18945, 18975), False, 'import numpy\n')] |
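A standalone numpy sketch of the two partitioning rules accepted by partition_method above, applied to made-up gradients toward four neighbors (only positive, i.e. downhill, slopes receive flow):

import numpy as np

slopes = np.array([0.2, 0.0, 0.5, 0.3])                # toy downhill gradients to 4 neighbors
downhill = np.where(slopes > 0, slopes, 0.0)

slope_props = downhill / downhill.sum()               # partition_method='slope'
sqrt_props = np.sqrt(downhill) / np.sqrt(downhill).sum()  # partition_method='square_root_of_slope'
print(slope_props.round(3), sqrt_props.round(3))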
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Test code for gshard_layers."""
from lingvo import compat as tf
from lingvo.core import gshard_builder
from lingvo.core import gshard_layers
from lingvo.core import test_utils
import numpy as np
FLAGS = tf.flags.FLAGS
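
# These tests verify two properties of the gshard depthwise-conv layers:
#   1) CausalDepthwiseConv1DLayer reproduces the reference
#      DepthwiseConvAutoregressive graph built by gshard_builder.MoEBuilder.
#   2) With a Conv1DStateLayer attached, running FProp() one step at a time
#      matches a single full-sequence FProp().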

class CausalDepthwiseConv1DLayerTest(test_utils.TestCase):

  def _GetRefParams(self, kernel_size, dim):
    builder = gshard_builder.MoEBuilder.Params().Set(
        model_dim=dim).Instantiate()
    return builder.DepthwiseConvAutoregressive('conv', kernel_size)

  def _GetParams(self, kernel_size, dim):
    p = gshard_layers.CausalDepthwiseConv1DLayer.Params().Set(
        name='conv',
        kernel_size=kernel_size,
        model_dims=dim,
        compatible_with_mtf_ckpt=True)
    return p

  def _GetInputs(self, batch, seqlen, dim):
    np.random.seed(None)
    return tf.convert_to_tensor(
        np.random.rand(batch, seqlen, dim).astype(np.float32))

  def testEqualToDepthwiseConvAutoregressive(self):
    b, seqlen, d, k = 2, 8, 4, 3
    with tf.variable_scope('ref'):
      ref_l = self._GetRefParams(k, d).Instantiate()
    with tf.variable_scope('act'):
      exp_l = self._GetParams(k, d).Instantiate()
    inputs = self._GetInputs(b, seqlen, d)
    # [b, t, d]
    ref_out = ref_l.FProp(ref_l.theta, inputs)
    # [b, t, d]
    act_out = exp_l.FProp(exp_l.theta, inputs)
    init_op = tf.global_variables_initializer()
    with self.session(use_gpu=False) as sess:
      sess.run(init_op)
      expected, actual = sess.run([ref_out, act_out])
      self.assertAllClose(expected, actual)

class Conv1DStateLayerTest(test_utils.TestCase):
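
  # These params attach a Conv1DStateLayer to the causal conv so the layer
  # can carry inference state between successive FProp() calls; the state
  # machinery is exercised step by step in testSingleStep below.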
  def _GetParams(self, kernel_size, dim):
    p = gshard_layers.CausalDepthwiseConv1DLayer.Params().Set(
        name='conv', kernel_size=kernel_size, model_dims=dim)
    p.state_layer = gshard_layers.Conv1DStateLayer.Params().Set(
        shape=[None, None, dim])
    return p

  def _GetInputs(self, batch, seqlen, dim):
    np.random.seed(None)
    np_inputs = np.random.rand(batch, seqlen, dim).astype(np.float32)
    tf.logging.info(f'np_inputs: {np_inputs}')
    return tf.convert_to_tensor(np_inputs)

  def testSingleStep(self):
    b, seqlen, dim, k, beam = 2, 8, 2, 3, 1
    inputs = self._GetInputs(b, seqlen * beam, dim)
    l = self._GetParams(k, dim).Instantiate()
    # Normal Fprop with a len=seqlen sequence.
    outputs = l.FProp(l.theta, inputs)
    state0 = gshard_layers.StateLayer.InitState(l, [b, beam, k])
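    # state0 is the streaming state carried across the single-step FProp()
    # calls below; [b, beam, k] are the batch, beam, and kernel-size dims
    # used in this test.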
    tf.logging.info(f'state0: {repr(state0)}')
    all_outputs = []
    state_t = state0
    theta_t = l.theta.DeepCopy()
    for i in range(seqlen):
      inputs_t = inputs[:, i:i + 1 * beam, :]
      # Copies state to theta.
      theta_t = gshard_layers.StateLayer.UpdateTheta(l, theta_t, state_t, t=i)
      tf.logging.info(f'theta_{i}: {repr(theta_t)}')
      # Updates theta inplace.
      out_t = l.FProp(theta_t, inputs_t)
      # Copies theta to state.
      state_t = gshard_layers.StateLayer.UpdateState(l, theta_t, state_t)
      tf.logging.info(f'state_{i}: {repr(state_t)}')
      all_outputs.append(out_t)
    # seqlen steps of FProp(), each with len=1.
    concat_step_outputs = tf.concat(all_outputs, axis=1)
    init_op = tf.global_variables_initializer()
    with self.session(use_gpu=False) as sess:
      sess.run(init_op)
      expected, actual = sess.run([outputs, concat_step_outputs])
      print(f'expected: {expected}')
      print(f'actual: {actual}')
      self.assertAllClose(expected, actual)

if __name__ == '__main__':
  tf.test.main()
| [
"lingvo.compat.variable_scope",
"lingvo.core.gshard_layers.StateLayer.UpdateState",
"lingvo.compat.test.main",
"numpy.random.seed",
"lingvo.core.gshard_layers.CausalDepthwiseConv1DLayer.Params",
"lingvo.core.gshard_builder.MoEBuilder.Params",
"lingvo.compat.concat",
"lingvo.core.gshard_layers.StateLay... | [((4195, 4209), 'lingvo.compat.test.main', 'tf.test.main', ([], {}), '()\n', (4207, 4209), True, 'from lingvo import compat as tf\n'), ((1483, 1503), 'numpy.random.seed', 'np.random.seed', (['None'], {}), '(None)\n', (1497, 1503), True, 'import numpy as np\n'), ((2045, 2078), 'lingvo.compat.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2076, 2078), True, 'from lingvo import compat as tf\n'), ((2627, 2647), 'numpy.random.seed', 'np.random.seed', (['None'], {}), '(None)\n', (2641, 2647), True, 'import numpy as np\n'), ((2722, 2764), 'lingvo.compat.logging.info', 'tf.logging.info', (['f"""np_inputs: {np_inputs}"""'], {}), "(f'np_inputs: {np_inputs}')\n", (2737, 2764), True, 'from lingvo import compat as tf\n'), ((2776, 2807), 'lingvo.compat.convert_to_tensor', 'tf.convert_to_tensor', (['np_inputs'], {}), '(np_inputs)\n', (2796, 2807), True, 'from lingvo import compat as tf\n'), ((3081, 3132), 'lingvo.core.gshard_layers.StateLayer.InitState', 'gshard_layers.StateLayer.InitState', (['l', '[b, beam, k]'], {}), '(l, [b, beam, k])\n', (3115, 3132), False, 'from lingvo.core import gshard_layers\n'), ((3834, 3864), 'lingvo.compat.concat', 'tf.concat', (['all_outputs'], {'axis': '(1)'}), '(all_outputs, axis=1)\n', (3843, 3864), True, 'from lingvo import compat as tf\n'), ((3880, 3913), 'lingvo.compat.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3911, 3913), True, 'from lingvo import compat as tf\n'), ((1696, 1720), 'lingvo.compat.variable_scope', 'tf.variable_scope', (['"""ref"""'], {}), "('ref')\n", (1713, 1720), True, 'from lingvo import compat as tf\n'), ((1784, 1808), 'lingvo.compat.variable_scope', 'tf.variable_scope', (['"""act"""'], {}), "('act')\n", (1801, 1808), True, 'from lingvo import compat as tf\n'), ((3378, 3440), 'lingvo.core.gshard_layers.StateLayer.UpdateTheta', 'gshard_layers.StateLayer.UpdateTheta', (['l', 'theta_t', 'state_t'], {'t': 'i'}), '(l, theta_t, state_t, t=i)\n', (3414, 3440), False, 'from lingvo.core import gshard_layers\n'), ((3615, 3672), 'lingvo.core.gshard_layers.StateLayer.UpdateState', 'gshard_layers.StateLayer.UpdateState', (['l', 'theta_t', 'state_t'], {}), '(l, theta_t, state_t)\n', (3651, 3672), False, 'from lingvo.core import gshard_layers\n'), ((1249, 1298), 'lingvo.core.gshard_layers.CausalDepthwiseConv1DLayer.Params', 'gshard_layers.CausalDepthwiseConv1DLayer.Params', ([], {}), '()\n', (1296, 1298), False, 'from lingvo.core import gshard_layers\n'), ((2350, 2399), 'lingvo.core.gshard_layers.CausalDepthwiseConv1DLayer.Params', 'gshard_layers.CausalDepthwiseConv1DLayer.Params', ([], {}), '()\n', (2397, 2399), False, 'from lingvo.core import gshard_layers\n'), ((2487, 2526), 'lingvo.core.gshard_layers.Conv1DStateLayer.Params', 'gshard_layers.Conv1DStateLayer.Params', ([], {}), '()\n', (2524, 2526), False, 'from lingvo.core import gshard_layers\n'), ((2664, 2698), 'numpy.random.rand', 'np.random.rand', (['batch', 'seqlen', 'dim'], {}), '(batch, seqlen, dim)\n', (2678, 2698), True, 'import numpy as np\n'), ((1545, 1579), 'numpy.random.rand', 'np.random.rand', (['batch', 'seqlen', 'dim'], {}), '(batch, seqlen, dim)\n', (1559, 1579), True, 'import numpy as np\n'), ((1053, 1087), 'lingvo.core.gshard_builder.MoEBuilder.Params', 'gshard_builder.MoEBuilder.Params', ([], {}), '()\n', (1085, 1087), False, 'from lingvo.core import gshard_builder\n')] |